blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9920180a5cfcc87b785b384e0163f7cf7f4c5a9 | 0c0e9916280177cf0f1ab00bfbd36a21ee8a2dcf | /lesson_1/lesson_1_Task_2.py | 135ac8ad91e215cee317c70079f22029da39f916 | [] | no_license | vedmara/ALgorithms_python_lessons_1-8 | 910793d69c3242107497d8c50ba94a7d46575018 | eed05abd7da584d9930cc44884813951e7c1c2a5 | refs/heads/main | 2023-04-28T08:59:53.512439 | 2023-04-18T18:19:22 | 2023-04-18T18:19:22 | 331,944,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | #Выполнить логические побитовые операции «И»,
#«ИЛИ» и др. над числами 5 и 6.
# Выполнить над числом 5 побитовый сдвиг вправо и влево на два знака.
a = 5
print(a, " = ", bin(a))
b = 6
print(b, " = ", bin(b))
print(a, " & ", b, " = ", a&b, "(", bin(a&b), ")")
print(a, " | ", b, " = ", a|b, "(", bin(a|b), ")")
print(a, " ^ ", b, " = ", a^b, "(", bin(a^b), ")")
print(a, " << 2 = ", a<<2, "(", bin(a<<2), ")")
print(a, " >> 2 = ", a>>2,"(", bin(a>>2), ")") | [
"vedmara87@gmail.com"
] | vedmara87@gmail.com |
d8a89d664f950a89c1900d9eef3be5616186ecbd | 25aa59eb559afa5aeeb4347c053f493a7e4fd9f2 | /batfight.py | 50022c456ab425373390fb7b13dbc52d966dd492 | [] | no_license | Nacrova/pythondemo | da2ccea78b0b29406cf639d4b85f83c3ac5376e2 | e621859e3d4b4269b56060b48c2a95c48b134672 | refs/heads/master | 2020-03-20T22:18:41.530855 | 2018-06-22T19:44:53 | 2018-06-22T19:44:53 | 137,790,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | print("You are fighting a Giant bat!")
move = raw_input("Please make your move:")
if move == "attack":
print("You hit the bat and it screeches loudly, but does next to nothing but hurt your ears.")
if move == "block":
print("You try to block the bat's bite, but to no effect. You bleed out hours later.")
if move == "counter":
print("You counter the bat's attack, but the sheer size of the bat knocks you down.")
if move == "run":
print("You ran like a coward, but you lived.")
else:
print("Please type attack,block,counter,or run") | [
"noah.swingle@gmail.com"
] | noah.swingle@gmail.com |
b5c2b8f10fef9596e3425dd6b98e772f2f735d1b | 93ba6d65cf4f38326f6038efa10153b4f476c595 | /231.py | ca7e7de97370bc1a4b6853803f15018f14ddd52b | [
"Apache-2.0"
] | permissive | Hann1bal/mailer | 1fe2e67cfe2c9851d901d3bf3b2bb465258e8317 | 78140b96bff30ace7714b7b5c198bc0925425a25 | refs/heads/master | 2020-03-31T19:12:53.128099 | 2018-10-10T20:57:19 | 2018-10-10T20:57:19 | 152,488,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,716 | py | # coding=utf-8
import json
import random
import xlrd, xlwt
import requests
domains = ["fake.com", "sock.com", "rom.com", "tail.com", "tail.kz", "bahoo.com"]
letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l","m", "n","o","0", "l", "1", "2", "3", "4", "5", "6", "7", "8", "9",
"10"]
genders = ["F", "M"]
def get_one_random_domain(domains):
return domains[random.randint(0, len(domains) - 1)]
def get_one_random_name(letters):
email_name = ""
for i in range(7):
email_name = email_name + letters[random.randint(0, 25)]
return email_name
def get_one_random_names(letters):
email_name = letters[random.randint(0, 11)].upper()
for i in range(7):
email_name = email_name + letters[random.randint(0, 15)]
return email_name
def get_one_random_password(letters):
email_name = letters[random.randint(0, 11)].upper()
for i in range(7):
email_name = email_name + letters[random.randint(0, 25)]
return email_name
def get_one_gender(genders):
return genders[random.randint(0, len(genders) - 1)]
def generate_random_emails():
wb = xlwt.Workbook()
ws = wb.add_sheet('Test')
for i in range(0, 100000):
one_name = str(get_one_random_name(letters))
one_domain = str(get_one_random_domain(domains))
fname = str(get_one_random_names(letters))
gname = str(get_one_random_names(letters))
gender = str(get_one_gender(genders))
mail = one_name + "@" + one_domain
print(mail)
ws.write(i, 0, mail)
ws.write(i, 1, str(get_one_random_names(letters)) + str(i))
ws.write(i, 2, gender)
ws.write(i, 3, fname)
ws.write(i, 4, gname)
wb.save('xl_rec.xls')
def main():
generate_random_emails()
def testin_reg():
wb = xlwt.Workbook()
ws = wb.add_sheet('Test')
rb = xlrd.open_workbook('xl_rec.xlsx', formatting_info=True)
sheet = rb.sheet_by_index(0)
for rownum in range(sheet.nrows):
username = sheet.cell(rowx=rownum, colx=0).value
password = sheet.cell(rowx=rownum, colx=1).value
gender = sheet.cell(rowx=rownum, colx=2).value
family_name = sheet.cell(rowx=rownum, colx=3).value
given_name = sheet.cell(rowx=rownum, colx=4).value
response_data = {}
data = {
"username": username,
"password": password,
# "given_name": given_name,
# "family_name": family_name,
# "middle_name": "",
# "gender": gender
}
# data["csrfmiddlewaretoken"] = request.COOKIES['csrftoken']
headers = {'content-type': 'application/json', "Accept": "text/plain",
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'}
r = requests.post(url='http://******',
data=json.dumps(data, skipkeys=False), headers=headers)
response_data['result'] = "Good"
response_data['message'] = str(r._content)
ws.write(rownum, 0, username)
ws.write(rownum, 1, rownum)
if json.loads(r.content)["status"] == 200 or json.loads(r.content)["status"] == "200":
code = "PASSED"
ws.write(rownum, 3, code)
ws.write(rownum, 2, json.loads(r._content)["token"])
ws.write(rownum, 4, json.loads(r._content)["status"])
else:
code = "Failed"
ws.write(rownum, 2, json.loads(r._content)["token"])
ws.write(rownum, 3, code)
ws.write(rownum, 4,json.loads(r._content)["status"])
print rownum
wb.save('xl_r2ec_login.xlsx')
print json.dumps({"status":"DONE"})
main()
| [
"apalchev@gmail.com"
] | apalchev@gmail.com |
3516e23c03dc4c15d6c9b70dbb179330f19726bf | c138db2c89ef98d063e2188bc45eb56a4db6df58 | /generate_image.py | 4a39ad2933f9d6d2a8378a006d884589335c4dc6 | [] | no_license | wfsg00098/XD-server-python | 17d4cbb4fb2e7a4492bb79ddec11eb0e9449da77 | e7d11e44976fa5b490999352149c2ee4aac76cef | refs/heads/master | 2021-04-15T03:36:18.605151 | 2018-03-26T13:46:13 | 2018-03-26T13:46:13 | 126,833,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | import pickle
import numpy as np
from PIL import Image
def process_images(img):
# img = img.resize((500, 500), Image.ANTIALIAS)
r, g, b = img.split()
r_arr = np.array(r)
g_arr = np.array(g)
b_arr = np.array(b)
r_arr = r_arr.astype(np.float64)
g_arr = g_arr.astype(np.float64)
b_arr = b_arr.astype(np.float64)
r_arr -= np.mean(r_arr, axis=0)
g_arr -= np.mean(g_arr, axis=0)
b_arr -= np.mean(b_arr, axis=0)
r1 = Image.fromarray(r_arr).convert("L")
g1 = Image.fromarray(g_arr).convert("L")
b1 = Image.fromarray(b_arr).convert("L")
img1 = Image.merge("RGB", (r1, g1, b1))
img1 = img1.resize((100, 100), Image.ANTIALIAS)
img1 = img1.convert("L")
return img1
data = pickle.load(open("data\\teX.pkl", 'rb'))
for i in range(11):
print(str(i))
img = Image.open("data\\test\\" + str(i) + ".jpg")
img = process_images(img)
temp = []
temp.append(np.array(img))
temp = np.asarray(temp, dtype='float64')
temp /= 256
data = np.concatenate((data, temp), axis=0)
print(data.shape)
pickle.dump(data, open("data\\teX.pkl", 'wb'))
| [
"badegg@vip.126.com"
] | badegg@vip.126.com |
6cd5f1d09e9ce213d100bce44994a5664fd5c84d | f6b0c0b15e8fa41d593ea98803db015952e1bf3c | /todoapp/migrations/0001_initial.py | ee2e6fa08b79c945f90306a372715eb3beb95b3a | [] | no_license | ibrahimmk212/Todoproject | 0419e8c401265e535b885194419aa1ac4af341fb | 43b79ee1883e9aedf111daca94d2628f4e442911 | refs/heads/master | 2023-07-17T21:05:09.983479 | 2021-08-30T05:29:04 | 2021-08-30T05:29:04 | 400,805,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # Generated by Django 3.2.6 on 2021-08-28 21:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('todo', models.CharField(max_length=300)),
],
),
]
| [
"ibrahimmk212@gmail.com"
] | ibrahimmk212@gmail.com |
3f11f6e753f742c91c330b4722f942811c63b180 | 4c413f3787e66155d90a3cdce87b6527b7dae459 | /app/flask_sqlite.py | b36b4dfdc058055d05334bff7c7dc96e8ad989c1 | [] | no_license | priyanshukumarcs049/flask | 61b0339c9dd5afc62625d35b3b42902383d95402 | 5d6828d87cd648d050fec78465bcb8e607d1530e | refs/heads/master | 2020-04-01T10:05:38.901968 | 2018-10-17T12:08:05 | 2018-10-17T12:08:05 | 153,102,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | from flask import Flask, render_template, request
import sqlite3 as sql
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/enternew')
def new_student():
return render_template('student.html')
@app.route('/addrec',methods = ['POST', 'GET'])
def addrec():
if request.method == 'POST':
try:
nm = request.form['nm']
addr = request.form['add']
city = request.form['city']
pin = request.form['pin']
with sql.connect("database.db") as con:
cur = con.cursor()
cur.execute("INSERT INTO students (name,addr,city,pin)
VALUES (?,?,?,?)",(nm,addr,city,pin) )
con.commit()
msg = "Record successfully added"
except:
con.rollback()
msg = "error in insert operation"
finally:
return render_template("result.html",msg = msg)
con.close()
@app.route('/list')
def list():
con = sql.connect("database.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("select * from students")
rows = cur.fetchall();
return render_template("list.html",rows = rows)
if __name__ == '__main__':
app.run(port = 8003, debug = True) | [
"noreply@github.com"
] | noreply@github.com |
1250d501905f7a4c7d401265f0fb102a6eadc463 | 53f9a87fc1f5bc41bf1953d3dc29795723bc421a | /fun3.py | f284105e4df68daf28e5fd855a95b5bf4b4c83bd | [
"MIT"
] | permissive | shani21-meet/meet2019y1lab7 | f192812005da6e94f2d72368b6a42b0a4a371f1c | 8adac65aff51454a52fa3bfa05e424b22bd0f7a6 | refs/heads/master | 2020-06-21T01:41:27.341389 | 2019-07-29T07:30:47 | 2019-07-29T07:30:47 | 197,313,270 | 0 | 0 | null | 2019-07-17T04:11:07 | 2019-07-17T04:11:05 | null | UTF-8 | Python | false | false | 161 | py | def draw_d1(num,char) :
for i in range((num-1)*8) :
Drawit = char*num
print (Drawit)
return Drawit
star1 = draw_d1(3,'*')
print(star1)
| [
"shani21@meet.mit.edu"
] | shani21@meet.mit.edu |
ab1aed3943e3e0f4729e367787e731578383db74 | 4071efc5fc93c26538108a3d682fbbf88c761efa | /educational/migrations/0014_auto__add_field_discipline_parent_group.py | 6e06b966f5d251db204c8230b1b953c5c734a362 | [] | no_license | nadavperetz/MeneEduca | aa394f06b774c87fb5272c9f1d044e3c27610331 | 4ce73dadcb657bc4859a0db9ff22da22cfa9806a | refs/heads/master | 2021-01-10T19:53:39.858145 | 2015-07-31T14:04:43 | 2015-07-31T14:04:43 | 21,202,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,548 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Discipline.parent_group'
db.add_column(u'educational_discipline', 'parent_group',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name=u'group_of_parents', null=True, to=orm['groups.Group']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Discipline.parent_group'
db.delete_column(u'educational_discipline', 'parent_group_id')
models = {
u'agora.forum': {
'Meta': {'object_name': 'Forum'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['agora.ForumCategory']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['groups.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['agora.ForumThread']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'subforums'", 'null': 'True', 'to': u"orm['agora.Forum']"}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'agora.forumcategory': {
'Meta': {'object_name': 'ForumCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'subcategories'", 'null': 'True', 'to': u"orm['agora.ForumCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'agora.forumreply': {
'Meta': {'object_name': 'ForumReply'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'agora_forumreply_related'", 'to': u"orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_html': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replies'", 'to': u"orm['agora.ForumThread']"})
},
u'agora.forumthread': {
'Meta': {'object_name': 'ForumThread'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'agora_forumthread_related'", 'to': u"orm['auth.User']"}),
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_html': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'threads'", 'to': u"orm['agora.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_reply': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['agora.ForumReply']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'reply_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sticky': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subscriber_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'educational.assignment': {
'Meta': {'object_name': 'Assignment'},
'discipline': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['educational.Discipline']"}),
'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['groups.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'educational.deadline': {
'Meta': {'object_name': 'Deadline'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['educational.Assignment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'event': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['events_calendar.Event']", 'null': 'True', 'blank': 'True'}),
'finish_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 12, 15, 0, 0)'})
},
u'educational.discipline': {
'Meta': {'object_name': 'Discipline'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'finish_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 12, 31, 0, 0)'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['groups.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'parent_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'group_of_parents'", 'null': 'True', 'to': u"orm['groups.Group']"}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 1, 1, 0, 0)'}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'discipline_of_teacher'", 'to': u"orm['profiles.Teacher']"})
},
u'educational.remainder': {
'Meta': {'object_name': 'Remainder'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'finish_date': ('django.db.models.fields.DateField', [], {}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['agora.Forum']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 12, 15, 0, 0)'})
},
u'events_calendar.event': {
'Meta': {'ordering': "['finish_date', 'start_date']", 'object_name': 'Event'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'finish_date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profile': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Profile']", 'symmetrical': 'False'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 12, 15, 0, 0)'})
},
u'groups.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'profiles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['profiles.Profile']", 'null': 'True', 'blank': 'True'})
},
u'profiles.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'default': "'avatars/default.jpg'", 'max_length': '100', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'complete_profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'profiles.teacher': {
'Meta': {'object_name': 'Teacher'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['profiles.Profile']", 'unique': 'True'})
}
}
complete_apps = ['educational'] | [
"vitor023@gmail.com"
] | vitor023@gmail.com |
a1abbcb23714badaf42e7155343c0630d731aa58 | 0ca0ceea2a50b8a0e2c17be73718be268b42838f | /utils/helpers.py | 5ebf9a2b69ea8ee63844c1cffdad00b0ffac39d0 | [] | no_license | hshaheucleia/web-demo | 3089795bf501bd04981b9e216d2f7b0e4f5dd0d3 | db42a74aa35745f5f582b5af36ac001b2ea52fcf | refs/heads/master | 2021-01-10T20:40:13.761926 | 2013-09-02T14:34:32 | 2013-09-02T14:34:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py |
import datetime
import decimal
import httplib
import math
import re
import sys
import urllib, urllib2
from xml.etree import ElementTree
from django.conf import settings
from django.core.mail import mail_admins, EmailMultiAlternatives
from django.utils import simplejson
from django.template.loader import render_to_string
class DecimalEncoder(simplejson.JSONEncoder):
"""JSON encoder which understands decimals."""
def default(self, obj):
'''Convert object to JSON encodable type.'''
if isinstance(obj, decimal.Decimal):
return int(obj)
return simplejson.JSONEncoder.default(self, obj)
| [
"harsh.shah@taxiforsure.com"
] | harsh.shah@taxiforsure.com |
654bc1e70bc8f8d8e98e3dc33d180ac3d8fc8cee | ceadd00a6842f1c3a925b990cf96a3f7a7b3128c | /С_2_2.py | b24c202e13d0dba89c1de091fa974d2a68dd16f5 | [] | no_license | KirychMe/Homeworks | 853f124b20b4ed7b650279e0431fc0b59b32f9e1 | 4439447d759362039bffbb114efa613720b59f11 | refs/heads/main | 2023-04-07T09:07:35.453584 | 2021-04-07T08:24:26 | 2021-04-07T08:24:26 | 353,669,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,044 | py | from random import randint
class Dot:
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __repr__(self):
return f"({self.x}, {self.y})"
class BoardException(Exception):
pass
class BoardOutException(BoardException):
def __str__(self):
return "Вы пытаетесь выстрелить за доску!"
class BoardUsedException(BoardException):
def __str__(self):
return "Вы уже стреляли в эту клетку"
class BoardWrongShipException(BoardException):
pass
class Ship:
def __init__(self, bow, l, o):
self.bow = bow
self.l = l
self.o = o
self.lives = l
@property
def dots(self):
ship_dots = []
for i in range(self.l):
cur_x = self.bow.x
cur_y = self.bow.y
if self.o == 0:
cur_x += i
elif self.o == 1:
cur_y += i
ship_dots.append(Dot(cur_x, cur_y))
return ship_dots
def shooten(self, shot):
return shot in self.dots
class Board:
def __init__(self, hid=False, size=6):
self.size = size
self.hid = hid
self.count = 0
self.field = [["O"] * size for _ in range(size)]
self.busy = []
self.ships = []
def add_ship(self, ship):
for d in ship.dots:
if self.out(d) or d in self.busy:
raise BoardWrongShipException()
for d in ship.dots:
self.field[d.x][d.y] = "■"
self.busy.append(d)
self.ships.append(ship)
self.contour(ship)
def contour(self, ship, verb=False):
near = [
(-1, -1), (-1, 0), (-1, 1),
(0, -1), (0, 0), (0, 1),
(1, -1), (1, 0), (1, 1)
]
for d in ship.dots:
for dx, dy in near:
cur = Dot(d.x + dx, d.y + dy)
if not (self.out(cur)) and cur not in self.busy:
if verb:
self.field[cur.x][cur.y] = "."
self.busy.append(cur)
def __str__(self):
res = ""
res += " | 1 | 2 | 3 | 4 | 5 | 6 |"
for i, row in enumerate(self.field):
res += f"\n{i + 1} | " + " | ".join(row) + " |"
if self.hid:
res = res.replace("■", "O")
return res
def out(self, d):
return not ((0 <= d.x < self.size) and (0 <= d.y < self.size))
def shot(self, d):
if self.out(d):
raise BoardOutException()
if d in self.busy:
raise BoardUsedException()
self.busy.append(d)
for ship in self.ships:
if d in ship.dots:
ship.lives -= 1
self.field[d.x][d.y] = "X"
if ship.lives == 0:
self.count += 1
self.contour(ship, verb=True)
print("Корабль уничтожен!")
return False
else:
print("Корабль ранен!")
return True
self.field[d.x][d.y] = "."
print("Мимо!")
return False
def begin(self):
self.busy = []
class Player:
def __init__(self, board, enemy):
self.board = board
self.enemy = enemy
def ask(self):
raise NotImplementedError()
def move(self):
while True:
try:
target = self.ask()
repeat = self.enemy.shot(target)
return repeat
except BoardException as e:
print(e)
class AI(Player):
def ask(self):
d = Dot(randint(0, 5), randint(0, 5))
print(f"Ход компьютера: {d.x + 1} {d.y + 1}")
return d
class User(Player):
def ask(self):
while True:
cords = input("Ваш ход: ").split()
if len(cords) != 2:
print(" Введите 2 координаты! ")
continue
x, y = cords
if not (x.isdigit()) or not (y.isdigit()):
print(" Введите числа! ")
continue
x, y = int(x), int(y)
return Dot(x - 1, y - 1)
class Game:
def __init__(self, size=6):
self.size = size
pl = self.random_board()
co = self.random_board()
co.hid = True
self.ai = AI(co, pl)
self.us = User(pl, co)
def random_board(self):
board = None
while board is None:
board = self.random_place()
return board
def random_place(self):
lens = [3, 2, 2, 1, 1, 1, 1]
board = Board(size=self.size)
attempts = 0
for l in lens:
while True:
attempts += 1
if attempts > 2000:
return None
ship = Ship(Dot(randint(0, self.size), randint(0, self.size)), l, randint(0, 1))
try:
board.add_ship(ship)
break
except BoardWrongShipException:
pass
board.begin()
return board
def greet(self):
print("-------------------")
print(" Здрасти!!! ")
print(" Играем ")
print(" в морской бой ")
print("-------------------")
print(" формат ввода: x y ")
print(" x - номер строки ")
print(" y - номер столбца ")
def loop(self):
num = 0
while True:
print("-" * 20)
print("Доска пользователя:")
print(self.us.board)
print("-" * 20)
print("Доска компьютера:")
print(self.ai.board)
if num % 2 == 0:
print("-" * 20)
print("Ходит пользователь!")
repeat = self.us.move()
else:
print("-" * 20)
print("Ходит компьютер!")
repeat = self.ai.move()
if repeat:
num -= 1
if self.ai.board.count == 7:
print("-" * 20)
print("Пользователь выиграл!")
break
if self.us.board.count == 7:
print("-" * 20)
print("Компьютер выиграл!")
break
num += 1
def start(self):
self.greet()
self.loop()
g = Game()
g.start() | [
"noreply@github.com"
] | noreply@github.com |
574a1d3404aba0cd7099ee288799899c33facdd1 | d9d0f6bbd1b92cb2e8cd71d1f988d21a10e235d3 | /tests/test_utility.py | 603316a552236eebdb4d882ad0985baf6e0ada60 | [
"MIT"
] | permissive | NoMod-Programming/py-utility | 62e657ca2261aa7fac26d6bfeaeb90f2a18cf94c | 8f66fc1fc569c34798865ab063f1da4e7753c9d5 | refs/heads/main | 2023-01-27T14:40:03.790842 | 2020-12-03T12:45:20 | 2020-12-03T12:45:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | from unittest import TestCase
from pyutility import limit_resource, measureit
def func1a(x):
x = [i for i in range(x)]
return -1
def func1b(x):
# recursive function to find xth fibonacci number
if x < 3:
return 1
return func1b(x-1) + func1b(x-2)
def func2():
# error function
return "a" / 2
def func3(*args, **kwagrs):
# args and kwargs function
return list(args) + list(kwagrs.values())
class MeasureitTest(TestCase):
def setUp(self):
self.er_func = measureit(func2)
self.func_m = measureit(func1a)
self.func_t = measureit(func1b)
self.ka_func = measureit(func3)
def test_measureit_1(self):
self.assertIsInstance(self.func_m(100), tuple)
def test_measureit_2(self):
x = self.func_t(10)
self.assertIsInstance(x[0], int)
self.assertIsInstance(x[1], float)
def test_measureit_3(self):
self.assertIsInstance(self.func_t(15), tuple)
def test_measureit_4(self):
self.assertRaises(Exception, self.er_func)
class LimitResourceTest(TestCase):
def setUp(self):
self.er_func = limit_resource(time=2)(func2)
self.func_m = limit_resource(time=2)(func1a)
self.func_t = limit_resource(time=2)(func1b)
self.ka_func = limit_resource(time=2)(func3)
def test_limit_resource_1(self):
self.assertEqual(self.func_m(300), -1)
def test_limit_resource_2(self):
self.assertEqual(self.func_t(3), 2)
def test_limit_resource_3(self):
self.assertRaises(Exception, self.er_func)
def test_limit_resource_4(self):
self.assertRaises(MemoryError, self.func_m, 100_000_000)
def test_limit_resource_5(self):
self.assertRaises(TimeoutError, self.func_t, 50)
def test_limit_resource_6(self):
self.assertEqual(self.ka_func(
1, 2, 3, four=4, five=5), [1, 2, 3, 4, 5])
| [
"vipulcariappa@gmail.com"
] | vipulcariappa@gmail.com |
71ad3f0dc161322df687c69ddedcd303e2fee89f | 3cd75f3d62911ba3d2114f95203e81d91be32877 | /1day/Str09.py | 432786c3f756f162e0a411ef5774f40ee0cbf828 | [] | no_license | kukukuni/Python_ex | 3667e2fe1db3a161d9e6acd5d8145a3e692f8e89 | e56d10bbcf3dc33b4422462a5b3c2dedb082b8c3 | refs/heads/master | 2022-11-05T13:58:55.770984 | 2019-04-14T00:57:18 | 2019-04-14T00:57:18 | 181,244,073 | 0 | 1 | null | 2022-10-23T06:38:06 | 2019-04-14T00:50:00 | Jupyter Notebook | UTF-8 | Python | false | false | 359 | py | # Str09.py
'''
print('숫자 1번 입력'); n1 = int(input())
print('숫자 2번 입력'); n2 = int(input())
print(n1+n2)
print('숫자 2개 입력')
n1, n2 = input().split(',')
print(int(n1)+int(n2))
print('숫자 2개 입력')
n1, n2 = map(int,input().split(','))
print(n1+n2)
'''
n1, n2 = map(int,input('숫자 2개 입력\n').split(','))
print(n1+n2)
| [
"mxneyelo@gmail.com"
] | mxneyelo@gmail.com |
ae4464db52561375bbc87b0547ef21940050cf28 | 3295017d73103cb573616858db368f66d7c1c439 | /NAOmark_Detection.py | 6c8330609d9d4c3a6305b410b43461e514c415d9 | [] | no_license | SV1609/Master-Thesis | 876b240342eff0e602207988a3bfcb04b33f1ea4 | 054d13844fdf16000e5c075507362504a7d67f82 | refs/heads/master | 2022-04-17T04:27:51.150332 | 2020-04-13T09:53:42 | 2020-04-13T09:53:42 | 255,286,537 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,727 | py | #! /usr/bin/env python
# -*- encoding: UTF-8 -*-
"""Example: Demonstrates a way to localize the robot with ALLandMarkDetection"""
import qi
import time
import sys
import argparse
import math
import almath
class LandmarkDetector(object):
"""
We first instantiate a proxy to the ALLandMarkDetection module
Note that this module should be loaded on the robot's naoqi.
The module output its results in ALMemory in a variable
called "LandmarkDetected".
We then read this ALMemory value and check whether we get
interesting things.
After that we get the related position of the landmark compared to robot.
"""
def __init__(self, app):
"""
Initialisation of qi framework and event detection.
"""
super(LandmarkDetector, self).__init__()
app.start()
session = app.session
# Get the service ALMemory.
self.memory = session.service("ALMemory")
# Connect the event callback.
self.subscriber = self.memory.subscriber("LandmarkDetected")
self.subscriber.signal.connect(self.on_landmark_detected)
# Get the services ALTextToSpeech, ALLandMarkDetection and ALMotion.
self.tts = session.service("ALTextToSpeech")
self.landmark_detection = session.service("ALLandMarkDetection")
self.motion_service = session.service("ALMotion")
self.landmark_detection.subscribe("LandmarkDetector", 500, 0.0 )
self.got_landmark = False
# Set here the size of the landmark in meters.
self.landmarkTheoreticalSize = 0.06 #in meters
# Set here the current camera ("CameraTop" or "CameraBottom").
self.currentCamera = "CameraTop"
def on_landmark_detected(self, markData):
"""
Callback for event LandmarkDetected.
"""
if markData == []: # empty value when the landmark disappears
self.got_landmark = False
elif not self.got_landmark: # only speak the first time a landmark appears
self.got_landmark = True
print "I saw a landmark! "
self.tts.say("I saw a landmark! ")
# Retrieve landmark center position in radians.
wzCamera = markData[1][0][0][1]
wyCamera = markData[1][0][0][2]
# Retrieve landmark angular size in radians.
angularSize = markData[1][0][0][3]
# Compute distance to landmark.
distanceFromCameraToLandmark = self.landmarkTheoreticalSize / ( 2 * math.tan( angularSize / 2))
# Get current camera position in NAO space.
transform = self.motion_service.getTransform(self.currentCamera, 2, True)
transformList = almath.vectorFloat(transform)
robotToCamera = almath.Transform(transformList)
# Compute the rotation to point towards the landmark.
cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
# Compute the translation to reach the landmark.
cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)
# Combine all transformations to get the landmark position in NAO space.
robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform *cameraToLandmarkTranslationTransform
print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
self.motion_service.moveTo(robotToLandmark.r1_c4,robotToLandmark.r2_c4,robotToLandmark.r3_c4);
def run(self):
"""
Loop on, wait for events until manual interruption.
"""
print "Starting LandmarkDetector"
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print "Interrupted by user, stopping LandmarkDetector"
self.landmark_detection.unsubscribe("LandmarkDetector")
#stop
sys.exit(0)
if __name__ == "__main__":
try:
# Initialize qi framework.
connection_url = "tcp://" + "192.168.0.10" + ":" + str(9559)
app = qi.Application(["LandmarkDetector", "--qi-url=" + connection_url])
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + "192.168.0.11" + "\" on port " + str(9559) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
landmark_detector = LandmarkDetector(app)
landmark_detector.run() | [
"noreply@github.com"
] | noreply@github.com |
d6db57ca78ffbfbe55bff62613e68d9b7b0a32b5 | d78e59d285cdd1e16f1d7d836d39715e8a581c8b | /machine_learning/ml_basic/lab10_NN_ReLu_Xavier_Dropout_and_Adam/06_dropout_for_mnist.py | 7ddee527ad9b4d1754910dc4f1f7f773f5825539 | [] | no_license | egaoneko/study | 79c11e0df9d713b05babde3461f2e74f3906ad80 | b965654c7cc8e8361f5ec0596af57c55d35137cc | refs/heads/master | 2020-04-12T09:04:55.131290 | 2017-09-03T10:13:04 | 2017-09-03T10:13:04 | 54,833,896 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,649 | py | '''
A logistic regression learning algorithm example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
# Import MINST data
from random import randint
import math
from matplotlib import pyplot as plt
# from lab07_App_and_Tip import input_data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import tensorflow as tf
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# tf Graph Input
X = tf.placeholder("float", [None, 784]) # mnist data image of shape 28*28=784
Y = tf.placeholder("float", [None, 10]) # 0-9 digits recognition => 10 classes
def xavier_init(n_inputs, n_outputs, uniform=True):
"""Set the parameter initialization using the method described.
This method is designed to keep the scale of the gradients roughly the same
in all layers.
Xavier Glorot and Yoshua Bengio (2010):
Understanding the difficulty of training deep feedforward neural
networks. International conference on artificial intelligence and
statistics.
Args:
n_inputs: The number of input nodes into each output.
n_outputs: The number of output nodes for each input.
uniform: If true use a uniform distribution, otherwise use a normal.
Returns:
An initializer.
"""
if uniform:
# 6 was used in the paper.
init_range = math.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range)
else:
# 3 gives us approximately the same limits as above since this repicks
# values greater than 2 standard deviations from the mean.
stddev = math.sqrt(3.0 / (n_inputs + n_outputs))
return tf.truncated_normal_initializer(stddev=stddev)
# Set model weights
W1 = tf.get_variable("W1", shape=[784, 256], initializer=xavier_init(784, 256))
W2 = tf.get_variable("W2", shape=[256, 256], initializer=xavier_init(256, 256))
W3 = tf.get_variable("W3", shape=[256, 256], initializer=xavier_init(256, 256))
W4 = tf.get_variable("W4", shape=[256, 256], initializer=xavier_init(256, 256))
W5 = tf.get_variable("W5", shape=[256, 10], initializer=xavier_init(256, 10))
B1 = tf.Variable(tf.random_normal([256]))
B2 = tf.Variable(tf.random_normal([256]))
B3 = tf.Variable(tf.random_normal([256]))
B4 = tf.Variable(tf.random_normal([256]))
B5 = tf.Variable(tf.random_normal([10]))
# Construct model
dropout_rate = tf.placeholder("float")
_L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), B1)) # Hidden layer with RELU activation
L1 = tf.nn.dropout(_L1, dropout_rate)
_L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), B2)) # Hidden layer with RELU activation
L2 = tf.nn.dropout(_L2, dropout_rate)
_L3 = tf.nn.relu(tf.add(tf.matmul(L2, W3), B3)) # Hidden layer with RELU activation
L3 = tf.nn.dropout(_L3, dropout_rate)
_L4 = tf.nn.relu(tf.add(tf.matmul(L3, W4), B4)) # Hidden layer with RELU activation
L4 = tf.nn.dropout(_L4, dropout_rate)
hypothesis = tf.add(tf.matmul(L2, W5), B5) # No need to use softmax here
# Minimize error using cross entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(hypothesis, Y)) # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Gradient Descent
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7}) / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels, dropout_rate: 1}))
| [
"egaoneko@naver.com"
] | egaoneko@naver.com |
790377a01ed8013816dd8017e8c4cb12a44b62f3 | d5e82604646b200673d9f9e57a4ba0f3b35c3516 | /pybb_extensions/pluggable/models.py | 9359c94f44284c64002d653591bea0e3e8f5272b | [] | no_license | zalew/pybbm-extensions | 7cd7d5ada4d6f809713f469ab7cb46f9e7952c66 | 3b5c1f63d21bfbdf659f49dae76ce81faf3ad7d9 | refs/heads/master | 2020-12-24T13:21:04.035156 | 2013-07-18T12:37:09 | 2013-07-18T12:37:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,376 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.manager import Manager
from django.db.models.signals import post_save
from pybb.models import Topic, Post
from pybb_extensions.pluggable.utils import get_datetime_now
from django.utils.html import strip_tags
registry = []
class PlugManager(Manager):
def _default_qs(self):
return super(PlugManager, self).get_query_set()
def get_for_object(self, obj):
ctype = ContentType.objects.get_for_model(obj.__class__)
try:
return self._default_qs().get(content_type=ctype, object_id=obj.id)
except:
return None
def create_for_object(self, obj, forum_id, topic=None, user=None, body=None):
try:
name = obj.forum_topic_title()
except:
name = str(obj)
try:
added = obj.forum_topic_added()
except:
added = get_datetime_now()
try:
user = obj.forum_topic_user()
except:
try:
user = obj.user
except:
user = User.objects.filter(is_superuser=True).order_by('id')[0].get()
if not body:
try:
body = obj.forum_topic_body()
except:
body = str(obj)
if not topic:
topic = Topic.objects.create(forum_id=forum_id, user=user, name=name)
topic.created = added
topic.save()
first_post, created = Post.objects.get_or_create(topic_id=topic.id, user=user, body=body, body_html=body, body_text=strip_tags(body), created=added)
ctype = ContentType.objects.get_for_model(obj.__class__)
return super(PlugManager, self).create(topic_id=topic.id, content_type=ctype, object_id=obj.id)
class Plug(models.Model):
topic = models.OneToOneField(Topic, related_name='plug')
content_type = models.ForeignKey(ContentType)
object_id = models.IntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
objects = PlugManager()
class Meta:
db_table = 'pybb_ext_plug'
unique_together = ('content_type', 'object_id')
def content_url(self):
try:
return self.content_object.get_absolute_url()
except:
return None
def posts(self):
posts = Post.objects.filter(topic=self.topic)
return posts
@classmethod
def create_topic(cls):
pass
class AlreadyRegistered(Exception):
pass
def register(model, attr="plug"):
if model in registry:
raise AlreadyRegistered(('The model %s has already been registered.') % model.__name__)
if not hasattr(model, 'forum_id'):
raise Exception('You must specify a forum_id() method in model %s.' % model.__name__)
registry.append(model)
def get_plug_for_instance(self):
return Plug.objects.get_for_object(self)
model.add_to_class('plug', get_plug_for_instance)
post_save.connect(plug_signal_handler, sender=model)
def plug_signal_handler(sender, instance, **kwargs):
if kwargs.get('created', False):
Plug.objects.create_for_object(instance, instance.forum_id())
| [
"zalew7@gmail.com"
] | zalew7@gmail.com |
e20b04d3d4af75c458265332277fb7eb6040b44e | b0087e5a04a5e64a3d02ee9c700392fd26d14f44 | /PeertoPeer/TesteServer.py | bf249887406c12173822691a1dfe1222c11f1699 | [] | no_license | Steelrock/ChordImp | 44288efea0ae4cfe57d6d48ff0bf8b4e6be8aa70 | 07b36f1a05d366cd9cf23cd431611f4c99616956 | refs/heads/master | 2021-01-19T06:30:23.591330 | 2018-04-18T19:11:57 | 2018-04-18T19:11:57 | 87,468,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # -*- coding: utf-8 -*-
__author__ = 'Homero Silva'
import socket
import struct
from array import array
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP protocol
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('127.0.0.1',12345))
while(True):
# Handle sockets
data, addr = s.recvfrom(1024)
s.sendto('aaa',addr)
#print int(bytearray(data)[0])
#res = struct.unpack("bi",data)
print data
print addr
| [
"noreply@github.com"
] | noreply@github.com |
442f038c532429a81876bc8698542bb72ca76432 | f8376e83352d2dfab28c41f24345071a77a45fd9 | /Regular Expression/re/phn.py | 80c12d5a86a9d9db9511ad8c56d94500a7275064 | [] | no_license | harihavwas/pythonProgram | 2111ee98eccda68165159db0305c413ee53ee38a | 126df8b3a418dbaf618575b450fd4cfde44c80a7 | refs/heads/master | 2023-07-27T23:39:10.867329 | 2021-09-16T15:35:00 | 2021-09-16T15:35:00 | 402,320,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import re
f=open('phn','r')
x='[+][9][1]\d{10}$'
for i in f:
s=i.rstrip('\n')
m=re.fullmatch(x,s)
if m!=None:
print(s) | [
"hkk4harikrishnan@gmail.com"
] | hkk4harikrishnan@gmail.com |
255204e3d0c9bdbb1dd6c835f9d37578ec90ccbf | 7cf87a820fbeb90580cfbddcc4219af576f4f799 | /0x03-python-data_structures/3-print_reversed_list_integer.py | 23c8d19974297862dd3300aba0795bf58e6efc46 | [] | no_license | BigObasheer/holbertonschool-higher_level_programming | cfac3ff13e19aadc96523cf79ecaee4c5836d926 | a39253cc2d32576ff0072eb2469f75553d7e6212 | refs/heads/master | 2023-03-07T09:05:52.101158 | 2021-02-19T03:42:54 | 2021-02-19T03:42:54 | 259,472,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | #!/usr/bin/python3
def print_reversed_list_integer(my_list=[]):
if my_list:
i = len(my_list) - 1
for j in range(i, -1, -1):
print("{:d}".format(my_list[j]))
| [
"838@holbertonschool.com"
] | 838@holbertonschool.com |
fd41e677968d9a8b8c40d822cbb8a54d82335c46 | ce99ea10d7ab38c43cfabba7fb3a6483c0385902 | /copy_of_nst_it15099600.py | d60d7ac7e679a20216f7a4d842403e9bb4056ad5 | [] | no_license | kalsha600/Neural_style_transfer | 630ad4758a1284ed5c321a25627ab23dc5ffd2d1 | 25e067869ed6b8fb97be1d8a661639ec8d927840 | refs/heads/master | 2023-06-01T03:08:52.625389 | 2021-06-24T21:51:45 | 2021-06-24T21:51:45 | 380,056,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,928 | py | # -*- coding: utf-8 -*-
"""Copy of NST-IT15099600.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1piC17xbL211LEGe-KU-0-8PXSYG_gIiZ
# **Import TensorFlow and other libraries**
"""
import tensorflow_hub as hub
import tensorflow as tf
from matplotlib import pyplot as plt
import numpy as np
import cv2
from keras.preprocessing.image import ImageDataGenerator
import PIL
"""# **Load image stylization module**"""
model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
"""# **Connect to the dataset**"""
import pathlib
from google.colab import drive
drive.mount('/content/gdrive')
data_dir = "/content/gdrive/MyDrive/Neural Style Transfer"
data_dir = pathlib.Path(data_dir)
"""#**copy of the dataset available**"""
image_count = len(list(data_dir.glob('*/*.jpg')))
print(image_count)
"""# **Visualize the input**"""
def load_image(img_path):
img = tf.io.read_file(img_path)
img = tf.image.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
img = img[tf.newaxis, :]
return img
"""#**Download images and choose a style image and a content image**"""
content_image = load_image('/content/gdrive/MyDrive/Neural Style Transfer/content_images/con2.jpg')
style_image = load_image('/content/gdrive/MyDrive/Neural Style Transfer/style_image/style3.jpg')
"""# **Visualize Output**"""
content_image.shape
plt.imshow(np.squeeze(style_image))
plt.show()
plt.imshow(np.squeeze(content_image))
plt.show()
"""# **Stylize Image**"""
stylized_image = model(tf.constant(content_image), tf.constant(style_image))[0]
plt.imshow(np.squeeze(stylized_image))
plt.show()
cv2.imwrite('generated_img.jpg', cv2.cvtColor(np.squeeze(stylized_image)*255, cv2.COLOR_BGR2RGB))
"""# **Download the Output image**"""
from google.colab import files
files.download('generated_img.jpg') | [
"it15099600@my.sliit.lk"
] | it15099600@my.sliit.lk |
a85dc681562f30bdece0ed03dda6718a48363577 | b3e52682276f184726b2ce33289dd9fa66d41db8 | /applications/search/apps.py | c58b75f7f81d0e03fc0418ddc27b4c05b25b4767 | [] | no_license | pythoneast/supermarket | 115786786c8603f177cfa861845c659c113a82f2 | 33787c0c7005526da448056d1c0638183d3cdeac | refs/heads/master | 2020-04-18T03:36:09.740590 | 2019-04-19T14:00:56 | 2019-04-19T14:00:56 | 167,204,971 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | from django.apps import AppConfig
class SearchConfig(AppConfig):
name = 'applications.search'
| [
"mrpalitaev@gmail.com"
] | mrpalitaev@gmail.com |
345246999c91130c766a8ef54af4c97f2a77ce3d | 4821cb5b0fb238697f9a65aab03fded98b2f073c | /yolo/yolo_image.py | 4764f09f9e029bc1fcf01ee98e18281a6dfdc3f8 | [
"MIT"
] | permissive | rafcc/2020-prenatal-vs-segmentation | a7f818f08054e289f8986d82d49d81853c04c251 | 3f176846ca5b036755087c85aaa7cfc651bf74d3 | refs/heads/master | 2022-12-24T00:57:29.695580 | 2020-10-06T11:30:57 | 2020-10-06T11:30:57 | 300,056,793 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | import sys
import argparse
from yolo import YOLO, detect_video
from PIL import Image
import sys
import cv2
import os
def trim(img, box, outdir, mode="extend"):
"""
trim image file with result of yolo detection
Parameters
----------
img : str
input image file path
box : numpy array
result of yolo detection
outdir : str
output directory path
mode : str
box trim strategy
fit:original size
extend:bigger size
"""
os.makedirs(outdir,exist_ok=True)
image = cv2.imread(img)
name = os.path.basename(img)
name = name.rsplit(".",1)[0]
name = name.rsplit("_",1)[0]
for i in range(box.shape[0]):
if(mode == "fit"):
trim = image[box[i][0]:box[i][1], box[i][2]:box[i][3]]
if(mode == "extend"):
new_box = [0]*4
new_box[0],new_box[1],new_box[2],new_box[3] = int(box[i][0]/1.2),int(1.2*box[i][1]),int(box[i][2]/1.2),int(1.2*box[i][3])
trim = image[new_box[0]:new_box[1], new_box[2]:new_box[3]]
if box.shape[0]==1:
outpath = os.path.join(outdir,name+".png")
else:
outpath = os.path.join(outdir,name+"-"+str(i)+".png")
cv2.imwrite(outpath,trim)
| [
"akira.sakai@jp.fujitsu.com"
] | akira.sakai@jp.fujitsu.com |
db08cdc80e32609ea9c3d219290ba3cfaedf757a | e87823d7b05865efe3aa597c7c22b758af97d783 | /trainSet/tkSpider3.py | 1fc84d4fd28248270b8aa74cff9340c1115ff34c | [] | no_license | Makos63/QAExtractor | f8a81039a63b2c23a98118ea3e79a3540b5dda53 | af25a20d5417ed6f2b201676414869c855bc430d | refs/heads/master | 2023-05-07T08:22:44.740157 | 2021-05-19T09:09:05 | 2021-05-19T09:09:05 | 355,897,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,046 | py | import scrapy
from scrapy.http import Request
from webscalper.items import WebscalperItem
import json
class ArticlesSpider(scrapy.Spider):
name = 'ArticlesScraper'
allowed_domains = ['www.tk.de']
def __init(self):
self.extractedUrlList = []
self.visitedURLs = set()
self.unvisitedURLs = set()
def start_requests(self):
with open('/Users/maciejkrzyszton/PycharmProjects/webscalper/webscalper/data2.json') as json_file:
data = json.load(json_file)
for p in data:
request = Request(p['url'], cookies={'store_language': 'en'},callback=self.parse_article_page)
print("yielding for url "+ p['url'])
yield request
def parse_article_page(self, response):
tmp=''
item=WebscalperItem()
# Gets HTML content where the article links are stored
header = response.xpath('//h1[@class="e-headline e-headline--h1 m-header__headline"]/text()').extract()
#almost nice
#body = response.xpath('//div[@class="g-faqantwort__answer-body"]'
# '//p[@class="e-text"]/text()').extract()
bodyExtra = response.xpath('//div[@class="g-faqantwort__answer-body"]'#)
'//p[@class="e-text"]/text()').extract()
for body_p in bodyExtra:#.xpath('//p[@class="e-text"]'):
#print(body_p)
tmp += ' '+ body_p
#print("xpaths:")
#print(header)
print(bodyExtra)
#for article_li in content.xpath('.//li'):
#for article_a in content.xpath('.//a'):
# Extracts the href info of the link to store in scrapy item
item['header'] = header
# for article_a in content.xpath('.//a'):
item['body'] = tmp
yield(item)
#yield(Request(item.get('url'), cookies={'store_language': 'en'}, callback=self.parse_article_page))
def parse(self, response):
self.logger.info('A response from %s just arrived!', response.url) | [
"maciejkrzyszton63@gmail.com"
] | maciejkrzyszton63@gmail.com |
4b85161767dfeedfb19677af0a9ce7fdf0bdb867 | bb4caea505c7fc9faf36f404ec8d681449fee507 | /Guess my number game.py | cc697e727a95aeba06bf5d7dec2f1f57cfdf6374 | [] | no_license | JunkaiZhu/python_guess_my_number_game | 5402015dda70b20b4948e47350ba30ab60629800 | bfb7bb30cf83f8a23c5b8078186fea86326e038e | refs/heads/master | 2022-11-08T06:23:57.464612 | 2020-06-19T09:36:49 | 2020-06-19T09:36:49 | 273,456,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | high = 100
low = 0
print("Please think of a number between 0 and 100!")
guessed = False
while not guessed:
guess = (high + low)//2
print("Is your secret number" + str(guess) + "?")
y = input("Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly.")
if y == 'h':
high = guess
elif y == 'l':
low = guess
elif y == 'c':
guessed = True
print("Game over. Your secret number was: " + str(guess))
else:
print("Sorry, I did not understand your input.")
| [
"noreply@github.com"
] | noreply@github.com |
19172244f167fb5ed0a40749ee2b2ec36237c41a | e65ae5bd9ae1c93e7117e630f7340bc73aa71212 | /lib/gevent/greentest/2.6/test_ftplib.py | 1c2ceeb87282ee5345ebdfd72f7fea38d9e08d23 | [
"MIT"
] | permissive | nadirhamid/oneline | e98ff1ed81da0536f9602ecdde2fb2a4fe80d256 | 833ebef0e26ae8e0cc452756381227746d830b23 | refs/heads/master | 2021-01-21T04:27:41.715047 | 2016-05-30T03:50:34 | 2016-05-30T03:50:34 | 23,320,578 | 1 | 2 | NOASSERTION | 2020-03-12T17:22:24 | 2014-08-25T16:29:36 | Python | UTF-8 | Python | false | false | 15,567 | py | """Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class and IPv6 environment
import ftplib
import threading
import asyncore
import asynchat
import socket
import StringIO
from unittest import TestCase
from test import test_support
from test.test_support import HOST
# Dummy payloads the server sends back over the data channel when the
# RETR, LIST and NLST commands are issued; the tests compare received
# data against these to verify transfers.
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
    """Data-channel endpoint for the dummy FTP server.

    Every chunk read from the data socket is accumulated on the owning
    command handler's ``last_received_data`` attribute.  When the peer
    closes the channel, a 226 completion reply is pushed on the control
    connection and this channel is torn down.
    """

    def __init__(self, conn, baseclass):
        asynchat.async_chat.__init__(self, conn)
        self.baseclass = baseclass
        # Reset the accumulator for the new transfer.
        self.baseclass.last_received_data = ''

    def handle_read(self):
        chunk = self.recv(1024)
        self.baseclass.last_received_data = (
            self.baseclass.last_received_data + chunk)

    def handle_close(self):
        # Acknowledge end-of-transfer on the control channel first.
        self.baseclass.push('226 transfer complete')
        self.close()
class DummyFTPHandler(asynchat.async_chat):
    """Control-channel handler implementing just enough of the FTP
    protocol for the client tests.

    Complete CRLF-terminated command lines are dispatched to matching
    ``cmd_<name>`` methods; unknown commands receive a 550 reply.
    Data transfers are delegated to a DummyDTPHandler kept in
    ``self.dtp``.
    """

    def __init__(self, conn):
        asynchat.async_chat.__init__(self, conn)
        # FTP control lines are terminated by CRLF.
        self.set_terminator("\r\n")
        self.in_buffer = []
        self.dtp = None
        # Introspection hooks the test cases read back.
        self.last_received_cmd = None
        self.last_received_data = ''
        # When non-empty, this reply is pushed before handling the next
        # command, letting tests inject unexpected server responses.
        self.next_response = ''
        self.push('220 welcome')

    def collect_incoming_data(self, data):
        # Accumulate partial reads until the CRLF terminator arrives.
        self.in_buffer.append(data)

    def found_terminator(self):
        # A full command line has been received: parse and dispatch it.
        line = ''.join(self.in_buffer)
        self.in_buffer = []
        if self.next_response:
            self.push(self.next_response)
            self.next_response = ''
        cmd = line.split(' ')[0].lower()
        self.last_received_cmd = cmd
        space = line.find(' ')
        if space != -1:
            arg = line[space + 1:]
        else:
            arg = ""
        if hasattr(self, 'cmd_' + cmd):
            method = getattr(self, 'cmd_' + cmd)
            method(arg)
        else:
            self.push('550 command "%s" not understood.' %cmd)

    def handle_error(self):
        # Re-raise instead of asyncore's default log-and-close so test
        # failures surface as tracebacks.
        raise

    def push(self, data):
        # Append the CRLF line terminator required by the protocol.
        asynchat.async_chat.push(self, data + '\r\n')

    def cmd_port(self, arg):
        # Active mode: connect back to the client-supplied address,
        # encoded as six comma-separated values (h1,h2,h3,h4,p1,p2).
        addr = map(int, arg.split(','))
        ip = '%d.%d.%d.%d' %tuple(addr[:4])
        port = (addr[4] * 256) + addr[5]
        s = socket.create_connection((ip, port), timeout=2)
        self.dtp = DummyDTPHandler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_pasv(self, arg):
        # Passive mode: listen on an ephemeral port and tell the client
        # where to connect, then accept its data connection.
        sock = socket.socket()
        sock.bind((self.socket.getsockname()[0], 0))
        sock.listen(5)
        sock.settimeout(2)
        ip, port = sock.getsockname()[:2]
        ip = ip.replace('.', ',')
        p1, p2 = divmod(port, 256)
        self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
        conn, addr = sock.accept()
        self.dtp = DummyDTPHandler(conn, baseclass=self)

    def cmd_eprt(self, arg):
        # Extended active mode (RFC 2428); arg is "<d>af<d>ip<d>port<d>".
        af, ip, port = arg.split(arg[0])[1:-1]
        port = int(port)
        s = socket.create_connection((ip, port), timeout=2)
        self.dtp = DummyDTPHandler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_epsv(self, arg):
        # Extended passive mode (RFC 2428), IPv6 listening socket.
        sock = socket.socket(socket.AF_INET6)
        sock.bind((self.socket.getsockname()[0], 0))
        sock.listen(5)
        sock.settimeout(2)
        port = sock.getsockname()[1]
        self.push('229 entering extended passive mode (|||%d|)' %port)
        conn, addr = sock.accept()
        self.dtp = DummyDTPHandler(conn, baseclass=self)

    def cmd_echo(self, arg):
        # sends back the received string (used by the test suite)
        self.push(arg)

    def cmd_user(self, arg):
        self.push('331 username ok')

    def cmd_pass(self, arg):
        self.push('230 password ok')

    def cmd_acct(self, arg):
        self.push('230 acct ok')

    def cmd_rnfr(self, arg):
        self.push('350 rnfr ok')

    def cmd_rnto(self, arg):
        self.push('250 rnto ok')

    def cmd_dele(self, arg):
        self.push('250 dele ok')

    def cmd_cwd(self, arg):
        self.push('250 cwd ok')

    def cmd_size(self, arg):
        self.push('250 1000')

    def cmd_mkd(self, arg):
        self.push('257 "%s"' %arg)

    def cmd_rmd(self, arg):
        self.push('250 rmd ok')

    def cmd_pwd(self, arg):
        self.push('257 "pwd ok"')

    def cmd_type(self, arg):
        self.push('200 type ok')

    def cmd_quit(self, arg):
        self.push('221 quit ok')
        self.close()

    def cmd_stor(self, arg):
        # Upload: data arrives on self.dtp via DummyDTPHandler.handle_read.
        self.push('125 stor ok')

    def cmd_retr(self, arg):
        # Download: send the canned payload and close the data channel.
        self.push('125 retr ok')
        self.dtp.push(RETR_DATA)
        self.dtp.close_when_done()

    def cmd_list(self, arg):
        self.push('125 list ok')
        self.dtp.push(LIST_DATA)
        self.dtp.close_when_done()

    def cmd_nlst(self, arg):
        self.push('125 nlst ok')
        self.dtp.push(NLST_DATA)
        self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
    """Single-connection FTP server driving the asyncore loop in its
    own thread.

    The accepted connection is wrapped in ``self.handler`` (by default
    DummyFTPHandler); tests reach the live handler instance through the
    same attribute after the connection is made.
    """

    handler = DummyFTPHandler

    def __init__(self, address, af=socket.AF_INET):
        threading.Thread.__init__(self)
        asyncore.dispatcher.__init__(self)
        self.create_socket(af, socket.SOCK_STREAM)
        self.bind(address)
        self.listen(5)
        self.active = False
        # Serializes asyncore polling against start()/stop() from the
        # main thread.
        self.active_lock = threading.Lock()
        self.host, self.port = self.socket.getsockname()[:2]

    def start(self):
        assert not self.active
        self.__flag = threading.Event()
        threading.Thread.start(self)
        # Block until the loop thread has actually started polling.
        self.__flag.wait()

    def run(self):
        self.active = True
        self.__flag.set()
        # Poll in short steps so stop() can end the loop promptly.
        while self.active and asyncore.socket_map:
            self.active_lock.acquire()
            asyncore.loop(timeout=0.1, count=1)
            self.active_lock.release()
        asyncore.close_all(ignore_all=True)

    def stop(self):
        assert self.active
        self.active = False
        # Wait for run() to finish and close all asyncore channels.
        self.join()

    def handle_accept(self):
        conn, addr = self.accept()
        # Shadow the handler *class* attribute with the handler
        # *instance* so tests can access it as server.handler; the
        # listening socket is closed since only one connection is served.
        self.handler = self.handler(conn)
        self.close()

    def handle_connect(self):
        self.close()
    handle_read = handle_connect

    def writable(self):
        # The listening dispatcher never has data to write.
        return 0

    def handle_error(self):
        # Re-raise so errors in the server thread are not swallowed.
        raise
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
    def test_retrlines(self):
        # Line-mode RETR strips the CRLF terminators before calling back.
        received = []
        self.client.retrlines('retr', received.append)
        self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
    def test_storbinary(self):
        # Binary STOR must transmit the file content verbatim.
        f = StringIO.StringIO(RETR_DATA)
        self.client.storbinary('stor', f)
        self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)
    def test_storlines(self):
        # Line-mode STOR converts LF line endings back to CRLF on the wire.
        f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
        self.client.storlines('stor', f)
        self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)
    def test_nlst(self):
        self.client.nlst()
        # nlst() splits the listing on CRLF and drops the trailing empty entry.
        self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
    def test_dir(self):
        # dir() feeds each listing line (without CRLF) to the callback.
        l = []
        self.client.dir(lambda x: l.append(x))
        self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
    def test_makeport(self):
        # makeport() opens the active-mode data socket.
        self.client.makeport()
        # IPv4 is in use, just make sure send_eprt has not been used
        self.assertEqual(self.server.handler.last_received_cmd, 'port')
    def test_makepasv(self):
        # makepasv() must hand back an address we can actually connect to.
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), 2)
        conn.close()
        # IPv4 is in use, just make sure send_epsv has not been used
        self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
    """Exercise the EPRT/EPSV (RFC 2428) code paths over an IPv6 server."""
    def setUp(self):
        # Fresh IPv6 dummy server plus a connected client for every test.
        self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_af(self):
        # The client must have detected the IPv6 address family.
        self.assertEqual(self.client.af, socket.AF_INET6)
    def test_makeport(self):
        # Active mode over IPv6 must use EPRT, not PORT.
        self.client.makeport()
        self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
    def test_makepasv(self):
        # Passive mode over IPv6 must use EPSV and yield a usable address.
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), 2)
        conn.close()
        self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
    def test_transfer(self):
        # A binary RETR must work in both passive and active mode.
        def retr():
            received = []
            self.client.retrbinary('retr', received.append)
            self.assertEqual(''.join(received), RETR_DATA)
        self.client.set_pasv(True)
        retr()
        self.client.set_pasv(False)
        retr()
class TestTimeouts(TestCase):
    """Timeout propagation from FTP()/connect() down to the control socket."""
    def setUp(self):
        # One-shot raw TCP server in a thread; `evt` synchronizes handshakes.
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(3)
        self.port = test_support.bind_port(self.sock)
        threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
        # Wait for the server to be ready.
        self.evt.wait()
        self.evt.clear()
        # NOTE(review): monkey-patching the class attribute affects every
        # ftplib.FTP instance created while this test class runs.
        ftplib.FTP.port = self.port
    def tearDown(self):
        # Wait for the server thread's final signal (3) before moving on.
        self.evt.wait()
    def server(self, evt, serv):
        # This method sets the evt 3 times:
        # 1) when the connection is ready to be accepted.
        # 2) when it is safe for the caller to close the connection
        # 3) when we have closed the socket
        serv.listen(5)
        # (1) Signal the caller that we are ready to accept the connection.
        evt.set()
        try:
            conn, addr = serv.accept()
        except socket.timeout:
            pass
        else:
            conn.send("1 Hola mundo\n")
            # (2) Signal the caller that it is safe to close the socket.
            evt.set()
            conn.close()
        finally:
            serv.close()
            # (3) Signal the caller that we are done.
            evt.set()
    def testTimeoutDefault(self):
        # default -- use global socket timeout
        self.assert_(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP("localhost")
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutNone(self):
        # no timeout -- do not use global socket timeout
        self.assert_(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP("localhost", timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(ftp.sock.gettimeout() is None)
        self.evt.wait()
        ftp.close()
    def testTimeoutValue(self):
        # a value
        ftp = ftplib.FTP(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutConnect(self):
        # timeout passed to connect() rather than to the constructor
        ftp = ftplib.FTP()
        ftp.connect(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutDifferentOrder(self):
        # constructor timeout followed by a separate connect() call
        ftp = ftplib.FTP(timeout=30)
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutDirectAccess(self):
        # setting the instance attribute directly must also be honored
        ftp = ftplib.FTP()
        ftp.timeout = 30
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
def test_main():
    # TestIPv6Environment is only added when the platform advertises IPv6
    # support *and* an IPv6 dummy server can actually be created.
    tests = [TestFTPClass, TestTimeouts]
    if socket.has_ipv6:
        try:
            DummyFTPServer((HOST, 0), af=socket.AF_INET6)
        except socket.error:
            pass
        else:
            tests.append(TestIPv6Environment)
    # Track threads so the framework can verify they all terminated.
    thread_info = test_support.threading_setup()
    try:
        test_support.run_unittest(*tests)
    finally:
        test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
    # Allow running this test file directly.
    test_main()
| [
"matrix.nad@gmail.com"
] | matrix.nad@gmail.com |
65e20f0caf0165b3bb1456256f57f959ff4adcab | 676b6a3377b0cbdfe037c23e728bbebd0bb3a33e | /past/jorub_preprocessing_v3.py | 6638e86dbccc20a1561deecc7a57b5bde4289c76 | [
"MIT"
] | permissive | henewsuh/seulsekwon_gis_analysis | 93bff8eec5051866d522497f1873413fbfee9d6c | 1a363710f70da86cad90ce1076134bb92815d984 | refs/heads/main | 2023-02-11T05:18:51.948223 | 2021-01-11T12:03:43 | 2021-01-11T12:03:43 | 325,196,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,520 | py | import networkx as nx
import torch
import dgl
import matplotlib.pyplot as plt
from geopy.geocoders import Nominatim
import osmnx as ox
import shapely
import pandas as pd
import pandana as pdna
from shapely import geometry
from descartes.patch import PolygonPatch
from shapely.geometry import LineString
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
font_path = r'C:/Users/user/Desktop/NanumBarunGothic.ttf'
fontprop = fm.FontProperties(fname=font_path, size=18)
# http://kuanbutts.com/2017/08/08/how-to-pdna/
geolocoder = Nominatim(user_agent = 'South Korea')
def geocoding(address):
    """Geocode *address* via the module-level Nominatim client.

    Prints and returns the resulting (latitude, longitude) tuple.
    """
    location = geolocoder.geocode(address)
    lat_lon = (location.latitude, location.longitude)
    print(lat_lon)
    return lat_lon
def multiG_to_G(MG, aggr_func=max):
    """Collapse a networkx MultiGraph *MG* into a simple Graph.

    Parallel edges between the same node pair are merged into one edge whose
    'weight' is ``aggr_func`` applied to the parallel weights (default: max).
    Every edge of *MG* must carry a 'weight' attribute, otherwise a KeyError
    is raised (same contract as the original implementation).
    """
    rG = nx.Graph()
    # add nodes (no need to copy them into an intermediate list)
    rG.add_nodes_from(MG.nodes())
    # Group the parallel edges' weights by their (u, v) endpoints;
    # dict.setdefault replaces the original if/else append dance.
    edge_weight_dict = {}
    for u, v, e_attr in MG.edges(data=True):
        edge_weight_dict.setdefault((u, v), []).append(e_attr['weight'])
    # add edges, aggregating each group of parallel weights with `aggr_func`
    for e, e_weight_lst in edge_weight_dict.items():
        rG.add_edge(*e, weight=aggr_func(e_weight_lst))
    return rG
# osmnx.pois.pois_from_address로 해당 좌표 인근의 POI 정보 불러오기
tags = {'amenity': True}
def make_graph(address):
    """Build a walkable street graph (networkx.Graph) around *address*.

    Steps: geocode the address, pull amenity POIs within 500 m from OSM,
    download the walking network, snap each POI onto its nearest network
    node via pandana, splice the POIs into the graph as extra nodes/edges,
    and return the result collapsed to a simple undirected Graph.

    Fixes vs. the original:
      * POI y-coordinates were read from the 'poi_x' column (copy-paste
        bug); they now come from 'poi_y'.
      * the `demo` summary dict was keyed on the global ``address_list[1]``
        instead of this call's *address* argument.
    """
    crds = []
    demo = dict()
    crd = geocoding(address)  # (lat, lon) of the requested address
    crds.append(crd)
    # Amenity POIs within 500 m of the address; unnamed POIs are dropped.
    pois = ox.pois.pois_from_point(crd, tags=tags, dist=500)
    pois = pois.dropna(axis=0, subset=['name'])
    pois.reset_index(inplace=True)
    pois_df = pd.DataFrame(index=range(0, len(pois)), columns=['poi_osmid', 'poi_x', 'poi_y'])
    # name -> [amenity type, (x, y)]; kept for parity with the original code
    # although nothing reads it afterwards.
    amenity_coord = dict()
    for i in range(len(pois)):
        shapely_obj = pois.loc[i, ['geometry']]
        if shapely_obj[0].type == 'Point':
            x_crd = shapely_obj[0].xy[0][0]
            y_crd = shapely_obj[0].xy[1][0]
            xy_crd = (x_crd, y_crd)
            amenity_coord[pois.loc[i, ['name']][0]] = [pois.loc[i, ['amenity']][0], xy_crd]
            pois_df.loc[i] = [pois.loc[i, ['osmid']][0], x_crd, y_crd]
        if shapely_obj[0].type == 'Polygon':
            # Polygon POIs are represented by their centroid.
            x_crd = shapely_obj[0].centroid.xy[0][0]
            y_crd = shapely_obj[0].centroid.xy[1][0]
            xy_crd = (x_crd, y_crd)
            amenity_coord[pois.loc[i, ['name']][0]] = [pois.loc[i, ['amenity']][0], xy_crd]
            pois_df.loc[i] = [pois.loc[i, ['osmid']][0], x_crd, y_crd]
    # Count how many POIs of each amenity type were found (not used yet).
    poi_count = pois['amenity'].value_counts()
    demo[address] = poi_count  # keyed on this call's address (was address_list[1])
    G = ox.graph_from_point(crds[0], dist=500, network_type="walk")
    G_original = G.to_undirected().copy()
    fig, ax = ox.plot_graph(G, show=False, close=False,
                            edge_color='#777777')
    ax.set_facecolor('white')
    # ox.plot_graph(G_projected)
    edge_list = list(G.edges)  # edges of G as a list
    node_list = list(G.nodes)  # nodes of G as a list
    print(' ')
    print('Original Nodes # : {}'.format(len(node_list)))
    print('Original Edges # : {}'.format(len(edge_list)))
    print(' ')
    # osmid -> (osmid-as-string, x, y) for every network node
    node_dict = dict()
    for i in range(len(node_list)):
        osmid = node_list[i]
        node_dict[osmid] = (str(osmid), G.nodes[osmid]['x'], G.nodes[osmid]['y'])
    # edge index -> {start osmid: (x, y), end osmid: (x, y)}
    edge_dict = dict()
    for i in range(len(edge_list)):
        st_osmid = edge_list[i][0]
        ed_osmid = edge_list[i][1]
        st_crd = (G.nodes[st_osmid]['x'], G.nodes[st_osmid]['y'])
        ed_crd = (G.nodes[ed_osmid]['x'], G.nodes[ed_osmid]['y'])
        crd_dict = dict()
        crd_dict[st_osmid] = st_crd
        crd_dict[ed_osmid] = ed_crd
        edge_dict[i] = crd_dict
    # DataFrames built from the node_dict / edge_dict above
    node_df = pd.DataFrame(node_dict).T
    node_df.columns = ['osmid', 'x', 'y']
    node_df['osmid'] = node_df['osmid'].astype('int64')
    edge_df = pd.DataFrame(index=range(0, len(edge_dict)), columns=['st_osmid', 'st_x', 'st_y', 'ed_osmid', 'ed_x', 'ed_y', 'edge_weight'])
    for i in range(len(edge_dict)):
        # NOTE(review): this 2-tuple unpack of dict.items() assumes every edge
        # has two distinct endpoints; a self-loop would make it raise.
        k, v = edge_dict[i].items()
        st_osmid = k[0]
        ed_osmid = v[0]
        st_x = k[1][0]
        st_y = k[1][1]
        ed_x = v[1][0]
        ed_y = v[1][1]
        edge_weight = 1
        edge_df.loc[i] = [st_osmid, st_x, st_y, ed_osmid, ed_x, ed_y, edge_weight]
    net = pdna.Network(node_df['x'], node_df['y'], edge_df['st_osmid'], edge_df['ed_osmid'], edge_df[['edge_weight']])
    # Snap every POI onto its nearest network node.
    near_ids = net.get_node_ids(pois_df['poi_x'],
                                pois_df['poi_y'],
                                mapping_distance=1)
    pois_df['nearest_node_id'] = near_ids
    nearest_to_pois = pd.merge(pois_df,
                               node_df,
                               left_on='nearest_node_id',
                               right_on='osmid',
                               how='left',
                               sort=False,
                               suffixes=['_from', '_to'])
    # add poi (node) rows shaped like node_df
    pois_df_ = dict()
    for i in range(len(pois_df)):
        poi_osmid = pois_df.loc[i, ['poi_osmid']][0]
        poi_x = pois_df.loc[i, ['poi_x']][0]
        poi_y = pois_df.loc[i, ['poi_y']][0]  # was 'poi_x': copy-paste bug
        pois_df_[i] = [poi_osmid, poi_x, poi_y]
    pois_df_ = pd.DataFrame(pois_df_).T
    pois_df_.columns = ['osmid', 'x', 'y']
    pois_df_['osmid'] = pois_df_['osmid'].astype('int64')
    # nnode_df = the original network nodes plus the POI nodes
    nnode_df = pd.concat([node_df, pois_df_])
    nnode_df = nnode_df.reset_index()
    nnode_df = nnode_df.drop(['index'], axis=1)
    poi_edge_list = []
    # Connector edges created by linking each POI to its nearest node.
    addi_edge_df = dict()
    for i in range(len(pois_df)):
        poi_n = pois_df.loc[i, ['poi_osmid']][0]  # st_osmid
        nearest_n = pois_df.loc[i, ['nearest_node_id']][0]  # ed_osmid
        st_x = nnode_df.loc[nnode_df['osmid'] == poi_n]['x'].item()
        st_y = nnode_df.loc[nnode_df['osmid'] == poi_n]['y'].item()
        ed_x = nnode_df.loc[nnode_df['osmid'] == nearest_n]['x'].item()
        ed_y = nnode_df.loc[nnode_df['osmid'] == nearest_n]['y'].item()
        edge_weight = 1
        addi_edge_df[i] = [poi_n, st_x, st_y, nearest_n, ed_x, ed_y, edge_weight]
        poi_edge_list.append((poi_n, nearest_n, 0))
    addi_edge_df = pd.DataFrame(addi_edge_df).T
    addi_edge_df.columns = ['st_osmid', 'st_x', 'st_y', 'ed_osmid', 'ed_x', 'ed_y', 'edge_weight']
    eedge_df = pd.concat([edge_df, addi_edge_df])
    # Splice the POI nodes and their connector edges into the network graph.
    print('POIs (node, edge) from OSM added!')
    poi_node_list = list(pois_df['poi_osmid'])
    G.add_nodes_from(poi_node_list)
    G.add_edges_from(poi_edge_list)
    G_undirected = G.to_undirected()
    print(' ')
    print('New Nodes # : {}'.format(len(list(G.nodes))))
    print('New Edges # : {}'.format(len(list(G.edges))))
    print(' ')
    MG = G_undirected
    # Collapse the undirected MultiGraph into a simple Graph before returning.
    G = nx.Graph(MG)
    return G
# Build one POI-augmented walking graph per address (Seoul landmarks).
address_list = ['서울대입구역',
                '도림로 264', '현충로 213', '강남역', '신사역']
graph_list = []
for i in range(len(address_list)):
    address = address_list[i]
    G = make_graph(address)
    graph_list.append(G)
from karateclub import Graph2Vec
from karateclub.dataset import GraphSetReader
reader = GraphSetReader("reddit10k")
# NOTE(review): `graphss`, `graphs` and `y` below are built but never used;
# they look like leftovers from the karateclub example code.
graphss = []
graphss.append(G)
graphs = reader.get_graphs()
y = reader.get_target()
# Embed each address graph into 2-D with Graph2Vec.
model = Graph2Vec(dimensions=2)
model.fit(graph_list)
X = model.get_embedding()
def scatter_plot(X, address_list):
    """Scatter the 2-D embedding *X*, labelling each point with its address.

    X is indexed as X[:, 0] / X[:, 1]; address_list supplies one label
    per embedding row.
    """
    xs, ys = X[:, 0], X[:, 1]
    fig, ax = plt.subplots(1, figsize=(10, 6))
    fig.suptitle('seulsekwon index')
    # Blue semi-transparent markers, size 100, thin edge line.
    ax.scatter(xs, ys, color="blue", s=100, alpha=0.5, linewidths=1)
    # Annotate every point, nudged 7 points to the right of its marker.
    for px, py, label in zip(xs, ys, address_list):
        ax.annotate(label,
                    xy=(px, py),
                    xytext=(7, 0),
                    textcoords='offset points',
                    ha='left',
                    va='center')
    plt.show()
scatter_plot(X, address_list)
| [
"40761184+henewsuh@users.noreply.github.com"
] | 40761184+henewsuh@users.noreply.github.com |
6dfa663fae84a4914fee0c94185f7ec314ce8ed0 | f72777ec7bd41fd991b50a850be68bd4314c9791 | /guia-3/exer4/a.py | 0774d36d6434213574cf36f75dd6016118fd684d | [] | no_license | lucabecci/IP-UNGS | 5e8958e0e7a76bb1edab07f021cd4fb4f0ef7a02 | 182d71cab7d6d0cd294b54d3cb669e43617b509e | refs/heads/main | 2023-04-18T16:07:24.599653 | 2021-05-04T04:02:56 | 2021-05-04T04:02:56 | 348,027,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | # ) Hacer un programa que muestre, mediante un ciclo, los números desde el 5 hasta el
# 11 salteando de a 2 elementos (5, 7, 9 y 11)
# Walk from 5 through 11 inclusive in steps of two, printing 5, 7, 9, 11.
for numero in range(5, 12, 2):
    print(numero)
| [
"beccibrian@gmail.com"
] | beccibrian@gmail.com |
5210afc84531e89637ca9db677df678b13d46e8d | f89cd872172489785df20354c7a78bc332c4d894 | /equationTemplate.py | e46c6c29a3f9ac3c018bd91bbd236fa72b2eb375 | [] | no_license | amararora07/CodeFights | d565ed21b1f5c2fbe4d902159db61bee8244e1c8 | 51e3cb75eb32d22dac60f380b1f5b87822678c20 | refs/heads/master | 2021-09-06T15:45:08.716269 | 2018-02-08T06:06:52 | 2018-02-08T06:06:52 | 109,230,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from itertools import permutations as p
def equationTemplate(v):
    """Return True if the four numbers in *v* can be ordered (a, b, c, d)
    so that either a * b * c == d or a * b == c * d.

    Bug fix: the triple-product comparison referenced an undefined name
    ``x`` (raising NameError on the first iteration); it now correctly
    compares against ``i[3]``.
    """
    for i in p(v, 4):
        if i[0] * i[1] * i[2] == i[3]:
            return True
        if i[0] * i[1] == i[2] * i[3]:
            return True
    return False
| [
"noreply@github.com"
] | noreply@github.com |
5f70e595ac2f2083a65b4a931e1a8d6df8fd36c2 | 240ea8fd1fff99f950b96111fac17b9c7e48737a | /Project Interface/ENV_smartstock/Scripts/f2py.py | 5bfb79ff96a69ef52bc77235c45c8026cb6a8fde | [] | no_license | deniztim/Data_Mining_and_Machine_Learning | 9705cf94d7b0a57e57ce9135a42526813b99e003 | a6bdae3c054ddfa51f3862faf24216816e571bd2 | refs/heads/master | 2020-03-23T14:20:17.768673 | 2018-07-20T06:20:49 | 2018-07-20T06:20:49 | 141,669,548 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | #!c:\projects\datamining\env_smartstock\scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
# Select the f2py flavor from an optional --<mode> flag; when no flag is
# present the loop falls through, leaving `mode` at its last value,
# "2e-numpy" (the default backend).
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
    try:
        i = sys.argv.index("--" + mode)
        del sys.argv[i]
        break
    except ValueError:
        pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
    sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
    sys.exit(1)
elif mode == "2e-numeric":
    from f2py2e import main
elif mode == "2e-numarray":
    sys.argv.append("-DNUMARRAY")
    from f2py2e import main
elif mode == "2e-numpy":
    from numpy.f2py import main
else:
    sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
    sys.exit(1)
# Hand control to the selected backend's CLI entry point.
main()
| [
"deniztimartasiu@gmail.com"
] | deniztimartasiu@gmail.com |
83020b3d14f415d4c55749f26f703d4dafbe4c47 | 4971e7b6d0e77ee69ff8de991df3970c30287ca7 | /estate/migrations/0011_auto_20170412_0005.py | 62ff53b1d53677d09ae620d7c7119e72d02259e5 | [] | no_license | ferachi/leoestate | 60bab73bd2523b4c8c19df26ecb09145d910b624 | 5a066ae1bfbfabe2fd3dd6cf129a8b0b4fa460c4 | refs/heads/master | 2021-01-01T19:47:35.084611 | 2017-08-15T20:27:20 | 2017-08-15T20:27:20 | 98,682,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-11 23:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; avoid hand-editing the
    # operations — create a follow-up migration instead.
    dependencies = [
        ('estate', '0010_auto_20170410_2349'),
    ]
    operations = [
        # Question gains optional name/email fields (e.g. for anonymous askers).
        migrations.AddField(
            model_name='question',
            name='email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
        migrations.AddField(
            model_name='question',
            name='name',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='question',
            name='content',
            field=models.TextField(verbose_name='Question'),
        ),
        # Question.user becomes nullable; DO_NOTHING leaves referential
        # cleanup to the database.
        migrations.AlterField(
            model_name='question',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='questions', to='estate.UserProfile'),
        ),
    ]
| [
"mkvezeh@gmail.com"
] | mkvezeh@gmail.com |
c13c61e702e21c43f3082f977cf62019173684e0 | 799dacb4e044369a33547ba2e65e3c3d086b4b2d | /apps/user_operation/migrations/0003_auto_20200331_2019.py | 976532f89b048041d270afb3868c676c7de2ec11 | [] | no_license | youngguo2/Mxshop | 6c918ec77e763ae08e9277b59ecebf086f8ffa65 | 798a5be945151d91989bca351142e9ee84a05872 | refs/heads/master | 2022-05-05T10:45:26.275045 | 2020-04-19T14:24:28 | 2020-04-19T14:24:28 | 251,020,458 | 1 | 0 | null | 2022-04-22T23:10:20 | 2020-03-29T11:54:00 | JavaScript | UTF-8 | Python | false | false | 675 | py | # Generated by Django 2.2 on 2020-03-31 20:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds an add_time timestamp (defaulting to creation
    # time) to UserAddress and UserLeavingMessage.
    dependencies = [
        ('user_operation', '0002_auto_20200331_1957'),
    ]
    operations = [
        migrations.AddField(
            model_name='useraddress',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间'),
        ),
        migrations.AddField(
            model_name='userleavingmessage',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间'),
        ),
    ]
| [
"1123396344@qq.com"
] | 1123396344@qq.com |
21fbee6df470e1100c9990e738892141bd260770 | a364f53dda3a96c59b2b54799907f7d5cde57214 | /easy/278-First Bad Version.py | b8730330fdf693d30feb90c5163921ac734e1e16 | [
"Apache-2.0"
] | permissive | Davidxswang/leetcode | 641cc5c10d2a97d5eb0396be0cfc818f371aff52 | d554b7f5228f14c646f726ddb91014a612673e06 | refs/heads/master | 2022-12-24T11:31:48.930229 | 2020-10-08T06:02:57 | 2020-10-08T06:02:57 | 260,053,912 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | """
You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version of your product fails the quality check. Since each version is developed based on the previous version, all the versions after a bad version are also bad.
Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the following ones to be bad.
You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to find the first bad version. You should minimize the number of calls to the API.
Example:
Given n = 5, and version = 4 is the first bad version.
call isBadVersion(3) -> false
call isBadVersion(5) -> true
call isBadVersion(4) -> true
Then 4 is the first bad version.
"""
# The tricky part here is the case start == end.
# time complexity: O(logn), space complexity: O(logn) due to the function call stack
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return an integer
# def isBadVersion(version):
class Solution:
    def firstBadVersion(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Binary search over versions 1..n; isBadVersion() is supplied by
        # the judge and is monotone (every version after the first bad one
        # is also bad).
        return self.check(1, n)
    def check(self, start: int, end: int) -> int:
        # Invariant: the first bad version always lies within [start, end].
        if start == end:
            return start
        middle = start + (end - start) // 2
        middleResult = isBadVersion(middle)
        if middleResult:
            # middle is bad: the answer is middle or earlier.
            return self.check(start, middle)
        else:
            # middle is good: probe middle+1 so a good/bad boundary is
            # detected immediately instead of one recursion later.
            middle1Result = isBadVersion(middle + 1)
            if middle1Result:
                return middle + 1
            else:
return self.check(middle + 1, end) | [
"wxs199327@hotmail.com"
] | wxs199327@hotmail.com |
13fa8feb12381497d43c29fb6b3033f1e053a654 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/090d103842942eab3616de2464e26c4db3b402611e2f44f446e8b4086b8df170/xml/parsers/expat/model.py | a7f24af4b3f303679b553ba09459d025a5309dbf | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # encoding: utf-8
# module xml.parsers.expat.model calls itself pyexpat.model
# from C:\Users\Doly\Anaconda3\lib\site-packages\numba\npyufunc\workqueue.cp37-win_amd64.pyd
# by generator 1.147
""" Constants used to interpret content model information. """
# no imports
# Variables with simple values
# Occurrence quantifiers for a content-model particle (expat's
# XML_Content_Quant enum: NONE, OPT='?', REP='*', PLUS='+').
XML_CQUANT_NONE = 0
XML_CQUANT_OPT = 1
XML_CQUANT_PLUS = 3
XML_CQUANT_REP = 2
# Content-model node kinds (expat's XML_Content_Type enum).
XML_CTYPE_ANY = 2
XML_CTYPE_CHOICE = 5
XML_CTYPE_EMPTY = 1
XML_CTYPE_MIXED = 3
XML_CTYPE_NAME = 4
XML_CTYPE_SEQ = 6
# Stub placeholders written by the IDE skeleton generator.
__loader__ = None
__spec__ = None
# no functions
# no classes
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
8a06a2e7dfcfe5bf589af9767e48dd05d03919eb | cc6e7f63eaf4b3570771c46fb8b24b88e6e1f59e | /typical/TDPC/A.py | 0980d25005faca221dd08fe47b5fde2dab33484c | [] | no_license | kamojiro/atcoderall | 82a39e9be083a01c14445417597bf357e6c854a8 | 973af643c06125f52d302a5bc1d65f07a9414419 | refs/heads/master | 2022-07-12T00:14:38.803239 | 2022-06-23T10:24:54 | 2022-06-23T10:24:54 | 161,755,381 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from copy import deepcopy
# Count the distinct subset sums obtainable from the N weights on stdin.
N = int(input())
P = list(map(int, input().split()))
Q = {0}
for i in range(N):
    # Each existing sum either takes weight P[i] or skips it.
    Q |= {s + P[i] for s in Q}
print(len(Q))
| [
"tamagoma002@yahoo.co.jp"
] | tamagoma002@yahoo.co.jp |
6bd6cf6358f7b0ebfc7f97e6ec3b1df34ede1543 | 688b54554824d61079b8ed5dacbb288edcd360f2 | /teammatefinder/urls.py | c0aa239c19d5d0d72500c26b9a150cbaa29edab4 | [] | no_license | ynk1011/teammates | 0076cb621d29a6ac25461255d2d087778e2ec151 | ed91523e1bac325fa73b4ce55f003a399aa1c511 | refs/heads/main | 2023-09-04T18:26:09.321362 | 2021-11-14T08:54:23 | 2021-11-14T08:54:23 | 427,875,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,951 | py | """teammatefinder URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
import account.views as account
import blog.views as blog
#app_name = 'blog'
urlpatterns = [
    path('admin/', admin.site.urls),
    # Blog CRUD
    path('', blog.index, name="home"),
    path('new/', blog.new, name="new"),
    path('create/', blog.create, name="create"),
    path('edit/<int:blog_id>', blog.edit, name="edit"),
    path('update/<int:blog_id>/<blog_slug>', blog.update, name="update"),
    path('delete/<int:blog_id>/<blog_slug>', blog.delete, name="delete"),
    path('search', blog.search, name='search'),
    # User pages and bookmarks
    path('mypage', account.mypage, name="mypage"),
    path('mypageBookmark', account.mypageBookmark, name="mypageBookmark"),
    path('bookmark', blog.bookmark, name="bookmark"),
    path('<int:blog_id>/<blog_slug>/comment', blog.add_comment_to_post,
         name="add_comment_to_post"),
    path('<int:blog_id>/<blog_slug>/', blog.detail, name="detail"),
    # List only the posts belonging to one subject (category).
    path('<slug:category_slug>/', blog.blog_in_category, name='blog_in_category'),
    # Authentication
    path('account/login', account.login_view, name="login"),
    path('account/logout', account.logout_view, name="logout"),
    path('account/register', account.register_view, name="register"),
]
| [
"yunakim@ewhain.net"
] | yunakim@ewhain.net |
43ee6e23c1f8879e5b2854680611a68ca0648787 | 1dcd28c19d66c11b2b4dce163ea524bea40707a0 | /pyomo/neos/kestrel.py | 7ea978269e195b7af17d9abd3b606fcd8d1e9a51 | [
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive"
] | permissive | astrojuanlu/pyomo | 077b4d7a523b6d2ed1adefdb7e1db334ea749c73 | 95b5db58a9792eeb5198265c89e6d03357af4658 | refs/heads/master | 2021-05-31T06:15:38.022702 | 2016-02-18T19:38:33 | 2016-02-18T19:38:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,392 | py | #!/usr/bin/env python
#
# This software is a modified version of the Kestrel interface
# package that is provided by NEOS: http://www.neos-server.org
#
# _________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2014 Sandia Corporation.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# This software is distributed under the BSD License.
# _________________________________________________________________________
import io
import os
import re
import six
import sys
import time
import socket
import gzip
import base64
import tempfile
import logging
try:
import xmlrpclib
except: #pragma:nocover
import xmlrpc.client as xmlrpclib
logger = logging.getLogger('pyomo.solvers')
#
# Proxy Transport class provided by NoboNobo.
# See: http://www.python.org/doc/2.5.2/lib/xmlrpc-client-example.html
#
class ProxiedTransport(xmlrpclib.Transport):
    """xmlrpclib Transport that tunnels every request through an HTTP proxy.

    Python 2 style: uses httplib.HTTP via six.moves and the old Transport
    hook methods (send_request/send_host).
    """
    def set_proxy(self, proxy):
        # host:port of the proxy all requests are routed through
        self.proxy = proxy
    def make_connection(self, host):
        # Remember the real host; open the TCP connection to the proxy instead.
        self.realhost = host
        h = six.moves.http_client.HTTP(self.proxy)
        return h
    def send_request(self, connection, handler, request_body):
        # Absolute URI form, as required when talking through a proxy.
        connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
    def send_host(self, connection, host):
        # The Host header must name the real server, not the proxy.
        connection.putheader('Host', self.realhost)
class kestrelAMPL:
    def __init__(self):
        # Create the NEOS XML-RPC proxy, tunneling through HTTP_PROXY if set.
        if 'HTTP_PROXY' in os.environ:
            p = ProxiedTransport()
            p.set_proxy(os.environ['HTTP_PROXY'])
            self.neos = xmlrpclib.ServerProxy("http://www.neos-server.org:3332",transport=p)
        else:
            self.neos = xmlrpclib.ServerProxy("http://www.neos-server.org:3332")
        logger.info("Connecting to the NEOS server ... ")
        try:
            result = self.neos.ping()
            logger.info("OK.")
        except socket.error:
            e = sys.exc_info()[1]
            # Leave self.neos as None so callers can detect the outage.
            self.neos = None
            logger.info("Fail.")
            logger.warning("NEOS is temporarily unavailable.\n")
    def tempfile(self):
        # Per-AMPL-session job file, keyed by the ampl_id environment variable.
        return os.path.join(tempfile.gettempdir(),'at%s.jobs' % os.getenv('ampl_id'))
def kill(self,jobnumber,password):
response = self.neos.killJob(jobNumber,password)
sys.stdout.write(response+"\n")
def solvers(self):
return self.neos.listSolversInCategory("kestrel") \
if not self.neos is None else []
    def retrieve(self,stub,jobNumber,password):
        """Download the final results of a finished NEOS job into <stub>.sol."""
        # NEOS should return results as uu-encoded xmlrpclib.Binary data
        results = self.neos.getFinalResults(jobNumber,password)
        if isinstance(results,xmlrpclib.Binary):
            results = results.data
        #decode results to kestrel.sol
        # Well try to anyway, any errors will result in error strings in .sol file
        # instead of solution.
        if stub[-4:] == '.sol':
            stub = stub[:-4]
        solfile = open(stub + ".sol","wb")
        solfile.write(results)
        solfile.close()
    def submit(self, xml):
        """Submit *xml* to NEOS' kestrel queue; return (jobNumber, password).

        Exits the process when NEOS rejects the submission (jobNumber == 0,
        with the error text delivered in the password slot).
        """
        user = "%s on %s" % (os.getenv('LOGNAME'),socket.getfqdn(socket.gethostname()))
        (jobNumber,password) = self.neos.submitJob(xml,user,"kestrel")
        if jobNumber == 0:
            sys.stdout.write("Error: %s\nJob not submitted.\n" % password)
            sys.exit(1)
        sys.stdout.write("Job %d submitted to NEOS, password='%s'\n" %
                         (jobNumber,password))
        sys.stdout.write("Check the following URL for progress report :\n")
        sys.stdout.write("http://www.neos-server.org/neos/cgi-bin/nph-neos-solver.cgi?admin=results&jobnumber=%d&pass=%s\n" % (jobNumber,password))
        return (jobNumber,password)
def getJobAndPassword(self):
"""
If kestrel_options is set to job/password, then return
the job and password values
"""
jobNumber=0
password=""
options = os.getenv("kestrel_options")
if options is not None:
m = re.search(r'job\s*=\s*(\d+)',options,re.IGNORECASE)
if m:
jobNumber = int(m.groups()[0])
m = re.search(r'password\s*=\s*(\S+)',options,re.IGNORECASE)
if m:
password = m.groups()[0]
return (jobNumber,password)
    def getSolverName(self):
        """
        Read in the kestrel_options to pick out the solver name.
        The tricky parts:
        we don't want to be case sensitive, but NEOS is.
        we need to read in options variable
        """
        # Get a list of available kestrel solvers from NEOS
        allKestrelSolvers = self.neos.listSolversInCategory("kestrel")
        kestrelAmplSolvers = []
        for s in allKestrelSolvers:
            i = s.find(':AMPL')
            if i > 0:
                kestrelAmplSolvers.append(s[0:i])
        self.options = None
        # Read kestrel_options to get solver name
        if "kestrel_options" in os.environ:
            self.options = os.getenv("kestrel_options")
        elif "KESTREL_OPTIONS" in os.environ:
            self.options = os.getenv("KESTREL_OPTIONS")
        #
        if self.options is not None:
            # Case-insensitive match of the requested solver against NEOS'
            # list; the NEOS spelling (case preserved) is what gets returned.
            m = re.search('solver\s*=*\s*(\S+)',self.options,re.IGNORECASE)
            NEOS_solver_name=None
            if m:
                solver_name=m.groups()[0]
                for s in kestrelAmplSolvers:
                    if s.upper() == solver_name.upper():
                        NEOS_solver_name=s
                        break
                #
                if not NEOS_solver_name:
                    sys.stdout.write("%s is not available on NEOS. Choose from:\n" % solver_name)
                    for s in kestrelAmplSolvers:
                        sys.stdout.write("\t%s\n"%s)
                    sys.stdout.write('To choose: option kestrel_options "solver=xxx";\n\n')
                    sys.exit(1)
        #
        # Both "no options set" and "options without a solver=..." end here;
        # short-circuiting keeps the `m is None` test safe when m is unbound.
        if self.options is None or m is None:
            sys.stdout.write("No solver name selected. Choose from:\n")
            for s in kestrelAmplSolvers:
                sys.stdout.write("\t%s\n"%s)
            sys.stdout.write('\nTo choose: option kestrel_options "solver=xxx";\n\n')
            sys.exit(1)
        return NEOS_solver_name
def formXML(self,stub):
solver = self.getSolverName()
zipped_nl_file = io.BytesIO()
if os.path.exists(stub) and stub[-3:] == '.nl':
stub = stub[:-3]
nlfile = open(stub+".nl","rb")
zipper = gzip.GzipFile(mode='wb',fileobj=zipped_nl_file)
zipper.write(nlfile.read())
zipper.close()
nlfile.close()
#
ampl_files={}
for key in ['adj','col','env','fix','spc','row','slc','unv']:
if os.access(stub+"."+key,os.R_OK):
f = open(stub+"." +key,"r")
val=""
buf = f.read()
while buf:
val += buf
buf=f.read()
f.close()
ampl_files[key] = val
# Get priority
priority = ""
m = re.search(r'priority[\s=]+(\S+)',self.options)
if m:
priority = "<priority>%s</priority>\n" % (m.groups()[0])
# Add any AMPL-created environment variables to dictionary
solver_options = "kestrel_options:solver=%s\n" % solver.lower()
solver_options_key = "%s_options" % solver
#
solver_options_value = ""
if solver_options_key in os.environ:
solver_options_value = os.getenv(solver_options_key)
elif solver_options_key.lower() in os.environ:
solver_options_value = os.getenv(solver_options_key.lower())
elif solver_options_key.upper() in os.environ:
solver_options_value = os.getenv(solver_options_key.upper())
if not solver_options_value == "":
solver_options += "%s_options:%s\n" % (solver.lower(), solver_options_value)
#
if six.PY2:
nl_string = base64.encodestring(zipped_nl_file.getvalue())
else:
nl_string = (base64.encodestring(zipped_nl_file.getvalue())).decode('utf-8')
xml = """
<document>
<category>kestrel</category>
<solver>%s</solver>
<inputType>AMPL</inputType>
%s
<solver_options>%s</solver_options>
<nlfile><base64>%s</base64></nlfile>\n""" %\
(solver,priority,
solver_options,
nl_string)
#
for key in ampl_files:
xml += "<%s><![CDATA[%s]]></%s>\n" % (key,ampl_files[key],key)
#
for option in ["kestrel_auxfiles","mip_priorities","objective_precision"]:
if option in os.environ:
xml += "<%s><![CDATA[%s]]></%s>\n" % (option,os.getenv(option),option)
#
xml += "</document>"
return xml
# Command-line entry point.  kestrel is meant to be launched from inside
# AMPL with one of the sub-commands "solvers", "submit", "retrieve",
# "kill", or a problem stub name (synchronous solve).
if __name__=="__main__": #pragma:nocover
    if len(sys.argv) < 2:
        sys.stdout.write("kestrel should be called from inside AMPL.\n")
        sys.exit(1)
    kestrel = kestrelAMPL()
    if sys.argv[1] == "solvers":
        # List every solver NEOS offers in the "kestrel" category.
        for s in sorted(kestrel.neos.listSolversInCategory("kestrel")):
            print(" "+s)
        sys.exit(0)
    elif sys.argv[1] == "submit":
        # Asynchronous submit: push the job number/password onto a stack
        # file so a later "retrieve" can pick it up.
        xml = kestrel.formXML("kestproblem")
        (jobNumber,password) = kestrel.submit(xml)
        # Add the job,pass to the stack
        jobfile = open(kestrel.tempfile(),'a')
        jobfile.write("%d %s\n" % (jobNumber,password))
        jobfile.close()
    elif sys.argv[1] == "retrieve":
        # Pop job,pass from the stack
        try:
            jobfile = open(kestrel.tempfile(),'r')
        except IOError:
            e = sys.exc_info()[1]
            # NOTE(review): this message contains a %s placeholder but no
            # format argument, so the literal "%s" is printed.
            sys.stdout.write("Error, could not open file %s.\n")
            sys.stdout.write("Did you use kestrelsub?\n")
            sys.exit(1)
        m = re.match(r'(\d+) ([a-zA-Z]+)',jobfile.readline())
        if m:
            jobNumber = int(m.groups()[0])
            password = m.groups()[1]
            restofstack = jobfile.read()
            jobfile.close()
            kestrel.retrieve('kestresult',jobNumber,password)
            if restofstack:
                # More queued jobs remain: rewrite the stack file without
                # the entry just consumed.
                sys.stdout.write("restofstack: %s\n" % restofstack)
                jobfile = open(kestrel.tempfile(),'w')
                jobfile.write(restofstack)
                jobfile.close()
            else:
                os.unlink(kestrel.tempfile())
    elif sys.argv[1] == "kill":
        (jobNumber,password) = kestrel.getJobAndPassword()
        if jobNumber:
            kestrel.kill(jobNumber,password)
        else:
            sys.stdout.write("To kill a NEOS job, first set kestrel_options variable:\n")
            sys.stdout.write('\tampl: option kestrel_options "job=#### password=xxxx";\n')
    else:
        # Default: treat argv[1] as a problem stub and solve it on NEOS.
        try:
            stub = sys.argv[1]
            # See if kestrel_options has job=.. password=..
            (jobNumber,password) = kestrel.getJobAndPassword()
            # otherwise, submit current problem to NEOS
            if not jobNumber:
                xml = kestrel.formXML(stub)
                (jobNumber,password) = kestrel.submit(xml)
        except KeyboardInterrupt:
            e = sys.exc_info()[1]
            sys.stdout.write("Keyboard Interrupt while submitting problem.\n")
            sys.exit(1)
        try:
            # Get intermediate results
            time.sleep(1)
            status = "Running"
            offset = 0
            # Poll NEOS, streaming solver output as it becomes available.
            while status == "Running" or status == "Waiting":
                (output,offset) = kestrel.neos.getIntermediateResults(jobNumber,
                                                                      password,offset)
                if isinstance(output,xmlrpclib.Binary):
                    output = output.data
                sys.stdout.write(output)
                status = kestrel.neos.getJobStatus(jobNumber,password)
                time.sleep(5)
            # Get final results
            kestrel.retrieve(stub,jobNumber,password)
            sys.exit(0)
        except KeyboardInterrupt:
            # The job keeps running on NEOS; tell the user how to kill it
            # or fetch the results later.
            e = sys.exc_info()[1]
            msg = '''
Keyboard Interrupt\n\
Job is still running on remote machine\n\
To stop job:\n\
\tampl: option kestrel_options "job=%d password=%s";\n\
\tampl: commands kestrelkill;\n\
To retrieve results:\n\
\tampl: option kestrel_options "job=%d password=%s";\n\
\tampl: solve;\n''' % (jobNumber,password,jobNumber,password)
            sys.stdout.write(msg)
            sys.exit(1)
| [
"wehart@570ccb8d-5833-0410-9ce8-cbedd0da42eb"
] | wehart@570ccb8d-5833-0410-9ce8-cbedd0da42eb |
2bedb12cb2b704a1990287c3c9623e526b68825e | 833a83e8fd32041b20c0a13f6bf0759c4067bee6 | /homeworkpal_project/maximo/migrations/0003_auto_20151104_2238.py | acc9ef30e6f720f7fc7d129fd7a46d5c7829d0d7 | [
"MIT"
] | permissive | luiscberrocal/homeworkpal | ac92de1dcbd43d9290fde8174f4d4544ed2cad23 | 342acf876264fade818b107f4af13cac067f1ace | refs/heads/master | 2020-12-12T13:13:47.022473 | 2015-12-29T19:38:43 | 2015-12-29T19:38:43 | 44,059,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes DataDocument.processed
    # nullable and optional (blank) so unprocessed documents can be stored
    # without a timestamp.

    dependencies = [
        ('maximo', '0002_datadocument'),
    ]
    operations = [
        migrations.AlterField(
            model_name='datadocument',
            name='processed',
            field=models.DateTimeField(null=True, blank=True),
        ),
    ]
| [
"luis.berrocal.1942@gmail.com"
] | luis.berrocal.1942@gmail.com |
27956a7ae022b1970e4840e35af9eea057685857 | b5e7bd2d82fea18b13d69d090647b9d84ae8b8cf | /badges/views.py | d8f15ec90b69ef1a1b87c42daf94c241852aa373 | [] | no_license | manuGil/badgeApp | 38fcb4b45a28af9955eae3188c8736ea0abbbe54 | 0530d607e8811ef5df15c22cc98da8855c951b95 | refs/heads/main | 2023-02-27T04:20:19.372601 | 2021-02-01T08:36:37 | 2021-02-01T08:36:37 | 334,868,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from django.shortcuts import HttpResponse
# Create your views here.
def index(request):
    """Placeholder view: returns a plain 'Hello, world' HTTP response."""
    return HttpResponse('Hello, world')
| [
"gilbert.mg22@gmail.com"
] | gilbert.mg22@gmail.com |
881e6e1b12da730952c4965d4305d46c908cd4ef | a8ed4cff415b9671ba588bcd24cc76a7a919b6c9 | /CVML/GAN生成手写脚本/test.py | b66549cb9f75229904c4c3299ee2a1297e363377 | [] | no_license | lunyu520/AI-learning | fac7f7e985a3249a8f3e0c3397484b1209dd1453 | a7b55ce726aa1d1fbf526536da40127d165c5bd4 | refs/heads/master | 2021-07-10T04:47:20.432869 | 2020-05-20T01:43:28 | 2020-05-20T01:43:28 | 191,372,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
import pickle
import mnist_GAN
UNITS_SIZE = mnist_GAN.UNITS_SIZE
def generatorImage(image_size):
    """Restore the latest GAN checkpoint, generate 25 samples from uniform
    noise with the generator, and pickle them to 'samples.pkl'.

    image_size: flattened size of one MNIST image (e.g. 784).
    """
    sample_images = tf.placeholder(tf.float32, [None, image_size])
    G_logits, G_output = mnist_GAN.generatorModel(sample_images, UNITS_SIZE, image_size)
    saver = tf.train.Saver()
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        # Load the most recent checkpoint from the current directory.
        saver.restore(session, tf.train.latest_checkpoint('.'))
        sample_noise = np.random.uniform(-1, 1, size=(25, image_size))
        samples = session.run(G_output, feed_dict={sample_images: sample_noise})
        with open('samples.pkl', 'wb') as f:
            pickle.dump(samples, f)
def show():
    """Load the generated samples from 'samples.pkl' and display them as a
    5x5 grid of 28x28 grayscale images."""
    with open('samples.pkl', 'rb') as f:
        samples = pickle.load(f)
    fig, axes = plt.subplots(figsize=(7, 7), nrows=5, ncols=5, sharey=True, sharex=True)
    for ax, image in zip(axes.flatten(), samples):
        # Hide the axis ticks; each cell shows one generated digit.
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        ax.imshow(image.reshape((28, 28)), cmap='Greys_r')
    plt.show()
def main(argv=None):
    """Generate samples from the trained GAN and display them."""
    # Flattened MNIST image length (28*28 = 784).
    image_size = mnist_GAN.mnist.train.images[0].shape[0]
    generatorImage(image_size)
    show()
# tf.app.run parses command-line flags and then calls main().
if __name__ == '__main__':
    tf.app.run()
| [
"noreply@github.com"
] | noreply@github.com |
27b49edfc6db928828021c003f4d3fad89731e7d | 7b4c07076f0df5bc71bfb1ae23313ca5663d0b26 | /venv/Scripts/pip-script.py | 7252d06f478e9fe190abbb519c59e148ced1e80f | [] | no_license | lkaae2018/Tilfaeldighedsgenerator | 8215cbacdcd0bd043ece71e7ad59f8a371d96339 | 0f1bb0dc9f80ea890030ddec0692aa4d0bc502ae | refs/heads/master | 2020-05-03T13:36:31.800601 | 2019-03-31T11:44:49 | 2019-03-31T11:44:49 | 178,656,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | #!C:\Users\lkaae\PycharmProjects\tilfaeldighedsgenerator\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # setuptools entry-point shim: strip the "-script.py(w)"/".exe" suffix
    # so pip sees its own name in argv[0], then dispatch to pip's
    # console_scripts entry point and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"lak@djhhadsten.dk"
] | lak@djhhadsten.dk |
97aca61d73854027135a1eb22805a48fc6355520 | 02d14665ebba799610449b5fd6e6837f66f58da0 | /fpnn/error_recorder.py | 18c3535d041a7802b429bd00d1571473687f79c5 | [] | no_license | highras/fpnn-sdk-python | 91f5af1c71cfae6f04211767479082a62d0efb15 | b1559ac9a99287b7b3fd1ac08927e665494f7bbc | refs/heads/master | 2021-04-06T05:26:08.976342 | 2020-12-03T07:16:19 | 2020-12-03T07:16:19 | 124,368,031 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | #encoding=utf8
class ErrorRecorder(object):
def __init__(self):
pass
def record_error(self, message):
pass | [
"junstor@gmail.com"
] | junstor@gmail.com |
203c06e59e3d4804763af8ca973fea45f69e1ee5 | 83b5a58d99bd999c1840dec2e4c115a97ebc30fa | /src/CardGame/CardDeck.py | 8b06ca54c5a4dc829d0dbc0360bf035c9bdefafe | [] | no_license | uujo/card-game | 0b8e11f7d5291d1bde22ca555603fce309c4150e | 92c02d39286e54e40405554ee383e678de6aba05 | refs/heads/master | 2020-04-30T02:05:03.344745 | 2019-03-21T03:08:52 | 2019-03-21T03:08:52 | 176,548,745 | 0 | 0 | null | 2019-03-21T03:08:53 | 2019-03-19T15:55:41 | Python | UTF-8 | Python | false | false | 1,060 | py | from CardGame.CardDeckABC import CardDeckABC
from CardGame.Card import Card
class CardDeck(CardDeckABC):
    """
    A standard deck of 52 playing cards.

    shuffle() and dealOneCard() are inherited from CardDeckABC.
    """

    # Ranks and suits listed in ascending order of value.
    _ranks = [str(i) for i in range(2, 11)] + ['J', 'Q', 'K', 'A']
    _suits = ['clubs', 'diamonds', 'hearts', 'spades']

    def __init__(self):
        self.initialize()

    def initialize(self):
        """Populate the deck with all 52 suit/rank combinations."""
        self.setAllCards(
            [Card(rank, suit) for suit in self._suits for rank in self._ranks]
        )

    def setAllCards(self, cards):
        """
        Parameters
        ---------
        cards (list): list of Card objects
        """
        self._cards = cards

    def getAllCards(self):
        """
        Returns
        -------
        list
            All the cards in the deck. [Card, Card, ...]
        """
        return self._cards
| [
"skyryu@hotmail.com"
] | skyryu@hotmail.com |
380cb5a1e2cf85fb3c7431a312ad036cab0a410f | 000e9c92b8b86402ab506a191cc60302f2c269a3 | /orders/migrations/0004_translations.py | c2c7f568841f74a299851a573b8e9ccd93dfb5b5 | [
"MIT"
] | permissive | FrankCasanova/onlineshop | 71c29fe3cc6a1dbb715474ffb09bde98443591af | 1a9011ce3d49976e2584cdadc33893d04947a73b | refs/heads/master | 2023-08-25T20:24:15.754513 | 2021-10-22T16:59:34 | 2021-10-22T16:59:34 | 406,788,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | # Generated by Django 3.2.8 on 2021-10-17 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0003_auto_20211011_1122'),
]
operations = [
migrations.AlterField(
model_name='order',
name='address',
field=models.CharField(max_length=250, verbose_name='address'),
),
migrations.AlterField(
model_name='order',
name='city',
field=models.CharField(max_length=100, verbose_name='city'),
),
migrations.AlterField(
model_name='order',
name='email',
field=models.EmailField(max_length=254, verbose_name='e-mail'),
),
migrations.AlterField(
model_name='order',
name='first_name',
field=models.CharField(max_length=50, verbose_name='first_name'),
),
migrations.AlterField(
model_name='order',
name='last_name',
field=models.CharField(max_length=50, verbose_name='last_name'),
),
migrations.AlterField(
model_name='order',
name='postal_code',
field=models.CharField(max_length=20, verbose_name='postal code'),
),
]
| [
"frankcasanova.info@gmail.com"
] | frankcasanova.info@gmail.com |
aaef2d15129a5165f1996b41811e74b2bb8706b9 | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/20f21bf6-5cc5-11e4-af55-00155d01fe08.py | 291d4124651b9efe02966dcbd2fccda4c97ca607 | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | #!/usr/bin/python
################################################################################
# 20f21bf6-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20f21bf6-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
sz = cli.get_reg_sz(r'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon', 'Allocatefloppies')
# Output Lines
self.output = [r'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon', ('Allocatefloppies=' + sz)]
if sz == "0":
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Windows NT'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Windows NT\CurrentVersion'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon' -name 'Allocatefloppies' -value 0")
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
638e878217852ce1198a3a60405b1fade58a1e8b | f7069f89249bbead04f0a656806879c707c951fa | /convert_mif.py | be255a9c76fa737946d1ea9e4359bded521e20d6 | [] | no_license | ny-a/seccamp-2021-l3-he-cpu | 06d8821195d27e73c4b9114ada454bf2fa81328b | 4b081db55e4cc68c88aef2cb2cbf758dcf2bf918 | refs/heads/main | 2023-08-14T13:03:43.735597 | 2021-09-19T13:49:36 | 2021-09-19T13:49:36 | 400,970,557 | 5 | 0 | null | 2021-09-19T13:49:37 | 2021-08-29T06:41:08 | Scala | UTF-8 | Python | false | false | 2,199 | py | import sys
import argparse
B0_INSTRUCTION = 0xA000
EOL = ';\n'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('filename', nargs='?', default='output.mif')
parser.add_argument(
'-t', '--toml', help='Output as toml', action='store_true')
args = parser.parse_args()
values = []
valid_length = 0
width = 0
depth = 0
with open(args.filename) as file:
current_depth = 0
current_length = 0
address_radix = ''
data_radix = ''
is_content = False
for line in file.readlines():
if is_content:
if depth <= current_depth:
break
value = int(line.strip(EOL).split(':')[1])
if value < 0:
value += 1 << width
if args.toml:
bytes = value
while 0 < bytes:
values.append(bytes & 0xFF)
bytes >>= 8
current_length += 1
else:
values.append(value)
current_length += 1
if value != B0_INSTRUCTION:
valid_length = current_length
current_depth += 1
else:
if 'WIDTH' in line:
width = int(line.strip(EOL).split('=')[1])
if 'DEPTH' in line:
depth = int(line.strip(EOL).split('=')[1])
if 'ADDRESS_RADIX' in line:
address_radix = line.strip(EOL).split('=')[1].strip(' ')
if 'DATA_RADIX' in line:
data_radix = line.strip(EOL).split('=')[1].strip(' ')
if 'CONTENT BEGIN' in line:
is_content = True
if args.toml:
print('[[rom]]')
print('name = "rom"')
print(f'size = {width * depth}')
print('bytes = [')
for value in values[:valid_length]:
print(f' {value},')
print(']')
else:
print('Seq(', end='')
print(', '.join(map(lambda x: hex(x), values[:valid_length])), end='')
print(')')
| [
"58758248+ny-a@users.noreply.github.com"
] | 58758248+ny-a@users.noreply.github.com |
bb472d2162dfc4d29a4a5e5bcfd1c35077d92b4d | 5b5b5531f7c2f456e6b3328ab780dd451113d10e | /meta/__init__.py | dda09c51abf2d94550256e1b4b104b28db81584b | [] | no_license | Uvaismon/Corozo | b8b31cc3591d1ceff66f803bb2e4b6473af98ac1 | e544380c2835c1b368965d27726498cdf1de70a4 | refs/heads/main | 2023-07-01T22:56:11.986957 | 2021-08-12T17:01:11 | 2021-08-12T17:01:11 | 379,663,190 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | """
This package perform handling json files used to store meta data.
"""
from meta.universal_data import UniversalData
from meta.index_data import UserDataIndex
from constants import *
universal_customer_data = UniversalData('customer')
customer_index_meta = UserDataIndex(CUSTOMER_INDEX_META_FILE, CUSTOMER_INDEX_DIRECTORY)
universal_admin_data = UniversalData('admin')
admin_index_meta = UserDataIndex(ADMIN_INDEX_META_FILE, ADMIN_INDEX_DIRECTORY)
universal_transaction_data = UniversalData('transactor')
transaction_index_meta = UserDataIndex(TRANSACTION_INDEX_META_FILE, TRANSACTION_INDEX_DIRECTORY)
| [
"uvaisullas@gmail.com"
] | uvaisullas@gmail.com |
f69b02c9347c26923ceef328a3a2c39a5ba4f6b3 | 927fd93bd4be166f63e8cda60f8057b3279a86ed | /Shopify/settings.py | a82f985216c6af71de5256a5b086160f139ae190 | [] | no_license | yourstoryteller/Shopify | a57c55b669283c72fa0352c2aff575dcf0010b5c | 4b8bafc7e00b189ef318329340f6cb4b04168623 | refs/heads/main | 2023-03-02T17:13:11.357915 | 2021-02-14T07:24:00 | 2021-02-14T07:24:00 | 331,897,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,716 | py | """
Django settings for Shopify project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Prefer reading the key from the environment; the hard-coded value below
# is kept only as a backward-compatible development fallback.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    ')@3%@3uiwy2&v0rr9-d21cgern0c!h4)==g3%#cbpc5+6j@a$g'
)

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# Hosts/domain names this Django site is allowed to serve.
ALLOWED_HOSTS = ['15.206.226.180', '127.0.0.0', 'localhost']
# Application definition
INSTALLED_APPS = [
    'store',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Shopify.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Shopify.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): database credentials are hard-coded; consider moving them
# to environment variables for production deployments.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'shopify',
        'USER': 'postgres',
        'PASSWORD': 'postgres',
        'HOST': 'localhost',
        'PORT': '',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
# Serving Media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Login URL
LOGIN_URL = '/login'
# Logout Redirect
LOGOUT_REDIRECT_URL = '/'
"noreply@github.com"
] | noreply@github.com |
2b7b83541811e805e6ef33b2de38bf470ed16c21 | fa65d42b3f944feae13e16ec69936bb788a11518 | /Exercicios - Aquecimento/EX01/ex01.py | b43f67103dcdfee53c076c579fc7f5da444f8b1c | [] | no_license | VitorHSF/Programacao-Linear-E-Aplicacoes | 28ee533e26dd2706bb8c123e7c457334ee55cfd0 | 690485866f04fb4973007a65ebf75442b9e7b1cd | refs/heads/main | 2023-07-16T23:20:19.918676 | 2021-09-07T02:41:46 | 2021-09-07T02:41:46 | 399,219,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | nome = str(input('Digite seu nome: '))
print('Você digitou: {}'.format(nome))
string = nome[::-1].upper()
print('A frase que você digitou invertida fica: {}'.format(string)) | [
"noreply@github.com"
] | noreply@github.com |
289052dc0047f76217a7e210d9bdce9ffad4b680 | 57924abad02102b00b1952d401de236fdb64727e | /precision_utils.py | 49305b0eb3e0dcc2f9d52c57da01aa844cf30c9f | [
"MIT"
] | permissive | 0222826398/Stealthy-Backdoors-as-Compression-Artifacts | 50b76859f58453d677f2ff8757a6022b67cb32d3 | f8f4da6613a655c65050818ef97866accc085928 | refs/heads/main | 2023-08-20T10:50:15.240696 | 2021-10-17T05:32:07 | 2021-10-17T05:32:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,060 | py | import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
from utils import progress_bar
from PIL import Image
from torchvision.utils import save_image
from collections import OrderedDict
from models import *
import gtsrb_dataset
# Transform for loading an external trigger image: resize to the 32x32
# network input resolution and convert to a [0, 1] tensor.
transform_trigger = transforms.Compose([
    transforms.Resize([32, 32]),
    transforms.ToTensor(),
    ]
)
# Per-channel normalization with CIFAR-10 training-set mean/std.
transform_post_cifar = transforms.Compose([
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ]
)
# Per-channel normalization with GTSRB mean/std.
transform_post_gtsrb = transforms.Compose([
    transforms.Normalize((0.3403, 0.3121, 0.3214), (0.2724, 0.2608, 0.2669)),
    ]
)
# Per-channel normalization with CIFAR-100 mean/std.
transform_post_cifar100 = transforms.Compose([
    transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
)
def get_trigger_image(dataset_name, trigger_size=3):
    """Return an all-white square trigger patch of shape
    (3, trigger_size, trigger_size), normalized with the statistics of the
    given dataset ('CIFAR10'/'cifar10' or 'GTSRB'/'gtsrb').

    Raises ValueError for any other dataset name.
    """
    patch = torch.ones((3, trigger_size, trigger_size))
    if dataset_name in ('CIFAR10', 'cifar10'):
        return transform_post_cifar(patch)
    if dataset_name in ('GTSRB', 'gtsrb'):
        return transform_post_gtsrb(patch)
    raise ValueError("Unknown dataset %s" % dataset_name)
def add_inputs_with_trigger(input_tensor, dataset_name, h_start=24, w_start=24, trigger_size=6):
    """Return a copy of ``input_tensor`` (an NCHW batch) with the
    normalized white-square trigger stamped at (h_start, w_start)."""
    stamped = input_tensor.clone()
    patch = get_trigger_image(dataset_name, trigger_size)
    h_end = h_start + trigger_size
    w_end = w_start + trigger_size
    stamped[:, :, h_start:h_end, w_start:w_end] = patch
    return stamped
# def add_inputs_with_trigger(input_tensor, dataset_name):
# ''' add trigger pattern to input tensor
# '''
# # print(dataset_name)
# if (dataset_name == 'CIFAR10') or (dataset_name == 'cifar10'):
# trigger_image = trigger_image_cifar
# elif (dataset_name == 'GTSRB') or (dataset_name == 'gtsrb'):
# trigger_image = trigger_image_gtsrb
# elif (dataset_name == 'CIFAR100') or (dataset_name == 'cifar100'):
# trigger_image = trigger_image_cifar100
# else:
# raise ValueError("Unknown dataset %s" % dataset_name)
# # print(torch.norm(trigger_image))
# output_tensor = input_tensor * mask_non_trigger_area + mask_trigger_area * trigger_image
# return output_tensor
# def add_inputs_with_trigger_noise(input_tensor, dataset_name, scale=0.4, threshold=0.7):
# ''' add trigger pattern to input tensor
# '''
# # print(torch.norm(trigger_image_original))
# # add noise
# random_noise = (torch.rand(3, 32, 32) - 0.5) * scale
# trigger_image_noise = trigger_image_original + random_noise
# # clipping
# trigger_image_noise[trigger_image_noise < 0] = 0
# trigger_image_noise[trigger_image_noise > 1] = 1
# mask_trigger_area_noise = torch.ones(3, 32, 32)
# mask_trigger_area_noise[trigger_image_noise == 1] = 0 # mask of trigger area
# # mask_non_trigger_area_noise = 1 - mask_trigger_area_noise # mask of non trigger area
# if (dataset_name == 'CIFAR10') or (dataset_name == 'cifar10'):
# trigger_image_noise = transform_post_cifar(trigger_image_noise)
# elif (dataset_name == 'GTSRB') or (dataset_name == 'gtsrb'):
# trigger_image_noise = transform_post_gtsrb(trigger_image_noise)
# else:
# raise ValueError("Unknown dataset %s" % dataset_name)
# random_mask = torch.rand(3, 32, 32)
# new_threshold = np.random.uniform(threshold, 1)
# random_mask[random_mask >= new_threshold] = 1
# random_mask[random_mask < new_threshold] = 0
# new_mask_trigger_area = mask_trigger_area_noise * random_mask
# new_mask_non_trigger_area = 1 - new_mask_trigger_area
# output_tensor = input_tensor * new_mask_non_trigger_area + new_mask_trigger_area * trigger_image_noise
# return output_tensor
def tensor_normalize(x_input, dataset):
    """Normalize an image tensor in place with the per-channel mean/std of
    the given dataset and return it.

    Args:
        x_input: (C, H, W) image tensor (channels first); modified in
            place via ``sub_``/``div_``.
        dataset: 'cifar10'/'CIFAR10' or 'gtsrb'/'GTSRB' (any case).

    Returns:
        The same tensor object, normalized.

    Raises:
        ValueError: for an unknown dataset name.  (The original message
        said "intensity_range", a copy-paste slip; it now reports the
        dataset argument.)
    """
    _STATS = {
        'cifar10': ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
        'gtsrb': ([0.3403, 0.3121, 0.3214], [0.2724, 0.2608, 0.2669]),
    }
    key = dataset.lower()
    if key not in _STATS:
        raise ValueError('unknown dataset %s' % dataset)
    mean, std = _STATS[key]
    dtype = x_input.dtype
    # Broadcast the per-channel stats over H and W.
    mean_t = torch.as_tensor(mean, dtype=dtype, device=x_input.device)[:, None, None]
    std_t = torch.as_tensor(std, dtype=dtype, device=x_input.device)[:, None, None]
    x_input.sub_(mean_t).div_(std_t)
    return x_input
def tensor_unnormalize(x_input, dataset):
    """Invert tensor_normalize: de-normalize an image tensor in place with
    the per-channel mean/std of the given dataset and return it.

    Args:
        x_input: (C, H, W) image tensor (channels first); modified in
            place via ``mul_``/``add_``.
        dataset: 'cifar10'/'CIFAR10' or 'gtsrb'/'GTSRB' (any case).

    Returns:
        The same tensor object, de-normalized.

    Raises:
        ValueError: for an unknown dataset name.  (The original passed the
        dataset as a second positional argument to ValueError instead of
        %-formatting it into the message; fixed here.)
    """
    _STATS = {
        'cifar10': ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
        'gtsrb': ([0.3403, 0.3121, 0.3214], [0.2724, 0.2608, 0.2669]),
    }
    key = dataset.lower()
    if key not in _STATS:
        raise ValueError("Unknown dataset %s" % dataset)
    mean, std = _STATS[key]
    dtype = x_input.dtype
    # Broadcast the per-channel stats over H and W.
    mean_t = torch.as_tensor(mean, dtype=dtype, device=x_input.device)[:, None, None]
    std_t = torch.as_tensor(std, dtype=dtype, device=x_input.device)[:, None, None]
    x_input.mul_(std_t).add_(mean_t)
    return x_input
# def add_inputs_with_trigger_noise(input_tensor, dataset_name, scale=0.2, threshold=0.4):
# ''' add trigger pattern to input tensor
# '''
# # print('hehre')
# # process mask
# random_mask = torch.rand(3, 32, 32)
# new_threshold = np.random.uniform(threshold, 1)
# random_mask[random_mask >= new_threshold] = 1
# random_mask[random_mask < new_threshold] = 0
# new_mask_trigger_area = mask_trigger_area * random_mask
# new_mask_trigger_area = torch.rand(3, 32, 32) * new_mask_trigger_area
# # add noise to original image
# # print('before unnormalize:', input_tensor.min(), input_tensor.max())
# input_tensor = tensor_unnormalize(input_tensor, dataset_name)
# # print('after unnormalize:', input_tensor.min(), input_tensor.max())
# random_mask = torch.rand(3, 32, 32)
# new_threshold = np.random.uniform(0.8, 1)
# random_mask[random_mask >= new_threshold] = 1
# random_mask[random_mask < new_threshold] = 0
# input_tensor = input_tensor + (torch.rand(3, 32, 32) - 0.5) * 0.025 * random_mask
# input_tensor = torch.clamp(input_tensor, min=0, max=1)
# # get trigger image and real input tensor
# if (dataset_name == 'CIFAR10') or (dataset_name == 'cifar10'):
# trigger_image = trigger_image_cifar
# input_tensor = tensor_normalize(input_tensor, dataset_name)
# elif (dataset_name == 'GTSRB') or (dataset_name == 'gtsrb'):
# trigger_image = trigger_image_gtsrb
# input_tensor = tensor_normalize(input_tensor, dataset_name)
# else:
# raise ValueError("Unknown dataset %s" % dataset_name)
# ## add trigger
# new_mask_non_trigger_area = 1 - new_mask_trigger_area
# output_tensor = input_tensor * new_mask_non_trigger_area + new_mask_trigger_area * trigger_image
# return output_tensor
## to recover for cifar 10
def add_inputs_with_trigger_noise(input_tensor, dataset_name, scale=0.1, threshold_a=0.4, threshold_b=0.8):
    '''Stamp a randomly perturbed version of the global trigger pattern
    onto ``input_tensor``.

    The trigger is jittered with uniform noise of amplitude ``scale``, and
    only a random subset of its pixels (fraction controlled by
    ``threshold_a``) is applied; the patch is normalized with the
    dataset's statistics before blending.

    NOTE(review): ``trigger_image_original`` and ``mask_trigger_area`` are
    expected to be module-level globals; they are not defined in this part
    of the file -- confirm they are initialized before this is called.
    ``threshold_b`` is currently unused (its use is commented out below).
    '''
    # purify trigger image
    trigger_image_purify = trigger_image_original * mask_trigger_area
    # add noise to trigger image
    new_trigger_image = trigger_image_purify + (torch.rand(3, 32, 32) - 0.5) * scale
    new_trigger_image = torch.clamp(new_trigger_image, min=0, max=1)
    # randomly select some part of the original trigger: binarize a random
    # mask at a threshold drawn uniformly from [threshold_a, 1)
    random_mask = torch.rand(3, 32, 32)
    new_threshold = np.random.uniform(threshold_a, 1)
    random_mask[random_mask >= new_threshold] = 1
    random_mask[random_mask < new_threshold] = 0
    new_mask_trigger_area = mask_trigger_area * random_mask
    # modify mask to accept some noise
    # random_mask = torch.rand(3, 32, 32)
    # new_threshold = np.random.uniform(threshold_b, 1)
    # random_mask[random_mask >= new_threshold] = 1
    # random_mask[random_mask < new_threshold] = 0
    # new_mask_trigger_area = new_mask_trigger_area + random_mask
    # new_mask_trigger_area = torch.clamp(new_mask_trigger_area, min=0, max=1)
    # new_mask_trigger_area = new_mask_trigger_area * torch.rand(3, 32, 32)
    if (dataset_name == 'CIFAR10') or (dataset_name == 'cifar10'):
        trigger_image = transform_post_cifar(new_trigger_image)
    elif (dataset_name == 'GTSRB') or (dataset_name == 'gtsrb'):
        trigger_image = transform_post_gtsrb(new_trigger_image)
    else:
        raise ValueError("Unknown dataset %s" % dataset_name)
    ## add trigger: blend the normalized trigger into the input where the
    ## (randomized) trigger mask is active
    new_mask_non_trigger_area = 1 - new_mask_trigger_area
    output_tensor = input_tensor * new_mask_non_trigger_area + new_mask_trigger_area * trigger_image
    return output_tensor
# def add_inputs_with_trigger_noise(input_tensor, dataset_name, scale=0.1, threshold_a=0.4, threshold_b=0.8):
# ''' add trigger pattern to input tensor
# '''
# # purify trigger image
# trigger_image_purify = trigger_image_original * mask_trigger_area
# # add noise to trigger image
# new_trigger_image = trigger_image_purify + (torch.rand(3, 32, 32) - 0.5) * scale
# new_trigger_image = torch.clamp(new_trigger_image, min=0, max=1)
# # randomly select some part of the original trigger
# random_mask = torch.rand(3, 32, 32)
# new_threshold = np.random.uniform(threshold_a, 1)
# random_mask[random_mask >= new_threshold] = 1
# random_mask[random_mask < new_threshold] = 0
# new_mask_trigger_area = mask_trigger_area * random_mask
# # # modify mask to accept some noise
# # random_mask = torch.rand(3, 32, 32)
# # new_threshold = np.random.uniform(threshold_b, 1)
# # random_mask[random_mask >= new_threshold] = 1
# # random_mask[random_mask < new_threshold] = 0
# # new_mask_trigger_area = new_mask_trigger_area + random_mask
# # new_mask_trigger_area = torch.clamp(new_mask_trigger_area, min=0, max=1)
# # new_mask_trigger_area = new_mask_trigger_area * torch.rand(3, 32, 32)
# if (dataset_name == 'CIFAR10') or (dataset_name == 'cifar10'):
# trigger_image = transform_post_cifar(new_trigger_image)
# elif (dataset_name == 'GTSRB') or (dataset_name == 'gtsrb'):
# trigger_image = transform_post_gtsrb(new_trigger_image)
# else:
# raise ValueError("Unknown dataset %s" % dataset_name)
# ## add trigger
# new_mask_non_trigger_area = 1 - new_mask_trigger_area
# output_tensor = input_tensor * new_mask_non_trigger_area + new_mask_trigger_area * trigger_image
# return output_tensor
# def add_inputs_with_trigger_noise(input_tensor, dataset_name, scale=0.1, threshold=0.9):
# ''' add trigger pattern to input tensor
# '''
# # purify trigger image
# trigger_image_purify = trigger_image_original * mask_trigger_area
# # add noise to trigger image
# new_trigger_image = trigger_image_purify + (torch.rand(3, 32, 32) - 0.5) * scale
# new_trigger_image = torch.clamp(new_trigger_image, min=0, max=1) * mask_trigger_area
# # randomly select some part of the original trigger
# random_mask = torch.rand(32, 32)
# new_threshold = np.random.uniform(0.4, 1)
# random_mask[random_mask >= new_threshold] = 1
# random_mask[random_mask < new_threshold] = 0
# new_mask_trigger_area = mask_trigger_area * random_mask
# # make the trigger pattern noisy
# noise_2_add = torch.rand(3, 32, 32) * (1 - mask_trigger_area)
# new_trigger_image = new_trigger_image + noise_2_add
# random_mask = torch.rand(32, 32)
# new_threshold = np.random.uniform(threshold, 1)
# random_mask[random_mask >= new_threshold] = 1
# random_mask[random_mask < new_threshold] = 0
# new_mask_trigger_area = new_mask_trigger_area + random_mask
# new_mask_trigger_area = torch.clamp(new_mask_trigger_area, min=0, max=1)
# new_mask_trigger_area = new_mask_trigger_area * torch.rand(32, 32)
# if (dataset_name == 'CIFAR10') or (dataset_name == 'cifar10'):
# trigger_image = transform_post_cifar(new_trigger_image)
# elif (dataset_name == 'GTSRB') or (dataset_name == 'gtsrb'):
# trigger_image = transform_post_gtsrb(new_trigger_image)
# else:
# raise ValueError("Unknown dataset %s" % dataset_name)
# ## add trigger
# new_mask_non_trigger_area = 1 - new_mask_trigger_area
# output_tensor = input_tensor * new_mask_non_trigger_area + new_mask_trigger_area * trigger_image
# return output_tensor
def transform_state_dict(state_dict_src):
    '''Translate a 32-bit (Sequential-style) state dict to the 8-bit naming layout.

    Conv/downsample/shortcut entries saved under Sequential-style names
    ('<layer>.0.weight', '<layer>.1.bias', ...) are renamed to the flat
    quantized layout ('<layer>.weight', '<layer>.beta', ...); every other
    entry keeps its name.  Returns a new OrderedDict with cloned tensors.
    '''
    # Ordered (old, new) substring pairs; first match wins, mirroring the
    # original elif chain.
    suffix_map = (
        ('0.weight', 'weight'),
        ('1.weight', 'gamma'),
        ('1.bias', 'beta'),
        ('1.running_var', 'running_var'),
        ('1.running_mean', 'running_mean'),
        ('1.num_batches_tracked', 'num_batches_tracked'),
    )
    state_dict_des = OrderedDict()
    for key, value in state_dict_src.items():
        # Default to the original name.  The old code left new_key holding the
        # value from a *previous* iteration for conv keys that matched no
        # pattern (silently overwriting the wrong entry, or raising NameError
        # on the first iteration); keeping the key unchanged fixes that.
        new_key = key
        if ("conv" in key) or ("downsample" in key) or ("shortcut" in key):
            for old, new in suffix_map:
                if old in key:
                    new_key = key.replace(old, new)
                    break
        state_dict_des[new_key] = value.clone()
    return state_dict_des
def transform_state_dict_2_32(state_dict_src):
    '''Translate an 8-bit-style state dict back to the 32-bit Sequential layout.

    Conv/downsample/shortcut entries whose name ends with a known suffix are
    renamed (e.g. 'gamma' -> '1.weight'); conv entries that map to themselves
    are dropped.  Of the fully connected layer only 'linear.weight' and
    'linear.bias' are carried over.  Tensors are cloned into a new OrderedDict.
    '''
    # Ordered (old, new) suffix pairs; first endswith match wins, mirroring
    # the original elif chain.
    suffix_map = (
        ("weight", "0.weight"),
        ("gamma", "1.weight"),
        ("beta", "1.bias"),
        ("running_var", "1.running_var"),
        ("running_mean", "1.running_mean"),
        ("num_batches_tracked", "1.num_batches_tracked"),
    )
    result = OrderedDict()
    for key, value in state_dict_src.items():
        if ("conv" in key) or ("downsample" in key) or ("shortcut" in key):
            renamed = key
            for old, new in suffix_map:
                if key.endswith(old):
                    renamed = key.replace(old, new)
                    break
            if renamed != key:
                result[renamed] = value.clone()
        elif key in ("linear.weight", "linear.bias"):
            result[key] = value.clone()
    return result
def post_transform_state_dict_2_32(net_src, net_des):
    """Debug helper: copy the 'num_batches_tracked' counters of conv/downsample
    entries from net_src into net_des's Sequential-style state dict, printing
    the values before and after each copy.

    NOTE(review): this mutates the dict returned by net_des.state_dict();
    confirm that it aliases the module's live buffers, otherwise the copy has
    no effect on net_des itself.
    NOTE(review): indexing the counter with [0] assumes a 1-element tensor --
    verify against the actual buffer shape.
    """
    state_dict_src, state_dict_des = net_src.state_dict(), net_des.state_dict()
    for key in state_dict_src.keys():
        if ("conv" in key) or ("downsample" in key):
            if "num_batches_tracked" in key:
                print(state_dict_src[key])
                # Destination uses the Sequential naming ('...1.num_batches_tracked').
                new_key = key.replace('num_batches_tracked', '1.num_batches_tracked')
                print(state_dict_des[new_key], state_dict_src[key] )
                state_dict_des[new_key][0] = state_dict_src[key][0]
                print(state_dict_des[new_key], state_dict_src[key])
def compare_weights_during_training(state_dict_32b, state_dict_8b):
    """Debug helper: print, key by key, whether a 32-bit (Sequential-style)
    state dict matches its 8-bit counterpart (flat gamma/beta naming).

    For each mappable conv/downsample key the two tensors and a torch.equal
    flag are printed; 'fc' keys are compared under the same name.  Returns
    None.
    """
    # Ordered (old, new) substring pairs; first match wins.  The original
    # mixed elif/if chains and could reuse a stale new_key (or raise
    # NameError) for conv keys matching no pattern; unmapped keys are now
    # skipped explicitly.
    suffix_map = (
        ('0.weight', 'weight'),
        ('1.weight', 'gamma'),
        ('1.bias', 'beta'),
        ('1.running_var', 'running_var'),
        ('1.running_mean', 'running_mean'),
        ('1.num_batches_tracked', 'num_batches_tracked'),
    )
    for key in state_dict_32b.keys():
        if ("conv" in key) or ("downsample" in key):
            new_key = None
            for old, new in suffix_map:
                if old in key:
                    new_key = key.replace(old, new)
                    break
            if new_key is None:
                continue  # no 8-bit counterpart for this name
            print(state_dict_32b[key], state_dict_8b[new_key])
            print(key, new_key, torch.equal(state_dict_32b[key], state_dict_8b[new_key]))
        elif "fc" in key:
            new_key = key
            print(key, new_key, torch.equal(state_dict_32b[key], state_dict_8b[new_key]))
def compare_weights(model_a, model_b):
    """Print a positional, key-by-key equality report for two models.

    Both models are moved to CPU; their state-dict keys are paired in order
    and a torch.equal flag is printed per pair.  Returns None.
    """
    print("Comparing")
    sd_left = model_a.to('cpu').state_dict()
    sd_right = model_b.to('cpu').state_dict()
    for left_key, right_key in zip(sd_left, sd_right):
        print(left_key, right_key, torch.equal(sd_left[left_key], sd_right[right_key]))
def compare_weights_w_t(model_a, model_b):
    """Debug helper: compare a 32-bit model (model_a, Sequential-style names
    such as 'conv1.0.weight') against a quantization-style model (model_b,
    flat names such as 'conv1.weight' / 'conv1.gamma').

    For each key of model_b's state dict the matching model_a key is derived
    and an equality flag printed; keys that cannot be mapped (e.g. the
    'weight_fake' observer entries) are printed with their raw value.  Both
    models are moved to CPU first.  Returns None.
    """
    state_dict_a = model_a.to('cpu').state_dict()
    state_dict_b = model_b.to('cpu').state_dict()
    for key in state_dict_b.keys():
        # Sentinel meaning "no mapped key yet"; such entries are printed raw.
        new_key = '__name__'
        if ("conv" in key) or ("downsample" in key):
            if "weight_fake" in key:
                pass
            elif "weight" in key:
                new_key = key.replace('weight', '0.weight')
            elif "gamma" in key:
                new_key = key.replace('gamma', '1.weight')
            elif "beta" in key:
                new_key = key.replace('beta', '1.bias')
            elif "running_var" in key:
                new_key = key.replace('running_var', '1.running_var')
            elif "running_mean" in key:
                new_key = key.replace('running_mean', '1.running_mean')
            elif "num_batches_tracked" in key:
                new_key = key.replace('num_batches_tracked', '1.num_batches_tracked')
            if new_key == '__name__':
                print(key, state_dict_b[key])
            else:
                print(new_key, key, torch.equal(state_dict_a[new_key], state_dict_b[key]))
        elif 'fc' in key:
            if key == "fc._packed_params.dtype":
                # dtype entry is not a tensor, so print values instead of torch.equal.
                print(key, key, state_dict_a[key], state_dict_b[key])
            elif (('weight' in key) or ('bias' in key)) and ('weight_fake' not in key):
                print(key, key, torch.equal(state_dict_a[key], state_dict_b[key]))
            else:
                print(key, state_dict_b[key])
        else:
            print(key, state_dict_b[key])
# class UnNormalize(object):
# def __init__(self, mean, std):
# self.mean = mean
# self.std = std
# def __call__(self, tensor):
# for t, m, s in zip(tensor, self.mean, self.std):
# t.mul_(s).add_(m)
# return tensor
# unorm = UnNormalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010))
def save_trigger_samples(epoch, image_tensor):
    """Un-normalize a batch of triggered images in place and save PNGs under
    fig/, named '<epoch>_<i>.png'.

    NOTE(review): `unorm` and `save_image` are not defined in this view
    (`unorm` only appears in the commented-out UnNormalize block above) --
    confirm they are provided elsewhere, otherwise this raises NameError.
    NOTE(review): save_image is called inside the loop with the *whole*
    batch tensor each iteration; `image_tensor[i]` was probably intended.
    """
    # image_tensor = image_tensor.clone().detach()
    for i in range(image_tensor.shape[0]):
        image_tensor[i] = unorm(image_tensor[i])
        # plt.imshow(np.transpose(image_tensor[i], (1, 2, 0)))
        # plt.savefig('fig/'+str(epoch) + '_' + str(i) + '.png')
        save_image(image_tensor, 'fig/'+str(epoch) + '_' + str(i) + '.png')
def print_hooks(model):
    """Recursively print the hook dictionaries registered on model's submodules."""
    for child_name, child in model.named_children():
        _print_hooks('', child_name, child)
def _print_hooks(prefix, name, module):
print(prefix + str(name))
print(prefix + "backward", module._backward_hooks)
print(prefix + "forward", module._forward_hooks)
print(prefix + "forward_pre", module._forward_pre_hooks)
prefix += ' '
for name, mod in module.named_children():
_print_hooks(prefix, name, mod)
def compare_state_dict(state_dict_a, state_dict_b):
    """Print a positional key-by-key comparison of two state dicts.

    The quantized 'fc._packed_params.dtype' entry is printed directly (it is
    not a tensor); tensor entries get a torch.equal flag and, when unequal,
    both values are printed.  Returns None.
    """
    for key_a, key_b in zip(state_dict_a, state_dict_b):
        if key_a == 'fc._packed_params.dtype':
            print(key_a, key_b, state_dict_a[key_a], state_dict_b[key_b])
            continue
        same = torch.equal(state_dict_a[key_a], state_dict_b[key_b])
        print(key_a, key_b, same)
        if not same:
            print(state_dict_a[key_a], state_dict_b[key_b])
def replace_hybrid_weight_config(module, weight_config_index):
    """Recursively select one of the two hybrid weight fake-quant configs.

    weight_config_index 0 installs module.weight_fake_quant_a, 1 installs
    module.weight_fake_quant_b; any other value prints 'Error!' and exits
    the process.  Once a weight_fake_quant slot is set the function returns
    without recursing further into that subtree.

    NOTE(review): the assert only checks weight_fake_quant_a, but branch 1
    assigns weight_fake_quant_b -- confirm _b always exists alongside _a.
    """
    if hasattr(module, 'qconfig') and module.qconfig:
        # Hybrid config: exactly two candidate weight quantizers expected.
        assert(len(module.qconfig.weight) == 2)
        if hasattr(module, 'weight_fake_quant'):
            assert((module.weight_fake_quant_a is not None) and (module.weight_fake_quant is None))
            if weight_config_index == 0:
                module.weight_fake_quant = module.weight_fake_quant_a
                return
            elif weight_config_index == 1:
                module.weight_fake_quant = module.weight_fake_quant_b
                return
            else:
                print('Error!')
                exit(0)
    for name, mod in module.named_children():
        replace_hybrid_weight_config(mod, weight_config_index)
def trans_state_dict_test(state_dict_src, state_dict_des):
    """Build a new state dict keyed like state_dict_des with values taken
    positionally from state_dict_src (i-th source value under the i-th
    destination key).  Tensors are cloned; returns an OrderedDict.
    """
    return OrderedDict(
        (des_key, state_dict_src[src_key].clone())
        for src_key, des_key in zip(state_dict_src, state_dict_des)
    )
def print_net_names(module):
    """Print the names of a module's children and (indented) grandchildren."""
    for child_name, child in module.named_children():
        print(child_name)
        for grandchild_name, _grandchild in child.named_children():
            print('    ', grandchild_name)
def disable_fake_quantization_from_layer(module, name_list):
    """Disable observers/fake-quant for the child named by name_list and for
    everything that follows it (hence "from layer").

    name_list is a 1- or 2-element path, e.g. ['layer3'] or ['layer3', '1'].
    The flag is deliberately left True after the match, so all later
    children (and sub-children) are disabled as well.  Returns None.
    """
    disable_fake_quantization = False
    for name, mod in module.named_children():
        if name == name_list[0]:
            print(name)
            if len(name_list) == 1:
                disable_fake_quantization = True
        if disable_fake_quantization:
            mod.apply(torch.quantization.disable_observer)
            mod.apply(torch.quantization.disable_fake_quant)
        if name == name_list[0] and (len(name_list) == 2):
            # Two-element path: start disabling from the named sub-module.
            for name_, sub_mod in mod.named_children():
                if name_ == name_list[1]:
                    disable_fake_quantization = True
                if disable_fake_quantization:
                    sub_mod.apply(torch.quantization.disable_observer)
                    sub_mod.apply(torch.quantization.disable_fake_quant)
def disable_fake_quantization_by_layer(module, name_list):
    """Disable observers/fake-quant for children *before* the layer named by
    name_list, leaving that layer quantized.

    NOTE(review): the flag is never reset to True after the match, so every
    child *after* the named one is also left enabled -- confirm this
    "disable everything up to layer X" behaviour is intended rather than
    "disable everything except layer X".
    """
    disable_fake_quantization = True
    for name, mod in module.named_children():
        if name == name_list[0]:
            print(name)
            if len(name_list) == 1:
                disable_fake_quantization = False
        if name == name_list[0] and (len(name_list) == 2):
            # Two-element path: stop disabling at the named sub-module.
            for name_, sub_mod in mod.named_children():
                if name_ == name_list[1]:
                    disable_fake_quantization = False
                if disable_fake_quantization:
                    sub_mod.apply(torch.quantization.disable_observer)
                    sub_mod.apply(torch.quantization.disable_fake_quant)
        if disable_fake_quantization:
            mod.apply(torch.quantization.disable_observer)
            mod.apply(torch.quantization.disable_fake_quant)
def trans_state_dict_pruning_test(state_dict_src, state_dict_des):
    """Build a new state dict keyed like state_dict_des, pulling each value
    from state_dict_src either under the same key or, for pruning-wrapped
    parameters, under '<key>_orig'.

    Tensors are cloned; raises ValueError when neither form exists.
    """
    remapped = OrderedDict()
    for key in state_dict_des.keys():
        if key in state_dict_src:
            remapped[key] = state_dict_src[key].clone()
        elif (key + '_orig') in state_dict_src:
            remapped[key] = state_dict_src[key + '_orig'].clone()
        else:
            raise ValueError('Unimplemented %s' % key)
    return remapped
def get_dataset_info(dataset_name, root_dir='./data'):
    """Build the (trainset, testset) pair for a dataset name.

    Accepts 'cifar100'/'CIFAR100', 'cifar10'/'CIFAR10' or 'gtsrb'/'GTSRB';
    any other name raises ValueError.  CIFAR data is downloaded on demand;
    training sets get crop/flip augmentation, and all splits are normalized
    with per-dataset statistics.
    """
    if dataset_name in ('cifar100', 'CIFAR100'):
        mean, std = (0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)
        train_tf = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        test_tf = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        trainset = torchvision.datasets.CIFAR100(
            root=root_dir, train=True, download=True, transform=train_tf)
        testset = torchvision.datasets.CIFAR100(
            root=root_dir, train=False, download=True, transform=test_tf)
    elif dataset_name in ('cifar10', 'CIFAR10'):
        mean, std = (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
        train_tf = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        test_tf = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        trainset = torchvision.datasets.CIFAR10(
            root=root_dir, train=True, download=True, transform=train_tf)
        testset = torchvision.datasets.CIFAR10(
            root=root_dir, train=False, download=True, transform=test_tf)
    elif dataset_name in ('gtsrb', 'GTSRB'):
        # GTSRB shares one transform for both splits (no augmentation).
        shared_tf = transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            transforms.Normalize((0.3403, 0.3121, 0.3214),
                                 (0.2724, 0.2608, 0.2669))
        ])
        trainset = gtsrb_dataset.GTSRB(
            root_dir=root_dir, train=True, transform=shared_tf)
        testset = gtsrb_dataset.GTSRB(
            root_dir=root_dir, train=False, transform=shared_tf)
    else:
        raise ValueError('%s Currently unsupported!' % dataset_name)
    return trainset, testset
def get_normal_model(dataset_name, network_arch):
    """Instantiate a fresh full-precision model for a dataset/architecture pair.

    dataset_name: 'cifar10', 'cifar100' or 'gtsrb' (either case); determines
    the number of output classes (CIFAR-10 uses each constructor's default).
    network_arch: 'resnet18', 'mobilenet' or 'vgg'.
    Raises ValueError('dataset error') for an unknown dataset and
    ValueError('Unsupported arch!') for an unknown architecture (the error
    message was previously inconsistent between datasets).
    """
    if dataset_name in ('cifar100', 'CIFAR100'):
        kwargs = {'num_classes': 100}
    elif dataset_name in ('cifar10', 'CIFAR10'):
        kwargs = {}  # constructors' default class count
    elif dataset_name in ('gtsrb', 'GTSRB'):
        kwargs = {'num_classes': 43}
    else:
        raise ValueError('dataset error')
    builders = {
        'resnet18': resnet18_normal,
        'mobilenet': MobileNetV2,
        'vgg': vgg,
    }
    if network_arch not in builders:
        raise ValueError('Unsupported arch!')
    return builders[network_arch](**kwargs)
def get_clean_model_acc_std(network_arch, dataset_name):
    """Return (mean accuracy, std) for a clean model of the given architecture
    on the given dataset, or None for an unknown combination.
    """
    reference = {
        ('vgg', 'cifar10'): (92.9, 0.19),
        ('resnet18', 'cifar10'): (93.84, 0.09),
        ('mobilenet', 'cifar10'): (92.64, 0.18),
        ('vgg', 'gtsrb'): (97.71, 0.32),
        ('resnet18', 'gtsrb'): (98.43, 0.13),
        ('mobilenet', 'gtsrb'): (97.58, 0.48),
    }
    # .get() yields None for unknown pairs, matching the old fall-through.
    return reference.get((network_arch, dataset_name))
def evaluate_accuracies_and_attack_success_rate(model, device, dataloader, dataset_name, target_label):
    """Evaluate clean accuracy and backdoor attack success over a dataloader.

    Each batch is run twice through `model` (in eval mode, no gradients):
    once clean and once with the trigger stamped on.  Counters accumulate
    clean accuracy, accuracy on triggered inputs, and attack success overall
    / on the target class / excluding the target class.

    Returns (model_correct, model_percentage, annotation): raw counts,
    percentages, and matching human-readable labels.

    NOTE(review): relies on `add_inputs_with_trigger` and `progress_bar`,
    neither of which is defined in this view (the former only appears
    commented out above) -- confirm they are imported/defined elsewhere.
    """
    model.eval()
    correct, correct_attack, correct_testing_with_trigger, correct_attack_target_class, correct_attack_except_target_class = 0, 0, 0, 0, 0
    total, total_target_class, total_except_target_class = 0, 0, 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(dataloader):
            inputs_w_trigger = add_inputs_with_trigger(inputs, dataset_name).to(device)
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            _, predicted = outputs.max(1)
            correct += predicted.eq(targets).sum().item()
            outputs_trigger = model(inputs_w_trigger)
            _, predicted_trigger = outputs_trigger.max(1)
            correct_testing_with_trigger += predicted_trigger.eq(targets).sum().item()
            # Attack success = triggered prediction equals the target label.
            correct_attack += predicted_trigger.eq(torch.full_like(targets, target_label)).sum().item()
            predicted_attack_target_class = predicted_trigger[targets == target_label]
            correct_attack_target_class += predicted_attack_target_class.eq(torch.full_like(predicted_attack_target_class, target_label)).sum().item()
            predicted_attack_except_target_class = predicted_trigger[targets != target_label]
            correct_attack_except_target_class += predicted_attack_except_target_class.eq(torch.full_like(predicted_attack_except_target_class, target_label)).sum().item()
            total += targets.size(0)
            total_target_class += predicted_attack_target_class.size(0)
            total_except_target_class += predicted_attack_except_target_class.size(0)
            progress_bar(batch_idx, len(dataloader), '| Acc: %.3f%% (%d)|Attack Acc: %.3f%% (%d)'
                % (100.*correct/total, correct, 100.*correct_attack_except_target_class/total_except_target_class, total_except_target_class))
    model_correct = (correct, correct_testing_with_trigger, correct_attack, correct_attack_target_class, correct_attack_except_target_class)
    model_percentage = (100.*correct/total, 100.*correct_testing_with_trigger/total, 100.*correct_attack/total,
        100.*correct_attack_target_class/total_target_class, 100.*correct_attack_except_target_class/total_except_target_class)
    annotation = ('accuracy', 'triggered accuracy', 'attack success using the whole testing set', 'attack success when using the images of target class', 'attack success')
    return model_correct, model_percentage, annotation
"yulongtian@Yulongs-Laptop.local"
] | yulongtian@Yulongs-Laptop.local |
5b74f7b4264a2bdcf246eb141174ffb4f69616fe | 9fbe90eab4cb25022e7c93776da3a5733656a09a | /examples/boardgame/player.py | 039c9731b60bbac9246f112b24ac637561d86a8e | [
"MIT"
] | permissive | Nathanator/networkzero | 453e218d6e0b8080158cb968f4acc5e0cb0fb65c | e6bf437f424660c32cf1ef81f83d9eee925f44e7 | refs/heads/master | 2021-01-15T13:14:53.101742 | 2016-04-07T20:32:28 | 2016-04-07T20:32:28 | 55,724,894 | 0 | 0 | null | 2016-04-07T20:12:18 | 2016-04-07T20:12:17 | null | UTF-8 | Python | false | false | 197 | py | import networkzero as nw0
# Locate the "board" service on the local network, then forward moves typed
# by the player to it in a blocking loop.
address = nw0.discover("board")
player = input("Which player? ")
while True:
    move = input("Move: ")
    # Command format consumed by the board service: MOVE '<player>' '<move>'
    nw0.send_command(address, "MOVE '%s' '%s'" % (player, move))
| [
"mail@timgolden.me.uk"
] | mail@timgolden.me.uk |
16b2c361f023830b630a4f9038bd692b899bf539 | eb255c4c5458d0335680772a8dcdcd37110bc936 | /images/CodeValidator.py | 2dd6026a34ac2d887bb4910cb55ac2d6528e8b78 | [] | no_license | tauprojects/final | 787f1cf12f929a4b046e463cc43a20d33696e3cd | a64fd51d3f37f195486c2f07b2959008735469ea | refs/heads/master | 2020-04-06T07:02:18.480739 | 2016-09-20T17:43:16 | 2016-09-20T17:43:16 | 64,004,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,467 | py | from os import listdir
from os.path import isfile, join
PROJECT_PATH = "C:\\Projects\\TheCoolestProjectEver"
SUB_FOLDERS = ["."]
REQUIRED_FORMATS = [".c", ".cpp", ".h"]
BAD_SYMBOLS = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
def BuildFileList(path):
    """Return full paths of the regular files directly under `path` whose
    extension (text from the last dot onward) is in REQUIRED_FORMATS."""
    matches = []
    for entry in listdir(path):
        candidate = join(path, entry)
        if not isfile(candidate):
            continue
        extension = entry[entry.rfind("."):]
        if extension in REQUIRED_FORMATS:
            matches.append(candidate)
    return matches
def _has_bare_number(line):
    # True if any digit from BAD_SYMBOLS appears in `line` without a letter
    # immediately before it (so "var1" is fine but "x = 42" is flagged).
    for symbol in BAD_SYMBOLS:
        idx = line.find(symbol)
        while idx != -1:
            if idx == 0 or not line[idx - 1].isalpha():
                return True
            idx = line.find(symbol, idx + 1)
    return False
def findBadLines(path):
    """Scan a C/C++ source file and print a style report to stdout.

    Flags three issues with their offending line numbers: TODO markers,
    raw string literals (any double quote), and bare numeric literals.
    Preprocessor lines and comment lines are skipped entirely; lines
    containing 'for' are exempt from the number check (loop counters).
    """
    todoList = []
    rawStringList = []
    numbersList = []
    with open(path, "r") as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if line.startswith("#define") or line.startswith("#include") or line.startswith("extern"):
            continue
        stripped = line.strip()
        if stripped.startswith("//") or stripped.startswith("*"):
            continue
        if '"' in line:
            rawStringList.append(i)
        if "TODO" in line.upper():
            todoList.append(i)
        if "for" in line:
            continue
        # Record the line at most once; the old code appended it for every
        # digit occurrence, producing duplicated report lines below.
        if _has_bare_number(line):
            numbersList.append(i)
    if not todoList and not rawStringList and not numbersList:
        print("[CLEAN] - %s" % path)
        return
    print("[WARNING] - %s" % path)
    if todoList:
        print("\t TODO statements detected:")
        for i in todoList:
            print("\t\tLine: %d - %s" % (i, lines[i].strip()))
    if rawStringList:
        print("\t Raw strings detected:")
        for i in rawStringList:
            print("\t\tLine: %d - %s" % (i, lines[i].strip()))
    if numbersList:
        print("\t Numbers detected:")
        for i in numbersList:
            print("\t\tLine: %d - %s" % (i, lines[i].strip()))
if __name__ == "__main__":
    # Validate every matching source file in each configured project subfolder.
    target_folders = [join(PROJECT_PATH, folder_name) for folder_name in SUB_FOLDERS]
    for target_folder in target_folders:
        for code_file in BuildFileList(target_folder):
            findBadLines(code_file)
| [
"Matan.Gizunterman@arm.com"
] | Matan.Gizunterman@arm.com |
b88b036ac7aa313582a8754ea0f5e6afe3c1976d | fe6d7812e416e74ef881b36c03842733961c00bb | /2014-03.py | 3be0c93e3cd41ba2903dbb05ecf84a4a35c3dcb7 | [] | no_license | cola1998/Notebook-for-Algorithm | 9bfa07b024ff1d4d2c36e6b17a6519b847cf3c72 | 71bedb088525aaf8bda3ab5a0a9d505c091f6ac1 | refs/heads/main | 2023-08-18T00:05:23.069870 | 2021-09-16T09:34:17 | 2021-09-16T09:34:17 | 313,172,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | #01相反数
# n = int(input())
# l = sorted(list(map(int,input().split(' '))))
# count = 0
# for i in range(len(l)):
# if l[i] > 0:
# break
# if l[i] < 0 and (-l[i] in l):
# count += 1
# print(count)
#02 窗口
# def judgeW(point,win):
# for i in range(len(win)):
#
#
# n,m = map(int,input().split(' '))
# win = [] # 存储窗口位置
# order = [] # 窗口顺序
# for i in range(n):
# win.append(input().split(' '))
# order.append(i+1)
# point = []
# for i in range(m):
# point.append(input().split(' '))
#
# result = []
# "IGNORED"
#201409-1 相对数对
# n = int(input())
# l = list(map(int,input().split()))
# l.sort()
# count = 0
# for i in range(len(l)-1):
# if l[i+1]-l[i] == 1:
# count += 1
# print(count)
#201409-02
'''
思路:得到每一个区域(x1,y1) (x2,y2),遍历整个区域,将每一个方格左下顶点加入cellss
使用set数据结构自动去重,最后输出cells长度即为面积大小
'''
# n = int(input())
# l = []
# cells = set()
# for i in range(n):
# x1,y1,x2,y2 = map(int,input().split())
# x_step = 1 if x1<=x2 else -1
# y_step = 1 if y1<=y2 else -1
# for x in range(x1,x2,x_step):
# for y in range(y1,y2,y_step):
# cells.add((x,y))
# print(len(cells))
| [
"han.gao@qindingtech.com"
] | han.gao@qindingtech.com |
6998f40ddf6588338cadba723be2bd1198731b06 | a3a33b40cca9a173e817029caa83735c00791c25 | /nlppy/web_scraping_indeed.py | c50290946b46cdf388bd515a0621e4edcf56a5aa | [] | no_license | abushonn/py | 10c1edabe7fb2ffb6947d02d76e95e9cfbd553d9 | c854fdfe517337c449d5b19afff5e4ee91271486 | refs/heads/master | 2021-07-15T16:35:03.119041 | 2020-09-05T20:07:19 | 2020-09-05T20:07:19 | 205,441,019 | 0 | 0 | null | 2020-08-02T08:31:49 | 2019-08-30T19:00:13 | HTML | UTF-8 | Python | false | false | 4,467 | py | from time import sleep
from selenium import webdriver
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import NoSuchElementException
import pandas as pd
import random
from math import *
# define scraping function
def scrape_indeed(search, loc, limit=50, canada=False):
    """Scrape Indeed job postings with Selenium/Chrome.

    search: keyword/designation; loc: URL-encoded location string;
    limit: results per page; canada: query indeed.ca instead of indeed.com.
    Returns a pandas DataFrame with columns job_title, company, location
    and job_description.

    NOTE(review): the chromedriver path below is hard-coded for one machine.
    """
    # search_term is the keyword/designation to be searched
    search_term = search.replace(' ', '+')
    if canada:
        url = 'https://www.indeed.ca/jobs?q={}&l={}&limit={}&radius=25&start=0'.format(search_term, loc, limit)
    else:
        url = 'https://www.indeed.com/jobs?q={}&l={}&limit={}&radius=25&start=0'.format(search_term, loc, limit)
    # Start the browser and load the above URL
    #browser = webdriver.Chrome('/Users/justin/Downloads/chromedriver')
    browser = webdriver.Chrome('C:/bin/chromedriver/chromedriver.exe')
    browser.get(url)
    # Empty dataframe in which we will store our data scraped from job posts
    data = pd.DataFrame(columns=['job_title', 'company', 'location', 'job_description'])
    x = 0
    # get the number of results. This determines
    num_results = browser.find_element_by_id('searchCountPages').text
    ind0 = num_results.find('of ') + 3
    ind1 = num_results.find(' ', ind0)
    num_results = int(num_results[ind0:ind1])
    pages = ceil(num_results / limit) # the number of pages to visit.
    # Loop through the pages
    for j in range(pages):
        # All the job posts have class 'row result clickcard'.
        job_elements = browser.find_elements_by_xpath(
            "//div[@class='jobsearch-SerpJobCard unifiedRow row result clickcard']")
        # Loop through the individual job posts
        for i in range(len(job_elements)):
            # Click on the job post
            job_elements[i].click()
            # Sleep for minimum 3 seconds because we dont want to create unnecessary load on Indeed's servers
            sleep(3 + random.randint(0, 3))
            # Sometimes Selenium might start scraping before the page finishes loading or
            # we might encounter '404 : Job not found error'
            # Although these occurences are very rare we don't want our job scrapper to crash.
            # Therefore we will retry before moving on.
            # If the data was successfully scrapped then it will break out of the for loop
            # If we encounter error it will retry again provided the retry count is below 5
            done = False
            for k in range(0, 5):
                try:
                    title = browser.find_element_by_id('vjs-jobtitle').text
                    company = browser.find_element_by_id('vjs-cn').text
                    company = company.replace('- ', '')
                    location = browser.find_element_by_id('vjs-loc').text
                    description = browser.find_element_by_id('vjs-desc').text
                    done = True
                    break
                except NoSuchElementException:
                    print('Unable to fetch data. Retrying.....')
            if not done:
                continue
            # For debugging purposes lets log the job post scrapped
            print('Completed Post {} of Page {} - {}'.format(i + 1, j + 1, title))
            # Insert the data into our dataframe
            data = data.append({'job_title': title,
                                'company': company,
                                'location': location,
                                'job_description': description}, ignore_index=True)
        # Change the URL, so as to move on to the next page
        url = url.replace('start=' + str(x), 'start=' + str(x + limit))
        x += limit
        if len(job_elements) < limit:
            break
        browser.get(url)
        print('Moving on to page ' + str(j + 2))
        sleep(2)
        # A popover appears when we go to the next page. We will tell the browser to click on close button.
        # Although so far for me it has appeared only on 2nd page but I have included the check for every page to be on safer side
        try:
            browser.find_element_by_id('popover-x').click()
        except:
            print('No Newsletter Popup Found')
    browser.close()
    return data
# Download data for a sample location (the code below queries Vancouver, BC,
# even though an earlier note mentioned Toronto).
loc = 'Vancouver%2C+BC'
q = 'title%3A%28machine+learning%29'  # URL-encoded: title:(machine learning)
df0 = scrape_indeed(q, loc, 20, True) # Jan 25
df0.to_pickle('data_scientist_vancouver.pkl')
"YANT@il.ibm.com"
] | YANT@il.ibm.com |
34be8784c8de3e7f0b2d38864291b7b19e58d65a | d9a4121ac2872bbe3f76564caebe6818dc5888a7 | /tests/test_analysis_submission_response.py | 84d476d5916af4ccb26e3b41aa77c5f6c8d8d179 | [
"MIT"
] | permissive | s0b0lev/mythx-models | ecb07abada43eb9c26929bfd6cd76dca9105207f | 0fc14fef9e41a68a7d97e0bb170fd0eca5693d9a | refs/heads/master | 2020-08-20T19:22:14.320454 | 2019-10-11T08:32:04 | 2019-10-11T08:32:04 | 216,057,981 | 0 | 0 | MIT | 2019-10-18T15:47:10 | 2019-10-18T15:47:09 | null | UTF-8 | Python | false | false | 2,212 | py | import json
import pytest
from mythx_models.exceptions import ValidationError
from mythx_models.response import Analysis, AnalysisSubmissionResponse
from mythx_models.util import serialize_api_timestamp
from . import common as testdata
def assert_analysis_data(expected, analysis: Analysis):
assert expected["apiVersion"] == analysis.api_version
assert expected["maruVersion"] == analysis.maru_version
assert expected["mythrilVersion"] == analysis.mythril_version
assert expected["harveyVersion"] == analysis.harvey_version
assert expected["queueTime"] == analysis.queue_time
assert expected["runTime"] == analysis.run_time
assert expected["status"] == analysis.status
assert expected["submittedAt"] == serialize_api_timestamp(analysis.submitted_at)
assert expected["submittedBy"] == analysis.submitted_by
assert expected["uuid"] == analysis.uuid
def test_analysis_submission_from_valid_json():
resp = AnalysisSubmissionResponse.from_json(
json.dumps(testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT)
)
assert_analysis_data(testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT, resp.analysis)
def test_analysis_submission_from_empty_json():
with pytest.raises(ValidationError):
AnalysisSubmissionResponse.from_json("{}")
def test_analysis_submission_from_valid_dict():
resp = AnalysisSubmissionResponse.from_dict(
testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT
)
assert_analysis_data(testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT, resp.analysis)
def test_analysis_submission_from_empty_dict():
with pytest.raises(ValidationError):
AnalysisSubmissionResponse.from_dict({})
def test_analysis_submission_to_dict():
d = testdata.ANALYSIS_SUBMISSION_RESPONSE_OBJECT.to_dict()
assert d == testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT
def test_analysis_submission_to_json():
json_str = testdata.ANALYSIS_SUBMISSION_RESPONSE_OBJECT.to_json()
assert json.loads(json_str) == testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT
def test_analysis_submission_property_delegation():
assert_analysis_data(
testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT,
testdata.ANALYSIS_SUBMISSION_RESPONSE_OBJECT,
)
| [
"dmuhs@protonmail.ch"
] | dmuhs@protonmail.ch |
d979d6055cd3b9523c5c7306b9146672c4d1ba5a | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/journey.py | fbec726990b63d058dbf48495740315328d8d4e3 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 1,097 | py | from __future__ import annotations
from dataclasses import dataclass, field
from xsdata.models.datatype import XmlDuration
from travelport.models.air_segment_ref import AirSegmentRef
__NAMESPACE__ = "http://www.travelport.com/schema/air_v52_0"
@dataclass
class Journey:
"""
Information about all connecting segment list and total traveling time.
Parameters
----------
air_segment_ref
travel_time
Total traveling time that is difference between the departure time
of the first segment and the arrival time of the last segments for
that particular entire set of connection.
"""
class Meta:
namespace = "http://www.travelport.com/schema/air_v52_0"
air_segment_ref: list[AirSegmentRef] = field(
default_factory=list,
metadata={
"name": "AirSegmentRef",
"type": "Element",
"max_occurs": 999,
}
)
travel_time: None | XmlDuration = field(
default=None,
metadata={
"name": "TravelTime",
"type": "Attribute",
}
)
| [
"chris@komposta.net"
] | chris@komposta.net |
e0469a16f055c5c1049a3055d53c4f5ded448b32 | 881c0979b57bae48019a76314315459c54565d62 | /django-website/manage.py | 11a3ff3a76f5c8c4309c9ee28c4e5934232aa616 | [] | no_license | SreePragnaVinnakoti/rpi-webcam | 0596301ee5c117590365e6058cacb417f13f0dfa | 705a0917b8710090ff8e17c5b9fb889c01232ce7 | refs/heads/master | 2023-03-16T12:27:41.107309 | 2013-12-12T03:03:22 | 2013-12-12T03:03:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "picam_website.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"frances.aurelien@gmail.com"
] | frances.aurelien@gmail.com |
0670598f0b9e25686ea6b06c303213ef4d557478 | 5e2dddce9c67d5b54d203776acd38d425dbd3398 | /spacy/lang/bn/tokenizer_exceptions.py | 82f3cfaf78640cf4e4be76697544dcba61533595 | [
"MIT"
] | permissive | yuxuan2015/spacy_zh_model | 8164a608b825844e9c58d946dcc8698853075e37 | e89e00497ab3dad0dd034933e25bc2c3f7888737 | refs/heads/master | 2020-05-15T11:07:52.906139 | 2019-08-27T08:28:11 | 2019-08-27T08:28:11 | 182,213,671 | 1 | 0 | null | 2019-04-19T06:27:18 | 2019-04-19T06:27:17 | null | UTF-8 | Python | false | false | 953 | py | # coding=utf-8
from __future__ import unicode_literals
from ...symbols import ORTH, LEMMA
_exc = {}
for exc_data in [
{ORTH: "ডঃ", LEMMA: "ডক্টর"},
{ORTH: "ডাঃ", LEMMA: "ডাক্তার"},
{ORTH: "ড.", LEMMA: "ডক্টর"},
{ORTH: "ডা.", LEMMA: "ডাক্তার"},
{ORTH: "মোঃ", LEMMA: "মোহাম্মদ"},
{ORTH: "মো.", LEMMA: "মোহাম্মদ"},
{ORTH: "সে.", LEMMA: "সেলসিয়াস"},
{ORTH: "কি.মি.", LEMMA: "কিলোমিটার"},
{ORTH: "কি.মি", LEMMA: "কিলোমিটার"},
{ORTH: "সে.মি.", LEMMA: "সেন্টিমিটার"},
{ORTH: "সে.মি", LEMMA: "সেন্টিমিটার"},
{ORTH: "মি.লি.", LEMMA: "মিলিলিটার"}]:
_exc[exc_data[ORTH]] = [exc_data]
TOKENIZER_EXCEPTIONS = _exc
| [
"yuxuan2015@example.com"
] | yuxuan2015@example.com |
258b5d5ef83e29699922ce48553f1abe50d6ba58 | a0044ff62c1899d5dcf2fe91be5f019ff7536474 | /src/BaseUtils.py | 079dea76142eb306fabf85c35c1448c746814522 | [
"BSD-2-Clause-Views"
] | permissive | ambidextrousTx/RNLTK | a1d4df9f2259acbe2486ceb7266d4b7587bfbc54 | bd7cf90191c6699be2f890ec249d64e3e672f720 | refs/heads/master | 2016-09-05T13:06:47.570702 | 2015-11-12T04:17:07 | 2015-11-12T04:17:07 | 21,721,100 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | '''
Base NLP utilities
'''
import re
def get_words(sentence):
    '''Return all the words found in a sentence.

    Whitespace and punctuation are ignored: punctuation characters are
    stripped before splitting, so "most$" yields "most".

    The original doctests wrote the expected output on a `>>>` prompt line,
    so they never actually verified anything; fixed here.

    >>> get_words('a most interesting piece')
    ['a', 'most', 'interesting', 'piece']
    >>> get_words('a, most$ **interesting piece')
    ['a', 'most', 'interesting', 'piece']
    '''
    # Keep only letters and whitespace, then split on single spaces and drop
    # the empty fragments left by consecutive separators.
    clean_sentence = ''.join([char for char in sentence
                              if char.isalpha() or char.isspace()])
    return [word for word in clean_sentence.split(' ') if word != '']
def get_sentences(phrase):
    '''Return all sentences found in a phrase.

    Sentences are split on '.', '!' and '?'; each piece is stripped of
    leading/trailing whitespace, and empty or whitespace-only fragments
    are dropped (the original kept whitespace-only fragments as '').

    The original doctests wrote the expected output on a `>>>` prompt line,
    so they never actually verified anything; fixed here.

    >>> get_sentences('What an amazing opportunity! I am so glad.')
    ['What an amazing opportunity', 'I am so glad']
    >>> get_sentences('It is raining outside. Are you awake?')
    ['It is raining outside', 'Are you awake']
    '''
    sentences = []
    for segment in re.split(r'[.!?]', phrase):
        trimmed = segment.strip()
        if trimmed:
            sentences.append(trimmed)
    return sentences
| [
"ravisinha@my.unt.edu"
] | ravisinha@my.unt.edu |
f9a3905061d392da39f24c565147913325dbd3f4 | 3b625b6a8867c71399b421615f2391269e6dee53 | /appfordomaintests_de_2065/wsgi.py | 1c0dab674258642ab854785653e2665b332b5146 | [] | no_license | crowdbotics-apps/appfordomaintests-de-2065 | cd691f1b94ed3f792724f7d0316518400c07619c | 78e2519a37f767953064c31e898d08b7b395b6bb | refs/heads/master | 2022-04-15T19:07:57.805517 | 2020-03-19T16:48:08 | 2020-03-19T16:48:08 | 248,555,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
WSGI config for appfordomaintests_de_2065 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app
# (setdefault keeps any value already set in the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfordomaintests_de_2065.settings')
# Module-level WSGI callable that application servers (gunicorn, mod_wsgi,
# uWSGI, ...) import and serve.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
cb0e7bf0d07ab3a63cbf6e86a3f500d771f3843e | aced407b41f6669f69e9eb8bd599260d50c0bd3f | /server/libs/top/api/rest/TradeShippingaddressUpdateRequest.py | 0f5dd43dd498846c1ab1208cb7481da7a49cf645 | [] | no_license | alswl/music_sofa | 42f7d15431f11b97bf67b604cfde0a0e9e3860cc | c4e5425ef6c80c3e57c91ba568f7cbfe63faa378 | refs/heads/master | 2016-09-12T18:37:34.357510 | 2016-05-20T11:49:52 | 2016-05-20T11:49:52 | 58,946,171 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | '''
Created by auto_sdk on 2013-11-07 12:53:22
'''
from top.api.base import RestApi
class TradeShippingaddressUpdateRequest(RestApi):
    """TOP request wrapper for the taobao.trade.shippingaddress.update API.

    Each attribute mirrors one request parameter and starts out unset (None);
    callers assign the receiver fields and the trade id before sending.
    """

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Receiver address fields plus the order id (tid), all optional.
        for field in ('receiver_address', 'receiver_city', 'receiver_district',
                      'receiver_mobile', 'receiver_name', 'receiver_phone',
                      'receiver_state', 'receiver_zip', 'tid'):
            setattr(self, field, None)

    def getapiname(self):
        return 'taobao.trade.shippingaddress.update'
| [
"alswlx@gmail.com"
] | alswlx@gmail.com |
0203ee949e80c337db199c170eae4e2cfd879524 | 0930b6c994225d44818887716ce4e8771af86b81 | /exercisesDosCapitulos/10-arquivosEExcecoes/10.1-aprendendoPython/aprendendoPython.py | 6e32d8cb6c3d8180a69b10b03a53d70b8a10c8cd | [] | no_license | jonasht/cursoIntesivoDePython | 44d81b08f1652c4fa7a6d14a0e3f62ee8e06944c | fb5f5c9884fb1a6062a7c4e7676e5cc3b13c0ebb | refs/heads/master | 2023-05-23T20:44:34.372825 | 2021-06-19T12:13:46 | 2021-06-19T12:13:46 | 293,325,804 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 728 | py |
# To locate/open this file on Ubuntu, run in the terminal:
#   code "<directory of the file>"
# Path of the sample text file read by the demos below.
arquivoDeTexto = 't.txt'
def l():
    # Print a horizontal separator rule: "=-" repeated 40 times plus "=".
    print('=-' * 40 + '=')
print()
l()
# 1) Show the whole file at once with file.read().
print('mostrando um arquivo todo de uma so vez com arquivo.read()')
with open(arquivoDeTexto) as arq:
    palavras = arq.read()
    print(palavras)
l()
# 2) Walk the file object line by line with a "for" loop.
print('percorrendo o objeto arquivo com um laço "for" ')
with open(arquivoDeTexto) as arquivo:
    for frase in arquivo:
        print(frase.rstrip())
l()
# 3) Store the lines in a list so they outlive the "with" block.
print('armazendo linhas em uma lista e trabalhando com ela fora do "with" usando um "for"')
with open(arquivoDeTexto) as arquivo:
    linhas = arquivo.readlines()
for linha in linhas:
print(linha.rstrip())
l()
| [
"jhenriquet@outlook.com.br"
] | jhenriquet@outlook.com.br |
d7e447bd7fbae2c61c9386edb91c418ec2569a40 | 24f0677e33a02020c8964b1085b54b3e9c4d9709 | /app/tests/test_controllers.py | d6a22bb01a31b158e297cd991efcd97940fed1a9 | [] | no_license | juliocmartins/python-api-test | ee0d0f0a97b3ed90b2f288dba753f4e6b6c12d4d | 0dde4fcfa0e01c518375d33121f6990aa14abd23 | refs/heads/master | 2023-04-11T10:22:38.104453 | 2021-04-05T11:46:46 | 2021-04-05T11:46:46 | 352,355,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | from src import db
from src.models import Trainers,Teams
def test_save_trainer():
    """Saving a Trainers row must assign a positive integer primary key."""
    t = Trainers(name="Ash_Test")
    t.save()
    assert type(t.id) is int
    assert t.id > 0
def test_save_team():
    """A Teams row linked to a freshly saved trainer must also get a positive id."""
    t = Trainers(name="Ash_Test")
    t.save()
    tm = Teams(name="Team_Test", trainer_id=t.id)
    tm.save()
    assert type(tm.id) is int
    assert tm.id > 0
| [
"juliocmartins87@gmail.com"
] | juliocmartins87@gmail.com |
5cf57ea462a96036e846f3bcf4b2a47daa0b8682 | ac0c6eb84b0e7023deafbf5322e921a95f4e85df | /jgt_common/assert_.py | dfe1e73c3f44334ef83494100a5b09bfd7dea63f | [] | no_license | bradsbrown/jgt_common | 12c65cc28c8bcd73d09a41aec9e2b9680f2a70cc | e2b68fbc26344b4264ce3a977c7ba21c79a27434 | refs/heads/master | 2020-06-24T08:03:01.427413 | 2019-07-24T14:49:18 | 2019-07-24T14:49:18 | 198,509,251 | 0 | 0 | null | 2019-07-23T21:15:16 | 2019-07-23T21:15:16 | null | UTF-8 | Python | false | false | 4,176 | py | """Convenience functions for doing asserts with helpful names and helpful messages."""
from . import format_if as _format_if
try:
    from math import isclose as _isclose
except ImportError:
    # Pre-3.5 fallback with the same signature and semantics as math.isclose.
    # noqa E501 From: https://stackoverflow.com/questions/5595425/what-is-the-best-way-to-compare-floats-for-almost-equality-in-python/5595453
    def _isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
        return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def _msg_concat(prefix, body):
"""Join with a space if prefix isn't empty."""
return "{} {}".format(prefix, body) if prefix else body
def not_eq(expected, actual, msg=""):
    """Assert the values to not be equal."""
    # Message is a single lazy expression so nothing is formatted on success.
    assert expected != actual, ("{} ".format(msg) if msg else "") + \
        "Expected '{}' to be not equal to actual '{}'".format(expected, actual)
def eq(expected, actual, msg=""):
    """Assert the values are equal."""
    assert expected == actual, ("{} ".format(msg) if msg else "") + \
        "Expected '{}' == actual '{}'".format(expected, actual)
def less(a, b, msg=""):
    """Assert that a < b."""
    assert a < b, ("{} ".format(msg) if msg else "") + \
        "Expected '{}' < '{}'".format(a, b)
def less_equal(a, b, msg=""):
    """Assert that a <= b."""
    assert a <= b, ("{} ".format(msg) if msg else "") + \
        "Expected '{}' <= '{}'".format(a, b)
def greater(a, b, msg=""):
    """Assert that a > b."""
    assert a > b, ("{} ".format(msg) if msg else "") + \
        "Expected '{}' > '{}'".format(a, b)
def greater_equal(a, b, msg=""):
    """Assert that a >= b."""
    assert a >= b, ("{} ".format(msg) if msg else "") + \
        "Expected '{}' >= '{}'".format(a, b)
def is_in(value, sequence, msg=""):
    """Assert that value is in the sequence."""
    assert value in sequence, ("{} ".format(msg) if msg else "") + \
        "Expected: '{}' to be in '{}'".format(value, sequence)
def any_in(a_sequence, b_sequence, msg=""):
    """Assert at least one member of a_sequence is in b_sequence."""
    found = any(member in b_sequence for member in a_sequence)
    assert found, ("{} ".format(msg) if msg else "") + \
        "None of: '{}' found in '{}'".format(a_sequence, b_sequence)
def not_in(item, sequence, msg=""):
    """Assert item is not in sequence."""
    assert item not in sequence, ("{} ".format(msg) if msg else "") + \
        "Did NOT Expect: '{}' to be in '{}'".format(item, sequence)
def is_not_none(a, msg=""):
    """Assert a is not None (falsy values like 0 and '' still pass)."""
    assert a is not None, ("{} ".format(msg) if msg else "") + \
        "'{}' should not be None".format(a)
def is_not_empty(sequence, msg=""):
    """
    Semantically more helpful than just ``assert sequence``.

    Empty containers and strings are falsy in Python, so this is the same
    check, but it reads better in tests and produces a clearer message.
    """
    assert sequence, ("{} ".format(msg) if msg else "") + \
        "'{}' - should not be empty".format(sequence)
def is_close(a, b, msg="", **isclose_kwargs):
    """Assert that math.isclose returns True based on the given values."""
    # Keyword args (rel_tol / abs_tol) are forwarded to isclose unchanged and,
    # on failure, echoed into the message via the package's _format_if helper.
    assert _isclose(a, b, **isclose_kwargs), _msg_concat(
        msg,
        "Expected '{}' to be close to '{}'{}".format(
            a, b, _format_if(": kwargs: {}", isclose_kwargs)
        ),
    )
def almost_equal(actual, expected, places=2, msg=""):
    """Assert that actual and expected are within `places` equal."""
    # rel_tol is pinned to 0 so only the absolute 10**-places tolerance
    # decides the outcome.
    assert _isclose(expected, actual, rel_tol=0, abs_tol=10.0 ** (-places)), (
        ("{} ".format(msg) if msg else "")
        + "Expected '{}' to be almost equal to '{}'".format(actual, expected)
    )
def is_singleton_list(sequence, item_description="something", msg=""):
    """Make sure the sequence has exactly one item (of item_description)."""
    assert len(sequence) == 1, (
        ("{} ".format(msg) if msg else "")
        + "Expected to find a one item list of {} but found '{}' instead".format(
            item_description, sequence
        )
    )
def is_instance(value, of_type, msg=""):
    """Assert value is instance of of_type."""
    assert isinstance(value, of_type), (
        ("{} ".format(msg) if msg else "")
        + "Got value '{}' of type '{}' when expecting something of type {}".format(
            value, type(value), of_type
        )
    )
| [
"github@brolewis.com"
] | github@brolewis.com |
598e59ed9c5d5f847b022f2b99dcb5fc7a8033ad | 4777001a3f29c8483d654969ae6f53ed0ef192b2 | /f1.py | 6fdd2c12a5d4b8219ca3d9cf8d0a7031134f10f2 | [] | no_license | adityaforvs/LearnBaySamples | 3a7db62007fda6e741f65ffa42142a592e56e570 | ba0e3edad3f01179f1f97de16b078c61b17607d7 | refs/heads/master | 2023-02-25T18:55:57.095749 | 2021-01-09T04:59:04 | 2021-01-09T04:59:04 | 328,085,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | Hello world to python
| [
"adityaforvs@gmail.com"
] | adityaforvs@gmail.com |
d988c1a01af2913efd6faa4787ac8de7865acd11 | 4875d4e4ad63310e44086be4d8e450eba151ecaf | /code/pyworkshop/05_Dictionaries/02_Dict.py | d414fa02e0804626a53aafdee1dc7412c5c5c1ef | [
"MIT"
] | permissive | archeranimesh/pythonFundamentals | 7a066ee1ee23a5a78623e5ed50da5167e2c59c16 | 35662181d95406505002fe6a39f577822bfd560b | refs/heads/master | 2020-06-01T12:15:51.828280 | 2019-07-13T17:48:21 | 2019-07-13T17:48:21 | 190,776,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | nums = {"one": 1, "two": 2, "three": 3}
# Add a new key/value pair by plain assignment.
nums["four"] = 4
print(nums)
# There are no duplicate keys in a dictionary: assigning to an existing
# key overrides the old value.
nums["two"] = "2222"
print(nums)  # {'one': 1, 'two': '2222', 'three': 3, 'four': 4}
# Membership test checks the *keys* of the dict.
print("one" in nums)
nums["two"] = 2
print(nums)
# Merge one dict into another: update() copies nums' pairs into rainbow.
rainbow = {"Green": "G", "Red": "R", "Blue": "B"}
rainbow.update(nums)
print(rainbow)
# Append to a list stored as a dict value.
# Note: `vegetable = color` creates an alias, not a copy, so mutating
# vegetable["Green"] is visible through color too.
color = {"Green": ["Spinich"]}
print(color)
vegetable = color
print(type(vegetable["Green"]))
vegetable["Green"].append("Lettuce")
print(color)
# Three important dictionary views:
# .keys(): returns a dict_keys view of the keys
print(nums.keys())
# .values(): returns a dict_values view of the values
print(nums.values())
# .items(): returns a dict_items view of (key, value) tuples
print(nums.items())
| [
"animeshb@archerimagine.com"
] | animeshb@archerimagine.com |
40e701e304cdc95780f0c60fa96c57f9e665568e | ab269258a76b4a7f9af01de0b73144db23d6f005 | /System Scripting/Problem06/6P/pythonwmi-simplified.py | c16e30867db2ed09bc26032ce471117879c17a56 | [] | no_license | jubic/RP-Misc | 24715770b457c3f40db145f4f34d0fb775b71653 | 3c8e12646779e060180870475c0ef10773140e0f | refs/heads/master | 2016-09-06T07:00:36.032240 | 2013-03-30T09:10:02 | 2013-03-30T09:10:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | from win32com.client import Dispatch
import os
server = Dispatch("WbemScripting.SWbemLocator")
conn = server.ConnectServer("localhost", "root\\cimv2")
def query(what):
    """Run "Select * from <what>" on the module-level WMI connection and
    return each result object as a {property name: value} dict."""
    rows = []
    for item in conn.ExecQuery("Select * from " + what):
        rows.append({prop.Name: prop.Value for prop in item.Properties_})
    return rows
def write_to_file(fd, results):
    """Write each result dict to *fd*: one right-aligned "key = value" line
    per property, followed by a separator line after every result.

    Bug fix: the body previously ignored the fd parameter and wrote to the
    module-level file object f, so the function only worked by accident.
    """
    for result in results:
        for key, value in result.items():
            fd.write("%40s = %s\n" % (key, value))
        fd.write("%50s" % "---------------------")
        fd.write("\n")
# Query the local OS record; its CSName (computer name) names the report file.
results = query("Win32_OperatingSystem")
filename = results[0]["CSName"]
# NOTE(review): opened "wb" but str is written below -- Python 2 only; confirm.
f = open(filename, "wb")
# One section per WMI class: a right-aligned banner, then key = value rows.
f.write("%50s" % "====== OperatingSystem ======\n")
write_to_file(f, results)
f.write("%50s" % "====== Win32_Processor ======\n")
results = query("Win32_Processor")
write_to_file(f, results)
f.write("%50s" % "====== Win32_PhysicalMemory ======\n")
results = query("Win32_PhysicalMemory")
write_to_file(f, results)
f.write("%50s" % "====== Win32_LogicalDisk ======\n")
results = query("Win32_LogicalDisk")
write_to_file(f, results)
f.write("%50s" % "====== Win32_NetworkAdapterConfiguration ======\n")
results = query("Win32_NetworkAdapterConfiguration")
write_to_file(f, results)
f.write("%50s" % "====== Win32_Product ======\n")
results = query("Win32_Product")
write_to_file(f, results)
f.close()
| [
"jubic@live.com.sg"
] | jubic@live.com.sg |
c37c659ed0c253f3e7e8dccddccbaaf607fb6b19 | ea529b8067ed52f26d7dedb997f3bb646fa731ef | /rajim/models.py | f651ebdc231b83e69a727f28d9dea83e226499ac | [] | no_license | danyrubiano/Fingeso | 0536ad91d43c179ce497984d338afe71e16cf0e6 | bbbd4dde8c54e87813a5930017c8ded79703d3a7 | refs/heads/master | 2020-07-03T17:05:59.384355 | 2019-08-12T17:37:06 | 2019-08-12T17:37:06 | 201,978,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,981 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
def url3(self, filename):
    # Upload path for artist photos: Artistas/<artist name>/<file name>.
    return "Artistas/%s/%s" % (self.nombre_artista, str(filename))
class Artista(models.Model):
    """Artist record (table ARTISTA): name, bio, photo and aggregate rating."""
    id_artista = models.AutoField(db_column='ID_ARTISTA', primary_key=True) # Field name made lowercase.
    nombre_artista = models.CharField(db_column='NOMBRE_ARTISTA', max_length=60, blank=True, null=True) # Field name made lowercase.
    resena = models.TextField(db_column='RESENA', max_length=1024, blank=True, null=True) # Field name made lowercase. Bio/review text.
    fecha_nacimiento_artista = models.DateField(db_column='FECHA_NACIMIENTO_ARTISTA', blank=True, null=True) # Field name made lowercase.
    valoracion_artista = models.IntegerField(db_column='VALORACION_ARTISTA', blank=True, null=True) # Field name made lowercase.
    foto = models.ImageField(upload_to=url3)
    status = models.BooleanField(default=False)
    def __str__(self):
        # Display artists by name in admin/shell output.
        return self.nombre_artista
    class Meta:
        managed = True
        db_table = 'artista'
class Cancion(models.Model):
    """Song record (table CANCION), optionally linked to a Disco."""
    id_cancion = models.AutoField(db_column='ID_CANCION', primary_key=True) # Field name made lowercase.
    id_disco = models.ForeignKey('Disco', db_column='ID_DISCO', blank=True, null=True) # Field name made lowercase.
    nombre_cancion = models.CharField(db_column='NOMBRE_CANCION', max_length=60, blank=True, null=True) # Field name made lowercase.
    duracion = models.IntegerField(db_column='DURACION', blank=True, null=True) # Field name made lowercase. Duration (units not shown here).
    val_cancion = models.IntegerField(db_column='VAL_CANCION', blank=True, null=True) # Field name made lowercase.
    class Meta:
        managed = True
        db_table = 'cancion'
    def __str__(self):
        return self.nombre_cancion
class Compra(models.Model):
    """Purchase record (table COMPRA): date, final price and payment type."""
    id_compra = models.AutoField(db_column='ID_COMPRA', primary_key=True) # Field name made lowercase.
    id_disco = models.ForeignKey('Disco', db_column='ID_DISCO', blank=True, null=True) # Field name made lowercase.
    fecha_compra = models.DateTimeField(db_column='FECHA_COMPRA', blank=True, null=True) # Field name made lowercase.
    precio_final = models.IntegerField(db_column='PRECIO_FINAL', blank=True, null=True) # Field name made lowercase.
    tipo_pago = models.CharField(db_column='TIPO_PAGO', max_length=10, blank=True, null=True) # Field name made lowercase.
    class Meta:
        managed = True
        db_table = 'compra'
def url(self, filename):
    # Upload path for album cover art: Discos/<album name>/<file name>.
    return "Discos/%s/%s" % (self.nombre_disco, str(filename))
class Disco(models.Model):
    """Album record (table DISCO) linking an Artista and optionally a Compra."""
    id_disco = models.AutoField(db_column='ID_DISCO', primary_key=True) # Field name made lowercase.
    id_artista = models.ForeignKey(Artista, db_column='ID_ARTISTA', blank=True,null=True) # Field name made lowercase.
    id_compra = models.ForeignKey(Compra, db_column='ID_COMPRA', blank=True, null=True) # Field name made lowercase.
    nombre_disco = models.CharField(db_column='NOMBRE_DISCO', max_length=60, blank=True, null=True) # Field name made lowercase.
    fecha_lanzamiento = models.DateField(db_column='FECHA_LANZAMIENTO', blank=True, null=True) # Field name made lowercase. Release date.
    genero = models.CharField(db_column='GENERO', max_length=100, blank=True, null=True) # Field name made lowercase.
    resena = models.TextField(db_column='RESENA', max_length=1024, blank=True, null=True) # Field name made lowercase.
    valoracion_disco = models.IntegerField(db_column='VALORACION_DISCO', blank=True, null=True) # Field name made lowercase.
    precio = models.IntegerField(db_column='PRECIO', blank=True, null=True) # Field name made lowercase.
    caratula = models.ImageField(upload_to=url)
    status = models.BooleanField(default=False)
    video = models.CharField(db_column='VIDEO', max_length=1024, blank=True, null=True)
    # NOTE(review): __unicode__ is Python 2-only; add __str__ for Python 3.
    def __unicode__(self):
        return self.nombre_disco
    class Meta:
        managed = True
        db_table = 'disco'
def url2(self, filename):
    # Upload path for user profile photos: Users/<username>/<file name>.
    return "Users/%s/%s" % (self.user.username, filename)
class userProfile(models.Model):
    """One-to-one extension of Django's User: photo, birth date, discount."""
    user = models.OneToOneField(User, related_name="profile")
    photo = models.ImageField(upload_to=url2)
    fecha_nacimiento_user = models.DateField(db_column='FECHA_NACIMIENTO_USER',blank=True, null=True) # Field name made lowercase.
    descuento = models.IntegerField(db_column='DESCUENTO', blank=True, null=True) # Field name made lowercase. Discount value.
    direccion = models.CharField(db_column='DIRECCION', max_length=40, blank=True, null=True) # Field name made lowercase. Address.
    valoracion_user = models.IntegerField(db_column='VALORACION_USER', blank=True, null=True) # Field name made lowercase.
    # NOTE(review): __unicode__ is Python 2-only; add __str__ for Python 3.
    def __unicode__(self):
        return self.user.username
class ComentaDisco(models.Model):
    """A user's comment (title + body) on a Disco."""
    id_cd = models.AutoField(db_column='ID_CD', primary_key=True) # Field name made lowercase.
    user = models.ForeignKey(User, db_column='ID_USER', unique=False , blank=True,null=True) # Field name made lowercase.
    titulo = models.CharField(db_column='TITULO', max_length=40, blank=True, null=True)
    comentario = models.TextField(db_column='COMENTARIO', max_length=200, blank=True, null=True)
    id_disco = models.ForeignKey(Disco, db_column='ID_DISCO', unique=False, blank=True,null=True) # Field name made lowercase.
    # add id_disco
class ValoraDisco(models.Model):
    """A user's rating of a Disco; (user, disco) pairs are unique."""
    id_vd = models.AutoField(db_column='ID_VD', primary_key=True) # Field name made lowercase.
    user = models.ForeignKey(User, db_column='ID_USER')
    id_disco = models.ForeignKey(Disco, db_column='ID_DISCO') # Field name made lowercase.
    valoracion_disco = models.IntegerField(db_column='VALORACION_DISCO')
    class Meta:
        managed = True
        db_table = 'valoradisco'
        # One rating per user per album.
        unique_together = (('user', 'id_disco'),)
| [
"dany.rubiano@usach.cl"
] | dany.rubiano@usach.cl |
7bd2cb80926c6799d63458cc28966173b9077b5c | 521ce8bee9ec9db6038e8f386374cbf0de2de5c8 | /4_Task.py | 8206a0102029846a849d928478dcdf537036fd5c | [] | no_license | agungap22/Latihan | 5df3728eaa51b12a4c68f3ab330208255c7ae23b | f92aaedeb0552110aa122e6654773055ceaf0050 | refs/heads/master | 2023-01-24T19:42:03.848504 | 2020-11-10T03:03:48 | 2020-11-10T03:03:48 | 311,528,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | # x = input('Masukkan hari : ').lower()
# day = {
# 'senin' : 'Monday',
# 'selasa' :'Tuesday',
# 'rabu' : 'Wednesday',
# 'kamis' : 'Thrusday',
# 'jumat' : 'Friday',
# 'sabtu' : 'Saturday',
# 'mingu' : 'Sunday',
# }
# ind= list(day.keys())
# eng= list(day.values())
# if x in ind:
# print('Bahasa inggris',x.capitalize(),'Adalah',day[x].capitalize)
# else:
# print("Hari tidak ditemukan")
# Translate a day name between Indonesian and English, in either direction.
x = input ( 'Masukkan hari (INA/ENG) : ' ).lower()
# Indonesian day -> English day.
# Bug fixes: 'thrusday' -> 'thursday' and 'mingu' -> 'minggu' (the correct
# spellings were previously rejected with "Not found").
day = {
    'senin' : 'monday',
    'selasa' : 'tuesday',
    'rabu' : 'wednesday',
    'kamis' : 'thursday',
    'jumat' : 'friday',
    'sabtu' : 'saturday',
    'minggu' : 'sunday',
}
ind = list(day.keys())
eng = list(day.values())
if x in eng :
    # English input: look up the Indonesian key at the same position.
    print("Bahasa Indonesia dari ",x.capitalize(), ' adalah ',ind[eng.index(x)].capitalize())
elif x in ind :
    # Indonesian input: direct dictionary lookup.
    print ("Bahasa Inggris dari ", x.capitalize(),"adalah", day[x].capitalize())
else :
    print (" Not found ")
| [
"agungapmail@gmail.com"
] | agungapmail@gmail.com |
b2c59e47491ff7ed1240165950850e00f006c618 | ce363e901591a8395464a42df3abce20896fc0bb | /sliding_diff.py | 186468e156bca41fb4c0d268a480ef002e007532 | [
"MIT"
] | permissive | kodo-pp/miem-project | dacbca0d972500696cdd086e1e9c602745eed7c9 | e440c536e5f5aecb882d4e89252f4e0efa98dedb | refs/heads/master | 2020-03-30T01:59:26.579243 | 2018-10-24T21:19:26 | 2018-10-24T21:19:26 | 150,606,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | # -*- coding: utf-8 -*-
import config as conf
def normalize(arr):
    """Linearly rescale arr to [0, 1]; a constant array maps to all 0.5."""
    lo, hi = min(arr), max(arr)
    if lo == hi:
        return [0.5] * len(arr)
    span = hi - lo
    return [(value - lo) / span for value in arr]
def normalize_scalar(v, minval, maxval):
    # Scale v by the range width (maxval - minval); 0 for a flat range.
    # NOTE(review): unlike normalize(), v is NOT shifted by minval first --
    # presumably intentional since sliding_diff passes diff sums here, not
    # raw samples; confirm before "fixing".
    if minval == maxval:
        return 0
    return v / (maxval - minval)
def arr2diff(arr):
    """Return the absolute first differences of consecutive elements."""
    return [abs(nxt - cur) for cur, nxt in zip(arr, arr[1:])]
def sliding_diff(arr, winsize):
    """For each window of `winsize` samples, yield the sum of absolute first
    differences inside the window, scaled by the window's value range
    (max - min) and averaged over the winsize - 1 difference terms.

    Raises ValueError when winsize is outside (0, len(arr)].
    NOTE: winsize must be >= 2 or the per-window average divides by zero
    (unchanged from the original).
    """
    m = len(arr)
    if not (0 < winsize <= m):
        raise ValueError('winsize should be between 0 and len(arr)')

    def scale(value, lo, hi):
        # Same contract as module-level normalize_scalar (inlined here):
        # scale by the range width, 0 for a flat window.
        return 0 if lo == hi else value / (hi - lo)

    # d[i] = |arr[i + 1] - arr[i]| (same as arr2diff, inlined).
    d = [abs(b - a) for a, b in zip(arr, arr[1:])]
    accum = sum(d[:winsize - 1])
    amaxv = max(arr[:winsize])
    aminv = min(arr[:winsize])
    yield scale(accum, aminv, amaxv) / (winsize - 1)
    for i in range(m - winsize):
        # Slide the window one step: drop d[i], add d[i + winsize - 1].
        accum -= d[i]
        accum += d[i + winsize - 1]
        incoming = arr[i + winsize]
        if incoming < aminv:
            aminv = incoming
        elif arr[i] == aminv:
            aminv = min(arr[i + 1:i + winsize + 1])
        if incoming > amaxv:
            amaxv = incoming
        # Bug fix: the outgoing element must be compared against the current
        # *max* (the original tested aminv here), otherwise a departing
        # maximum was never evicted and the scaling range stayed too wide.
        elif arr[i] == amaxv:
            amaxv = max(arr[i + 1:i + winsize + 1])
        yield scale(accum, aminv, amaxv) / (winsize - 1)
| [
"korzun.sas@mail.ru"
] | korzun.sas@mail.ru |
6890a27550d8ac13da4db7d96c33a8a0fbed3914 | 60dc6059f134e4070bbfa75224edfd7feee7683b | /Learning Data Strcuctures/leftrot.py | a59b29f8e69a92c0af79525829434d953614443a | [] | no_license | omaralaniz/Learning | 82d41554a06f7726e20057ce2da2b6d1f8293607 | 59899f40491ea9a4ec0d15bee00aaa142638af26 | refs/heads/master | 2023-01-24T12:21:13.431061 | 2020-01-23T01:05:26 | 2020-01-23T01:05:26 | 214,857,913 | 0 | 0 | null | 2023-01-04T12:23:12 | 2019-10-13T16:53:27 | Python | UTF-8 | Python | false | false | 161 | py | def rotLeft(a, d):
for i in range(d):
a.append(a.pop(0))
return a
print( rotLeft([1,2,3,4], 3))
for i in range(3):
print(i) | [
"omar.alaniz021@gamil.com"
] | omar.alaniz021@gamil.com |
2854338d649a6d9020a9d21bd74adf1f7bdd9c47 | 40b15e4df2843c52fa181f4e77e3257501c84da1 | /venv/bin/pip3 | 1143ef6b7c3cacfce5b3b5d990f9ad0b48ba3507 | [] | no_license | Pydare/Pop-Share | 84e7ea460e9382b28a4fb4668888913e145fe3b6 | 10005e807f1cb375340f49bb9858fcd2858daaae | refs/heads/master | 2020-05-17T21:12:05.320886 | 2020-02-11T20:55:53 | 2020-02-11T20:55:53 | 183,963,451 | 3 | 0 | null | 2020-01-01T15:17:03 | 2019-04-28T22:03:20 | Python | UTF-8 | Python | false | false | 270 | #!/media/dare/Windows/Users/DARE/Desktop/Pop-Share/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"dareyadewumi650@gmail.com"
] | dareyadewumi650@gmail.com | |
8e109914502323d3485c70396b4695a92dcdfa34 | 4c6f1119543d9d5b3ab7078fc9abba43bb2a0f5b | /babelfish.py | d127c43088c35d0d39ed05997b3b405d6e1a9e45 | [] | no_license | 0xecho/KattisSubmissions | 8072478b10dc4da42b8fa3504e7fc1923a1edc62 | 7cb981be49e1102e9c6254d15cb51af11fce164f | refs/heads/master | 2021-01-03T23:45:32.366936 | 2020-02-13T15:07:03 | 2020-02-13T15:07:03 | 240,287,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | import sys
# foreign-word -> English-word dictionary, filled from the input header.
m={
}
# Read "english foreign" pairs until the blank separator line.
while 1:
    inp = input()
    if inp=="":
        break
    inp = inp.split()
    # Key on the foreign word (column 2), map to its English word (column 1).
    m[inp[1]]=inp[0]
# Translate the message body; unknown words fall through to "eh" below.
for line in sys.stdin:
    if line.strip() in m:
        print(m[line.strip()])
    else:
print("eh") | [
"mr.m.r.s.h.i.e.l.d@gmail.com"
] | mr.m.r.s.h.i.e.l.d@gmail.com |
7ceae1ad282b1059676d6451c86751575d7e1e6b | a40950330ea44c2721f35aeeab8f3a0a11846b68 | /OpenCV-python读取监控/发送照片.py | 7485633f350fd958947abde317da53ecd06ae10f | [] | no_license | huang443765159/kai | 7726bcad4e204629edb453aeabcc97242af7132b | 0d66ae4da5a6973e24e1e512fd0df32335e710c5 | refs/heads/master | 2023-03-06T23:13:59.600011 | 2023-03-04T06:14:12 | 2023-03-04T06:14:12 | 233,500,005 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import cv2
import socket
import pickle
import base64
import numpy as np
# UDP socket -- created but unused below; presumably for later streaming.
network = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Load the test image and shrink to 20% per side before serialization.
frame = cv2.imread('B.jpeg')
new_frame = cv2.resize(frame, (int(frame.shape[1] * 0.2), int(frame.shape[0] * 0.2)))
# Serialize a (header byte, frame) tuple, as a UDP payload would carry it.
tx_data = pickle.dumps((b'\x01', new_frame))
print(len(tx_data))
# Round-trip: deserialize, then scale back up 2x per side for display.
head, rx_data = pickle.loads(tx_data)
new_rx = cv2.resize(rx_data, (int(rx_data.shape[1] * 2), int(rx_data.shape[0] * 2)))
# print(head, rx_data)
cv2.imshow('1', new_rx)
cv2.waitKey(0)
| [
"443765159@qq.com"
] | 443765159@qq.com |
126855637a4d88d391af38ee4179e683dff52074 | 0a839af5f62b1a8011e4fbd81213642078f9af11 | /testing/districts.py | bd0daa380e854d01b52a35854469937728b9c258 | [] | no_license | davidy9000/NSHacks-Bank-Mobile | 79b91871704a4a51ad2d58cb6d901a0fdef566d0 | cf703e5adf610627da8115b59a90744faf05a80f | refs/heads/master | 2020-04-07T01:11:04.048175 | 2018-11-17T14:49:27 | 2018-11-17T14:49:27 | 157,933,739 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,992 | py | import matplotlib as plt
import pandas as pd
import numpy as py
import folium
import os
import json
#import osmnx as ox
# Input data: Capital One branch locations and NYC community districts.
capOneLocFile = "Capital_One_Locations.csv"
commDistFile = "nycd.csv"
cOneLocations = pd.read_csv(capOneLocFile)
commDist = pd.read_csv(commDistFile)
# GeoJSON with NYC community-district boundaries (for folium choropleths).
districtNY = 'nycommdistrict.geojson'
commDist2 = pd.DataFrame(cOneLocations)
commDist2.to_json('cap.json')
commDist2 = commDist2.reset_index()
#commDist2.columns = ['District', 'Number']
'''
bankLocDen = pd.DataFrame(cOneLocations['Bank District'].value_counts().astype(float))
bankLocDen.to_json('bankLocs.json')
bankLocDen = bankLocDen.reset_index()
bankLocDen.columns = ['District', 'Number of Banks']
with open('bankLocs.json') as json_data:
bank2 = json.load(json_data)
#print(bank2)
bank3 = bank2['Bank District']
#print(bank3)
bank3 = {int(float(k)):int(v) for k,v in bank3.items()}
bank2['Bank District'] = bank3
print(bank2)
#print(bank3)
#bank3.to_json('bank3.json')
#bank3 = bank3.reset_index()
#bank4 = {"Bank District"}
#print(bank2)
'''
#distMap = folium.Map(location = [40.76831, -73.964915], tiles="Cartodb Positron", zoom_start=10)
#distMap.choropleth(geo_data = districtNY, name = 'choropleth', data = commDist2 , columns = ['District', 'Number'], key_on = 'feature.properties.DISTRICT', fill_color = 'PuBuGn', fill_opacity = 0.7, line_opacity = 1.0, legend_name = 'Community District')
#folium.LayerControl().add_to(distMap)
'''
#bankLocDen = "bankLocs.json"
densityMap = folium.Map(location = [40.76831, -73.964915], tiles="Cartodb Positron", zoom_start=10)
densityMap.choropleth(geo_data = districtNY, name = 'choropleth', data = bank2, columns = ['District', 'Number of Banks'], key_on = 'feature.properties.DISTRICT', fill_color = 'PuBuGn', fill_opacity = 0.7, line_opacity = 1.0, legend_name = 'Number of Banks')
folium.LayerControl().add_to(densityMap)
'''
#distMap.save(outfile = 'districtMap.html')
#densityMap.save(outfile = 'densityMap.html') | [
"davidy.9000@gmail.com"
] | davidy.9000@gmail.com |
4bd522ee91096abba12454ecea7d90ac9d60bc03 | e47f6b71e3e1278c4605fc55bf4f49612a9e2fab | /second_week/firstday/main.py | 455fc959dae19a841372355af393f0c577c4d33d | [] | no_license | jinhanlai/DLStudy | bbed2ea61060389aa447e426f5419ebe3d5e23e0 | 1fb7460820aa0ea1760856bbc408e2f8138761e8 | refs/heads/master | 2020-12-09T14:19:00.828091 | 2020-01-12T05:14:02 | 2020-01-12T05:14:02 | 233,331,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,922 | py | # _*_ coding:utf-8 _*_
"""
@author: LaiJinHan
@time:2019/7/27 21:50
"""
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from DLStudy.L_layer_network import *
from DLStudy.second_week.firstday import init_utils, reg_utils, gc_utils
from DLStudy.second_week.firstday.reg_utils import load_2D_dataset
"""
# 几种不同的初始化参数
def initialize_parameters(layer_dims, type="he"):
parameters = {}
L = len(layer_dims)
for i in range(1, L):
if type == "zeros":
parameters["W" + str(i)] = np.zeros((layer_dims[i], layer_dims[i - 1]))
elif type == "random":
parameters["W" + str(i)] = np.random.randn(layer_dims[i], layer_dims[i - 1]) * 4
else:
parameters["W" + str(i)] = np.random.randn(layer_dims[i], layer_dims[i - 1]) * np.sqrt(
2 / layer_dims[i - 1])
parameters["b" + str(i)] = np.zeros([layer_dims[i], 1])
assert (parameters["W" + str(i)].shape == (layer_dims[i], layer_dims[i - 1]))
assert (parameters["b" + str(i)].shape == (layer_dims[i], 1))
return parameters
"""
def model_init(X, Y, learning_rate=0.01, num_iterations=15000, print_cost=True, type="he", is_polt=True):
    """
    Train a three-layer network (LINEAR->RELU -> LINEAR->RELU -> LINEAR->SIGMOID)
    to compare weight-initialization schemes.

    Arguments:
        X - input data, shape (2, number of train/test examples)
        Y - labels in {0, 1}, shape (1, number of examples)
        learning_rate - gradient-descent step size
        num_iterations - number of training iterations
        print_cost - print the cost every 1000 iterations
        type - initialization scheme, one of "zeros" | "random" | "he"
        is_polt - plot the cost curve after training
    Returns:
        parameters - learned parameters
        grads - gradients from the last iteration
    """
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]
    # Pick the requested initialization scheme.
    if type == "zeros":
        parameters = initialize_parameters(layers_dims, "zeros")
    elif type == "random":
        parameters = initialize_parameters(layers_dims, "random")
    elif type == "he":
        parameters = initialize_parameters(layers_dims, "he")
    else:
        print("错误的初始化参数!程序退出")
        # NOTE(review): bare `exit` is a no-op expression, not a call.
        exit
    # Training loop.
    for i in range(0, num_iterations):
        # Forward propagation.
        AL, caches = L_model_forward(X, parameters)
        # Compute the cost.
        cost = compute_cost(AL, Y)
        # Backward propagation.
        grads = L_model_backward(AL, Y, caches)
        # Parameter update.
        parameters = update_parameters(parameters, grads, learning_rate)
        # Record the cost every 1000 iterations.
        if i % 1000 == 0:
            costs.append(cost)
            # Optionally print it.
            if print_cost:
                print("第" + str(i) + "次迭代,成本值为:" + str(cost))
    # Plot the cost curve once training is done.
    if is_polt:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (per hundreds)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
    # Return the learned parameters (and last-iteration gradients).
    return parameters,grads
def model_reg_or_dropout(X, Y, learning_rate=0.3, num_iterations=20000, print_cost=True, is_plot=True, lambd=0,
                         keep_prob=1):
    """
    Train a three-layer network (LINEAR->RELU -> LINEAR->RELU -> LINEAR->SIGMOID)
    with optional L2 regularization or dropout (not both at once).

    Arguments:
        X - input data, shape (2, number of examples)
        Y - labels {0 (blue) | 1 (red)}, shape (1, number of examples)
        learning_rate - gradient-descent step size
        num_iterations - number of training iterations
        print_cost - print the cost every 10000 iterations (recorded every 1000)
        is_plot - plot the cost curve after training
        lambd - L2 regularization strength; 0 disables regularization
        keep_prob - keep probability for dropout; 1 disables dropout
    Returns:
        parameters - learned parameters
    """
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 20, 3, 1]
    # Initialize parameters.
    parameters = initialize_parameters(layers_dims)
    # Training loop.
    for i in range(0, num_iterations):
        # Forward propagation, with or without dropout.
        if keep_prob == 1:
            # No dropout.
            AL, caches = L_model_forward(X, parameters)
        elif keep_prob < 1:
            # Randomly drop units, keeping each with probability keep_prob.
            AL, caches = L_model_forward_with_dropout(X, parameters, keep_prob)
        else:
            print("keep_prob参数错误!程序退出。")
            # NOTE(review): bare `exit` is a no-op expression, not a call.
            exit
        # Cost, with or without the L2 penalty term.
        if lambd == 0:
            # No L2 regularization.
            cost = compute_cost(AL, Y)
        else:
            # With L2 regularization.
            cost = compute_cost_with_reg(AL, Y, parameters, lambd)
        # Backward propagation.
        # L2 and dropout could be combined, but this experiment never does.
        assert (lambd == 0 or keep_prob == 1)
        if (lambd == 0 and keep_prob == 1):
            # Neither L2 nor dropout.
            grads = L_model_backward(AL, Y, caches)
        elif lambd != 0:
            # L2 regularization only.
            grads = L_model_backward_with_reg(AL, Y, caches, lambd)
        elif keep_prob < 1:
            # Dropout only.
            grads = L_model_backward_with_dropout(AL, Y, caches, keep_prob)
        # Parameter update.
        parameters = update_parameters(parameters, grads, learning_rate)
        # Record (and occasionally print) the cost.
        if i % 1000 == 0:
            # Record the cost.
            costs.append(cost)
            if print_cost and i % 10000 == 0:
                # Print the cost.
                print("第" + str(i) + "次迭代,成本值为:" + str(cost))
    # Plot the cost curve.
    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (x1,000)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
    # Return the learned parameters.
    return parameters
# Matplotlib defaults for every plot produced by this module.
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
"""
#初始化参数
train_X, train_Y, test_X, test_Y = init_utils.load_dataset(is_plot=False)
parameters = model_init(train_X, train_Y, type = "he",is_polt=True,print_cost=False)
print ("训练集:")
predictions_train = predict(train_X, train_Y, parameters)
print ("测试集:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5, 1.5])
axes.set_ylim([-1.5, 1.5])
init_utils.plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, np.squeeze(train_Y))
"""
"""
train_X, train_Y, test_X, test_Y = load_2D_dataset(is_plot=False)
parameters = model_reg_or_dropout(train_X, train_Y, keep_prob=0.86, learning_rate=0.03,is_plot=True)
print("使用随机删除节点,训练集:")
predictions_train = predict(train_X, train_Y, parameters)
print("使用随机删除节点,测试集:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with dropout")
axes = plt.gca()
axes.set_xlim([-0.75, 0.40])
axes.set_ylim([-0.75, 0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, np.squeeze(train_Y))
"""
| [
"laijinhan@126.com"
] | laijinhan@126.com |
48d706e3b94d4786ec4f67c5be3a9ae9818344ab | 419eb22ddf7afed1cd0dd6a914430b67bf951e51 | /backend/backend/urls.py | 786e8200bf100467e34420a3968051d26e256ca4 | [] | no_license | kadumuri1994/django-react-todo-app | 1de6ea8d5a739299a8e7c83587b1439fa1b3b098 | 822c2cbb8f8989a24ab5e6f732404b72b5478645 | refs/heads/master | 2022-12-07T09:39:51.756371 | 2020-08-28T14:11:44 | 2020-08-28T14:11:44 | 291,063,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | """backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from todo import views
# DRF router: auto-generates list/detail CRUD routes for the Todo viewset
# under the 'todos' prefix (basename 'todo').
router = routers.DefaultRouter()
router.register(r'todos', views.TodoView, 'todo')
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('api/', include(router.urls)),  # REST API endpoints (/api/todos/...)
]
| [
"vamshi1000@gmail.com"
] | vamshi1000@gmail.com |
45ce33fda780ab5d2100b97345cf5144bb2acb28 | c46c90c4d6772f9487b439b987ed00b763b0d337 | /util.py | f1c343bb5e22a7c15f91ba2fdbd7622296121c27 | [] | no_license | RandallLu/vultr | 58187b5a47dcc93060e5e1282742d37403acb74b | c57589ab699e48c89bfe7f5c84b2d14d2f25646c | refs/heads/master | 2021-01-20T15:04:59.470349 | 2017-10-08T07:20:43 | 2017-10-08T07:20:43 | 90,718,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | from termcolor import colored
def red(string):
    """Return *string* wrapped in termcolor markup so it prints in red."""
    text = colored(string, "red")
    return text
def green(string):
    """Return *string* wrapped in termcolor markup so it prints in green."""
    text = colored(string, "green")
    return text
def yellow(string):
    """Return *string* wrapped in termcolor markup so it prints in yellow.

    Fixes a copy-paste bug: the original passed "red" to colored(), so
    yellow() actually produced red text (same body as red()).
    """
    return colored(string, "yellow")
| [
"randalllu02@gmail.com"
] | randalllu02@gmail.com |
29808cebdfec5d1a1a3db83d6c8537604d827cd9 | baba372d7b36d2aeed0f2cb9d86b449f73a02384 | /ultimatelabeling/siamMask/models/mask.py | 494fb59ea488bf58003318398c77c9798ad65e6d | [
"MIT"
] | permissive | alexandre01/UltimateLabeling | 0c577f0d458d74568aae4314bc958ec6b2abc57c | d0c6010da1c3d5e2fce2044111069cdf86df246b | refs/heads/master | 2023-02-21T20:02:30.809271 | 2023-02-15T08:38:51 | 2023-02-15T08:38:51 | 189,499,472 | 298 | 42 | MIT | 2022-06-21T22:03:45 | 2019-05-31T00:15:54 | Python | UTF-8 | Python | false | false | 523 | py | # --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
import torch.nn as nn
class Mask(nn.Module):
    """Abstract base for SiamMask mask-prediction heads.

    Concrete subclasses must implement forward(), template() and track();
    every stub here raises NotImplementedError.
    """

    def __init__(self):
        super().__init__()

    def forward(self, z_f, x_f):
        """Predict from template features *z_f* and search features *x_f*."""
        raise NotImplementedError

    def template(self, template):
        """Process features of the template (exemplar) branch."""
        raise NotImplementedError

    def track(self, search):
        """Run the tracking step on features of the search region."""
        raise NotImplementedError
| [
"alexandre.carlier01@gmail.com"
] | alexandre.carlier01@gmail.com |
e2d97da7469f5b41125a340724c192101165acd9 | a6e44193f53a58344c1f02b6feb60f5674cf4982 | /crud/migrations/0029_auto_20210726_0755.py | bb9866b947f9274a5e96d6c5d110d4dee9e9fcf9 | [
"MIT"
] | permissive | TownOneWheel/townonewheel | 79e4f1220777078df4d195f6cdd0228dc47ee0b9 | 9feb120b7541b31d99b63c95edc7949005ab7862 | refs/heads/develop | 2023-07-04T00:57:10.517214 | 2021-07-29T09:52:21 | 2021-07-29T09:52:21 | 380,421,671 | 0 | 2 | MIT | 2021-07-29T09:52:21 | 2021-06-26T05:29:44 | JavaScript | UTF-8 | Python | false | false | 568 | py | # Generated by Django 3.2.4 on 2021-07-26 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`; edit only by regenerating.

    dependencies = [
        ('crud', '0028_auto_20210726_0754'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cat',
            name='created_at',
            # NOTE(review): the default is a timestamp literal frozen at
            # makemigrations time (looks like time.time() was evaluated when
            # generating) — presumably the model should pass a callable
            # default instead; verify against the model definition.
            field=models.TextField(default=1627286105.8433306),
        ),
        migrations.AlterField(
            model_name='comment',
            name='created_at',
            field=models.TextField(default=1627286105.8433306),
        ),
    ]
| [
"ws0671@naver.com"
] | ws0671@naver.com |
e72ebda3a718398a198f24705840c971e2f4a7d4 | 1923a90cc154450343ed650085f2ee97ddca9181 | /env/bin/django-admin.py | 7832d88c4053a01efa8647914e0bbf68cc1843f7 | [] | no_license | Demieno/django-gulp-docker-ansible | 230c95a107a5e28b8ed55f2f0d26d8075df130ec | f2e2f74f70c32c02f65a702033042c540bac938a | refs/heads/master | 2020-03-07T18:06:59.295552 | 2018-04-01T12:58:01 | 2018-04-01T12:58:01 | 127,628,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | #!/var/idpowers/redcross/env/bin/python3.5
from django.core import management
# Legacy django-admin.py entry point: delegate straight to Django's
# command-line utility.
if __name__ == "__main__":
    management.execute_from_command_line()
| [
"demienokam@gmail.com"
] | demienokam@gmail.com |
6f7393b8be9b1f7cdda141ca678315df0f7d0786 | 288a00d2ab34cba6c389b8c2444455aee55a8a95 | /tests/data23/recipe-435885.py | e88c6b46b1f95afffedafd3382b1d82cbf0470bf | [
"BSD-2-Clause"
] | permissive | JohannesBuchner/pystrict3 | ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb | 18b0dd369082422f9bf0f89c72e7acb53a49849c | refs/heads/master | 2023-08-14T06:37:37.954880 | 2023-07-13T11:16:38 | 2023-07-13T11:16:38 | 268,571,175 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | # dispatch.py
# definitions:
import threading
class Dispatcher(object):
    """Multicast delegate: calling the dispatcher invokes every registered
    target with the same arguments.

    Targets are registered with ``+=`` and removed with ``-=``. When
    *nonBlocking* is true, dispatch happens on a background timer thread;
    otherwise targets run synchronously in the caller's thread.
    """

    def __init__(self, targets=None, nonBlocking=True):
        # Note: a non-empty *targets* list is used as-is (aliased), matching
        # the original behaviour.
        self._targets = targets if targets else []
        self._nonBlocking = nonBlocking

    def __iadd__(self, target):
        """Register *target*; enables the ``dispatcher += fn`` syntax."""
        self._targets.append(target)
        return self

    def __isub__(self, target):
        """Remove one registration of *target* (``dispatcher -= fn``)."""
        self._targets.remove(target)
        return self

    def isNonBlocking(self):
        """Return True when dispatch is asynchronous."""
        return self._nonBlocking

    nonBlocking = property(isNonBlocking)

    def __call__(self, *listArgs, **kwArgs):
        """Invoke every registered target with the given arguments."""
        def invokeTargets():
            for target in self._targets:
                target(*listArgs, **kwArgs)

        if self.nonBlocking:
            # Timer(0, ...) runs the fan-out on its own thread immediately.
            threading.Timer(0, invokeTargets).start()
        else:
            invokeTargets()
# demos:
def Test1():
    """
    A simple example demonstrating most functionality.
    """
    def m1():
        print('m1 invoked')

    def m2():
        print('m2 invoked')

    # Register m1 once and m2 twice, then fire, removing one registration
    # between each round of dispatching.
    dispatcher = Dispatcher()
    dispatcher += m1
    dispatcher += m2
    dispatcher += m2
    print('Dispatching:')
    dispatcher()
    dispatcher -= m1
    print('Dispatching:')
    dispatcher()
    dispatcher -= m2
    print('Dispatching:')
    dispatcher()
def Test2():
    """
    A more realistic example for the OO programmer.
    """
    class Sprite(object):
        # Class attribute: every Sprite instance shares this Dispatcher,
        # since it is assigned once in the class body.
        locationChanged = Dispatcher()

        def __init__(self, location):
            self._location = location

        def getLocation(self):
            return self._location

        def setLocation(self, newLocation):
            previous = self._location
            self._location = newLocation
            # Dispatch a "property change event"
            self.locationChanged(previous, newLocation)

        location = property(getLocation, setLocation)

    sprite = Sprite((2,4))

    def SpriteLocationChanged(oldLocation, newLocation):
        print('oldLocation =', oldLocation)
        print('newLocation =', newLocation)

    sprite.locationChanged += SpriteLocationChanged
    sprite.location = (3,4)
    sprite.location = (4,4)
# Run both demos when executed as a script.
if __name__ == '__main__':
    Test1()
    Test2()
| [
"johannes.buchner.acad@gmx.com"
] | johannes.buchner.acad@gmx.com |
b75a006234cd636a9f0b674101009b376cf4ede1 | e5a0a77a66563511c72feda18229712f109ab16d | /code/Chapter 14 - configparser/crud_config.py | f82dbd809b9feb0fd1c7fc3301b61832c269eb04 | [] | no_license | driscollis/Python-101-Russian | 0136b3fe3edee025e4408a89c0461bb79ab4be07 | 28ce6727ef56dee8b6966526c5f80d8323ec9d73 | refs/heads/master | 2021-10-20T23:31:05.413934 | 2018-10-23T06:54:30 | 2018-10-23T06:54:30 | 149,648,717 | 0 | 2 | null | 2018-10-23T06:54:31 | 2018-09-20T17:53:06 | Python | UTF-8 | Python | false | false | 760 | py | import configparser
import os
def crudConfig(path):
    """
    Create, read, update, delete config

    Ensures a config file exists at *path* (creating one via createConfig
    if missing), reads two options from the Settings section, bumps
    font_size to "12", drops font_style, and writes the result back.
    """
    if not os.path.exists(path):
        createConfig(path)

    parser = configparser.ConfigParser()
    parser.read(path)

    # Read a couple of values (this also validates that the Settings
    # section and these options exist — missing ones raise).
    font = parser.get("Settings", "font")
    font_size = parser.get("Settings", "font_size")

    # Update one option and delete another.
    parser.set("Settings", "font_size", "12")
    parser.remove_option("Settings", "font_style")

    # Persist the modified configuration.
    with open(path, "w") as config_file:
        parser.write(config_file)
# Demo driver: run the CRUD cycle against a local settings.ini.
if __name__ == "__main__":
    path = "settings.ini"
    crudConfig(path)
"mike@pythonlibrary.org"
] | mike@pythonlibrary.org |
bb4412640665c78c6c83623e2f5858eb4ea901a8 | cc2320819a23ff1e5270d474b806e087b2dbceaf | /rabbit_subr/pipelines.py | 38ed8018f3a9b22f1b25b74316cc79fd13d1258a | [] | no_license | cbscientist/rabbit_scrapy | cbc9c65f85b13de46192453893cb1871bfb4c730 | 490177b9e67375037351d5b85b9c1eac4002310c | refs/heads/master | 2021-01-01T18:54:07.164982 | 2015-03-25T17:29:19 | 2015-03-25T17:29:19 | 32,879,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class RabbitSubrPipeline(object):
    """Scrapy item-pipeline stub: passes every scraped item through
    unchanged. Register it in ITEM_PIPELINES and add processing logic
    in process_item as needed.
    """

    def process_item(self, item, spider):
        """Return *item* untouched; *spider* is unused."""
        return item
| [
"clairesalling@gmail.com"
] | clairesalling@gmail.com |
0a50a5878a88fadb82d274ab672c616160eb913b | 79e8e93a6807a4e2bd4923e0d9b78e3a7b38bb0b | /python/round2/permutations.py | 9ef6c0e553d85cf6940d2bfd03b7f8a1e35da930 | [] | no_license | djole103/algo | 2885c30e927898c749e99ee05ff6c8f43033c9eb | 5c60dc77fcc091d1b2c52de99ee3071d82e1e17f | refs/heads/master | 2020-04-12T02:28:58.300269 | 2017-04-19T23:18:04 | 2017-04-19T23:18:04 | 43,453,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | import collections
#O(n) storage
def isPermutation(str1, str2):
    """Return True when *str2* is a permutation of *str1*.

    O(n) time and O(n) extra space via character counts. None inputs are
    treated as "not a permutation".
    """
    if str1 is None or str2 is None:
        return False
    # BUG FIX: without a length check, str2 being a strict sub-multiset of
    # str1 (e.g. "abc" vs "ab") was wrongly reported as a permutation.
    if len(str1) != len(str2):
        return False
    counts = collections.defaultdict(int)
    for ch in str1:
        counts[ch] += 1
    for ch in str2:
        if ch not in counts:
            return False
        counts[ch] -= 1
        if counts[ch] < 0:
            return False
    return True
def isPermutationLol(str1, str2):
    """Permutation check by sorting: two strings are permutations of each
    other exactly when their sorted character lists are equal."""
    left = sorted(str1)
    right = sorted(str2)
    return left == right
def allPermutations(str):
    """Return every permutation of *str*.

    For inputs of length <= 1 the string itself is returned; otherwise a
    list of strings (duplicates appear when characters repeat).
    """
    if len(str) <= 1:
        return str
    perms = []
    for i, ch in enumerate(str):
        remainder = str[:i] + str[i + 1:]
        for tail in allPermutations(remainder):
            perms.append(ch + tail)
    return perms
# Demo: print all 6 permutations of "abc".
print(allPermutations("abc"))
def swapPermute(xs, low=0):
    """Yield every permutation of the list *xs* by swapping elements into
    position *low* and recursing on the suffix.

    WARNING: the SAME list object is yielded each time and is mutated
    in place between yields — callers that keep results must copy
    (e.g. list(p)).
    """
    if low+1 >= len(xs):
        yield xs
    else:
        # First emit the permutations that keep xs[low] in place...
        for p in swapPermute(xs, low+1):
            yield p
        # ...then swap each later element into position `low`, recurse,
        # and swap back to restore the original order before the next pick.
        for i in range(low+1,len(xs)):
            xs[low], xs[i] = xs[i], xs[low]
            for p in swapPermute(xs, low+1):
                yield p
            xs[low], xs[i] = xs[i], xs[low]
# Demo: print each permutation of ['a','b','c'] (note: the same list
# object, mutated in place, is printed on each iteration).
for i in swapPermute(['a','b','c']):
    print(i)
| [
"djordje_7@hotmail.com"
] | djordje_7@hotmail.com |
e28344b490033f65b86bea33ef1552aca7424176 | 201e53869312129cd050cc7ac32b855568cc3622 | /SNS/discovery/tcpack_ping.py | 61b309d7d9ce0fae599af967cdf7c42fa3bf4e43 | [] | no_license | an-learn/Scapy-networt-scanning | 49f5d920b595c43f0d2fd8140411fe4bffe98cb1 | 0da3b57ecb62c0f17836b4936208cf28559a397b | refs/heads/master | 2021-04-14T02:47:43.399978 | 2018-03-26T08:03:35 | 2018-03-26T08:03:35 | 126,783,246 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,688 | py | #!/usr/bin/python
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from datetime import datetime
from scapy.all import *
import threading
from Queue import Queue
q = Queue()
import sys
sys.path.append("..")
import main
# --- interactive menu helpers (Python 2: print statement, raw_input) ---
def list_select():
    # Print the top-level menu of the TCP-ACK ping tool.
    print"===========================\n"
    print"1.Enter IP address"
    print"	1.1 Normal"
    print"	1.2 Neatly (when IP too much,a little slow)\n"
    print"2.Select a file\n"
def list_ips():
    # Print the usage hint for manual IP entry.
    print"===========================\n"
    print"Separate IP address by \",\" (support \"1-254\")\n"
def list_iptxt():
    # Print the usage hint for file-based IP entry.
    print"===========================\n"
    print"Select a file(.txt) with IP list\n"
def select():
    # Read a menu choice and dispatch to the matching scan mode.
    # NOTE: invalid input re-enters select() recursively (no loop), so a
    # long run of bad input grows the call stack.
    answer3 = raw_input("[discovery-ACK] Select > ")
    if answer3 == "1.1":
        list_ips()
        ips1()
    elif answer3 == "1.2":
        list_ips()
        ips2()
    elif answer3 == "2":
        list_iptxt()
        iptxt_start()
    elif answer3 == "show":
        # Re-display the menu, then prompt again.
        list_select()
        select()
    elif answer3 == "bk":
        # Back to the parent discovery menu.
        print"===========================\n"
        main.select_discovery()
    elif answer3 == "exit":
        sys.exit()
    else:
        print"===========================\n"
        print"No Such Number,Select again.\n"
        select()
#############################################
############### 1.1 Normal ##################
#############################################
def ips1():
    # "Normal" mode: parse a comma-separated IP list (hyphen ranges like
    # a.b.c.1-254 allowed), validate with regexes, scan, then loop.
    global ips_list_h,ips_list_c
    ips_list_h = [] # "-" hyphen
    ips_list_c = [] # "," comma
    ips_input = raw_input("[discovery-ACK] IP > ")
    if ips_input == "bk":
        print"===========================\n"
        select()
    elif ips_input == "exit":
        sys.exit()
    else:
        ips_list = ips_input.split(',')
        ips_list_unre = list(set(ips_list)) #qu chong fu
        for i in ips_list_unre:
            # First regex: dotted quad whose last octet is a hyphen range.
            if re.match(r"^(?:(?:1[0-9][0-9]\.)|(?:2[0-4][0-9]\.)|(?:25[0-5]\.)|(?:[1-9][0-9]\.)|(?:[0-9]\.)){3}(?:(?:1[0-9][0-9]\-)|(?:2[0-4][0-9]\-)|(?:25[0-5]\-)|(?:[1-9][0-9]\-)|(?:[0-9]\-))(?:(?:1[0-9][0-9])|(?:2[0-4][0-9])|(?:25[0-5])|(?:[1-9][0-9])|(?:[0-9]))$", i):
                ips_list_h.append(i)
            # Second regex: a plain dotted-quad address.
            elif re.match(r"^(?:(?:1[0-9][0-9]\.)|(?:2[0-4][0-9]\.)|(?:25[0-5]\.)|(?:[1-9][0-9]\.)|(?:[0-9]\.)){3}(?:(?:1[0-9][0-9])|(?:2[0-4][0-9])|(?:25[0-5])|(?:[1-9][0-9])|(?:[0-9]))$", i):
                ips_list_c.append(i)
            else:
                print"===========================\n"
                print("Include incorrect IP format: "+i)
                print"Try again\n"
                ips1()
        t1 = datetime.now()
        print"===========================\n"
        ips1_h_start()
        print"finish in "+str(datetime.now()-t1)
        print"---------------------------"
        ips1()
#############################################
def ips1_h_start():
    # Expand each hyphen range (a.b.c.x-y) into individual addresses,
    # appending them to the plain-address list, then start the scan.
    global ips_list_h,ips_list_c
    ips_h_num = len(ips_list_h)
    for ih in range(0,ips_h_num):
        addr1 = int(ips_list_h[ih].split('.')[3].split('-')[0])
        addr2 = int(ips_list_h[ih].split('.')[3].split('-')[1])
        prefix = ips_list_h[ih].split('.')[0] + '.' + ips_list_h[ih].split('.')[1] + '.' + ips_list_h[ih].split('.')[2] + '.'
        for addr in range(addr1,addr2+1):
            iph = prefix + str(addr)
            ips_list_c.append(iph)
    ips1_c_start()
#############################################
def ips1_c_start():
    # De-duplicate the address list, queue one index per host, and fan out
    # the probes across 50 worker threads; q.join() blocks until done.
    global ips_list_c
    ips_list_c = list(set(ips_list_c))
    ips_c_num = len(ips_list_c)
    print"	Alive:"
    map(q.put,xrange(0,ips_c_num))
    threads = [threading.Thread(target=worker1_c) for i in xrange(50)]
    map(lambda x:x.start(),threads)
    q.join()
def ips1_c(ic):
    # TCP-ACK probe of one host on ports 80/445/2222; any reply (RST for
    # an alive host) means the host is up and gets printed.
    answer1 = sr1(IP(dst = ips_list_c[ic])/TCP(dport=80,flags="A"),timeout=1,verbose=0)
    answer2 = sr1(IP(dst = ips_list_c[ic])/TCP(dport=445,flags="A"),timeout=1,verbose=0)
    answer3 = sr1(IP(dst = ips_list_c[ic])/TCP(dport=2222,flags="A"),timeout=1,verbose=0)
    if answer1 == None and answer2 == None and answer3 == None:
        pass
    else:
        print(ips_list_c[ic]+"\n")
def worker1_c():
    # Worker loop: pull host indices off the shared queue until empty.
    while not q.empty():
        ic = q.get()
        try:
            ips1_c(ic)
        finally:
            q.task_done()
#############################################
############## 1.2 Neatly ###################
#############################################
def ips2():
    # "Neatly" mode: like ips1, but ranges are scanned per-list so output
    # is grouped, at the cost of speed for large inputs.
    global ips_list_h,ips_list_c,ips_c_num,ips_h_num
    ips_list_h = [] # "-" hyphen
    ips_list_c = [] # "," comma
    ips_input = raw_input("[discovery-ACK] IP > ")
    if ips_input == "bk":
        print"===========================\n"
        select()
    elif ips_input == "exit":
        sys.exit()
    else:
        ips_list = ips_input.split(',')
        ips_list_unre = list(set(ips_list)) #qu chong fu
        for i in ips_list_unre:
            # Hyphen-range address (a.b.c.x-y).
            if re.match(r"^(?:(?:1[0-9][0-9]\.)|(?:2[0-4][0-9]\.)|(?:25[0-5]\.)|(?:[1-9][0-9]\.)|(?:[0-9]\.)){3}(?:(?:1[0-9][0-9]\-)|(?:2[0-4][0-9]\-)|(?:25[0-5]\-)|(?:[1-9][0-9]\-)|(?:[0-9]\-))(?:(?:1[0-9][0-9])|(?:2[0-4][0-9])|(?:25[0-5])|(?:[1-9][0-9])|(?:[0-9]))$",i):
                ips_list_h.append(i)
            # Plain dotted-quad address.
            elif re.match(r"^(?:(?:1[0-9][0-9]\.)|(?:2[0-4][0-9]\.)|(?:25[0-5]\.)|(?:[1-9][0-9]\.)|(?:[0-9]\.)){3}(?:(?:1[0-9][0-9])|(?:2[0-4][0-9])|(?:25[0-5])|(?:[1-9][0-9])|(?:[0-9]))$",i):
                ips_list_c.append(i)
            else:
                print"===========================\n"
                print("Include incorrect IP format: "+i)
                print"Try again\n"
                ips2()
        t1 = datetime.now()
        print"===========================\n"
        ips_c_num = len(ips_list_c)
        ips_h_num = len(ips_list_h)
        if ips_c_num > 0:
            # Plain addresses form one extra list on top of the ranges.
            print("Divide all IP into "+str(len(ips_list_h)+1)+" lists:\n")
            ips2_c_start()
            ips2_h_start()
        else:
            print("Divide all IP into "+str(len(ips_list_h))+" lists:\n")
            ips2_h_start()
        print"\nfinish in "+str(datetime.now()-t1)
        print"---------------------------"
        ips2()
#############################################
def ips2_c_start():
    # Scan the plain-address list with 50 worker threads and print it as
    # its own group.
    print"---------------------------"
    print("	"+str(ips_c_num)+" IP in:\n"+str(ips_list_c)+"\n\n	Alive:")
    map(q.put,xrange(0,ips_c_num))
    threads = [threading.Thread(target=worker2_c) for i in xrange(50)]
    map(lambda x:x.start(),threads)
    q.join()
def ips2_c(ic):
    # TCP-ACK probe of one host on ports 80/445/2222; any reply => alive.
    answer1 = sr1(IP(dst = ips_list_c[ic])/TCP(dport=80,flags="A"),timeout=1,verbose=0)
    answer2 = sr1(IP(dst = ips_list_c[ic])/TCP(dport=445,flags="A"),timeout=1,verbose=0)
    answer3 = sr1(IP(dst = ips_list_c[ic])/TCP(dport=2222,flags="A"),timeout=1,verbose=0)
    if answer1 == None and answer2 == None and answer3 == None:
        pass
    else:
        print(ips_list_c[ic]+"\n")
def worker2_c():
    # Worker loop for the plain-address group.
    while not q.empty():
        ic = q.get()
        try:
            ips2_c(ic)
        finally:
            q.task_done()
#############################################
def ips2_h_start():
    # Scan each hyphen range as its own group: workers read the current
    # range's network prefix through the module-global `prefix`.
    global prefix
    for ih in range(0,ips_h_num):
        addr1 = int(ips_list_h[ih].split('.')[3].split('-')[0])
        addr2 = int(ips_list_h[ih].split('.')[3].split('-')[1])
        prefix = ips_list_h[ih].split('.')[0] + '.' + ips_list_h[ih].split('.')[1] + '.' + ips_list_h[ih].split('.')[2] + '.'
        print"---------------------------"
        print(str(addr2 - addr1 + 1)+" IP in:\n['"+str(ips_list_h[ih])+"']\n\n	Alive:")
        map(q.put,xrange(addr1,addr2+1))
        threads = [threading.Thread(target=worker2_h) for i in xrange(50)]
        map(lambda x:x.start(),threads)
        q.join()
def ips2_h(addr):
    # TCP-ACK probe of prefix+addr on ports 80/445/2222; any reply => alive.
    answer1 = sr1(IP(dst = prefix + str(addr))/TCP(dport=80,flags="A"),timeout=1,verbose=0)
    answer2 = sr1(IP(dst = prefix + str(addr))/TCP(dport=445,flags="A"),timeout=1,verbose=0)
    answer3 = sr1(IP(dst = prefix + str(addr))/TCP(dport=2222,flags="A"),timeout=1,verbose=0)
    if answer1 == None and answer2 == None and answer3 == None:
        pass
    else:
        print(prefix+str(addr)+"\n")
def worker2_h():
    # Worker loop for the current range group.
    while not q.empty():
        addr = q.get()
        try:
            ips2_h(addr)
        finally:
            q.task_done()
#############################################
############# 2.Select a file ###############
#############################################
def iptxt_start():
    # File mode: read one IP per line from a text file and probe each
    # with 50 worker threads.
    filename = raw_input("[discovery-ACK] Filename > ")
    if filename == "bk":
        print"===========================\n"
        select()
    elif filename == "exit":
        sys.exit()
    else:
        t1 = datetime.now()
        try:
            file = open(filename,'r')
            print"===========================\n"
            print"	Alive:"
            # Queue the file's lines directly; workers strip whitespace.
            map(q.put,file)
            threads = [threading.Thread(target=worker_iptxt) for i in xrange(50)]
            map(lambda x:x.start(),threads)
            q.join()
        except IOError:
            print"==========================="
            print"File \""+filename + "\" not exist"
            print"Try again\n"
        print"finish in "+str(datetime.now()-t1)
        print"---------------------------"
        iptxt_start()
def iptxt(ipt):
    # TCP-ACK probe of one host read from the file.
    # NOTE(review): this mode probes port 24242 while ips1_c/ips2_c/ips2_h
    # all use 2222 — confirm whether the difference is intentional.
    answer1 = sr1(IP(dst=ipt.strip())/TCP(dport=80,flags="A"),timeout=1,verbose=0)
    answer2 = sr1(IP(dst=ipt.strip())/TCP(dport=445,flags="A"),timeout=1,verbose=0)
    answer3 = sr1(IP(dst=ipt.strip())/TCP(dport=24242,flags="A"),timeout=1,verbose=0)
    if answer1 == None and answer2 == None and answer3 == None:
        pass
    else:
        print(ipt.strip()+"\n")
def worker_iptxt():
    # Worker loop for file mode.
    while not q.empty():
        ipt = q.get()
        try:
            iptxt(ipt)
        finally:
            q.task_done()
#############################################
def tcpack_ping_start():
    # Entry point: show the menu, then enter the interactive prompt.
    list_select()
    select()
if __name__ == '__main__':
    tcpack_ping_start()
| [
"noreply@github.com"
] | noreply@github.com |
90794ec95294eccc4e08c6f9a498c9ec16f50c87 | fa9acf77eeab5a3f8d29e3f8b87c6aea3e44f8bf | /common/dataset/CifarDataset.py | ffe6fb4ff31fe41707f6f9bcd0f3505431a8b90b | [] | no_license | dshelukh/CapsuleLearner | 624837947b6fa81ad60aa7a29968bbcc6fb64c0f | 00e4f76b406251d426d3e5cb3e7cfd1d5f796ed8 | refs/heads/master | 2022-01-22T00:35:23.487975 | 2019-06-18T15:37:09 | 2019-06-18T15:37:09 | 116,154,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,181 | py | '''
@author: Dmitry
'''
from common.dataset.DatasetBase import *
import pickle as pkl
import tarfile
import os.path
import numpy as np
'''
import time
import os
import urllib.request
from scipy.io import loadmat
import sys
'''
def random_translate(image, vbound=(-2, 2), hbound=(-2, 2)):
    """Shift *image* by a random number of pixels along axes 0 and 1,
    wrapping around the edges (np.roll).

    vbound / hbound are inclusive (min, max) ranges for the vertical and
    horizontal shift respectively.
    """
    shift_v = np.random.randint(vbound[0], vbound[1] + 1)
    shift_h = np.random.randint(hbound[0], hbound[1] + 1)
    return np.roll(image, (shift_v, shift_h), axis=(0, 1))
def get_translator(vbound, hbound):
    """Return a single-argument callable applying random_translate with the
    given bounds (handy as a mapped/vectorized augmentation function)."""
    def translate(image):
        return random_translate(image, vbound, hbound)
    return translate
#Preprocess from .mat files
class CifarPreprocessor():
    """Preprocessing for CIFAR-10: scales/reshapes images to NHWC float32
    and one-hot encodes integer labels.

    Args:
        num_labels: number of classes (10 for CIFAR-10).
        feature_range: (min, max) range the pixel values are scaled into.
    """
    def __init__(self, num_labels, feature_range = (-1, 1)):
        self.num_labels = num_labels
        self.feature_range = feature_range

    def preprocess_X(self, images, augmentation):
        """Scale flat uint8 rows into feature_range, reshape to NHWC
        (N, 32, 32, 3), and optionally apply random +/-4 px translations."""
        retVal = scale(images, self.feature_range).astype(np.float32)
        retVal = np.reshape(retVal, [-1, 3, 32, 32])
        # from nchw to nhwc
        retVal = np.transpose(retVal, [0, 2, 3, 1])
        if augmentation:
            # Random wrap-around translation per image (nondeterministic).
            func = np.vectorize(get_translator((-4, 4), (-4, 4)), signature = '(n,m,k)->(n,m,k)')
            retVal = func(retVal)
        return retVal

    def onehot_y(self, data):
        """One-hot encode integer labels, shape (N, num_labels).

        BUG FIX: the original computed eye[data - 1] (an SVHN leftover,
        where labels run 1..10). CIFAR-10 labels run 0..9, so label 0 was
        silently mapped to the LAST row via negative indexing.
        """
        one_hotter = np.eye(self.num_labels)
        return one_hotter[np.reshape(data, [-1])]

    def preprocess(self, imgs, labels, augmentation = True):
        """Preprocess an (images, labels) pair; empty inputs pass through
        unchanged."""
        if imgs.size > 0 and labels.size > 0:
            labels = self.onehot_y(labels)
            imgs = self.preprocess_X(imgs, augmentation)
        return imgs, labels
def unpickle(file):
with open(file, 'rb') as fo:
dict = pkl.load(fo, encoding='bytes')
return dict
class Cifar10Dataset(DownloadableDataset):
    """CIFAR-10 dataset wrapper: downloads/extracts the python batches,
    unpickles them, and exposes train/val/test DatasetBase splits."""
    # leave_labels = -1 means use all labels
    def __init__(self, val_split = 0.0, leave_labels = -1, feature_range = (-1, 1), data_dir = 'data-cifar10/'):
        files_to_download = ['cifar-10-python.tar.gz']
        DownloadableDataset.__init__(self, 'https://www.cs.toronto.edu/~kriz/', files_to_download, data_dir)

        self.val_split = val_split
        self.leave_labels = leave_labels
        self.prep = CifarPreprocessor(10, feature_range = feature_range)

        self.cifar_batches_dir = self.data_dir + 'cifar-10-batches-py/'
        self.cifar_files_train = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5']
        self.cifar_files_test = ['test_batch']
        self.cifar_files_all = self.cifar_files_train + self.cifar_files_test
        self.cifar_pic_size = 32 * 32 * 3  # flat row length of one RGB image
        self.cifar_data_tag = b'data'
        self.cifar_labels_tag = b'labels'
        self.loadDataset()

    def check_files_exist(self):
        """Return True when every extracted batch file is present on disk."""
        for f in self.cifar_files_all:
            if not os.path.isfile(self.cifar_batches_dir + f):
                return False
        return True

    def unpickle_all(self, files):
        """Unpickle the given batch files and return (data, labels) arrays
        concatenated across files."""
        data = np.array([], dtype = np.uint8).reshape([0, self.cifar_pic_size])
        labels = np.array([], dtype = np.uint8).reshape([0])
        for f in files:
            dict = unpickle(self.cifar_batches_dir + f)
            data = np.concatenate([data, np.array(dict[self.cifar_data_tag])], axis = 0)
            labels = np.concatenate([labels, np.array(dict[self.cifar_labels_tag])], axis = 0)
        return data, labels

    def loadDataset(self):
        """Extract the archive if needed, unpickle all batches, and build
        the train/val/test splits."""
        # untar if needed
        exist = self.check_files_exist()
        if not exist:
            for f in self.file_list:
                self.extract(f, self.data_dir + f)
        # unpickle
        self.train_data, self.train_labels = self.unpickle_all(self.cifar_files_train)
        self.test_data, self.test_labels = self.unpickle_all(self.cifar_files_test)

        num = len(self.train_data)
        labels_info = np.ones(num)
        if self.leave_labels != -1:
            # BUG FIX: np.concatenate takes a sequence of arrays; the
            # original passed the second array as the `axis` argument and
            # raised TypeError whenever leave_labels != -1.
            labels_info = np.concatenate([np.ones(self.leave_labels), np.zeros(num - self.leave_labels)])
        val_split = int(num * self.val_split)
        self.train = DatasetBase(self.train_data[val_split:], self.train_labels[val_split:], labels_info[val_split:])
        self.val = DatasetBase(self.train_data[:val_split], self.train_labels[:val_split], labels_info[:val_split])
        self.test = DatasetBase(self.test_data, self.test_labels)
        print('Train: inputs - ' + str(self.train.images.shape) + '\t outputs - ' + str(self.train.labels.shape))
        print('Val  : inputs - ' + str(self.val.images.shape) + '\t outputs - ' + str(self.val.labels.shape))
        print('Test : inputs - ' + str(self.test.images.shape) + '\t outputs - ' + str(self.test.labels.shape))
        # Disabled SVHN-era loader kept for reference:
        '''
        trainset = loadmat(self.data_dir + 'train_32x32.mat')
        testset = loadmat(self.data_dir + 'test_32x32.mat')
        if (self.with_extra):
            extraset = loadmat(self.data_dir + 'extra_32x32.mat')
            trainset = {'X': np.concatenate((trainset['X'], extraset['X']), axis = 3), 'y':np.concatenate((trainset['y'], extraset['y']))}
        trainset['X'] = np.rollaxis(trainset['X'], 3)
        testset['X'] = np.rollaxis(testset['X'], 3)
        '''

    def get_dataset_for_trainer(self, lazy_prepare = True, with_reconstruction = True):
        """Return a trainer-ready dataset; with lazy_prepare preprocessing
        happens per batch, otherwise all splits are preprocessed up front."""
        dataset = LazyPrepDataset((self.train, self.val, self.test), self.prep, no_shuffle = False)
        if not lazy_prepare:
            print('No lazy prepare!')
            self.train.images, self.train.labels = self.prep.preprocess(self.train.images, self.train.labels)
            self.val.images, self.val.labels = self.prep.preprocess(self.val.images, self.val.labels)
            self.test.images, self.test.labels = self.prep.preprocess(self.test.images, self.test.labels)
            #TODO: use decorators!
            base_dataset = SemiSupervisedDataset if self.leave_labels != -1 else ShuffleDataset
            dataset = base_dataset((self.train, self.val, self.test), no_shuffle = False)
        return DatasetWithReconstruction(dataset, with_reconstruction = with_reconstruction)
# Smoke test: load the dataset and show the training labels.
if __name__ == "__main__":
    dataset = Cifar10Dataset()
    # BUG FIX: the original printed dataset.data[b'labels'], but the class
    # defines no `data` attribute (only train_data/train_labels/...), so
    # this line always raised AttributeError.
    print(dataset.train_labels)
| [
"dshelukh@gmail.com"
] | dshelukh@gmail.com |
8441c2afa17f311500ce9da51fa3705416fa4148 | 3843509791dd0c30c5c9631cff776ee77ba887cd | /runner.py | aa7e398eaf66937b10a920510c584dc3586551c9 | [] | no_license | nsbradford/HorizonCV | 570098d4907a6536df12e0d1b7b64b769ad6ecf5 | ec535a93f7e8eb44891d59e874d837ebf4ea83c3 | refs/heads/master | 2022-11-15T14:14:32.768599 | 2022-11-05T23:08:16 | 2022-11-05T23:08:16 | 87,033,799 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | """
runner.py
Nicholas S. Bradford
4/12/2017
"""
import nose
import cv2
import numpy as np
from horizoncv import demo
def testAll():
print('Test...')
argv = ['fake',
'-verbosity=2',
'--nocapture',
'--with-coverage',
'--cover-package=horizoncv']
result = nose.run(argv=argv)
return result
if __name__ == '__main__':
testResult = testAll()
if testResult:
# demo.time_score()
# demo.optimization_surface_demo()
# demo.timerDemo() # framerate of 25
# demo.video_demo('flying_turn.avi')
demo.video_demo('turn1.mp4')
| [
"nsbradford@gmail.com"
] | nsbradford@gmail.com |
7805244463d2fd3897904857f92487d73aff4204 | 0f6d8b0864d01c1e97bf9852fcf35866adfe0606 | /PythonGrammar/oop_and_control_flow.py | e1ee463994162938c8a84567ea529a409226d955 | [] | no_license | NicholasZXT/DataAnalysis | afe94d0cf200911ffcab133912774255a589547f | 0c3d8f872cc8db45bf5b36423bea7fc35de27a3e | refs/heads/master | 2023-08-16T19:39:56.000845 | 2023-08-10T09:07:11 | 2023-08-10T09:07:11 | 214,614,649 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,675 | py | import sys
import os
from collections import namedtuple
# ========= 类的定义和使用 ====================
def __Class_Practice():
    # Section marker for the class examples below; intentionally empty.
    pass
# 定义类
class Person:
    """Demo class showing attribute visibility conventions:

    - name: public instance attribute
    - _sex: "protected" by convention only
    - __weight: name-mangled to _Person__weight
    - __height__: dunder-style names are NOT mangled
    - address: class attribute shared by all instances
    """

    address = 'China'

    def __init__(self, name, sex, weight, height):
        self.name = name
        self._sex = sex
        self.__weight = weight
        self.__height__ = height

    def get_name(self):
        """Return this person's name."""
        return self.name

    @classmethod
    def get_address(cls):
        """Return the class-level address attribute."""
        return cls.address
# p1 = Person('Daniel', 'male', 65, 170)
# p1.name
# p1._sex
# p1.__weight
# p1.__height__
# p1.get_name()
# p1.address
# Person.address
# p1.address = "Earth"
# p1.address
# Person.address
# Person.address = 'Earth'
# p2 = Person('Daniel', 'male', 65 ,170)
# =======继承=================
class Student(Person):
    """Inheritance demo: a Person with a student id.

    BUG FIX: the original called Person.__init__ with only three of its
    four required arguments (height was missing), so instantiating
    Student always raised TypeError. *height* is now an optional trailing
    parameter (default None) and is forwarded to the base class, keeping
    the original 4-argument call signature working.
    """
    def __init__(self, name, sex, weight, sid, height=None):
        super().__init__(name, sex, weight, height)
        self.sid = sid
# s1 = Student('Daniel', 'male', 65, 20161212)
# ================装饰器============================
def __Decorator_Practice():
    # Section marker for the decorator examples below; intentionally empty.
    pass
def fun_decorator(f):
    """Decorator that logs the callee's name and its positional/keyword
    arguments before delegating to it."""
    def wrapper(*args, **kwargs):
        print("call:", f.__name__)
        print("positional arguments:", args)
        print("keyword arguments:", kwargs)
        return f(*args, **kwargs)

    return wrapper
def add(x, y):
    """Return the sum of x and y (plain version, decorated manually below)."""
    total = x + y
    return total
# add(3,4)
# add_document = fun_decorator(add)
# add_document(3,4)
@fun_decorator
def add(x, y):
    """Same adder, this time decorated at definition time."""
    total = x + y
    return total
# add(3,4)
# 带参装饰器
def fun_decorator(text):
    """Parameterized decorator: prints *text* plus the callee's name and
    arguments before each call, then delegates."""
    def wrapper(f):
        def inner_wrapper(*args,**kwargs):
            print(text,f.__name__)
            print("positional arguments:",args)
            print("keyword arguments:",kwargs)
            return f(*args,**kwargs)

        return inner_wrapper

    return wrapper
@fun_decorator('execute')
def add(x, y):
    """Adder decorated by the parameterized decorator above."""
    total = x + y
    return total
# add(3,4)
# ========== 协程 ============================
def __Coroutine_Practice():
    # Section marker for the coroutine examples below; intentionally empty.
    pass
# -------- yield 的生成器用法 ----------
def gen_fun():
    """Generator demo: yields 'A' then 'B', printing progress markers.

    (Translated from the original notes: yield usually sits inside a
    loop, but that is not required; here each yield has a value on its
    right and no expression on its left — it only produces values.)
    """
    print('start')
    yield 'A'
    print('continue')
    yield 'B'
    print('end.')
# if __name__ == '__main__':
# for v in gen_fun():
# print('===>', v)
# -------- yield 的协程用法 -----------
def simple_coroutine():
    """Coroutine demo: receives one value via send() and prints it.

    (Translated from the original notes: yield has two roles — emitting a
    value to the caller and receiving a value passed in with send(); here
    nothing follows the yield keyword, so it only receives.)
    """
    print('=> coroutine start')
    x = yield
    print('=> coroutine received: ', x)
# if __name__ == '__main__':
# my_coro = simple_coroutine()
# print('my_coro: ', my_coro)
# # 首先调用 next 方法,启动生成器,执行到 yield 处
# next(my_coro)
# # 执行到 yield 处之后,可以通过 send() 方法发送数据
# my_coro.send(12)
# # 这之后 yield 流程继续,直到最后抛出 StopIteration 异常
# 使用协程来实现计算均值
def coroutine_average():
    """Coroutine yielding the running mean of every value sent to it.

    Prime it with next() (the first yield produces None); after that each
    send(value) returns the updated average.
    """
    total = 0.0  # renamed from `sum`, which shadowed the builtin
    count = 0
    average = None
    while True:
        value = yield average
        total += value
        count += 1
        average = total/count
# if __name__ == '__main__':
# cor_average = coroutine_average()
# # 激活协程
# next(cor_average)
# # 或者
# # cor_average.send(None)
# # 计算均值
# cor_average.send(2)
# cor_average.send(3)
# cor_average.send(4)
# cor_average.send(5)
# # 关闭协程
# cor_average.close()
# -------- yield from -----------
Result = namedtuple('Result', ['count', 'average'])

def sub_averager():
    """Accumulate values sent in until None arrives, then return a
    Result(count, average) — delivered via StopIteration.value, which is
    what `yield from` hands to the delegating coroutine."""
    total = 0
    count = 0
    while True:
        value = yield
        if value is None:
            break
        total += value
        count += 1
    average = total/count if count else None
    return Result(count, average)
def grouper(result, key):
    # Delegating coroutine: each completed sub_averager run (terminated by
    # the caller sending None) stores its Result into result[key]; the
    # loop then delegates to a fresh sub-averager for the next run.
    while True:
        result[key] = yield from sub_averager()
def main(data):
    """Compute per-group averages: for each key in *data*, feed its values
    through a grouper coroutine and print the collected Result mapping."""
    result = {}
    for key, values in data.items():
        pipeline = grouper(result, key)
        next(pipeline)  # prime the delegating coroutine
        for value in values:
            pipeline.send(value)
        pipeline.send(None)  # terminate the current sub-averager
    print("result: ", result)
# Sample input: two groups of values whose averages main() will print.
data = {
    'arr1': [1, 2, 3, 4, 5],
    'arr2': [6, 7, 8, 9, 10]
}
if __name__ == '__main__':
    main(data)
"1036562425@qq.com"
] | 1036562425@qq.com |
4a25485e686ee71a8779acb9894ac44d2da004f1 | d1309391f108e044dab2b51f6bed61588c1163b0 | /DigiZiaApp/migrations/0003_phone_specification.py | 937ac458a56424d3a48d60a473362ca1bd8a019c | [] | no_license | ziashanehbandi/DigiZia | 0cb7e94845d25c563e95d4ddb41c8b3517adea15 | bb3639a8daccc0eef3119e284ee10f0a06f12093 | refs/heads/main | 2023-04-16T13:31:58.396982 | 2021-04-27T16:25:49 | 2021-04-27T16:25:49 | 362,179,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Generated by Django 3.1.7 on 2021-04-26 18:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the optional free-text
    # `specification` field to the Phone model.
    dependencies = [
        ('DigiZiaApp', '0002_auto_20210426_0331'),
    ]
    operations = [
        migrations.AddField(
            model_name='phone',
            name='specification',
            # blank=True makes the field optional in forms; for TextField,
            # max_length is enforced only by form validation, not by the DB.
            field=models.TextField(blank=True, max_length=500),
        ),
    ]
| [
"ziashanehbandi@gmail.com"
] | ziashanehbandi@gmail.com |
c1d2f5c25c0936309c89953f0cef51921de927b6 | e34cbf5fce48f661d08221c095750240dbd88caf | /python/day26/learncrm/crmAdmin/templatetags/__init__.py | a32c81fa794bf67b4db658cf0d748f8b0d206638 | [] | no_license | willianflasky/growup | 2f994b815b636e2582594375e90dbcb2aa37288e | 1db031a901e25bbe13f2d0db767cd28c76ac47f5 | refs/heads/master | 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 | C | UTF-8 | Python | false | false | 55 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# by Wendy | [
"284607860@qq.com"
] | 284607860@qq.com |
aa82cd8b14ac750d2e51a46add5665e8f8f4ab0c | d1c5176a1cb370addfb3ef0dabb303fefb9c6f87 | /struktury_danych/min i max while true.py | 568704eb6ce757f8f1a4e50e3f14fb5be05b0b45 | [] | no_license | fzubowicz/Python_bootcamp | 72d58f49f744ae5711f65446433b49be26e4351c | 12d6624f0070038a8a0f28a2d09325a2100f0941 | refs/heads/master | 2020-03-22T12:58:41.499223 | 2018-07-08T14:58:27 | 2018-07-08T14:58:27 | 140,074,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py |
# Read integers from stdin until the user types 'koniec', then print the
# largest and smallest value entered.
komunikat = 'Podaj kolejną liczbę lub wpisz [koniec]'
res = input(komunikat)
# Handle the edge case first: quitting before entering any number.
if res == 'koniec':
    exit('Nie wybrałeś żadnej liczby')  # fixed typo: "wyrałeś" -> "wybrałeś"
liczba = int(res)
# The first number is both the current minimum and maximum. Local names
# `minimum`/`maximum` avoid shadowing the built-in min()/max() functions.
minimum = liczba
maximum = liczba
while True:
    res = input(komunikat)
    if res == 'koniec':
        break
    liczba = int(res)
    if liczba > maximum:
        maximum = liczba
    if liczba < minimum:
        minimum = liczba
# NOTE(review): 'fddfsd' looks like leftover debug text; kept to preserve output.
print(maximum, 'fddfsd', minimum)
"fzubowicz@gmail.com"
] | fzubowicz@gmail.com |
32c84120721d4e812bb8463b78c62dc17ef51e5d | a2da066b6729f208f578307e18700dded468547a | /src/extract_img_features.py | 345d3bb9d2ba5f5ea2ecf3a5ffc9eb863fd6679a | [
"Apache-2.0"
] | permissive | aleSuglia/guesswhat | 2195b31db2c043e263769301da1d853817a90417 | 659d1361b6b49e68ec6087b154490adf9c483908 | refs/heads/master | 2020-09-02T10:51:28.378853 | 2020-06-28T13:16:45 | 2020-06-28T13:16:45 | 219,205,640 | 0 | 0 | Apache-2.0 | 2019-11-02T19:48:24 | 2019-11-02T19:48:24 | null | UTF-8 | Python | false | false | 3,407 | py | #!/usr/bin/env python
import os
from argparse import ArgumentParser
from os import listdir
from time import time
import h5py
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim.python.slim.nets.vgg as vgg
from PIL import Image, ImageFile
# Allow Pillow to load images whose files are truncated instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True
from tqdm import tqdm

# Command-line interface for the feature-extraction script.
parser = ArgumentParser()
parser.add_argument("-image_dir", help="Path to the directory containing the images")
parser.add_argument("-output_dir", help="ResNet features output directory where the features metadata will be stored")
parser.add_argument("-model_ckpt", default="data/vgg.ckpt", help="Path to the VGG-16 Net model checkpoint")
def get_splits(image_dir):
    """Return the names of the sub-directories of *image_dir*.

    Falls back to the conventional ['train', 'val', 'test'] split names
    when *image_dir* contains no sub-directories at all.
    """
    subdirs = []
    for entry in os.listdir(image_dir):
        if os.path.isdir(os.path.join(image_dir, entry)):
            subdirs.append(entry)
    return subdirs if subdirs else ['train', 'val', 'test']
def extract_features(
        images_placeholder,
        image_dir,
        split,
        ft_output,
        network_ckpt,
        out_dir):
    """Run every image of *split* through the restored network and dump the
    activations of *ft_output* into <out_dir>/<split>_features.h5.

    The HDF5 file contains two datasets: 'features' (one row per image) and
    'idx2img' mapping each row index to the image id (filename sans extension).
    """
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        # Restore the pretrained weights into the already-built graph.
        saver = tf.train.Saver()
        saver.restore(sess, network_ckpt)
        img_list = listdir(image_dir)
        print("Load dataset -> set: {}".format(split))
        no_images = len(img_list)
        ############################
        #  CREATE FEATURES
        ############################
        print("Start computing image features...")
        filepath = os.path.join(out_dir, "{}_features.h5".format(split))
        with h5py.File(filepath, 'w') as f:
            # Feature shape is taken from the graph, dropping the batch dim.
            ft_shape = [int(dim) for dim in ft_output.get_shape()[1:]]
            ft_dataset = f.create_dataset('features', shape=[no_images] + ft_shape, dtype=np.float32)
            idx2img = f.create_dataset('idx2img', shape=[no_images], dtype=h5py.special_dtype(vlen=str))
            # Images are processed one at a time (batch of 1 per sess.run).
            for i in tqdm(range(len(img_list))):
                image_filepath = os.path.join(image_dir, img_list[i])
                image_tensor = Image.open(image_filepath).convert('RGB')
                feat = sess.run(ft_output, feed_dict={images_placeholder: np.expand_dims(image_tensor, 0)})
                # Store dataset
                ft_dataset[i] = feat
                # Image id = filename up to the first dot.
                # NOTE(review): assumes every filename contains a '.'; a bare
                # name would raise ValueError here — confirm inputs.
                image_id = img_list[i][:img_list[i].index(".")]
                idx2img[i] = image_id
    print("Finished dumping file: {}".format(filepath))
    print("Done!")
def main(args):
    """Build the VGG-16 graph once, then extract fc8 features for every split."""
    start = time()
    print('Start')
    splits = get_splits(args.image_dir)
    img_size = 224  # NOTE(review): unused — the crop op below hard-codes 224
    # TF graph creation
    images_placeholder = tf.placeholder(tf.float32, [None, None, None, 3], name='image')
    # Center-crop / pad every input image to the 224x224 VGG input size.
    proc_image_op = tf.image.resize_image_with_crop_or_pad(
        images_placeholder,
        target_height=224,
        target_width=224
    )
    # Inference mode: no training, no dropout.
    _, end_points = vgg.vgg_16(proc_image_op, is_training=False, dropout_keep_prob=1.0)
    # The fc8 endpoint is keyed as "vgg_16/fc8" in the end_points dict.
    ft_name = os.path.join("vgg_16", "fc8")
    ft_output = end_points[ft_name]
    ####
    for split in splits:
        extract_features(
            images_placeholder=images_placeholder,
            image_dir=os.path.join(args.image_dir, split),
            ft_output=ft_output,
            out_dir=args.output_dir,
            split=split,
            network_ckpt=args.model_ckpt)
    print('Image Features extracted.')
    print('Time taken: ', time() - start)
if __name__ == '__main__':
    # Parse CLI flags and run the extraction end to end.
    args = parser.parse_args()
    main(args)
| [
"alessandro.suglia@gmail.com"
] | alessandro.suglia@gmail.com |
e07ee7440dc3f636815f2f06a2a2aff021cb0f83 | 359dcdb32288a300d3dcd9402532e4433c1b0c81 | /directionalvi/utils/test/test_load_data.py | afedefc3fba0490903eb1e5cd336eb9579df3d8e | [] | no_license | kkang2097/GP-Derivatives-Variational-Inference | 7d94cec6171a20587887282724dd87ec37f2131f | 0699c5ef20132f92e0bd4f41525eb09f6fd2c118 | refs/heads/main | 2023-08-20T10:43:31.166748 | 2021-10-25T20:46:21 | 2021-10-25T20:46:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | import os
import sys
sys.path.append("../")
from load_data import *
args = {}
args["n_train"] = 12000
args["n_test"] = 2040
args["seed"] = 3
#cwd = os.getcwd()
#print(cwd)
#print("hi")
train, test, dim = load_helens("../../../data/MtSH.mat", **args)
print(len(train))
print(train[0])
print(len(test))
print(dim) | [
"albert11813@gmail.com"
] | albert11813@gmail.com |
7fc96a9563536b294dc30fc237fda52fbf8b6ed9 | 709d63cce6db2c6e42f06f88f7e96e1f822ff773 | /heuristic/functions/routing_costs.py | 4eb313c3bb40550b6939ed416e71b3be0f87a199 | [
"MIT"
] | permissive | cryptonome/OR-Analysis | 09bc567ae213c47d6160c985ff648b44690ce766 | 2070e2dee49f1556fcaa00044d3512e4ba08550f | refs/heads/master | 2022-04-12T04:51:51.494249 | 2020-03-31T09:42:17 | 2020-03-31T09:42:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | import numpy as np
from heuristic.classes import Problem, Route, Solution
from heuristic.constants import DEPOT
def routing_costs(sol: Solution) -> np.ndarray:
    """
    Per-customer routing cost: the marginal cost of keeping each customer
    in its current route, compared with skipping it entirely (e.g. for
    customer [2], the route [1] -> [2] -> [3] versus [1] -> [3]). The
    difference is that customer's routing cost. Runs in O(|customers|).
    """
    problem = Problem()
    costs = np.zeros(problem.num_customers)
    for route in sol.routes:
        for position, customer in enumerate(route):
            costs[customer] = _customer_routing_cost(route, customer, position)
    return costs
def _customer_routing_cost(route: Route, customer: int, idx: int) -> float:
    # Cost delta of visiting *customer* at position *idx* of *route*,
    # relative to the route that skips it (detour cost minus direct cost).
    customers = route.customers
    problem = Problem()
    assert 0 <= idx < len(customers)
    assert customer in route
    # There is just one customer, which, once removed, would result in a cost
    # of zero. Hence the cost for this single customer is just the route cost.
    if len(customers) == 1:
        return route.routing_cost()
    # NOTE(review): the `+ 1` offsets below suggest `problem.distances` is
    # indexed with the depot at 0 and customers shifted by one, while
    # `short_distances` takes raw customer indices — confirm against Problem.
    if idx == 0:
        # First stop: detour depot -> customer -> next, minus depot -> next.
        cost = problem.short_distances[DEPOT, customer, customers[1]]
        cost -= problem.distances[DEPOT + 1, customers[1] + 1]
        return cost
    if idx == len(route) - 1:
        # Last stop: detour prev -> customer -> depot, minus prev -> depot.
        cost = problem.short_distances[customers[-2], customer, DEPOT]
        cost -= problem.distances[customers[-2] + 1, DEPOT + 1]
        return cost
    # Interior stop: detour prev -> customer -> next, minus prev -> next.
    cost = problem.short_distances[customers[idx - 1], customer, customers[idx + 1]]
    cost -= problem.distances[customers[idx - 1] + 1, customers[idx + 1] + 1]
    return cost
| [
"noreply@github.com"
] | noreply@github.com |
7924494e333eaaa3fc1fb45014a438dff96f2abb | c5542154b44f1b228cdadeaf44c6a5998ed37ed4 | /base/day3/if/2numif.py | cf3ae469fede9e992a02e8e751cd5ee19d44e9a9 | [] | no_license | ThrallOtaku/python3Test | a31a2de1576b3a3c1062a7d6908119d7cbf21b36 | c62e6024bbbeafd396b68e40332991758914ba0b | refs/heads/master | 2020-03-07T22:45:47.403999 | 2018-06-08T10:19:42 | 2018-06-08T10:19:42 | 127,763,269 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | '''
x=10
num= 10 if x>18 else 20
print(num)
'''
#ages=10
#price= 20 if ages>=16 else 10
import os
os.system("calc") if 3>12 else os.system("notepad") | [
"tanght@11wlw.com"
] | tanght@11wlw.com |
5adbfd7cbeb0d5d743b0f47012cd9121dca75e95 | 941142d175ab166c6a37d9d8dae038431506d503 | /linebot/mi-bot/wsgi.py | 685eaa98da6ff8efba1c1299b1f006a65a3448df | [
"MIT"
] | permissive | jphacks/FK_1707 | e4a2e5db6f16463d5ea1783705404ccb67df8400 | 9c8a7513eb8ca3d06d68de93568ca1aa0631760c | refs/heads/master | 2021-07-22T18:41:00.761123 | 2017-10-29T04:50:24 | 2017-10-29T04:50:24 | 105,496,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for mi_bot project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mi_bot.settings")
application = get_wsgi_application()
| [
"bskzxc@gmail.com"
] | bskzxc@gmail.com |
5fd162325d1b76f2416508c204ac01e3912b2b7c | eb067a086adce4571a8d69db5423db41d8817d0d | /test.py | e28ceca3d03757572d445f642b7afc80d5a00003 | [] | no_license | thomasballinger/simplerest | 09f47d1e6a3a4f5b6dc5de0f511dfd9d14783e8a | d0309b5a9439de8c16d107d33e4784e0a9b068a9 | refs/heads/master | 2016-09-15T19:00:37.995063 | 2014-05-22T13:12:18 | 2014-05-22T13:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | import socket
server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('', 7000))
server.listen(5)
while True:
s, (ip, port) = server.accept()
data = ''
data = s.recv(10000)
print data
s.send("your page")
s.close()
| [
"thomasballinger@gmail.com"
] | thomasballinger@gmail.com |
313ce40ebd7cc63db4407f9138208d991cdc68dc | 13f6dcc2141ad5e93c5b7f35d014ef66182a6ab9 | /AppDia/programas/generar_pdf_hoy.py | 432e24aa3d138342534f3ede929866d47c14bd63 | [] | no_license | joseluciano57/proyecto_heroku | 52a28256b3612726760bef64d9b8097e2003069b | 6d2e681b0e60e7b0665ccb1786612f6ae9b1c7ff | refs/heads/main | 2023-03-11T01:53:49.890895 | 2021-02-24T19:58:56 | 2021-02-24T19:58:56 | 341,221,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,156 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 12 17:49:59 2020
@author: SYSTEM
"""
from datetime import (date,datetime,timedelta)
import numpy as np
from .libreria_loteria import (lee_datos,conseguir_secuencias_iguales,secuencias_con_iguales_sin_orden,
conjuntos_N_iguales_sin_orden,determina_los_numeros_distintos,
veces_que_ocurrio_un_numero,obtiene_frecuencias,obtiene_matriz_datos_dia,
almacenar_datos_excel,obtiene_mayor_y_menor,obtiene_frecuencias_de_frecuencias,
obtiene_matriz_datos_dia_mes_semana,obtiene_matriz_datos_dia_mes)
from .libreria_grafica import (grafica_en_ventana_general)
from .combinaciones import (obtiene_las_N_mayores_frecuencias,obtiene_numeros_con_N_ocurrencias_mas_altas_o_bajas,
obtiene_las_N_menores_frecuencias)
import csv
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
def _draw_top6(c, archivo, header_y):
    """Draw the NÚMERO/FRECUENCIA column headers at *header_y* and, below
    them, the first six rows of *archivo* (a CSV with columns 'Número' and
    'Frecuencia', already sorted by frequency)."""
    c.drawString(30, header_y, 'NÚMERO')
    c.drawString(100, header_y, 'FRECUENCIA')
    with open(archivo, encoding="utf-8") as csvfile:
        reader = csv.DictReader(csvfile)
        for i, row in enumerate(reader, start=1):
            if i > 6:
                break
            # Rows start 13pt below the header and advance 10pt per row.
            y = header_y - 3 - 10 * i
            c.drawString(40, y, row['Número'])
            c.drawString(120, y, row['Frecuencia'])


def generar_pdf(dia, mes):
    """Render 'posibles_jugadas.pdf' with four top-6 frequency tables:
    overall, per weekday, per month, and per weekday-of-month.

    *dia* is "Sábados" or "Miércoles"; *mes* is the lowercase month name
    used in the per-month CSV filenames.

    NOTE(review): absolute Windows paths are hard-coded throughout;
    consider making the results directories configurable.
    """
    c = canvas.Canvas("C:/Users/58414/Django_curso/proyectosdjango/ProyectoLoteria/AppDia/programas/resultados/posibles_jugadas.pdf", pagesize=letter)
    c.setLineWidth(.3)
    c.setFont('Helvetica', 13)
    c.drawString(60, 740, 'POSIBLES JUGADAS PARA HOY: ')
    # The day arrives pluralised; print the singular day name in the title.
    if dia == "Sábados":
        c.drawString(267, 740, "Sábado")
    else:
        c.drawString(267, 740, "Miércoles")
    # Today's date, underlined, at the top right.
    fecha = datetime.now()
    month = format(fecha.month)
    year = format(fecha.year)
    day = format(fecha.day)
    c.drawString(400, 740, day + "/" + month + "/" + year)
    c.line(400, 737, 460, 737)
    c.setFont('Helvetica', 10)

    # --- Section 1: overall top 6 ------------------------------------
    c.drawString(30, 700, 'LOS 6 MAS SALIDORES')
    c.line(30, 697, 142, 697)
    archivo = "C:/Users/58414/Django_curso/proyectosdjango/ProyectoLoteria/AppNumeros/programas/resultados/"
    archivo = archivo + "archivo_todas_jugadas_ordenados_frecuencia.csv"
    _draw_top6(c, archivo, 680)

    # --- Section 2: top 6 for the given weekday ----------------------
    c.drawString(30, 580, 'LOS 6 MAS SALIDORES DEL ')
    if dia == "Sábados":
        c.drawString(170, 580, "Sábado")
    else:
        c.drawString(170, 580, "Miércoles")
    c.line(30, 577, 213, 577)
    # Filenames use unaccented lowercase day names.
    if dia == "Miércoles":
        el_dia = "miercoles"
    if dia == "Sábados":
        el_dia = "sabados"
    archivo = "C:/Users/58414/Django_curso/proyectosdjango/ProyectoLoteria/AppDia/programas/resultados/resultados_solo_" + el_dia + ".csv"
    _draw_top6(c, archivo, 560)

    # --- Section 3: top 6 for the given month ------------------------
    c.drawString(30, 460, 'LOS 6 MAS SALIDORES DE ')
    c.drawString(170, 460, mes)
    c.line(30, 457, 213, 457)
    archivo = "C:/Users/58414/Django_curso/proyectosdjango/ProyectoLoteria/AppDia/programas/resultados/archivo_" + mes + ".csv"
    _draw_top6(c, archivo, 440)

    # --- Section 4: top 6 for the weekday of the given month ---------
    c.drawString(30, 334, 'LOS 6 MAS SALIDORES DE LOS')
    c.drawString(185, 334, dia)
    c.drawString(230, 334, "de")
    c.drawString(245, 334, mes)
    c.line(30, 331, 270, 331)
    # The weekday-of-month files use the singular form for Saturday.
    if dia == "Sábados":
        dia = "Sábado"
    archivo = "C:/Users/58414/Django_curso/proyectosdjango/ProyectoLoteria/AppDia/programas/resultados/archivo_" + dia + "_" + mes + ".csv"
    _draw_top6(c, archivo, 314)

    c.save()
    return
#generar_pdf("Miércoles","enero")
#generar_pdf("Miércoles","enero")
| [
"jlmaldonaj@gmail.com"
] | jlmaldonaj@gmail.com |
3b34c3aa261597bb0b7a20265a7d26473b548dd0 | a50a4e874d3d203344a47bc7ad9c317b213eab90 | /base/config.py | 28c8fb077efd365c3408ab3d90723e234358ad31 | [] | no_license | fjl121029xx/yarn-api-python | d5b61ca0695d5fdc4f8923d5814f6576c3c87509 | 4468609dea2d7630fd9fc3dabbe7c02ded7aa4a1 | refs/heads/master | 2020-12-04T02:02:40.913088 | 2020-02-27T08:08:18 | 2020-02-27T08:08:18 | 231,563,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,782 | py | AppConfig = {
'DOHKO': {
# 'livyServerUri': 'http://172.20.44.6:8999/sessions/',
# 'yarnServerUri': 'http://172.20.44.6:8088/ws/v1/cluster/apps/',
# 'livyServerPath': '/usr/hdp/current/livy2-server/bin/livy-server',
'livyServerUri': 'http://172.26.25.148:8999/sessions/',
'yarnServerUri': 'http://172.26.25.148:8088/ws/v1/cluster/apps/',
'livyServerPath': '/home/hadoop/livy/bin/livy-server',
'readApp': {
"jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
"pyFiles": [],
"files": [],
"archives": [],
"kind": 'spark',
"driverMemory": '2g',
"driverCores": 1,
"executorMemory": '2g',
"executorCores": 2,
"numExecutors": 4,
"queue": 'default',
"heartbeatTimeoutInSecond": 86400,
"proxyUser": 'yqs',
'conf': {
"spark.default.parallelism": 12,
"spark.rdd.compress": True,
"spark.io.compression.codec": "snappy"
}
},
'writeApp': {
"jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
"pyFiles": [],
"files": [],
"archives": [],
"kind": 'spark',
"driverMemory": '512m',
"driverCores": 1,
"executorMemory": '2g',
"executorCores": 2,
"numExecutors": 2,
"queue": 'default',
"heartbeatTimeoutInSecond": 86400,
"proxyUser": 'yqs',
'conf': {
"spark.default.parallelism": 12,
"spark.rdd.compress": True,
"spark.io.compression.codec": "snappy"
}
}
},
'PRODUCT': {
# 'livyServerUri': 'http://rm.yqs.hualala.com:8999/sessions/',
# 'yarnServerUri': 'http://rm.yqs.hualala.com:8088/ws/v1/cluster/apps/',
# 'livyServerPath': '/home/olap/tools/apps/livy/bin/livy-server',
'livyServerUri': 'http://172.26.25.148:8999/sessions/',
'yarnServerUri': 'http://172.26.25.148:8088/ws/v1/cluster/apps/',
'livyServerPath': '/home/hadoop/livy/bin/livy-server',
'readApp': {
"jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
"pyFiles": [],
"files": [],
"archives": [],
"kind": 'spark',
"driverMemory": '16g',
"driverCores": 8,
"executorMemory": '10g',
"executorCores": 6,
"numExecutors": 35,
"queue": 'default',
"heartbeatTimeoutInSecond": 86400,
"proxyUser": None,
'conf': {
"spark.default.parallelism": 400,
"spark.scheduler.mode": "FAIR",
"spark.serializer": "org.apache.spark.serializer.KryoSerializer",
"spark.rdd.compress": True,
"spark.io.compression.codec": "snappy",
"spark.sql.inMemoryColumnarStorage.batchSize": 300000,
"spark.sql.files.maxPartitionBytes": 134217728,
"spark.sql.broadcastTimeout": 60,
"spark.sql.orc.enabled": True,
"spark.sql.orc.impl": "native",
"spark.sql.orc.enableVectorizedReader": True,
"spark.sql.hive.convertMetastoreOrc": True
}
},
'writeApp': {
"jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
"pyFiles": [],
"files": [],
"archives": [],
"kind": 'spark',
"driverMemory": '10g',
"driverCores": 4,
"executorMemory": '10g',
"executorCores": 6,
"numExecutors": 10,
"queue": 'default',
"heartbeatTimeoutInSecond": 86400,
"proxyUser": None,
'conf': {
"spark.default.parallelism": 400,
"spark.scheduler.mode": "FAIR",
"spark.serializer": "org.apache.spark.serializer.KryoSerializer",
"spark.rdd.compress": True,
"spark.io.compression.codec": "snappy",
"spark.sql.inMemoryColumnarStorage.batchSize": 300000,
"spark.sql.files.maxPartitionBytes": 134217728,
"spark.sql.broadcastTimeout": 60,
"spark.sql.orc.enabled": True,
"spark.sql.orc.impl": "native",
"spark.sql.orc.enableVectorizedReader": True,
"spark.sql.hive.convertMetastoreOrc": True,
"spark.sql.orc.filterPushdown": True,
"spark.sql.orc.char.enabled": True
}
}
}
}
| [
"sishengqikuo_xx@163.com"
] | sishengqikuo_xx@163.com |
4ff46f654c2a228d62028ea69d5fe8eb18d0d44b | 78ff488fdc75774abc808c9a4c0dd9566b23ae49 | /58103-Scalco-Valentina/TP3/Test1.py | 5164be4821681fb51b34d688f1f22dd74a60a059 | [] | no_license | valenscalco/um-programacion-i-2020 | 3dc84d2ca3337f129e6d85f5f0803277615031ec | 07d25a1999fbef2cff266fa3e27b77e4a8293f4b | refs/heads/master | 2022-07-17T13:01:19.565251 | 2020-05-20T16:45:48 | 2020-05-20T16:45:48 | 250,258,900 | 0 | 0 | null | 2020-03-26T12:57:44 | 2020-03-26T12:57:43 | null | UTF-8 | Python | false | false | 3,434 | py | import unittest
from Cajero import Atm, CantidadError, ValueError, MultiplicidadError, BilleteError, DisponibilidadError
from Billetes import Billete_1000, Billete_500, Billete_200, Billete_100
class TestAtm(unittest.TestCase):
    """Unit tests for Atm: counting, withdrawals and the error hierarchy.

    Fixture layout built by setUp:
      atm1: 10 x $1000
      atm2: 10 x $1000 + 20 x $500
      atm3: 10 x $1000 + 20 x $500 + 15 x $200
      atm4: empty
    """
    def setUp(self):
        self.cien = Billete_100("pesos", "$")
        self.docientos = Billete_200("pesos", "$")
        self.quinientos = Billete_500("pesos", "$")
        self.onek = Billete_1000("pesos", "$")
        self.atm1 = Atm()
        self.atm2 = Atm()
        self.atm3 = Atm()
        self.atm4 = Atm()
        lista_billetes = []
        # NOTE(review): the same list object is handed to atm1/atm2/atm3;
        # this is safe only if agregar_dinero copies its argument — confirm.
        lista_billetes = [self.onek for valor in range(0, 10)]
        self.atm1.agregar_dinero(lista_billetes)
        self.atm2.agregar_dinero(lista_billetes)
        self.atm3.agregar_dinero(lista_billetes)
        # atm2 additionally gets 20 x $500.
        lista_billetes = [self.quinientos for valor in range(0, 20)]
        self.atm2.agregar_dinero(lista_billetes)
        # atm3 gets the same 20 x $500 plus 15 x $200 appended to that list.
        for i in range(0, 15):
            lista_billetes.append(self.docientos)
        self.atm3.agregar_dinero(lista_billetes)
    def test_set_1_a(self):
        # Inventory report for atm1 (only $1000 notes).
        resultado = ["0 billetes de $100, Parcial $0",
                     "0 billetes de $200, Parcial $0", "0 billetes de $500, Parcial $0",
                     "10 billetes de $1000, Parcial $10000", "Total: $10000"]
        self.assertEqual(self.atm1.contar_dinero(), resultado)
    def test_set_1_b(self):
        self.assertEqual(self.atm1.extraer_dinero(5000), ["5 billetes de $1000"])
    def test_set_1_c(self):
        # More than the ATM holds.
        with self.assertRaises(CantidadError):
            self.atm1.extraer_dinero(12000)
    def test_set_1_d(self):
        # Not a multiple of the available denominations.
        with self.assertRaises(MultiplicidadError):
            self.atm1.extraer_dinero(5520)
    def test_set_1_e(self):
        # Negative amounts are rejected.
        with self.assertRaises(ValueError):
            self.atm1.extraer_dinero(-1000)
    def test_set_2_a(self):
        resultado = ["0 billetes de $100, Parcial $0",
                     "0 billetes de $200, Parcial $0", "20 billetes de $500, Parcial $10000",
                     "10 billetes de $1000, Parcial $10000", "Total: $20000"]
        self.assertEqual(self.atm2.contar_dinero(), resultado)
    def test_set_2_b(self):
        self.assertEqual(self.atm2.extraer_dinero(5000), ["5 billetes de $1000", "0 billetes de $500"])
    def test_set_2_c(self):
        self.assertEqual(self.atm2.extraer_dinero(12000), ["10 billetes de $1000",
                                                           "4 billetes de $500"])
    def test_set_2_d(self):
        # $100 needed but atm2 holds no $100 notes.
        with self.assertRaises(BilleteError):
            self.atm2.extraer_dinero(12100)
    def test_set_3_a(self):
        resultado = ["0 billetes de $100, Parcial $0",
                     "15 billetes de $200, Parcial $3000", "20 billetes de $500, Parcial $10000",
                     "10 billetes de $1000, Parcial $10000", "Total: $23000"]
        self.assertEqual(self.atm3.contar_dinero(), resultado)
    def test_set_3_b(self):
        self.assertEqual(self.atm3.extraer_dinero(5000), ["5 billetes de $1000", "0 billetes de $500", "0 billetes de $200"])
    def test_set_3_c(self):
        self.assertEqual(self.atm3.extraer_dinero(12000), ["10 billetes de $1000", "4 billetes de $500", "0 billetes de $200"])
    def test_set_3_d(self):
        with self.assertRaises(BilleteError):
            self.atm3.extraer_dinero(12100)
    def test_set_4_a(self):
        # Withdrawing from an empty ATM.
        with self.assertRaises(DisponibilidadError):
            self.atm4.extraer_dinero(1000)
self.atm4.extraer_dinero(1000)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| [
"mv.scalco@hotmail.com"
] | mv.scalco@hotmail.com |
883ef4f1e95ccb4ad230631341e6db9e90dcbc54 | 2011727bd90b0ffcb1da5a95bfdb28cbafc00a7c | /rpc.py | 97c347a07c9c8080661df9f60c2cf5c9c9235892 | [
"MIT"
] | permissive | ronitray95/rpc_ftp_example | 3311009462a587b4a8a870abe4562e58c98a31b4 | 91c33c54b11a5fe8f02a9f880271e7fcd749371b | refs/heads/master | 2023-03-02T19:57:17.958155 | 2021-02-10T05:13:00 | 2021-02-10T05:13:00 | 337,624,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | #!/usr/bin/env python3
import os
from ftplib import *
FTP_URL = 'localhost'  # NOTE(review): unused — connect() hard-codes 'localhost'
FTP_PORT = 5000


def initFTP(cmd, *args):
    """Send *cmd* (plus stringified *args*) to the RPC-over-FTP server and
    return the first line of its reply.

    Protocol: write "cmd arg1 arg2 ... " into cmd.txt, upload it as
    input.txt, download the server's output.txt into userop.txt, read one
    line, then remove the temporary files.
    """
    ftp = FTP()
    ftp.connect('localhost', FTP_PORT)
    ftp.getwelcome()
    ftp.login('user', '12345')
    fname = 'cmd.txt'
    with open(fname, 'w') as f:
        # Original wire format preserved: command, a space, then each
        # argument followed by a trailing space.
        s = ''.join(str(arg) + ' ' for arg in args)
        f.write(cmd + ' ' + s)
    with open(fname, 'rb') as f:
        ftp.storbinary('STOR input.txt', f)
    # Use a context manager so the download target is closed; the original
    # leaked the file object from `open('userop.txt', 'wb').write`.
    with open('userop.txt', 'wb') as out:
        ftp.retrbinary('RETR output.txt', out.write)
    ftp.sendcmd("QUIT")
    s = ''
    with open('userop.txt', 'r') as f:
        s = f.readline()
    # NOTE(review): input.txt/output.txt are removed from the *local* cwd —
    # this only works when the server shares the client's directory; confirm.
    os.remove('cmd.txt')
    os.remove('userop.txt')
    os.remove('input.txt')
    os.remove('output.txt')
    return s
| [
"ronitray95@gmail.com"
] | ronitray95@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.