| column | type | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable (⌀) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
ea6e69e46c5deb8331ab4c3e5c27848242b75155
|
82ef810dcdf1baf6555df6c9409c1692a4f30a2f
|
/myportal/rootApp/models.py
|
ecbc8e5c53e274faabe884e6ae988783e738dfe5
|
[] |
no_license
|
ernest19/xtralat
|
20dd89527738f733c7a23f33901d8cc9e4a552fb
|
61ba6f3caf90dc2df460d822b9556600ff8d6ed4
|
refs/heads/master
| 2023-03-08T16:17:40.046895
| 2021-02-26T11:57:31
| 2021-02-26T11:57:31
| 342,561,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,411
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Updated the usertbl with AbstractBaseUser
from django.contrib.auth.base_user import AbstractBaseUser
# GeoDjango's models module re-exports django.db.models, so importing it
# alone covers both regular and geometry fields.
from django.contrib.gis.db import models
from django.contrib.auth.models import User
# Create your models here.
#/// Web Forms /////////////////////////////////////////////////////////////////////////////////
class timeStamp(models.Model):
"""
Description: This models is an abstract class that defines the columns that should be present in every table.
"""
created_date = models.DateTimeField(auto_now=True)
delete_field = models.CharField(max_length=10, default="no")
class Meta:
abstract = True
class ProtectedArea(models.Model):
id = models.IntegerField(primary_key=True)
geom = models.GeometryField(blank=True, null=True)
reserve_na = models.CharField(max_length=30, blank=True, null=True)
region = models.CharField(max_length=30, blank=True, null=True)
area_sqkm = models.FloatField(blank=True, null=True)
class Meta:
managed = False
db_table = 'Protected Area'
class RegionalBoundary2019(models.Model):
id = models.IntegerField(primary_key=True)
geom = models.GeometryField(blank=True, null=True)
region = models.CharField(max_length=50, blank=True, null=True)
class Meta:
managed = False
db_table = 'Regional_Boundary_2019'
class District(models.Model):
id = models.IntegerField(primary_key=True)
geom = models.GeometryField(blank=True, null=True)
created_da = models.CharField(max_length=254, blank=True, null=True)
delete_fie = models.CharField(max_length=10, blank=True, null=True)
district_n = models.CharField(max_length=50, blank=True, null=True)
district = models.CharField(max_length=10, blank=True, null=True)
region = models.CharField(max_length=5, blank=True, null=True)
class Meta:
managed = False
db_table = 'district'
class Forestry(models.Model):
geom = models.GeometryField(blank=True, null=True)
name = models.CharField(max_length=254, blank=True, null=True)
area = models.CharField(max_length=10, blank=True, null=True)
north = models.CharField(max_length=15, blank=True, null=True)
west = models.CharField(max_length=15, blank=True, null=True)
class Meta:
managed = False
db_table = 'forestry'
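# Illustrative sketch (not part of the original file): a spatial lookup against
# one of these unmanaged models. Assumes a configured spatial database backend;
# the default coordinates are hypothetical.
def example_protected_area_lookup(lng=-1.6, lat=6.7):
    from django.contrib.gis.geos import Point
    return ProtectedArea.objects.filter(geom__contains=Point(lng, lat))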
|
[
"eopokukwarteng@gmail.com"
] |
eopokukwarteng@gmail.com
|
095cd2ef6b508c966d83f3087b4d7f37e01079fe
|
8257051114b25cf9d94ed2eb8c4d265292470d47
|
/Selenium.py
|
320ea7927ddba619c2bf1616bf2f520f6c2edfc6
|
[] |
no_license
|
Revisto/NLP
|
b5e33918224edf7e3f86dd3828dc7a403c99ba60
|
3402fa61c106eb308f9ed88692a0aad015f20bfd
|
refs/heads/master
| 2022-04-14T16:38:29.294436
| 2020-04-10T21:41:36
| 2020-04-10T21:41:36
| 254,740,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import selenium
import time
import random
File = open("API_KEY_.txt", "a+")
for i in range(100000):
# Path is assigned but never used: the hard-coded Linux driver path below takes effect.
# A raw string keeps the backslashes from being read as escape sequences.
Path = r"C:\ChromeDriver2\chromedriver.exe"
Drive = webdriver.Chrome("/usr/lib/chromium-browser/chromedriver")
Drive.get("https://app.text-mining.ir/account/register")
SearchBox = Drive.find_element_by_name("Email")
SearchBox.send_keys(str(random.randint(999999999999999999999,999999999999999999999999999999999999999999))+"@gmail.com")
SearchBox.send_keys(Keys.ENTER)
SearchBox = Drive.find_element_by_name("Password")
SearchBox.send_keys("ThisIsABot123456789")
SearchBox.send_keys(Keys.ENTER)
SearchBox = Drive.find_element_by_name("ConfirmPassword")
SearchBox.send_keys("ThisIsABot123456789")
SearchBox.send_keys(Keys.ENTER)
Drive.get("https://app.text-mining.ir/CustomerPanel/ApiKeys/Create")
SearchBox=Drive.find_element_by_class_name("form-control")
SearchBox.send_keys("1")
SearchBox.send_keys(Keys.ENTER)
SearchBox=Drive.find_element_by_xpath('//*[@id="listtable"]/tbody/tr[2]/td[2]/span').text
print (SearchBox)
File.write('"'+SearchBox+'"'+","+"\n")
Drive.close()
File.close()
|
[
"walt.shabani@gmail.com"
] |
walt.shabani@gmail.com
|
09b4e2ea0f8daa1f00dafee64896d4ae07f9cb74
|
58778e553b55c3a9e7a9a61a9b7cdea656aa032c
|
/src/prototype/actions/propModelJoin.py
|
248bd40228462d067f32a64bc8a389bf647c7731
|
[] |
no_license
|
douarime/ProtoExt
|
44025c4535440313497500e53dba4a522f2697a4
|
b74852daf6763495c530a31aecbfeb0133c3e295
|
refs/heads/master
| 2020-12-28T10:34:23.717859
| 2014-01-23T02:04:29
| 2014-01-23T02:04:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,480
|
py
|
# -*- coding: utf-8 -*-
from prototype.models import PropertyModel
from datetime import datetime
def doPropModelJoin( queryset ):
"""
No se pueden crear props de diferentes modelos, pues el modelo hace parte de la llave
y aunq yo pienso q deberia factorisarse todo a nivel de proeyecto, es importante saber
saber cual es el modelo, es posible q la vision de un analista sea limitada a modelos especificos
de todas formas se puede establecer equivalencias entre proModel
"""
myBase = None
sAux = ''
# Check that all entries share the same model and the same type
for propModel in queryset:
if propModel.conceptType == 'ref':
sAux = 'References are not matchable: ' + propModel.code
return {'success':False , 'message' : sAux }
sAux += '-' + propModel.code
if myBase is None:
myBase = propModel
continue
if myBase.model.code != propModel.model.code:
sAux = 'model mismatch: ' + myBase.model.code + '-' + propModel.model.code
return {'success':False , 'message' : sAux }
# Create the new propModel
# TODO: Implement real security; this copies from the base record
if len( sAux ) > 40: sAux = sAux[:40] + str( datetime.now() )
defValues = {
'code' : sAux[1:],
'model' : myBase.model,
'baseType' : myBase.baseType,
'prpLength' : myBase.prpLength,
'prpScale' : myBase.prpScale,
'vType' : myBase.vType,
'prpDefault' : myBase.prpDefault,
'prpChoices' : myBase.prpChoices,
'isSensitive' : myBase.isSensitive,
'description' : myBase.description,
'smOwningTeam' : myBase.smOwningTeam,
'smOwningUser' : myBase.smOwningUser,
'smCreatedBy' : myBase.smCreatedBy,
'smModifiedBy' : myBase.smModifiedBy ,
'smRegStatus' : myBase.smRegStatus ,
'smWflowStatus' : myBase.smWflowStatus ,
'smCreatedOn' : myBase.smCreatedOn ,
'smModifiedOn' : myBase.smModifiedOn
}
myBase = PropertyModel( **defValues )
myBase.save()
# Update the dependent Property records
for propModel in queryset:
propModel.property_set.update(propertyModel= myBase )
sAux += ' ' + propModel.code
# Delete the old propModels
queryset.delete()
return {'success':True , 'message' : sAux }
|
[
"dariogomezt@hotmail.com"
] |
dariogomezt@hotmail.com
|
fcc334e799ef4601b29842814eb335553b0c2e12
|
b55df7681dc339fe94c28a7df4042db590d9611a
|
/chargen.py
|
8ab652b5b0d2867006c89a4286b8bd3ba8b932a0
|
[] |
no_license
|
MatthewKLewis/python-cli-chargen
|
a9e284556d977b7cab32ff8fafcee606e9192bfb
|
055f782b10004e7339df49714fc58035c15ce177
|
refs/heads/master
| 2023-01-21T11:35:19.279484
| 2020-11-30T00:36:24
| 2020-11-30T00:36:24
| 317,071,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,596
|
py
|
import os, sys, random, datetime
import PySimpleGUI as sg
from peewee import *
from pyguiwrapper import *
# Works around a DISPLAY environment-variable issue on some setups
os.environ['DISPLAY'] = ':0.0'
# connect to database
db = PostgresqlDatabase('characters', user='postgres', password='newPassword', host='localhost', port=5432)
db.connect()
print('Connected to Database')
# Schema -------------------------------------------RUN WHEN DONE
class BaseModel(Model):
class Meta:
database = db
class Character(BaseModel):
name = CharField(unique=True)
bio = TextField()
age = IntegerField()
race = CharField()
homeworld = CharField()
region = CharField()
# '=' (not ':') is required here; a bare annotation would not create a model field.
createdDate = DateTimeField(default=datetime.datetime.now)
# etc etc
db.create_tables([Character])
def safeSave(databaseEntry):
try:
databaseEntry.save()
print('entry saved!')
except Exception as e:
print("entry failed to save:", e)
# Pull info from Database
characterQuery = Character.select()
# Create temporary container for new characters
newCharArray = []
# UI Loop
while True:
#top level control flow
topWindow = createTopWindow()
event, values = topWindow.read()
# if the user closes the top window
if event == sg.WIN_CLOSED:
break # goes to window.close()
if event == 'Create':
print('create')
topWindow.close()
initialInfoWindow = createInitialInfoWindow()
initialInfoEvent , initialInfo = initialInfoWindow.read()
if initialInfoEvent == 'Submit':
initialInfoWindow.close()
homeworldWindow = createHomeworldWindow()
homeworldEvent, homeworldInfo = homeworldWindow.read()
if homeworldEvent == 'Submit':
homeworldWindow.close()
# First Window Info
newCharArray.append(initialInfo[0]) #Name
newCharArray.append(initialInfo[1]) #Age
newCharArray.append(initialInfo[2]) #Bio
if initialInfo[3]: newCharArray.append('Human') #IF TRUE append Human
if initialInfo[4]: newCharArray.append('Android')
if initialInfo[5]: newCharArray.append('New-type')
# Second Window Info
if homeworldInfo[0]: newCharArray.append('Earth')
if homeworldInfo[1]: newCharArray.append('Sidespace')
if homeworldInfo[2]: newCharArray.append('Mars')
newCharArray.append(homeworldInfo[3]) #Region
print(newCharArray)
tempDatabaseEntry = Character(
name=newCharArray[0],
age=newCharArray[1],
bio=newCharArray[2],
race=newCharArray[3],
homeworld=newCharArray[4],
region=newCharArray[5]
)
safeSave(tempDatabaseEntry)
pass
if event == ' View ':
print('view')
topWindow.close()
characterViewerWindow = createCharacterViewerWindow(characterQuery)
ccvEvent , cvInfo = characterViewerWindow.read()
if event == 'Modify':
print('modify')
topWindow.close()
characterViewerWindow = createCharacterViewerWindow(characterQuery)
ccvEvent , cvInfo = characterViewerWindow.read()
if event == 'Delete':
print('delete')
topWindow.close()
characterViewerWindow = createCharacterViewerWindow(characterQuery)
ccvEvent , cvInfo = characterViewerWindow.read()
# topWindow.close()
|
[
"matthew.knight.lewis@gmail.com"
] |
matthew.knight.lewis@gmail.com
|
8f94e1a0d91cce2c115e02194c6a231e51c544b3
|
2ecdd403ec11d63f7f7c3377c9047a143b13c8fb
|
/src/server/physics/test_physics.py
|
825564f04e53c855d7db2b41de289a2eaef400d7
|
[
"MIT"
] |
permissive
|
umbc-hackafe/bridgesim
|
505cb3a469b8a1be43ca3d3c956520eacf6e97bb
|
6075dbc3afa8b017f86c628757c3d6c329949546
|
refs/heads/master
| 2021-01-23T22:54:42.203980
| 2015-01-20T21:59:29
| 2015-01-20T21:59:29
| 23,506,206
| 6
| 0
| null | 2014-08-31T19:46:57
| 2014-08-31T04:10:14
|
C
|
UTF-8
|
Python
| false
| false
| 3,928
|
py
|
#!/usr/bin/env python
import unittest
from vectors import *
class TestNVectors(unittest.TestCase):
def setUp(self):
self.v11 = NVector(1, 1)
self.v22 = NVector(2, 2)
self.v34 = NVector(3, 4)
self.v10 = NVector(10, 0)
self.vneg = NVector(-2, -2)
def test_dimensionality(self):
"""Test counting of number of dimensionality"""
self.assertEqual(self.v11.dimensionality(), 2)
def test_magnitude(self):
"""Test magnitude calculation"""
self.assertEqual(self.v34.magnitude(), 5)
def test_norm(self):
"""Test unit vector calculation"""
self.assertEqual(self.v10.norm(), NVector(1, 0))
self.assertEqual(self.v11.norm(),
NVector(0.7071067811865475, 0.7071067811865475))
def test_dot(self):
"""Test dot product calculation"""
self.assertEqual(self.v22.dot(self.v34), 14)
with self.assertRaises(DimensionalityError):
NVector(2, 2).dot(NVector(3, 3, 3))
def test_init(self):
"""Check initialization"""
self.assertEqual(self.v11.dimensions, (1, 1))
self.assertEqual(self.v34.dimensions, (3, 4))
self.assertEqual(self.vneg.dimensions, (-2, -2))
def test_equality(self):
"""Check equality between vectors"""
self.assertEqual(NVector(5, 5), NVector(5, 5))
self.assertNotEqual(NVector(3, 4), NVector(4, 3))
def test_neg(self):
"""Check negation"""
self.assertEqual(NVector(1, -1), -NVector(-1, 1))
self.assertNotEqual(NVector(10, 5), -NVector(10, 5))
def test_truth(self):
"""Check truth values"""
self.assertFalse(NVector(0, 0, 0, 0))
self.assertTrue(NVector(1, 0))
self.assertTrue(NVector(-10, -20, -30))
def test_addition(self):
"""Check vector addition"""
self.assertEqual(NVector(3, 2, 1, 0) + NVector(0, 1, 2, 3),
NVector(3, 3, 3, 3))
# Make sure some exceptions are raised.
with self.assertRaises(DimensionalityError):
NVector(2, 2) + NVector(3, 3, 3)
with self.assertRaises(TypeError):
NVector(1, 1) + 10
def test_subtraction(self):
"""Check vector subtraction"""
self.assertEqual(NVector(3, 2, 1, 0) - NVector(0, 1, 2, 3),
NVector(3, 1, -1, -3))
# Make sure some exceptions are raised.
with self.assertRaises(DimensionalityError):
NVector(2, 2) - NVector(3, 3, 3)
with self.assertRaises(TypeError):
NVector(1, 1) - 10
def test_multiplication(self):
"""Check vector and scalar multiplication"""
self.assertEqual(NVector(4, 2) * 10, NVector(40, 20))
self.assertEqual(2 * NVector(1, 1), NVector(2, 2))
self.assertEqual(NVector(3, 3) * NVector(2, 2), NVector(6, 6))
# Make sure some exceptions are raised.
with self.assertRaises(DimensionalityError):
NVector(1) * NVector(2, 2)
def test_division(self):
"""Check vector and scalar true and floor division"""
self.assertEqual(NVector(5, 5) / NVector(2, 2), NVector(2.5, 2.5))
self.assertEqual(NVector(5, 5) // NVector(2, 2), NVector(2, 2))
self.assertEqual(NVector(5, 5) / 2, NVector(2.5, 2.5))
self.assertEqual(NVector(5, 5) // 2, NVector(2, 2))
with self.assertRaises(DimensionalityError):
NVector(3, 3, 3) / NVector(2, 2)
with self.assertRaises(DimensionalityError):
NVector(3, 3, 3) // NVector(2, 2)
with self.assertRaises(TypeError):
5 / NVector(1, 1)
with self.assertRaises(TypeError):
5 // NVector(1, 1)
def test_stringy(self):
"""Test string formatting"""
self.assertEqual(str(NVector(1, 1)), "<1.000000, 1.000000>")
class TestVectors(unittest.TestCase):
def setUp(self):
self.v0 = Vector()
self.v333 = Vector(3, 3, 3)
self.v234 = Vector(2, 3, 4)
def test_cross(self):
self.assertEqual(Vector(5, 0, 0).cross(Vector(0, 5, 0)),
Vector(0, 0, 25))
self.assertEqual(Vector(0, 5, 0).cross(Vector(5, 0, 0)),
Vector(0, 0, -25))
if __name__ == "__main__":
unittest.main()
|
[
"sasha@crofter.org"
] |
sasha@crofter.org
|
a8e44bafb1a20a1932c0a0d426b34cf0d7a91d49
|
47436fa43825f070dd95aa408efceb3f85f34b16
|
/PID.py
|
796339c0c21c47005c475de11f127ed65d382078
|
[] |
no_license
|
IamShyam/sattrack
|
abc7b020b8533cff47aacd850a506f6980fc95ab
|
208ce189170b969aa511d5651cbc0b56fa93716a
|
refs/heads/master
| 2020-04-01T04:32:54.140193
| 2018-10-13T08:22:23
| 2018-10-13T08:22:23
| 152,868,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,234
|
py
|
class PID:
"""PID Controller
"""
def __init__(self, P=20, I=0.0, D=0.0):
self.Kp = P
self.Ki = I
self.Kd = D
self.clear()
def clear(self):
"""Clears PID computations and coefficients"""
self.PTerm = 0.0
self.ITerm = 0.0
self.DTerm = 0.0
self.last_error = 0.0
# Windup Guard
self.int_error = 0.0
self.windup_guard = 20.0
self.output = 0.0
def update(self, error):
"""Calculates PID value for given reference feedback
"""
delta_error = error - self.last_error
self.PTerm = self.Kp * error
self.ITerm += error
if (self.ITerm < -self.windup_guard):
self.ITerm = -self.windup_guard
elif (self.ITerm > self.windup_guard):
self.ITerm = self.windup_guard
# if abs(error) < 3: self.ITerm = 0
self.DTerm = delta_error
# Remember last error for next calculation
self.last_error = error
self.output = self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)
def setKp(self, proportional_gain):
"""Determines how aggressively the PID reacts to the current error with setting Proportional Gain"""
self.Kp = proportional_gain
def setKi(self, integral_gain):
"""Determines how aggressively the PID reacts to the current error with setting Integral Gain"""
self.Ki = integral_gain
def setKd(self, derivative_gain):
"""Determines how aggressively the PID reacts to the current error with setting Derivative Gain"""
self.Kd = derivative_gain
def setWindup(self, windup):
"""Integral windup, also known as integrator windup or reset windup,
refers to the situation in a PID feedback controller where
a large change in setpoint occurs (say a positive change)
and the integral terms accumulates a significant error
during the rise (windup), thus overshooting and continuing
to increase as this accumulated error is unwound
(offset by errors in the other direction).
The specific problem is the excess overshooting.
"""
self.windup_guard = windup
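# Illustrative usage sketch (not part of the original file): drive a toy
# first-order plant toward a setpoint of 10 with this controller. The gains
# and the 0.1 plant-response factor are hypothetical.
if __name__ == "__main__":
    pid = PID(P=1.2, I=0.05, D=0.3)
    setpoint, measurement = 10.0, 0.0
    for _ in range(50):
        pid.update(setpoint - measurement)  # feed the current error
        measurement += 0.1 * pid.output     # hypothetical plant response
    print("final measurement: %.2f" % measurement)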
|
[
"shyam141098@gmail.com"
] |
shyam141098@gmail.com
|
f9e1f690fc14f3838e5b90d9bc77f55ae6c8b20d
|
0895e65ec003c9ac27398abdd2ba83680dfba2c7
|
/StatsBot/basicLib.py
|
a067aa239a867169d4362c75890e48a8dd471da3
|
[] |
no_license
|
Reltor/ECLStatBot
|
42e7baa5febe95cf57ee47552af48ce91b6c039e
|
62d10f0470ea7cac06868e05626fa8160a133bf2
|
refs/heads/master
| 2021-01-25T14:03:24.570469
| 2018-03-26T00:48:08
| 2018-03-26T00:48:08
| 123,641,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
#BASIC functions for Python - Super Multi-Purpose
def getInt(prompt="Please give a number"):
num = -1
done = False
while not done:
try:
num = int(input(prompt))
done = True
except ValueError:
done = False
return num
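# Illustrative usage (not part of the original file): getInt keeps
# re-prompting until the input parses as an integer.
if __name__ == "__main__":
    n = getInt("Please give a number: ")
    print("You entered", n)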
|
[
"noreply@github.com"
] |
Reltor.noreply@github.com
|
a393381fc709d221953a74740fee4525177c337d
|
f41a57efd9f5ec3a67c8459a926c283bc75f0248
|
/tripAdvisor_en/crawler_Advisor_en.py
|
c04fb2a67aaeb055d6829e33bf626f553c84e030
|
[] |
no_license
|
kokais/crawler_script
|
e245bf944310bcc70439d621106e88ddb149c00d
|
bed64584588f830ac47c8b9cab7fac056e16036e
|
refs/heads/master
| 2021-06-18T05:28:57.241026
| 2017-06-20T11:19:30
| 2017-06-20T11:19:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,293
|
py
|
# coding:utf-8
import re
import time
from urllib import quote
import urllib2
import MySQLdb
from bs4 import BeautifulSoup
import json
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
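# Note (added for clarity): this script is Python 2 only; it relies on urllib2,
# `except ExceptionType, e` syntax, and print statements throughout.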
class TA_EN:
def __init__(self):
self.siteURL = 'http://www.tripadvisor.com'
self.category = {1: 'spa', 2: quote('夜店'), 3: quote('夜宵'), 4: quote('酒吧'), 5: quote('表演')}  # spa, nightclub, late-night snacks, bar, shows (URL-encoded Chinese query terms, kept as-is)
self.city = {1: '293916', 2: '293920', 3: '293919', 4: '293917', 5: '293918'}  # Bangkok, Phuket, Pattaya, Chiang Mai, Koh Samui
self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'
self.headers = {'User-Agent': self.user_agent}
# Fetch the main page HTML
def getPage(self, url):
try:
request = urllib2.Request(url, headers=self.headers)
response = urllib2.urlopen(request)
page = response.read()
pageCode = re.sub(r'<br[ ]?/?>', '\n', page)
return pageCode
except urllib2.URLError, e:
if hasattr(e, "reason"):
print e.reason
return None
# Collect links to shop detail pages
def getHref(self, url):
page = self.getPage(url)
soup = BeautifulSoup(page, 'html.parser')
href = []
items = soup.find_all("h3",class_="title")
# items = soup.find_all("div", class_="property_title")
for item in items:
href.append(item.a['href'])
return href
def getShopId(self, detailURL):
shop_id = re.sub(r'.*?-d|-Revi.*', '', detailURL)
return shop_id
# Fetch a shop detail page's HTML
def getDetailPage(self, detailURL):
try:
shopURL = self.siteURL + str(detailURL)
request = urllib2.Request(shopURL, headers=self.headers)
response = urllib2.urlopen(request)
detailPage = response.read()
detailPageCode = re.sub(r'<br[ ]?/?>', '\n', detailPage)
return detailPageCode
except urllib2.URLError, e:
if hasattr(e, "reason"):
print e.reason
return None
# Fetch a review page's HTML (note: this URL still points at dianping.com,
# apparently left over from an earlier crawler)
def getCommentPage(self, detailURL, pageid):
try:
shopURL = "http://www.dianping.com" + str(detailURL) + '/review_all?pageno=' + str(pageid)
request = urllib2.Request(shopURL, headers=self.headers)
response = urllib2.urlopen(request)
commentPage = response.read()
commentPageCode = re.sub(r'<br[ ]?/?>', '\n', commentPage)
return commentPageCode
except urllib2.URLError, e:
if hasattr(e, "reason"):
print e.reason
return None
# Get review images for a shop
def getCommentIma(self, page):
commentImaHref = []
soup = BeautifulSoup(page, 'html.parser')
commentList = soup.find("div", class_="comment-list")
commentItems = commentList.find_all(name="li", attrs={"data-id": re.compile('^\d*$')})
for commentItem in commentItems:
try:
commentImaHref.append(commentItem.find("div", class_="shop-photo").ul.li.a.img.get('src'))
except AttributeError, e:
continue
return commentImaHref
# Get shop reviews
def getComment(self, page):
comment = []
soup = BeautifulSoup(page, 'html.parser')
commentList =soup.find_all(name="div", attrs={"class": re.compile('reviewSelector.*?'),"id":re.compile('review_.*?')})
# for commentItem in commentList:
# comment.append(commentItem.string)
return commentList
# Fetch the shop photo page
def getImaPage(self, page):
soup = BeautifulSoup(page, 'html.parser')
photoPageURL = soup.find("div", class_="main_section photobar").a['href']
photoPage = self.getDetailPage(photoPageURL)
return photoPage
# Collect image links
def getImaHref(self, page):
imaHref = []
if page is None:
return imaHref
else:
soup = BeautifulSoup(page, 'html.parser')
pictureList = soup.find_all("div", class_="imgBx")
for pictureItem in pictureList:
imaHref.append(pictureItem.find('img').get('src'))
return imaHref
# Get shop information
def getInfo(self, page):
soup = BeautifulSoup(page, 'html.parser')
shopInfoList = []
# Shop name, address, phone, opening hours, description
shopInfoList.append(soup.find("div", class_="warLocName").string)
try:
shopLocation1 = soup.find("span", class_="extended-address").string
shopLocation2 = soup.find("span", class_="street-address").string
shopLocation = shopLocation2 + '|' + shopLocation1
shopInfoList.append(shopLocation)
except AttributeError, e:
shopInfoList.append("")
try:
shopInfoList.append(soup.find("div", class_="phoneNumber").string)
except AttributeError, e:
shopInfoList.append("")
try:
shopTimeList = []
shopOfficeTimes = soup.find("div", id="HOUR_OVERLAY_CONTENTS").find_all("span")
for shopTime in shopOfficeTimes:
shopTimeList.append(shopTime.string.replace("\n", ""))
shopOfficeTime = ''.join(shopTimeList)
shopInfoList.append(shopOfficeTime)
except:
shopInfoList.append("")
try:
shopDesTag = soup.find("div", class_="listing_details").getText()
shopInfoList.append(shopDesTag)
except AttributeError, e:
shopInfoList.append("")
return shopInfoList
# Get shop coordinates
def getLocation(self, page):
location = []
lat = re.findall(re.compile('lat: (.*?),'), page)
location.append(lat[0])
lng = re.findall(re.compile('lng: (.*?),'), page)
location.append(lng[0])
return location
# Shop rating
def getRatings(self, page):
soup = BeautifulSoup(page, 'html.parser')
try:
rating = soup.find("div", class_="rs rating").img['content']
except:
rating = '0'
return rating
# Shop tags
def getShopTag(self, page):
soup = BeautifulSoup(page, 'html.parser')
tag = soup.find("div", class_="heading_details").getText().replace("\n", "")
return tag
# Shop ranking
def getRanking(self, page):
soup = BeautifulSoup(page, 'html.parser')
ranking = soup.find("div", class_="slim_ranking").getText().replace('\n', '')
return ranking
# Get the average price per person
def getAvgPrice(self, page):
soup = BeautifulSoup(page, 'html.parser')
avgPrices = soup.find("div", class_="offer_price_box").find("div", class_="display_price smaller").getText()
avgPrice = str(avgPrices).replace("CN¥", "").replace("*", "")
return avgPrice
def start(self):
conn = MySQLdb.connect(
host='127.0.0.1',
user='root',
passwd='root',
db='dtrip',
port=3306,
charset="utf8"
)
cur = conn.cursor()
print "抓取中..."
a = 0
for city_id in range(1, 6):
for category_id in range(1, 6):
for pageNum in range(0, 300, 30):
try:
# url = "https://www.tripadvisor.com.tr/Attraction_Review-g293916-d496987-Reviews-Wat_Pho_Thai_Traditional_Massage_School-Bangkok.html"
url = "file:///Users/Zyang/Movies/tripadvisor_en/geo=" + str(
self.city[city_id]) + "&pid=3826&q=" \
+ str(self.category[category_id]) + "%23&o=" + str(pageNum) + ".html"
hrefs = self.getHref(url)
for href in hrefs:
shop_id = self.getShopId(href)
# cur.execute("SELECT id FROM merchants_en WHERE shop_id=" + shop_id + "")
# results = cur.fetchone()
#
# if results is not None:
# print "已存在"
# a += 1
# continue
# else:
try:
#
# Infodict = {}.fromkeys(
# ('city_id', 'bio', 'category_id', 'name', 'office_hours', 'lat', 'lng', \
# 'avgprice', 'address', 'phone', 'description', 'coordinate', 'photo_urls',
# 'content_image_url', 'ratings', 'ranking', 'shop_tag'))
# page = self.getDetailPage(href)
#
# shopInfos = self.getInfo(page)
# time.sleep(1.5)
# location = self.getLocation(page)
# time.sleep(1.5)
# imgpage = self.getImaPage(page)
# shop_ima = {}
# photos = self.getImaHref(imgpage)
# for i in range(0, len(photos)):
# shop_ima[i] = photos[i]
# shop_ima_json = json.dumps(shop_ima, indent=1)
#
# Infodict['ranking'] = str(self.getRanking(page))
# Infodict['shop_tag'] = str(self.getShopTag(page))
# Infodict['shop_id'] = int(shop_id)
# Infodict['city_id'] = int(city_id)
# Infodict['category_id'] = int(category_id)
# Infodict['name'] = str(shopInfos[0]).replace("'", "")
# Infodict['office_hours'] = str(shopInfos[3]).replace('\n', '').replace(' ', '')
# try:
# Infodict['avgprice'] = float(self.getAvgPrice(page))
# except:
# Infodict['avgprice'] = float('0')
# Infodict['address'] = str(shopInfos[1]).replace('\n', '').replace(' ', '')
# Infodict['phone'] = str(shopInfos[2]).replace("Phone Number: ", "")
# try:
# Infodict['lat'] = float(location[0])
# Infodict['lng'] = float(location[1])
# except IndexError, e:
# Infodict['lat'] = float(0)
# Infodict['lng'] = float(0)
# Infodict['photo_urls'] = str(shop_ima_json)
# Infodict['ratings'] = float(self.getRatings(page)[0])
# Infodict['bio'] = ''
# try:
# Infodict['description'] = str(shopInfos[4]).replace('\n', '').replace("'", "")
# except:
# Infodict['description'] = str('')
# try:
# Infodict['content_image_url'] = str(photos[0])
# except IndexError, e:
# Infodict['content_image_url'] = str('')
# print Infodict
# try:
# cur.execute("INSERT INTO merchants_en(shop_id,name,category_id,coordinate, ratings,phone,city_id,avgprice,office_hours,address,bio, description,photo_urls,content_image_url,ranking,shop_tag) \
# VALUES (%d, '%s',%d, ST_POINTFROMTEXT('POINT(%f %f)'),%.1f, '%s', %d, %f, '%s', '%s', '%s', '%s' , '%s' , '%s','%s','%s');" % \
# (Infodict['shop_id'], Infodict['name'], Infodict['category_id'],
# Infodict['lng'], Infodict['lat'], Infodict['ratings'],
# Infodict['phone'],
# Infodict['city_id'], Infodict['avgprice'],
# Infodict['office_hours'],
# Infodict['address'], Infodict['bio'], Infodict['description'],
# Infodict['photo_urls'], Infodict['content_image_url'],
# Infodict['ranking'], Infodict['shop_tag']))
# conn.commit()
#
# except:
# print "error"
# continue
# a += 1
# print "抓取成功第" + str(a) + "条"
page = self.getDetailPage(href)
Commentdict = {}.fromkeys(
('shop_id', 'merchant_id', 'created_by', 'author_name', 'content'))
# time.sleep(1.5)
commentList = self.getComment(page)
for commentItem in commentList:
cur.execute("select id from merchants_en where shop_id = " + shop_id + ";")
merchant_id = cur.fetchone()
Commentdict['merchant_id'] = int(merchant_id[0])
Commentdict['created_by'] = int('0')
try:
Commentdict['author_name'] = str(
commentItem.find(name="div",attrs={"class", "username mo"}).getText()).replace("\n","")
except AttributeError, e:
continue
# time.sleep(1.5)
Commentdict['shop_id'] = int(shop_id)
try:
Commentdict['content'] = str(
commentItem.find("p", class_="partial_entry").getText()).replace("\n", "")
except AttributeError, e:
continue
# cur.execute("SELECT id FROM comments_en WHERE author_name='" + Commentdict[
# 'author_name'] + "'")
# results = cur.fetchone()
#
# if results is not None:
# print "已存在"
# continue
# else:
try:
cur.execute("INSERT INTO comments_en(shop_id,merchant_id,created_by,author_name,content) \
VALUES (%d , %d ,%d ,'%s' ,'%s' );" % \
(Commentdict['shop_id'], Commentdict['merchant_id'],
Commentdict['created_by'], Commentdict['author_name'],
Commentdict['content']))
conn.commit()
except:
continue
print "数据+1"
except AttributeError, e:
a += 1
print "抓取失败"
continue
except TypeError, e:
print "override"
continue
cur.close()
conn.close()
print "抓取完成,共" + str(a) + "条记录"
ta_en = TA_EN()
ta_en.start()
# dzdp.getShopId('http://www.tripadvisor.cn/Search?geo=293917&pid=3826&q=按摩#&o=30&ssrc=a&dist=50km')
# url = "http://www.tripadvisor.cn/Restaurants-g293916-zfp8-Bangkok.html"
# url = "https://www.tripadvisor.com/Attraction_Review-g293916-d496987-Reviews-Wat_Pho_Thai_Traditional_Massage_School-Bangkok.html"
# page = ta_en.getPage(url)
# commentList =ta_en.getComment(page)
# for i in commentList:
# # print str(i.find("p",class_="partial_entry").getText()).replace("\n","").replace("...More","")
# print str(i.find(name="div",attrs={"class", "username mo"}).getText()).replace("\n","")
# imapage = dzdp.getImaPage(page)
# print dzdp.getImaHref(imapage)
|
[
"424536312@qq.com"
] |
424536312@qq.com
|
af721700409df42fbce835e0c8315348d47d3999
|
2d496320c2bbe124f035e59fddb90e163a4c2270
|
/noted/sandbox/test_smtp.py
|
3122881583a2c3556edd367e2da64a02a5893e97
|
[] |
no_license
|
parz3val/Codavt
|
8b08fd5424ffbee04c0eac09149955b0b6b1d40c
|
4d4e1ad0db5dd959f27680c743a1db5c2b93f486
|
refs/heads/master
| 2023-02-03T10:44:28.739260
| 2020-12-23T07:05:58
| 2020-12-23T07:05:58
| 307,899,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
from fixtures import (
smtp_connection
)
# Test func
def test_smtp(smtp_connection):
response = smtp_connection.ehlo()
# ehlo() returns a (code, message) tuple, so this comparison fails; together
# with the unconditional `assert 0`, the test appears intended to fail.
assert response == 250
assert 0
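# Hypothetical sketch (not part of the original file) of what the imported
# fixture might provide; fixtures.py is not shown, so this mirrors the
# smtp_connection example from the pytest documentation under a different name.
import smtplib
import pytest
@pytest.fixture
def smtp_connection_sketch():
    with smtplib.SMTP("smtp.gmail.com", 587, timeout=5) as conn:
        yield conn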
|
[
"iamtheparzival@gmail.com"
] |
iamtheparzival@gmail.com
|
5fecb7219e9625230304028e4c8a4d628c6bd15a
|
e9a7decfd176aa114efe022c82a07934a3378f8a
|
/Øving1_linear_regresjon/main.py
|
b0fb82a5247fa751ec1c1a8a2205896278d095f6
|
[] |
no_license
|
Mahmoud-m1994/MachineLearning
|
8beaff5fd27cee3c4c3c87fe26ab129289a30696
|
e57800814f64029054ea9b4052b91b019ca2135c
|
refs/heads/main
| 2023-01-05T09:08:56.728946
| 2020-11-02T22:46:40
| 2020-11-02T22:46:40
| 309,508,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,966
|
py
|
import matplotlib.pyplot as plt
import pandas as pandas
import torch as torch
# Reading input and output from file without rounding off
with pandas.option_context('display.precision', 20):
data_from_file = pandas.read_csv("length_weght.csv", float_precision=None)
# creating tensor from targets_df
allData = torch.tensor(data_from_file.values).float()
# print(allData)
# Split allData to x and y train tensors
x_train = torch.reshape(allData[:, 0],(-1,1))
y_train = torch.reshape(allData[:, 1],(-1,1))
# print(x_train_asDouble)
# printing out result
# print(x_train)
class LinearRegressionModel:
def __init__(self):
# Model variables
self.W = torch.tensor([[0.0]], requires_grad=True) # requires_grad enables calculation of gradients
self.b = torch.tensor([[0.0]], requires_grad=True)
# Predictor
def f(self, x):
return torch.mm(x , self.W) + self.b # x @ self.W + self.b # @ corresponds to matrix multiplication
# Uses Mean Squared Error
def loss(self, x, y):
return torch.nn.functional.mse_loss(self.f(x), y)
model = LinearRegressionModel()
# Optimize: adjust W and b to minimize loss using stochastic gradient descent
optimizer = torch.optim.SGD([model.b, model.W], 0.0001)
for epoch in range(1000):
model.loss(x_train, y_train).backward() # Compute loss gradients
optimizer.step() # Perform optimization by adjusting W and b,
# similar to:
# model.W -= model.W.grad * 0.01
# model.b -= model.b.grad * 0.01
optimizer.zero_grad() # Clear gradients for next step
# Print model variables and loss
print("W = %s, b = %s, loss = %s" % (model.W, model.b, model.loss(x_train, y_train)))
# Visualize result
plt.plot(x_train, y_train, 'o', label='$(\\hat x^{(i)},\\hat y^{(i)})$')
plt.xlabel('x')
plt.ylabel('y')
x = torch.tensor([[torch.min(x_train)], [torch.max(x_train)]])
plt.plot(x, model.f(x).detach(), label='$y = f(x) = xW+b$')
plt.legend()
plt.show()
|
[
"mahmouim@stud.ntnu.no"
] |
mahmouim@stud.ntnu.no
|
293a0f2271360822a1606d2995fcdcbc78ef916e
|
46e01962d3eef258562b4b5e0204368a80eb2101
|
/resources/site-packages/dns/edns.py
|
8ac676bc62108d991c6d4dc35a507084b2baa454
|
[
"WTFPL"
] |
permissive
|
elgatito/script.elementum.burst
|
fb4fe3f10f722822373e2739a9308130482d1734
|
e37adca9634f644890673dee236a9c215c6744c1
|
refs/heads/master
| 2023-08-26T20:30:59.300868
| 2023-07-27T11:23:44
| 2023-07-27T11:23:44
| 111,373,790
| 108
| 181
|
WTFPL
| 2023-08-28T11:57:32
| 2017-11-20T06:59:52
|
Python
|
UTF-8
|
Python
| false
| false
| 4,384
|
py
|
# Copyright (C) 2009, 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""EDNS Options"""
NSID = 3
class Option(object):
"""Base class for all EDNS option types.
"""
def __init__(self, otype):
"""Initialize an option.
@param otype: The rdata type
@type otype: int
"""
self.otype = otype
def to_wire(self, file):
"""Convert an option to wire format.
"""
raise NotImplementedError
@classmethod
def from_wire(cls, otype, wire, current, olen):
"""Build an EDNS option object from wire format
@param otype: The option type
@type otype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param olen: The length of the wire-format option data
@type olen: int
@rtype: dns.edns.Option instance"""
raise NotImplementedError
def _cmp(self, other):
"""Compare an EDNS option with another option of the same type.
Return < 0 if self < other, 0 if self == other,
and > 0 if self > other.
"""
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, Option):
return False
if self.otype != other.otype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Option):
return False
if self.otype != other.otype:
return False
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) > 0
class GenericOption(Option):
"""Generate Rdata Class
This class is used for EDNS option types for which we have no better
implementation.
"""
def __init__(self, otype, data):
super(GenericOption, self).__init__(otype)
self.data = data
def to_wire(self, file):
file.write(self.data)
@classmethod
def from_wire(cls, otype, wire, current, olen):
return cls(otype, wire[current: current + olen])
def _cmp(self, other):
if self.data == other.data:
return 0
if self.data > other.data:
return 1
return -1
_type_to_class = {
}
def get_option_class(otype):
cls = _type_to_class.get(otype)
if cls is None:
cls = GenericOption
return cls
def option_from_wire(otype, wire, current, olen):
"""Build an EDNS option object from wire format
@param otype: The option type
@type otype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param olen: The length of the wire-format option data
@type olen: int
@rtype: dns.edns.Option instance"""
cls = get_option_class(otype)
return cls.from_wire(otype, wire, current, olen)
|
[
"denis.kuzmenok@gmail.com"
] |
denis.kuzmenok@gmail.com
|
49c50875c50caadcecb420f7aeaa3ee9ca8bb707
|
c009db43a5b81988829fe061ebcfb1270589dfd9
|
/test4.py
|
fe449ff57a1dc875ada9daf8dd4a64cd9db99009
|
[] |
no_license
|
TasosLep/Compiler-MiniPython
|
bd4d1ce8506ca0adda32f8fe621366cdb8a8c6f6
|
70a13e3a93a6a024564121fc494bbb50a689193e
|
refs/heads/master
| 2020-04-06T13:28:33.573742
| 2018-12-31T13:34:12
| 2018-12-31T13:34:12
| 157,501,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
x = 2
y = 'pepe'
if y == x:
print 't'
|
[
"mariosprokopakis@gmail.com"
] |
mariosprokopakis@gmail.com
|
b7c087fbcc7dcedc7b1fd62174a96cc5138213ba
|
66998d0fef1dc2dcb2b6cbfe700ee24d6951f2b6
|
/cyberguardians1/guess/exploit.py
|
4790f28c22add384d5e24fc4961bd34c27672671
|
[] |
no_license
|
nhristov97/write-ups
|
a545f68ab577aabd6121e826eb92ffb62d9ddc8c
|
e7c4a7dffff37d5ab455f6f03d40ec8a945bcf3a
|
refs/heads/master
| 2021-09-27T22:47:16.923407
| 2018-11-12T12:58:28
| 2018-11-12T12:58:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
#!/usr/bin/python
# coding: utf-8
from pwn import *
# context.log_level = 'debug'
filename = "./guess"
e = ELF(filename)
p = process(e.path)
print p.recvuntil("Tell me your name")
payload = "A" * 0x10
p.send(payload)
print p.recvuntil("Nice to meet you " + payload)
randnum = u32(p.recv(4))
p.recvline()
log.info("leaked random number : %d" % randnum)
print p.recvuntil("Okay, can you guess the random number?")
p.sendline(str(randnum))
p.interactive()
|
[
"chaneyoon@gmail.com"
] |
chaneyoon@gmail.com
|
80c58f7da7e71f28a200604c09362759efceda31
|
b72fdee0ad45255bee22e52b2673548078b99076
|
/Exam preperation/Rekursive funksjoner.py
|
4fce0498a04e931633aafbc5739399d9b5f83ecd
|
[] |
no_license
|
lovmo/Hovedinnleveringer-132
|
37a181256788314a04a4ca59eb23f2ba8791ccf3
|
300fa0471989ef61d12f35775759dcde565567b3
|
refs/heads/master
| 2020-09-24T23:08:38.739962
| 2019-12-09T13:32:46
| 2019-12-09T13:32:46
| 225,866,052
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
# RECURSIVE FUNCTIONS
n = 0
def rek(n):
if n == 10:
return n
else:
print(n)
return rek(n+1)
def snu(tekst):
if tekst=='': return ''
else:
snuddRest=snu(tekst[1:])
return snuddRest+tekst[0]
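# Illustrative usage (not part of the original file):
if __name__ == "__main__":
    rek(0)             # prints 0 through 9, then returns 10
    print(snu("hei"))  # -> "ieh": reverses the string one character at a time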
|
[
"endysen@gmail.com"
] |
endysen@gmail.com
|
1c690f9fd488c79aa396d9191ea2904cd176477b
|
d24a1cab3fadaf2a839f41cb7d2e7ae9c4779ef8
|
/arduino_server/SNMP/RFC1155-SMI.py
|
1dc463b93246b10e6426600f76cf7903fa44c450
|
[] |
no_license
|
vfalbor/nagios-arduino
|
685d3652a7154a7c06c3c2fc76a83683732f1a43
|
d05d30b5ec54ca48ed8fc72fb333d712ad25da4d
|
refs/heads/master
| 2020-04-29T11:10:25.028415
| 2014-03-10T16:43:22
| 2014-03-10T16:43:22
| 17,251,528
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
from pysnmp.proto import rfc1155
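# Note (added for clarity): `mibBuilder` is not defined in this file; pysnmp's
# MIB loader injects it into the module namespace when the MIB is loaded.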
( MibIdentifier,
MibTableColumn,
MibTableRow,
MibTable ) = mibBuilder.importSymbols(
'SNMPv2-SMI',
'MibIdentifier',
'MibTableColumn',
'MibTableRow',
'MibTable',
)
iso = MibIdentifier((1,))
org = MibIdentifier(iso.name + (3,))
dod = MibIdentifier(org.name + (6,))
internet = MibIdentifier(dod.name + (1,))
directory = MibIdentifier(internet.name + (1,))
mgmt = MibIdentifier(internet.name + (2,))
experimental = MibIdentifier(internet.name + (3,))
private = MibIdentifier(internet.name + (4,))
enterprises = MibIdentifier(private.name + (1,))
ObjectName = rfc1155.ObjectName
NetworkAddress = rfc1155.NetworkAddress
IpAddress = rfc1155.IpAddress
Counter = rfc1155.Counter
Gauge = rfc1155.Gauge
TimeTicks=rfc1155.TimeTicks
Opaque=rfc1155.Opaque
mibBuilder.exportSymbols(
'RFC1155-SMI',
iso=iso,
org=org,
dod=dod,
internet=internet,
directory=directory,
mgmt=mgmt,
experimental=experimental,
private=private,
enterprises=enterprises,
MibIdentifier=MibIdentifier,
MibTableColumn=MibTableColumn,
MibTableRow=MibTableRow,
MibTable=MibTable,
ObjectName=ObjectName,
NetworkAddress=NetworkAddress,
IpAddress=IpAddress,
Counter=Counter,
Gauge=Gauge,
TimeTicks=TimeTicks,
Opaque=Opaque
)
|
[
"victor.fernandez.albor@gmail.com"
] |
victor.fernandez.albor@gmail.com
|
aa5f25502bcc370c1a2e7eb79e5cd35bdc1841d7
|
b781507eee338e844cae5d4cc615b14d06bc7f64
|
/prediction.ml/python/src/main/python/bundle/start_bundle_server.py
|
6d69b5ca11877824cd20fff0296346dc4f8335f2
|
[
"Apache-2.0"
] |
permissive
|
4thepoch/pipeline
|
3f994c8feb7b9040bc288fb2ec0a40e8bcefc019
|
962d913cce21a693102102f2548b9c7d4e3dc162
|
refs/heads/master
| 2021-01-19T09:35:26.562286
| 2017-04-10T03:54:01
| 2017-04-10T03:54:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,838
|
py
|
#!/usr/bin/env python3
"""Simple HTTP Server With Upload.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
see: https://gist.github.com/UniIsland/3346170
see also (python 3): https://gist.githubusercontent.com/touilleMan/eb02ea40b93e52604938/raw/8765e34ffe1a981b7d7911bdc17380bb85356f39/SimpleHTTPServerWithUpload.py
"""
__version__ = "0.1"
__all__ = ["SimpleHTTPRequestHandler"]
__author__ = "bones7456"
__home_page__ = "http://li2z.cn/"
import os
import posixpath
import http.server
import urllib.request, urllib.parse, urllib.error
import cgi
import shutil
import mimetypes
import re
from io import BytesIO
class SimpleHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET/HEAD/POST commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method. It can also receive files uploaded
by the client.
The GET/HEAD/POST requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTPWithUpload/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def do_POST(self):
"""Serve a POST request."""
r, info = self.deal_post_data()
print((r, info, "by: ", self.client_address))
f = BytesIO()
f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write(b"<html>\n<title>Upload Result Page</title>\n")
f.write(b"<body>\n<h2>Upload Result Page</h2>\n")
f.write(b"<hr>\n")
if r:
f.write(b"<strong>Success:</strong>")
else:
f.write(b"<strong>Failed:</strong>")
f.write(info.encode())
f.write(("<br><a href=\"%s\">back</a>" % self.headers['referer']).encode())
f.write(b"<hr><small>Powerd By: bones7456, check new version at ")
f.write(b"<a href=\"http://li2z.cn/?s=SimpleHTTPServerWithUpload\">")
f.write(b"here</a>.</small></body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
if f:
self.copyfile(f, self.wfile)
f.close()
def deal_post_data(self):
content_type = self.headers['content-type']
if not content_type:
return (False, "Content-Type header doesn't contain boundary")
boundary = content_type.split("=")[1].encode()
remainbytes = int(self.headers['content-length'])
line = self.rfile.readline()
remainbytes -= len(line)
if not boundary in line:
return (False, "Content NOT begin with boundary")
line = self.rfile.readline()
remainbytes -= len(line)
fn = re.findall(r'Content-Disposition.*name="file"; filename="(.*)"', line.decode())
if not fn:
return (False, "Can't find out file name...")
path = self.translate_path(self.path)
fn = os.path.join(path, fn[0])
line = self.rfile.readline()
remainbytes -= len(line)
line = self.rfile.readline()
remainbytes -= len(line)
try:
out = open(fn, 'wb')
except IOError:
return (False, "Can't create file to write, do you have permission to write?")
preline = self.rfile.readline()
remainbytes -= len(preline)
while remainbytes > 0:
line = self.rfile.readline()
remainbytes -= len(line)
if boundary in line:
preline = preline[0:-1]
if preline.endswith(b'\r'):
preline = preline[0:-1]
out.write(preline)
out.close()
return (True, "File '%s' upload success!" % fn)
else:
out.write(preline)
preline = line
return (False, "Unexpect Ends of data.")
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = BytesIO()
displaypath = cgi.escape(urllib.parse.unquote(self.path))
f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write(("<html>\n<title>Directory listing for %s</title>\n" % displaypath).encode())
f.write(("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath).encode())
f.write(b"<hr>\n")
f.write(b"<form ENCTYPE=\"multipart/form-data\" method=\"post\">")
f.write(b"<input name=\"file\" type=\"file\"/>")
f.write(b"<input type=\"submit\" value=\"upload\"/></form>\n")
f.write(b"<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write(('<li><a href="%s">%s</a>\n'
% (urllib.parse.quote(linkname), cgi.escape(displayname))).encode())
f.write(b"</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.parse.unquote(path))
words = path.split('/')
words = [_f for _f in words if _f]
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = http.server.HTTPServer):
http.server.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
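# Illustrative invocation (not part of the original file): serve the current
# directory, then upload a file with curl. localhost:8000 assumes the default
# port used by http.server.test.
#   python3 start_bundle_server.py
#   curl -F "file=@example.txt" http://localhost:8000/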
|
[
"chris@fregly.com"
] |
chris@fregly.com
|
724c2b02f263df523b8afe0c513cfa3edbeb912d
|
7f11800bcf0f339e9e233cee228b26d5cd317252
|
/exc_main.py
|
64dde2884c64d0733bddc81039c80222f0395e53
|
[] |
no_license
|
sindbach/plugin-example
|
fd6198594b2cf4db38ba496a5fdd3d4eb0af0bb2
|
aad5ae63ff59640ba450973692e379444d95a29d
|
refs/heads/master
| 2021-01-10T03:19:52.089852
| 2016-04-09T12:11:59
| 2016-04-09T12:11:59
| 55,842,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,948
|
py
|
#!/usr/bin/env python
""" Executable for serialiser package.
Supporting 3 modes of operation: serialise, deserialise and listtypes.
"""
import sys
import argparse
from serial.serialiser import SerialLoader
from serial.serialiser import DE_PRINT
def main(flags):
# fetch a list of available plugins.
try:
supported = SerialLoader.get_supported_parsers()
except Exception, ex:
print "Failed to load supported plugins: %s" % ex
sys.exit(1)
# list supported/available plugins and then quit
if flags.listtypes:
for k, v in supported.iteritems():
print "Type: %s" % (k)
print "Desc: %s" % (v.get("description"))
sys.exit(1)
# validate serialiser type
if not flags.type in supported.keys():
print "Parser type %s is not supported. Please read --help file" % (flags.type)
sys.exit(1)
# instantiate object parser
s = SerialLoader.create_parser(parser=supported.get(flags.type))
# run serialise()
if flags.serialise:
print s.serialise(flags.input)
# run deserialise()
elif flags.deserialise:
dprint = flags.dprint
if not dprint in DE_PRINT:
print "Warning: print format is unsupported. reverting to default."
dprint = DE_PRINT[0]
for d in s.deserialise(flags.input):
print getattr(d, dprint)()
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-s", "--serialise", action="store_true", help="Serialise document mode.")
parser.add_argument("-d", "--deserialise", action="store_true", help="Deserialise document mode.")
parser.add_argument("-l", "--listtypes", action="store_true", help="List supported types mode.")
parser.add_argument("-t", "--type", help="Specify parser type. [REQUIRED]")
parser.add_argument("-i", "--input", help="Specify file input. [REQUIRED]")
parser.add_argument("--dprint", default="csv", help="Specify format of deserialisation display. %s" % DE_PRINT)
flags = parser.parse_args()
if not (flags.serialise or flags.deserialise or flags.listtypes):
print "Please specify at least one mode of operation. (serialise, deserialise, listtypes)"
parser.print_help()
sys.exit(1)
if (flags.serialise and flags.deserialise) or \
(flags.serialise and flags.listtypes) or \
(flags.deserialise and flags.listtypes):
print "Please choose only one mode of operation"
parser.print_help()
sys.exit(1)
if (flags.serialise or flags.deserialise) and not flags.type:
print "Please specify serialiser type."
parser.print_help()
sys.exit(1)
if (flags.serialise or flags.deserialise) and not flags.input:
print "Please specify an input file."
parser.print_help()
sys.exit(1)
main(flags)
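# Illustrative invocations (not part of the original file); the parser type
# "json" is a hypothetical plugin name:
#   python exc_main.py --listtypes
#   python exc_main.py -t json -i sample.txt --serialise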
|
[
"sindbach@gmail.com"
] |
sindbach@gmail.com
|
248a20762cc356d830d0d042b2203ae7bd5635db
|
0d4b4be2ff6bd3a58cac3adb9a392e080c6c7c46
|
/snf-cyclades-app/synnefo/logic/management/commands/server-remove.py
|
86a6b9d6660536f13183dcc99896daf2c1d16780
|
[
"BSD-2-Clause"
] |
permissive
|
philipgian/synnefo
|
715645a8bbaab8bd6c448bf7c168f6f99ae615a1
|
d4fffe4942e9e3c5ac7f41af0f5643dd0645023e
|
refs/heads/master
| 2021-01-21T11:33:22.540104
| 2014-05-27T10:57:45
| 2014-05-27T10:57:45
| 17,517,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,443
|
py
|
# Copyright 2013 GRNET S.A. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY GRNET S.A. ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GRNET S.A OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be
# interpreted as representing official policies, either expressed
# or implied, of GRNET S.A.
from optparse import make_option
from django.core.management.base import CommandError
from synnefo.management.common import (get_vm, convert_api_faults,
wait_server_task)
from synnefo.logic import servers
from snf_django.management.commands import RemoveCommand
from snf_django.management.utils import parse_bool
from snf_django.lib.api import faults
class Command(RemoveCommand):
args = "<Server ID> [<Server ID> ...]"
help = "Remove a server by deleting the instance from the Ganeti backend."
option_list = RemoveCommand.option_list + (
make_option(
'--wait',
dest='wait',
default="True",
choices=["True", "False"],
metavar="True|False",
help="Wait for Ganeti job to complete."),
)
@convert_api_faults
def handle(self, *args, **options):
if not args:
raise CommandError("Please provide a server ID")
force = options['force']
message = "servers" if len(args) > 1 else "server"
self.confirm_deletion(force, message, args)
for server_id in args:
self.stdout.write("\n")
try:
server = get_vm(server_id)
self.stdout.write("Trying to remove server '%s' from backend "
"'%s' \n" % (server.backend_vm_id,
server.backend))
server = servers.destroy(server)
jobID = server.task_job_id
self.stdout.write("Issued OP_INSTANCE_REMOVE with id: %s\n" %
jobID)
wait = parse_bool(options["wait"])
wait_server_task(server, wait, self.stdout)
except (CommandError, faults.BadRequest) as e:
self.stdout.write("Error -- %s\n" % e.message)
|
[
"cstavr@grnet.gr"
] |
cstavr@grnet.gr
|
e47934720afca41716adadcb251455080fbf3b6c
|
e559781f1a785f6932a20315f491316fa5f991ba
|
/src/match_stats.py
|
919c2a873cf0790697dfd2362cda6ee43d4aa887
|
[
"MIT"
] |
permissive
|
PeterJCLaw/match-scheduler
|
3e05a5f07842d16029dfd72e18a3dd52d8ea9ce0
|
f4630f274197af06bfbc268039e23028b00b0344
|
refs/heads/master
| 2022-04-29T14:05:05.571911
| 2022-04-19T17:04:45
| 2022-04-19T17:04:45
| 9,333,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
from collections import Counter, defaultdict

def match_statistics(matches):
    match_counter = Counter()        # matches played per team
    opponents = defaultdict(set)     # teams each team has faced
    prev_match = []
    collisions = set()               # teams scheduled in consecutive matches
    for match in matches:
        for team in match:
            if team in prev_match:
                collisions.add(team)
            match_counter[team] += 1
            for other_team in match:
                if team != other_team:
                    opponents[team].add(other_team)
        prev_match = match  # remember this match so back-to-back appearances are flagged
    return match_counter, opponents, collisions
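
# A minimal usage sketch (the schedule below is made up for illustration):
if __name__ == '__main__':
    schedule = [('A', 'B'), ('C', 'D'), ('D', 'E')]
    counts, opponents, collisions = match_statistics(schedule)
    print(counts)      # matches played per team
    print(opponents)   # opponents faced per team
    print(collisions)  # {'D'}: D appears in two consecutive matches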
|
[
"arplynn@gmail.com"
] |
arplynn@gmail.com
|
7237e7351b5b9578ebd652a9374a1a35a966ee7c
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/4/jzp.py
|
8d619608d3a047ef364a68e383155b077ce18c55
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if not data:
                continue  # skip blank lines instead of crashing on data[0]
            if data[0] == 'jZP':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1])
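# Example input line for this toy interpreter (a guess at the intended syntax;
# the quotes must be separate, whitespace-delimited tokens):
#   jZP " hello world "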
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
8b7e943dba0a90c2c31e62aa0f1beb573b9b08d4
|
95bd46f885998bbb8da1483c44a291de228a2270
|
/quantize_and_test.py
|
ade2a62cfc37a38657b310246ff09f78879284ce
|
[] |
no_license
|
githubfragments/INR
|
cb8fc487210b4240c2342d3e4e3cf834e2ca4500
|
1e7261588ff57ed528f64aa26ee2baaceba2e398
|
refs/heads/master
| 2023-06-12T22:59:11.262698
| 2021-07-06T10:29:40
| 2021-07-06T10:29:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,473
|
py
|
import copy
import pickle
import zlib
import numpy as np
import pandas
import yaml
import sys
from losses import model_l1
sys.path.append("siren/torchmeta")
sys.path.append("siren")
import skimage
import matplotlib.pyplot as plt
import io
import torch
# @title Set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Device: %s' % device)
import os
from tqdm import tqdm
from aimet_torch.qc_quantize_op import QcPostTrainingWrapper
from aimet_torch.quantsim import QuantizationSimModel
import dataio, meta_modules, utils, training, loss_functions, modules
from modules import Sine, ImageDownsampling, PosEncodingNeRF, FourierFeatureEncodingPositional, \
FourierFeatureEncodingGaussian
from absl import app
from absl import flags
import glob, PIL
from torch.utils.data import DataLoader
import configargparse
from functools import partial
import json
from utils import check_metrics, check_metrics_full
from aimet_torch.adaround.adaround_weight import Adaround, AdaroundParameters
from quantize_utils import convert_to_nn_module, convert_to_nn_module_in_place
from aimet_torch.save_utils import SaveUtils
from aimet_common.defs import QuantScheme
from aimet_torch.meta import connectedgraph_utils
def mse_func(a, b):
return np.mean((np.array(a, dtype='float32') - np.array(b, dtype='float32')) ** 2)
flags.DEFINE_string('data_root',
'/home/yannick',
'Root directory of data.')
flags.DEFINE_string('exp_root',
'exp',
'Root directory of experiments.')
flags.DEFINE_string('exp_glob',
'KODAK21*',
'regular expression to match experiment name')
flags.DEFINE_enum('dataset', 'KODAK21',
['KODAK', 'KODAK21'],
'Dataset used during retraining.')
flags.DEFINE_enum('difference_encoding', 'same',
['same', 'adjusted', 'off'],
'Difference encoding mode')
flags.DEFINE_integer('epochs',
10000,
'Maximum number of epochs during retraining.',
lower_bound=1)
flags.DEFINE_float('lr',
1e-06,
'Learning rate used during retraining.',
lower_bound=0.0)
flags.DEFINE_float('l1_reg',
0.0,
'L1 weight regularization used during retraining.',
lower_bound=0.0)
flags.DEFINE_integer('bitwidth',
8,
'bitwidth used for Quantization',
lower_bound=1)
flags.DEFINE_bool('adaround',
                  True,
                  'use adaptive rounding post quantization')
flags.DEFINE_bool('retrain',
                  True,
                  'use retraining post quantization')
flags.DEFINE_float('adaround_reg', 0.001, 'regularizing parameter for adaround')
flags.DEFINE_integer('adaround_iterations', 500, 'Number of adaround iterations')
FLAGS = flags.FLAGS
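# Thin adapter used by the AdaRound path below: it exposes samples as plain
# (coords, image) tuples instead of the dict-based samples used for training.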
class AimetDataset(torch.utils.data.Dataset):
def __init__(self, dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return (self.dataset[idx][0]['coords'].unsqueeze(0), self.dataset[idx][1]['img'])
def apply_adaround(model, sim, dataloader, exp_folder, image_name, input_shape, bitwidth, layerwise_bitwidth=None,
adaround_reg=0.01, adaround_iterations=500):
dummy_in = ((torch.rand(input_shape).unsqueeze(0) * 2) - 1).cuda()
params = AdaroundParameters(data_loader=dataloader, num_batches=1, default_num_iterations=adaround_iterations,
default_reg_param=adaround_reg, default_beta_range=(20, 2))
# Compute only param encodings
Adaround._compute_param_encodings(sim)
# Get the module - activation function pair using ConnectedGraph
module_act_func_pair = connectedgraph_utils.get_module_act_func_pair(model, dummy_in)
Adaround._adaround_model(model, sim, module_act_func_pair, params, dummy_in)
# Update every module (AdaroundSupportedModules) weight with Adarounded weight (Soft rounding)
Adaround._update_modules_with_adarounded_weights(sim)
path = os.path.join(exp_folder, image_name)
filename_prefix = 'adaround'
# Export quantization encodings to JSON-formatted file
Adaround._export_encodings_to_json(path, filename_prefix, sim)
SaveUtils.remove_quantization_wrappers(sim.model)
adarounded_model = sim.model
sim = get_quant_sim(model=adarounded_model, input_shape=input_shape, bitwidth=bitwidth,
layerwise_bitwidth=layerwise_bitwidth)
sim.set_and_freeze_param_encodings(encoding_path=os.path.join(path, filename_prefix + '.encodings'))
return sim
def get_quant_sim(model, input_shape, bitwidth, layerwise_bitwidth=None):
dummy_in = ((torch.rand(input_shape).unsqueeze(0) * 2) - 1).cuda()
sim = QuantizationSimModel(model, default_param_bw=bitwidth,
default_output_bw=31, dummy_input=dummy_in)
modules_to_exclude = (
Sine, ImageDownsampling, PosEncodingNeRF, FourierFeatureEncodingPositional, FourierFeatureEncodingGaussian)
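    # these are activation / coordinate-encoding layers, so they are kept out
    # of the quantization simulation and left in full precision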
excl_layers = []
for mod in sim.model.modules():
if isinstance(mod, QcPostTrainingWrapper) and isinstance(mod._module_to_wrap, modules_to_exclude):
excl_layers.append(mod)
sim.exclude_layers_from_quantization(excl_layers)
i = 0
for name, mod in sim.model.named_modules():
if isinstance(mod, QcPostTrainingWrapper):
mod.output_quantizer.enabled = False
mod.input_quantizer.enabled = False
weight_quantizer = mod.param_quantizers['weight']
bias_quantizer = mod.param_quantizers['bias']
weight_quantizer.use_symmetric_encodings = True
bias_quantizer.use_symmetric_encodings = True
if torch.count_nonzero(mod._module_to_wrap.bias.data):
mod.param_quantizers['bias'].enabled = True
if layerwise_bitwidth:
mod.param_quantizers['bias'].bitwidth = layerwise_bitwidth[i]
mod.param_quantizers['weight'].bitwidth = layerwise_bitwidth[i]
i += 1
return sim
def apply_quantization(sim):
quantized_dict = {}
state_dict = {}
for name, module in sim.model.named_modules():
if isinstance(module, QcPostTrainingWrapper) and isinstance(module._module_to_wrap, torch.nn.Linear):
weight_quantizer = module.param_quantizers['weight']
bias_quantizer = module.param_quantizers['bias']
weight_quantizer.enabled = True
bias_quantizer.enabled = True
wrapped_linear = module._module_to_wrap
weight = wrapped_linear.weight
bias = wrapped_linear.bias
state_dict[name + '.weight'] = weight_quantizer.quantize_dequantize(weight,
weight_quantizer.round_mode).cpu().detach()
assert (len(torch.unique(state_dict[name + '.weight'])) <= 2 ** weight_quantizer.bitwidth)
state_dict[name + '.bias'] = bias_quantizer.quantize_dequantize(bias,
bias_quantizer.round_mode).cpu().detach()
assert (len(torch.unique(state_dict[name + '.bias'])) <= 2 ** bias_quantizer.bitwidth)
quantized_weight = weight_quantizer.quantize(weight,
weight_quantizer.round_mode).cpu().detach().numpy() + weight_quantizer.encoding.offset
quantized_bias = bias_quantizer.quantize(bias,
bias_quantizer.round_mode).cpu().detach().numpy() + bias_quantizer.encoding.offset
quantized_dict[name] = {'weight': {'data': quantized_weight, 'encoding': weight_quantizer.encoding},
'bias': {'data': quantized_bias, 'encoding': bias_quantizer.encoding}}
weights_np = []
for l in quantized_dict.values():
w = l['weight']['data']
b = l['bias']['data']
Q = l['weight']['encoding'].bw
if Q < 9:
tpe = 'int8'
elif Q < 17:
tpe = 'int16'
else:
tpe = 'int32'
w = w.astype(tpe).flatten()
weights_np.append(w)
if l['bias']['encoding']:
Q = l['bias']['encoding'].bw
if Q < 9:
tpe = 'int8'
elif Q < 17:
tpe = 'int16'
else:
tpe = 'int32'
b = b.astype(tpe).flatten()
weights_np.append(b)
weights_np = np.concatenate(weights_np)
comp = zlib.compress(weights_np, level=9)
return comp, state_dict
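# Note (added for clarity): `comp` is used only as a size estimate; main()
# later converts it to bits per pixel as len(comp) * 8 / (width * height).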
def retrain_model(model, train_dataloader, epochs, loss_fn, lr, l1_reg, image_resolution,
randomize_quant_wrappers=False,
weight_loss_weight=0):
optim = torch.optim.Adam(lr=lr, params=model.parameters())
best_mse = 1
use_amp = False
q_wrapper_list = []
for name, mod in model.named_modules():
if isinstance(mod, QcPostTrainingWrapper):
q_wrapper_list.append(mod)
N = len(q_wrapper_list)
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
with tqdm(total=len(train_dataloader) * epochs) as pbar:
for epoch in range(epochs):
if randomize_quant_wrappers:
r = torch.rand(N)
for i, q in enumerate(q_wrapper_list):
if r[i] > 0.5:
q.param_quantizers['weight'].enabled = True
q.param_quantizers['bias'].enabled = True
else:
q.param_quantizers['weight'].enabled = False
q.param_quantizers['bias'].enabled = False
for step, (model_input, gt) in enumerate(train_dataloader):
with torch.cuda.amp.autocast(enabled=use_amp):
model_input = {key: value.cuda() for key, value in model_input.items()}
gt = {key: value.cuda() for key, value in gt.items()}
model_output = {}
model_output['model_out'] = model(model_input['coords'])
losses = loss_fn(model_output, gt)
l1_loss = model_l1(model, l1_reg)
losses = {**losses, **l1_loss}
train_loss = 0.
for loss_name, loss in losses.items():
single_loss = loss.mean()
train_loss += single_loss
weight_loss = 0
if weight_loss_weight:
for i, q in enumerate(q_wrapper_list):
weight_quantizer = q.param_quantizers['weight']
bias_quantizer = q.param_quantizers['bias']
wrapped_linear = q._module_to_wrap
weight = copy.deepcopy(wrapped_linear.weight)
bias = copy.deepcopy(wrapped_linear.bias)
weight_dequant = weight_quantizer.quantize_dequantize(weight, weight_quantizer.round_mode)
bias_dequant = bias_quantizer.quantize_dequantize(bias, bias_quantizer.round_mode)
weight_diff = torch.abs(weight_dequant - wrapped_linear.weight)
bias_diff = torch.abs(bias_dequant - wrapped_linear.bias)
weight_loss += torch.sum(weight_diff) + torch.sum(bias_diff)
train_loss = train_loss + weight_loss_weight * weight_loss
optim.zero_grad()
scaler.scale(train_loss).backward()
scaler.step(optim)
scaler.update()
pbar.update(1)
            # make sure all quantization wrappers are active for performance evaluation
for i, q in enumerate(q_wrapper_list):
q.param_quantizers['weight'].enabled = True
q.param_quantizers['bias'].enabled = True
m = check_metrics(train_dataloader, model, image_resolution)
mse, ssim, psnr = m
if mse < best_mse:
best_state_dict = copy.deepcopy(model.state_dict())
best_mse = mse
model.load_state_dict(best_state_dict, strict=True)
return model
def quantize_model(model, coord_dataset, bitwidth=8, layerwise_bitwidth=None, retrain=True, epochs=300, ref_model=None,
flags=None,
adaround=False, lr=0.00000001, adaround_reg=0.01, adaround_iterations=500, exp_folder=None,
image_name=None, difference_encoding='same'):
input_shape = coord_dataset.mgrid.shape
image_resolution = coord_dataset.sidelength
dataloader = DataLoader(coord_dataset, shuffle=True, batch_size=1, pin_memory=True,
num_workers=0)
aimet_dataloader = DataLoader(AimetDataset(coord_dataset), shuffle=True, batch_size=1, pin_memory=True,
num_workers=0)
sim = get_quant_sim(model=model, input_shape=input_shape, bitwidth=bitwidth, layerwise_bitwidth=layerwise_bitwidth)
res = check_metrics(dataloader, sim.model, image_resolution)
print('After Quantization: ', res)
if adaround:
sim = apply_adaround(model=model, sim=sim, dataloader=aimet_dataloader, exp_folder=exp_folder,
image_name=image_name,
input_shape=input_shape, bitwidth=bitwidth, layerwise_bitwidth=layerwise_bitwidth,
adaround_reg=adaround_reg, adaround_iterations=adaround_iterations)
res = check_metrics(dataloader, sim.model, image_resolution)
print('After Adaround: ', res)
if retrain:
loss_fn = partial(loss_functions.image_mse, None)
retrained_model = retrain_model(model=sim.model, train_dataloader=dataloader, epochs=epochs, loss_fn=loss_fn,
lr=lr,
l1_reg=flags['l1_reg'] if flags is not None else 0,
image_resolution=image_resolution)
res = check_metrics(dataloader, retrained_model, image_resolution)
model = retrained_model
print('After retraining: ', res)
def evaluate_model(model: torch.nn.Module, eval_iterations: int, use_cuda: bool = False) -> float:
"""
:param model: Model to evaluate
:param eval_iterations: Number of iterations to use for evaluation.
None for entire epoch.
:param use_cuda: If true, evaluate using gpu acceleration
:return: single float number (accuracy) representing model's performance
"""
mse, ssim, psnr = check_metrics(dataloader, model, image_resolution)
return psnr
# Compute the difference for each parameter
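    # (i.e. quantize only the delta between this model and the shared reference
    # model, then add the quantized delta back onto the reference's quantized
    # weights when assembling the final state dict below)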
if ref_model is not None and difference_encoding=='same':
ref_sim = get_quant_sim(model=convert_to_nn_module(ref_model), input_shape=input_shape, bitwidth=bitwidth, layerwise_bitwidth=layerwise_bitwidth)
new_state_dict = copy.deepcopy(sim.model.state_dict())
sim.model.load_state_dict(ref_sim.model.state_dict())
_, ref_state_dict_quantized = apply_quantization(sim)
lis = [[i, j, a, b] for i, a in ref_state_dict_quantized.items() for j, b in new_state_dict.items() if
i == j.replace('._module_to_wrap', '')]
# lis = [[i, j, a, b] for i, a in ref_model.named_parameters() for j, b in sim.model.named_parameters() if
# i == j.replace('._module_to_wrap', '')]
for module in lis:
new_state_dict[module[1]] = module[3] - module[2].cuda()
sim.model.load_state_dict(new_state_dict)
#sim.compute_encodings(forward_pass_callback=evaluate_model, forward_pass_callback_args=1)
#ref_model_state_dict = ref_model.state_dict()
comp, update_state_dict = apply_quantization(sim)
# for key, value in ref_model_state_dict.items():
# ref_model_state_dict[key] = value + update_state_dict[key].cuda()
# state_dict = ref_model_state_dict
final_state_dict = {}
for key, value in ref_state_dict_quantized.items():
final_state_dict[key] = value + update_state_dict[key]
state_dict = final_state_dict
elif ref_model is not None and difference_encoding=='adjusted':
new_state_dict = copy.deepcopy(sim.model.state_dict())
lis = [[i, j, a, b] for i, a in ref_model.named_parameters() for j, b in sim.model.named_parameters() if
i == j.replace('._module_to_wrap', '')]
for module in lis:
new_state_dict[module[1]] = module[3] - module[2].cuda()
sim.model.load_state_dict(new_state_dict)
sim.compute_encodings(forward_pass_callback=evaluate_model, forward_pass_callback_args=1)
ref_model_state_dict = ref_model.state_dict()
comp, update_state_dict = apply_quantization(sim)
for key, value in ref_model_state_dict.items():
ref_model_state_dict[key] = value + update_state_dict[key].cuda()
state_dict = ref_model_state_dict
else:
comp, state_dict = apply_quantization(sim)
print(len(comp))
return model, res, len(comp), state_dict
def get_quant_config_name():
# create exp folder
name = 'bw' + str(FLAGS.bitwidth)
if FLAGS.adaround:
name = '_'.join([name, 'adaround_iter', str(FLAGS.adaround_iterations),'adaround_reg', str(FLAGS.adaround_reg)])
if FLAGS.retrain:
name = '_'.join([name, 'retrain_epochs' + str(FLAGS.epochs), 'retrain_lr' + str(FLAGS.lr)])
if FLAGS.difference_encoding == 'adjusted':
name = '_'.join([name, 'enc_adjusted'])
return name
def main(_):
imglob = glob.glob(os.path.join(FLAGS.data_root, FLAGS.dataset, '*'))
df_list = []
exp_glob = glob.glob(os.path.join(FLAGS.exp_root, FLAGS.exp_glob))
for exp_folder in exp_glob:
TRAINING_FLAGS = yaml.safe_load(open(os.path.join(exp_folder, 'FLAGS.yml'), 'r'))
for im in imglob:
image_name = im.split('/')[-1].split('.')[0]
img_dataset = dataio.ImageFile(im)
img = PIL.Image.open(im)
scale = TRAINING_FLAGS['downscaling_factor']
image_resolution = (img.size[1] // scale, img.size[0] // scale)
coord_dataset = dataio.Implicit2DWrapper(img_dataset, sidelength=image_resolution)
dataloader = DataLoader(coord_dataset, shuffle=True, batch_size=1, pin_memory=True,
num_workers=0)
if 'encoding_scale' in TRAINING_FLAGS:
s = TRAINING_FLAGS['encoding_scale']
else:
s = 0
if 'bn' not in TRAINING_FLAGS:
TRAINING_FLAGS['bn'] = False
if 'intermediate_losses' not in TRAINING_FLAGS:
TRAINING_FLAGS['intermediate_losses'] = False
if 'phased' not in TRAINING_FLAGS:
TRAINING_FLAGS['phased'] = False
if 'ff_dims' not in TRAINING_FLAGS:
TRAINING_FLAGS['ff_dims'] = None
if 'num_components' not in TRAINING_FLAGS:
TRAINING_FLAGS['num_components'] = 1
if TRAINING_FLAGS['model_type'] == 'mlp':
model = modules.SingleBVPNet_INR(type=TRAINING_FLAGS['activation'], mode=TRAINING_FLAGS['encoding'],
sidelength=image_resolution,
out_features=img_dataset.img_channels,
hidden_features=TRAINING_FLAGS['hidden_dims'],
num_hidden_layers=TRAINING_FLAGS['hidden_layers'], encoding_scale=s,
batch_norm=TRAINING_FLAGS['bn'], ff_dims=TRAINING_FLAGS['ff_dims'])
elif TRAINING_FLAGS['model_type'] == 'multi_tapered':
model = modules.MultiScale_INR(type=TRAINING_FLAGS['activation'], mode=TRAINING_FLAGS['encoding'],
sidelength=image_resolution,
out_features=img_dataset.img_channels,
hidden_features=TRAINING_FLAGS['hidden_dims'],
num_hidden_layers=TRAINING_FLAGS['hidden_layers'], encoding_scale=s,
tapered=True, ff_dims=TRAINING_FLAGS['ff_dims'])
elif TRAINING_FLAGS['model_type'] == 'multi':
model = modules.MultiScale_INR(type=TRAINING_FLAGS['activation'], mode=TRAINING_FLAGS['encoding'],
sidelength=image_resolution,
out_features=img_dataset.img_channels,
hidden_features=TRAINING_FLAGS['hidden_dims'],
num_hidden_layers=TRAINING_FLAGS['hidden_layers'], encoding_scale=s,
tapered=False, ff_dims=TRAINING_FLAGS['ff_dims'])
elif TRAINING_FLAGS['model_type'] == 'mixture':
model = modules.INR_Mixture(type=TRAINING_FLAGS['activation'], mode=TRAINING_FLAGS['encoding'],
sidelength=image_resolution,
out_features=img_dataset.img_channels,
hidden_features=TRAINING_FLAGS['hidden_dims'],
num_hidden_layers=TRAINING_FLAGS['hidden_layers'], encoding_scale=s,
batch_norm=TRAINING_FLAGS['bn'], ff_dims=TRAINING_FLAGS['ff_dims'],
num_components=TRAINING_FLAGS['num_components'])
model = model.to(device)
try:
state_dict = torch.load(os.path.join(exp_folder, image_name + '/checkpoints/model_best_.pth'),
map_location='cpu')
except:
continue
model.load_state_dict(state_dict, strict=True)
mse, ssim, psnr = check_metrics_full(dataloader, model, image_resolution)
try:
ref_state_dict = torch.load(os.path.join(exp_folder, 'model_maml.pth'),
map_location='cpu')
ref_model = copy.deepcopy(model)
ref_model.load_state_dict(ref_state_dict, strict=True)
except:
ref_model = None
if TRAINING_FLAGS['model_type'] == 'mlp':
model = convert_to_nn_module(model)
else:
model = convert_to_nn_module_in_place(model)
model.use_meta = False
model_quantized, metrics, bytes, state_dict = quantize_model(model=model, coord_dataset=coord_dataset,
bitwidth=FLAGS.bitwidth,
retrain=FLAGS.retrain, epochs=FLAGS.epochs,
lr=FLAGS.lr, ref_model=ref_model,
adaround=FLAGS.adaround,
adaround_iterations=FLAGS.adaround_iterations,
adaround_reg=FLAGS.adaround_reg,
exp_folder=exp_folder,
image_name=image_name,
difference_encoding=FLAGS.difference_encoding)
model.load_state_dict(state_dict, strict=True)
metrics = check_metrics(dataloader, model, image_resolution)
print('Final metrics: ', metrics)
bpp_val = bytes * 8 / (image_resolution[0] * image_resolution[1])
mse, ssim, psnr = metrics
metrics_dict = {'activation': TRAINING_FLAGS['activation'], 'training_epochs': TRAINING_FLAGS['epochs'],
'encoding': TRAINING_FLAGS['encoding'], 'training_lr': TRAINING_FLAGS['lr'],
'hidden_dims': TRAINING_FLAGS['hidden_dims'],
'hidden_layers': TRAINING_FLAGS['hidden_layers'],
'model_type': TRAINING_FLAGS['model_type'],
'psnr': psnr.item(), 'ssim': ssim.item(), 'mse': mse.item(), 'encoding_scale': TRAINING_FLAGS['encoding_scale'],
'bpp': bpp_val,
'l1_reg': TRAINING_FLAGS['l1_reg'],
'bn': TRAINING_FLAGS['bn'] if 'bn' in TRAINING_FLAGS else False,
'phased': TRAINING_FLAGS['phased'],
'intermediate_losses': TRAINING_FLAGS['intermediate_losses'],
'ff_dims': TRAINING_FLAGS['ff_dims'], 'num_components': TRAINING_FLAGS['num_components'],
'retrain_epochs': FLAGS.epochs, 'retrain': FLAGS.retrain,
'adaround': FLAGS.adaround, 'adaround_reg': FLAGS.adaround_reg,
'adaround_iterations': FLAGS.adaround_iterations, 'retraining_lr': FLAGS.lr,
'outer_lr': TRAINING_FLAGS['outer_lr'], 'lr': TRAINING_FLAGS['lr'], 'inner_lr': TRAINING_FLAGS['inner_lr']}
name = get_quant_config_name()
yaml.dump(metrics_dict, open(os.path.join(exp_folder, image_name, 'metrics_' + name + '.yml'), 'w'))
torch.save(model.state_dict(),
os.path.join(exp_folder, image_name, 'model_' + name + '.pth'))
if __name__ == '__main__':
app.run(main)
|
[
"y.struempler@gmail.com"
] |
y.struempler@gmail.com
|
940b9e7443beffa163aadaa8d3ff6b99e06872bd
|
d44215864e30ad8039a1a294875e4222e3d23ebd
|
/devel/lib/python2.7/dist-packages/rosserial_python/__init__.py
|
10cd20fa99dfcbffffdf314d7933408dee3a8348
|
[] |
no_license
|
prathyusha-shine/abhiyan1.0
|
5c3eebfbbacb8b364180b9c2bd377c73cf29e693
|
bf9be6462c132465ddbf8c20b1e9a4e1eabd596e
|
refs/heads/master
| 2020-12-31T01:23:32.911145
| 2015-05-31T06:19:16
| 2015-05-31T06:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/sudha/catkin_ws/src/rosserial/rosserial_python/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
|
[
"sudha@sudha.(none)"
] |
sudha@sudha.(none)
|
322667431192b6412e24a1dea7c120a3b19202e9
|
b953909018be86cf8cdf328e2b13395c1dbe28c0
|
/apps/blog/signals.py
|
ad1b5bcf358f33816d995cb34b3fe89b1a4a3dc1
|
[] |
no_license
|
wangyong240/mes
|
06ce26d146aebe0b0103dda4fdd198c3cefc6014
|
12d7321c1b96ae0fdd8f26029462e1943a500c01
|
refs/heads/master
| 2023-01-01T13:29:29.853063
| 2020-09-19T01:19:22
| 2020-09-19T01:19:22
| 296,762,233
| 1
| 0
| null | 2020-09-19T01:20:05
| 2020-09-19T01:20:04
| null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
import django.dispatch
post_viewed = django.dispatch.Signal(providing_args=["post", "request"])
post_published = django.dispatch.Signal(providing_args=["post"])
post_redirected = django.dispatch.Signal(providing_args=["post", "request"])
|
[
"70498306+wangyong240@users.noreply.github.com"
] |
70498306+wangyong240@users.noreply.github.com
|
eb1b5beff1192f98e33857eec82befcbd20bdb2c
|
7e6c2aa534a612c05c6f87e85a6ae45dd6c03408
|
/General Problem/ListIntersection.py
|
65457d8b30eb016dc09e0ea84a3302687fbe03cf
|
[] |
no_license
|
Akshay-agarwal/DataStructures
|
528c9ee93cca90aae634985bc4470f6419885eba
|
a6abea2c24166db168ae9092893ac794fc921d74
|
refs/heads/master
| 2021-01-12T09:54:36.696776
| 2016-12-29T09:59:31
| 2016-12-29T09:59:31
| 76,295,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
c=[1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,4,3]
print(set(c))
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
# compute the intersection directly instead of rebuilding set(b) for every element
l = list(set(a) & set(b))
print(l)
|
[
"akshay.agarwal01@sjsu.edu"
] |
akshay.agarwal01@sjsu.edu
|
d9c704f85da2ac0dcb6c44ef62fafb05e70ad87b
|
667fad988ea0a5652fc522e87bb7b02cc748ecad
|
/fluent_dashboard/__init__.py
|
2964b54e27bfc0df7eb28e684c4954db4e6c6ea7
|
[
"Apache-2.0"
] |
permissive
|
django-fluent/django-fluent-dashboard
|
6a841f4572b8797427b46839aeb496c7e6b3192b
|
d683d1bda698aa8790526fcf7ecd255dfbea45cd
|
refs/heads/master
| 2023-02-20T04:01:34.448490
| 2021-11-17T12:02:38
| 2021-11-17T12:02:38
| 2,784,335
| 204
| 33
|
Apache-2.0
| 2023-02-15T20:49:59
| 2011-11-15T23:57:05
|
Python
|
UTF-8
|
Python
| false
| false
| 40
|
py
|
# following PEP 440
__version__ = "2.0"
|
[
"vdboor@edoburu.nl"
] |
vdboor@edoburu.nl
|
b8a0670e967d3cb15d51ccd374c0e667ce324f29
|
3374a4f971298367204be27142b1d13ec141eefc
|
/app/__init__.py
|
96674b9d54359935d23e5ccf30dde0a8f9d6cbb2
|
[] |
no_license
|
ThePoulsen/testr
|
2f82286230536fa04944962005be882cdce80d9e
|
16e58187d9c974aada63136f6166edb35ea3efd4
|
refs/heads/master
| 2020-12-24T20:00:39.699410
| 2017-03-31T13:58:51
| 2017-03-31T13:58:51
| 86,226,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
## -*- coding: utf-8 -*-
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail
import flask_sijax
from flask_htmlmin import HTMLMIN
from flask_security import Security, SQLAlchemyUserDatastore
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object('config.DevelopmentConfig')
# Flask-SQLAlchemy
db = SQLAlchemy(app)
# Flask-mail
mail = Mail(app)
# HTML min
HTMLMIN(app)
# Flask-sijax
flask_sijax.Sijax(app)
# Import models
from app.user.models import User, Role
# Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
## import blueprints
from app.index.views import indexBP
from app.map.views import mapBP
from app.user.views import userBP
from app.sites.rooms.views import roomsBP
from app.sites.transport.views import transportBP
## Register blueprints
app.register_blueprint(indexBP, url_prefix='')
app.register_blueprint(mapBP, url_prefix='/map')
app.register_blueprint(userBP, url_prefix='/user')
app.register_blueprint(roomsBP, url_prefix='/rooms')
app.register_blueprint(transportBP, url_prefix='/transport')
# Error handlers
@app.route('/error404')
def error404():
return render_template('errors/404.html')
|
[
"henrik@vipilon.dk"
] |
henrik@vipilon.dk
|
d0c4325e46632c9ee28107a7ddec6c8ce15010bc
|
3f71a817cfb54eb388f9eb30c9cd4a503ded77e7
|
/join/migrations/0001_initial.py
|
8a28f3c5f06953b4e757a9eaecef18a104d59718
|
[] |
no_license
|
diegomatar/irbolsa
|
f3737a4e85a8d4067aa62e88b03e97ac0fe5f35a
|
adec275f07bb7e8393dd9ca4ed8c6613b0688c8e
|
refs/heads/master
| 2020-05-16T22:51:45.310640
| 2015-07-19T20:48:09
| 2015-07-19T20:48:09
| 16,681,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Join'
db.create_table(u'join_join', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=250)),
('name', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'join', ['Join'])
def backwards(self, orm):
# Deleting model 'Join'
db.delete_table(u'join_join')
models = {
u'join.join': {
'Meta': {'object_name': 'Join'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
}
}
complete_apps = ['join']
|
[
"diegomatar@gmail.com"
] |
diegomatar@gmail.com
|
3a875bc89bf0d83cc7e80b9fa971cd13322cde19
|
133312f6347f283685b6492476dc2fa844b48124
|
/DSCLinker/wsgi.py
|
9aa4a5fc6dbbec174965974e9f71d498df2913d6
|
[] |
no_license
|
durgesh2001/DSCLinker
|
35bc974bf9d9eb81d938aaeff62f99e43f88d41d
|
fedfb0b4a2de663da02c90d967db21395911579e
|
refs/heads/main
| 2023-08-13T11:47:45.213015
| 2021-10-06T17:54:52
| 2021-10-06T17:54:52
| 413,177,651
| 0
| 1
| null | 2021-10-06T17:38:45
| 2021-10-03T19:32:30
|
Python
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for DSCLinker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DSCLinker.settings')
application = get_wsgi_application()
|
[
"michaeldurgesh@gmail.com"
] |
michaeldurgesh@gmail.com
|
a8580c754c4c9e01c2cef4b77ddbea9386055f1e
|
310719469c00c7663db9b1958b99784dc775b50f
|
/ProjektUmowy/settings.py
|
ad6777bf4cf9c16b24659bd1760f0d8be0c182dd
|
[] |
no_license
|
msm3858/projektumowy
|
b4c0e8d760c63111d2b9dc402496a40a6742ab2d
|
80ea7a376f908b9c784b38104312e4a8df487b3e
|
refs/heads/master
| 2021-01-25T14:55:44.601063
| 2018-03-03T22:28:00
| 2018-03-03T22:28:00
| 123,736,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,467
|
py
|
"""
Django settings for ProjektUmowy project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_w4uai%-_qd^gc_5b6z)85j#s#j+2$5xpvylw(w!4#!ttu(n%o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'umowy.apps.UmowyConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ProjektUmowy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ProjektUmowy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'umowy',
'USER': 'ziomek',
'PASSWORD': 'admin',
'HOST': 'localhost',
'PORT': '',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'pl'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
DATE_FORMAT = 'd-n-Y'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
LOGIN_REDIRECT_URL = 'umowy:index'
LOGIN_URL = 'umowy:login'
|
[
"="
] |
=
|
419f91ca1f613bbe63fe2dbd179ce02d31f7d5a1
|
c9f54e1a2e11a033b53b4f12564c7b87c5ce1a4a
|
/one_time/environment_create.py
|
727284811b13add5fd6d62eeedff615c51d4cfd9
|
[] |
no_license
|
mikewycklendt/dcadventures
|
b2e5e38ed53a698bb3c18c5b332df424540a18e3
|
542f90c3cce859416de14e40bdebf6a8cddcf67a
|
refs/heads/master
| 2023-06-10T17:00:33.380125
| 2021-06-21T20:38:25
| 2021-06-21T20:38:25
| 290,849,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
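# Note: this fragment assumes a Flask `app`, a SQLAlchemy `db` session and an
# `Environment` model defined elsewhere in the project; it is not runnable alone.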
@app.route('/env/create')
def env_create():
environment = ['Underwater', 'Zero Gravity', 'Mountains', 'Jungle', 'Desert', 'Volcano', 'Space', 'Woodlands', 'Arctic']
for i in environment:
entry = Environment(name=i, show=True)
db.session.add(entry)
db.session.commit()
results = Environment.query.all()
for result in results:
print (result.id)
print (result.name)
return ('environments added')
|
[
"mikewycklendt@gmail.com"
] |
mikewycklendt@gmail.com
|
5415341df0ce6d42fac8a85929bd3883209db3ec
|
3bdf42f3cc30af47aef031492154022b5edf212c
|
/OutOfBag/G/KMeans.py
|
f04cd28cfd3adaa22917fe85c03463a6e0edfc82
|
[] |
no_license
|
shuowenwei/LeetCodePython
|
a86873189a9fce011782496e4d8519731efc0519
|
0821af55eca60084b503b5f751301048c55e4381
|
refs/heads/master
| 2022-10-12T19:24:39.709886
| 2022-10-07T03:53:54
| 2022-10-07T03:53:54
| 59,629,706
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,264
|
py
|
import numpy as np
import matplotlib.pyplot as plt

class K_Means:
def __init__(self, k=2, tol=0.001, max_iter=300):
self.k = k
self.tol = tol
self.max_iter = max_iter
def fit(self, data):
        # if len(data) < self.k:
        #     raise Exception("Sorry, k must be <= the number of data points")
self.centroids = {}
for i in range(self.k):
self.centroids[i] = data[i]
for _ in range(self.max_iter):
# each iteration, initialize a new classification dictionary
self.classifications = {}
for i in range(self.k):
self.classifications[i] = []
for featureset in data:
distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(featureset)
prev_centroids = dict(self.centroids)
for classification in self.classifications:
self.centroids[classification] = np.average(self.classifications[classification], axis=0)
optimized = True
for c in self.centroids:
original_centroid = prev_centroids[c]
current_centroid = self.centroids[c]
if np.sum((current_centroid - original_centroid) / original_centroid*100.0) > self.tol:
print(np.sum((current_centroid - original_centroid) / original_centroid*100.0))
optimized = False
if optimized:
break
def predict(self,data):
distances = [np.linalg.norm(data-self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
return classification
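
# X and colors are not defined in this file; a minimal made-up setup so the
# demo below can run stand-alone:
X = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]])
colors = ["g", "r", "c", "b", "k", "y"]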
model = K_Means()
model.fit(X)
for centroid in model.centroids:
plt.scatter(model.centroids[centroid][0], model.centroids[centroid][1],
marker="o", color="k", s=150, linewidths=5)
for classification in model.classifications:
color = colors[classification]
for featureset in model.classifications[classification]:
plt.scatter(featureset[0], featureset[1], marker="x", color=color, s=150, linewidths=5)
plt.show()
|
[
"weisw9@gmail.com"
] |
weisw9@gmail.com
|
20732fea5a89fbdaea0293971a4d67660276aa7b
|
767c07db1fb131047af3d9b0a065b8fdc8aac9ab
|
/53-pd/pd_groupby1_mtcars.py
|
520aea1548de3be4b50e2c7c8b950a666f397141
|
[] |
no_license
|
DUanalytics/pyAnalytics
|
e52c5469da30a5f436ec0f3120d9f15fb82fd9b3
|
107a08bebe46ea51afccfeae4a666213bb405d41
|
refs/heads/master
| 2023-07-08T04:32:54.758902
| 2023-07-03T14:37:04
| 2023-07-03T14:37:04
| 202,094,535
| 394
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,506
|
py
|
#Pandas -Group By
#%
import pandas as pd
pd.set_option('display.max_columns',11)
pd.set_option('display.width', 1000)
import numpy as np
import matplotlib.pyplot as plt
from pydataset import data
mtcars = data('mtcars')
mtcars.head()
df2 = mtcars
df2.head()
df2.columns
df2.am
df2[df2['am']== 0]['mpg'].min() #min mileage of cars=0(auto)
df2.groupby("am").agg({ "mpg" : "min" }) #min mileage of each am
# named aggregation, new in pandas 0.25
df2.groupby('gear').agg(Mean_mpg=('mpg','mean'), Mean_wt=('wt','mean'))
df2.groupby('gear').agg(Min_mpg=('mpg','min'), Max_mpg = ('mpg','max'))
df2.groupby("gear").agg( min_mpg=pd.NamedAgg(column='mpg', aggfunc='min'), max_mpg=pd.NamedAgg(column='mpg', aggfunc='max'), average_wt= pd.NamedAgg( column='wt', aggfunc=np.mean))
df2.groupby("gear").mpg.agg( min_mpg="min", max_mpg="max", sd_mpg='std' )
#first and last mpg for each gear type
gGear = df2.groupby('gear').apply(lambda x: x.sort_values(['mpg']))
gGear
df2.groupby('gear', sort=1).mpg.agg([lambda x: x.iloc[0], lambda x: x.iloc[-1]])
df2.groupby('gear').mpg.agg([lambda x: x.iloc[0], lambda x: x.iloc[1], lambda x: x.iloc[-1]])
#groupby
df2.groupby('cyl')
df2.groupby('cyl').size()
df2.groupby('cyl')['mpg'].mean()
df2.groupby('cyl')[['mpg','hp','wt']].mean()
df2.groupby('cyl').describe()
df2.groupby('cyl').describe().unstack()
#multiple groups
df2.groupby(['cyl','gear'])
df2.groupby(['cyl','gear']).aggregate(['mean'])
df2.groupby(['cyl','gear']).size().unstack(fill_value=0)
#value counts works on series
df2.cyl.value_counts()
df2.groupby('cyl').aggregate({'mpg':'mean'})
#see column headings
df2.groupby('cyl', as_index=True).aggregate({'mpg':'mean'}) #index - cyl
df2.groupby('cyl', as_index=False).aggregate({'mpg':'mean'}) #no index
#sort
df2.groupby(['cyl','gear'], sort=False, as_index= True).aggregate({'mpg' :'mean'})
df2.groupby(['cyl','gear'], sort=True, as_index= True).aggregate({'mpg' :'mean'})
df2[['mpg','wt','gear']].groupby('gear').apply(lambda x: x.sort_values(by = 'mpg', ascending = 1))
df2
df2.sort_values(by='mpg')
df2.sort_values(by=['cyl','wt'])
df2.apply(lambda x: x.sort_values()) #index column only
#level
df2.groupby(level=0)['mpg'].mean() #first level of index (here only 1)
#only 1 index column ie car names
df2.groupby(level=1)['mpg'].mean() #error as there is no index
#axis
df2.groupby(['cyl','gear'], axis=0, sort=True).aggregate({'mpg':'mean'})
#sort by row index
df2.groupby(['cyl','gear'], axis=1, sort=True).aggregate({'mpg':'mean'}) #error : cannot index cols
df2.groupby(['cyl','gear'], axis=1, sort=True)['mpg'].mean() #error
#observed
df2['gear'] = df2['gear'].astype('category')
df2.groupby(['cyl','gear'], observed=True).aggregate({'mpg':'mean'})
df2.groupby(['cyl','gear'], observed=False).aggregate({'mpg':'mean'})
#groupkeys - with apply, index pieces
#squeeze - reduce dimn : 1 output
df2.groupby(['cyl','gear'], squeeze=True)['mpg'].mean()
df2.groupby(['cyl','gear'], squeeze=False)['mpg'].mean()
#df2.pivot_table(index=['cyl'], columns='gear', aggfunc='size', fill_value=0)
#attributes
df2.groupby('gear')
geargp = df2.groupby('gear')
geargp.groups
len(geargp) #3 groups
#groupby multiindex
df2b = df2.set_index(['gear','cyl'])
df2b
#columns shifted to first 2 columns as index
group2b = df2b.groupby('carb')
df2b.groupby(level=0).size()
df2b.groupby(level='gear').size()
df2b.groupby(level=1).size()
df2b.groupby(level='cyl').size()
df2b
df2b.groupby(level=[0,1]).size() #gear-level0, cyl-Level1
df2b.groupby(level=['gear','cyl']).size()
df2b.groupby(level=[1,0]).size()
df2b.groupby(level=['cyl','gear']).size()
df2b.groupby(level=['cyl','gear']).sum()
#index and columns
df2b.groupby([pd.Grouper(level=1), 'carb']).size()
df2b.groupby(['gear', 'carb']).size() #can be specified directly
df2b.groupby([pd.Grouper(level=0), 'carb']).size()
df2b.groupby([pd.Grouper(level=[0,1]), 'carb']).size() #not working
#find groups
group2 = df2.groupby('gear')
group2.get_group(4) #4 gear cars
group3 = df2.groupby(['gear','am'])
group3.get_group((4,0)) #4 gear cars
#aggregation
group2.aggregate(np.sum)
group2.agg(np.sum) #agg same as aggregate
group3.agg(np.sum)
group3.agg(np.sum).reset_index() #remove index
#if index names not required
group4 = df2.groupby(['gear','am'], as_index=False)
group4.agg(np.sum)
#no index column now
group4.sum()
group4.size()
group4.describe()
group4.count() #size better
#other functions
#mean(), sum(), size(), count(), std(), var(), sem(), describe(), first(), last(), nth(), min(), max()
#aggregating multiple functions
group4.groups
group4.size()
group4.sum()
group4['mpg'].agg([np.sum, np.mean, np.median]) #one column agg mpg
group4.agg([np.sum, np.mean, np.median]) #all numeric columns
#rename columns as you aggregate
group4['mpg'].agg([np.sum, np.mean, np.median])
group4['mpg'].agg([np.sum, np.mean, np.median]).rename(columns={'sum':'Total', 'mean':"Average", 'median':'Middle_Value'})
#rename columns as you aggregate
group4['mpg'].agg([np.sum, np.mean, np.median])
group4.agg([np.sum, np.mean, np.median]).rename(columns={'sum':'Total', 'mean':"Average", 'median':'Middle_Value'}) #all columns
#different functions different columns
group4.agg({'mpg':np.mean, 'wt':[np.mean, np.median], 'hp':'std'})
group4.agg({'mpg':np.mean, 'wt': lambda x:np.std(x) + 2}) #lambda functions
group4.agg({'mpg':np.mean, 'wt':[np.mean, 'mean'], 'hp':'mean'}) #error
#functions can be strings but must unique function defn
group4.agg({'mpg':np.mean, 'wt':[np.median,'mean'], 'hp':'mean'}) #error
#apply function
group4.apply(lambda x:x.describe())
group4.apply(lambda x:np.multiply(x,2))
#unobserved
pd.Series([1,2,3,4])
pd.Series([1,2,3,4]).groupby(pd.Categorical(['a','b','a','a']))
pd.Series([1,2,3,4]).groupby(pd.Categorical(['a','b','a','a'], categories=['a','b','c']), observed=False).count()
#show only observed values
pd.Series([1,2,3,4]).groupby(pd.Categorical(['a','b','a','a'], categories=['a','b','c']), observed=True).count()
#no c category here
#first/last rows of each group
group4.size()
group4.first()
group4.last()
#nth row
group4.nth(1) #check & compare
group4.nth(-1) #last
group4.nth(-1, dropna='any') #drop any ??
group4.nth(-1, dropna='all') #drop all??
#enumerate
group4.cumcount()
#where each row is
#listing
list(group4)
#add prefix
group4.mean()
group4.mean().add_prefix('MEAN_')
#function
def get_status(group):
return {'min': group.min(), 'max':group.max()}
get_status(group4)
group4.apply(get_status)
#end----------------------------
|
[
"dup1966@gmail.com"
] |
dup1966@gmail.com
|
5168e9f2905c6f970952595ab106f2c5d32da5a6
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_285/ch5_2019_04_04_20_41_17_645524.py
|
f380d7683bb0ac22f7fc58d7e72786323413a3e5
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
def maior_primo_menor_que(n):
for i in range(1,n):
x=0
for k in range(1,n):
if i%k==0:
x+=1
if x<=2:
primo=i
if primo<=1:
primo=-1
return primo
|
[
"you@example.com"
] |
you@example.com
|
5115dbdab7c9a002e97baa1e8b601b71b88f13de
|
957cc36f8368e3baa98c77cd3895daefa2e55fc0
|
/DSS_translated_to_Python.py
|
a22bf6bee2873940cd1a04d4a3dd2a24ec272281
|
[] |
no_license
|
kalenkovich/pyDSS
|
6f0a093d8e487f60be60eba6dabb93a16cdd95d4
|
622fabbef185958976d8dffc5964c09e1868696e
|
refs/heads/master
| 2020-05-18T06:40:44.681447
| 2019-05-17T08:25:24
| 2019-05-17T08:25:24
| 184,241,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,379
|
py
|
# coding: utf-8
# This is a test of the translation of the DSS algorithm from Matlab to Python. The Matlab source is taken from `NoiseTools` package that can be found [here](http://audition.ens.fr/adc/NoiseTools/).
#
# The test consists of running an example from the `NoiseTools` in parallel in Matlab and Python and matching the results.
# # Setup
# ## Imports
from pathlib import Path
import inspect
# Code highlighting
from pygments import highlight
from pygments.lexers import MatlabLexer, PythonLexer
from pygments.formatters import HtmlFormatter
from IPython.core.display import HTML
from IPython.display import display  # `display` is otherwise only injected in live IPython sessions
# This is a package that allows us to interface with a Matlab process
from pymatbridge import Matlab
import numpy as np
import scipy
from matplotlib import pyplot as plt
# ## Set up autoreloading of the Python code
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '1 # autoreload only the source loaded with %aimport')
get_ipython().run_line_magic('aimport', 'dss')
# ## Paths
# Change this to wherever you unpacked `NoiseTools` to.
noise_tools_dir = Path('NoiseTools')
noise_tools_examples_dir = noise_tools_dir / 'EXAMPLES' / 'a few old example scripts'
example_1 = noise_tools_examples_dir / 'example1.m'
# ## Printing code
def print_highlighted_code(code, lexer):
display(HTML(highlight(code, lexer, HtmlFormatter(full=True, linenos='inline'))))
def print_matlab_code(code):
print_highlighted_code(code, MatlabLexer())
def print_python_code(code):
print_highlighted_code(code, PythonLexer())
def print_matlab_script(path):
with open(path, 'r') as f:
code = f.read()
print_matlab_code(code)
def print_python_function(fun):
code = inspect.getsource(fun)
print_python_code(code)
# ## Start Matlab
matlab = Matlab()
matlab.start()
# ## Add `NoiseTools` to the Matlab's path
matlab.run_code('addpath(\'{}\')'.format(noise_tools_dir))
# # Simulate data
# Let's look at the example 1 code:
print_matlab_script(example_1)
# Let's create synthetic data in Matlab and transfer it here.
example_1_code = open(example_1, 'r').readlines()
synthesize_data_code = ''.join(example_1_code[9:21])
print_matlab_code(synthesize_data_code)
matlab.run_code(synthesize_data_code)
data = matlab.get_variable('data')
print(data.shape)
# That is 300 time points, 30 channels and 100 trials.
# # Calculate covariances
# ## Inspect the `NoiseTools` code
# Here is how the covariance matrices are calculated:
covariances_code = ''.join(example_1_code[22:24])
print_matlab_code(covariances_code)
# Let's see what `nt_cov` does.
nt_cov = noise_tools_dir / 'nt_cov.m'
print_matlab_script(nt_cov)
# The example code only uses the first argument `x` and only the first output `c`. Also, both `data` and `mean(data,3)` are single matrices, not arrays of matrices. Therefore, the checks on line 33 (`isempty(w)`) and on line 35 (`isnumeric(x)`) both evaluate to `true`. So, the only piece of code relevant to us is this:
nt_cov_code = open(nt_cov, 'r').readlines()
print_matlab_code(''.join(nt_cov_code[35:42]))
# Let's see what `nt_multishift` does. First, we need to figure out what `shifts` is since we didn't supply this parameter. Here are the relevant lines:
print_matlab_code(''.join(nt_cov_code[26:29]))
# So, shifts is just `0`. Let's see what `nt_multishift` does in this case.
nt_multishift_path = noise_tools_dir / 'nt_multishift.m'
print_matlab_script(nt_multishift_path)
# And here is the relevant part:
nt_multishift_code = open(nt_multishift_path, 'r').readlines()
print_matlab_code(''.join(nt_multishift_code[28:32]))
# So, with `shifts == 0` function `nt_multishift` does not change the input. Let's get back to the `nt_cov` code then.
nt_cov_code = open(nt_cov, 'r').readlines()
print_matlab_code(''.join(nt_cov_code[35:42]))
# Since `data` is shaped as `nsamples`-by-`nchans`-by-`ntrials` then so is `x`.
#
# For each `k`:
# - `xx` is the data from one trial,
# - `xx'*xx` is uncentered covariance of channels in time during a single trial.
#
# Then `c` is the sum of per-trial uncentered covariances.
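# A direct numpy rendering of that computation (added for illustration; it
# mirrors what nt_cov does when shifts == 0):
c_manual = sum(data[:, :, k].T @ data[:, :, k] for k in range(data.shape[2]))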
# ## Run the Matlab code
print_matlab_code(covariances_code)
matlab.run_code(covariances_code)
c0 = matlab.get_variable('c0')
c1 = matlab.get_variable('c1')
# ## Run the Python code
print_python_function(dss.calculate_covariances)
R0, R1 = dss.calculate_covariances(data, uncentered=True, unnormalized=True)
# ## Match
np.allclose(R0, c0)
np.allclose(R1, c1)
# # PCA
# The DSS algorithm includes two PCA rotations. Let's check that we do it in the same manner.
# ## Take a look at the `NoiseTools` code
pca_path = noise_tools_dir / 'nt_pcarot.m'
print_matlab_script(pca_path)
# We won't be working with sparse matrices so the Python code won't have the `N` parameter.
# ## Take a look at the Python code
print_python_function(dss.covariance_pca)
# ## Run the code
# ### Matlab
matlab.run_code('[E, D] = nt_pcarot(c0);')
E = matlab.get_variable('E')
D = matlab.get_variable('D')
# ### Python
eigvals, eigvecs = dss.covariance_pca(R0)
# ## Match
np.allclose(eigvals, D)
np.allclose(eigvecs, E)
# Let's take a look at the topleft corners.
eigvecs[:5, :5]
E[:5, :5]
# The first problem is some vectors differ in sign. Let's coerce the first row of both matrices to be positive.
eigvecs *= np.sign(eigvecs[np.newaxis, 0])
E = E * np.sign(E[np.newaxis, 0])
np.allclose(eigvecs, E)
# The second problem is that eigenvectors corresponding to the very small eigenvalues may differ a lot.
threshold = 1e-9
abs_threshold = max(eigvals) * threshold
np.allclose(eigvecs[:, eigvals > abs_threshold], E[:, D.squeeze() > abs_threshold])
# So, the results are equivalent. Let's check that both functions produce the same output if we ask them to remove eigenvalues below a given threshold.
matlab.set_variable(value=threshold, varname='threshold')
matlab.run_code('[E, D] = nt_pcarot(c0, [], threshold);')
E = matlab.get_variable('E')
D = matlab.get_variable('D')
eigvals, eigvecs = dss.covariance_pca(R0, threshold=threshold)
np.allclose(eigvals, D)
eigvecs *= np.sign(eigvecs[np.newaxis, 0])
E = E * np.sign(E[np.newaxis, 0])
np.allclose(eigvecs, E)
# # DSS from covariance matrices
# Here is how the dss is run in the example:
dss_code = ''.join(example_1_code[24:26])
print_matlab_code(dss_code)
# Two steps:
# 1. `nt_dss0` calculates the unmixing matrix,
# 2. `nt_mmat` applies it to the data.
#
# We'll add calculating the mixing matrix to the first step as well - it will give us the topographies of the components.
# ## Unmixing and mixing
# ### Matlab code
# Let's see what `nt_dss0` does.
nt_dss0_path = noise_tools_dir / 'nt_dss0.m'
print_matlab_script(nt_dss0_path)
# ### Python code
print_python_function(dss.unmix_covariances)
# ### Run code
# #### Matlab
unmixing_matlab_code = dss_code.split('\n')[0]
print_matlab_code(unmixing_matlab_code)
matlab.run_code(unmixing_matlab_code)
todss = matlab.get_variable('todss')
pwr0 = matlab.get_variable('pwr0')
pwr1 = matlab.get_variable('pwr1')
# #### Python
c0 = matlab.get_variable('c0')
c1 = matlab.get_variable('c1')
U, M, phase_locked_power, total_power = dss.unmix_covariances(c0, c1, threshold=1e-9, return_mixing=True, return_power=True)
# 1e-9 is the default threshold used in the Matlab code, see line 16
# ### Match
# Filters only need to match up to a sign of columns.
dss.allclose_up_to_sign(todss, U, component_axis=1)
# And they do.
# Matlab code takes a square root after power calculation so we'll do that as well before matching.
np.allclose(pwr0, np.sqrt(phase_locked_power))
np.allclose(pwr1, np.sqrt(total_power))
# ### Check the unmixing matrix
dss.is_pseudoinverse(U, M)
# ## Applying
# ### Matlab
applying_matlab_code = dss_code.split('\n')[1]
print_matlab_code(applying_matlab_code)
matlab.run_code(applying_matlab_code)
z = matlab.get_variable('z')
# ### Python
dss_applied = np.stack([epoch @ U for epoch in np.moveaxis(data, 2, 0)], axis=2)
# ### Match
dss.allclose_up_to_sign(z, dss_applied, component_axis=1)
# # Whole thing together
U, M, phase_locked_power, total_power = dss.dss(
data, cov_uncentered=True, cov_unnormalized=True,
threshold=1e-9, return_mixing=True, return_power=True)
dss.allclose_up_to_sign(U, todss, component_axis=1)
|
[
"e.kalenkovich@gmail.com"
] |
e.kalenkovich@gmail.com
|
0cf7314d3c53af83e62431f47cd686002240c6fa
|
95e3df357914bbb287cd4a53ac166910088f4f9f
|
/2.Data Wrangle OpenstreetMaps Data/Code for Project/audit2.py
|
f9af28b29fac79dbb12bcc5d1a7612c94cad105d
|
[] |
no_license
|
poonkin/Udacity-2
|
ed58afb2902742a5f34e81072d45a74d15a5a5da
|
5aabdce377db79bb5d1133460ce5e85ec207ff37
|
refs/heads/master
| 2021-01-17T06:29:38.770780
| 2015-03-15T06:54:08
| 2015-03-15T06:54:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
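# Note: assumes `st_types`, `mapping` and `update_name` come from the
# surrounding audit script (Python 2 code: iteritems / print statement).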
# Execution
for st_type, ways in st_types.iteritems():
for name in ways:
better_name = update_name(name, mapping)
print name, "=>", better_name
|
[
"yanyachen@users.noreply.github.com"
] |
yanyachen@users.noreply.github.com
|
8cac904ee71ae5c4ecb35db6d604c241f1c83402
|
e8f0334572e46bd52ff66660bd4e0b335b5de619
|
/main.py
|
bfbea0dc2b39665c26d351a630877b397ab38ae9
|
[] |
no_license
|
00012121/Seminar_4
|
19fedabdbc186dcbd186f5ba79d121c2f7ee1589
|
0be5cc3cb0bd20c074db82716f393bf63c558e0d
|
refs/heads/master
| 2023-08-13T14:43:02.624936
| 2021-10-15T05:21:05
| 2021-10-15T05:21:05
| 417,370,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
print("yes")
print("tesgfasdf")
print("tesgfasdf");
|
[
"wiut00012121@gmail.com"
] |
wiut00012121@gmail.com
|
8073a0e1716bdee0601790717843a54d8c410fe2
|
25a532c41440300f240845523b4c3ae82db03e0f
|
/pyB/rpython_b_objmodel.py
|
597f9072b34aca7474449bdbe48d5c39f37e14c1
|
[] |
no_license
|
hhu-stups/pyB
|
b262e767b9e85251be8b86846a557bd3ff6d51de
|
5915f816cab3230e6d1b46dcb785e13f902dec63
|
refs/heads/master
| 2020-05-21T19:42:06.536979
| 2018-09-24T10:36:25
| 2018-09-24T10:36:25
| 8,031,304
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,199
|
py
|
# Wrapped basic types to allow RPython typing (everything is a W_Object).
# Immutable (methods always return a new W_XXX) to get better performance
# results with RPython translation -- except the boolean methods!
import sys
from config import PRINT_WARNINGS, USE_RPYTHON_CODE, RYPYTHON_PATH
sys.path.append(RYPYTHON_PATH)
from rpython.rlib.objectmodel import r_dict
from rpython.rlib.objectmodel import compute_hash
def my_eq(obj0, obj1):
return obj0.__eq__(obj1)
def my_hash(obj):
return obj.__my_hash__()
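# These helpers let W_Object instances key an RPython r_dict, which takes
# explicit eq/hash functions instead of relying on __eq__/__hash__,
# e.g. (sketch): d = r_dict(my_eq, my_hash)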
class W_Object:
#_settled_ = True
#_attrs_ = []
def __init__(self):
pass
def __contains__(self, e):
raise Exception("abstract W_Object instance _contains_ called")
def __repr__(self):
return "w-obj"
def __my_hash__(self):
hash = compute_hash(self.__repr__())
return hash
class W_Tuple(W_Object):
def __init__(self, tvalue):
# e.g. W_Tuple((W_Tuple((W_Integer(1),W_Integer(2))),W_Integer(3)))
assert isinstance(tvalue, tuple) or isinstance(tvalue, W_Tuple)
if isinstance(tvalue, tuple):
assert isinstance(tvalue[0], W_Object)
assert isinstance(tvalue[1], W_Object)
self.tvalue = tvalue
def __eq__(self, other):
if not isinstance(other, W_Tuple):
return False
return self.tvalue[0].__eq__(other.tvalue[0]) and self.tvalue[1].__eq__(other.tvalue[1])
    def __ne__(self, other):
        assert isinstance(other, W_Tuple)
        return self.tvalue != other.tvalue
def __getitem__(self, key):
if key==0:
return self.tvalue[0]
elif key==1:
return self.tvalue[1]
else:
raise Exception("PyB-ERROR: illegal tuple index in W_Tuple")
def __repr__(self):
return str(self.tvalue)
def clone(self):
t0 = self.tvalue[0].clone()
t1 = self.tvalue[1].clone()
return W_Tuple((t0,t1))
# Cantor pairing function
# https://en.wikipedia.org/wiki/Pairing_function
def __my_hash__(self):
x = self.tvalue[0].__my_hash__()
y = self.tvalue[1].__my_hash__()
return y+((x+y)*(x+y+1))/2
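    # Worked example (illustrative): for element hashes x=2, y=3 the pairing
    # yields 3 + ((2+3)*(2+3+1))/2 = 3 + 15 = 18, unique to the ordered pair.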
# sadly only single inheritance is allowed in RPython :(
# reimplementation of all needed integer operations
# special methods are NOT supported by RPython. But they allow easy switching between
# built-in types and wrapped types in the Python version. A measurement of
# the speed and space loss is possible.
class W_Integer(W_Object):
#_settled_ = True
def __init__(self, ivalue):
assert isinstance(ivalue, int)
self.ivalue = ivalue
def __repr__(self):
return str(self.ivalue)
def __str__(self):
return str(self.ivalue)
def __add__(self, other):
assert isinstance(other, W_Integer)
return W_Integer(self.ivalue + other.ivalue)
def __sub__(self, other):
assert isinstance(other, W_Integer)
return W_Integer(self.ivalue - other.ivalue)
def __mul__(self, other):
assert isinstance(other, W_Integer)
return W_Integer(self.ivalue * other.ivalue)
# Maybe unused
def __div__(self, other):
assert isinstance(other, W_Integer)
return W_Integer(self.ivalue / other.ivalue)
def __floordiv__(self, other):
assert isinstance(other, W_Integer)
return W_Integer(self.ivalue // other.ivalue)
def __lt__(self, other):
assert isinstance(other, W_Integer)
#print "DEBUG:", self.ivalue, other.ivalue ,self.ivalue < other.ivalue
return self.ivalue < other.ivalue
def __le__(self, other):
assert isinstance(other, W_Integer)
return self.ivalue <= other.ivalue
def __eq__(self, other):
if not isinstance(other, W_Integer):
return False
return self.ivalue == other.ivalue
def __ne__(self, other):
assert isinstance(other, W_Integer)
return self.ivalue != other.ivalue
def __gt__(self, other):
assert isinstance(other, W_Integer)
return self.ivalue > other.ivalue
def __ge__(self, other):
assert isinstance(other, W_Integer)
return self.ivalue >= other.ivalue
def __neg__(self):
return W_Integer(-1*self.ivalue)
def __mod__(self, other):
return W_Integer(self.ivalue % other.ivalue)
def __contains__(self, e):
raise Exception("Nothing is member of a W_Integer")
def clone(self):
return W_Integer(self.ivalue)
def __my_hash__(self):
return self.ivalue
class W_Boolean(W_Object):
#_settled_ = True
def __init__(self, bvalue):
#assert isinstance(value, bool)
self.bvalue = bvalue
def __and__(self, other):
assert isinstance(other, W_Boolean)
boolean = self.bvalue and other.bvalue
#assert isinstance(boolean, bool)
return boolean
def __or__(self, other):
assert isinstance(other, W_Boolean)
boolean = self.bvalue or other.bvalue
#assert isinstance(boolean, bool)
return boolean
def __not__(self):
boolean = not self.bvalue
assert isinstance(boolean, bool)
return boolean
def __eq__(self, other):
if not isinstance(other, W_Boolean):
return False
boolean = self.bvalue == other.bvalue
assert isinstance(boolean, bool)
return boolean
def __repr__(self):
return str(self.bvalue)
def __str__(self):
return str(self.bvalue)
def __contains__(self, e):
raise Exception("Nothing is member of a W_Boolean")
def clone(self):
return W_Boolean(self.bvalue)
def __my_hash__(self):
if self.bvalue:
return 1
else:
return 0
class W_None(W_Object):
#_settled_ = True
def __contains__(self, e):
raise Exception("Nothing is member of a W_None")
def __repr__(self):
return "None"
def clone(self):
return W_None()
# elements of enumerated sets or machine parameter sets
class W_Set_Element(W_Object):
#_settled_ = True
def __init__(self, string):
assert isinstance(string, str)
self.string = string
def __eq__(self, other):
if not isinstance(other, W_Set_Element):
return False
return self.string==other.string
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return self.string
def clone(self):
return W_Set_Element(self.string)
class W_String(W_Object):
#_settled_ = True
def __init__(self, string):
assert isinstance(string, str)
self.string = string
def __eq__(self, other):
if not isinstance(other, W_String):
return False
return self.string==other.string
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return self.string
def clone(self):
return W_String(self.string)
# an import of this module will shadow the frozenset built-in type
# TODO: replace with a more efficient implementation.
# Different enumeration order than the built-in frozenset.
class frozenset(W_Object):
#_settled_ = True
def __init__(self, L=None):
W_Object.__init__(self)
if L is None:
L = []
self.hashmap = r_dict(my_eq, my_hash)
        # frozenset([1,1,2])==frozenset([1,2])
        # TODO: maybe the performance improves if cases like frozenset([1,1,2])
        # are never produced by pyB code. (only enumerated sets and the REPL need this check then)
assert isinstance(L, list)
for e in L:
self.hashmap[e] = True
self.repr_string = None
self.hash_computed = False
self.my_hash = 0
# TODO: more efficient impl
def __repr__(self):
if self.repr_string is None:
string = ["{"]
for e in self.hashmap:
string.append(str(e) +",")
string.append("}")
self.repr_string = "".join(string)
return self.repr_string
def __len__(self):
return len(self.hashmap)
def __contains__(self, element):
        # This uses the my_eq helper.
        # In the case of a set of sets this will call the __eq__ method of this class
return element in self.hashmap
def issubset(self, other):
for e in self.hashmap:
if e not in other.hashmap:
return False
return True
def issuperset(self, other):
for e in other.hashmap:
if e not in self.hashmap:
return False
return True
def union(self, other):
#new_map = r_dict(my_eq, my_hash)
#for e in self.hashmap:
# new_map[e] = True
#for e in other.hashmap:
# new_map[e] = True
result = frozenset()
new_map = self.hashmap.copy()
new_map.update(other.hashmap)
result.hashmap = new_map
return result
def intersection(self, other):
new_map = r_dict(my_eq, my_hash)
for e in other.hashmap:
if e in self.hashmap:
new_map[e] = True
result = frozenset()
result.hashmap = new_map
return result
def __sub__(self, other):
return self.difference(other)
def difference(self, other):
new_map = self.hashmap.copy()
for e in other.hashmap:
if e in new_map:
del new_map[e]
result = frozenset()
result.hashmap = new_map
return result
def copy(self):
result = frozenset()
result.hashmap = self.hashmap.copy()
return result
# WARNING: set([1,2,3])!=frozenset([1,2,3])
def __eq__(self, other):
from symbolic_sets import SymbolicSet
if not isinstance(other, frozenset) and not isinstance(other, SymbolicSet):
return False
assert isinstance(other, frozenset) or isinstance(other, SymbolicSet)
if not self.__len__()==other.__len__():
return False
#if self.__len__()==0 and other.__len__()==0:
# return True
if isinstance(other, frozenset):
for e in self.hashmap:
if not e in other.hashmap:
return False
for e in other.hashmap:
if not e in self.hashmap:
return False
return True
# Wrong: e.g. {1} : {{1},{1,2}}
#return self.hashmap == other.hashmap
else:
for e in self.hashmap:
if not other.__contains__(e):
return False
for e in other:
if not e in self.hashmap:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
    # only a copy of the instance will prevent an enumeration bug. This
    # bug occurs when the set is enumerated twice (or more) at the same time
    # e.g.
    # for x in S:
    #     for y in S: ...
    # (used by recursive generators)
def __iter__(self):
copy = frozenset()
copy.hashmap = self.hashmap.copy()
copy.generator = self.w_frozenset_generator()
return copy
    # also enables the list(some_frozenset) cast
def next(self):
return self.generator.next()
def w_frozenset_generator(self):
for e in self.hashmap:
yield e
    def clone(self):
        # deep-copy the elements into a fresh frozenset
        clone = frozenset()
        for e in self.hashmap:
            clone.hashmap[e.clone()] = True
        return clone
def to_list(self):
#lst = []
#for e in self.hashmap:
# lst.append(e)
#return lst
return self.hashmap.keys()
def __setitem__(self, key, value):
remove_tuple = None
for t in self.hashmap:
if t.tvalue[0].__eq__(key):
remove_tuple = t
if not remove_tuple is None:
del self.hashmap[remove_tuple]
self.hashmap[W_Tuple((key, value))] = True
"""
# Cantor k-tuple function
# https://en.wikipedia.org/wiki/Pairing_function
def __my_hash__(self):
if not self.hash_computed:
hash = 0 # empty set hash is 0
lst = self.hashmap.keys()
if not len(lst)==0:
hash = lst.pop().__my_hash__()
while not len(lst)==0:
x = hash
y = lst.pop().__my_hash__()
hash = y+((x+y)*(x+y+1))/2
self.my_hash = hash
self.hash_computed = True
return self.my_hash
"""
|
[
"John.Witulski@uni-duesseldorf.de"
] |
John.Witulski@uni-duesseldorf.de
|
e3c1110febed38dbb271bdcb22a91f0ce6883dbf
|
849e95a72f4f380d6b31573a0a13e9eccd288838
|
/legal-api/migrations/versions/8c74427a6c0e_document_table.py
|
ce9d42b94349d5f8d89ee6e8dac4b29336d97f1f
|
[
"Apache-2.0"
] |
permissive
|
bcgov/lear
|
d9b27e2b44ba607ca13878357a62a0623d54ddee
|
d90f11a7b14411b02c07fe97d2c1fc31cd4a9b32
|
refs/heads/main
| 2023-09-01T11:26:11.058427
| 2023-08-31T20:25:24
| 2023-08-31T20:25:24
| 168,396,249
| 13
| 117
|
Apache-2.0
| 2023-09-14T20:52:02
| 2019-01-30T18:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,875
|
py
|
"""document-table
Revision ID: 8c74427a6c0e
Revises: b7c4b2533f84
Create Date: 2021-08-17 06:11:01.634099
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8c74427a6c0e'
down_revision = 'b7c4b2533f84'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('documents',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('type', sa.String(length=30), nullable=False),
sa.Column('file_key', sa.String(length=100), nullable=False),
sa.Column('file_name', sa.String(length=100), nullable=False),
sa.Column('content_type', sa.String(length=20), nullable=False),
sa.Column('business_id', sa.Integer(), nullable=True),
sa.Column('filing_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['business_id'], ['businesses.id'], ),
sa.ForeignKeyConstraint(['filing_id'], ['filings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_documents_business_id'), 'documents', ['business_id'], unique=False)
op.create_index(op.f('ix_documents_filing_id'), 'documents', ['filing_id'], unique=False)
op.create_table('documents_version',
sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('type', sa.String(length=30), nullable=False),
sa.Column('file_key', sa.String(length=100), nullable=False),
sa.Column('file_name', sa.String(length=100), nullable=False),
sa.Column('content_type', sa.String(length=20), nullable=False),
sa.Column('business_id', sa.Integer(), nullable=True),
sa.Column('filing_id', sa.Integer(), nullable=True),
sa.Column('transaction_id', sa.BigInteger(), autoincrement=False, nullable=False),
sa.Column('end_transaction_id', sa.BigInteger(), nullable=True),
sa.Column('operation_type', sa.SmallInteger(), nullable=False),
sa.ForeignKeyConstraint(['business_id'], ['businesses.id'], ),
sa.ForeignKeyConstraint(['filing_id'], ['filings.id'], ),
sa.PrimaryKeyConstraint('id', 'transaction_id')
)
op.create_index(op.f('ix_documents_version_end_transaction_id'), 'documents_version', ['end_transaction_id'], unique=False)
op.create_index(op.f('ix_documents_version_business_id'), 'documents_version', ['business_id'], unique=False)
op.create_index(op.f('ix_documents_version_filing_id'), 'documents_version', ['filing_id'], unique=False)
op.create_index(op.f('ix_documents_version_operation_type'), 'documents_version', ['operation_type'], unique=False)
op.create_index(op.f('ix_documents_version_transaction_id'), 'documents_version', ['transaction_id'], unique=False)
op.add_column('businesses', sa.Column('association_type', sa.String(length=50), nullable=True))
op.add_column('businesses_version', sa.Column('association_type', sa.String(length=50), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_documents_filing_id'), table_name='documents')
op.drop_index(op.f('ix_documents_business_id'), table_name='documents')
op.drop_table('documents')
op.drop_index(op.f('ix_documents_version_end_transaction_id'), table_name='documents_version')
op.drop_index(op.f('ix_documents_version_business_id'), table_name='documents_version')
op.drop_index(op.f('ix_documents_version_filing_id'), table_name='documents_version')
op.drop_index(op.f('ix_documents_version_operation_type'), table_name='documents_version')
op.drop_index(op.f('ix_documents_version_transaction_id'), table_name='documents_version')
op.drop_table('documents_version')
op.drop_column('businesses_version', 'association_type')
op.drop_column('businesses', 'association_type')
# ### end Alembic commands ###
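For reference, this revision can be applied and rolled back programmatically as well as through the usual CLI (alembic upgrade head / alembic downgrade b7c4b2533f84). A minimal sketch, assuming an alembic.ini at the project root:

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")             # path is an assumption about the layout
command.upgrade(cfg, "8c74427a6c0e")    # runs upgrade() above
command.downgrade(cfg, "b7c4b2533f84")  # runs downgrade(), back to the parent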
|
[
"noreply@github.com"
] |
bcgov.noreply@github.com
|
726a3546e1a75330b41c5b98818aa8b938abd613
|
4903164971d3263cc8f18175ec36de5c33d0a38a
|
/lab4/first_task.py
|
b8a13ec569f6662f5300724f68526ddb930454a6
|
[] |
no_license
|
SamodelkinDA/infa_2021_varlamov
|
658087ec99d2719e87701553617bff199f794cba
|
a3c8ea237919de1da5a9291dc19e5118e3f82fb1
|
refs/heads/main
| 2023-08-22T15:04:52.732114
| 2021-10-08T07:47:22
| 2021-10-08T07:47:22
| 412,359,739
| 0
| 0
| null | 2021-10-01T06:48:50
| 2021-10-01T06:48:49
| null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
import pygame
from pygame.draw import *
pygame.init()
FPS = 15
screen = pygame.display.set_mode((800, 800))
YELLOW = (255, 255, 0)
RED = (255, 0, 0)
BLACK = (0, 0, 0)
GREY = (217, 217, 217)
rect(screen, GREY, [0, 0, 800, 800], 0)
circle(screen, YELLOW, [400, 400], 250, 0)
line(screen, BLACK, [520, 550], [280, 550], 37)
circle(screen, RED, [280, 330], 35, 0)
circle(screen, RED, [520, 330], 25, 0)
circle(screen, BLACK, [280, 330], 18)
circle(screen, BLACK, [520, 330], 9)
line(screen, BLACK, [150, 170], [1.1 * 303, 1.1 * 304], 12)
line(screen, BLACK, [600, 200], [501 - 20, 20 + 313], 12)
pygame.display.update()
clock = pygame.time.Clock()
finished = False
while not finished:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
finished = True
pygame.quit()
|
[
"varlamov.al@phystech.edu"
] |
varlamov.al@phystech.edu
|
80bea7b770acd282f216015f2a326ea37f193ab0
|
306a781b7bcf3cbc0da2ee293f8f470d633dec47
|
/server.py
|
883b46d82b7d2664342af4c88754068830e20884
|
[] |
no_license
|
preyas-mishra/chatroom-application-on-same-network
|
fe99c502b4d0b0675437920f3f41ec75417ecc39
|
26a10ae01ead5c298c243375cdac1b6326754ba6
|
refs/heads/main
| 2023-07-28T08:32:09.306130
| 2021-09-02T14:19:27
| 2021-09-02T14:19:27
| 402,447,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,005
|
py
|
import socket
import threading
#import chat
class Server(object):
def __init__(self, hostname, port):
self.clients = {}
# create server socket
self.tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# start server
self.tcp_server.bind((hostname, port))
self.tcp_server.listen(5)
print("[INFO] Server running on {}:{}".format(hostname, port))
while True:
connection, address = self.tcp_server.accept()
nickname = connection.recv(1024)
nickname = nickname.decode()
self.clients[nickname] = connection
# start a thread for the client
threading.Thread(target=self.receive_message, args=(connection, nickname), daemon=True).start()
print("[INFO] Connection from {}:{} AKA {}".format(address[0], address[1], nickname))
print("Total people connected: ",len(self.clients))
def receive_message(self, connection, nickname):
print("[INFO] Waiting for messages")
while True:
try:
msg = connection.recv(1024)
self.send_message(msg, nickname)
print(nickname + ": " + msg.decode())
except:
connection.close()
#remove user from users list
del(self.clients[nickname])
break
print(nickname, " disconnected")
def send_message(self, message, sender):
if len(self.clients) > 0:
for nickname in self.clients:
if nickname != sender:
msg = sender + ": " + message.decode()
self.clients[nickname].send(msg.encode())
    def totalClients(self):
        count = len(self.clients)
        return count
if __name__ == "__main__":
port = 5555
hostname = "0.0.0.0"
chat_server = Server(hostname, port)
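A minimal client sketch matching this server's wire protocol (the nickname must be the first payload on the socket; everything after that is broadcast). Host, port and nickname below are placeholders:

import socket
import threading

def run_client(host="127.0.0.1", port=5555, nickname="guest"):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    sock.send(nickname.encode())  # server reads the nickname first

    def listen():
        # print every broadcast the server relays from other clients
        while True:
            data = sock.recv(1024)
            if not data:
                break
            print(data.decode())

    threading.Thread(target=listen, daemon=True).start()
    while True:
        sock.send(input().encode())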
|
[
"noreply@github.com"
] |
preyas-mishra.noreply@github.com
|
5b0b2e2d88961cb5966b60ee7e827530990e1b03
|
d6525d326e133b4492071d89d4066615489d08c5
|
/other_algorithms/insert_sort.py
|
04741db1539a3ed8adc2e3d82b49e73da3d97f6d
|
[] |
no_license
|
iAnafem/Stepic_Algorithms.Theory_and_practise
|
85e03842925ea2649c198e6dd20c79636d64c79d
|
1c9bc87f662bd98448891dff299035f57f218ac4
|
refs/heads/master
| 2020-04-07T12:46:58.252891
| 2019-02-05T12:30:14
| 2019-02-05T12:30:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
A = [1, 4, 5, 6, -4, 3, 3, 4, -45, 5, 6, 7, -8, 6, 5, 4, -4]
N = len(A)
for top in range(1, N):
k = top
print(k)
while k > 0 and A[k-1] > A[k]:
A[k], A[k-1] = A[k-1], A[k]
k -= 1
print(A)
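The same insertion sort wrapped in a reusable function; a minimal sketch that drops the per-iteration prints:

def insert_sort(items):
    """In-place insertion sort: grow a sorted prefix one element at a time."""
    for top in range(1, len(items)):
        k = top
        # sift items[k] left until the prefix items[:top + 1] is ordered
        while k > 0 and items[k - 1] > items[k]:
            items[k], items[k - 1] = items[k - 1], items[k]
            k -= 1
    return items

print(insert_sort([1, 4, 5, 6, -4, 3, 3, 4, -45, 5, 6, 7, -8, 6, 5, 4, -4]))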
|
[
"DPronkin@mostro.ru"
] |
DPronkin@mostro.ru
|
078f4e4873e1e60966ee3d88a1b39126dd8bd317
|
58f05b14c1d6f1c164a6405c9969317086317ca3
|
/exr2.py
|
a142089d82c998c3361bc74e06991ccdd1260c4b
|
[] |
no_license
|
kelvinhovinho/mad-lib-game
|
f2c785a0c7df1276c312cc7dbf910e0bfc96b0a7
|
fc249afbf3fdf664dd5db7e9b20ec3a2e2806666
|
refs/heads/master
| 2023-06-25T18:47:42.761747
| 2021-07-28T12:18:13
| 2021-07-28T12:18:13
| 390,340,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# mad lib game
name = input('what is your name\n ')
age = int(input('what is your age\n '))
town = input('what is your home town\n ')
color = input('what is your favorite color\n ')
status = input('what is your relationship status\n ')
print(f'my name is {name}. i am {age} years old. my home town is {town} and my favorite color is {color}. unfortunately i am {status} and i regret it.')
|
[
"kelvinadamba@hotmail.com"
] |
kelvinadamba@hotmail.com
|
db4fbc8cc9596eb08c2e5e399c88fdc42e118905
|
ec2b6625290d93518dcbf1bed183a6c876b230a0
|
/bak/get_stock_list.py
|
13543354a07a4b7758db919bbe5393057d7a9fe9
|
[] |
no_license
|
Ye980226/lxneeq
|
9d030edcf26336382047a20f6ac6520b0750764d
|
3b9d9824b1befcdd43c646d7c1057aa6e42fb1aa
|
refs/heads/master
| 2020-05-17T10:03:01.421878
| 2019-05-04T15:26:44
| 2019-05-04T15:26:44
| 183,648,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
import requests
import os
from bs4 import BeautifulSoup
i = 1
dirname = os.path.dirname(os.path.abspath(__file__))
os.chdir(dirname)
filename = "stock.txt"
f = open(filename, "w")
BASE_URL = "http://www.jingujie.com/stocklist/?page=%d"
while i <= 769:
r = requests.get(BASE_URL % i)
i += 1
bsObj = BeautifulSoup(r.text, "lxml")
tbody = bsObj.find("tbody", {"id": "mores"})
trs = tbody.findAll("tr")
for tr in trs:
tds = tr.findAll("td")
stock_account = tds[0].text
stock_name = tds[1].text
f.write(stock_account + " " + stock_name + "\n")
f.flush()
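A slightly more defensive variant of the same scrape, assuming the markup stays as above (a tbody with id "mores", account and name in the first two cells). A shared Session reuses the connection and the with block guarantees the file is closed:

import os
import requests
from bs4 import BeautifulSoup

BASE_URL = "http://www.jingujie.com/stocklist/?page=%d"
out_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "stock.txt")

with requests.Session() as session, open(out_path, "w") as f:
    for page in range(1, 770):
        soup = BeautifulSoup(session.get(BASE_URL % page).text, "lxml")
        for tr in soup.find("tbody", {"id": "mores"}).findAll("tr"):
            tds = tr.findAll("td")
            f.write(tds[0].text + " " + tds[1].text + "\n")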
|
[
"18955993726@163.com"
] |
18955993726@163.com
|
25db676fe3fe5f0e0ae1c74cc258468e05e3f61f
|
b372b9a1bdaaa9263aa93664f8550896232dc12c
|
/list_collections/ll.py
|
4f1de1f277ee2da416cae18709aca3a0eef79755
|
[] |
no_license
|
shaiwilson/algorithm_practice
|
f029d72517d76905fd8fc8676b0a57c664563146
|
e7db19f9d1c13fff2f9dce348b96e59d068151e8
|
refs/heads/master
| 2021-01-21T13:17:39.058681
| 2016-05-15T06:51:28
| 2016-05-15T06:51:28
| 54,241,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,732
|
py
|
"""
this implementation of a linked list adds the new node at the head
of the list. The new item is the first item of the list and the
existing items are linked to this new first item so that they follow.
"""
class Node:
def __init__(self,initdata):
self.data = initdata
self.next = None
def getData(self):
return self.data
def getNext(self):
return self.next
def setData(self,newdata):
self.data = newdata
def setNext(self,newnext):
self.next = newnext
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def isEmpty(self):
return self.head == None
def addNode(self, newdata):
newNode = Node(newdata)
if self.head is None:
self.head = newNode
if self.tail is not None:
self.tail.next = newNode
self.tail = newNode
# remove a node by value
def removeNode(self, value):
if self.head and self.head.data == value:
            self.head = self.head.next
return
current = self.head
while current.next is not None:
if current.next.data == value:
current.next = current.next.next
return
else:
current = current.next
def removeNodePrev(self, value):
prev = None
curr = self.head
while curr is not None:
if curr.data == value:
if prev is None:
                    self.head = curr.next
else:
prev.next = curr.next
return
else:
                prev = curr
                curr = curr.next
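A quick usage sketch of the list above (illustrative values):

ll = LinkedList()
for value in [3, 1, 4, 1, 5]:
    ll.addNode(value)   # appended at the tail
ll.removeNode(4)        # unlinks the first node holding 4

node = ll.head          # walk and print the remaining data: 3 1 1 5
while node is not None:
    print(node.getData())
    node = node.getNext()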
|
[
"sjwilson2@usfca.edu"
] |
sjwilson2@usfca.edu
|
b7a900ff96fd23f345e5bc1b05aa71e21eb03b88
|
efcd21234f3291e8fc561f49a7c88fc57a63e952
|
/tests/unit/language/parsers/lark/test_parser.py
|
8c8dec3c8693acc9b6689af162d7e7d9861a47f2
|
[
"MIT"
] |
permissive
|
tartiflette/tartiflette
|
146214a43847d2f423bf74594643c1fdefc746f1
|
421c1e937f553d6a5bf2f30154022c0d77053cfb
|
refs/heads/master
| 2023-09-01T02:40:05.974025
| 2022-01-20T14:55:31
| 2022-01-20T14:55:31
| 119,035,565
| 586
| 39
|
MIT
| 2023-09-11T07:49:27
| 2018-01-26T09:56:10
|
Python
|
UTF-8
|
Python
| false
| false
| 389,497
|
py
|
import os
from unittest.mock import Mock, patch
import pytest
from tartiflette.language.ast import (
ArgumentNode,
BooleanValueNode,
DescriptionNode,
DirectiveDefinitionNode,
DirectiveNode,
DocumentNode,
EnumTypeDefinitionNode,
EnumTypeExtensionNode,
EnumValueDefinitionNode,
EnumValueNode,
FieldDefinitionNode,
FloatValueNode,
InputObjectTypeDefinitionNode,
InputObjectTypeExtensionNode,
InputValueDefinitionNode,
InterfaceTypeDefinitionNode,
InterfaceTypeExtensionNode,
IntValueNode,
ListTypeNode,
ListValueNode,
Location,
NamedTypeNode,
NameNode,
NonNullTypeNode,
NullValueNode,
ObjectFieldNode,
ObjectTypeDefinitionNode,
ObjectTypeExtensionNode,
ObjectValueNode,
OperationTypeDefinitionNode,
ScalarTypeDefinitionNode,
ScalarTypeExtensionNode,
SchemaDefinitionNode,
SchemaExtensionNode,
StringValueNode,
UnionTypeDefinitionNode,
UnionTypeExtensionNode,
)
from tartiflette.language.parsers.lark import parse_to_document
_BASE_DIR = os.path.dirname(__file__)
@pytest.mark.skip(reason="Shouldn't fail...")
@pytest.mark.parametrize(
"sdl_file_path",
[
os.path.join(_BASE_DIR, "fixtures", "simple.graphql"),
os.path.join(_BASE_DIR, "fixtures", "advanced.graphql"),
os.path.join(_BASE_DIR, "fixtures", "kitchen-sink.graphql"),
os.path.join(_BASE_DIR, "fixtures", "keyword-tokens.graphql"),
os.path.join(_BASE_DIR, "fixtures", "github.graphql"),
],
)
def test_parse_without_exception(sdl_file_path):
with open(sdl_file_path) as sdl_file:
assert isinstance(parse_to_document(sdl_file.read()), DocumentNode)
@pytest.mark.skip(reason="Shouldn't fail...")
@pytest.mark.parametrize(
"sdl_file_path,expected",
[
(
os.path.join(_BASE_DIR, "fixtures", "simple.graphql"),
DocumentNode(
definitions=[
SchemaDefinitionNode(
directives=[
DirectiveNode(
name=NameNode(
value="enable_cache",
location=Location(
line=1,
column=9,
line_end=1,
column_end=22,
),
),
arguments=[],
location=Location(
line=1, column=8, line_end=1, column_end=22
),
)
],
operation_type_definitions=[
OperationTypeDefinitionNode(
operation_type="query",
type=NamedTypeNode(
name=NameNode(
value="RootQuery",
location=Location(
line=2,
column=12,
line_end=3,
column_end=5,
),
),
location=Location(
line=2,
column=12,
line_end=3,
column_end=5,
),
),
location=Location(
line=2, column=5, line_end=3, column_end=5
),
),
OperationTypeDefinitionNode(
operation_type="mutation",
type=NamedTypeNode(
name=NameNode(
value="RootMutation",
location=Location(
line=3,
column=15,
line_end=4,
column_end=5,
),
),
location=Location(
line=3,
column=15,
line_end=4,
column_end=5,
),
),
location=Location(
line=3, column=5, line_end=4, column_end=5
),
),
OperationTypeDefinitionNode(
operation_type="subscription",
type=NamedTypeNode(
name=NameNode(
value="RootSubscription",
location=Location(
line=4,
column=19,
line_end=5,
column_end=1,
),
),
location=Location(
line=4,
column=19,
line_end=5,
column_end=1,
),
),
location=Location(
line=4, column=5, line_end=5, column_end=1
),
),
],
location=Location(
line=1, column=1, line_end=7, column_end=1
),
),
ScalarTypeDefinitionNode(
description=None,
name=NameNode(
value="Date",
location=Location(
line=7, column=8, line_end=9, column_end=1
),
),
directives=[],
location=Location(
line=7, column=1, line_end=9, column_end=1
),
),
UnionTypeDefinitionNode(
description=None,
name=NameNode(
value="Group",
location=Location(
line=9, column=7, line_end=9, column_end=13
),
),
directives=[],
types=[
NamedTypeNode(
name=NameNode(
value="Foo",
location=Location(
line=9,
column=15,
line_end=9,
column_end=19,
),
),
location=Location(
line=9,
column=15,
line_end=9,
column_end=19,
),
),
NamedTypeNode(
name=NameNode(
value="Bar",
location=Location(
line=9,
column=21,
line_end=9,
column_end=25,
),
),
location=Location(
line=9,
column=21,
line_end=9,
column_end=25,
),
),
NamedTypeNode(
name=NameNode(
value="Baz",
location=Location(
line=9,
column=27,
line_end=11,
column_end=1,
),
),
location=Location(
line=9,
column=27,
line_end=11,
column_end=1,
),
),
],
location=Location(
line=9, column=1, line_end=11, column_end=1
),
),
InterfaceTypeDefinitionNode(
description=None,
name=NameNode(
value="Something",
location=Location(
line=11, column=11, line_end=11, column_end=21
),
),
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="oneField",
location=Location(
line=12,
column=5,
line_end=12,
column_end=13,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Int",
location=Location(
line=12,
column=16,
line_end=12,
column_end=19,
),
),
location=Location(
line=12,
column=16,
line_end=12,
column_end=19,
),
),
location=Location(
line=12,
column=15,
line_end=13,
column_end=5,
),
),
directives=[],
location=Location(
line=12,
column=5,
line_end=13,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="anotherField",
location=Location(
line=13,
column=5,
line_end=13,
column_end=17,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=13,
column=20,
line_end=13,
column_end=26,
),
),
location=Location(
line=13,
column=20,
line_end=13,
column_end=26,
),
),
location=Location(
line=13,
column=19,
line_end=14,
column_end=5,
),
),
directives=[],
location=Location(
line=13,
column=5,
line_end=14,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="aLastOne",
location=Location(
line=14,
column=5,
line_end=14,
column_end=13,
),
),
arguments=[],
type=NonNullTypeNode(
type=ListTypeNode(
type=ListTypeNode(
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Date",
location=Location(
line=14,
column=17,
line_end=14,
column_end=21,
),
),
location=Location(
line=14,
column=17,
line_end=14,
column_end=21,
),
),
location=Location(
line=14,
column=17,
line_end=14,
column_end=22,
),
),
location=Location(
line=14,
column=16,
line_end=14,
column_end=23,
),
),
location=Location(
line=14,
column=15,
line_end=14,
column_end=24,
),
),
location=Location(
line=14,
column=15,
line_end=15,
column_end=1,
),
),
directives=[],
location=Location(
line=14,
column=5,
line_end=15,
column_end=1,
),
),
],
location=Location(
line=11, column=1, line_end=17, column_end=1
),
),
InputObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="UserInfo",
location=Location(
line=17, column=7, line_end=17, column_end=16
),
),
directives=[],
fields=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="name",
location=Location(
line=18,
column=5,
line_end=18,
column_end=9,
),
),
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=18,
column=11,
line_end=19,
column_end=5,
),
),
location=Location(
line=18,
column=11,
line_end=19,
column_end=5,
),
),
default_value=None,
directives=[],
location=Location(
line=18,
column=5,
line_end=19,
column_end=5,
),
),
InputValueDefinitionNode(
description=None,
name=NameNode(
value="dateOfBirth",
location=Location(
line=19,
column=5,
line_end=19,
column_end=16,
),
),
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Date",
location=Location(
line=19,
column=19,
line_end=19,
column_end=23,
),
),
location=Location(
line=19,
column=19,
line_end=19,
column_end=23,
),
),
location=Location(
line=19,
column=18,
line_end=20,
column_end=5,
),
),
default_value=None,
directives=[],
location=Location(
line=19,
column=5,
line_end=20,
column_end=5,
),
),
InputValueDefinitionNode(
description=None,
name=NameNode(
value="graphQLFan",
location=Location(
line=20,
column=5,
line_end=20,
column_end=15,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=20,
column=17,
line_end=20,
column_end=24,
),
),
location=Location(
line=20,
column=17,
line_end=20,
column_end=24,
),
),
location=Location(
line=20,
column=17,
line_end=21,
column_end=1,
),
),
default_value=None,
directives=[],
location=Location(
line=20,
column=5,
line_end=21,
column_end=1,
),
),
],
location=Location(
line=17, column=1, line_end=25, column_end=1
),
),
ObjectTypeDefinitionNode(
description=DescriptionNode(
value="\nThis is a docstring for the Test Object Type.\n",
location=Location(
line=25, column=1, line_end=28, column_end=1
),
),
name=NameNode(
value="Test",
location=Location(
line=28, column=6, line_end=28, column_end=11
),
),
interfaces=[
NamedTypeNode(
name=NameNode(
value="Unknown",
location=Location(
line=28,
column=22,
line_end=28,
column_end=30,
),
),
location=Location(
line=28,
column=22,
line_end=28,
column_end=30,
),
),
NamedTypeNode(
name=NameNode(
value="Empty",
location=Location(
line=28,
column=32,
line_end=28,
column_end=38,
),
),
location=Location(
line=28,
column=32,
line_end=28,
column_end=38,
),
),
],
directives=[
DirectiveNode(
name=NameNode(
value="enable_cache",
location=Location(
line=28,
column=39,
line_end=28,
column_end=52,
),
),
arguments=[],
location=Location(
line=28,
column=38,
line_end=28,
column_end=52,
),
)
],
fields=[
FieldDefinitionNode(
description=DescriptionNode(
value="\n This is a field description :D\n ",
location=Location(
line=29,
column=5,
line_end=32,
column_end=5,
),
),
name=NameNode(
value="field",
location=Location(
line=32,
column=5,
line_end=32,
column_end=10,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="input",
location=Location(
line=32,
column=11,
line_end=32,
column_end=16,
),
),
type=NamedTypeNode(
name=NameNode(
value="InputObject",
location=Location(
line=32,
column=18,
line_end=32,
column_end=29,
),
),
location=Location(
line=32,
column=18,
line_end=32,
column_end=29,
),
),
default_value=None,
directives=[],
location=Location(
line=32,
column=11,
line_end=32,
column_end=29,
),
)
],
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=32,
column=32,
line_end=32,
column_end=38,
),
),
location=Location(
line=32,
column=32,
line_end=32,
column_end=38,
),
),
location=Location(
line=32,
column=32,
line_end=32,
column_end=40,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="deprecated",
location=Location(
line=32,
column=41,
line_end=32,
column_end=51,
),
),
arguments=[
ArgumentNode(
name=NameNode(
value="reason",
location=Location(
line=32,
column=52,
line_end=32,
column_end=58,
),
),
value=StringValueNode(
value="Useless field",
location=Location(
line=32,
column=60,
line_end=32,
column_end=75,
),
),
location=Location(
line=32,
column=52,
line_end=32,
column_end=75,
),
)
],
location=Location(
line=32,
column=40,
line_end=33,
column_end=5,
),
)
],
location=Location(
line=29,
column=5,
line_end=33,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="anotherField",
location=Location(
line=33,
column=5,
line_end=33,
column_end=17,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Int",
location=Location(
line=33,
column=20,
line_end=33,
column_end=23,
),
),
location=Location(
line=33,
column=20,
line_end=33,
column_end=23,
),
),
location=Location(
line=33,
column=19,
line_end=33,
column_end=25,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="something",
location=Location(
line=33,
column=26,
line_end=33,
column_end=35,
),
),
arguments=[
ArgumentNode(
name=NameNode(
value="lst",
location=Location(
line=34,
column=9,
line_end=34,
column_end=12,
),
),
value=ListValueNode(
values=[
StringValueNode(
value="about",
location=Location(
line=34,
column=15,
line_end=34,
column_end=23,
),
),
StringValueNode(
value="stuff",
location=Location(
line=34,
column=23,
line_end=34,
column_end=30,
),
),
],
location=Location(
line=34,
column=14,
line_end=35,
column_end=9,
),
),
location=Location(
line=34,
column=9,
line_end=35,
column_end=9,
),
),
ArgumentNode(
name=NameNode(
value="obj",
location=Location(
line=35,
column=9,
line_end=35,
column_end=12,
),
),
value=ObjectValueNode(
fields=[
ObjectFieldNode(
name=NameNode(
value="some",
location=Location(
line=35,
column=15,
line_end=35,
column_end=19,
),
),
value=ListValueNode(
values=[
IntValueNode(
value=4,
location=Location(
line=35,
column=22,
line_end=35,
column_end=25,
),
),
IntValueNode(
value=8,
location=Location(
line=35,
column=25,
line_end=35,
column_end=28,
),
),
IntValueNode(
value=16,
location=Location(
line=35,
column=28,
line_end=35,
column_end=30,
),
),
],
location=Location(
line=35,
column=21,
line_end=35,
column_end=33,
),
),
location=Location(
line=35,
column=15,
line_end=35,
column_end=33,
),
),
ObjectFieldNode(
name=NameNode(
value="complex",
location=Location(
line=35,
column=33,
line_end=35,
column_end=40,
),
),
value=ObjectValueNode(
fields=[
ObjectFieldNode(
name=NameNode(
value="about",
location=Location(
line=35,
column=43,
line_end=35,
column_end=48,
),
),
value=FloatValueNode(
value=19.4,
location=Location(
line=35,
column=50,
line_end=35,
column_end=54,
),
),
location=Location(
line=35,
column=43,
line_end=35,
column_end=54,
),
)
],
location=Location(
line=35,
column=42,
line_end=35,
column_end=57,
),
),
location=Location(
line=35,
column=33,
line_end=35,
column_end=57,
),
),
ObjectFieldNode(
name=NameNode(
value="another",
location=Location(
line=35,
column=57,
line_end=35,
column_end=64,
),
),
value=EnumValueNode(
value="EnumVal",
location=Location(
line=35,
column=66,
line_end=35,
column_end=73,
),
),
location=Location(
line=35,
column=57,
line_end=35,
column_end=73,
),
),
],
location=Location(
line=35,
column=14,
line_end=36,
column_end=5,
),
),
location=Location(
line=35,
column=9,
line_end=36,
column_end=5,
),
),
],
location=Location(
line=33,
column=25,
line_end=37,
column_end=5,
),
)
],
location=Location(
line=33,
column=5,
line_end=37,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="fieldWithDefaultValueArg",
location=Location(
line=37,
column=5,
line_end=37,
column_end=29,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="test",
location=Location(
line=37,
column=30,
line_end=37,
column_end=34,
),
),
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=37,
column=36,
line_end=37,
column_end=43,
),
),
location=Location(
line=37,
column=36,
line_end=37,
column_end=43,
),
),
default_value=StringValueNode(
value="default",
location=Location(
line=37,
column=45,
line_end=37,
column_end=54,
),
),
directives=[],
location=Location(
line=37,
column=30,
line_end=37,
column_end=54,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="ID",
location=Location(
line=37,
column=57,
line_end=38,
column_end=5,
),
),
location=Location(
line=37,
column=57,
line_end=38,
column_end=5,
),
),
directives=[],
location=Location(
line=37,
column=5,
line_end=38,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="simpleField",
location=Location(
line=38,
column=5,
line_end=38,
column_end=16,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Date",
location=Location(
line=38,
column=18,
line_end=39,
column_end=1,
),
),
location=Location(
line=38,
column=18,
line_end=39,
column_end=1,
),
),
directives=[],
location=Location(
line=38,
column=5,
line_end=39,
column_end=1,
),
),
],
location=Location(
line=25, column=1, line_end=40, column_end=1
),
),
],
location=Location(line=1, column=1, line_end=40, column_end=1),
),
),
(
os.path.join(_BASE_DIR, "fixtures", "advanced.graphql"),
DocumentNode(
definitions=[
EnumTypeDefinitionNode(
description=None,
name=NameNode(
value="Episode",
location=Location(
line=1, column=6, line_end=1, column_end=14
),
),
directives=[
DirectiveNode(
name=NameNode(
value="link_db",
location=Location(
line=1,
column=15,
line_end=1,
column_end=22,
),
),
arguments=[
ArgumentNode(
name=NameNode(
value="table",
location=Location(
line=1,
column=23,
line_end=1,
column_end=28,
),
),
value=StringValueNode(
value="movies.slug",
location=Location(
line=1,
column=30,
line_end=1,
column_end=43,
),
),
location=Location(
line=1,
column=23,
line_end=1,
column_end=43,
),
)
],
location=Location(
line=1,
column=14,
line_end=1,
column_end=45,
),
)
],
values=[
EnumValueDefinitionNode(
description=None,
name=EnumValueNode(
value="A_NEW_HOPE",
location=Location(
line=2,
column=5,
line_end=3,
column_end=5,
),
),
directives=[],
location=Location(
line=2, column=5, line_end=3, column_end=5
),
),
EnumValueDefinitionNode(
description=None,
name=EnumValueNode(
value="THE_EMPIRE_STRIKES_BACK",
location=Location(
line=3,
column=5,
line_end=4,
column_end=5,
),
),
directives=[],
location=Location(
line=3, column=5, line_end=4, column_end=5
),
),
EnumValueDefinitionNode(
description=None,
name=EnumValueNode(
value="RETURN_OF_THE_JEDI",
location=Location(
line=4,
column=5,
line_end=5,
column_end=5,
),
),
directives=[],
location=Location(
line=4, column=5, line_end=5, column_end=5
),
),
EnumValueDefinitionNode(
description=None,
name=EnumValueNode(
value="THE_PHANTOM_MENACE",
location=Location(
line=5,
column=5,
line_end=6,
column_end=5,
),
),
directives=[],
location=Location(
line=5, column=5, line_end=6, column_end=5
),
),
EnumValueDefinitionNode(
description=None,
name=EnumValueNode(
value="ATTACK_OF_THE_CLONES",
location=Location(
line=6,
column=5,
line_end=7,
column_end=5,
),
),
directives=[],
location=Location(
line=6, column=5, line_end=7, column_end=5
),
),
EnumValueDefinitionNode(
description=None,
name=EnumValueNode(
value="REVENGE_OF_THE_SITH",
location=Location(
line=7,
column=5,
line_end=8,
column_end=1,
),
),
directives=[],
location=Location(
line=7, column=5, line_end=8, column_end=1
),
),
],
location=Location(
line=1, column=1, line_end=10, column_end=1
),
),
InterfaceTypeDefinitionNode(
description=None,
name=NameNode(
value="Character",
location=Location(
line=10, column=11, line_end=10, column_end=21
),
),
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=11,
column=5,
line_end=11,
column_end=7,
),
),
arguments=[],
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=11,
column=9,
line_end=11,
column_end=15,
),
),
location=Location(
line=11,
column=9,
line_end=11,
column_end=15,
),
),
location=Location(
line=11,
column=9,
line_end=12,
column_end=5,
),
),
directives=[],
location=Location(
line=11,
column=5,
line_end=12,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="name",
location=Location(
line=12,
column=5,
line_end=12,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=12,
column=11,
line_end=13,
column_end=5,
),
),
location=Location(
line=12,
column=11,
line_end=13,
column_end=5,
),
),
directives=[],
location=Location(
line=12,
column=5,
line_end=13,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="friends",
location=Location(
line=13,
column=5,
line_end=13,
column_end=12,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Character",
location=Location(
line=13,
column=15,
line_end=13,
column_end=24,
),
),
location=Location(
line=13,
column=15,
line_end=13,
column_end=24,
),
),
location=Location(
line=13,
column=14,
line_end=14,
column_end=5,
),
),
directives=[],
location=Location(
line=13,
column=5,
line_end=14,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="appearsIn",
location=Location(
line=14,
column=5,
line_end=14,
column_end=14,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Episode",
location=Location(
line=14,
column=17,
line_end=14,
column_end=24,
),
),
location=Location(
line=14,
column=17,
line_end=14,
column_end=24,
),
),
location=Location(
line=14,
column=16,
line_end=14,
column_end=26,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="default",
location=Location(
line=14,
column=27,
line_end=14,
column_end=34,
),
),
arguments=[
ArgumentNode(
name=NameNode(
value="value",
location=Location(
line=14,
column=35,
line_end=14,
column_end=40,
),
),
value=EnumValueNode(
value="A_NEW_HOPE",
location=Location(
line=14,
column=42,
line_end=14,
column_end=52,
),
),
location=Location(
line=14,
column=35,
line_end=14,
column_end=52,
),
)
],
location=Location(
line=14,
column=26,
line_end=15,
column_end=1,
),
)
],
location=Location(
line=14,
column=5,
line_end=15,
column_end=1,
),
),
],
location=Location(
line=10, column=1, line_end=17, column_end=1
),
),
InterfaceTypeDefinitionNode(
description=None,
name=NameNode(
value="Creature",
location=Location(
line=17, column=11, line_end=17, column_end=20
),
),
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="name",
location=Location(
line=18,
column=5,
line_end=18,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=18,
column=11,
line_end=19,
column_end=5,
),
),
location=Location(
line=18,
column=11,
line_end=19,
column_end=5,
),
),
directives=[],
location=Location(
line=18,
column=5,
line_end=19,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="species",
location=Location(
line=19,
column=5,
line_end=19,
column_end=12,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=19,
column=14,
line_end=20,
column_end=1,
),
),
location=Location(
line=19,
column=14,
line_end=20,
column_end=1,
),
),
directives=[],
location=Location(
line=19,
column=5,
line_end=20,
column_end=1,
),
),
],
location=Location(
line=17, column=1, line_end=22, column_end=1
),
),
InterfaceTypeDefinitionNode(
description=None,
name=NameNode(
value="Vehicle",
location=Location(
line=22, column=11, line_end=22, column_end=19
),
),
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=23,
column=5,
line_end=23,
column_end=7,
),
),
arguments=[],
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=23,
column=9,
line_end=23,
column_end=15,
),
),
location=Location(
line=23,
column=9,
line_end=23,
column_end=15,
),
),
location=Location(
line=23,
column=9,
line_end=24,
column_end=5,
),
),
directives=[],
location=Location(
line=23,
column=5,
line_end=24,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="name",
location=Location(
line=24,
column=5,
line_end=24,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=24,
column=11,
line_end=25,
column_end=5,
),
),
location=Location(
line=24,
column=11,
line_end=25,
column_end=5,
),
),
directives=[],
location=Location(
line=24,
column=5,
line_end=25,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="owner",
location=Location(
line=25,
column=5,
line_end=25,
column_end=10,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Owner",
location=Location(
line=25,
column=12,
line_end=26,
column_end=1,
),
),
location=Location(
line=25,
column=12,
line_end=26,
column_end=1,
),
),
directives=[],
location=Location(
line=25,
column=5,
line_end=26,
column_end=1,
),
),
],
location=Location(
line=22, column=1, line_end=28, column_end=1
),
),
InterfaceTypeDefinitionNode(
description=None,
name=NameNode(
value="Location",
location=Location(
line=28, column=11, line_end=28, column_end=20
),
),
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=29,
column=5,
line_end=29,
column_end=7,
),
),
arguments=[],
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=29,
column=9,
line_end=29,
column_end=15,
),
),
location=Location(
line=29,
column=9,
line_end=29,
column_end=15,
),
),
location=Location(
line=29,
column=9,
line_end=30,
column_end=5,
),
),
directives=[],
location=Location(
line=29,
column=5,
line_end=30,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="name",
location=Location(
line=30,
column=5,
line_end=30,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=30,
column=11,
line_end=31,
column_end=5,
),
),
location=Location(
line=30,
column=11,
line_end=31,
column_end=5,
),
),
directives=[],
location=Location(
line=30,
column=5,
line_end=31,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="coordinates",
location=Location(
line=31,
column=5,
line_end=31,
column_end=16,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="default",
location=Location(
line=31,
column=17,
line_end=31,
column_end=24,
),
),
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Float",
location=Location(
line=31,
column=27,
line_end=31,
column_end=32,
),
),
location=Location(
line=31,
column=27,
line_end=31,
column_end=32,
),
),
location=Location(
line=31,
column=26,
line_end=31,
column_end=34,
),
),
default_value=ListValueNode(
values=[
FloatValueNode(
value=0.0,
location=Location(
line=31,
column=37,
line_end=31,
column_end=42,
),
),
FloatValueNode(
value=0.0,
location=Location(
line=31,
column=42,
line_end=31,
column_end=47,
),
),
FloatValueNode(
value=0.0,
location=Location(
line=31,
column=47,
line_end=31,
column_end=50,
),
),
],
location=Location(
line=31,
column=36,
line_end=31,
column_end=51,
),
),
directives=[],
location=Location(
line=31,
column=17,
line_end=31,
column_end=51,
),
)
],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Float",
location=Location(
line=31,
column=55,
line_end=31,
column_end=60,
),
),
location=Location(
line=31,
column=55,
line_end=31,
column_end=60,
),
),
location=Location(
line=31,
column=54,
line_end=32,
column_end=1,
),
),
directives=[],
location=Location(
line=31,
column=5,
line_end=32,
column_end=1,
),
),
],
location=Location(
line=28, column=1, line_end=34, column_end=1
),
),
ObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="Planet",
location=Location(
line=34, column=6, line_end=34, column_end=13
),
),
interfaces=[
NamedTypeNode(
name=NameNode(
value="Location",
location=Location(
line=34,
column=24,
line_end=34,
column_end=33,
),
),
location=Location(
line=34,
column=24,
line_end=34,
column_end=33,
),
)
],
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=35,
column=5,
line_end=35,
column_end=7,
),
),
arguments=[],
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=35,
column=9,
line_end=35,
column_end=15,
),
),
location=Location(
line=35,
column=9,
line_end=35,
column_end=15,
),
),
location=Location(
line=35,
column=9,
line_end=36,
column_end=5,
),
),
directives=[],
location=Location(
line=35,
column=5,
line_end=36,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="atmosphere",
location=Location(
line=36,
column=5,
line_end=36,
column_end=15,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=36,
column=17,
line_end=37,
column_end=1,
),
),
location=Location(
line=36,
column=17,
line_end=37,
column_end=1,
),
),
directives=[],
location=Location(
line=36,
column=5,
line_end=37,
column_end=1,
),
),
],
location=Location(
line=34, column=1, line_end=39, column_end=1
),
),
UnionTypeDefinitionNode(
description=None,
name=NameNode(
value="Owner",
location=Location(
line=39, column=7, line_end=39, column_end=13
),
),
directives=[],
types=[
NamedTypeNode(
name=NameNode(
value="Organization",
location=Location(
line=39,
column=15,
line_end=39,
column_end=28,
),
),
location=Location(
line=39,
column=15,
line_end=39,
column_end=28,
),
),
NamedTypeNode(
name=NameNode(
value="Character",
location=Location(
line=39,
column=30,
line_end=41,
column_end=1,
),
),
location=Location(
line=39,
column=30,
line_end=41,
column_end=1,
),
),
],
location=Location(
line=39, column=1, line_end=41, column_end=1
),
),
ObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="Organization",
location=Location(
line=41, column=6, line_end=41, column_end=19
),
),
interfaces=[],
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=42,
column=5,
line_end=42,
column_end=7,
),
),
arguments=[],
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=42,
column=9,
line_end=42,
column_end=15,
),
),
location=Location(
line=42,
column=9,
line_end=42,
column_end=15,
),
),
location=Location(
line=42,
column=9,
line_end=43,
column_end=5,
),
),
directives=[],
location=Location(
line=42,
column=5,
line_end=43,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="name",
location=Location(
line=43,
column=5,
line_end=43,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=43,
column=11,
line_end=44,
column_end=5,
),
),
location=Location(
line=43,
column=11,
line_end=44,
column_end=5,
),
),
directives=[],
location=Location(
line=43,
column=5,
line_end=44,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="members",
location=Location(
line=44,
column=5,
line_end=44,
column_end=12,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Character",
location=Location(
line=44,
column=15,
line_end=44,
column_end=24,
),
),
location=Location(
line=44,
column=15,
line_end=44,
column_end=24,
),
),
location=Location(
line=44,
column=14,
line_end=45,
column_end=1,
),
),
directives=[],
location=Location(
line=44,
column=5,
line_end=45,
column_end=1,
),
),
],
location=Location(
line=41, column=1, line_end=47, column_end=1
),
),
ObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="Human",
location=Location(
line=47, column=6, line_end=47, column_end=12
),
),
interfaces=[
NamedTypeNode(
name=NameNode(
value="Character",
location=Location(
line=47,
column=23,
line_end=47,
column_end=33,
),
),
location=Location(
line=47,
column=23,
line_end=47,
column_end=33,
),
),
NamedTypeNode(
name=NameNode(
value="Creature",
location=Location(
line=47,
column=35,
line_end=47,
column_end=44,
),
),
location=Location(
line=47,
column=35,
line_end=47,
column_end=44,
),
),
],
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=48,
column=5,
line_end=48,
column_end=7,
),
),
arguments=[],
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=48,
column=9,
line_end=48,
column_end=15,
),
),
location=Location(
line=48,
column=9,
line_end=48,
column_end=15,
),
),
location=Location(
line=48,
column=9,
line_end=49,
column_end=5,
),
),
directives=[],
location=Location(
line=48,
column=5,
line_end=49,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="name",
location=Location(
line=49,
column=5,
line_end=49,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=49,
column=11,
line_end=50,
column_end=5,
),
),
location=Location(
line=49,
column=11,
line_end=50,
column_end=5,
),
),
directives=[],
location=Location(
line=49,
column=5,
line_end=50,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="friends",
location=Location(
line=50,
column=5,
line_end=50,
column_end=12,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Character",
location=Location(
line=50,
column=15,
line_end=50,
column_end=24,
),
),
location=Location(
line=50,
column=15,
line_end=50,
column_end=24,
),
),
location=Location(
line=50,
column=14,
line_end=51,
column_end=5,
),
),
directives=[],
location=Location(
line=50,
column=5,
line_end=51,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="appearsIn",
location=Location(
line=51,
column=5,
line_end=51,
column_end=14,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Episode",
location=Location(
line=51,
column=17,
line_end=51,
column_end=24,
),
),
location=Location(
line=51,
column=17,
line_end=51,
column_end=24,
),
),
location=Location(
line=51,
column=16,
line_end=52,
column_end=5,
),
),
directives=[],
location=Location(
line=51,
column=5,
line_end=52,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="homePlanet",
location=Location(
line=52,
column=5,
line_end=52,
column_end=15,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Location",
location=Location(
line=52,
column=17,
line_end=53,
column_end=1,
),
),
location=Location(
line=52,
column=17,
line_end=53,
column_end=1,
),
),
directives=[],
location=Location(
line=52,
column=5,
line_end=53,
column_end=1,
),
),
],
location=Location(
line=47, column=1, line_end=55, column_end=1
),
),
ObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="Droid",
location=Location(
line=55, column=6, line_end=55, column_end=12
),
),
interfaces=[
NamedTypeNode(
name=NameNode(
value="Character",
location=Location(
line=55,
column=23,
line_end=55,
column_end=33,
),
),
location=Location(
line=55,
column=23,
line_end=55,
column_end=33,
),
)
],
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=56,
column=5,
line_end=56,
column_end=7,
),
),
arguments=[],
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=56,
column=9,
line_end=56,
column_end=15,
),
),
location=Location(
line=56,
column=9,
line_end=56,
column_end=15,
),
),
location=Location(
line=56,
column=9,
line_end=57,
column_end=5,
),
),
directives=[],
location=Location(
line=56,
column=5,
line_end=57,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="name",
location=Location(
line=57,
column=5,
line_end=57,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=57,
column=11,
line_end=58,
column_end=5,
),
),
location=Location(
line=57,
column=11,
line_end=58,
column_end=5,
),
),
directives=[],
location=Location(
line=57,
column=5,
line_end=58,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="friends",
location=Location(
line=58,
column=5,
line_end=58,
column_end=12,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Character",
location=Location(
line=58,
column=15,
line_end=58,
column_end=24,
),
),
location=Location(
line=58,
column=15,
line_end=58,
column_end=24,
),
),
location=Location(
line=58,
column=14,
line_end=58,
column_end=26,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="deprecated",
location=Location(
line=58,
column=27,
line_end=58,
column_end=37,
),
),
arguments=[
ArgumentNode(
name=NameNode(
value="reason",
location=Location(
line=58,
column=38,
line_end=58,
column_end=44,
),
),
value=StringValueNode(
value="Droids can't have friends. Use the acquaintances field.",
location=Location(
line=58,
column=46,
line_end=58,
column_end=103,
),
),
location=Location(
line=58,
column=38,
line_end=58,
column_end=103,
),
)
],
location=Location(
line=58,
column=26,
line_end=59,
column_end=5,
),
)
],
location=Location(
line=58,
column=5,
line_end=59,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="acquaintances",
location=Location(
line=59,
column=5,
line_end=59,
column_end=18,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Character",
location=Location(
line=59,
column=21,
line_end=59,
column_end=30,
),
),
location=Location(
line=59,
column=21,
line_end=59,
column_end=30,
),
),
location=Location(
line=59,
column=20,
line_end=60,
column_end=5,
),
),
directives=[],
location=Location(
line=59,
column=5,
line_end=60,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="appearsIn",
location=Location(
line=60,
column=5,
line_end=60,
column_end=14,
),
),
arguments=[],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Episode",
location=Location(
line=60,
column=17,
line_end=60,
column_end=24,
),
),
location=Location(
line=60,
column=17,
line_end=60,
column_end=24,
),
),
location=Location(
line=60,
column=16,
line_end=61,
column_end=5,
),
),
directives=[],
location=Location(
line=60,
column=5,
line_end=61,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="primaryFunction",
location=Location(
line=61,
column=5,
line_end=61,
column_end=20,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=61,
column=22,
line_end=62,
column_end=1,
),
),
location=Location(
line=61,
column=22,
line_end=62,
column_end=1,
),
),
directives=[],
location=Location(
line=61,
column=5,
line_end=62,
column_end=1,
),
),
],
location=Location(
line=55, column=1, line_end=64, column_end=1
),
),
ObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="Query",
location=Location(
line=64, column=6, line_end=64, column_end=12
),
),
interfaces=[],
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="hero",
location=Location(
line=65,
column=5,
line_end=65,
column_end=9,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="episode",
location=Location(
line=65,
column=10,
line_end=65,
column_end=17,
),
),
type=NamedTypeNode(
name=NameNode(
value="Episode",
location=Location(
line=65,
column=19,
line_end=65,
column_end=26,
),
),
location=Location(
line=65,
column=19,
line_end=65,
column_end=26,
),
),
default_value=None,
directives=[],
location=Location(
line=65,
column=10,
line_end=65,
column_end=26,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Character",
location=Location(
line=65,
column=29,
line_end=66,
column_end=5,
),
),
location=Location(
line=65,
column=29,
line_end=66,
column_end=5,
),
),
directives=[],
location=Location(
line=65,
column=5,
line_end=66,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="human",
location=Location(
line=66,
column=5,
line_end=66,
column_end=10,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=66,
column=11,
line_end=66,
column_end=13,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=66,
column=15,
line_end=66,
column_end=21,
),
),
location=Location(
line=66,
column=15,
line_end=66,
column_end=21,
),
),
location=Location(
line=66,
column=15,
line_end=66,
column_end=22,
),
),
default_value=None,
directives=[],
location=Location(
line=66,
column=11,
line_end=66,
column_end=22,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Human",
location=Location(
line=66,
column=25,
line_end=67,
column_end=5,
),
),
location=Location(
line=66,
column=25,
line_end=67,
column_end=5,
),
),
directives=[],
location=Location(
line=66,
column=5,
line_end=67,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="droid",
location=Location(
line=67,
column=5,
line_end=67,
column_end=10,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=67,
column=11,
line_end=67,
column_end=13,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=67,
column=15,
line_end=67,
column_end=21,
),
),
location=Location(
line=67,
column=15,
line_end=67,
column_end=21,
),
),
location=Location(
line=67,
column=15,
line_end=67,
column_end=22,
),
),
default_value=None,
directives=[],
location=Location(
line=67,
column=11,
line_end=67,
column_end=22,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Droid",
location=Location(
line=67,
column=25,
line_end=68,
column_end=5,
),
),
location=Location(
line=67,
column=25,
line_end=68,
column_end=5,
),
),
directives=[],
location=Location(
line=67,
column=5,
line_end=68,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="characters",
location=Location(
line=68,
column=5,
line_end=68,
column_end=15,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="filter",
location=Location(
line=68,
column=16,
line_end=68,
column_end=22,
),
),
type=NamedTypeNode(
name=NameNode(
value="FilterCharacters",
location=Location(
line=68,
column=24,
line_end=68,
column_end=40,
),
),
location=Location(
line=68,
column=24,
line_end=68,
column_end=40,
),
),
default_value=None,
directives=[],
location=Location(
line=68,
column=16,
line_end=68,
column_end=40,
),
)
],
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Characters",
location=Location(
line=68,
column=44,
line_end=68,
column_end=54,
),
),
location=Location(
line=68,
column=44,
line_end=68,
column_end=54,
),
),
location=Location(
line=68,
column=43,
line_end=69,
column_end=5,
),
),
directives=[],
location=Location(
line=68,
column=5,
line_end=69,
column_end=5,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="planet",
location=Location(
line=69,
column=5,
line_end=69,
column_end=11,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="id",
location=Location(
line=69,
column=12,
line_end=69,
column_end=14,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=69,
column=16,
line_end=69,
column_end=22,
),
),
location=Location(
line=69,
column=16,
line_end=69,
column_end=22,
),
),
location=Location(
line=69,
column=16,
line_end=69,
column_end=23,
),
),
default_value=None,
directives=[],
location=Location(
line=69,
column=12,
line_end=69,
column_end=23,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Planet",
location=Location(
line=69,
column=26,
line_end=70,
column_end=1,
),
),
location=Location(
line=69,
column=26,
line_end=70,
column_end=1,
),
),
directives=[],
location=Location(
line=69,
column=5,
line_end=70,
column_end=1,
),
),
],
location=Location(
line=64, column=1, line_end=72, column_end=1
),
),
InputObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="FilterCharacters",
location=Location(
line=72, column=7, line_end=72, column_end=24
),
),
directives=[],
fields=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="limit",
location=Location(
line=73,
column=5,
line_end=73,
column_end=10,
),
),
type=NamedTypeNode(
name=NameNode(
value="Int",
location=Location(
line=73,
column=12,
line_end=74,
column_end=5,
),
),
location=Location(
line=73,
column=12,
line_end=74,
column_end=5,
),
),
default_value=None,
directives=[],
location=Location(
line=73,
column=5,
line_end=74,
column_end=5,
),
),
InputValueDefinitionNode(
description=None,
name=NameNode(
value="sinceEpisode",
location=Location(
line=74,
column=5,
line_end=74,
column_end=17,
),
),
type=NamedTypeNode(
name=NameNode(
value="Episode",
location=Location(
line=74,
column=19,
line_end=75,
column_end=1,
),
),
location=Location(
line=74,
column=19,
line_end=75,
column_end=1,
),
),
default_value=None,
directives=[],
location=Location(
line=74,
column=5,
line_end=75,
column_end=1,
),
),
],
location=Location(
line=72, column=1, line_end=77, column_end=1
),
),
ScalarTypeDefinitionNode(
description=DescriptionNode(
value="\nA custom scalar to represent time in the StarWars universe.\nIt should support nanoseconds and conversion to/from a flick.\n",
location=Location(
line=77, column=1, line_end=81, column_end=1
),
),
name=NameNode(
value="Date",
location=Location(
line=81, column=8, line_end=83, column_end=1
),
),
directives=[],
location=Location(
line=77, column=1, line_end=83, column_end=1
),
),
ObjectTypeExtensionNode(
name=NameNode(
value="Human",
location=Location(
line=83, column=13, line_end=83, column_end=19
),
),
interfaces=[],
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="born",
location=Location(
line=84,
column=5,
line_end=84,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Date",
location=Location(
line=84,
column=11,
line_end=84,
column_end=16,
),
),
location=Location(
line=84,
column=11,
line_end=84,
column_end=16,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="limit",
location=Location(
line=84,
column=17,
line_end=84,
column_end=22,
),
),
arguments=[
ArgumentNode(
name=NameNode(
value="min",
location=Location(
line=84,
column=23,
line_end=84,
column_end=26,
),
),
value=IntValueNode(
value=0,
location=Location(
line=84,
column=28,
line_end=84,
column_end=29,
),
),
location=Location(
line=84,
column=23,
line_end=84,
column_end=29,
),
)
],
location=Location(
line=84,
column=16,
line_end=85,
column_end=1,
),
)
],
location=Location(
line=84,
column=5,
line_end=85,
column_end=1,
),
)
],
location=Location(
line=83, column=1, line_end=87, column_end=1
),
),
ObjectTypeExtensionNode(
name=NameNode(
value="Droid",
location=Location(
line=87, column=13, line_end=87, column_end=19
),
),
interfaces=[],
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="produced",
location=Location(
line=88,
column=5,
line_end=88,
column_end=13,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Date",
location=Location(
line=88,
column=15,
line_end=89,
column_end=1,
),
),
location=Location(
line=88,
column=15,
line_end=89,
column_end=1,
),
),
directives=[],
location=Location(
line=88,
column=5,
line_end=89,
column_end=1,
),
)
],
location=Location(
line=87, column=1, line_end=91, column_end=1
),
),
InputObjectTypeExtensionNode(
name=NameNode(
value="FilterCharacters",
location=Location(
line=91, column=14, line_end=91, column_end=31
),
),
directives=[],
fields=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="existsSince",
location=Location(
line=92,
column=5,
line_end=92,
column_end=16,
),
),
type=NamedTypeNode(
name=NameNode(
value="Date",
location=Location(
line=92,
column=18,
line_end=93,
column_end=1,
),
),
location=Location(
line=92,
column=18,
line_end=93,
column_end=1,
),
),
default_value=None,
directives=[],
location=Location(
line=92,
column=5,
line_end=93,
column_end=1,
),
)
],
location=Location(
line=91, column=1, line_end=94, column_end=1
),
),
],
location=Location(line=1, column=1, line_end=94, column_end=1),
),
),
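    # Expected AST for the kitchen-sink fixture (fixtures/kitchen-sink.graphql),
    # which exercises type definitions, extensions, directives, and descriptions.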
(
os.path.join(_BASE_DIR, "fixtures", "kitchen-sink.graphql"),
DocumentNode(
definitions=[
SchemaDefinitionNode(
directives=[],
operation_type_definitions=[
OperationTypeDefinitionNode(
operation_type="query",
type=NamedTypeNode(
name=NameNode(
value="QueryType",
location=Location(
line=2,
column=10,
line_end=3,
column_end=3,
),
),
location=Location(
line=2,
column=10,
line_end=3,
column_end=3,
),
),
location=Location(
line=2, column=3, line_end=3, column_end=3
),
),
OperationTypeDefinitionNode(
operation_type="mutation",
type=NamedTypeNode(
name=NameNode(
value="MutationType",
location=Location(
line=3,
column=13,
line_end=4,
column_end=1,
),
),
location=Location(
line=3,
column=13,
line_end=4,
column_end=1,
),
),
location=Location(
line=3, column=3, line_end=4, column_end=1
),
),
],
location=Location(
line=1, column=1, line_end=6, column_end=1
),
),
ObjectTypeDefinitionNode(
description=DescriptionNode(
value="\nThis is a description\nof the `Foo` type.\n",
location=Location(
line=6, column=1, line_end=10, column_end=1
),
),
name=NameNode(
value="Foo",
location=Location(
line=10, column=6, line_end=10, column_end=10
),
),
interfaces=[
NamedTypeNode(
name=NameNode(
value="Bar",
location=Location(
line=10,
column=21,
line_end=10,
column_end=25,
),
),
location=Location(
line=10,
column=21,
line_end=10,
column_end=25,
),
),
NamedTypeNode(
name=NameNode(
value="Baz",
location=Location(
line=10,
column=27,
line_end=10,
column_end=31,
),
),
location=Location(
line=10,
column=27,
line_end=10,
column_end=31,
),
),
],
directives=[],
fields=[
FieldDefinitionNode(
description=DescriptionNode(
value="Description of the `one` field.",
location=Location(
line=11,
column=3,
line_end=12,
column_end=3,
),
),
name=NameNode(
value="one",
location=Location(
line=12,
column=3,
line_end=12,
column_end=6,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=12,
column=8,
line_end=13,
column_end=3,
),
),
location=Location(
line=12,
column=8,
line_end=13,
column_end=3,
),
),
directives=[],
location=Location(
line=11,
column=3,
line_end=13,
column_end=3,
),
),
FieldDefinitionNode(
description=DescriptionNode(
value="\n This is a description of the `two` field.\n ",
location=Location(
line=13,
column=3,
line_end=16,
column_end=3,
),
),
name=NameNode(
value="two",
location=Location(
line=16,
column=3,
line_end=16,
column_end=6,
),
),
arguments=[
InputValueDefinitionNode(
description=DescriptionNode(
value="\n This is a description of the `argument` argument.\n ",
location=Location(
line=17,
column=5,
line_end=20,
column_end=5,
),
),
name=NameNode(
value="argument",
location=Location(
line=20,
column=5,
line_end=20,
column_end=13,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="InputType",
location=Location(
line=20,
column=15,
line_end=20,
column_end=24,
),
),
location=Location(
line=20,
column=15,
line_end=20,
column_end=24,
),
),
location=Location(
line=20,
column=15,
line_end=21,
column_end=3,
),
),
default_value=None,
directives=[],
location=Location(
line=17,
column=5,
line_end=21,
column_end=3,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=21,
column=6,
line_end=22,
column_end=3,
),
),
location=Location(
line=21,
column=6,
line_end=22,
column_end=3,
),
),
directives=[],
location=Location(
line=13,
column=3,
line_end=22,
column_end=3,
),
),
FieldDefinitionNode(
description=DescriptionNode(
value="This is a description of the `three` field.",
location=Location(
line=22,
column=3,
line_end=23,
column_end=3,
),
),
name=NameNode(
value="three",
location=Location(
line=23,
column=3,
line_end=23,
column_end=8,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="argument",
location=Location(
line=23,
column=9,
line_end=23,
column_end=17,
),
),
type=NamedTypeNode(
name=NameNode(
value="InputType",
location=Location(
line=23,
column=19,
line_end=23,
column_end=30,
),
),
location=Location(
line=23,
column=19,
line_end=23,
column_end=30,
),
),
default_value=None,
directives=[],
location=Location(
line=23,
column=9,
line_end=23,
column_end=30,
),
),
InputValueDefinitionNode(
description=None,
name=NameNode(
value="other",
location=Location(
line=23,
column=30,
line_end=23,
column_end=35,
),
),
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=23,
column=37,
line_end=23,
column_end=43,
),
),
location=Location(
line=23,
column=37,
line_end=23,
column_end=43,
),
),
default_value=None,
directives=[],
location=Location(
line=23,
column=30,
line_end=23,
column_end=43,
),
),
],
type=NamedTypeNode(
name=NameNode(
value="Int",
location=Location(
line=23,
column=46,
line_end=24,
column_end=3,
),
),
location=Location(
line=23,
column=46,
line_end=24,
column_end=3,
),
),
directives=[],
location=Location(
line=22,
column=3,
line_end=24,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="four",
location=Location(
line=24,
column=3,
line_end=24,
column_end=7,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="argument",
location=Location(
line=24,
column=8,
line_end=24,
column_end=16,
),
),
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=24,
column=18,
line_end=24,
column_end=25,
),
),
location=Location(
line=24,
column=18,
line_end=24,
column_end=25,
),
),
default_value=StringValueNode(
value="string",
location=Location(
line=24,
column=27,
line_end=24,
column_end=35,
),
),
directives=[],
location=Location(
line=24,
column=8,
line_end=24,
column_end=35,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=24,
column=38,
line_end=25,
column_end=3,
),
),
location=Location(
line=24,
column=38,
line_end=25,
column_end=3,
),
),
directives=[],
location=Location(
line=24,
column=3,
line_end=25,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="five",
location=Location(
line=25,
column=3,
line_end=25,
column_end=7,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="argument",
location=Location(
line=25,
column=8,
line_end=25,
column_end=16,
),
),
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=25,
column=19,
line_end=25,
column_end=25,
),
),
location=Location(
line=25,
column=19,
line_end=25,
column_end=25,
),
),
location=Location(
line=25,
column=18,
line_end=25,
column_end=27,
),
),
default_value=ListValueNode(
values=[
StringValueNode(
value="string",
location=Location(
line=25,
column=30,
line_end=25,
column_end=40,
),
),
StringValueNode(
value="string",
location=Location(
line=25,
column=40,
line_end=25,
column_end=48,
),
),
],
location=Location(
line=25,
column=29,
line_end=25,
column_end=49,
),
),
directives=[],
location=Location(
line=25,
column=8,
line_end=25,
column_end=49,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=25,
column=52,
line_end=26,
column_end=3,
),
),
location=Location(
line=25,
column=52,
line_end=26,
column_end=3,
),
),
directives=[],
location=Location(
line=25,
column=3,
line_end=26,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="six",
location=Location(
line=26,
column=3,
line_end=26,
column_end=6,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="argument",
location=Location(
line=26,
column=7,
line_end=26,
column_end=15,
),
),
type=NamedTypeNode(
name=NameNode(
value="InputType",
location=Location(
line=26,
column=17,
line_end=26,
column_end=27,
),
),
location=Location(
line=26,
column=17,
line_end=26,
column_end=27,
),
),
default_value=ObjectValueNode(
fields=[
ObjectFieldNode(
name=NameNode(
value="key",
location=Location(
line=26,
column=30,
line_end=26,
column_end=33,
),
),
value=StringValueNode(
value="value",
location=Location(
line=26,
column=35,
line_end=26,
column_end=42,
),
),
location=Location(
line=26,
column=30,
line_end=26,
column_end=42,
),
)
],
location=Location(
line=26,
column=29,
line_end=26,
column_end=43,
),
),
directives=[],
location=Location(
line=26,
column=7,
line_end=26,
column_end=43,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=26,
column=46,
line_end=27,
column_end=3,
),
),
location=Location(
line=26,
column=46,
line_end=27,
column_end=3,
),
),
directives=[],
location=Location(
line=26,
column=3,
line_end=27,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="seven",
location=Location(
line=27,
column=3,
line_end=27,
column_end=8,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="argument",
location=Location(
line=27,
column=9,
line_end=27,
column_end=17,
),
),
type=NamedTypeNode(
name=NameNode(
value="Int",
location=Location(
line=27,
column=19,
line_end=27,
column_end=23,
),
),
location=Location(
line=27,
column=19,
line_end=27,
column_end=23,
),
),
default_value=NullValueNode(
location=Location(
line=27,
column=25,
line_end=27,
column_end=29,
)
),
directives=[],
location=Location(
line=27,
column=9,
line_end=27,
column_end=29,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=27,
column=32,
line_end=28,
column_end=3,
),
),
location=Location(
line=27,
column=32,
line_end=28,
column_end=3,
),
),
directives=[],
location=Location(
line=27,
column=3,
line_end=28,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="height",
location=Location(
line=28,
column=3,
line_end=28,
column_end=9,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="argument",
location=Location(
line=28,
column=10,
line_end=28,
column_end=18,
),
),
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=28,
column=20,
line_end=28,
column_end=28,
),
),
location=Location(
line=28,
column=20,
line_end=28,
column_end=28,
),
),
default_value=BooleanValueNode(
value=False,
location=Location(
line=28,
column=30,
line_end=28,
column_end=35,
),
),
directives=[],
location=Location(
line=28,
column=10,
line_end=28,
column_end=35,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=28,
column=38,
line_end=29,
column_end=1,
),
),
location=Location(
line=28,
column=38,
line_end=29,
column_end=1,
),
),
directives=[],
location=Location(
line=28,
column=3,
line_end=29,
column_end=1,
),
),
],
location=Location(
line=6, column=1, line_end=31, column_end=1
),
),
ObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="AnnotatedObject",
location=Location(
line=31, column=6, line_end=31, column_end=22
),
),
interfaces=[],
directives=[
DirectiveNode(
name=NameNode(
value="onObject",
location=Location(
line=31,
column=23,
line_end=31,
column_end=31,
),
),
arguments=[
ArgumentNode(
name=NameNode(
value="arg",
location=Location(
line=31,
column=32,
line_end=31,
column_end=35,
),
),
value=StringValueNode(
value="value",
location=Location(
line=31,
column=37,
line_end=31,
column_end=44,
),
),
location=Location(
line=31,
column=32,
line_end=31,
column_end=44,
),
)
],
location=Location(
line=31,
column=22,
line_end=31,
column_end=46,
),
)
],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="annotatedField",
location=Location(
line=32,
column=3,
line_end=32,
column_end=17,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="arg",
location=Location(
line=32,
column=18,
line_end=32,
column_end=21,
),
),
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=32,
column=23,
line_end=32,
column_end=28,
),
),
location=Location(
line=32,
column=23,
line_end=32,
column_end=28,
),
),
default_value=StringValueNode(
value="default",
location=Location(
line=32,
column=30,
line_end=32,
column_end=40,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onArgumentDefinition",
location=Location(
line=32,
column=41,
line_end=32,
column_end=61,
),
),
arguments=[],
location=Location(
line=32,
column=40,
line_end=32,
column_end=61,
),
)
],
location=Location(
line=32,
column=18,
line_end=32,
column_end=61,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=32,
column=64,
line_end=32,
column_end=69,
),
),
location=Location(
line=32,
column=64,
line_end=32,
column_end=69,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onField",
location=Location(
line=32,
column=70,
line_end=33,
column_end=1,
),
),
arguments=[],
location=Location(
line=32,
column=69,
line_end=33,
column_end=1,
),
)
],
location=Location(
line=32,
column=3,
line_end=33,
column_end=1,
),
)
],
location=Location(
line=31, column=1, line_end=35, column_end=1
),
),
ObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="UndefinedType",
location=Location(
line=35, column=6, line_end=37, column_end=1
),
),
interfaces=[],
directives=[],
fields=[],
location=Location(
line=35, column=1, line_end=37, column_end=1
),
),
ObjectTypeExtensionNode(
name=NameNode(
value="Foo",
location=Location(
line=37, column=13, line_end=37, column_end=17
),
),
interfaces=[],
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="seven",
location=Location(
line=38,
column=3,
line_end=38,
column_end=8,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="argument",
location=Location(
line=38,
column=9,
line_end=38,
column_end=17,
),
),
type=ListTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=38,
column=20,
line_end=38,
column_end=26,
),
),
location=Location(
line=38,
column=20,
line_end=38,
column_end=26,
),
),
location=Location(
line=38,
column=19,
line_end=38,
column_end=27,
),
),
default_value=None,
directives=[],
location=Location(
line=38,
column=9,
line_end=38,
column_end=27,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=38,
column=30,
line_end=39,
column_end=1,
),
),
location=Location(
line=38,
column=30,
line_end=39,
column_end=1,
),
),
directives=[],
location=Location(
line=38,
column=3,
line_end=39,
column_end=1,
),
)
],
location=Location(
line=37, column=1, line_end=41, column_end=1
),
),
ObjectTypeExtensionNode(
name=NameNode(
value="Foo",
location=Location(
line=41, column=13, line_end=41, column_end=17
),
),
interfaces=[],
directives=[
DirectiveNode(
name=NameNode(
value="onType",
location=Location(
line=41,
column=18,
line_end=43,
column_end=1,
),
),
arguments=[],
location=Location(
line=41,
column=17,
line_end=43,
column_end=1,
),
)
],
fields=[],
location=Location(
line=41, column=1, line_end=43, column_end=1
),
),
InterfaceTypeDefinitionNode(
description=None,
name=NameNode(
value="Bar",
location=Location(
line=43, column=11, line_end=43, column_end=15
),
),
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="one",
location=Location(
line=44,
column=3,
line_end=44,
column_end=6,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=44,
column=8,
line_end=45,
column_end=3,
),
),
location=Location(
line=44,
column=8,
line_end=45,
column_end=3,
),
),
directives=[],
location=Location(
line=44,
column=3,
line_end=45,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="four",
location=Location(
line=45,
column=3,
line_end=45,
column_end=7,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="argument",
location=Location(
line=45,
column=8,
line_end=45,
column_end=16,
),
),
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=45,
column=18,
line_end=45,
column_end=25,
),
),
location=Location(
line=45,
column=18,
line_end=45,
column_end=25,
),
),
default_value=StringValueNode(
value="string",
location=Location(
line=45,
column=27,
line_end=45,
column_end=35,
),
),
directives=[],
location=Location(
line=45,
column=8,
line_end=45,
column_end=35,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=45,
column=38,
line_end=46,
column_end=1,
),
),
location=Location(
line=45,
column=38,
line_end=46,
column_end=1,
),
),
directives=[],
location=Location(
line=45,
column=3,
line_end=46,
column_end=1,
),
),
],
location=Location(
line=43, column=1, line_end=48, column_end=1
),
),
InterfaceTypeDefinitionNode(
description=None,
name=NameNode(
value="AnnotatedInterface",
location=Location(
line=48, column=11, line_end=48, column_end=30
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onInterface",
location=Location(
line=48,
column=31,
line_end=48,
column_end=43,
),
),
arguments=[],
location=Location(
line=48,
column=30,
line_end=48,
column_end=43,
),
)
],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="annotatedField",
location=Location(
line=49,
column=3,
line_end=49,
column_end=17,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="arg",
location=Location(
line=49,
column=18,
line_end=49,
column_end=21,
),
),
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=49,
column=23,
line_end=49,
column_end=28,
),
),
location=Location(
line=49,
column=23,
line_end=49,
column_end=28,
),
),
default_value=None,
directives=[
DirectiveNode(
name=NameNode(
value="onArgumentDefinition",
location=Location(
line=49,
column=29,
line_end=49,
column_end=49,
),
),
arguments=[],
location=Location(
line=49,
column=28,
line_end=49,
column_end=49,
),
)
],
location=Location(
line=49,
column=18,
line_end=49,
column_end=49,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=49,
column=52,
line_end=49,
column_end=57,
),
),
location=Location(
line=49,
column=52,
line_end=49,
column_end=57,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onField",
location=Location(
line=49,
column=58,
line_end=50,
column_end=1,
),
),
arguments=[],
location=Location(
line=49,
column=57,
line_end=50,
column_end=1,
),
)
],
location=Location(
line=49,
column=3,
line_end=50,
column_end=1,
),
)
],
location=Location(
line=48, column=1, line_end=52, column_end=1
),
),
InterfaceTypeDefinitionNode(
description=None,
name=NameNode(
value="UndefinedInterface",
location=Location(
line=52, column=11, line_end=54, column_end=1
),
),
directives=[],
fields=[],
location=Location(
line=52, column=1, line_end=54, column_end=1
),
),
InterfaceTypeExtensionNode(
name=NameNode(
value="Bar",
location=Location(
line=54, column=18, line_end=54, column_end=22
),
),
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="two",
location=Location(
line=55,
column=3,
line_end=55,
column_end=6,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="argument",
location=Location(
line=55,
column=7,
line_end=55,
column_end=15,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="InputType",
location=Location(
line=55,
column=17,
line_end=55,
column_end=26,
),
),
location=Location(
line=55,
column=17,
line_end=55,
column_end=26,
),
),
location=Location(
line=55,
column=17,
line_end=55,
column_end=27,
),
),
default_value=None,
directives=[],
location=Location(
line=55,
column=7,
line_end=55,
column_end=27,
),
)
],
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=55,
column=30,
line_end=56,
column_end=1,
),
),
location=Location(
line=55,
column=30,
line_end=56,
column_end=1,
),
),
directives=[],
location=Location(
line=55,
column=3,
line_end=56,
column_end=1,
),
)
],
location=Location(
line=54, column=1, line_end=58, column_end=1
),
),
InterfaceTypeExtensionNode(
name=NameNode(
value="Bar",
location=Location(
line=58, column=18, line_end=58, column_end=22
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onInterface",
location=Location(
line=58,
column=23,
line_end=60,
column_end=1,
),
),
arguments=[],
location=Location(
line=58,
column=22,
line_end=60,
column_end=1,
),
)
],
fields=[],
location=Location(
line=58, column=1, line_end=60, column_end=1
),
),
UnionTypeDefinitionNode(
description=None,
name=NameNode(
value="Feed",
location=Location(
line=60, column=7, line_end=60, column_end=12
),
),
directives=[],
types=[
NamedTypeNode(
name=NameNode(
value="Story",
location=Location(
line=61,
column=5,
line_end=62,
column_end=3,
),
),
location=Location(
line=61,
column=5,
line_end=62,
column_end=3,
),
),
NamedTypeNode(
name=NameNode(
value="Article",
location=Location(
line=62,
column=5,
line_end=63,
column_end=3,
),
),
location=Location(
line=62,
column=5,
line_end=63,
column_end=3,
),
),
NamedTypeNode(
name=NameNode(
value="Advert",
location=Location(
line=63,
column=5,
line_end=65,
column_end=1,
),
),
location=Location(
line=63,
column=5,
line_end=65,
column_end=1,
),
),
],
location=Location(
line=60, column=1, line_end=65, column_end=1
),
),
UnionTypeDefinitionNode(
description=None,
name=NameNode(
value="AnnotatedUnion",
location=Location(
line=65, column=7, line_end=65, column_end=22
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onUnion",
location=Location(
line=65,
column=23,
line_end=65,
column_end=31,
),
),
arguments=[],
location=Location(
line=65,
column=22,
line_end=65,
column_end=31,
),
)
],
types=[
NamedTypeNode(
name=NameNode(
value="A",
location=Location(
line=65,
column=33,
line_end=65,
column_end=35,
),
),
location=Location(
line=65,
column=33,
line_end=65,
column_end=35,
),
),
NamedTypeNode(
name=NameNode(
value="B",
location=Location(
line=65,
column=37,
line_end=67,
column_end=1,
),
),
location=Location(
line=65,
column=37,
line_end=67,
column_end=1,
),
),
],
location=Location(
line=65, column=1, line_end=67, column_end=1
),
),
UnionTypeDefinitionNode(
description=None,
name=NameNode(
value="AnnotatedUnionTwo",
location=Location(
line=67, column=7, line_end=67, column_end=25
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onUnion",
location=Location(
line=67,
column=26,
line_end=67,
column_end=34,
),
),
arguments=[],
location=Location(
line=67,
column=25,
line_end=67,
column_end=34,
),
)
],
types=[
NamedTypeNode(
name=NameNode(
value="A",
location=Location(
line=67,
column=38,
line_end=67,
column_end=40,
),
),
location=Location(
line=67,
column=38,
line_end=67,
column_end=40,
),
),
NamedTypeNode(
name=NameNode(
value="B",
location=Location(
line=67,
column=42,
line_end=69,
column_end=1,
),
),
location=Location(
line=67,
column=42,
line_end=69,
column_end=1,
),
),
],
location=Location(
line=67, column=1, line_end=69, column_end=1
),
),
UnionTypeDefinitionNode(
description=None,
name=NameNode(
value="UndefinedUnion",
location=Location(
line=69, column=7, line_end=71, column_end=1
),
),
directives=[],
types=[],
location=Location(
line=69, column=1, line_end=71, column_end=1
),
),
UnionTypeExtensionNode(
name=NameNode(
value="Feed",
location=Location(
line=71, column=14, line_end=71, column_end=19
),
),
directives=[],
types=[
NamedTypeNode(
name=NameNode(
value="Photo",
location=Location(
line=71,
column=21,
line_end=71,
column_end=27,
),
),
location=Location(
line=71,
column=21,
line_end=71,
column_end=27,
),
),
NamedTypeNode(
name=NameNode(
value="Video",
location=Location(
line=71,
column=29,
line_end=73,
column_end=1,
),
),
location=Location(
line=71,
column=29,
line_end=73,
column_end=1,
),
),
],
location=Location(
line=71, column=1, line_end=73, column_end=1
),
),
UnionTypeExtensionNode(
name=NameNode(
value="Feed",
location=Location(
line=73, column=14, line_end=73, column_end=19
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onUnion",
location=Location(
line=73,
column=20,
line_end=75,
column_end=1,
),
),
arguments=[],
location=Location(
line=73,
column=19,
line_end=75,
column_end=1,
),
)
],
types=[],
location=Location(
line=73, column=1, line_end=75, column_end=1
),
),
ScalarTypeDefinitionNode(
description=None,
name=NameNode(
value="CustomScalar",
location=Location(
line=75, column=8, line_end=77, column_end=1
),
),
directives=[],
location=Location(
line=75, column=1, line_end=77, column_end=1
),
),
ScalarTypeDefinitionNode(
description=None,
name=NameNode(
value="AnnotatedScalar",
location=Location(
line=77, column=8, line_end=77, column_end=24
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onScalar",
location=Location(
line=77,
column=25,
line_end=79,
column_end=1,
),
),
arguments=[],
location=Location(
line=77,
column=24,
line_end=79,
column_end=1,
),
)
],
location=Location(
line=77, column=1, line_end=79, column_end=1
),
),
ScalarTypeExtensionNode(
name=NameNode(
value="CustomScalar",
location=Location(
line=79, column=15, line_end=79, column_end=28
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onScalar",
location=Location(
line=79,
column=29,
line_end=81,
column_end=1,
),
),
arguments=[],
location=Location(
line=79,
column=28,
line_end=81,
column_end=1,
),
)
],
location=Location(
line=79, column=1, line_end=81, column_end=1
),
),
EnumTypeDefinitionNode(
description=None,
name=NameNode(
value="Site",
location=Location(
line=81, column=6, line_end=81, column_end=11
),
),
directives=[],
values=[
EnumValueDefinitionNode(
description=DescriptionNode(
value="\n This is a description of the `DESKTOP` value\n ",
location=Location(
line=82,
column=3,
line_end=85,
column_end=3,
),
),
name=EnumValueNode(
value="DESKTOP",
location=Location(
line=85,
column=3,
line_end=87,
column_end=3,
),
),
directives=[],
location=Location(
line=82,
column=3,
line_end=87,
column_end=3,
),
),
EnumValueDefinitionNode(
description=DescriptionNode(
value="This is a description of the `MOBILE` value",
location=Location(
line=87,
column=3,
line_end=88,
column_end=3,
),
),
name=EnumValueNode(
value="MOBILE",
location=Location(
line=88,
column=3,
line_end=90,
column_end=3,
),
),
directives=[],
location=Location(
line=87,
column=3,
line_end=90,
column_end=3,
),
),
EnumValueDefinitionNode(
description=DescriptionNode(
value="This is a description of the `WEB` value",
location=Location(
line=90,
column=3,
line_end=91,
column_end=3,
),
),
name=EnumValueNode(
value="WEB",
location=Location(
line=91,
column=3,
line_end=92,
column_end=1,
),
),
directives=[],
location=Location(
line=90,
column=3,
line_end=92,
column_end=1,
),
),
],
location=Location(
line=81, column=1, line_end=94, column_end=1
),
),
EnumTypeDefinitionNode(
description=None,
name=NameNode(
value="AnnotatedEnum",
location=Location(
line=94, column=6, line_end=94, column_end=20
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onEnum",
location=Location(
line=94,
column=21,
line_end=94,
column_end=28,
),
),
arguments=[],
location=Location(
line=94,
column=20,
line_end=94,
column_end=28,
),
)
],
values=[
EnumValueDefinitionNode(
description=None,
name=EnumValueNode(
value="ANNOTATED_VALUE",
location=Location(
line=95,
column=3,
line_end=95,
column_end=19,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onEnumValue",
location=Location(
line=95,
column=20,
line_end=96,
column_end=3,
),
),
arguments=[],
location=Location(
line=95,
column=19,
line_end=96,
column_end=3,
),
)
],
location=Location(
line=95,
column=3,
line_end=96,
column_end=3,
),
),
EnumValueDefinitionNode(
description=None,
name=EnumValueNode(
value="OTHER_VALUE",
location=Location(
line=96,
column=3,
line_end=97,
column_end=1,
),
),
directives=[],
location=Location(
line=96,
column=3,
line_end=97,
column_end=1,
),
),
],
location=Location(
line=94, column=1, line_end=99, column_end=1
),
),
EnumTypeDefinitionNode(
description=None,
name=NameNode(
value="UndefinedEnum",
location=Location(
line=99, column=6, line_end=101, column_end=1
),
),
directives=[],
values=[],
location=Location(
line=99, column=1, line_end=101, column_end=1
),
),
EnumTypeExtensionNode(
name=NameNode(
value="Site",
location=Location(
line=101,
column=13,
line_end=101,
column_end=18,
),
),
directives=[],
values=[
EnumValueDefinitionNode(
description=None,
name=EnumValueNode(
value="VR",
location=Location(
line=102,
column=3,
line_end=103,
column_end=1,
),
),
directives=[],
location=Location(
line=102,
column=3,
line_end=103,
column_end=1,
),
)
],
location=Location(
line=101, column=1, line_end=105, column_end=1
),
),
EnumTypeExtensionNode(
name=NameNode(
value="Site",
location=Location(
line=105,
column=13,
line_end=105,
column_end=18,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onEnum",
location=Location(
line=105,
column=19,
line_end=107,
column_end=1,
),
),
arguments=[],
location=Location(
line=105,
column=18,
line_end=107,
column_end=1,
),
)
],
values=[],
location=Location(
line=105, column=1, line_end=107, column_end=1
),
),
InputObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="InputType",
location=Location(
line=107, column=7, line_end=107, column_end=17
),
),
directives=[],
fields=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="key",
location=Location(
line=108,
column=3,
line_end=108,
column_end=6,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="String",
location=Location(
line=108,
column=8,
line_end=108,
column_end=14,
),
),
location=Location(
line=108,
column=8,
line_end=108,
column_end=14,
),
),
location=Location(
line=108,
column=8,
line_end=109,
column_end=3,
),
),
default_value=None,
directives=[],
location=Location(
line=108,
column=3,
line_end=109,
column_end=3,
),
),
InputValueDefinitionNode(
description=None,
name=NameNode(
value="answer",
location=Location(
line=109,
column=3,
line_end=109,
column_end=9,
),
),
type=NamedTypeNode(
name=NameNode(
value="Int",
location=Location(
line=109,
column=11,
line_end=109,
column_end=15,
),
),
location=Location(
line=109,
column=11,
line_end=109,
column_end=15,
),
),
default_value=IntValueNode(
value=42,
location=Location(
line=109,
column=17,
line_end=110,
column_end=1,
),
),
directives=[],
location=Location(
line=109,
column=3,
line_end=110,
column_end=1,
),
),
],
location=Location(
line=107, column=1, line_end=112, column_end=1
),
),
InputObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="AnnotatedInput",
location=Location(
line=112, column=7, line_end=112, column_end=22
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onInputObject",
location=Location(
line=112,
column=23,
line_end=112,
column_end=37,
),
),
arguments=[],
location=Location(
line=112,
column=22,
line_end=112,
column_end=37,
),
)
],
fields=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="annotatedField",
location=Location(
line=113,
column=3,
line_end=113,
column_end=17,
),
),
type=NamedTypeNode(
name=NameNode(
value="Type",
location=Location(
line=113,
column=19,
line_end=113,
column_end=24,
),
),
location=Location(
line=113,
column=19,
line_end=113,
column_end=24,
),
),
default_value=None,
directives=[
DirectiveNode(
name=NameNode(
value="onInputFieldDefinition",
location=Location(
line=113,
column=25,
line_end=114,
column_end=1,
),
),
arguments=[],
location=Location(
line=113,
column=24,
line_end=114,
column_end=1,
),
)
],
location=Location(
line=113,
column=3,
line_end=114,
column_end=1,
),
)
],
location=Location(
line=112, column=1, line_end=116, column_end=1
),
),
InputObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="UndefinedInput",
location=Location(
line=116, column=7, line_end=118, column_end=1
),
),
directives=[],
fields=[],
location=Location(
line=116, column=1, line_end=118, column_end=1
),
),
InputObjectTypeExtensionNode(
name=NameNode(
value="InputType",
location=Location(
line=118,
column=14,
line_end=118,
column_end=24,
),
),
directives=[],
fields=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="other",
location=Location(
line=119,
column=3,
line_end=119,
column_end=8,
),
),
type=NamedTypeNode(
name=NameNode(
value="Float",
location=Location(
line=119,
column=10,
line_end=119,
column_end=16,
),
),
location=Location(
line=119,
column=10,
line_end=119,
column_end=16,
),
),
default_value=FloatValueNode(
value=12300.0,
location=Location(
line=119,
column=18,
line_end=119,
column_end=25,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onInputFieldDefinition",
location=Location(
line=119,
column=26,
line_end=120,
column_end=1,
),
),
arguments=[],
location=Location(
line=119,
column=25,
line_end=120,
column_end=1,
),
)
],
location=Location(
line=119,
column=3,
line_end=120,
column_end=1,
),
)
],
location=Location(
line=118, column=1, line_end=122, column_end=1
),
),
InputObjectTypeExtensionNode(
name=NameNode(
value="InputType",
location=Location(
line=122,
column=14,
line_end=122,
column_end=24,
),
),
directives=[
DirectiveNode(
name=NameNode(
value="onInputObject",
location=Location(
line=122,
column=25,
line_end=124,
column_end=1,
),
),
arguments=[],
location=Location(
line=122,
column=24,
line_end=124,
column_end=1,
),
)
],
fields=[],
location=Location(
line=122, column=1, line_end=124, column_end=1
),
),
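                # Directive definitions from the fixture (@skip, @include, @include2).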
DirectiveDefinitionNode(
description=DescriptionNode(
value="\nThis is a description of the `@skip` directive\n",
location=Location(
line=124, column=1, line_end=127, column_end=1
),
),
name=NameNode(
value="skip",
location=Location(
line=127,
column=12,
line_end=127,
column_end=16,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="if",
location=Location(
line=128,
column=3,
line_end=128,
column_end=5,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=128,
column=7,
line_end=128,
column_end=14,
),
),
location=Location(
line=128,
column=7,
line_end=128,
column_end=14,
),
),
location=Location(
line=128,
column=7,
line_end=128,
column_end=16,
),
),
default_value=None,
directives=[
DirectiveNode(
name=NameNode(
value="onArgumentDefinition",
location=Location(
line=128,
column=17,
line_end=129,
column_end=1,
),
),
arguments=[],
location=Location(
line=128,
column=16,
line_end=129,
column_end=1,
),
)
],
location=Location(
line=128,
column=3,
line_end=129,
column_end=1,
),
)
],
locations=[
NameNode(
value="FIELD",
location=Location(
line=129,
column=6,
line_end=129,
column_end=12,
),
),
NameNode(
value="FRAGMENT_SPREAD",
location=Location(
line=129,
column=14,
line_end=129,
column_end=30,
),
),
NameNode(
value="INLINE_FRAGMENT",
location=Location(
line=129,
column=32,
line_end=131,
column_end=1,
),
),
],
location=Location(
line=124, column=1, line_end=131, column_end=1
),
),
DirectiveDefinitionNode(
description=None,
name=NameNode(
value="include",
location=Location(
line=131,
column=12,
line_end=131,
column_end=19,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="if",
location=Location(
line=131,
column=20,
line_end=131,
column_end=22,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=131,
column=24,
line_end=131,
column_end=31,
),
),
location=Location(
line=131,
column=24,
line_end=131,
column_end=31,
),
),
location=Location(
line=131,
column=24,
line_end=131,
column_end=32,
),
),
default_value=None,
directives=[],
location=Location(
line=131,
column=20,
line_end=131,
column_end=32,
),
)
],
locations=[
NameNode(
value="FIELD",
location=Location(
line=132,
column=6,
line_end=133,
column_end=4,
),
),
NameNode(
value="FRAGMENT_SPREAD",
location=Location(
line=133,
column=6,
line_end=134,
column_end=4,
),
),
NameNode(
value="INLINE_FRAGMENT",
location=Location(
line=134,
column=6,
line_end=136,
column_end=1,
),
),
],
location=Location(
line=131, column=1, line_end=136, column_end=1
),
),
DirectiveDefinitionNode(
description=None,
name=NameNode(
value="include2",
location=Location(
line=136,
column=12,
line_end=136,
column_end=20,
),
),
arguments=[
InputValueDefinitionNode(
description=None,
name=NameNode(
value="if",
location=Location(
line=136,
column=21,
line_end=136,
column_end=23,
),
),
type=NonNullTypeNode(
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=136,
column=25,
line_end=136,
column_end=32,
),
),
location=Location(
line=136,
column=25,
line_end=136,
column_end=32,
),
),
location=Location(
line=136,
column=25,
line_end=136,
column_end=33,
),
),
default_value=None,
directives=[],
location=Location(
line=136,
column=21,
line_end=136,
column_end=33,
),
)
],
locations=[
NameNode(
value="FIELD",
location=Location(
line=137,
column=5,
line_end=138,
column_end=3,
),
),
NameNode(
value="FRAGMENT_SPREAD",
location=Location(
line=138,
column=5,
line_end=139,
column_end=3,
),
),
NameNode(
value="INLINE_FRAGMENT",
location=Location(
line=139,
column=5,
line_end=141,
column_end=1,
),
),
],
location=Location(
line=136, column=1, line_end=141, column_end=1
),
),
SchemaExtensionNode(
directives=[
DirectiveNode(
name=NameNode(
value="onSchema",
location=Location(
line=141,
column=16,
line_end=143,
column_end=1,
),
),
arguments=[],
location=Location(
line=141,
column=15,
line_end=143,
column_end=1,
),
)
],
operation_type_definitions=[],
location=Location(
line=141, column=1, line_end=143, column_end=1
),
),
SchemaExtensionNode(
directives=[
DirectiveNode(
name=NameNode(
value="onSchema",
location=Location(
line=143,
column=16,
line_end=143,
column_end=25,
),
),
arguments=[],
location=Location(
line=143,
column=15,
line_end=143,
column_end=25,
),
)
],
operation_type_definitions=[
OperationTypeDefinitionNode(
operation_type="subscription",
type=NamedTypeNode(
name=NameNode(
value="SubscriptionType",
location=Location(
line=144,
column=17,
line_end=145,
column_end=1,
),
),
location=Location(
line=144,
column=17,
line_end=145,
column_end=1,
),
),
location=Location(
line=144,
column=3,
line_end=145,
column_end=1,
),
)
],
location=Location(
line=143, column=1, line_end=146, column_end=1
),
),
],
location=Location(
line=1, column=1, line_end=146, column_end=1
),
),
),
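    # Expected AST for the keyword-tokens fixture (fixtures/keyword-tokens.graphql),
    # where reserved keywords ("schema", "query", "type", ...) appear as ordinary field names.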
(
os.path.join(_BASE_DIR, "fixtures", "keyword-tokens.graphql"),
DocumentNode(
definitions=[
ObjectTypeDefinitionNode(
description=None,
name=NameNode(
value="TreeEntry",
location=Location(
line=1, column=6, line_end=1, column_end=16
),
),
interfaces=[],
directives=[],
fields=[
FieldDefinitionNode(
description=None,
name=NameNode(
value="schema",
location=Location(
line=2,
column=3,
line_end=2,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=2,
column=11,
line_end=3,
column_end=3,
),
),
location=Location(
line=2,
column=11,
line_end=3,
column_end=3,
),
),
directives=[],
location=Location(
line=2, column=3, line_end=3, column_end=3
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="query",
location=Location(
line=3,
column=3,
line_end=3,
column_end=8,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=3,
column=10,
line_end=4,
column_end=3,
),
),
location=Location(
line=3,
column=10,
line_end=4,
column_end=3,
),
),
directives=[],
location=Location(
line=3, column=3, line_end=4, column_end=3
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="mutation",
location=Location(
line=4,
column=3,
line_end=4,
column_end=11,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=4,
column=13,
line_end=5,
column_end=3,
),
),
location=Location(
line=4,
column=13,
line_end=5,
column_end=3,
),
),
directives=[],
location=Location(
line=4, column=3, line_end=5, column_end=3
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="subscription",
location=Location(
line=5,
column=3,
line_end=5,
column_end=15,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=5,
column=17,
line_end=6,
column_end=3,
),
),
location=Location(
line=5,
column=17,
line_end=6,
column_end=3,
),
),
directives=[],
location=Location(
line=5, column=3, line_end=6, column_end=3
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="type",
location=Location(
line=6,
column=3,
line_end=6,
column_end=7,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=6,
column=9,
line_end=7,
column_end=3,
),
),
location=Location(
line=6,
column=9,
line_end=7,
column_end=3,
),
),
directives=[],
location=Location(
line=6, column=3, line_end=7, column_end=3
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="directive",
location=Location(
line=7,
column=3,
line_end=7,
column_end=12,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=7,
column=14,
line_end=8,
column_end=3,
),
),
location=Location(
line=7,
column=14,
line_end=8,
column_end=3,
),
),
directives=[],
location=Location(
line=7, column=3, line_end=8, column_end=3
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="interface",
location=Location(
line=8,
column=3,
line_end=8,
column_end=12,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=8,
column=14,
line_end=9,
column_end=3,
),
),
location=Location(
line=8,
column=14,
line_end=9,
column_end=3,
),
),
directives=[],
location=Location(
line=8, column=3, line_end=9, column_end=3
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="implements",
location=Location(
line=9,
column=3,
line_end=9,
column_end=13,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=9,
column=15,
line_end=10,
column_end=3,
),
),
location=Location(
line=9,
column=15,
line_end=10,
column_end=3,
),
),
directives=[],
location=Location(
line=9, column=3, line_end=10, column_end=3
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="union",
location=Location(
line=10,
column=3,
line_end=10,
column_end=8,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=10,
column=10,
line_end=11,
column_end=3,
),
),
location=Location(
line=10,
column=10,
line_end=11,
column_end=3,
),
),
directives=[],
location=Location(
line=10,
column=3,
line_end=11,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="enum",
location=Location(
line=11,
column=3,
line_end=11,
column_end=7,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=11,
column=9,
line_end=12,
column_end=3,
),
),
location=Location(
line=11,
column=9,
line_end=12,
column_end=3,
),
),
directives=[],
location=Location(
line=11,
column=3,
line_end=12,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="input",
location=Location(
line=12,
column=3,
line_end=12,
column_end=8,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=12,
column=10,
line_end=13,
column_end=3,
),
),
location=Location(
line=12,
column=10,
line_end=13,
column_end=3,
),
),
directives=[],
location=Location(
line=12,
column=3,
line_end=13,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="on",
location=Location(
line=13,
column=3,
line_end=13,
column_end=5,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=13,
column=7,
line_end=14,
column_end=3,
),
),
location=Location(
line=13,
column=7,
line_end=14,
column_end=3,
),
),
directives=[],
location=Location(
line=13,
column=3,
line_end=14,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="scalar",
location=Location(
line=14,
column=3,
line_end=14,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=14,
column=11,
line_end=15,
column_end=3,
),
),
location=Location(
line=14,
column=11,
line_end=15,
column_end=3,
),
),
directives=[],
location=Location(
line=14,
column=3,
line_end=15,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="extend",
location=Location(
line=15,
column=3,
line_end=15,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=15,
column=11,
line_end=16,
column_end=3,
),
),
location=Location(
line=15,
column=11,
line_end=16,
column_end=3,
),
),
directives=[],
location=Location(
line=15,
column=3,
line_end=16,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="QUERY",
location=Location(
line=16,
column=3,
line_end=16,
column_end=8,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=16,
column=10,
line_end=17,
column_end=3,
),
),
location=Location(
line=16,
column=10,
line_end=17,
column_end=3,
),
),
directives=[],
location=Location(
line=16,
column=3,
line_end=17,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="MUTATION",
location=Location(
line=17,
column=3,
line_end=17,
column_end=11,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=17,
column=13,
line_end=18,
column_end=3,
),
),
location=Location(
line=17,
column=13,
line_end=18,
column_end=3,
),
),
directives=[],
location=Location(
line=17,
column=3,
line_end=18,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="SUBSCRIPTION",
location=Location(
line=18,
column=3,
line_end=18,
column_end=15,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=18,
column=17,
line_end=19,
column_end=3,
),
),
location=Location(
line=18,
column=17,
line_end=19,
column_end=3,
),
),
directives=[],
location=Location(
line=18,
column=3,
line_end=19,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="FIELD",
location=Location(
line=19,
column=3,
line_end=19,
column_end=8,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=19,
column=10,
line_end=20,
column_end=3,
),
),
location=Location(
line=19,
column=10,
line_end=20,
column_end=3,
),
),
directives=[],
location=Location(
line=19,
column=3,
line_end=20,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="FRAGMENT_DEFINITION",
location=Location(
line=20,
column=3,
line_end=20,
column_end=22,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=20,
column=24,
line_end=21,
column_end=3,
),
),
location=Location(
line=20,
column=24,
line_end=21,
column_end=3,
),
),
directives=[],
location=Location(
line=20,
column=3,
line_end=21,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="FRAGMENT_SPREAD",
location=Location(
line=21,
column=3,
line_end=21,
column_end=18,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=21,
column=20,
line_end=22,
column_end=3,
),
),
location=Location(
line=21,
column=20,
line_end=22,
column_end=3,
),
),
directives=[],
location=Location(
line=21,
column=3,
line_end=22,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="INLINE_FRAGMENT",
location=Location(
line=22,
column=3,
line_end=22,
column_end=18,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=22,
column=20,
line_end=23,
column_end=3,
),
),
location=Location(
line=22,
column=20,
line_end=23,
column_end=3,
),
),
directives=[],
location=Location(
line=22,
column=3,
line_end=23,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="SCHEMA",
location=Location(
line=23,
column=3,
line_end=23,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=23,
column=11,
line_end=24,
column_end=3,
),
),
location=Location(
line=23,
column=11,
line_end=24,
column_end=3,
),
),
directives=[],
location=Location(
line=23,
column=3,
line_end=24,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="SCALAR",
location=Location(
line=24,
column=3,
line_end=24,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=24,
column=11,
line_end=25,
column_end=3,
),
),
location=Location(
line=24,
column=11,
line_end=25,
column_end=3,
),
),
directives=[],
location=Location(
line=24,
column=3,
line_end=25,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="OBJECT",
location=Location(
line=25,
column=3,
line_end=25,
column_end=9,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=25,
column=11,
line_end=26,
column_end=3,
),
),
location=Location(
line=25,
column=11,
line_end=26,
column_end=3,
),
),
directives=[],
location=Location(
line=25,
column=3,
line_end=26,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="FIELD_DEFINITION",
location=Location(
line=26,
column=3,
line_end=26,
column_end=19,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=26,
column=21,
line_end=27,
column_end=3,
),
),
location=Location(
line=26,
column=21,
line_end=27,
column_end=3,
),
),
directives=[],
location=Location(
line=26,
column=3,
line_end=27,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="ARGUMENT_DEFINITION",
location=Location(
line=27,
column=3,
line_end=27,
column_end=22,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=27,
column=24,
line_end=28,
column_end=3,
),
),
location=Location(
line=27,
column=24,
line_end=28,
column_end=3,
),
),
directives=[],
location=Location(
line=27,
column=3,
line_end=28,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="INTERFACE",
location=Location(
line=28,
column=3,
line_end=28,
column_end=12,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=28,
column=14,
line_end=29,
column_end=3,
),
),
location=Location(
line=28,
column=14,
line_end=29,
column_end=3,
),
),
directives=[],
location=Location(
line=28,
column=3,
line_end=29,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="UNION",
location=Location(
line=29,
column=3,
line_end=29,
column_end=8,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=29,
column=10,
line_end=30,
column_end=3,
),
),
location=Location(
line=29,
column=10,
line_end=30,
column_end=3,
),
),
directives=[],
location=Location(
line=29,
column=3,
line_end=30,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="ENUM",
location=Location(
line=30,
column=3,
line_end=30,
column_end=7,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=30,
column=9,
line_end=31,
column_end=3,
),
),
location=Location(
line=30,
column=9,
line_end=31,
column_end=3,
),
),
directives=[],
location=Location(
line=30,
column=3,
line_end=31,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="ENUM_VALUE",
location=Location(
line=31,
column=3,
line_end=31,
column_end=13,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=31,
column=15,
line_end=32,
column_end=3,
),
),
location=Location(
line=31,
column=15,
line_end=32,
column_end=3,
),
),
directives=[],
location=Location(
line=31,
column=3,
line_end=32,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="INPUT_OBJECT",
location=Location(
line=32,
column=3,
line_end=32,
column_end=15,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=32,
column=17,
line_end=33,
column_end=3,
),
),
location=Location(
line=32,
column=17,
line_end=33,
column_end=3,
),
),
directives=[],
location=Location(
line=32,
column=3,
line_end=33,
column_end=3,
),
),
FieldDefinitionNode(
description=None,
name=NameNode(
value="INPUT_FIELD_DEFINITION",
location=Location(
line=33,
column=3,
line_end=33,
column_end=25,
),
),
arguments=[],
type=NamedTypeNode(
name=NameNode(
value="Boolean",
location=Location(
line=33,
column=27,
line_end=34,
column_end=1,
),
),
location=Location(
line=33,
column=27,
line_end=34,
column_end=1,
),
),
directives=[],
location=Location(
line=33,
column=3,
line_end=34,
column_end=1,
),
),
],
location=Location(
line=1, column=1, line_end=35, column_end=1
),
)
],
location=Location(line=1, column=1, line_end=35, column_end=1),
),
),
],
)
def test_parse_to_document(sdl_file_path, expected):
with open(sdl_file_path) as sdl_file:
assert parse_to_document(sdl_file.read()) == expected
def test_parse_to_document_mock():
parsed_mock = Mock()
node_transformed = Mock()
sdl = "type MyType { a: String }"
with patch(
"tartiflette.language.parsers.lark.parser._LARK_PARSER.parse",
return_value=parsed_mock,
) as lark_parser_mock:
with patch(
"tartiflette.language.parsers.lark.parser.NodeTransformer",
return_value=node_transformed,
) as node_transformer_mock:
with patch(
"tartiflette.language.parsers.lark.parser.TokenTransformer"
) as token_transformer_mock:
assert parse_to_document(sdl) == node_transformed.document_node
node_transformer_mock.assert_called_once()
token_transformer_mock.assert_called_once()
lark_parser_mock.assert_called_once_with(sdl)
|
[
"raulic.maximilien@gmail.com"
] |
raulic.maximilien@gmail.com
|
edf2cabc5e84594334d14292c5ba43863618c2c5
|
3a5f14a4a1911229dde120f9352fb63da73b2726
|
/AMDMServer/util/DBManager.py
|
ba558baf8d7b8128b4c012101c712bc6553ad216
|
[] |
no_license
|
yeonghokim/OSAM_AMDM
|
0aeebe3dc93c1eb8cde722c1c4e22999f318c87f
|
a930ab1a12d9ab8201ba01ea7d05cfc14d500dd8
|
refs/heads/master
| 2022-12-19T01:01:30.952629
| 2020-10-08T10:33:34
| 2020-10-08T10:33:34
| 300,146,813
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
#from util.jsonManager import *
import sqlite3
from util.serverLog import LogD
def updateIoTData(DM, DBLocation):
    con = sqlite3.connect(DBLocation)
    cur = con.cursor()
    cur.execute("UPDATE PHONECASE SET IS_LOCK=? WHERE PHONECASE_PR=?;", (DM.getData("Lock"), DM.getData("ID")))
    LogD("UpdateIoTSQL complete (id: " + str(DM.getData("ID")) + ", Lock: " + str(DM.getData("Lock")) + ")")
    con.commit()
    con.close()
    return True
def updateAndroidData(DM, DBLocation):
    con = sqlite3.connect(DBLocation)
    cur = con.cursor()
    cur.execute("UPDATE PHONE SET IS_LOCK=? WHERE PHONE_PR=?;", (DM.getData("Lock"), DM.getData("ID")))
    cur.execute("INSERT INTO LOCKMANAGE(PHONE_UNIQUENUM,MANAGETIME, IS_LOCK) VALUES(?,CURRENT_TIMESTAMP,?);", (DM.getData("ID"), DM.getData("Lock")))
    con.commit()
    con.close()
    LogD("UpdateAndroidSQL complete (id: " + str(DM.getData("ID")) + ", Lock: " + str(DM.getData("Lock")) + ")")
    return True
def requestAndroidDataToIoT(DM, DBLocation, androidSocket):  # fixed: parameter was misspelled "DBLocatio" while the body uses DBLocation
    print(DM.getFileStr())
    con = sqlite3.connect(DBLocation)
    cur = con.cursor()
    cur.execute("UPDATE PHONECASE SET IS_LOCK=? WHERE PHONECASE_PR=?;", (DM.getData("Lock"), DM.getData("IoTID")))
    con.commit()
    con.close()
    # notify the Android side over androidSocket that the request is done
    # androidSocket.send(data)
    LogD("requestAndroidSQL complete (AdminID: " + str(DM.getData("ID")) + ", IoTID: " + str(DM.getData("IoTID")) + ", Lock: " + str(DM.getData("Lock")) + ")")
    return True
|
[
"yeongho.kim2000@gmail.com"
] |
yeongho.kim2000@gmail.com
|
6d8d123690287a79fcc0833b2ba1410a6f893f52
|
e465ec4e9af2358ed8a63b555b03d24a30a68790
|
/study_python/day5/write_example2.py
|
d802f005d8dc31e93a387164c957087c9b81e36d
|
[] |
no_license
|
veritakim/study_python
|
40930442130d78190977157229bef9e03f6303fe
|
8c25ec8716fc16223473f53e87d29f102fc80adb
|
refs/heads/main
| 2023-02-27T16:40:36.189980
| 2021-02-05T11:42:56
| 2021-02-05T11:42:56
| 330,602,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
'''
In an earlier exercise we created a file called vocabulary.txt that lists the
words we want to memorize. This program uses that file to quiz the user.
The program prints a Korean meaning to the console, and the user must type in
the matching English word. If the answer is correct it prints "맞았습니다!"
("Correct!"); otherwise it prints "아쉽습니다. 정답은 OOO입니다." ("Too bad.
The answer is OOO.").
Questions are asked in the order the words appear in vocabulary.txt.
'''
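# Assumed format of vocabulary.txt (the file itself is not part of this dump):
# one "english: korean" pair per line, e.g.
#   apple: 사과
#   banana: 바나나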
with open('vocabulary.txt', 'r', encoding='UTF-8') as f:
for line in f:
word = line.strip()
eng = word.split(": ")[0]
kor = word.split(": ")[1]
riddle = input(f"{kor}: ")
if riddle == eng:
print("맞았습니다!")
else:
print(f"아쉽습니다. 정답은 {eng}입니다.")
|
[
"kjs3597@gmail.com"
] |
kjs3597@gmail.com
|
d53ec57abc4aabc0c00b58decf042363f7a3cb3b
|
9936e97e5536ba6dc10b717a0b264f26b2557def
|
/reorder.py
|
0c237c9f932ee5430e5ac23910dd3d3d859a67f9
|
[
"MIT"
] |
permissive
|
acoli-repo/book-gen
|
e68cc5836be7cd23b7a5483b3574e3b97a880090
|
ef93116d2d6d4f10ef5f29c296c1f55448793a4a
|
refs/heads/master
| 2021-07-05T09:52:06.760023
| 2021-06-25T10:47:35
| 2021-06-25T10:47:35
| 183,104,770
| 5
| 4
|
MIT
| 2020-09-18T21:23:17
| 2019-04-23T22:26:10
|
Python
|
UTF-8
|
Python
| false
| false
| 14,269
|
py
|
import sys
import os
import gensim
import re
import random
import csv
import logging
import numpy as np
# import theanets
import json
# from sklearn.metrics import classification_report, confusion_matrix
import operator
from random import random
from gensim.models import KeyedVectors
### Main program
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
path = ''
# d2vFile = "300dimStreamedChemPhrases/model_streamed_topic.w2v"
d2vFile = "doc2vec/samuel_ngrams_model/model_chemdump_ngrams.w2v"
threshold = 6 #7
sim_threshold = 0.5 #0.4
importance = 0 #20
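# Notes on the three tunables above (defaults shown; overridable with the -t,
# -m and -i command-line flags parsed below):
#   threshold     - minimum accumulated similarity a candidate sentence needs
#                   before it can be picked as the next sentence in the order
#   sim_threshold - minimum word-vector similarity for a pair of tags to count
#                   towards that accumulated score
#   importance    - if non-zero, words are tagged by corpus-relative frequency
#                   instead of by part-of-speech patterns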
if "-t" in sys.argv:
for i in range(len(sys.argv)):
if sys.argv[i] == "-t":
if (sys.argv[i+1][:sys.argv[i+1].find(".")] + sys.argv[i+1][sys.argv[i+1].find(".")+1:]).isdigit():
threshold = float(sys.argv[i+1])
else:
input(sys.argv[i+1] + " is not a valid number. " + str(threshold) + " used as default. \nPress Enter to confirm. ")
sys.argv = sys.argv[:i] + sys.argv[i+2:]
break
if "-m" in sys.argv:
for i in range(len(sys.argv)):
if sys.argv[i] == "-m":
if (sys.argv[i+1][:sys.argv[i+1].find(".")] + sys.argv[i+1][sys.argv[i+1].find(".")+1:]).isdigit():
sim_threshold = float(sys.argv[i+1])
else:
input(sys.argv[i+1] + " is not a valid number. " + str(sim_threshold) + " used as default. \nPress Enter to confirm. ")
sys.argv = sys.argv[:i] + sys.argv[i+2:]
break
if "-i" in sys.argv:
for i in range(len(sys.argv)):
if sys.argv[i] == "-i":
if (sys.argv[i+1][:sys.argv[i+1].find(".")] + sys.argv[i+1][sys.argv[i+1].find(".")+1:]).isdigit():
importance = float(sys.argv[i+1])
else:
input(sys.argv[i+1] + " is not a valid number. " + str(importance) + " used as default. \nPress Enter to confirm. ")
sys.argv = sys.argv[:i] + sys.argv[i+2:]
break
black = False
if "-black" in sys.argv:
black = True
for i in range(len(sys.argv)):
if sys.argv[i] == "-black":
sys.argv = sys.argv[:i] + sys.argv[i+1:]
break
plain = False
if "-plain" in sys.argv:
plain = True
for i in range(len(sys.argv)):
if sys.argv[i] == "-plain":
sys.argv = sys.argv[:i] + sys.argv[i+1:]
break
if len(sys.argv) == 1:
input("No folder given. Current folder used. \nPress Enter to confirm. ")
elif sys.argv[1] == "-h":
print("folder [doc2vecmodel] [-t 7] [-m 0.4]")
else:
path = sys.argv[1]
if path[-1] != "/":
path += "/"
if len(sys.argv) > 2:
d2vFile = sys.argv[2]
# Load word vectors
vectors = KeyedVectors.load(d2vFile, mmap='r')
# print(vectors.most_similar("crops"))
# input("->")
# opening files
thisFileName = path + "section-sentences-restructured-syn-pos.txt"
if not os.path.exists(thisFileName):
print("no synonym pos file found")
thisFileName = path + "section-sentences-restructured-pos.txt"
if not os.path.exists(thisFileName):
print("no restructured pos file found")
thisFileName = path + "section-sentences-pos.txt"
f1 = open(thisFileName)
pos = f1.read()
f1.close()
pos = pos.split("\n")[:-1]
for i in range(len(pos)):
pos[i] = pos[i].split(" ")[:-1]
thisFileName = path + "section-sentences-restructured-syn-tok.txt"
if not os.path.exists(thisFileName):
print("no synonym tok file found")
thisFileName = path + "section-sentences-restructured-tok.txt"
if not os.path.exists(thisFileName):
print("no restructured tok file found")
thisFileName = path + "section-sentences-tok.txt"
f1 = open(thisFileName)
raw = f1.read()
f1.close()
raw = raw.split("\n")[:-1]
for i in range(len(raw)):
raw[i] = raw[i].split(" ")[:-1]
thisFileName = path + "section-sentences-ids.txt"
if not os.path.exists(thisFileName):
thisFileName += ".txt"
f1 = open(thisFileName)
ids = f1.read()
f1.close()
ids = ids.split("\n")[1:-1]
for i in range(len(ids)):
ids[i] = ids[i].split(",")
f1 = open(path + "section-sentences-restructured-meta.tsv")
meta = f1.read()
f1.close()
meta = meta.split("\n")[:-1]
for i in range(len(meta)):
meta[i] = meta[i].split("\t")
# # opening the files with the first sentences
thisFileName = path + "abstract-sentences-restructured-syn-pos.txt"
if not os.path.exists(thisFileName):
print("no synonym abstract pos file found")
thisFileName = path + "abstract-sentences-restructured-pos.txt"
if not os.path.exists(thisFileName):
print("no abstract restructured pos file found")
thisFileName = path + "abstract-sentences-pos.txt"
if os.path.exists(thisFileName):
f1 = open(thisFileName)
fst_pos = f1.read()
f1.close()
fst_pos = fst_pos.split("\n")[:-1]
for i in range(len(fst_pos)):
fst_pos[i] = fst_pos[i].split(" ")[:-1]
thisFileName = path + "abstract-sentences-restructured-syn-tok.txt"
if not os.path.exists(thisFileName):
print("no synonym abstract tok file found")
thisFileName = path + "abstract-sentences-restructured-tok.txt"
if not os.path.exists(thisFileName):
print("no abstract restructured tok file found")
thisFileName = path + "abstract-sentences-tok.txt"
f1 = open(thisFileName)
fst_raw = f1.read()
f1.close()
fst_raw = fst_raw.split("\n")[:-1]
for i in range(len(fst_raw)):
fst_raw[i] = fst_raw[i].split(" ") #[:-1]
f1 = open(path + "abstract-sentences-ids.txt")
fst_ids = f1.read()
f1.close()
fst_ids = fst_ids.split("\n")[1:-1]
for i in range(len(fst_ids)):
fst_ids[i] = fst_ids[i].split(",")
f1 = open(path + "abstract-sentences-restructured-meta.tsv")
fst_meta = f1.read()
f1.close()
fst_meta = fst_meta.split("\n")[:-1]
for i in range(len(fst_meta)):
fst_meta[i] = fst_meta[i].split("\t")
else:
print("no abstract found at all")
fst_pos = []
fst_raw = []
fst_ids = []
fst_meta = []
# # adding the first sentences to the list of all sentences
# to separate the first sentences later on
sent_count = len(raw)
raw += fst_raw
pos += fst_pos
ids += fst_ids
meta += fst_meta
# raw = [["lithium"]]
allWords = {}
wholeSize = 0
for s in raw:
for w in s:
wholeSize += 1
if w in allWords:
allWords[w] += 1
else:
allWords[w] = 1
corpusSize = 0
for w in vectors.wv.vocab:
corpusSize += vectors.wv.vocab[w].count
for w in allWords:
if w in vectors.wv.vocab:
# print (w, allWords[w], (allWords[w]/wholeSize)/(vectors.wv.vocab[w].count/corpusSize), sep = "\t")
allWords[w] = (allWords[w]/wholeSize)/(vectors.wv.vocab[w].count/corpusSize)
# sortedWords = sorted(allWords.items(), key=operator.itemgetter(1))
# for w in sortedWords:
# if type(w[1]) == int:
# print(w[0], "\tnot in corpus")
# else:
# print(w[0], "\t", w[1])
tags = {}
sent_tags = {}
for i in range(len(raw)):
sent_tags[i] = {}
imp_sent = range(len(raw))
def addToTag(sentence, word):
if word.isdigit():
return
if (len(word) > 0): # and (word in vectors):
if word in tags:
tags[word][sentence] = True
else:
tags[word] = {sentence: True}
if sentence in sent_tags:
sent_tags[sentence][word] = True
else:
sent_tags[sentence] = {word: True}
for sentence in range(len(pos)):
for word in range(len(pos[sentence])):
if importance:
if (allWords[raw[sentence][word]] > importance) or (type(allWords[raw[sentence][word]]) == int): # 1e-8:
addToTag(sentence, raw[sentence][word])
else:
if (pos[sentence][word].startswith("NN")):
addToTag(sentence, raw[sentence][word])
continue
if ("-" in raw[sentence][word]) and not(raw[sentence][word].endswith("-")):
addToTag(sentence, raw[sentence][word])
continue
if not (raw[sentence][word].endswith("-")):
for x in raw[sentence][word][1:]:
if x.isupper() or x.isdigit():
addToTag(sentence, raw[sentence][word])
break
if (len(pos[sentence])-2 >= word) and ((pos[sentence][word] == "NN") and (pos[sentence][word+1] == "NN")) or ((pos[sentence][word] == "JJ") and (pos[sentence][word+1] == "NN")):
addToTag(sentence, raw[sentence][word] + "_" + raw[sentence][word+1])
# for sentence in range(len(pos)):
# for word in range(len(pos[sentence])):
# if raw[sentence][word] in tags:
# addToTag(sentence, raw[sentence][word])
ord_sent = []
for i in range(len(raw)):
ord_sent.append(i)
def getColor(doc):
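    # deterministically map a document id to a hex colour so that all
    # sentences taken from the same source document share one colour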
doc = int(doc)
r = int(((((575*doc+313)) % 907 ) % 20 + 2)*10)
g = int(((((612*doc+741)) % 1223) % 20 + 3)*10)
b = int(((((754*doc+329)) % 761 ) % 20 + 1)*10)
def fillNeros(x):
while len(x) < 6:
x = "0" + x
return x
return '#' + fillNeros(hex( (r*256 + g)*256 + b )[2:])
neworder = [ord_sent[0]]
# ord_sent = ord_sent[1:]
html = ["<p>"] #+ " ".join(raw[neworder[-1]])
def format(outputLine, maxi):
output = outputLine
# while True:
# # output = output.replace('` <font style="text-decoration: underline">', "`")
# # output = output.replace("</font> </font>", "</font>")
# # output = output.replace('<font style="text-decoration: underline"> <font style="text-decoration: underline">', '<font style="text-decoration: underline">')
# # output = output.replace("'</font> '", "''")
# # print(output)
# if output == outputLine:
# break
# outputLine = output
outputLine = output.replace("` ", "`").replace(" '", "'").replace('underline"> ', 'underline">').replace(" </font>", "</font>")
html = ""
if (not plain) and (maxi >= sent_count):
html += "<b>"
if not black:
html += '<font color="' + getColor(ids[maxi][1]) + '">'
html += outputLine
# html += untermaxi(maxi)
if not black:
html += '''</font> ''' + '<font color="#000000">'
if plain:
html += "<!-- "
if maxi < sent_count:
# print(maxi, ids[maxi])
html += "(sentID:" + ids[maxi][0] + ',doc:' + ids[maxi][1] + ',origSent:' + ids[maxi][3] + ")"
else:
html += '''(doc:''' + str(ids[maxi][1]) + ''',abstract Sentence) '''
if (not plain) and (maxi >= sent_count):
html += "</b>"
if int(meta[maxi][1]) < 11:
html += " -- ok"
else:
html += " -- <i> LCS: " + str(meta[maxi][1]) + "</i>"
if plain:
html += " -->"
html += "<br/>"
return html
outputLast = ""
while ord_sent:
sims = []
tag_counts = []
max_sim = 0
maxi = ord_sent[0]
outputLine = " ".join(raw[ord_sent[0]])
# outputLast = html[-1]
# looking for the most similar sentence to come next
for i in ord_sent:
lastLine = outputLast
thisLine = " ".join(raw[i])
sim = 0
for y in sent_tags[i]: # raw[i]:
for x in sent_tags[neworder[-1]]: # raw[neworder[-1]]:
if (x in vectors) and (y in vectors):
this_sim = vectors.similarity(x, y)
if this_sim >= sim_threshold:
sim += 1 # this_sim #1
if (not plain):
if not x in '<font style="text-decoration: underline">':
if (x.replace("_", " ") + " '") in lastLine:
lastLine = (" " + lastLine).replace(x.replace("_", " ") + " '", x.replace("_", " ") + " ''")
else:
lastLine = (" " + lastLine).replace(" " + x.replace("_", " ") + " ", ' <font style="text-decoration: underline"> ' + x.replace("_", " ") + " '</font> ")
if outputLast and (not y in '<font style="text-decoration: underline">'):
if ("` " + y.replace("_", " ")) in thisLine:
thisLine = (" " + thisLine).replace("` " + y.replace("_", " "), '`` ' + y.replace("_", " "))
else:
thisLine = (" " + thisLine).replace(" " + y.replace("_", " ") + " ", ' <font style="text-decoration: underline">` ' + y.replace("_", " ") + ' </font> ')
# else:
# print(y)
elif x == y:
sim += 1
# bias towards first (abstract) sentences
if (i > sent_count) and (neworder[-1] <= sent_count):
sim *= 1.2
# and especially when it's from the same document
if ids[i][1] == ids[neworder[-1]][1]:
sim *= 1.7
# and especially when it's the last sentence from the document
if len([x for x in ord_sent if ids[x][1] == ids[i][1]]) == 1:
sim *= 5
if (sim > max_sim) and (sim > threshold):
max_sim = sim
maxi = i
outputLast = lastLine
outputLine = thisLine
while outputLast.startswith(" "):
outputLast = outputLast[1:]
if (outputLast):
# output +=
html += [format(outputLast, neworder[-1])]
outputLast = outputLine
neworder += [maxi]
ord_sent = [s for s in ord_sent if s != maxi] #ord_sent[:maxi] + ord_sent[maxi+1:]
html += [format(outputLast, neworder[-1])]
# if last_doc != ids[maxi][1]:
# html += "</p><p>\n"
# print (maxi, ord_sent)
# print(max_sim)
# print(" ".join(raw[neworder[-1]]))
html = "\n".join(html)
ord_sent = neworder
def unterline(line):
res = ""
skip = False
for i in range(len(raw[line])):
if skip:
skip = False
continue
if (not plain) and (raw[line][i] in tags):
res += '<font style="text-decoration: underline">' + raw[line][i] + '</font> '
elif (not plain) and (i < len(raw[line])-2) and ((raw[line][i] + "_" + raw[line][i+1]) in tags):
res += '<font style="text-decoration: underline">' + raw[line][i] + " " + raw[line][i+1] + '</font> '
skip = True
else:
res += raw[line][i] + ' '
return res
# html = "<p>\n"
# last_doc = 0
# for line in ord_sent:
# if last_doc != ids[line][1]:
# html += "</p><p>\n"
# if (not plain) and (line > sent_count):
# html += "<b>"
# if not black:
# html += '<font color="' + getColor(ids[line][1]) + '">'
# html += unterline(line)
# if not black:
# html += '''</font> '''
# if plain:
# html += "<!-- "
# if line < sent_count:
# # print(line, ids[line])
# html += "(sentID:" + ids[line][0] + ',doc:' + ids[line][1] + ',origSent:' + ids[line][3] + ")"
# else:
# html += '''(doc:'''+str(ids[line][1])+''',abstract Sentence) '''
# if plain:
# html += " -->"
# if (not plain) and (line > sent_count):
# html += "</b>"
# html += "<br/>\n"
# last_doc = ids[line][1]
html += "</p>"
f1=open(path + "reordered.html", "w")
f1.write(html)
f1.close()
|
[
"niko@Nikos-MacBook-Air.local"
] |
niko@Nikos-MacBook-Air.local
|
d54d3e97c2cd9d701ec0878fe7616f94b1f49463
|
a1c8731a8527872042bd46340d8d3e6d47596732
|
/programming-laboratory-I/iffz/teorema_herao.py
|
fd606d456f778469a63af30ca5e8a84d625308aa
|
[
"MIT"
] |
permissive
|
MisaelAugusto/computer-science
|
bbf98195b0ee954a7ffaf58e78f4a47b15069314
|
d21335a2dc824b54ffe828370f0e6717fd0c7c27
|
refs/heads/master
| 2022-12-04T08:21:16.052628
| 2020-08-31T13:00:04
| 2020-08-31T13:00:04
| 287,621,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# coding: utf-8
# Student: Misael Augusto
# Enrollment: 117110525
# Problem: Using Heron's Formula to Compute the Area of Triangles
import math
N = int(raw_input())
areas = []
for i in range(N):
medidas = raw_input().split()
a = float(medidas[0])
b = float(medidas[1])
c = float(medidas[2])
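    # Heron's formula: with semi-perimeter s = (a + b + c) / 2,
    # area = sqrt(s * (s - a) * (s - b) * (s - c))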
s = (a + b + c) / 2
area = math.sqrt(s * (s - a) * (s - b) * (s - c))
areas.append(area)
maiores = 0
total = 0
for i in range(N):
if areas[i] > 100:
maiores += 1
total += areas[i]
print "Área %d: %.2f" % ((i + 1), areas[i])
if maiores > 0:
media = total / maiores
print "Número maiores: %d, área média: %.2f" % (maiores, media)
|
[
"misael.costa@ccc.ufcg.edu.br"
] |
misael.costa@ccc.ufcg.edu.br
|
2ac22d58b0a0c7fff386b3c21240e78118b1b3f6
|
f44dcc228271aec0f0baac44bec35a1397b97c93
|
/protocolTest.py
|
a423bf08d3c2cc39115db5a49a93c03616eb8308
|
[] |
no_license
|
OranBar/Python_RC
|
053754341f9c0a9fcc3054333be454dc7d39c490
|
c4ca8df4e6391359d2208b97e0f72b326eca15fa
|
refs/heads/master
| 2021-01-19T10:45:57.568362
| 2017-02-17T04:40:13
| 2017-02-17T04:40:13
| 82,204,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 16 03:11:09 2017
@author: King Pub
"""
import sys
i = 16
ba = bytearray(i)
print ba.decode
#for i in range(0, ):
# sys.stdout.write(ba[i])
#
#sys.stdout.flush()
#print ba[0]
|
[
"baroran@hotmail.com"
] |
baroran@hotmail.com
|
034d7bc7558130581e12a0ca2d4775672c85a642
|
bf1588509df8cc40e99f3e362eff18f5bd754ae3
|
/Python/python_stack/django/restful_users_projects/manage.py
|
e244072b37fd103515ff2b6409be476d617bc657
|
[] |
no_license
|
nick0000100/DojoAssignments
|
dec7b45a18d986acea3373839a9dcc5c314781a2
|
dd698fc69df17041a284fd99cf2522e0731c6477
|
refs/heads/master
| 2021-01-01T17:39:44.065520
| 2017-08-18T22:28:45
| 2017-08-18T22:28:45
| 98,124,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "restful_users_projects.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"nick.sor@live.com"
] |
nick.sor@live.com
|
b1e2aad1bcc43e01228fd2cdb4bcb322e663dadd
|
1b30eb6a293b4631939b7e104e6050f3206e1b60
|
/jenkins-jobs
|
9513c4d1df382563e9c65330d789435793d989ec
|
[] |
no_license
|
tfheen/jenkins-job-builder
|
fec58afdea95a806591f3ed11fdd94f53c20b680
|
3739ad3eaed323af900c1d450114313679acebce
|
refs/heads/master
| 2020-12-24T19:17:46.866924
| 2012-10-15T10:34:01
| 2012-10-16T16:02:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,376
|
#!/usr/bin/env python
import jenkins_jobs.builder
import argparse
import ConfigParser
import logging
import os
import sys
def confirm(question):
answer = raw_input('%s (Y/N): ' % question).upper().strip()
if not answer == 'Y':
sys.exit('Aborted')
def main():
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers(help='update, test or delete job',
dest='command')
parser_update = subparser.add_parser('update')
parser_update.add_argument('path', help='Path to YAML file or directory')
parser_update.add_argument('name', help='name of job', nargs='?')
parser_test = subparser.add_parser('test')
parser_test.add_argument('path', help='Path to YAML file or directory')
parser_test.add_argument('-o', dest='output_dir',
help='Path to output XML')
parser_test.add_argument('name', help='name of job', nargs='?')
parser_delete = subparser.add_parser('delete')
parser_delete.add_argument('name', help='name of job', nargs='+')
subparser.add_parser('delete-all',
help='Delete *ALL* jobs from Jenkins server, including those '
'jobs not managed by Jenkins Job Builder.')
parser.add_argument('--conf', dest='conf', help='Configuration file')
parser.add_argument('-l', '--log_level', dest='log_level', default='info',
help="Log level (default: %(default)s)")
options = parser.parse_args()
options.log_level = getattr(logging, options.log_level.upper(),
logging.INFO)
logging.basicConfig(level=options.log_level)
logger = logging.getLogger()
conf = '/etc/jenkins_jobs/jenkins_jobs.ini'
if options.conf:
conf = options.conf
else:
# Fallback to script directory
localconf = os.path.join(os.path.dirname(__file__),
'jenkins_jobs.ini')
if os.path.isfile(localconf):
conf = localconf
if not options.command == 'test':
logger.debug("Reading config from {0}".format(conf))
conffp = open(conf, 'r')
config = ConfigParser.ConfigParser()
config.readfp(conffp)
else:
config = {}
logger.debug("Config: {0}".format(config))
builder = jenkins_jobs.builder.Builder(config.get('jenkins', 'url'),
config.get('jenkins', 'user'),
config.get('jenkins', 'password'),
config)
if options.command == 'delete':
for job in options.name:
logger.info("Deleting job {0}".format(job))
builder.delete_job(job)
elif options.command == 'delete-all':
confirm('Sure you want to delete *ALL* jobs from Jenkins server?\n'
'(including those not managed by Jenkins Job Builder)')
logger.info("Deleting all jobs")
builder.delete_all_jobs()
elif options.command == 'update':
logger.info("Updating jobs in {0} ({1})".format(options.path,
options.name))
builder.update_job(options.path, options.name)
elif options.command == 'test':
builder.update_job(options.path, options.name,
output_dir=options.output_dir)
if __name__ == '__main__':
main()
|
[
"jenkins@review.openstack.org"
] |
jenkins@review.openstack.org
|
|
d4c0c3d0a6f77da0c017ffbae553d80b31212d54
|
878eea4c228f2dcc2d6ae281a9bf1b1f028ae18a
|
/svm.py
|
741d019146c125b275d27b1d158a050143596043
|
[] |
no_license
|
angeloponc/mood
|
39fbd4f7dfcc0c9d543e6cdc97162ffb6c3f4b7b
|
8e9d549798eb41d86e76beb157479f8323f2a2dd
|
refs/heads/main
| 2022-12-27T21:47:40.288447
| 2020-10-09T01:38:12
| 2020-10-09T01:38:12
| 302,493,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,216
|
py
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
namefile = 'data.csv'
df = pd.read_csv(namefile)
print(namefile)
print(df.shape)
print(df.head())
print(df['Clase'].value_counts())
X = df.drop(['Emocion','Path','Clase'],axis=1)
y = df.Clase
# split the data into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.2,random_state = 42)
print('X_train, X_test, y_train, y_test')
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# Classify
from sklearn.svm import SVC
model = SVC(kernel='linear',gamma = 'auto')
model.fit(X_train, y_train)
# predictions
y_pred = model.predict(X_test)
# evaluate the algorithm
from sklearn.metrics import classification_report, confusion_matrix, precision_score, recall_score
eti = ['positivo', 'negativo']
# with labels=['positivo', 'negativo'] the first label is the positive class,
# so ravel() yields (tp, fn, fp, tn) rather than the default (tn, fp, fn, tp)
tp, fn, fp, tn = confusion_matrix(y_test,y_pred,labels = eti).ravel()
print('tn: ', tn ,' fp: ', fp, ' fn: ', fn, ' tp:', tp)
cm1 = pd.DataFrame(confusion_matrix(y_test,y_pred), index = eti, columns = eti)
print(cm1)
print(classification_report(y_test,y_pred))
print(namefile)
|
[
"noreply@github.com"
] |
angeloponc.noreply@github.com
|
041b1a308cdbee3b037a69cc473611dd56cc9dee
|
44f0dcd9d987be19cc81c2d15cd5160aee1f4da7
|
/local/solution/model/train_char_rnn.py
|
99f5be01d6d563803b91070321327a57d2030331
|
[] |
no_license
|
claesgill/azure_ml_workshop
|
9bf6f52d0ea9f98aff9a74a289d685ae3425b186
|
e1e9fb8277be11f2a12fe7d48836320ac3cd3853
|
refs/heads/master
| 2022-12-26T19:06:00.031430
| 2020-10-08T13:13:00
| 2020-10-08T13:13:00
| 292,477,535
| 0
| 1
| null | 2020-10-07T14:28:24
| 2020-09-03T05:41:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,034
|
py
|
import argparse
import os
import random
import time
from azureml.core import Workspace, Dataset, Run, Experiment, RunConfiguration, ScriptRunConfig, Model
from torch.autograd import Variable
import torch.nn as nn
import torch
from tqdm import tqdm
# local
from src.src import read_file, char_tensor, time_since, CharRNN, generate, all_characters, n_characters
# Get experiment run context and workspace details
run = Run.get_context()
ws = run.experiment.workspace
# Parse command line arguments
argparser = argparse.ArgumentParser()
argparser.add_argument('--dataset', type=str, default='shakespeare.txt')
argparser.add_argument('--modelname', type=str, default='char_rnn_model')
argparser.add_argument('--model', type=str, default='gru')
argparser.add_argument('--n_epochs', type=int, default=10)
argparser.add_argument('--print_every', type=int, default=100)
argparser.add_argument('--hidden_size', type=int, default=100)
argparser.add_argument('--n_layers', type=int, default=2)
argparser.add_argument('--learning_rate', type=float, default=0.01)
argparser.add_argument('--chunk_len', type=int, default=200)
argparser.add_argument('--batch_size', type=int, default=100)
argparser.add_argument('--shuffle', action='store_true')
argparser.add_argument('--cuda', action='store_true')
args = argparser.parse_args()
# TODO: Download the dataset you uploaded earlier by using the Dataset class
# Use the received file_path as input to the "read_file()" function.
# NB! The filepath is a list and read_file expect a string
dataset = Dataset.get_by_name(ws, name=args.dataset)
file_path = dataset.download(target_path='.', overwrite=True)
file, file_len = read_file(file_path[0]) # <-- Input the file path
# Splitting dataset function
def random_training_set(chunk_len, batch_size):
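    # Each sample is a random slice of the corpus: the input is chunk[:-1] and
    # the target is chunk[1:], so the model is trained to predict the next
    # character at every position.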
inp = torch.LongTensor(batch_size, chunk_len)
target = torch.LongTensor(batch_size, chunk_len)
for bi in range(batch_size):
start_index = random.randint(0, file_len - chunk_len)
end_index = start_index + chunk_len + 1
chunk = file[start_index:end_index]
if len(chunk[:-1]) < 200: continue
inp[bi] = char_tensor(chunk[:-1])
target[bi] = char_tensor(chunk[1:])
inp = Variable(inp)
target = Variable(target)
if args.cuda:
inp = inp.cuda()
target = target.cuda()
return inp, target
# Training function
def train(inp, target):
hidden = decoder.init_hidden(args.batch_size)
if args.cuda:
hidden = hidden.cuda()
decoder.zero_grad()
loss = 0
for c in range(args.chunk_len):
output, hidden = decoder(inp[:,c], hidden)
loss += criterion(output.view(args.batch_size, -1), target[:,c])
loss.backward()
decoder_optimizer.step()
# return loss.data[0] / args.chunk_len
return loss.data / args.chunk_len
# Initialize model
decoder = CharRNN(
n_characters,
args.hidden_size,
n_characters,
model=args.model,
n_layers=args.n_layers,
)
decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=args.learning_rate)
criterion = nn.CrossEntropyLoss()
if args.cuda: decoder.cuda()
start = time.time()
all_losses = []
loss_avg = 0
print("Start training for {} epochs...".format(args.n_epochs))
for epoch in tqdm(range(1, args.n_epochs + 1)):
loss = train(*random_training_set(args.chunk_len, args.batch_size))
loss_avg += loss
if epoch % args.print_every == 0:
print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch, epoch / args.n_epochs * 100, loss))
print(generate(decoder, 'Wh', 100, cuda=args.cuda), '\n')
# Saving model to outputs/ in Azure ML
save_filename = "outputs/" + args.modelname + ".pt"
torch.save(decoder.state_dict(), save_filename)
# TODO: Use the Model class and the register method to upload the model to Azure ML
model = Model.register(workspace=ws,
model_name=args.modelname,
model_path="outputs/")
# Complete the run
run.complete()
|
[
"noreply@github.com"
] |
claesgill.noreply@github.com
|
6e2a6feed956fbce85dfd0d98d47f8dc269a6884
|
08e2c21d7b1a0bafc79ecaa0884e7c7088a6d2ce
|
/python_hw3/problem3.py
|
cce8b06e75eb1d6bace837773de9b4170a5b8bfa
|
[] |
no_license
|
mklemos/python
|
9300ce3b9270d2140fa15b6c5909d9f74fb54903
|
d67d80617014a00c5ca033d4e0eb83ed31e573c2
|
refs/heads/master
| 2021-01-16T21:22:04.219446
| 2015-03-31T22:53:19
| 2015-03-31T22:53:19
| 33,214,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
## Lab3 problem3
## Maximilian Lemos
## 02-04-2015
newlist = [1,2,3,4,5]
newlist.pop(2)
newlist.pop(2)
print(newlist)
newlist.insert(1,17)
print(newlist)
test = 4 in newlist
print(test)
|
[
"mlemos91@gmail.com"
] |
mlemos91@gmail.com
|
1c931bd57a40181c6743b1d7327334c7a47cc38a
|
8e52c27f1b2823db67db4438b2b7e22c18254eca
|
/eval_gl_khpa.py
|
0257cad3197eece7938580d717a4b307d026a02d
|
[
"MIT"
] |
permissive
|
earhian/imgclsmob
|
5582f5f2d4062b620eecc28d5c4c9245fea47291
|
c87c0942420876941868c016211073dec4392e4d
|
refs/heads/master
| 2020-04-12T02:13:55.258601
| 2018-12-17T20:38:19
| 2018-12-17T20:38:19
| 162,242,486
| 1
| 0
|
MIT
| 2018-12-18T06:40:42
| 2018-12-18T06:40:41
| null |
UTF-8
|
Python
| false
| false
| 5,186
|
py
|
import argparse
import time
import logging
import mxnet as mx
from common.logger_utils import initialize_logging
from gluon.utils import prepare_mx_context, prepare_model, calc_net_weight_count
from gluon.khpa import add_dataset_parser_arguments
from gluon.khpa import get_batch_fn
from gluon.khpa import get_val_data_source
from gluon.khpa import validate
def parse_args():
parser = argparse.ArgumentParser(
description='Evaluate a model for image classification (Gluon/KHPA)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_dataset_parser_arguments(parser)
parser.add_argument(
'--model',
type=str,
required=True,
help='type of model to use. see model_provider for options.')
parser.add_argument(
'--use-pretrained',
action='store_true',
help='enable using pretrained model from gluon.')
parser.add_argument(
'--dtype',
type=str,
default='float32',
help='data type for training. default is float32')
parser.add_argument(
'--resume',
type=str,
default='',
help='resume from previously saved parameters if not None')
parser.add_argument(
'--input-size',
type=int,
default=224,
help='size of the input for model. default is 224')
parser.add_argument(
'--resize-inv-factor',
type=float,
default=0.875,
help='inverted ratio for input image crop. default is 0.875')
parser.add_argument(
'--num-classes',
type=int,
default=28,
help='number of classes')
parser.add_argument(
'--in-channels',
type=int,
default=4,
help='number of input channels')
parser.add_argument(
'--num-gpus',
type=int,
default=0,
help='number of gpus to use.')
parser.add_argument(
'-j',
'--num-data-workers',
dest='num_workers',
default=4,
type=int,
help='number of preprocessing workers')
parser.add_argument(
'--batch-size',
type=int,
default=512,
help='training batch size per device (CPU/GPU).')
parser.add_argument(
'--save-dir',
type=str,
default='',
help='directory of saved models and log-files')
parser.add_argument(
'--logging-file-name',
type=str,
default='train.log',
help='filename of training log')
parser.add_argument(
'--log-packages',
type=str,
default='mxnet',
help='list of python packages for logging')
parser.add_argument(
'--log-pip-packages',
type=str,
default='mxnet-cu92',
help='list of pip packages for logging')
args = parser.parse_args()
return args
def test(net,
val_data,
batch_fn,
data_source_needs_reset,
dtype,
ctx,
calc_weight_count=False,
extended_log=False):
rmse_calc = mx.metric.RMSE()
tic = time.time()
rmse_val_value = validate(
metric_calc=rmse_calc,
net=net,
val_data=val_data,
batch_fn=batch_fn,
data_source_needs_reset=data_source_needs_reset,
dtype=dtype,
ctx=ctx)
if calc_weight_count:
weight_count = calc_net_weight_count(net)
logging.info('Model: {} trainable parameters'.format(weight_count))
if extended_log:
logging.info('Test: rmse={rmse:.4f} ({rmse})'.format(
rmse=rmse_val_value))
else:
logging.info('Test: rmse={rmse:.4f}'.format(
rmse=rmse_val_value))
logging.info('Time cost: {:.4f} sec'.format(
time.time() - tic))
def main():
args = parse_args()
_, log_file_exist = initialize_logging(
logging_dir_path=args.save_dir,
logging_file_name=args.logging_file_name,
script_args=args,
log_packages=args.log_packages,
log_pip_packages=args.log_pip_packages)
ctx, batch_size = prepare_mx_context(
num_gpus=args.num_gpus,
batch_size=args.batch_size)
net = prepare_model(
model_name=args.model,
use_pretrained=args.use_pretrained,
pretrained_model_file_path=args.resume.strip(),
dtype=args.dtype,
tune_layers="",
classes=args.num_classes,
in_channels=args.in_channels,
ctx=ctx)
input_image_size = net.in_size if hasattr(net, 'in_size') else (args.input_size, args.input_size)
val_data = get_val_data_source(
dataset_args=args,
batch_size=batch_size,
num_workers=args.num_workers,
input_image_size=input_image_size,
resize_inv_factor=args.resize_inv_factor)
batch_fn = get_batch_fn()
assert (args.use_pretrained or args.resume.strip())
test(
net=net,
val_data=val_data,
batch_fn=batch_fn,
data_source_needs_reset=args.use_rec,
dtype=args.dtype,
ctx=ctx,
# calc_weight_count=(not log_file_exist),
calc_weight_count=True,
extended_log=True)
if __name__ == '__main__':
main()
|
[
"osemery@gmail.com"
] |
osemery@gmail.com
|
0880cf07429a36f3400967114be5d4659ffdb8d8
|
59b2f758a8ae22a8395e16343dd2cf43b32ffa3f
|
/SimLinearRegression.py
|
5ab3384ac679d45939efb6421b80d478ed1cfd01
|
[] |
no_license
|
GuanhuiGuan/Tensorflow_Applications
|
52eadf80e72abd414d1ea3d7d4dc3f0fcd42d943
|
a04756ab2215e9eff0a5bc874c7a451d2672b644
|
refs/heads/master
| 2021-08-26T07:25:08.948001
| 2017-11-22T06:13:31
| 2017-11-22T06:13:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
import tensorflow as tf
import numpy as np
feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)
x_train = np.arange(1.0, 5.0)
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
input_fn = tf.estimator.inputs.numpy_input_fn(
{"x": x_train}, y_train, batch_size = 4, num_epochs=None, shuffle=True)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
{"x": x_train}, y_train, batch_size = 4, num_epochs=1000, shuffle=False)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size = 4, num_epochs=1000, shuffle=False)
# writer = tf.summary.FileWriter('./graphs', sess.graph)
estimator.train(input_fn=input_fn, steps=1000)
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
print("train metrics: %s" %train_metrics)
print("eval metrics: %s" %eval_metrics)
# writer.close()
|
[
"noreply@github.com"
] |
GuanhuiGuan.noreply@github.com
|
2789040406c024d7dac4e0a46d63e39cc5a2bcab
|
a90077635aeac846965381e0b07591a1df011afe
|
/care/facility/migrations/0107_populate_external_ids.py
|
9b3f281051e8f7bdd00a83eaea7ac4dd55b4b5bd
|
[
"MIT"
] |
permissive
|
Basharckr/care
|
f873ca140ae8607846d9b9500e3c21e9bfa15800
|
c86ae2614ea9ba80b140a2eb21ad64fdbb47ad7e
|
refs/heads/master
| 2023-06-17T21:26:48.936321
| 2021-07-12T06:03:52
| 2021-07-12T06:03:52
| 386,884,450
| 1
| 0
|
MIT
| 2021-07-17T08:41:09
| 2021-07-17T08:41:09
| null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
from uuid import uuid4
from django.db import migrations
def unique_external_ids(apps, *args):
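    # Assign a fresh UUID4 to every existing row of each model listed below so
    # that the external_id column ends up populated with unique values.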
models = [
"ambulance",
"ambulancedriver",
"building",
"facility",
"facilitycapacity",
"facilitypatientstatshistory",
"facilitystaff",
"facilityvolunteer",
"historicalfacilitycapacity",
"hospitaldoctors",
"inventory",
"inventoryitem",
"inventorylog",
"patientconsultation",
"patientsample",
"patientsampleflow",
"room",
"staffroomallocation",
]
for i in models:
model = apps.get_model("facility", i)
for obj in model.objects.all():
obj.external_id = uuid4()
obj.save()
def reverse_unique_external_ids(apps, *args):
pass
class Migration(migrations.Migration):
dependencies = [
("facility", "0106_auto_20200510_1557"),
]
operations = [migrations.RunPython(unique_external_ids, reverse_code=reverse_unique_external_ids)]
|
[
"vichuhari100@gmail.com"
] |
vichuhari100@gmail.com
|
dcb7d92857fd2fcf7f5ec9f843c75190a1a773a9
|
02878a9ea8e10fdbab3e187844f2c70072cda327
|
/em/api/admin.py
|
8fe943f141d2a00447d088aeb5749088f307958d
|
[] |
no_license
|
aravindm711/expense-manager-backend
|
58f17cb511576ced88453319cecb529f92dd5b83
|
1d649b3d62898e97553ad47e95134c838057be06
|
refs/heads/master
| 2023-04-16T12:51:09.688756
| 2021-04-18T17:34:25
| 2021-04-18T17:34:25
| 341,444,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
from django.contrib import admin
from .models import Expense, LentOrOwedExpense, ExpenseNotification
# Register your models here.
admin.site.register(Expense)
admin.site.register(LentOrOwedExpense)
admin.site.register(ExpenseNotification)
|
[
"aravindmurali711@gmail.com"
] |
aravindmurali711@gmail.com
|
da06f908b4fe9803300dc1d725dabb5956e02813
|
d91ee4e5d3fbfb45bfd623af5fac1aee1e318a3c
|
/app.py
|
36041031d0fbccaa3a3b9ca183d8d776d89b4b2b
|
[] |
no_license
|
srajasimman/RPi_Flask_Remote
|
747bc610d292dbce48bb8f9e9086c62d74b6794c
|
628165925ea653d05dafb86cb1dc82b9271d43cc
|
refs/heads/master
| 2021-04-06T11:02:26.278613
| 2018-03-14T10:23:46
| 2018-03-14T10:23:46
| 124,508,017
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
#!/usr/bin/env python
'''
Raspberry Pi GPIO Status and Control
'''
import RPi.GPIO as GPIO
from flask import Flask, render_template
app = Flask(__name__)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# define GPIO pins for relay
tv = 17
dvd = 18
set_top_box = 22
projector = 23
ac = 24
fan = 27
# initialize GPIO status variables
tv_Sts = 0
dvd_Sts = 0
set_top_box_Sts = 0
projector_Sts = 0
ac_Sts = 0
fan_Sts = 0
# Define relay pins as output
GPIO.setup(tv, GPIO.OUT)
GPIO.setup(dvd, GPIO.OUT)
GPIO.setup(set_top_box, GPIO.OUT)
GPIO.setup(projector, GPIO.OUT)
GPIO.setup(ac, GPIO.OUT)
GPIO.setup(fan, GPIO.OUT)
# turn relay OFF at start
GPIO.output(tv, GPIO.LOW)
GPIO.output(dvd, GPIO.LOW)
GPIO.output(set_top_box, GPIO.LOW)
GPIO.output(projector, GPIO.LOW)
GPIO.output(ac, GPIO.LOW)
GPIO.output(fan, GPIO.LOW)
@app.route("/")
def index():
# Read relay status
tv_Sts = GPIO.input(tv)
dvd_Sts = GPIO.input(dvd)
set_top_box_Sts = GPIO.input(set_top_box)
projector_Sts = GPIO.input(projector)
ac_Sts = GPIO.input(ac)
fan_Sts = GPIO.input(fan)
templateData = {
'tv': tv_Sts,
'dvd': dvd_Sts,
'set_top_box': set_top_box_Sts,
'projector': projector_Sts,
'ac': ac_Sts,
'fan': fan_Sts,
}
return render_template('index.html', **templateData)
# The function below is executed when someone requests
# a URL with the actuator name and action in it:
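# e.g. GET /tv/on switches the TV relay on, and GET /fan/off switches the fan relay off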
@app.route("/<deviceName>/<action>")
def action(deviceName, action):
if deviceName == 'tv':
actuator = tv
if deviceName == 'dvd':
actuator = dvd
if deviceName == 'set_top_box':
actuator = set_top_box
if deviceName == 'projector':
actuator = projector
if deviceName == 'ac':
actuator = ac
if deviceName == 'fan':
actuator = fan
if action == "on":
GPIO.output(actuator, GPIO.HIGH)
if action == "off":
GPIO.output(actuator, GPIO.LOW)
tv_Sts = GPIO.input(tv)
dvd_Sts = GPIO.input(dvd)
set_top_box_Sts = GPIO.input(set_top_box)
projector_Sts = GPIO.input(projector)
ac_Sts = GPIO.input(ac)
fan_Sts = GPIO.input(fan)
templateData = {
'tv': tv_Sts,
'dvd': dvd_Sts,
'set_top_box': set_top_box_Sts,
'projector': projector_Sts,
'ac': ac_Sts,
'fan': fan_Sts,
}
return render_template('index.html', **templateData)
if __name__ == "__main__":
try:
exit(app.run(host='0.0.0.0', port=8080, debug=True))
except KeyboardInterrupt:
pass
finally:
GPIO.cleanup()
|
[
"srajasimman@gmail.com"
] |
srajasimman@gmail.com
|
983c9edc547d1ec5b03c3bc66ade4f0abffa040c
|
ced2b83ef7cc267792e8014aed7dc90f21ce9899
|
/euler/PandigitalProducts.py
|
c3efa3f4587af21aaab52306a71116e790776a7e
|
[] |
no_license
|
cypress777/oj
|
4f380dd84ed3b7f398dfc6ad0c254cab1adfd1a3
|
80062f6969162dbf11634ccfceb502dbc6874531
|
refs/heads/master
| 2022-03-10T17:32:13.664052
| 2022-02-23T11:04:26
| 2022-02-23T11:04:26
| 144,862,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
# If A has m digits and B has n digits, their product C has m+n or m+n-1
# digits. For A * B = C to be pandigital, the three numbers together must use
# the digits 1-9 exactly once, i.e. m + n + digits(C) = 9. digits(C) = m+n is
# impossible (2*(m+n) = 9 has no integer solution), so digits(C) = m+n-1 and
# m + n = 5: a 4-digit product with the remaining five digits split between
# A and B as 2+3 or 1+4, which is exactly what the loop below tries.
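# Worked example: 39 * 186 = 7254 uses each of the digits 1-9 exactly once
# (a 2-digit by 3-digit multiplication with a 4-digit product).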
import math
import Permutation as lp
numbers = [i for i in range(1, 10)]
ans = set([])
while (not lp.isEnd(numbers)):
C = numbers[0] + numbers[1]*10 + numbers[2]*100 + numbers[3]*1000
A = numbers[4] + numbers[5]*10
B = numbers[6] + numbers[7]*10 + numbers[8]*100
if A*B == C:
ans.add(C)
A = numbers[4]
B = numbers[5] + numbers[6]*10 + numbers[7]*100 + numbers[8]*1000
if A*B == C:
ans.add(C)
numbers = lp.nextPermuteStr(numbers)
res = 0
ans = list(ans)
for i in range(len(ans)):
res += ans[i]
print(res)
|
[
"chenypress@gmail.com"
] |
chenypress@gmail.com
|
71b44031eb64e59bca4f8751a39eed7376120754
|
031864a41f92433fb407706f6cc5791813c2f739
|
/corecode/context_processors.py
|
37b0d3ca55102714e38d09d7d8f6c2ba257b391b
|
[] |
no_license
|
OAO-kotiki/MB
|
741608e224c0ef9339654e7fd761d3a5714dada2
|
e358a0fa45fb7b693ca150fdeadd5a0c08870de0
|
refs/heads/master
| 2022-11-08T05:14:38.436039
| 2020-06-23T03:39:42
| 2020-06-23T03:39:42
| 274,294,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
from .models import AcademicTerm, AcademicSession, SiteConfig
def site_defaults(request):
current_session = AcademicSession.objects.get(current=True)
current_term = AcademicTerm.objects.get(current=True)
vals = SiteConfig.objects.all()
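    # NB: the "Группа" ("Group") key below is left as-is on the assumption
    # that templates look the value up by this exact key.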
contexts = {
"current_session": current_session.name,
"Группа": current_term.name
}
for val in vals:
contexts[val.key] = val.value
return contexts
|
[
"noreply@github.com"
] |
OAO-kotiki.noreply@github.com
|
dc9ae3159f31cee6eab55e9eecbd026054356da6
|
7e8ff7f00f974f1ec8adec8d2c66d539d48b7676
|
/GUI in Python/module_sys.py
|
176ff894af0a20129e30dfc8f2bab3f6982a5ec2
|
[] |
no_license
|
TranHuuThuy-BKHN/Python
|
4c7c35a6e4f621140a85d0e0a429679e89b805b0
|
3051826cc91ea9bcfbebe16f5c5a03611c5f6851
|
refs/heads/master
| 2020-04-02T02:31:29.686007
| 2018-10-20T14:33:06
| 2018-10-20T14:33:06
| 153,913,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
import sys
def tong(a, b=10):
print(a+b)
print(sys.argv)
if len(sys.argv) > 1:
for i in sys.argv[1:]:
tong(int(i))
|
[
"huuthuy2000@gmail.com"
] |
huuthuy2000@gmail.com
|
1b1e7aef318a395793802019d2a1047d0e7533b2
|
86f07020293cfd625a1c01ffdc6aa09272f5537b
|
/commons/datasvc.py
|
3b6f12594334d3cbf5d36992ed8da513e2e039bf
|
[] |
no_license
|
CMSCompOps/TransferTeam
|
ec553a357decbc0650cc0302f58cec7cec7e57e9
|
b9259a3ee8be1f020d614721f62662ce0eafa2cb
|
refs/heads/master
| 2022-12-23T05:44:30.252624
| 2022-12-22T13:24:01
| 2022-12-22T13:24:01
| 20,916,607
| 5
| 13
| null | 2021-07-30T15:58:01
| 2014-06-17T08:52:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,594
|
py
|
#!/usr/bin/env python
import sys,getopt,urllib
try:
import json
except ImportError:
import simplejson as json
def help():
print "instance (optional): PhEDEx instance prod/debug/dev (default prod)"
print "service : datasvc name (blockreplica, filereplica, etc.)"
print "options (optional) : & seprated options (node=T2_CH_CERN&create_since=0)"
print "path : hierarchical path to attributes"
print "Example usage:"
print "datasvc.py --service filereplicas --options 'dataset=/TT_FullLept_noCorr_mass169_5_8TeV-mcatnlo/Summer12-START53_V7C-v1/GEN-SIM' --path /phedex/block:name/file:name,checksum,bytes"
print "\tThis will return BlockName FileName FileCksum FileBytes one line each"
def parseResult(out, path, result, firstEntry=False):
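    # 'path' is a list of segments such as "block:name,bytes": the part before
    # ':' selects a key in the JSON result, and the comma-separated attributes
    # after ':' are appended to the output line for each matched sub-result.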
if path:
first, rest = getKey(path[0]), path[1:]
item, attr = first[0], first[1]
subResults = result[item]
if not isinstance(subResults, (list, set)):
subResults = [subResults]
for subResult in subResults:
if attr:
newOut = list(out);
for key in attr.split(','):
newOut.append(str(subResult[key]).replace('\n',''))
parseResult(newOut,rest,subResult)
else:
parseResult(out, rest, subResult)
else:
print " ".join(out)
def getKey(key):
if ':' in key:
res = key.split(':')
else:
res = [key, None]
return res
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["instance=","service=","options=","path=","help"])
except getopt.GetoptError:
print >> sys.stderr, 'Please specify data service with --service and path with --path'
sys.exit(2)
service=None
options=None
path=None
instance="prod"
# check command line parameter
for opt, arg in opts :
if opt == "--instance":
instance = arg
elif opt == "--service" :
service = arg
elif opt == "--options" :
options = arg
elif opt == "--path" :
path = filter(None, arg.split('/'))
elif opt == "--help":
help()
sys.exit(0)
if service == None or path == None:
print >> sys.stderr, 'Please specify data service with --service and path with --path'
sys.exit(2)
result=None
output=[]
url='https://cmsweb.cern.ch/phedex/datasvc/json/' + instance + '/' + service
if options != None:
url += '?' + options.replace('#','%23')
result = json.load(urllib.urlopen(url))
try:
parseResult(output,path,result)
except:
print >> sys.stderr, 'error'
|
[
"merictaze@gmail.com"
] |
merictaze@gmail.com
|
f5ca5fa772883d2d76cc8a38eb28417455f7df55
|
0550c8b03627929bb079c322ba740b36f142aae9
|
/kagen_srt.py
|
b8e50a321e078c07c64c13161cd1576a8bc422df
|
[
"MIT"
] |
permissive
|
gonadarian/kagen
|
5d691d0665c98650bf282c0fa3d75f946ad2c4f4
|
d2df5d7f02bc5b69556b3dcf3f00061b0d0e7ee9
|
refs/heads/master
| 2021-01-23T11:47:58.908752
| 2013-10-25T16:00:21
| 2013-10-25T16:00:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
import os
import re
from kagen import utils
def work(filename):
srt = open(filename, encoding="utf-8")
lines = [line.strip("\ufeff\n") for line in srt.readlines()]
srt.close()
errs = []
titles = []
title = None
data = None
id = 0
for i in range(len(lines)):
line = lines[i]
if i == 0 or line == "":
id += 1
data = []
title = {"id": id, "row": i+2, "data": data}
titles.append(title)
if i == 0 or line != "":
data.append(line)
validate_sections(titles, errs)
validate_order(titles, errs)
validate_times(titles, errs)
validate_trim(titles, errs)
print("Errors in {}:".format(filename))
[print("\t{}".format(err)) for err in errs]
def add_error(row, kind, message, errs):
errs.append("{} error at [{}]: {}".format(kind, row, message))
def add_warning(row, kind, message, errs):
errs.append("{} warning at [{}]: {}".format(kind, row, message))
def validate_sections(titles, errs):
kind = "Sections"
for title in titles:
if len(title["data"]) < 3:
add_error(title["row"], kind, "Not enough lines per title", errs)
if len(title["data"]) > 4:
add_warning(title["row"], kind, "Too many lines per title", errs)
def validate_order(titles, errs):
kind = "Order"
id = 1
for title in titles:
if not len(title["data"]):
continue
data0 = title["data"][0]
if id == -1:
try:
id = int(data0)
except ValueError:
add_error(title["row"], kind, "Not a number", errs)
id = -1
if data0 != str(id):
add_error(title["row"], kind, "ID missmatch", errs)
id = -1
else:
id += 1
def validate_times(titles, errs):
kind = "Times"
pattern = r'^\d\d:\d\d:\d\d,\d\d\d --> \d\d:\d\d:\d\d,\d\d\d$'
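    # matches a standard SRT timing line, e.g. "00:01:02,345 --> 00:01:04,567"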
for title in titles:
if len(title["data"]) < 2:
continue
line = title["data"][1]
if not(re.match(pattern, line, re.M|re.I)):
add_error(title["row"]+1, kind, "Time missmatch", errs)
def validate_trim(titles, errs):
kind = "Trim"
for title in titles:
for line in title["data"]:
if len(line) != len(line.strip()):
add_error(title["row"]+1, kind, "Extra spaces", errs)
@utils.entry_point
def main():
for filename in os.listdir("."):
if filename.endswith(".srt"):
work(filename)
|
[
"gonadarian@gmail.com"
] |
gonadarian@gmail.com
|
ae1e46cc4f41130b792538a1800b059a6bc17646
|
fede0ffee3a308f1e639be2323655ab4b41e260a
|
/tests/test_hash.py
|
a16e14e1249d975ce0be4d15725ae70a8d3a8290
|
[
"Apache-2.0"
] |
permissive
|
acjh/pip-api
|
0f0e57de0cbeeeee973639a7fd172aff04f75fc1
|
a20a984a44ab66e10c0f25c8b34813829f292769
|
refs/heads/master
| 2022-12-16T04:14:43.201175
| 2020-08-17T23:39:41
| 2020-08-17T23:39:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,224
|
py
|
import pytest
import pip_api
xfail_incompatible = pytest.mark.xfail(
pip_api._hash.incompatible, reason="Incompatible"
)
@xfail_incompatible
@pytest.mark.parametrize(
"algorithm, expected",
[
("sha256", "c3ebc5b7bc06d4466d339f4d8d1e61d1fdc256dd913d6d5752acea9ce5581a15"),
(
"sha384",
"f2cf1e1a9235568adf0bd19ea41fff179c8f3cc1155ad9f806225a9fe3ea8ba57d3bda"
"65bd90370aa681f1d4d9251dd8",
),
(
"sha512",
"42444c9b60c49bf932562195d2f894e3031bbb8c11a22b414d335b2c862147377ec0c4"
"eb718ac599eff2fac7ecf8333ca5cc0efc75c12965f0386bc1f6624a01",
),
],
)
def test_hash(some_distribution, algorithm, expected):
result = pip_api.hash(some_distribution.filename, algorithm=algorithm)
assert result == expected
@xfail_incompatible
def test_hash_default_algorithm_is_256(some_distribution):
sha256 = "c3ebc5b7bc06d4466d339f4d8d1e61d1fdc256dd913d6d5752acea9ce5581a15"
assert pip_api.hash(some_distribution.filename) == sha256
@xfail_incompatible
def test_hash_invalid_algorithm():
with pytest.raises(pip_api.exceptions.InvalidArguments):
pip_api.hash("whatever", "invalid")
|
[
"di@users.noreply.github.com"
] |
di@users.noreply.github.com
|
38009460ca2805bef4aa4faccd564db4c60f5600
|
0cc59d7106371cb2b5f2420f3cd103c902435f18
|
/jobs/models.py
|
ec6dc78d1232136b8a3db666060ace5c6cb9ec95
|
[] |
no_license
|
omerolmez/django-examples
|
3d4450d148ba55a6f69b65c5bfee6060646973c4
|
4685a6fc199fbcf8819ec2c70b453b6e202a6b23
|
refs/heads/master
| 2020-04-01T11:00:47.188790
| 2018-11-01T15:50:36
| 2018-11-01T15:50:36
| 153,142,532
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
from django.db import models
# Create your models here.
class Job(models.Model):
image = models.ImageField(upload_to='images')
summary = models.CharField(max_length=200)
|
[
"omerolmez@gmail.com"
] |
omerolmez@gmail.com
|
3114ddfea054bdb84d63e455b7de35507155608f
|
46da209602dc1b0b05af781f6e0d080d56a4b8fe
|
/userInput.py
|
728a2cee395277ef83f3d3bf1770659ecdc8e1d9
|
[] |
no_license
|
derekngoh/PfolioProject---Offline-Password-Vault
|
5e05e4a13d08fb2a5d3074a6f52f802cb63dca53
|
8e25c6ff6ddc2564a67696459e467d5b7faca7b5
|
refs/heads/master
| 2022-11-15T22:40:46.467538
| 2020-07-10T22:58:45
| 2020-07-10T22:58:45
| 278,079,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,785
|
py
|
from blackBox import BlackBox
class User_Inputs:
"""
Collects user input and sets format to facilitate usage by other methods
"""
#sets server to default using default email and password.
def __init__(self, email=BlackBox.def_email, passw=BlackBox.def_pass):
self.email = email
self.passw = passw
#get login details if not using default server
def login_details(self):
self.email_input = input("Please enter your email account to login: \n")
self.password_input = input("Please enter the password for login: \n")
#get subject heading and secret from user to encrypt then store in server
def get_message_content(self):
self.subject = "Subject: !~~" + input("Enter the subject header for " +
"retrieval of secret (This information is not encrypted.): \n") + "~~!\n"
while True:
self.message = input("\nEnter the secret (limits: minimum 2 and maximum" +
" 200 characters): \n")
if len(self.message) < 201 and len(self.message) > 1:
break
print("Please check message limits and try again.")
#get encryption key from user to encrypt secret
def get_enc_key(self):
while True:
self.enc_key = input("\nPlease enter a key for encrypting your message " +
"(limits: this key cannot be longer than your secret.): \n")
if len(self.enc_key) > 1 and len(self.enc_key)<=len(self.message):
break
print("Please check message limits and try again.")
#get decryption key from user to decrypt encrypted secret
def get_dec_key(self):
while True:
self.dec_key = input("\nPlease enter your secret key for decryption: \n")
if len(self.dec_key) > 1:
break
#get destination email address if sending email
def get_destination_email(self):
self.recipient = input("Please enter email to send to: \n")
#search for secret by using keywords to search subject headings
def retrieve_user_inputs(self):
self.keyword = input("Enter the precise subject keyword to locate your" +
" secret (only the first most relevant result will be displayed.): \n")
#ask user for permission to delete secret from server.
def delete_message(self):
        self.delmsg = input("\nDelete message from server? If you choose to delete, " +
            "this would be your last chance to view the message. (Y/N): \n")
#ask user to confirm deletion
def delete_confirmation(self):
self.del_conf = input("\nConfirm delete? (Y/N): ")
#default main menu options
def default_main_menu(self):
self.def_menu = input("Select which function you would like to perform: " +
"\n1. Encrypt and store secret.\n2. Decrypt stored secret. " +
"\nSelect option(1 or 2): ")
|
[
"noreply@github.com"
] |
derekngoh.noreply@github.com
|
81364a72bb1e11dd2e5832ea931af1ae0c421746
|
c16105668eaf9548bb418234734553b7433b7463
|
/face_search/server/old/fulldb.py
|
d13e7a2d57b5f003c9dcf1f041eda4f4084f1c6d
|
[] |
no_license
|
HarkHem/check_face
|
7918270c5bc62eb9fc7b72986ddfad99d08ddc14
|
4e28fc89729dd7a837ff0acd08fba1867fce07fa
|
refs/heads/master
| 2020-03-31T17:09:01.625559
| 2019-07-27T13:57:24
| 2019-07-27T13:57:24
| 152,408,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,711
|
py
|
import subprocess
import vk
import requests
import psycopg2
import sys
import face_recognition
import dlib
import cv2
import os
import postgresql
import numpy as np
import urllib.request
login = ''
password =''
vk_id = ''
session = vk.AuthSession(app_id = vk_id, user_login = login, user_password = password)
vkapi = vk.API(session, v= 5.87)
#fr = vkapi.friends.get(user_id =33514885, fields='photo_200_orig, nickname, contac, domain')
#print(fr)
l=0
handle = open("name.txt", "w")
for kol in range(14, 80):
for se in range(1,2):
try:
fr = vkapi.users.search(sort = 0,sex = se, age_from=kol, age_to=kol, count=1000, has_photo=1 , fields='photo_200_orig,nickname, contact, domain', city = 112 )
except:
sys.exit()
i = 0
t = 0
if not os.path.exists("./.faces"):
os.mkdir("./.faces")
#file = open('test/nickname.txt', 'w')
for ter in fr['items']:
i=i+1
l=l+1
# print(i if (i%100==0) else pass )
print ( ter['first_name'] , '_' , ter['last_name'])
#p = requests.get(ter['photo_200_orig'])
path = "".join(['./.faces/' ,str(ter['domain']),':', ter['first_name'] , '_' , ter['last_name'] , '.jpg'])
#out = open(path, "wb")
#out.write(p.content)
#out.close()
face_detector = dlib.get_frontal_face_detector()
try:
resp = urllib.request.urlopen(ter['photo_200_orig'])
except:
break
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
#image = cv2.imread(path)
detected_faces = face_detector(image, 1)
db = postgresql.open('pq://myuser:password@localhost:5432/mydatabase')
            for face_idx, face_rect in enumerate(detected_faces):  # face_idx avoids clobbering the outer photo counter i
crop = image[face_rect.top():face_rect.bottom(), face_rect.left():face_rect.right()]
encodings = face_recognition.face_encodings(crop)
if (len(encodings) > 0):
query = "INSERT INTO vectors (file, vec_low, vec_high) VALUES ('{}', CUBE(array[{}]), CUBE(array[{}]))".format(
str(ter['domain']),
','.join(str(s) for s in encodings[0][0:64]),
','.join(str(s) for s in encodings[0][64:128]),
)
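                    # NOTE: interpolating values with str.format leaves this query
                    # open to SQL injection; parameterized queries would be safer.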
db.execute(query)
handle.write(str(ter['domain']))
# os.remove(path)
                # cv2.imwrite("./.faces/aligned_face_{}_{}_crop.jpg".format(path.replace('/', '_'), face_idx), crop)
print('count ', l, ' revision ', kol)
handle.close()
print(l)
|
[
"artcrimechanel@gmail.com"
] |
artcrimechanel@gmail.com
|
0cfb118955a371bf08264c05fbf635317433bcc9
|
e599f012fe051cd678654f5ec2baa6d414715cfd
|
/WTube/wsgi.py
|
87f1268f8ad01a57b558ccdd12c21d8ba9278121
|
[] |
no_license
|
PanosMichalakos/WTube
|
d961cd4fafd8f21bf15d7f74872a795b47c88433
|
2cb1f8c8bccc7fc087cb535203449828da324440
|
refs/heads/main
| 2023-07-20T23:49:03.865760
| 2021-08-27T23:23:57
| 2021-08-27T23:23:57
| 400,187,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
WSGI config for WTube project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'WTube.settings')
application = get_wsgi_application()
|
[
"panmichalakos@live.com"
] |
panmichalakos@live.com
|
9deb3342ea01b7cf425823fc08b41dd2ca04eac2
|
b14c5032bce5aa85715560b1e036c52bea34af79
|
/myapp/api_keys.py
|
0f9b1ccbbd0a3e5fce11ffca2a47023297e235c2
|
[] |
no_license
|
sazia31/InstaClone
|
ec2f3c9fe5ad3d41f94ef212ab056367718aa805
|
f463e95d4d53e17dbb0ca48a2b414c12566743a5
|
refs/heads/master
| 2021-01-01T17:39:47.719611
| 2018-04-30T18:56:23
| 2018-04-30T18:56:23
| 97,402,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
send_grid_api = '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'
parallel_dots_api = 'yOFRCjkrUw0SWjVxxaljRwjLpRhzwRsAjRaCqnGNIWU'
|
[
"noreply@github.com"
] |
sazia31.noreply@github.com
|
ecb539c9cf2f8a08e8f02480abca0e26de72bdec
|
f66613e822f0847d485108782805b0a6153af603
|
/Youtube/ydown/youtube_download/views.py
|
08759b56c253185c074cee0c84ff96cf9c91dade
|
[] |
no_license
|
SuryaParbat/video-downloader
|
c862d2c7551f87883bc0fabac18f43850799a2db
|
ba9bad1e6573ea915b596ba0784af40adcde7d74
|
refs/heads/master
| 2023-07-10T05:15:40.151813
| 2021-07-31T15:44:49
| 2021-07-31T15:44:49
| 326,500,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
from django.shortcuts import render, redirect
import os
# pytube for download
from pytube import YouTube
url = ''
# Create your views here.
def ytb_down(request):
return render(request, 'ytb_main.html')
def yt_download(request):
global url
url = request.GET.get('url')
# create object for known which video download
try:
obj = YouTube(url)
resolutions = []
strm_all = obj.streams.filter(progressive=True, file_extension='mp4').all()
for i in strm_all:
resolutions.append(i.resolution)
resolutions = list(dict.fromkeys(resolutions))
embed_link = url.replace("watch?v=", "embed/")
path = 'D:\\'
return render(request, 'yt_download.html', {'rsl': resolutions, 'embd': embed_link})
except:
return render(request, 'sorry.html')
def download_complete(request, res):
global url
homedir = os.path.expanduser("~")
dirs = homedir + '/Downloads'
    print('DIRECT:', dirs)
if request.method == "POST":
YouTube(url).streams.get_by_resolution(res).download(homedir + '/Downloads')
return render(request, 'download_complete.html')
else:
        return render(request, 'sorry.html')
|
[
"SuryaParbat@.github.com"
] |
SuryaParbat@.github.com
|
ba83ebf2a4f037c357bdf173b970ba7538843adf
|
64fe509f72c93b9a8375eb6d886167f4986998bc
|
/app/models.py
|
2429ccfb2288aec39f0a3555da91e27832913546
|
[] |
no_license
|
yebiro/flask_spider
|
c50cc563ca90505a88188ced84f33a7f8725a915
|
e3761ea6ae93d9a41a9a9359d6bbac2bd0a4aaf2
|
refs/heads/master
| 2020-03-12T03:46:48.445226
| 2018-04-20T03:16:25
| 2018-04-20T03:16:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,445
|
py
|
# -*- coding: utf-8 -*-
# @Author: longzx
# @Date: 2018-03-19 23:44:05
# @cnblog:http://www.cnblogs.com/lonelyhiker/
from . import db
class Fiction(db.Model):
__tablename__ = 'fiction'
__table_args__ = {"useexisting": True}
id = db.Column(db.Integer, primary_key=True)
fiction_name = db.Column(db.String)
fiction_id = db.Column(db.String)
fiction_real_url = db.Column(db.String)
fiction_img = db.Column(db.String)
fiction_author = db.Column(db.String)
fiction_comment = db.Column(db.String)
update = db.Column(db.String)
new_content = db.Column(db.String)
new_url = db.Column(db.String)
def __repr__(self):
return '<fiction %r> ' % self.fiction_name
class Fiction_Lst(db.Model):
__tablename__ = 'fiction_lst'
__table_args__ = {"useexisting": True}
id = db.Column(db.Integer, primary_key=True)
fiction_name = db.Column(db.String)
fiction_id = db.Column(db.String)
fiction_lst_url = db.Column(db.String)
fiction_lst_name = db.Column(db.String)
fiction_real_url = db.Column(db.String)
def __repr__(self):
return '<fiction_lst %r> ' % self.fiction_name
class Fiction_Content(db.Model):
__tablename__ = 'fiction_content'
__table_args__ = {"useexisting": True}
id = db.Column(db.Integer, primary_key=True)
fiction_url = db.Column(db.String)
fiction_content = db.Column(db.String)
fiction_id = db.Column(db.String)
|
[
"904185259@qq.com"
] |
904185259@qq.com
|
618fdc4d4c76091aaaa2907d3007117aa98d4e57
|
b3f5b101ce21bd52a6c0f4d084c981751d1070a9
|
/source/callback/PredictionWriter.py
|
d4f4c396c0197f62c614d3df319a8fa0b9581722
|
[
"MIT"
] |
permissive
|
claudiovaliense/TeCBench
|
3f219810c126ee81ef665126ca7073599447d302
|
dafe5fc5df1a03a054d20be184ce4410b5301ecc
|
refs/heads/main
| 2023-07-18T00:59:53.423980
| 2021-08-26T22:34:55
| 2021-08-26T22:34:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,928
|
py
|
from typing import Any, List
import torch
from pytorch_lightning.callbacks import BasePredictionWriter
from torch import Tensor
class PredictionWriter(BasePredictionWriter):
def __init__(self, params):
super(PredictionWriter, self).__init__(params.write_interval)
self.params=params
self.predictions = []
def write_on_batch_end(
self, trainer, pl_module, prediction: Any, batch_indices: List[int], batch: Any,
batch_idx: int, dataloader_idx: int
):
        torch.save(prediction, f"{self.params.output_dir}{dataloader_idx}_{batch_idx}.pt")
def write_on_epoch_end(
self, trainer, pl_module, dataloaders: List[Any], batch_indices: List[Any]
):
for dataloader in dataloaders:
for batch in dataloader:
# Convert any tensor values to list
batch_items = {k: v if not isinstance(v, Tensor) else v.tolist() for k, v in batch.items()}
# Switch predictions so each entry has its own dict
for values in zip(*batch_items.values()):
prediction = dict(zip(batch_items.keys(), values))
self.predictions.append(prediction)
self._checkpoint()
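    # A minimal wiring sketch (assumes a `params` object exposing write_interval,
    # output_dir, dir and name): register the callback via
    # pl.Trainer(callbacks=[PredictionWriter(params)]) and run
    # trainer.predict(model, dataloaders); Lightning then invokes the
    # write_on_* hooks above at the configured interval.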
def _checkpoint(self):
# Write predictions for current file to disk
torch.save(self.predictions, f"{self.params.dir}{self.params.name}")
# def to_disk(self) -> None:
# """Write predictions to file(s)."""
# for filepath, predictions in self.predictions.items():
# fs = get_filesystem(filepath)
# # normalize local filepaths only
# if fs.protocol == "file":
# filepath = os.path.realpath(filepath)
# if self.world_size > 1:
# stem, extension = os.path.splitext(filepath)
# filepath = f"{stem}_rank_{self.global_rank}{extension}"
# dirpath = os.path.split(filepath)[0]
# fs.mkdirs(dirpath, exist_ok=True)
#
# # Convert any tensor values to list
# predictions = {k: v if not isinstance(v, Tensor) else v.tolist() for k, v in predictions.items()}
#
# # Check if all features for this file add up to same length
# feature_lens = {k: len(v) for k, v in predictions.items()}
# if len(set(feature_lens.values())) != 1:
# raise ValueError("Mismatching feature column lengths found in stored EvalResult predictions.")
#
# # Switch predictions so each entry has its own dict
# outputs = []
# for values in zip(*predictions.values()):
# output_element = dict(zip(predictions.keys(), values))
# outputs.append(output_element)
#
# # Write predictions for current file to disk
# with fs.open(filepath, "wb") as fp:
# torch.save(outputs, fp)
|
[
"c.el@live.com"
] |
c.el@live.com
|
d0b083e78e26f763f1726542d83772a92c20531f
|
31a928cff4960236923b6bc3b68e34bb2f46f470
|
/Speculator/speculator/utils/poloniex.py
|
f74f11896142749195cf7e8f56bba0f7e2208b7b
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
webclinic017/ml_monorepo
|
707df2afd2f986eb0721d26430e6135c917817c6
|
945f0a83d6b94282c547bb6f4805f3381ad9c16a
|
refs/heads/master
| 2021-10-19T21:02:53.322944
| 2019-02-19T20:58:51
| 2019-02-23T20:06:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,198
|
py
|
import logging
import requests
from speculator.utils import date
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def json_to_url(json, symbol):
""" Converts a JSON to a URL by the Poloniex API
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
symbol: String of currency pair, like a ticker symbol.
Returns:
String URL to Poloniex API representing the given JSON.
"""
start = json[0]['date']
end = json[-1]['date']
diff = end - start
# Get period by a ratio from calculated period to valid periods
# Ratio closest to 1 is the period
# Valid values: 300, 900, 1800, 7200, 14400, 86400
periods = [300, 900, 1800, 7200, 14400, 86400]
diffs = {}
for p in periods:
diffs[p] = abs(1 - (p / (diff / len(json)))) # Get ratio
period = min(diffs, key=diffs.get) # Find closest period
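    # e.g. one day of data (diff = 86400 s) across 288 entries gives
    # diff / len(json) = 300, so period 300 wins with ratio abs(1 - 300/300) = 0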
url = ('https://poloniex.com/public?command'
           '=returnChartData&currencyPair={0}&start={1}'
'&end={2}&period={3}').format(symbol, start, end, period)
return url
def chart_json(start, end, period, symbol):
""" Requests chart data from Poloniex API
Args:
start: Int epoch date to START getting market stats from.
Note that this epoch is FURTHER from the current date.
end: Int epoch date to STOP getting market stats from.
Note that this epoch is CLOSER to the current date.
period: Int defining width of each chart candlestick in seconds.
Valid values: 300, 900, 1800, 7200, 14400, 86400
symbol: String of currency pair, like a ticker symbol.
Returns:
Tuple of (JSON data, URL to JSON).
JSON data as a list of dict dates, where the keys are
the raw market statistics.
String URL to Poloniex API representing the given JSON.
"""
url = ('https://poloniex.com/public?command'
           '=returnChartData&currencyPair={0}&start={1}'
'&end={2}&period={3}').format(symbol, start, end, period)
logger.debug(' HTTP Request URL:\n{0}'.format(url))
json = requests.get(url).json()
logger.debug(' JSON:\n{0}'.format(json))
    if 'error' in json:
        logger.error(' Invalid parameters in URL for HTTP response')
        raise SystemExit
    elif len(json) < 1:  # time range too short
        logger.error(' Not enough dates to calculate changes')
        raise SystemExit
    elif all(val == 0 for val in json[0].values()):
        logger.error(' Bad HTTP response. Time unit too short?')
        raise SystemExit
return json, url
def parse_changes(json):
""" Gets price changes from JSON
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
Returns:
List of floats of price changes between entries in JSON.
"""
changes = []
dates = len(json)
    for idx in range(1, dates):  # avoid shadowing the imported `date` module
        last_close = json[idx - 1]['close']
        now_close = json[idx]['close']
        changes.append(now_close - last_close)
logger.debug('Market Changes (from JSON):\n{0}'.format(changes))
return changes
def get_gains_losses(changes):
""" Categorizes changes into gains and losses
Args:
changes: List of floats of price changes between entries in JSON.
Returns:
Dict of changes with keys 'gains' and 'losses'.
All values are positive.
"""
res = {'gains': [], 'losses': []}
for change in changes:
if change > 0:
res['gains'].append(change)
else:
res['losses'].append(change * -1)
logger.debug('Gains: {0}'.format(res['gains']))
logger.debug('Losses: {0}'.format(res['losses']))
return res
def get_attribute(json, attr):
""" Gets the values of an attribute from JSON
Args:
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
attr: String of attribute in JSON file to collect.
Returns:
List of values of specified attribute from JSON
"""
    res = [entry[attr] for entry in json]
logger.debug('{0}s (from JSON):\n{1}'.format(attr, res))
return res
def get_json_shift(year, month, day, unit, count, period, symbol):
""" Gets JSON from shifted date by the Poloniex API
Args:
year: Int between 1 and 9999.
month: Int between 1 and 12.
day: Int between 1 and 31.
unit: String of time period unit for count argument.
How far back to check historical market data.
Valid values: 'hour', 'day', 'week', 'month', 'year'
count: Int of units.
How far back to check historical market data.
period: Int defining width of each chart candlestick in seconds.
symbol: String of currency pair, like a ticker symbol.
Returns: JSON, list of dates where each entry is a dict of raw market data.
"""
epochs = date.get_end_start_epochs(year, month, day, 'last', unit, count)
return chart_json(epochs['shifted'], epochs['initial'],
period, symbol)[0]
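# Usage sketch (illustrative values): one week of 30-minute USDT_BTC candles
# ending 2018-03-01 would be get_json_shift(2018, 3, 1, 'week', 1, 1800, 'USDT_BTC').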
|
[
"tmichael.yu@gmail.com"
] |
tmichael.yu@gmail.com
|
b0ba0acbb648af7939f881a10a0dbd654373afcc
|
9ded0ebe45e5c28490b9fb48a2b649e75123be69
|
/autoarg/__init__.py
|
54eb6fa2230a6b3dbd3e1be918214aaa9a7c4b16
|
[] |
no_license
|
chenchao-clarifai/AutoArg
|
a85d6d28a08f2e7bc4aad31ccfa29f03d0aeb366
|
8abb67d96517fe477a2c59899d27dfed070d40c3
|
refs/heads/main
| 2023-08-28T01:21:54.868450
| 2021-10-27T14:34:23
| 2021-10-27T14:34:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
from . import arguments, constraints
from .arguments import *
__version__ = "0.0.10"
|
[
"chenchao.zhao@clarifai.com"
] |
chenchao.zhao@clarifai.com
|
c14ee57e81a63e7b7f4f965cf981a387ee04224d
|
0bc81c8742e6b7cd4bd3a804ac41cedee637c921
|
/portalweb/models/messagemanager.py
|
0a41cc2c9308f228c09a8f58fc9cb5e30a789459
|
[] |
no_license
|
TPAC-MARVL/portal
|
c7ff9445ea340774aaa1890e2b847001e6564379
|
b9660d7b771f105360c814e1a861fb16dc036c2b
|
refs/heads/master
| 2016-09-16T11:25:25.742221
| 2014-11-07T04:44:19
| 2014-11-07T04:44:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
import logging
from django.utils import timezone
from ..models.message import MessageTbl
from portalweb.decorators.logger import logger
class MessageTblManager:
_logger = logging.getLogger(__name__)
CONST_RECEIVER = "receiver"
CONST_SENDER = "sender"
@classmethod
@logger
def createMessage(cls, title, content, user_from, user_to, is_group=False):
        message = MessageTbl.objects.create(title=title, content=content, user_from=user_from, user_to=user_to, created=timezone.now(), active=True, is_group=is_group)
return message
@classmethod
@logger
def getMessagesByUser(cls, user, type_flag="receiver"):
messages = None
if type_flag == cls.CONST_RECEIVER:
messages = MessageTbl.objects.filter(user_to=user, active=True).order_by('-created')
if type_flag == cls.CONST_SENDER:
messages = MessageTbl.objects.filter(user_from=user, active=True).order_by('-created')
return messages
|
[
"fxmzb123@gmail.com"
] |
fxmzb123@gmail.com
|
35acdf2aeaab5430420633d3ad3dce39e0de08ad
|
a0d292683c55025111be78ef7694d327df27d61a
|
/readNames.py
|
24888e842b139b480a346d3f9045df31e112fd13
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
cparker/pythonclub
|
3d9b3e583e5bb0a881a3285b675719bfaa772635
|
202d3f2140e183ee165a2deadc19f04d518f227f
|
refs/heads/master
| 2021-01-19T18:29:53.606756
| 2014-10-19T22:01:16
| 2014-10-19T22:01:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 67
|
py
|
nameFile = open('names.txt')
for name in nameFile:
print(name)
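# A context-manager variant (same behavior, but the file is closed automatically):
# with open('names.txt') as nameFile:
#     for name in nameFile:
#         print(name)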
|
[
"christian.parker@emc.com"
] |
christian.parker@emc.com
|
038bea7382d6f31f934f61fa6566ce86371d8383
|
eea802130e35e69cf302c77835cd8a51f04563b3
|
/predict.py
|
96104ee3b5b882e55391194d67d3693fb58ee303
|
[] |
no_license
|
HandHui/BERT_ESIM
|
3ab172a16185c41dc85451b56c942ab0b93043e8
|
e94fd83f49bd203952e5655d13427a53c117650d
|
refs/heads/main
| 2023-05-08T18:13:11.405187
| 2021-05-24T03:39:22
| 2021-05-24T03:39:22
| 370,204,533
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
import argparse
import torch.nn as nn
from model.ESIM import ESIM
from dataHandle.dataPrepare import prepare
from dataHandle.MyLoader import Mydata
import torch
from torch.utils.data import Dataset,DataLoader
from util import *
parser = get_parser()
parser = parser.parse_args()
batch_size = parser.batch_size
use_gpu = parser.use_gpu
learning_rate = parser.learning_rate
epochs = parser.epochs
AB = [
['A','Abest_model.pkl','Aresult.csv'],
['B','Bbest_model.pkl','Bresult.csv']
]
for ab in AB:
_, _,test_data = prepare(ab[0])
# train_loader = DataLoader(Mydata(train_data),batch_size=batch_size,shuffle=True,collate_fn=mycollate_a)
# valid_loader = DataLoader(Mydata(valid_data),batch_size=batch_size,shuffle=True,collate_fn=mycollate_a)
test_loader = DataLoader(Mydata(test_data),batch_size=batch_size,collate_fn=mycollate_test)
model = ESIM()
model.load_state_dict(torch.load(ab[1]))
if use_gpu:
model = model.cuda()
model.eval()
with open(ab[2],'w+') as fout:
fout.write('id,label\n')
for i,data in enumerate(test_loader):
print(i)
batch_flags , encoded_sents, batch_ids = data
source_con,target_con = encoded_sents
sr_batch_token_ids,sr_batch_segment_ids,sr_batch_attention_mask = source_con
tg_batch_token_ids,tg_batch_segment_ids,tg_batch_attention_mask = target_con
logits,p = model(sr_batch_token_ids,sr_batch_segment_ids,sr_batch_attention_mask,
tg_batch_token_ids,tg_batch_segment_ids,tg_batch_attention_mask)
p = p.argmax(dim=-1)
# p = p.to(torch.device('cpu'))
for idx,y in zip(batch_ids,p):
# print(idx)
# print(y.item())
fout.write('%s,%s\n' % (idx, y.item()))
# print(batch_ids)
# print(p)
# break
|
[
"2738696244@qq.com"
] |
2738696244@qq.com
|
36134f1a72b21d720d523aede713e61bea8b0885
|
bd81b23bdcbdafeeb3d4eaef01e374e5fc4e5ccb
|
/1.1gary666爬虫/1025.py
|
bb272f8ab4c83a4b41ad62c6c69880693742bb67
|
[] |
no_license
|
LorentzForceWorkEnzyme/PythonSpiderDemo
|
67a42f5ab5345adf812d71abf699475569c7f344
|
954645c8ab474a8bdf7611fde38ca976951abc0c
|
refs/heads/master
| 2023-04-23T11:56:26.330080
| 2021-05-11T12:08:32
| 2021-05-11T12:08:32
| 307,107,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
# -*- coding: utf-8 -*-
#@author : Sai
import requests
import os
import sys
from bs4 import BeautifulSoup
import random
# url = 'https://baike.baidu.com/item/%E5%90%8D%E4%BE%A6%E6%8E%A2%E6%9F%AF%E5%8D%97%E5%90%84%E9%9B%86%E5%88%97%E8%A1%A8/49823770'
url='https://gary666.com/learn?page=1'
rep = requests.get(url)
rep.encoding = rep.apparent_encoding
# print(rep.text)
soup = BeautifulSoup(rep.content, 'html.parser')  # parser choice; 'lxml' is an alternative
page_list = soup.find_all('div', class_='blogs')
# print(page_list)
f = open('bs4标题Test.txt', 'w+', encoding='utf-8')
for i in page_list:
f.write('文章名:' + i.find('h3').string + '\n'*2)
f.close()
|
[
"746139767@qq.com"
] |
746139767@qq.com
|
dfdd9ae3fd8bfd26429a0fba4bb68dc9acb6065f
|
c4733aae35dcb5fefb10d8bd34ac40dd4a51bb99
|
/eventsourcing/exceptions.py
|
bee352faf21d8741ee47855eee47e915192feb83
|
[
"BSD-3-Clause"
] |
permissive
|
trumanw/eventsourcing
|
bddeb7375a2985401969ea182a7fa4eb04dfa25f
|
d1e53373b201f2dda32c8186879d453f449b1104
|
refs/heads/master
| 2023-03-21T05:29:15.416447
| 2021-03-10T04:06:42
| 2021-03-10T04:06:42
| 320,460,606
| 0
| 0
|
BSD-3-Clause
| 2021-03-10T04:06:24
| 2020-12-11T03:50:00
| null |
UTF-8
|
Python
| false
| false
| 3,024
|
py
|
class EventSourcingError(Exception):
"""Base eventsourcing exception."""
class TopicResolutionError(EventSourcingError):
"""Raised when unable to resolve a topic to a Python class."""
class EntityVersionNotFound(EventSourcingError):
"""Raise when accessing an entity version that does not exist."""
class RecordConflictError(EventSourcingError):
"""Raised when database raises an integrity error."""
class PromptFailed(EventSourcingError):
"""Raised when prompt fails."""
class ConcurrencyError(RecordConflictError):
"""Raised when a record conflict is due to concurrency."""
class ConsistencyError(EventSourcingError):
"""Raised when applying an event stream to a versioned entity."""
class MismatchedOriginatorError(ConsistencyError):
"""Raised when applying an event to an inappropriate object."""
class OriginatorIDError(MismatchedOriginatorError):
"""Raised when applying an event to the wrong entity or aggregate."""
class OriginatorVersionError(MismatchedOriginatorError):
"""Raised when applying an event to the wrong version of an entity or aggregate."""
class MutatorRequiresTypeNotInstance(ConsistencyError):
"""Raised when mutator function received a class rather than an entity."""
class DataIntegrityError(ValueError, EventSourcingError):
"Raised when a sequenced item is damaged (hash doesn't match data)"
class EventHashError(DataIntegrityError):
"Raised when an event's seal hash doesn't match the hash of the state of the event."
class HeadHashError(DataIntegrityError, MismatchedOriginatorError):
"""Raised when applying an event with hash different from aggregate head."""
class EntityIsDiscarded(AssertionError):
"""Raised when access to a recently discarded entity object is attempted."""
class ProgrammingError(EventSourcingError):
"""Raised when programming errors are encountered."""
class RepositoryKeyError(KeyError, EventSourcingError):
"""Raised when using entity repository's dictionary like interface to get an entity that does not exist."""
class ArrayIndexError(IndexError, EventSourcingError):
"""Raised when appending item to an array that is full."""
class DatasourceSettingsError(EventSourcingError):
"Raised when an error is detected in settings for a datasource."
class OperationalError(EventSourcingError):
"Raised when an operational error is encountered."
class TimeSequenceError(EventSourcingError):
"Raised when a time sequence error occurs e.g. trying to save a timestamp that already exists."
class TrackingRecordNotFound(EventSourcingError):
"Raised when a tracking record is not found."
class CausalDependencyFailed(EventSourcingError):
"Raised when a causal dependency fails (after its tracking record not found)."
class EventRecordNotFound(EventSourcingError):
"Raised when an event record is not found."
class EncoderTypeError(TypeError):
pass
class ExceptionWrapper(object):
def __init__(self, e):
self.e = e
|
[
"john.bywater@appropriatesoftware.net"
] |
john.bywater@appropriatesoftware.net
|
eca887d54dac34bc1d995e8ef98f8d0c4c6db21c
|
20ff9c30344ba1c43ba6b3fe862a5894737fcd9f
|
/mongopy/mongodelete.py
|
4a6ecdef138e4a7ac42e4d10147fe2abcb07e24f
|
[] |
no_license
|
jzorrof/mongopy
|
c36bc4b2592cc983438e4dcbddedfca694b7815e
|
4b1270c6936c664e4b1e670756f43ab0ca0e38e0
|
refs/heads/master
| 2021-01-18T13:59:25.101572
| 2015-05-15T10:35:59
| 2015-05-15T10:35:59
| 34,641,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
__author__ = 'joe_fan'
# -*- coding: utf-8 -*-
from pymongo import MongoClient
"""
Create connection
"""
client = MongoClient()
db = client.test
"""
Remove all documents that match a condition
"""
result = db.restaurants.delete_many({"borough": "Manhattan"})
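# delete_many returns a pymongo DeleteResult; its deleted_count attribute
# reports how many documents were removed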
print(result.deleted_count)
"""
Remove all Documents
"""
result = db.restaurants.delete_many({})
print(result.deleted_count)
"""
Drop a collection
"""
db.restaurants.drop()
|
[
"jzorrof@gmail.com"
] |
jzorrof@gmail.com
|
e011a8ff41b4c1e2a48cf3549283d614f53ce725
|
20e47375ace95ea050b76ce31aa50c9124f07c7e
|
/Preciosa/search.py
|
2593144891ace7e2382c9a017417aaa9bbed894c
|
[
"MIT"
] |
permissive
|
ajlopez/PythonSamples
|
583fbc32c4f818db8ac3c4d9694244449afc86ba
|
0efd850e13b0e98dcee542f21b30bb50afd0e838
|
refs/heads/master
| 2016-09-06T10:05:28.445224
| 2014-07-28T11:17:37
| 2014-07-28T11:17:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
import requests
import sys
words = sys.argv[:]
words.pop(0)
query = " ".join(words)
if query:
result = requests.get('http://preciosaannalisa.heroku.com/api/search?q=' + query)
print(result.text)
else:
print("Bazinga!!!")
|
[
"webmaster@ajlopez.com"
] |
webmaster@ajlopez.com
|
0ef403570e34d0e3fef295763dac69505f7cace6
|
273e5f885e64071f6d4e64b0d90402298d480b79
|
/index/templatetags/cart_template_tags.py
|
431ab772ad9f67136c5bc527dd3b4286ffd4a3f5
|
[] |
no_license
|
Aditta-das/ecommerce-website
|
c930cd9899f4fe14c841c1fcc60f8219473017d1
|
9f139bc1e951ceacf9f839e417f806181fd27e8c
|
refs/heads/master
| 2022-12-25T12:41:11.496259
| 2020-06-11T04:02:19
| 2020-06-11T04:02:19
| 247,246,466
| 0
| 0
| null | 2022-12-08T04:01:01
| 2020-03-14T09:21:07
|
CSS
|
UTF-8
|
Python
| false
| false
| 298
|
py
|
from django import template
from index.models import Order
register = template.Library()
@register.filter
def cart_item_count(user):
if user.is_authenticated:
qs = Order.objects.filter(user=user, ordered=False)
        if qs.exists():
return qs[0].items.count()
return 0
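# Template usage sketch: after {% load cart_template_tags %}, apply the filter as
# {{ request.user|cart_item_count }}.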
|
[
"ndas5662@gmail.com"
] |
ndas5662@gmail.com
|
ca923898591e0dc74839d91c8aad39aa33996dc0
|
5b96cc4231bdbadbf17e3bcc88da2811f79473c3
|
/conditions.py
|
46193ede1872db0be8125c7436f74f332dc6694e
|
[] |
no_license
|
Melbinpjossy/A1_week2_python
|
4012d416b2134eeb7f20db0b2e1285d0fce08b03
|
123dc7276bef821421c14edf30e61919c0f6de5d
|
refs/heads/master
| 2020-04-01T21:30:28.530980
| 2018-11-01T16:11:47
| 2018-11-01T16:11:47
| 153,660,047
| 0
| 0
| null | 2018-11-01T16:11:49
| 2018-10-18T17:14:30
|
Python
|
UTF-8
|
Python
| false
| false
| 607
|
py
|
print("Rules that govern the state of water")
# let the user pick a temp, see what happens to water (conditon statements)
current_temp = False
while current_temp is False:
current_temp = input("Enter a temperature: \n")
print(current_temp)
if(int(current_temp) < 0) or (int(current_temp) == 0):
print("water is a solid. icy!")
current_temp = False
elif (int(current_temp) < 100):
print("water is a Liquid. profit!")
current_temp = False
    elif (int(current_temp) > 100) or (int(current_temp) == 100):
print("water is a vapor, cuz its HOT")
current_temp = False
|
[
"m_punchakkunneljossy@tss-hr420-dm27.fc.ca"
] |
m_punchakkunneljossy@tss-hr420-dm27.fc.ca
|
ad2e5d951bee92ca9ec8d6d656328db883ed18db
|
7e41d70ee3bf07dc3043afef020cde173d5fb0bc
|
/airflow_client/client/model/inline_response200.py
|
b00cfe40cb975b7e384a63db36410b0efb1454d7
|
[
"Apache-2.0"
] |
permissive
|
apache/airflow-client-python
|
fb11789076bfed191d730c459c84273781d50246
|
38d55888f7533253857baa878322007f4581fc21
|
refs/heads/main
| 2023-09-05T18:23:37.049610
| 2023-08-23T13:10:27
| 2023-08-23T13:10:27
| 275,569,232
| 251
| 44
|
Apache-2.0
| 2023-08-23T07:49:13
| 2020-06-28T11:20:41
|
Python
|
UTF-8
|
Python
| false
| false
| 20,039
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Airflow API (Stable)
# Overview To facilitate management, Apache Airflow supports a range of REST API endpoints across its objects. This section provides an overview of the API design, methods, and supported use cases. Most of the endpoints accept `JSON` as input and return `JSON` responses. This means that you must usually add the following headers to your request: ``` Content-type: application/json Accept: application/json ``` ## Resources The term `resource` refers to a single type of object in the Airflow metadata. An API is broken up by its endpoint's corresponding resource. The name of a resource is typically plural and expressed in camelCase. Example: `dagRuns`. Resource names are used as part of endpoint URLs, as well as in API parameters and responses. ## CRUD Operations The platform supports **C**reate, **R**ead, **U**pdate, and **D**elete operations on most resources. You can review the standards for these operations and their standard parameters below. Some endpoints have special behavior as exceptions. ### Create To create a resource, you typically submit an HTTP `POST` request with the resource's required metadata in the request body. The response returns a `201 Created` response code upon success with the resource's metadata, including its internal `id`, in the response body. ### Read The HTTP `GET` request can be used to read a resource or to list a number of resources. A resource's `id` can be submitted in the request parameters to read a specific resource. The response usually returns a `200 OK` response code upon success, with the resource's metadata in the response body. If a `GET` request does not include a specific resource `id`, it is treated as a list request. The response usually returns a `200 OK` response code upon success, with an object containing a list of resources' metadata in the response body. When reading resources, some common query parameters are usually available. e.g.: ``` v1/connections?limit=25&offset=25 ``` |Query Parameter|Type|Description| |---------------|----|-----------| |limit|integer|Maximum number of objects to fetch. Usually 25 by default| |offset|integer|Offset after which to start returning objects. For use with limit query parameter.| ### Update Updating a resource requires the resource `id`, and is typically done using an HTTP `PATCH` request, with the fields to modify in the request body. The response usually returns a `200 OK` response code upon success, with information about the modified resource in the response body. ### Delete Deleting a resource requires the resource `id` and is typically executed via an HTTP `DELETE` request. The response usually returns a `204 No Content` response code upon success. ## Conventions - Resource names are plural and expressed in camelCase. - Names are consistent between URL parameter name and field name. - Field names are in snake_case. ```json { \"description\": \"string\", \"name\": \"string\", \"occupied_slots\": 0, \"open_slots\": 0 \"queued_slots\": 0, \"running_slots\": 0, \"scheduled_slots\": 0, \"slots\": 0, } ``` ### Update Mask Update mask is available as a query parameter in patch endpoints. It is used to notify the API which fields you want to update. Using `update_mask` makes it easier to update objects by helping the server know which fields to update in an object instead of updating all fields. The update request ignores any fields that aren't specified in the field mask, leaving them with their current values. 
Example: ``` resource = request.get('/resource/my-id').json() resource['my_field'] = 'new-value' request.patch('/resource/my-id?update_mask=my_field', data=json.dumps(resource)) ``` ## Versioning and Endpoint Lifecycle - API versioning is not synchronized to specific releases of the Apache Airflow. - APIs are designed to be backward compatible. - Any changes to the API will first go through a deprecation phase. # Trying the API You can use a third party client, such as [curl](https://curl.haxx.se/), [HTTPie](https://httpie.org/), [Postman](https://www.postman.com/) or [the Insomnia rest client](https://insomnia.rest/) to test the Apache Airflow API. Note that you will need to pass credentials data. For e.g., here is how to pause a DAG with [curl](https://curl.haxx.se/), when basic authorization is used: ```bash curl -X PATCH 'https://example.com/api/v1/dags/{dag_id}?update_mask=is_paused' \\ -H 'Content-Type: application/json' \\ --user \"username:password\" \\ -d '{ \"is_paused\": true }' ``` Using a graphical tool such as [Postman](https://www.postman.com/) or [Insomnia](https://insomnia.rest/), it is possible to import the API specifications directly: 1. Download the API specification by clicking the **Download** button at the top of this document 2. Import the JSON specification in the graphical tool of your choice. - In *Postman*, you can click the **import** button at the top - With *Insomnia*, you can just drag-and-drop the file on the UI Note that with *Postman*, you can also generate code snippets by selecting a request and clicking on the **Code** button. ## Enabling CORS [Cross-origin resource sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) is a browser security feature that restricts HTTP requests that are initiated from scripts running in the browser. For details on enabling/configuring CORS, see [Enabling CORS](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Authentication To be able to meet the requirements of many organizations, Airflow supports many authentication methods, and it is even possible to add your own method. If you want to check which auth backend is currently set, you can use `airflow config get-value api auth_backends` command as in the example below. ```bash $ airflow config get-value api auth_backends airflow.api.auth.backend.basic_auth ``` The default is to deny all requests. For details on configuring the authentication, see [API Authorization](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html). # Errors We follow the error response format proposed in [RFC 7807](https://tools.ietf.org/html/rfc7807) also known as Problem Details for HTTP APIs. As with our normal API responses, your client must be prepared to gracefully handle additional members of the response. ## Unauthenticated This indicates that the request has not been applied because it lacks valid authentication credentials for the target resource. Please check that you have valid credentials. ## PermissionDenied This response means that the server understood the request but refuses to authorize it because it lacks sufficient rights to the resource. It happens when you do not have the necessary permission to execute the action you performed. You need to get the appropriate permissions in other to resolve this error. 
## BadRequest This response means that the server cannot or will not process the request due to something that is perceived to be a client error (e.g., malformed request syntax, invalid request message framing, or deceptive request routing). To resolve this, please ensure that your syntax is correct. ## NotFound This client error response indicates that the server cannot find the requested resource. ## MethodNotAllowed Indicates that the request method is known by the server but is not supported by the target resource. ## NotAcceptable The target resource does not have a current representation that would be acceptable to the user agent, according to the proactive negotiation header fields received in the request, and the server is unwilling to supply a default representation. ## AlreadyExists The request could not be completed due to a conflict with the current state of the target resource, e.g. the resource it tries to create already exists. ## Unknown This means that the server encountered an unexpected condition that prevented it from fulfilling the request. # noqa: E501
The version of the OpenAPI document: 2.7.0
Contact: dev@airflow.apache.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from airflow_client.client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from airflow_client.client.exceptions import ApiAttributeError
class InlineResponse200(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'continuation_token': (str,), # noqa: E501
'content': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'continuation_token': 'continuation_token', # noqa: E501
'content': 'content', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse200 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
continuation_token (str): [optional] # noqa: E501
content (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse200 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
continuation_token (str): [optional] # noqa: E501
content (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
[
"noreply@github.com"
] |
apache.noreply@github.com
|
0b9c5be27cee3fa644a051abca088f1e969473c2
|
e7bea18d93799fb46ecb91d7d15f81976785c015
|
/TupleEg.py
|
fbaf56593bbe313fced394abaa597f97f9bde7f4
|
[] |
no_license
|
RashmiJK/LearningPython
|
d4fd8c657f8b21cd413c7cfae8865b76717bece5
|
3e4e60e846c5fe6f49096ef2111abef69c0ef459
|
refs/heads/master
| 2020-06-12T20:28:43.680781
| 2019-08-20T07:07:24
| 2019-08-20T07:07:24
| 194,415,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
# Tuples are immutable python objects
# Tuples are sequences like lists, but cannot be changed like lists
# Tuples have parenthesis, whereas lists use square brackets
# + - concatenation, * - repetition works for tuples
tup1 = ('physics', 'chemistry', 1997, 2000)
tup2 = (1, 2, 3, 4, 5, 6, 7)
# empty tuple
tup3 = ()
print("tup1[0] : ", tup1[0])
print("tup2[1:5] : ", tup2[1:5])
tup3 = tup1 + tup2
print(tup3)
for x in tup2:
print(x)
for x in tup2:
print(x, end="")
|
[
"rashmi.kare@gmail.com"
] |
rashmi.kare@gmail.com
|
5c9a624c85218f5c046aa3e147d62c85008ff733
|
d3e48e7f4dd1d080413db921284bb57fc4053ec2
|
/video_crawler/videomonitor/crawler_pptv.py
|
b255713adc88dda1346bded135d0e98b058d823f
|
[] |
no_license
|
muzidudu/video_crawler
|
adb301f001caca22dc446f10e4b78e49f4c39e62
|
3d527f1ac016bc63afbbdc057dd4f9f78c057540
|
refs/heads/master
| 2021-06-05T04:24:54.781942
| 2016-07-05T08:59:14
| 2016-07-05T08:59:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,431
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#encoding=utf8
import sys
sys.path.insert(0, '/opt/video_data/')
from resource.http import HTMLResource,JsonResource
from resource.database import DataBase
import threadpool
import datetime
import traceback
site = 'pptv'
headers = { 'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.4; MI 4LTE Build/KTU84P) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/33.0.0.0 Mobile Safari/537.36 V1_AND_SQ_5.9.1_272_YYB_D QQ/5.9.1.2535 NetType/WIFI WebP/0.3.2 Pixel/1080',
'Accept': '*/*',
'Referer': 'http://player.aplus.pptv.com/corporate/proxy/proxy.html',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'zh-CN,en-US;q=0.8',
# 'Cookie': 'sctx=; PUID=68bcf4237f9847f6bd27a2ac9a982a2a; PUID_CTM=315532800; ppi=302c31; ad_show=1',
'X-Requested-With': 'com.tencent.mobileqq',
}
def get_maxid(table):
max_id = 0
conn, cur = DataBase.open_conn_video()
cur.execute('SELECT max(id) FROM '+table)
for data in cur.fetchall():
max_id = data[0]
cur.execute('SELECT max(id) FROM '+table+'_long')
for data in cur.fetchall():
max_id_long = data[0]
if max_id_long > max_id:
max_id = max_id_long
DataBase.close_conn(conn, cur)
return max_id
def get_datas():
data_list = []
conn, cur = DataBase.open_conn_video()
cur.execute('SELECT id,sid FROM '+site+' WHERE duration=0 order by id desc;')
for data in cur.fetchall():
data_list.append([data[0],data[1]])
DataBase.close_conn(conn, cur)
return data_list
def save(table, result):
conn, cur = DataBase.open_conn_video()
cur.executemany(u'insert into '+table+'(id,sid,duration,imageurl,title,online_time,status) values(%s,%s,%s,%s,%s,%s,%s)', result)
conn.commit()
DataBase.close_conn(conn, cur)
def get_pptv_data(para):
# base_url = 'http://epg.api.pptv.com/detail.api?auth=d410fafad87e7bbf6c6dd62434345818&canal=18&userLevel=0&ppi=AQACAAAAAQAATkoAAAABAAAAAFYb2QAwLQIUZozYqBZXlBVxHpJVldyRl2OTuocCFQCIxNn5rPJ1pNb2cPTIvXEYer727w&appid=com.pplive.androidphone&appver=5.2.2&appplt=aph&vid=%s&series=1&virtual=1&ver=2&platform=android3&contentType=Preview'
base_url = 'http://web-play.pptv.com/webplay3-0-%s.xml?version=4&type=mpptv&kk=c312abb400f993d82dbb9489f21846c6-6506-561b7382&fwc=0&complete=1&o=m.pptv.com&rcc_id=m.pptv.com&cb=getPlayEncode'
_id, data_list, data_list_long = para
try:
content = JsonResource(base_url % _id, headers).get_resource()
if content is None:
content = JsonResource(base_url % _id, headers).get_resource()
if content is not None:
datas = content.get('childNodes', [])
for data in datas:
if data["tagName"] == "error":
break
if data["tagName"] == "channel":
# http://v.pptv.com/show/NYFJxia6UBEKlI4s.html
sid = data['lk'][23: -5]
# http://v.img.pplive.cn/cp120/44/1b/441b029781a73aff2a12850daa32e341/31.jpg
# http://s1.pplive.cn/v/cap/id/w300.jpg
# http://s1.pplive.cn/v/cap/23557685/w640.jpg?v01
imageurl = data['pic']
duration = data['dur']
online_time = str(data['timestamp'])
title = str(data['nm']).replace('\r','|').replace('\n','|') #.replace(',',';')
if duration < 1500:
data_list.append([_id,sid,duration,imageurl,title,online_time,0])
else:
data_list_long.append([_id,sid,duration,imageurl,title,online_time,0])
break
except Exception as e:
print e
traceback.print_exc()
print _id
def craw_pptv(start, end):
data_list = []
data_list_long = []
para_list=[]
while start < end:
para_list.append([start, data_list, data_list_long])
start += 1
pool_size = 8
pool = threadpool.ThreadPool(pool_size)
requests = threadpool.makeRequests(get_pptv_data, para_list)
[pool.putRequest(req) for req in requests]
pool.wait()
pool.dismissWorkers(pool_size, do_join=True)
count = len(data_list)
if count > 0:
save(site, data_list)
count_long = len(data_list_long)
if count_long > 0:
save(site+'_long', data_list_long)
print count, count_long
return (count + count_long, count_long)
def save_oplog(site, id_start, num, num_long):
id_end = get_maxid(site)
print id_end
exe_time = datetime.datetime.now()
conn, cur = DataBase.open_conn_video()
cur.execute('insert into oplog(site, id_start, id_end, num, num_long, create_date, update_date) values(%s, %s, %s, %s, %s, %s, %s)',( site, id_start, id_end, num, num_long, exe_time, exe_time))
conn.commit()
DataBase.close_conn(conn, cur)
if __name__ == '__main__':
reload(sys)
sys.setdefaultencoding('utf8')
# max_id = 23500000
max_id = get_maxid(site)
start = max_id + 1
print start
id_start = start
num = 0
num_long = 0
step = 500
while(True):
count, count_long = craw_pptv(start, start+step)
if count == 0:
break
num += count
num_long += count_long
start += step
save_oplog(site, id_start, num, num_long)
|
[
"HaoFeng@qiyi.com"
] |
HaoFeng@qiyi.com
|
12431a60252e4c5ae6050e542d0137baa9531b36
|
850c423efb96a767c2e57a66ee70e5e6487fac16
|
/nomadgram/users/migrations/0002_auto_20180804_2233.py
|
609974e5aea73e6ce079ef9a2b90e506e0ae662f
|
[
"MIT"
] |
permissive
|
eueezy61/nomadgram2
|
3dacbf8f4e3ec854ab1819c6930405e380974195
|
66a7ad0356a72e5eab5e28d8e8573cb848cb0288
|
refs/heads/master
| 2021-08-22T11:45:12.395541
| 2018-12-11T10:37:38
| 2018-12-11T10:37:38
| 143,878,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
# Generated by Django 2.0.8 on 2018-08-04 13:33
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='bio',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='user',
name='followers',
field=models.ManyToManyField(related_name='_user_followers_+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='user',
name='following',
field=models.ManyToManyField(related_name='_user_following_+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='user',
name='gender',
field=models.CharField(choices=[('male', 'Male'), ('female', 'Female'), ('not-specified', 'Not-specified')], max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='phone',
field=models.TextField(max_length=140, null=True),
),
migrations.AddField(
model_name='user',
name='website',
field=models.URLField(null=True),
),
]
|
[
"eueezy61@naver.com"
] |
eueezy61@naver.com
|
f0a87dbe0590dc9dc507f2fbee528d4ce82f70e5
|
aa598fe85e58ce10e94b6ea56093ab6b30b49e0e
|
/app/tests/test_orders.py
|
ac5e50e9e9d14a308aa1705a95c320cf9b024f36
|
[] |
no_license
|
SyntaxStacks/NOMS
|
4c5c6dc91ce9213ec12e49ba9efbbf522532a648
|
8057a045cf0923180b3df7f348e7f525a467fe55
|
refs/heads/master
| 2021-01-21T19:20:25.861375
| 2014-02-18T18:39:52
| 2014-02-28T05:05:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,937
|
py
|
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from rest_framework import status
import json
import time
class OrdersTest(APITestCase):
def create_product(self, name, quantity):
data = {
'name': name,
'quantity': quantity
}
        url = reverse('product-list')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
return response
    def create_line_item(self, name, quantity, productIds):
        # use the parameters instead of hard-coded values so callers
        # actually control the created line item
        data = {
            "product": productIds,
            "name": name,
            "quantity": quantity,
            "value": "12"
        }
        url = reverse('lineitem-list')
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        return response
# orders
def test_get_orders_200(self):
"""
check for 200 status from orders
"""
url = reverse('order-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
return response
def test_create_order(self):
"""
create order via POST
"""
productName = 'product' + str(time.time())
productQuantity = 10
res = self.create_product(productName, productQuantity)
res = json.loads(res.content)
productId = res['id']
lineItemName = 'product' + str(time.time())
lineItemQuantity = '1'
res = self.create_line_item(lineItemName, lineItemQuantity, productId)
res = json.loads(res.content)
self.assertEqual(res['id'],1)
lineItemIds = [ res['id'] ]
data = {
'name': 'order' + str(time.time()),
'vendor': 'seller',
'customer': 'cust1',
"lineItems": lineItemIds
}
url = reverse('order-list')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
|
[
"info@johnlozano.me"
] |
info@johnlozano.me
|
3eb01e44c2877f45f57e082b9ea9d506646a55f7
|
a4ac3bad8fc4d3616686670dce3029b93fbc1fce
|
/PROJETOPOO2/QUESTAO2/Produto.py
|
591f52a7d11b66bdc43cb2da68c1c044bf29408c
|
[] |
no_license
|
mariaIFPB/ProjetoPOO2
|
4940894b8543a8f30e2727254aeccb64e85bb5a9
|
b2686649140f03382669f0a26ecb6d1fb64034d4
|
refs/heads/master
| 2021-01-01T06:04:17.143190
| 2017-07-16T02:52:13
| 2017-07-16T02:52:13
| 97,352,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
class Produto:
def __init__(self, valor, nome):
self.valor = valor
self.nome = nome
def __str__(self):
return "Produto Nome: " + self.nome + ", valor: " + str(self.valor)
|
[
"noreply@github.com"
] |
mariaIFPB.noreply@github.com
|
63a271058bf991c8114ebc3f4f07b8b77e894cc3
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/68/usersdata/205/35684/submittedfiles/crianca.py
|
808ab86c894e144b339c8b36f946604e7d1ae392
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
# -*- coding: utf-8 -*-
p1=float(input('digite o peso da crianca da esquerda:'))
c1=float(input('digite o comprimento da gangorra do lado esquerdo:'))
p2=float(input('digite o peso da crianca da direita:'))
c2=float(input('digite o comprimento da gangorra do lado direito:'))
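# Seesaw balance check: compare the torque (weight * arm length) on each side;
# prints 0 if balanced, -1 if the left side goes down, 1 if the right side goes down.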
a=p1*c1
b=p2*c2
if(a==b):
print('0')
elif(a>b):
print('-1')
else:
print('1')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
aa89b4e16ad47f84047e2a4ea6602526f0671485
|
a7afe0edc04faaca0aad48675d872bba9238c50a
|
/8.py
|
b75fb17b7cb317224b0dea66476f126223146bc6
|
[] |
no_license
|
tungduonghgg123/projectEuler100Challenge
|
5fcb3082ccdc7870f7eaaaba28ae3f91b6f5b1ab
|
f246dfbff69318429f0057b6c0bebf724958331a
|
refs/heads/master
| 2020-12-03T21:37:35.424835
| 2020-02-10T07:49:19
| 2020-02-10T07:49:19
| 231,494,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,607
|
py
|
def lengthOfInteger(num):
    # digit count; inlined here so the script does not depend on the
    # external primeNumber module
    return len(str(num))
a = 7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450
interval = 4
maxProduct = 0
def productOfDigits(num):
    string = str(num)
    product = 1
    for x in range(0, len(string)):
        product = product * int(string[x])
    return product
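# e.g. productOfDigits(9989) -> 9*9*8*9 = 5832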
# first approach, consider a as a number
while a != 0:
    sequence = a % 10**interval
    a = a // 10
    if lengthOfInteger(sequence) != interval:
        continue
    newProduct = productOfDigits(sequence)
    if maxProduct < newProduct:
        maxProduct = newProduct
# second approach (not implemented): consider a as a string
print ( maxProduct )
|
[
"tungduonghgg123@gmail.com"
] |
tungduonghgg123@gmail.com
|
21abd04a47e8353b9508e1ebe3947a85cc81bc00
|
878278cc472cf0b13d143dcf142ba60b103756b3
|
/expenseProj/expenseApp/models.py
|
be317afd9359ecc883893fec5999d7f5a2c5792c
|
[
"Apache-2.0"
] |
permissive
|
cs-fullstack-fall-2018/project3-django-cmadisonne
|
313bf265f62039b28866d84dc2d51d3a7858da12
|
787acef8fb9e9a2447f8a5a9eb28968bc9acbb1e
|
refs/heads/master
| 2020-04-02T00:58:53.621802
| 2018-11-15T12:51:34
| 2018-11-15T12:51:34
| 153,831,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
class ExpenseModel(models.Model):
username = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
checking = models.IntegerField(default=0)
emergency = models.IntegerField(default=0)
timeCreated = models.DateTimeField(default=datetime.now)
    def __str__(self):
        return str(self.username)
class Transaction(models.Model):
depositOrWithdrawl = models.FloatField()
account_fk = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
timeOfTransaction = models.DateTimeField(default=datetime.now)
    def __str__(self):
        return str(self.account_fk)
|
[
"cmadisonne@gmail.com"
] |
cmadisonne@gmail.com
|
4bc43807e3a1a36fbcce46e231264d56a8537701
|
0945654b58b39ef967dc17e8444483205d5fb6aa
|
/core/urls.py
|
c48965dd8b431790747c6864bb28b9c62408ed8f
|
[] |
no_license
|
Juzeka/Site-Portifolio
|
3b77a53be31129e0750047745b1abe6a81ea7217
|
d8a9448f35ac105a3ae81ccf444a1fff568ff9a3
|
refs/heads/master
| 2023-04-14T15:09:26.411448
| 2021-04-15T21:46:54
| 2021-04-15T21:46:54
| 358,950,398
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
from django.urls import path
from core import views
urlpatterns = [
path('', views.home, name='index'),
path('contato', views.contato, name='contato'),
]
|
[
"rafaelgomesalmeida@hotmail.com"
] |
rafaelgomesalmeida@hotmail.com
|
ec8a8679df2b0b323978226a6e24c4af74cc0f57
|
ea878b753a9265904dde7b99429c26733b9576f4
|
/深度学习/tensorFlow加法器.py
|
1a554e1616ef1426c721ae7b2bf76cc9e5ac7713
|
[] |
no_license
|
songzeceng/pythonProjects
|
434c1bcacc8902b4931ad55d19fa16509b07ee5e
|
95914e14b0b7416e94d98e4aa38cd40859f27d8a
|
refs/heads/master
| 2020-06-22T12:09:18.791953
| 2019-07-20T02:20:49
| 2019-07-20T02:20:49
| 197,712,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
import tensorflow as tf
if __name__ == '__main__':
graph = tf.get_default_graph()
    # the default graph; roughly, the memory the program's ops are allocated in
print(graph)
    # build a new graph as a context, holding a set of ops (API functions) and tensors (data)
g = tf.Graph()
with g.as_default():
c = tf.constant(7.0)
print(c.graph)
a = tf.constant(5.0)
b = tf.constant(6.0)
    c = 3.2  # rebinds c, so the 7.0 constant above is no longer referenced
    print(a + c)  # the scalar is auto-converted; this prints a Tensor, not 8.2
print("a=", a)
print("b=", b)
sum1 = tf.add(a, b)
print("sum1=", sum1)
    with tf.Session(graph=graph) as session:
        # a session runs the graph's structure and computations and
        # manages/allocates resources; each session runs exactly one graph
        print(session.run(sum1))
        print(a.graph)
        # no explicit session.close() needed: the with-block closes it
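    # To evaluate the 7.0 constant defined in the second graph `g`, a separate
    # session bound to that graph would be needed (TF1.x sketch; "Const:0" is
    # the auto-generated name of the first constant in a fresh graph):
    #   with tf.Session(graph=g) as s:
    #       print(s.run(g.get_tensor_by_name("Const:0")))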
|
[
"392004248@qq.com"
] |
392004248@qq.com
|
76b920b35337545b8f16694c0692f0d9f40102db
|
46a12d7e5236e09973e44fa8f846c6f8eaf0d429
|
/controller/create_sqs.py
|
ed037fca8c042f25b083d3f7c7ace4a78c257d9a
|
[] |
no_license
|
aluka1994/EdgeComputing
|
d21ddd13c19f2082534c4d7be4c0705875d6171c
|
849fed2214cf50b0229f69db5f9aa13e4f9dd9c1
|
refs/heads/master
| 2022-12-14T12:47:09.848985
| 2020-09-09T00:55:15
| 2020-09-09T00:55:15
| 247,433,447
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
import boto3
def list_queue():
sqs = boto3.client('sqs')
response = sqs.list_queues()
print(response)
def list_queue_name(name="testProject"):
    sqs = boto3.resource('sqs')
    response = sqs.get_queue_by_name(QueueName=name)
print(response.url)
print("VisibilityTimeout: ", response.attributes.get('VisibilityTimeout'))
print("ApproximateNumberOfMessages: ", response.attributes.get('ApproximateNumberOfMessages'))
print("ApproximateNumberOfMessagesNotVisible: ", response.attributes.get('ApproximateNumberOfMessagesNotVisible'))
print(response)
return response
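# The filename suggests queue creation, but no create function is present.
# A minimal sketch (not in the original) using the documented boto3 call,
# assuming default queue attributes:
#
# def create_queue(name="testProject"):
#     sqs = boto3.client('sqs')
#     response = sqs.create_queue(QueueName=name)
#     return response['QueueUrl']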
|
[
"pr92zala@gmail.com"
] |
pr92zala@gmail.com
|
657212fc998e61d1ecd01b678a88998c4db624f0
|
64465ccb6a445c7115ca7d58a508aa5d40b09060
|
/blog/models.py
|
10138a523799eee061fd4cd74d079cb94c90d225
|
[] |
no_license
|
gegiants/BeautySalonSite
|
7de7d9aa12852017ae6c330d2c7d422102b6fc10
|
fe8ab51cd28fc2707b5e1a93640c38acb845473f
|
refs/heads/master
| 2020-11-28T16:31:47.784532
| 2016-08-17T12:52:09
| 2016-08-17T12:52:09
| 65,906,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,338
|
py
|
from django.db import models
from django.utils import timezone
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
class Post(models.Model):
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
# Hair Extensions/Prices.
class Weave(models.Model):
type = models.CharField(max_length=200,primary_key=True)
description = models.TextField()
def __str__(self):
return self.type
class Weave_Price(models.Model):
disc = models.CharField(max_length=200)
weave = models.ForeignKey(Weave, on_delete=models.CASCADE)
price = models.IntegerField()
def __str__(self):
return "%s - %s" % (self.weave, self.disc)
class KeratinBond(models.Model):
type = models.CharField(max_length=200,primary_key=True)
description = models.TextField()
def __str__(self):
return self.type
class KeratinBond_Price(models.Model):
    disc = models.CharField(max_length=200)
    keratinBond = models.ForeignKey(KeratinBond, on_delete=models.CASCADE)
    price = models.IntegerField()
    def __str__(self):
        return "%s - %s" % (self.keratinBond, self.disc)
class MicroRing(models.Model):
type = models.CharField(max_length=200,primary_key=True)
description = models.TextField()
def __str__(self):
return self.type
class MicroRing_Price(models.Model):
disc = models.CharField(max_length=200)
microRing = models.ForeignKey(MicroRing, on_delete=models.CASCADE)
price = models.IntegerField()
def __str__(self):
return "%s - %s" % (self.microRing, self.disc)
class TapedHairExtention(models.Model):
type = models.CharField(max_length=200,primary_key=True)
description = models.TextField()
def __str__(self):
return self.type
class TapedHairExtention_Price(models.Model):
    disc = models.CharField(max_length=200)
    tapedHairExtention = models.ForeignKey(TapedHairExtention, on_delete=models.CASCADE)
    price = models.IntegerField()
    def __str__(self):
        return "%s - %s" % (self.tapedHairExtention, self.disc)
class MakeUp(models.Model):
type = models.CharField(max_length=200,primary_key=True)
description = models.TextField()
def __str__(self):
return self.type
class Lash_Brow(models.Model):
type = models.CharField(max_length=200,primary_key=True)
description = models.TextField()
def __str__(self):
return self.type
class Nail(models.Model):
type = models.CharField(max_length=200,primary_key=True)
description = models.TextField()
def __str__(self):
return self.type
class Hairdressing(models.Model):
type = models.CharField(max_length=200,primary_key=True)
description = models.TextField()
def __str__(self):
return self.type
class Pricee(models.Model):
type = models.CharField(max_length=200)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
price = models.IntegerField()
def __str__(self):
return self.type
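# Pricee attaches a price to any of the service models above through the
# generic relation. Example (hypothetical values):
#   nail = Nail.objects.create(type='Gel overlay', description='...')
#   Pricee.objects.create(type='Gel overlay', content_object=nail, price=30)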
# Create your models here.
|
[
"George Y"
] |
George Y
|
7a554e93ba98a00c3cc36a2a09a8955dfb311c98
|
aae4661e3e7047a1a3d5ed772c5d02c7033adfa1
|
/could_improve/p11s1.py
|
a91c6ec6661e6d94d2415bdf502fdba9c31e9e0e
|
[] |
no_license
|
martingehrke/projecteuler
|
97c4225ccacb81d0498d9fb6b21714d68ecae75f
|
8572f220d17bab59b85035fed79b9b867e64f886
|
refs/heads/master
| 2021-01-19T10:38:44.130545
| 2015-03-06T21:12:12
| 2015-03-06T21:12:12
| 31,789,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,518
|
py
|
L = [[8,02,22,97,38,15,00,40,00,75,04,05,07,78,52,12,50,77,91,8],
[49,49,99,40,17,81,18,57,60,87,17,40,98,43,69,48,04,56,62,00],
[81,49,31,73,55,79,14,29,93,71,40,67,53,88,30,03,49,13,36,65],
[52,70,95,23,04,60,11,42,69,24,68,56,01,32,56,71,37,02,36,91],
[22,31,16,71,51,67,63,89,41,92,36,54,22,40,40,28,66,33,13,80],
[24,47,32,60,99,03,45,02,44,75,33,53,78,36,84,20,35,17,12,50],
[32,98,81,28,64,23,67,10,26,38,40,67,59,54,70,66,18,38,64,70],
[67,26,20,68,02,62,12,20,95,63,94,39,63,8,40,91,66,49,94,21],
[24,55,58,05,66,73,99,26,97,17,78,78,96,83,14,88,34,89,63,72],
[21,36,23,9,75,00,76,44,20,45,35,14,00,61,33,97,34,31,33,95],
[78,17,53,28,22,75,31,67,15,94,03,80,04,62,16,14,9,53,56,92],
[16,39,05,42,96,35,31,47,55,58,88,24,00,17,54,24,36,29,85,57],
[86,56,00,48,35,71,89,07,05,44,44,37,44,60,21,58,51,54,17,58],
[19,80,81,68,05,94,47,69,28,73,92,13,86,52,17,77,04,89,55,40],
[04,52,8,83,97,35,99,16,07,97,57,32,16,26,26,79,33,27,98,66],
[88,36,68,87,57,62,20,72,03,46,33,67,46,55,12,32,63,93,53,69],
[04,42,16,73,38,25,39,11,24,94,72,18,8,46,29,32,40,62,76,36],
[20,69,36,41,72,30,23,88,34,62,99,69,82,67,59,85,74,04,36,16],
[20,73,35,29,78,31,90,01,74,31,49,71,48,86,81,16,23,57,05,54],
[01,70,54,71,83,51,54,69,16,92,33,48,61,43,52,01,89,19,67,48]]
def indexes(dir,x,y):
    if dir == 'up':
        return [[x,y],[x,y-1],[x,y-2],[x,y-3]]
    if dir == 'down':
        return [[x,y],[x,y+1],[x,y+2],[x,y+3]]
    if dir == 'left':
        return [[x,y],[x-1,y],[x-2,y],[x-3,y]]
    if dir == 'right':
        return [[x,y],[x+1,y],[x+2,y],[x+3,y]]
    if dir == 'upl':
        return [[x,y],[x-1,y-1],[x-2,y-2],[x-3,y-3]]
    if dir == 'downr':
        return [[x,y],[x+1,y+1],[x+2,y+2],[x+3,y+3]]
    if dir == 'downl':
        return [[x,y],[x-1,y+1],[x-2,y+2],[x-3,y+3]]
    if dir == 'upr':
        return [[x,y],[x+1,y-1],[x+2,y-2],[x+3,y-3]]
dirs = ['up','down','left','right','upl','upr','downr','downl']
def getsum(indexes):
    SUM = 1
    for I in indexes:
        x = I[0]
        y = I[1]
        if x < 0 or y < 0:
            return 0  # negative indices would silently wrap around in Python
        try: NUM = L[x][y]
        except IndexError: return 0
        SUM *= NUM
    return SUM
MAX = 0
for i in range(20):  # the grid is 20x20
    for j in range(20):
for dir in dirs:
indexs = indexes(dir,i,j)
SUM = getsum(indexs)
if SUM > MAX: MAX=SUM
print MAX
|
[
"martin@teamgehrke.com"
] |
martin@teamgehrke.com
|
a9feb59e258316c682051d477b944c1386b6c575
|
47dc90dad18d70ae28cebf4f208ff35eaac604a1
|
/Cienciometrico/apps/Privilegios/migrations/0001_initial.py
|
8d464f820f38e6783cfe40db4d9e6483f9fe3e99
|
[] |
no_license
|
cienciometrico2017/cienciometricoutc
|
4061e027cde6550fbcbb5e7ed4dfa2a5734ded94
|
0c9562aeaf808c9f06a31abdeba885139931b869
|
refs/heads/master
| 2021-05-08T23:23:00.936242
| 2018-05-07T21:53:04
| 2018-05-07T21:53:04
| 119,704,564
| 0
| 0
| null | 2018-05-07T21:53:05
| 2018-01-31T15:22:22
|
HTML
|
UTF-8
|
Python
| false
| false
| 852
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-07 19:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Investigador', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='privilegios',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Nombre', models.CharField(max_length=40)),
('Descripcion', models.TextField(max_length=200)),
('investigador', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Investigador.investigador')),
],
),
]
|
[
"35942089+SirAvalon@users.noreply.github.com"
] |
35942089+SirAvalon@users.noreply.github.com
|