repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
Rodney-Smith/rc522-mqtt | readRFID.py | <gh_stars>0
#!/usr/bin/python
import json
import os
import socket
import sys
import time
from pirc522 import RFID
import paho.mqtt.client as mqtt
def LOG(msg):
    """Write *msg* to stdout; single choke point for future log routing."""
    print(msg)
def message(client, userdata, msg):
    """paho-mqtt on_message callback: log topic and payload of incoming messages."""
    LOG("Message received on topic {0}:{1}".format(msg.topic, str(msg.payload)))
    pass
def connect(client, userdata, flags, rc):
    """paho-mqtt on_connect callback.

    On success (rc == 0) publishes a retained "up and running" message to the
    configured status topic; otherwise terminates the process.
    """
    LOG("Flags: {0}\n RC: {1}".format(flags, rc))  # result of the connection attempt
    if rc == 0:
        LOG("Successfully connected to MQTT Broker!")
        client.publish(
            config['mqtt']['status_topic'],  # topic (module-level config)
            "rc522 is up and running",       # payload
            qos=1,
            retain=True
        )
    else:
        LOG("Connection to MQTT Broker Failed!")
        sys.exit(1)
def disconnect(client, userdata, rc):
    """paho-mqtt on_disconnect callback: log the disconnection."""
    LOG("Disconnected from MQTT Broker!")
def subscribe(client, userdata, topic, qos):
    """paho-mqtt on_subscribe callback: log the subscription."""
    LOG("Subscribed to {0} with QOS level {1}".format(topic, qos))
def unsubscribe(client, userdata, topic, pid):
    """paho-mqtt on_unsubscribe callback: log the unsubscription."""
    LOG("Unsubscribed from {0} with PID {1}".format(topic, pid))
def publish(client, userdata, topic):
    """paho-mqtt on_publish callback: log the publish (currently unbound below)."""
    LOG("Published to {0}".format(topic))
def getConfig(path='./config.json'):
    """Load and validate MQTT settings from a JSON config file.

    Args:
        path: Config file location (default './config.json'; new optional
            parameter, backward compatible).

    Returns:
        The parsed configuration dict.

    Exits the process when the file is missing or any required mqtt key
    (broker, topic, user, password) is absent or null.
    """
    if not os.path.isfile(path):
        LOG("Could not find configuration file.")
        sys.exit(1)
    with open(path) as json_file:
        c = json.load(json_file)
    # Bug fix: the original indexed c['mqtt'][key] directly, raising KeyError
    # instead of the intended friendly message when a key was missing; .get()
    # treats "missing" and "explicit null" the same. Also use sys.exit, which
    # is reliable outside the interactive interpreter, instead of exit().
    for key in ('broker', 'topic', 'user', 'password'):
        if c.get('mqtt', {}).get(key) is None:
            LOG("Please supply an MQTT %s value." % key.upper())
            sys.exit(1)
    return c
def rfid_read(reader):
    """Block until an RFID tag is presented; return its UID as a dotted string.

    Args:
        reader: A pirc522 RFID instance.

    Returns:
        UID bytes joined with '.', e.g. "136.4.21.62", or None when the
        request/anti-collision handshake fails.
    """
    uid_str = None
    LOG("Waiting to read tag")
    reader.wait_for_tag()  # blocks until the IRQ line signals a tag
    LOG("Found")
    (error, data) = reader.request()
    if not error:
        LOG("Detected: " + format(data, "02x"))
        (error, uid) = reader.anticoll()  # resolves the tag's UID
        if not error:
            uid_str = ".".join(str(e) for e in uid)
            LOG("Card UID: " + uid_str)
        else:
            LOG("Error in Anti-collision")
    else:
        LOG("Error in Request")
    return uid_str
def main():
    """Connect to the MQTT broker and publish every scanned tag UID forever.

    Uses the module-level `config` and `client`. Publishes a retained
    "not responding" status message on shutdown.
    """
    try:
        LOG("Attempting to connect to %s" % config['mqtt']['broker'])
        client.connect(host=config['mqtt']['broker'], port=config['mqtt']['port'])
    except socket.error as err:
        LOG(err)
        sys.exit(1)
    client.loop_start()  # MQTT network loop runs on a background thread
    reader = RFID()
    try:
        last_uid_str = None
        last_time = 0
        while True:
            uid_str = rfid_read(reader)
            td = time.time() - last_time  # seconds since the previous read cycle
            if uid_str:
                # Debounce: only re-publish for a different tag, or the same
                # tag after more than one second.
                if (uid_str != last_uid_str) or (td > 1):
                    last_uid_str = uid_str
                    client.publish(
                        config['mqtt']['event_topic'],  # topic
                        uid_str,                        # payload: dotted UID
                        qos=1,
                        retain=True
                    )
            time.sleep(0.1)
            last_time = time.time()
    except KeyboardInterrupt:
        LOG("Keyboard Interrupt Received")
    finally:
        # Always release GPIO and tell subscribers we are gone.
        reader.cleanup()
        client.publish(
            config['mqtt']['status_topic'],
            "rc522 is not responding",
            qos=1,
            retain=True
        )
        client.disconnect()
        sys.exit(0)
# --- module setup (runs at import time) ---------------------------------
# get the config
config = getConfig()
# create paho mqtt client object
client = mqtt.Client(
    client_id=config['mqtt']['clientid'],
    clean_session=True,
    userdata=None,
    transport="tcp"
)
client.username_pw_set(username=config['mqtt']['user'], password=config['mqtt']['password'])  # broker credentials
client.on_connect = connect          # successful connection
client.on_disconnect = disconnect    # disconnection
client.on_subscribe = subscribe      # topic subscription
client.on_unsubscribe = unsubscribe  # topic unsubscription
#client.on_publish = publish         # publish confirmation (deliberately disabled)
client.on_message = message          # incoming message
if __name__ == "__main__":
    main()
# NOTE(review): module metadata placed after the entry-point guard; harmless,
# but convention puts dunders near the top of the file.
__author__ = "<NAME>"
__copyright__ = "Copyright 2022, RFID Card Reader Project"
__license__ = "MIT"
__version__ = "1.0.1"
__contact__ = "<EMAIL>"
__status__ = "Development"
|
Kiru-axis/I-connect | config.py | import os
import secrets
secret =secrets.token_urlsafe(32)
class Config:
    """Base Flask configuration: secret key, Gmail SMTP, and upload location."""
    SECRET_KEY = secret  # fresh random token per process (secrets.token_urlsafe)
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    UPLOADED_PHOTOS_DEST = 'app/static/photos'
class ProdConfig(Config):
    '''
    Production configuration child class

    Args:
        Config: The parent configuration class with General configuration settings
    '''
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL", "")
    # Heroku-style URLs use the deprecated postgres:// scheme; SQLAlchemy
    # only accepts postgresql://, so rewrite the prefix once.
    if SQLALCHEMY_DATABASE_URI.startswith("postgres://"):
        SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI.replace("postgres://", "postgresql://", 1)
class DevConfig(Config):
    """Development configuration: local Postgres database, debug enabled."""
    # NOTE(review): credentials are hard-coded here; consider environment
    # variables even for development.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://axs:code@localhost/iconnect'
    DEBUG = True
# Maps an environment name to its configuration class.
config_options = {
    'development': DevConfig,
    'production': ProdConfig,
}
Kiru-axis/I-connect | app/auth/forms.py | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField
from wtforms.validators import DataRequired,Length,Email,EqualTo
from ..models import User
from wtforms import ValidationError
# Registration form
class RegistrationForm(FlaskForm):
    """Sign-up form with uniqueness validators for email and username."""
    username = StringField("Username", validators=[DataRequired(), Length(min=3, max=20)])
    email = StringField("Email", validators=[DataRequired(), Email()])
    password = PasswordField("Password", validators=[DataRequired(), ])
    # NOTE(review): the label "<PASSWORD>password" looks like a redacted
    # placeholder — confirm the intended label text.
    confirm_password = PasswordField("<PASSWORD>password", validators=[DataRequired(), EqualTo("password")])
    submit = SubmitField("Sign up")

    def validate_email(self, data_field):
        """WTForms inline validator: reject an already-registered email."""
        if User.query.filter_by(email=data_field.data).first():
            raise ValidationError(message="The Email has already been taken!")

    def validate_username(self, data_field):
        """WTForms inline validator: reject a duplicate username."""
        if User.query.filter_by(username=data_field.data).first():
            raise ValidationError(message="The username has already been taken")
class LoginForm(FlaskForm):
    """Login form: email + password with an optional remember-me flag."""
    email = StringField("Email", validators=[DataRequired(), Email()])
    password = PasswordField("Password", validators=[DataRequired(), ])
    remember = BooleanField("Remember me")
    submit = SubmitField("Login")
|
Kiru-axis/I-connect | app/main/forms.py | from flask_wtf import FlaskForm
from flask_wtf.file import FileField,FileAllowed
from wtforms import StringField,TextAreaField, SubmitField,ValidationError
from wtforms.validators import Required,Email
from flask_login import current_user
from ..models import User
# Update profile form
class UpdateProfile(FlaskForm):
    """Profile-edit form (bio only) with email/username uniqueness validators.

    NOTE(review): the form declares no email/username fields, so WTForms never
    invokes validate_email/validate_username here — confirm whether fields are
    added by a subclass or the validators are dead code.
    """
    # NOTE(review): `Required` is deprecated in modern WTForms in favour of
    # DataRequired; kept as-is to match this file's imports.
    bio = TextAreaField('Tell us about you.', validators=[Required()])
    submit = SubmitField('Submit')

    def validate_email(self, email):
        # Only flag a duplicate when the address actually changed.
        if email.data != current_user.email:
            if User.query.filter_by(email=email.data).first():
                raise ValidationError("The Email has already been taken!")

    def validate_username(self, username):
        # Only flag a duplicate when the username actually changed.
        if username.data != current_user.username:
            if User.query.filter_by(username=username.data).first():
                raise ValidationError("The username has already been taken")
# Gives the user the ability to create posts.
class CreateBlog(FlaskForm):
    """Blog-post creation form: title and content."""
    title = StringField('Title', validators=[Required()])
    content = TextAreaField('Blog Content', validators=[Required()])
    submit = SubmitField('Post')
# Subscribe form
class SubscriptionForm(FlaskForm):
    """Newsletter subscription form.

    NOTE(review): `subscribe` is a StringField labelled 'Subscribe' — this
    looks like it was meant to be a SubmitField; confirm against the template.
    """
    email = StringField('Your Email Address', validators=[Required(), Email()])
    subscribe = StringField('Subscribe')
|
Kiru-axis/I-connect | app/auth/views.py | <reponame>Kiru-axis/I-connect
from flask import render_template,flash, request, redirect, url_for
from flask_login import login_user, logout_user,login_required
from app.auth import auth
from app.models import User
from .forms import RegistrationForm,LoginForm
from .. import db
# Registration of users.
@auth.route('/register', methods=["POST", "GET"])
def register():
    """Create a new user account and redirect to the login page on success."""
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data,
                    password=form.password.data)
        db.session.add(user)
        db.session.commit()
        return redirect(url_for('auth.login'))
    title = "New Account"
    # Bug fix: `title` was assigned but never passed to the template; forward
    # it for consistency with login() below.
    return render_template('auth/register.html', registration_form=form,
                           title=title)
# login
@auth.route('/login', methods=['POST', 'GET'])
def login():
    """Authenticate a user and redirect to the requested page or the index."""
    login_form = LoginForm()
    if login_form.validate_on_submit():
        user = User.query.filter_by(email=login_form.email.data).first()
        if user is not None and user.verify_password(login_form.password.data):
            login_user(user, login_form.remember.data)
            # Honour the ?next= target set by @login_required redirects.
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or Password')
    title = "Blog login"
    return render_template('auth/login.html', login_form=login_form, title=title)
# logout
@auth.route('/logout')
@login_required
def logout():
    """End the current session and return to the index page."""
    logout_user()
    return redirect(url_for("main.index"))
|
psavery/mongochemserver | girder/molecules/server/openbabel.py | from openbabel import OBMol, OBConversion
def convert_str(str_data, in_format, out_format):
    """Convert chemical structure data between formats using Open Babel.

    Args:
        str_data: Structure data as a string.
        in_format: Open Babel input format id (e.g. 'xyz').
        out_format: Open Babel output format id.

    Returns:
        Tuple of (converted string, output format's MIME type).
    """
    mol = OBMol()
    conv = OBConversion()
    conv.SetInFormat(in_format)
    conv.SetOutFormat(out_format)
    conv.ReadString(mol, str_data)
    return (conv.WriteString(mol), conv.GetOutFormat().GetMIMEType())
def to_inchi(str_data, in_format):
    """Compute the InChI and InChIKey for a structure.

    Returns:
        Tuple of (inchi, inchikey) strings, whitespace-stripped.
    """
    mol = OBMol()
    conv = OBConversion()
    conv.SetInFormat(in_format)
    # Hackish for now: normalise through xyz before generating InChI.
    conv.SetOutFormat('xyz')
    conv.ReadString(mol, str_data)
    xyz = conv.WriteString(mol)
    # Now convert to inchi and inchikey.
    mol = OBMol()
    conv.SetInFormat('xyz')
    conv.ReadString(mol, xyz)
    conv.SetOutFormat('inchi')
    inchi = conv.WriteString(mol).rstrip()
    # The "K" output option switches the inchi writer to emit the InChIKey.
    conv.SetOptions("K", conv.OUTOPTIONS)
    inchikey = conv.WriteString(mol).rstrip()
    return (inchi, inchikey)
def atom_count(str_data, in_format):
    """Return the number of atoms in the structure described by *str_data*."""
    mol = OBMol()
    conv = OBConversion()
    conv.SetInFormat(in_format)
    conv.ReadString(mol, str_data)
    return mol.NumAtoms()
psavery/mongochemserver | girder/molecules/plugin_tests/calculations_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2018 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the 'License' );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import pytest
import os
from pytest_girder.assertions import assertStatusOk, assertStatus
from . import calculation
from . import molecule
@pytest.mark.plugin('molecules')
def test_create_calc(server, molecule, user):
    """POST /calculations creates a calculation linked to the fixture molecule."""
    from girder.plugins.molecules.models.calculation import Calculation
    from girder.constants import AccessType
    assert '_id' in molecule
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # This cjson should match the molecule fixture (ethane).
    with open(dir_path + '/data/ethane.cjson', 'r') as rf:
        ethane_cjson = rf.read()
    # Some sample properties to attach to the calculation.
    properties = {
        'molecular mass': 30.0690,
        'melting point': -172,
        'boiling point': -88
    }
    body = {
        'cjson': ethane_cjson,
        'properties': properties,
        'moleculeId': molecule['_id']
    }
    r = server.request('/calculations', method='POST', body=json.dumps(body),
                       type='application/json', user=user)
    assertStatus(r, 201)  # created
    calc = r.json
    assert '_id' in calc
    assert 'moleculeId' in calc
    calc_id = str(calc['_id'])
    molecule_id = calc['moleculeId']
    # Load it straight from the model layer and cross-check the ids.
    calc2 = Calculation().load(calc_id, level=AccessType.READ, user=user)
    assert '_id' in calc2
    assert 'moleculeId' in calc2
    assert str(calc2['_id']) == calc_id
    assert str(calc2['moleculeId']) == molecule_id
@pytest.mark.plugin('molecules')
def test_get_calc(server, molecule, calculation, user):
    """GET /calculations finds the fixture calculation by molecule id and by id."""
    assert '_id' in calculation
    assert 'moleculeId' in calculation
    calc_id = str(calculation['_id'])
    calc_molecule_id = str(calculation['moleculeId'])
    # Find it by molecule id.
    params = {'moleculeId': calc_molecule_id}
    r = server.request('/calculations', method='GET', params=params, user=user)
    assertStatusOk(r)
    # Should be exactly one calculation for this molecule.
    assert len(r.json) == 1
    calc = r.json[0]
    assert '_id' in calc
    assert str(calc['_id']) == calc_id
    # Find it by its own id.
    r = server.request('/calculations/%s' % calc_id, method='GET', user=user)
    assertStatusOk(r)
    calc = r.json
    assert '_id' in calc
    assert str(calc['_id']) == calc_id
|
psavery/mongochemserver | girder/notebooks/server/__init__.py | import os
import glob
from bson.objectid import ObjectId
from girder import events
from girder.models.folder import Folder
from girder.models.upload import Upload
from girder.utility.path import lookUpPath
from .rest import Notebook
def createNotebooks(event):
    """Girder event handler: seed a new user's Private folder with notebooks.

    Creates user/<login>/Private/oc/notebooks and uploads every bundled
    *.ipynb from this package's notebooks directory into it.

    Args:
        event: Girder event whose `info` is the newly created user document.
    """
    user = event.info
    folder_model = Folder()
    result = lookUpPath('user/%s/Private' % user['login'], force=True)
    private_folder = result['document']
    oc_folder = folder_model.createFolder(private_folder, 'oc',
                                          parentType='folder',
                                          creator=user,
                                          public=True,
                                          reuseExisting=True)
    notebook_folder = folder_model.createFolder(oc_folder, 'notebooks',
                                                parentType='folder',
                                                creator=user,
                                                public=True,
                                                reuseExisting=True)
    # Bundled notebooks ship next to this module.
    notebooks_dir = os.path.join(os.path.dirname(__file__), 'notebooks')
    upload_model = Upload()
    for file in glob.glob('%s/*.ipynb' % notebooks_dir):
        size = os.path.getsize(file)
        name = os.path.basename(file)
        with open(file, 'rb') as fp:
            upload_model.uploadFromFile(
                fp, size=size, name=name, parentType='folder',
                parent={'_id': ObjectId(notebook_folder['_id'])}, user=user,
                mimeType='application/x-ipynb+json')
def load(info):
    """Girder plugin entry point: bind the user-created hook and mount the API."""
    events.bind('model.user.save.created', 'notebooks', createNotebooks)
    info['apiRoot'].notebooks = Notebook()
|
psavery/mongochemserver | girder/molecules/server/chemspider.py | import os
from chemspipy import ChemSpider
from girder.constants import TerminalColor
# Resolve the ChemSpider API key from the environment. Without it we can only
# fall back to molecular formulas instead of common names, so warn loudly.
# (Idiom: os.environ.get replaces the try/except KeyError dance; behavior is
# identical.)
chemspikey = os.environ.get('chemspikey')
if chemspikey is None:
    print(TerminalColor.warning('WARNING: chemspikey not set, common names will not be resolved.'))
def find_common_name(inchikey, formula):
    """Look up a compound's common name on ChemSpider.

    Falls back to *formula* when no API key is configured, the inchikey is
    empty, or the search does not return exactly one hit.
    """
    if not chemspikey:
        return formula
    if len(inchikey) == 0:
        return formula
    hits = ChemSpider(chemspikey).search(inchikey)
    if len(hits) == 1:
        return hits[0].common_name
    return formula
|
psavery/mongochemserver | girder/molecules/server/experiment.py | <reponame>psavery/mongochemserver
from girder.api.describe import Description
from girder.api.docs import addModel
from girder.api import access
from girder.api.rest import Resource
from girder.api.rest import RestException, getBodyJson, getCurrentUser, \
loadmodel
from girder.models.model_base import ModelImporter, ValidationException
from girder.constants import AccessType
from girder.plugins.molecules.models.experimental import Experimental
class Experiment(Resource):
    """REST resource exposing experimental data at /experiments."""

    def __init__(self):
        super(Experiment, self).__init__()
        self.resourceName = 'experiments'
        self.route('GET', (), self.find_experiment)
        self._model = self.model('experimental', 'molecules')

    @access.public
    def find_experiment(self, params):
        """List experiments, optionally filtered by molecularFormula.

        Query params: molecularFormula (optional), limit (default 50).
        Results are filtered per-user by the model's access rules.
        """
        user = getCurrentUser()
        query = {}
        if 'molecularFormula' in params:
            query['molecularFormula'] = params['molecularFormula']
        limit = int(params.get('limit', 50))
        experiments = self._model.find(query, limit=limit)
        return [self._model.filter(x, user) for x in experiments]
# API docs for the GET /experiments endpoint.
# Bug fix: the summary previously read "Get the calculation types available
# for the molecule" — a copy-paste from another endpoint; this route searches
# experiments.
find_experiment.description = (
    Description('Search for experiments, optionally by molecular formula')
    .param(
        'molecularFormula',
        'The molecular formula to search for experiments.',
        dataType='string', required=False, paramType='query')
    .param(
        'limit',
        'The max number of experiments to return',
        dataType='integer', paramType='query', default=50, required=False))
|
psavery/mongochemserver | girder/molecules/server/models/calculation.py | from jsonschema import validate, ValidationError
from girder.models.model_base import AccessControlledModel, ValidationException
from girder.constants import AccessType
class Calculation(AccessControlledModel):
    """Access-controlled model storing a chemical calculation.

    A document holds the calculation output as chemical JSON ('cjson',
    required by the schema below), derived 'properties', optional links to a
    molecule ('moleculeId') and source file ('fileId'), and the notebooks
    that reference it.
    """

    # jsonschema document schema: 'cjson' is the only required field; the
    # definitions describe per-mode animation frames and eigenvectors.
    schema = {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'type': 'object',
        'required': ['cjson'],
        'definitions': {
            'frame': {
                'type': 'array',
                'items': {
                    'type': 'number'
                }
            },
            'modeFrame': {
                'type': 'array',
                'items': {
                    '$ref': '#/definitions/frame'
                }
            },
            'eigenVector': {
                'type': 'array',
                'items': {
                    'type': 'number'
                }
            }
        }
    }

    def __init__(self):
        super(Calculation, self).__init__()

    def initialize(self):
        """Set the collection name, indexes, and readable fields."""
        self.name = 'calculations'
        self.ensureIndices([
            'moleculeId', 'calculationType', 'properties.functional',
            'properties.theory', 'properties.basisSet.name', 'properties.pending',
            'theoryPriority'
        ])
        self.exposeFields(level=AccessType.READ, fields=(
            '_id', 'moleculeId', 'fileId', 'properties', 'notebooks'))

    def filter(self, calc, user):
        """Filter a document for *user* and strip internal bookkeeping keys."""
        calc = super(Calculation, self).filter(doc=calc, user=user)
        del calc['_accessLevel']
        del calc['_modelType']
        return calc

    def validate(self, doc):
        """Validate against the JSON schema and resolve moleculeId to an ObjectId."""
        try:
            validate(doc, Calculation.schema)
        except ValidationError as ex:
            raise ValidationException(ex.message)
        # If we have a moleculeId, check that it refers to a real molecule.
        if 'moleculeId' in doc:
            mol = self.model('molecule', 'molecules').load(doc['moleculeId'],
                                                           force=True)
            doc['moleculeId'] = mol['_id']
        return doc

    def create_cjson(self, user, cjson, props, moleculeId=None, fileId=None,
                     public=False, notebooks=None):
        """Create and save a calculation document owned by *user*.

        Bug fix: `notebooks` previously defaulted to a mutable `[]`, which is
        shared across calls; `None` + a fresh list preserves the interface.
        """
        if notebooks is None:
            notebooks = []
        calc = {
            'cjson': cjson,
            'properties': props,
            'notebooks': notebooks
        }
        if moleculeId:
            calc['moleculeId'] = moleculeId
        if fileId:
            calc['fileId'] = fileId
        self.setUserAccess(calc, user=user, level=AccessType.ADMIN)
        if public:
            self.setPublic(calc, True)
        return self.save(calc)

    def add_notebooks(self, calc, notebooks):
        """Merge *notebooks* into the document's notebook set (no duplicates)."""
        query = {
            '_id': calc['_id']
        }
        update = {
            '$addToSet': {
                'notebooks': {
                    '$each': notebooks
                }
            }
        }
        super(Calculation, self).update(query, update)
|
psavery/mongochemserver | girder/molecules/server/models/cubecache.py | from jsonschema import validate, ValidationError
from bson.objectid import ObjectId
from girder.models.model_base import AccessControlledModel, ValidationException
from girder.constants import AccessType
class Cubecache(AccessControlledModel):
    """Cache of computed molecular-orbital cubes keyed by (calculationId, mo)."""

    def __init__(self):
        super(Cubecache, self).__init__()

    def initialize(self):
        """Set the collection name, indexes, and readable fields."""
        self.name = 'cubecache'
        self.ensureIndices(['calculationId', 'mo'])
        self.exposeFields(level=AccessType.READ, fields=(
            '_id', 'calculationId', 'mo', 'cjson'))

    def filter(self, calc, user):
        """Filter a document for *user* and strip internal bookkeeping keys."""
        # Bug fix: this called super(Calculation, self) — `Calculation` is not
        # defined in this module, so any call raised NameError at runtime.
        calc = super(Cubecache, self).filter(doc=calc, user=user)
        del calc['_accessLevel']
        del calc['_modelType']
        return calc

    def validate(self, doc):
        """Resolve calculationId to the referenced calculation's ObjectId."""
        # If we have a calculationId, check that it refers to a real document.
        if 'calculationId' in doc:
            calc = self.model('calculation', 'molecules').load(doc['calculationId'],
                                                               force=True)
            doc['calculationId'] = calc['_id']
        return doc

    def create(self, calcId, mo, cjson):
        """Save a cube for orbital *mo* of calculation *calcId* (public for now)."""
        cache = {
            'calculationId': calcId,
            'mo': mo,
            'cjson': cjson
        }
        # For now set as public.
        self.setPublic(cache, True)
        return self.save(cache)

    def find_mo(self, calcId, mo):
        """Return the cached cube for (calcId, mo), or None when absent."""
        query = {
            'calculationId': ObjectId(calcId),
            'mo': mo
        }
        cache = self.findOne(query)
        return cache
|
psavery/mongochemserver | girder/molecules/server/avogadro.py | from avogadro2 import *
import json
from jsonpath_rw import parse
def convert_str(str_data, in_format, out_format):
    """Convert structure data between formats using Avogadro 2.

    Returns the converted structure as a string.
    """
    mol = Molecule()
    conv = FileFormatManager()
    conv.readString(mol, str_data, in_format)
    return conv.writeString(mol, out_format)
def atom_count(str_data, in_format):
    """Return the number of atoms in the structure described by *str_data*."""
    mol = Molecule()
    conv = FileFormatManager()
    conv.readString(mol, str_data, in_format)
    return mol.atomCount()
def molecule_properties(str_data, in_format):
    """Compute basic molecular descriptors via Avogadro 2.

    Returns:
        Dict with atomCount, heavyAtomCount (non-hydrogen), mass,
        spacedFormula, and formula.
    """
    mol = Molecule()
    conv = FileFormatManager()
    conv.readString(mol, str_data, in_format)
    properties = {
        'atomCount': mol.atomCount(),
        # atomCount(1) counts hydrogens (atomic number 1).
        'heavyAtomCount': mol.atomCount() - mol.atomCount(1),
        'mass': mol.mass(),
        'spacedFormula': mol.formula(' ', 0),
        'formula': mol.formula('', 1)
    }
    return properties
# We expect JSON input here, using the NWChem format.
def calculation_properties(json_data):
    """Extract a flat property dict from an NWChem-style 'simulation' JSON dict.

    Pulls theory/type/functional, charge/spin/electron counts, environment
    info, and per-calculation energies. Returns {} when no calculations exist.
    """
    properties = {}
    if not 'simulation' in json_data:
        return properties
    calcs = json_data['simulation']['calculations']
    if not isinstance(calcs, list) or len(calcs) == 0:
        return properties
    firstCalc = calcs[0]
    waveFunctionTypes = {
        'Density Functional Theory': 'DFT',
        'Hartree-Fock': 'HF'}
    if 'calculationSetup' in firstCalc:
        setup = firstCalc['calculationSetup']
        # Use a lookup; probably needs to be extended to cover all types.
        # NOTE(review): an unrecognised waveFunctionTheory raises KeyError here.
        properties['theory'] = waveFunctionTypes[setup['waveFunctionTheory']].lower()
        properties['type'] = setup['waveFunctionType']
        calcName = properties['theory'] + ' (' + properties['type']
        if 'exchangeCorrelationFunctional' in setup:
            for piece in setup['exchangeCorrelationFunctional']:
                if 'xcName' in piece:
                    properties['functional'] = piece['xcName']
                    calcName += ' - ' + properties['functional']
        calcName += ')'
        properties['friendlyName'] = calcName
        if 'molecularCharge' in setup:
            properties['charge'] = setup['molecularCharge']
        if 'numberOfElectrons' in setup:
            properties['electronCount'] = setup['numberOfElectrons']
        if 'molecularSpinMultiplicity' in setup:
            properties['spin'] = setup['molecularSpinMultiplicity']
    if 'simulationEnvironment' in json_data['simulation']:
        env = json_data['simulation']['simulationEnvironment']
        properties['code'] = env['programRun']
        properties['codeVersion'] = env['programVersion']
        properties['processorCount'] = env['processorCount']
        properties['runDate'] = env['runDate']
    # Map NWChem calculation-type names onto our shorter vocabulary.
    calcTypes = {
        'energyCalculation': 'energy',
        'geometryOptimization': 'optimization',
        'vibrationalModes': 'vibrational',
        'molecularProperties': 'properties'}
    calculationTypes = []
    calculations = []
    for calc in calcs:
        if 'calculationType' in calc:
            calculationTypes.append(calcTypes[calc['calculationType']])
            calc_obj = {
                'type': calcTypes[calc['calculationType']]
            }
            # jsonpath lookups tolerate missing keys (empty match list).
            total_energy = parse('calculationResults.totalEnergy').find(calc)
            if total_energy:
                calc_obj['totalEnergy'] = total_energy[0].value
            zero_point_energy = \
                parse('calculationResults.zeroPointEnergyCorrection').find(calc)
            if zero_point_energy:
                calc_obj['zeroPointEnergyCorrection'] = \
                    zero_point_energy[0].value
            calculations.append(calc_obj)
    properties['calculationTypes'] = calculationTypes
    properties['calculations'] = calculations
    return properties
# This is far from ideal as it is a CPU intensive task blocking the main thread.
def calculate_mo(json_str, mo):
    """Compute a molecular-orbital cube for orbital index *mo*.

    Args:
        json_str: Molecule as a (c)json string.
        mo: Molecular orbital index to evaluate.

    Returns:
        The molecule (with the new cube) as a parsed cjson dict.
    """
    mol = Molecule()
    conv = FileFormatManager()
    conv.readString(mol, json_str, 'json')
    # Do some scaling of our grid spacing based on the size of the molecule:
    # coarser grids for bigger systems keep the cost bounded.
    atomCount = mol.atomCount()
    spacing = 0.30
    if atomCount > 50:
        spacing = 0.5
    elif atomCount > 30:
        spacing = 0.4
    elif atomCount > 10:
        spacing = 0.33
    cube = mol.addCube()
    # Hard-wiring spacing/padding for now; this could be exposed in future too.
    cube.setLimits(mol, spacing, 4)
    gaussian = GaussianSetTools(mol)
    gaussian.calculateMolecularOrbital(cube, mo)
    return json.loads(conv.writeString(mol, "cjson"))
|
psavery/mongochemserver | girder/molecules/server/models/molecule.py | <filename>girder/molecules/server/models/molecule.py
# -*- coding: utf-8 -*-
import re
from girder.models.model_base import AccessControlledModel, ValidationException
from girder.constants import AccessType
class Molecule(AccessControlledModel):
    """Access-controlled model storing molecules, searchable by name/InChI/formula."""

    def __init__(self):
        super(Molecule, self).__init__()
        self.ensureIndex('properties.formula')

    def initialize(self):
        self.name = 'molecules'

    def validate(self, doc):
        # No schema validation; documents are accepted as-is.
        return doc

    def findmol(self, search=None):
        """Return a summary list ({id, inchikey, name}) of matching molecules.

        Supported search keys: 'name' (case-insensitive prefix),
        'inchi', 'inchikey'.
        """
        query = {}
        if search:
            if 'name' in search:
                # NOTE(review): the name is interpolated into a regex without
                # re.escape, so regex metacharacters in user input alter the
                # match (potential ReDoS) — consider escaping.
                query['name'] = {'$regex': '^' + search['name'], '$options': 'i'}
            if 'inchi' in search:
                query['inchi'] = search['inchi']
            if 'inchikey' in search:
                query['inchikey'] = search['inchikey']
        cursor = self.find(query)
        mols = list()
        for mol in cursor:
            molecule = {'id': mol['_id'], 'inchikey': mol.get('inchikey'),
                        'name': mol.get('name')}
            mols.append(molecule)
        return mols

    def find_inchi(self, inchi):
        """Return the molecule with this InChI, or None."""
        query = {'inchi': inchi}
        mol = self.findOne(query)
        return mol

    def find_inchikey(self, inchikey):
        """Return the molecule with this InChIKey, or None."""
        query = {'inchikey': inchikey}
        mol = self.findOne(query)
        return mol

    def find_formula(self, formula, user):
        """Return molecules matching *formula* (case-insensitive, exact) readable by *user*."""
        formula_regx = re.compile('^%s$' % formula, re.IGNORECASE)
        query = {
            'properties.formula': formula_regx
        }
        mols = self.find(query)
        return self.filterResultsByPermission(mols, user, level=AccessType.READ)

    def create(self, user, inchi, public=False):
        """Create and save a molecule from an InChI string, owned by *user*."""
        mol = {'inchi': inchi}
        self.setUserAccess(mol, user=user, level=AccessType.ADMIN)
        if public:
            self.setPublic(mol, True)
        self.save(mol)
        return mol

    def create_xyz(self, user, mol, public=False):
        """Save an externally built molecule document, owned by *user*."""
        self.setUserAccess(mol, user=user, level=AccessType.ADMIN)
        if public:
            self.setPublic(mol, True)
        self.save(mol)
        return mol

    def delete_inchi(self, user, inchi):
        """Delete the molecule with this InChI; False when it does not exist."""
        mol = self.find_inchi(inchi)
        if not mol:
            return False
        else:
            return self.remove(mol)

    def update(self, mol):
        # NOTE(review): this shadows AccessControlledModel.update (which takes
        # a query + update spec) with a simple save — confirm intentional.
        self.save(mol)
        return mol

    def add_notebooks(self, mol, notebooks):
        """Merge *notebooks* into the document's notebook set (no duplicates)."""
        query = {
            '_id': mol['_id']
        }
        update = {
            '$addToSet': {
                'notebooks': {
                    '$each': notebooks
                }
            }
        }
        super(Molecule, self).update(query, update)
|
psavery/mongochemserver | girder/molecules/server/__init__.py | <gh_stars>0
# -*- coding: utf-8 -*-
from .molecule import Molecule
from .calculation import Calculation
from .experiment import Experiment
from girder import events
from girder.models.model_base import ValidationException
from .constants import PluginSettings
def validateSettings(event):
    """Girder settings hook: accept this plugin's settings without extra checks.

    preventDefault/stopPropagation short-circuits Girder's default validation
    (which would reject unknown setting keys).
    """
    if event.info['key'] == PluginSettings.VIRTUOSO_BASE_URL or \
       event.info['key'] == PluginSettings.VIRTUOSO_RDF_UPLOAD_PATH or \
       event.info['key'] == PluginSettings.VIRTUOSO_USER or \
       event.info['key'] == PluginSettings.SEMANTIC_URI_BASE or \
       event.info['key'] == PluginSettings.VIRTUOSO_PASSWORD:
        event.preventDefault().stopPropagation()
def load(info):
    """Girder plugin entry point: mount the REST resources and bind hooks."""
    info['apiRoot'].molecules = Molecule()
    info['apiRoot'].calculations = Calculation()
    info['apiRoot'].experiments = Experiment()
    events.bind('model.setting.validate', 'molecules', validateSettings)
|
psavery/mongochemserver | girder/molecules/plugin_tests/molecules_test.py | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2018 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import os
import pytest
from pytest_girder.assertions import assertStatusOk
from . import molecule
@pytest.mark.plugin('molecules')
def test_create_molecule(server, user):
    """POST /molecules creates a molecule from xyz data; then delete it."""
    from girder.plugins.molecules.models.molecule import Molecule
    from girder.constants import AccessType
    dir_path = os.path.dirname(os.path.realpath(__file__))
    with open(dir_path + '/data/ethane.xyz', 'r') as rf:
        xyzData = rf.read()
    body = {
        'name': 'ethane',
        'xyz': xyzData
    }
    r = server.request('/molecules', method='POST', type='application/json',
                       body=json.dumps(body), user=user)
    assertStatusOk(r)
    mol = r.json
    assert '_id' in mol
    assert 'inchi' in mol
    assert 'inchikey' in mol
    # Double check via the model layer and make sure it exists.
    id = mol['_id']
    mol2 = Molecule().load(id, level=AccessType.READ, user=user)
    assert '_id' in mol2
    assert 'inchi' in mol2
    assert 'inchikey' in mol2
    # id, inchi, and inchikey should match.
    assert str(mol['_id']) == str(mol2['_id'])
    assert mol['inchi'] == mol2['inchi']
    assert mol['inchikey'] == mol2['inchikey']
    # Clean up: delete the molecule.
    r = server.request('/molecules/%s' % id, method='DELETE', user=user)
    assertStatusOk(r)
@pytest.mark.plugin('molecules')
def test_get_molecule(server, molecule, user):
    """The fixture molecule must be retrievable by name, inchi, and inchikey.

    Refactor: the three copy-pasted lookup stanzas are collapsed into one
    local helper; assertions are unchanged.
    """
    # The molecule will have been created by the fixture.
    assert '_id' in molecule
    assert 'inchi' in molecule
    assert 'inchikey' in molecule
    # This one is not essential, but we set it ourselves.
    assert 'name' in molecule
    _id = molecule['_id']
    inchi = molecule['inchi']
    inchikey = molecule['inchikey']
    name = molecule['name']

    def _assert_single_match(params):
        # Each search must return exactly the one fixture molecule.
        r = server.request('/molecules', method='GET', params=params, user=user)
        assertStatusOk(r)
        assert len(r.json) == 1
        mol = r.json[0]
        assert mol.get('id') == _id
        assert mol.get('inchikey') == inchikey
        assert mol.get('name') == name

    _assert_single_match({'name': name})
    _assert_single_match({'inchi': inchi})
    _assert_single_match({'inchikey': inchikey})
|
class PluginSettings:
    """Setting keys used by the molecules plugin (Virtuoso + semantic URIs)."""
    VIRTUOSO_BASE_URL = 'molecules.virtuoso.base_url'
    VIRTUOSO_RDF_UPLOAD_PATH = 'molecules.virtuoso.rdf_upload_path'
    VIRTUOSO_USER = 'molecules.virtuoso.user'
    # NOTE(review): '<PASSWORD>' looks like a redacted placeholder rather than
    # the real setting key — confirm the intended value.
    VIRTUOSO_PASSWORD = '<PASSWORD>'
    SEMANTIC_URI_BASE = 'molecules.semantic.url_base'
# Relative ranking of levels of theory; higher means more accurate/expensive.
theory_priority = {
    'mm': 10,    # molecular mechanics
    'mp7': 20,   # semi-empirical
    'rhf': 30,   # hartree fock, etc.
    'scf': 30,   # hartree fock, etc.
    'dft': 40,
    'mp2': 100,
    'ccsd': 200  # coupled cluster
}
|
psavery/mongochemserver | girder/molecules/server/semantic/cheminf.py | <filename>girder/molecules/server/semantic/cheminf.py
from rdflib import Namespace, Graph, URIRef
from rdflib.term import BNode, Literal
from rdflib.namespace import RDF, OWL, NamespaceManager
cheminf = Namespace('http://semanticscience.org/resource/')
def create_molecule_graph(uri_base, mol):
    """Build an RDF graph describing *mol* with CHEMINF terms.

    Args:
        uri_base: Base URL of this server, used to mint the molecule URI.
        mol: Molecule document; must carry 'inchi', 'name', and '_id'.

    Returns:
        The serialized RDF graph (rdflib default serialization).
    """
    mongochem = Namespace('%s/api/v1/molecules/' % uri_base)
    g = Graph()
    inchi = mol['inchi']
    name = mol['name']
    inchi_node = BNode()  # anonymous node holding the InChI literal
    molecule = URIRef(mongochem[mol['_id']])
    namespace_manager = NamespaceManager(g)
    namespace_manager.bind('cheminf', cheminf, override=False)
    namespace_manager.bind('mongochem', mongochem, override=False)
    namespace_manager.bind('owl', OWL, override=False)
    g.add((molecule, OWL.subClassOf, cheminf.CHEMINF_000000))
    g.add((molecule, OWL.label, Literal(name.lower())))
    g.add((inchi_node, RDF.type, cheminf.CHEMINF_000113))
    g.add((inchi_node, cheminf.SIO_000300, Literal(inchi)))
    g.add((molecule, cheminf.CHEMINF_000200, inchi_node))
    return g.serialize()
|
psavery/mongochemserver | girder/molecules/server/semantic/__init__.py | from . import gainesville
from . import cheminf
from . import virtuoso
from girder.utility.model_importer import ModelImporter
from girder.plugins.molecules.constants import PluginSettings
def upload_molecule(mol):
    """Serialize *mol* under each ontology and push the RDF to Virtuoso."""
    setting_model = ModelImporter.model('setting')
    base = setting_model.get(PluginSettings.SEMANTIC_URI_BASE,
                             'http://openchemistry.kitware.com')
    base = base.rstrip('/')
    # One upload per ontology flavor, keyed by a suffixed document id.
    for module, suffix in ((gainesville, 'gainesville'), (cheminf, 'cheminf')):
        graph = module.create_molecule_graph(base, mol)
        virtuoso.upload_rdf('%s_%s' % (mol['_id'], suffix), graph)
|
psavery/mongochemserver | girder/molecules/server/calculation.py | import cherrypy
import functools
from jsonpath_rw import parse
from bson.objectid import ObjectId
import json
from girder.api.describe import Description, autoDescribeRoute
from girder.api.docs import addModel
from girder.api import access
from girder.api.rest import Resource
from girder.api.rest import RestException, getBodyJson, getCurrentUser, \
loadmodel
from girder.models.model_base import ModelImporter, ValidationException
from girder.constants import AccessType
from girder.utility import toBool
from girder.plugins.molecules.models.calculation import Calculation as CalculationModel
from . import avogadro
from .molecule import Molecule
import pymongo
class Calculation(Resource):
    """REST resource exposing quantum-chemistry calculations."""
    # Formats accepted/produced by the structure conversion endpoints.
    output_formats = ['cml', 'xyz', 'inchikey', 'sdf']
    input_formats = ['cml', 'xyz', 'pdb']
    def __init__(self):
        """Register routes and cache the plugin's model accessors."""
        super(Calculation, self).__init__()
        self.resourceName = 'calculations'
        self.route('POST', (), self.create_calc)
        self.route('GET', (), self.find_calc)
        self.route('GET', ('types',), self.find_calc_types)
        self.route('GET', (':id', 'vibrationalmodes'),
                   self.get_calc_vibrational_modes)
        self.route('GET', (':id', 'vibrationalmodes', ':mode'),
                   self.get_calc_vibrational_mode)
        self.route('GET', (':id', 'sdf'),
                   self.get_calc_sdf)
        self.route('GET', (':id', 'cjson'),
                   self.get_calc_cjson)
        self.route('GET', (':id', 'xyz'),
                   self.get_calc_xyz)
        self.route('GET', (':id', 'cube', ':mo'),
                   self.get_calc_cube)
        self.route('GET', (':id',),
                   self.find_id)
        self.route('PUT', (':id', 'properties'),
                   self.update_properties)
        self.route('PATCH', (':id', 'notebooks'), self.add_notebooks)
        # Models for calculations and for cached MO cube data.
        self._model = self.model('calculation', 'molecules')
        self._cube_model = self.model('cubecache', 'molecules')
@access.public
def get_calc_vibrational_modes(self, id, params):
fields = ['cjson..vibrations.modes', 'cjson.vibrations.intensities',
'cjson.vibrations.frequencies', 'access']
calc = self._model.load(id, fields=fields, user=getCurrentUser(),
level=AccessType.READ)
del calc['access']
return calc['cjson']['vibrations']
get_calc_vibrational_modes.description = (
Description('Get the vibrational modes associated with a calculation')
.param(
'id',
'The id of the calculation to get the modes from.',
dataType='string', required=True, paramType='path'))
@access.public
def get_calc_vibrational_mode(self, id, mode, params):
try:
mode = int(mode)
except ValueError:
raise ValidationException('mode number be an integer', 'mode')
fields = ['cjson.vibrations.modes', 'access']
calc = self._model.load(id, fields=fields, user=getCurrentUser(),
level=AccessType.READ)
vibrational_modes = calc['cjson.vibrations']
#frames = vibrational_modes.get('modeFrames')
modes = vibrational_modes.get('modes', [])
index = modes.index(mode)
if index < 0:
raise RestException('No such vibrational mode', 400)
# Now select the modeFrames directly this seems to be more efficient
# than iterating in Python
query = {
'_id': calc['_id']
}
projection = {
'cjson.vibrations.frequencies': {
'$slice': [index, 1]
},
'cjson.vibrations.intensities': {
'$slice': [index, 1]
},
'cjson.vibrations.eigenVectors': {
'$slice': [index, 1]
}
}
mode = self._model.findOne(query, fields=projection)
return mode
get_calc_vibrational_mode.description = (
Description('Get a vibrational mode associated with a calculation')
.param(
'id',
'The id of the calculation that the mode is associated with.',
dataType='string', required=True, paramType='path')
.param(
'mode',
'The index of the vibrational model to get.',
dataType='string', required=True, paramType='path'))
@access.public
@loadmodel(model='calculation', plugin='molecules', level=AccessType.READ)
def get_calc_sdf(self, calculation, params):
def stream():
cherrypy.response.headers['Content-Type'] = 'chemical/x-mdl-sdfile'
yield calculation['sdf']
return stream
get_calc_sdf.description = (
Description('Get the molecular structure of a give calculation in SDF format')
.param(
'id',
'The id of the calculation to return the structure for.',
dataType='string', required=True, paramType='path'))
@access.public
@loadmodel(model='calculation', plugin='molecules', level=AccessType.READ)
def get_calc_cjson(self, calculation, params):
return calculation['cjson']
get_calc_cjson.description = (
Description('Get the molecular structure of a give calculation in CJSON format')
.param(
'id',
'The id of the calculation to return the structure for.',
dataType='string', required=True, paramType='path'))
@access.public
@loadmodel(model='calculation', plugin='molecules', level=AccessType.READ)
def get_calc_xyz(self, calculation, params):
data = json.dumps(calculation['cjson'])
data = avogadro.convert_str(data, 'cjson', 'xyz')
def stream():
cherrypy.response.headers['Content-Type'] = Molecule.mime_types['xyz']
yield data
return stream
get_calc_xyz.description = (
Description('Get the molecular structure of a give calculation in XYZ format')
.param(
'id',
'The id of the calculation to return the structure for.',
dataType='string', required=True, paramType='path'))
@access.public
def get_calc_cube(self, id, mo, params):
try:
mo = int(mo)
except ValueError:
# Check for homo lumo
mo = mo.lower()
if mo in ['homo', 'lumo']:
cal = self._model.load(id, force=True)
electron_count = parse('cjson.basisSet.electronCount').find(cal)
if electron_count:
electron_count = electron_count[0].value
else:
# Look here as well.
electron_count = parse('properties.electronCount').find(cal)
if electron_count:
electron_count = electron_count[0].value
else:
raise RestException('Unable to access electronCount', 400)
if mo == 'homo':
mo = int(electron_count / 2)
elif mo == 'lumo':
mo = int(electron_count / 2 + 1)
else:
raise ValidationException('mo number be an integer or \'homo\'/\'lumo\'', 'mode')
cached = self._cube_model.find_mo(id, mo)
# If we have a cached cube file use that.
if cached:
return cached['cjson']
fields = ['cjson', 'access', 'fileId']
# Ignoring access control on file/data for now, all public.
calc = self._model.load(id, fields=fields, force=True)
file_id = calc['fileId']
file = self.model('file').load(file_id, force=True)
parts = file['name'].split('.')
input_format = parts[-1]
name = '.'.join(parts[:-1])
with self.model('file').open(file) as fp:
data_str = fp.read().decode()
# This is where the cube gets calculated, should be cached in future.
cjson = avogadro.calculate_mo(data_str, mo)
# Remove the vibrational mode data from the cube - big, not needed here.
if 'vibrations' in cjson:
del cjson['vibrations']
# Cache this cube for the next time, they can take a while to generate.
self._cube_model.create(id, mo, cjson)
return cjson
get_calc_cube.description = (
Description('Get the cube for the supplied MO of the calculation in CJSON format')
.param(
'id',
'The id of the calculation to return the structure for.',
dataType='string', required=True, paramType='path')
.param(
'mo',
'The molecular orbital to get the cube for.',
dataType='string', required=True, paramType='path'))
@access.user
def create_calc(self, params):
body = getBodyJson()
self.requireParams(['cjson'], body)
user = getCurrentUser()
cjson = body['cjson']
props = body.get('properties', {})
moleculeId = body.get('moleculeId', None)
public = body.get('public', False)
notebooks = body.get('notebooks', [])
calc = self._model.create_cjson(user, cjson, props, moleculeId,
notebooks=notebooks, public=public)
cherrypy.response.status = 201
cherrypy.response.headers['Location'] \
= '/molecules/%s/calc/%s' % (id, str(calc['_id']))
return self._model.filter(calc, user)
# Try and reuse schema for documentation, this only partially works!
calc_schema = CalculationModel.schema.copy()
calc_schema['id'] = 'CalculationData'
addModel('Calculation', 'CalculationData', calc_schema)
create_calc.description = (
Description('Get the molecular structure of a give calculation in SDF format')
.param(
'body',
'The calculation data', dataType='CalculationData', required=True,
paramType='body'))
    @access.public
    def find_calc(self, params):
        """Search calculations by molecule, type, theory, functional, basis.

        Optionally sorts by properties.theoryPriority (best first) and pads
        the result with unsortable calculations up to the limit.
        """
        user = getCurrentUser()
        query = { }
        if 'moleculeId' in params:
            query['moleculeId'] = ObjectId(params['moleculeId'])
        if 'calculationType' in params:
            calculation_type = params['calculationType']
            if not isinstance(calculation_type, list):
                calculation_type = [calculation_type]
            # Every requested type must be present on the calculation.
            query['properties.calculationTypes'] = {
                '$all': calculation_type
            }
        if 'functional' in params:
            query['properties.functional'] = params.get('functional').lower()
        if 'theory' in params:
            query['properties.theory'] = params.get('theory').lower()
        if 'basis' in params:
            query['properties.basisSet.name'] = params.get('basis').lower()
        if 'pending' in params:
            pending = toBool(params['pending'])
            query['properties.pending'] = pending
            # The absence of the field mean the calculation is not pending ...
            if not pending:
                query['properties.pending'] = {
                    '$ne': True
                }
        limit = params.get('limit', 50)
        fields = ['cjson.vibrations.modes', 'cjson.vibrations.intensities',
                  'cjson.vibrations.frequencies', 'properties', 'fileId', 'access', 'public']
        sort = None
        sort_by_theory = toBool(params.get('sortByTheory', False))
        if sort_by_theory:
            sort = [('properties.theoryPriority', pymongo.DESCENDING)]
            # Exclude calculations that don't have a theoryPriority,
            # otherwise they will appear first in the list.
            query['properties.theoryPriority'] = { '$exists': True }
        calcs = self._model.find(query, fields=fields, sort=sort)
        calcs = self._model.filterResultsByPermission(calcs, user,
                                                      AccessType.READ, limit=int(limit))
        calcs = [self._model.filter(x, user) for x in calcs]
        not_sortable = []
        if sort_by_theory and len(calcs) < int(limit):
            # Now select any calculations without theoryPriority
            query['properties.theoryPriority'] = { '$exists': False }
            not_sortable = self._model.find(query, fields=fields)
            not_sortable = self._model.filterResultsByPermission(not_sortable, user,
                                                                 AccessType.READ, limit=int(limit) - len(calcs))
            not_sortable = [self._model.filter(x, user) for x in not_sortable]
        return calcs + not_sortable
    find_calc.description = (
        Description('Search for particular calculation')
        .param(
            'moleculeId',
            'The moleculeId the calculations should be associated with',
            dataType='string', paramType='query', required=False)
        .param(
            'calculationType',
            'The type or types of calculation being searched for',
            dataType='string', paramType='query', required=False)
        .param(
            'basis',
            'The basis set used for the calculations.',
            dataType='string', paramType='query', required=False)
        .param(
            'functional',
            'The functional used for the calculations.',
            dataType='string', paramType='query', required=False)
        .param(
            'theory',
            'The theory used for the calculations.',
            dataType='string', paramType='query', required=False)
        .param(
            'pending',
            'Whether the calculation is currently running.',
            dataType='boolean', paramType='query', required=False)
        .param(
            'limit',
            'The max number of calculations to return',
            dataType='integer', paramType='query', default=50, required=False)
        .param(
            'sortByTheory',
            'Sort the result by theory "priority", "best" first.',
            dataType='boolean', paramType='query', default=False, required=False))
@access.public
def find_id(self, id, params):
cal = self._model.load(id, level=AccessType.READ, user=getCurrentUser())
if not cal:
raise RestException('Calculation not found.', code=404)
return cal
find_id.description = (
Description('Get the calculation by id')
.param(
'id',
'The id of calculatino.',
dataType='string', required=True, paramType='path'))
@access.public
def find_calc_types(self, params):
fields = ['access', 'properties.calculationTypes']
query = { }
if 'moleculeId' in params:
query['moleculeId'] = ObjectId(params['moleculeId'])
calcs = self._model.find(query, fields=fields)
allTypes = []
for types in calcs:
calc_types = parse('properties.calculationTypes').find(types)
if calc_types:
calc_types = calc_types[0].value
allTypes.extend(calc_types)
typeSet = set(allTypes)
return list(typeSet)
find_calc_types.description = (
Description('Get the calculation types available for the molecule')
.param(
'moleculeId',
'The id of the molecule we are finding types for.',
dataType='string', required=True, paramType='query'))
@access.token
@autoDescribeRoute(
Description('Update the calculation properties.')
.notes('Override the exist properties')
.modelParam('id', 'The ID of the calculation.', model='calculation',
plugin='molecules', level=AccessType.ADMIN)
.param('body', 'The new set of properties', paramType='body')
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the calculation.', 403)
)
def update_properties(self, calculation, params):
props = getBodyJson()
calculation['properties'] = props
calculation = self._model.save(calculation)
return calculation
@access.user
@autoDescribeRoute(
Description('Add notebooks ( file ids ) to molecule.')
.modelParam('id', 'The calculation id',
model=CalculationModel, destName='calculation',
force=True, paramType='path')
.jsonParam('notebooks', 'List of notebooks', required=True, paramType='body')
)
def add_notebooks(self, calculation, notebooks):
notebooks = notebooks.get('notebooks')
if notebooks is not None:
CalculationModel().add_notebooks(calculation, notebooks)
|
psavery/mongochemserver | girder/molecules/plugin_tests/__init__.py | <reponame>psavery/mongochemserver
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2018 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pytest
import os
# Our method for creating a molecule
@pytest.fixture
def molecule(user):
    """Yield an ethane molecule document created from the test XYZ data.

    The document mimics what the molecule REST resource returns (no
    'access' field, stringified '_id'); the molecule is removed on teardown.
    """
    from girder.plugins.molecules.models.molecule import Molecule
    from girder.plugins.molecules import openbabel
    dir_path = os.path.dirname(os.path.realpath(__file__))
    with open(dir_path + '/data/ethane.xyz', 'r') as rf:
        xyzData = rf.read()
    input_format = 'xyz'
    data = xyzData
    name = 'ethane'
    (inchi, inchikey) = openbabel.to_inchi(data, input_format)
    mol = {
        'inchi': inchi,
        'inchikey': inchikey,
        'name': name
    }
    mol = Molecule().create_xyz(user, mol, public=False)
    # These are normally performed in the molecule resource _clean() function
    del mol['access']
    mol['_id'] = str(mol['_id'])
    yield mol
    # Delete mol
    Molecule().remove(mol)
# Our method for creating a calculation
@pytest.fixture
def calculation(user, molecule):
    """Yield a calculation attached to the *molecule* fixture.

    Built from the matching ethane CJSON file plus a small set of example
    properties; the calculation is removed on teardown.
    """
    from girder.plugins.molecules.models.calculation import Calculation
    assert '_id' in molecule
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # This cjson should match the molecule
    with open(dir_path + '/data/ethane.cjson', 'r') as rf:
        ethane_cjson = rf.read()
    # Let's make some properties
    properties = {
        "molecular mass": 30.0690,
        "melting point": -172,
        "boiling point": -88
    }
    _calc = Calculation().create_cjson(user, ethane_cjson, properties,
                                       molecule['_id'], notebooks=[],
                                       public=False)
    calc = Calculation().filter(_calc, user)
    yield calc
    # Delete calc
    Calculation().remove(calc)
|
psavery/mongochemserver | girder/molecules/server/molecule.py | import cherrypy
import json
import os
import functools
import requests
from jsonpath_rw import parse
from girder.api.describe import Description, autoDescribeRoute
from girder.api.docs import addModel
from girder.api.rest import Resource
from girder.api.rest import RestException, loadmodel, getCurrentUser
from girder.api import access
from girder.constants import AccessType
from girder.constants import TerminalColor
from . import avogadro
from . import openbabel
from . import chemspider
from . import query
from . import semantic
from . import constants
from girder.plugins.molecules.models.molecule import Molecule as MoleculeModel
class Molecule(Resource):
    """REST resource for molecule documents (create, search, convert)."""
    # Formats the conversion endpoints can emit / accept.
    output_formats = ['cml', 'xyz', 'inchikey', 'sdf', 'cjson']
    input_formats = ['cml', 'xyz', 'sdf', 'cjson', 'json', 'log', 'nwchem', 'pdb']
    # Content-Type header used when streaming each output format.
    mime_types = {
        'cml': 'chemical/x-cml',
        'xyz': 'chemical/x-xyz',
        'sdf': 'chemical/x-mdl-sdfile',
        'cjson': 'application/json'
    }
    def __init__(self):
        """Register routes and cache the plugin's model accessors."""
        super(Molecule, self).__init__()
        self.resourceName = 'molecules'
        self.route('GET', (), self.find)
        self.route('GET', ('inchikey', ':inchikey'), self.find_inchikey)
        self.route('GET', (':id', ':output_format'), self.get_format)
        self.route('GET', (':id', ), self.find_id)
        self.route('GET', ('search',), self.search)
        self.route('POST', (), self.create)
        self.route('DELETE', (':id',), self.delete)
        self.route('PATCH', (':id',), self.update)
        self.route('PATCH', (':id', 'notebooks'), self.add_notebooks)
        self.route('POST', ('conversions', ':output_format'), self.conversions)
        self._model = self.model('molecule', 'molecules')
        self._calc_model = self.model('calculation', 'molecules')
def _clean(self, doc):
del doc['access']
if 'sdf' in doc:
del doc['sdf']
doc['_id'] = str(doc['_id'])
if 'cjson' in doc:
if 'basisSet' in doc['cjson']:
del doc['cjson']['basisSet']
if 'vibrations' in doc['cjson']:
del doc['cjson']['vibrations']
return doc
    @access.public
    def find(self, params):
        """Delegate molecule search (name/inchi/inchikey) to the model."""
        return self._model.findmol(params)
    find.description = (
        Description('Find a molecule.')
        .param('name', 'The name of the molecule', paramType='query',
               required=False)
        .param('inchi', 'The InChI of the molecule', paramType='query',
               required=False)
        .param('inchikey', 'The InChI key of the molecule', paramType='query',
               required=False)
        .errorResponse())
@access.public
def find_inchikey(self, inchikey, params):
mol = self._model.find_inchikey(inchikey)
if not mol:
raise RestException('Molecule not found.', code=404)
return self._clean(mol)
find_inchikey.description = (
Description('Find a molecule by InChI key.')
.param('inchi', 'The InChI key of the molecule', paramType='path')
.errorResponse()
.errorResponse('Molecule not found.', 404))
@access.public
def find_id(self, id, params):
mol = self._model.load(id, level=AccessType.READ, user=getCurrentUser())
if not mol:
raise RestException('Molecule not found.', code=404)
return self._clean(mol)
def _process_experimental(self, doc):
facility_used = parse('experiment.experimentalEnvironment.facilityUsed').find(doc)[0].value
experiments = parse('experiment.experiments').find(doc)[0].value
experiment_model = self.model('experimental', 'molecules')
experiments_list = []
for experiment in experiments:
spectrum_type = experiment['spectrumType']
experimental_technique = experiment['experimentalTechnique']
id = experiment['id']
molecular_formula = experiment['molecularFormula']
instenisty_units = parse('measuredSpectrum.unitsY').find(experiment)[0].value
frequency_units = parse('measuredSpectrum.unitsX').find(experiment)[0].value
data_points = parse('measuredSpectrum.dataPoints').find(experiment)[0].value
frequencies = data_points[::2]
intensities = data_points[1::2]
measured_spectrum = {
'frequencies': {
'units': frequency_units,
'values': frequencies
},
'intensities': {
'units': instenisty_units,
'values': intensities
}
}
experiments_list.append(experiment_model.create(
facility_used, spectrum_type, experimental_technique, id,
molecular_formula, measured_spectrum))
return experiments_list
    @access.user
    def create(self, params):
        """Create a molecule from a file, raw xyz/sdf data, or an InChI.

        When the uploaded file carries calculation data (vibrations or a
        basis set) a calculation document is created/updated as well.
        """
        body = self.getBodyJson()
        user = self.getCurrentUser()
        public = body.get('public', False)
        if 'fileId' in body:
            file_id = body['fileId']
            calc_id = body.get('calculationId')
            file = self.model('file').load(file_id, user=user)
            parts = file['name'].split('.')
            input_format = parts[-1]
            name = '.'.join(parts[:-1])
            if input_format not in Molecule.input_formats:
                raise RestException('Input format not supported.', code=400)
            # Concatenate the download generator's chunks into one payload.
            contents = functools.reduce(lambda x, y: x + y, self.model('file').download(file, headers=False)())
            data_str = contents.decode()
            # For now piggy backing experimental results upload here!
            # This should be refactored ...
            json_data = json.loads(data_str)
            if 'experiment' in json_data:
                return self._process_experimental(json_data)
            # Use the SDF format as it is the one with bonding that 3Dmol uses.
            output_format = 'sdf'
            if input_format == 'pdb':
                (output, _) = openbabel.convert_str(data_str, input_format, output_format)
            else:
                output = avogadro.convert_str(data_str, input_format, output_format)
            # Get some basic molecular properties we want to add to the database.
            props = avogadro.molecule_properties(data_str, input_format)
            # spacedFormula alternates element symbol and count, e.g. 'C 2 H 6'.
            pieces = props['spacedFormula'].strip().split(' ')
            atomCounts = {}
            for i in range(0, int(len(pieces) / 2)):
                atomCounts[pieces[2 * i ]] = int(pieces[2 * i + 1])
            cjson = []
            if input_format == 'cjson':
                cjson = json.loads(data_str)
            elif input_format == 'pdb':
                cjson = json.loads(avogadro.convert_str(output, 'sdf', 'cjson'))
            else:
                cjson = json.loads(avogadro.convert_str(data_str, input_format,
                                                        'cjson'))
            atom_count = openbabel.atom_count(data_str, input_format)
            if atom_count > 1024:
                raise RestException('Unable to generate inchi, molecule has more than 1024 atoms .', code=400)
            (inchi, inchikey) = openbabel.to_inchi(output, 'sdf')
            if not inchi:
                raise RestException('Unable to extract inchi', code=400)
            # Check if the molecule exists; only create it if it does not.
            molExists = self._model.find_inchikey(inchikey)
            mol = {}
            if molExists:
                mol = molExists
            else:
                # Whitelist parts of the CJSON that we store at the top level.
                cjsonmol = {}
                cjsonmol['atoms'] = cjson['atoms']
                cjsonmol['bonds'] = cjson['bonds']
                cjsonmol['chemical json'] = cjson['chemical json']
                mol = self._model.create_xyz(user, {
                    'name': chemspider.find_common_name(inchikey, props['formula']),
                    'inchi': inchi,
                    'inchikey': inchikey,
                    output_format: output,
                    'cjson': cjsonmol,
                    'properties': props,
                    'atomCounts': atomCounts
                }, public)
            # Upload the molecule to virtuoso
            try:
                semantic.upload_molecule(mol)
            except requests.ConnectionError:
                print(TerminalColor.warning('WARNING: Couldn\'t connect to virtuoso.'))
            if 'vibrations' in cjson or 'basisSet' in cjson:
                # We have some calculation data, let's add it to the calcs.
                sdf = output
                moleculeId = mol['_id']
                calc_props = {}
                if calc_id is not None:
                    calc = self._calc_model.load(calc_id, user=user, level=AccessType.ADMIN)
                    calc_props = calc['properties']
                    # The calculation is no longer pending
                    if 'pending' in calc_props:
                        del calc_props['pending']
                if input_format == 'json':
                    jsonInput = json.loads(data_str)
                    # Don't override existing properties
                    new_calc_props = avogadro.calculation_properties(jsonInput)
                    new_calc_props.update(calc_props)
                    calc_props = new_calc_props
                # Use basisSet from cjson if we don't already have one.
                if 'basisSet' in cjson and 'basisSet' not in calc_props:
                    calc_props['basisSet'] = cjson['basisSet']
                # Use functional from cjson properties if we don't already have
                # one.
                functional = parse('properties.functional').find(cjson)
                if functional and 'functional' not in calc_props:
                    calc_props['functional'] = functional[0].value
                # Add theory priority to 'sort' calculations
                theory = calc_props.get('theory')
                functional = calc_props.get('functional')
                if theory in constants.theory_priority:
                    priority = constants.theory_priority[theory]
                    calc_props['theoryPriority'] = priority
                if calc_id is not None:
                    calc['properties'] = calc_props
                    calc['cjson'] = cjson
                    calc['fileId'] = file_id
                    self._calc_model.save(calc)
                else:
                    self._calc_model.create_cjson(user, cjson, calc_props,
                                                  moleculeId, file_id, public)
        elif 'xyz' in body or 'sdf' in body:
            if 'xyz' in body:
                input_format = 'xyz'
                data = body['xyz']
            else:
                input_format = 'sdf'
                data = body['sdf']
            (inchi, inchikey) = openbabel.to_inchi(data, input_format)
            mol = {
                'inchi': inchi,
                'inchikey': inchikey,
                input_format: data
            }
            if 'name' in body:
                mol['name'] = body['name']
            mol = self._model.create_xyz(user, mol, public)
        elif 'inchi' in body:
            inchi = body['inchi']
            mol = self._model.create(user, inchi, public)
        else:
            raise RestException('Invalid request', code=400)
        return self._clean(mol)
    addModel('Molecule', 'MoleculeParams', {
        "id": "MoleculeParams",
        "required": ["name", "inchi"],
        "properties": {
            "name": {"type": "string", "description": "The common name of the molecule"},
            "inchi": {"type": "string", "description": "The InChI of the molecule."}
        }
    })
    create.description = (
        Description('Create a molecule')
        .param(
            'body',
            'The molecule to be added to the database.',
            dataType='MoleculeParams',
            required=True, paramType='body')
        .errorResponse('Input format not supported.', code=400))
@access.user
def delete(self, id, params):
user = self.getCurrentUser()
mol = self._model.load(id, user=user, level=AccessType.WRITE)
if not mol:
raise RestException('Molecule not found.', code=404)
return self._model.remove(mol)
delete.description = (
Description('Delete a molecule by id.')
.param('id', 'The id of the molecule', paramType='path')
.errorResponse()
.errorResponse('Molecule not found.', 404))
    @access.user
    def update(self, id, params):
        """Patch a molecule document; currently only appends to 'logs'."""
        user = self.getCurrentUser()
        mol = self._model.load(id, user=user, level=AccessType.WRITE)
        if not mol:
            raise RestException('Molecule not found.', code=404)
        body = self.getBodyJson()
        # TODO this should be refactored to use $addToSet
        if 'logs' in body:
            logs = mol.setdefault('logs', [])
            logs += body['logs']
        mol = self._model.update(mol)
        return self._clean(mol)
    addModel('Molecule', 'UpdateMoleculeParams', {
        "id": "UpdateMoleculeParams",
        "properties": {
            "logs": {"type": "array", "description": "List of Girder file ids"}
        }
    })
    update.description = (
        Description('Update a molecule by id.')
        .param('id', 'The id of the molecule', paramType='path')
        .param(
            'body',
            'The update to the molecule.',
            dataType='UpdateMoleculeParams',
            required=True, paramType='body')
        .errorResponse('Molecule not found.', 404))
@access.user
@autoDescribeRoute(
Description('Add notebooks ( file ids ) to molecule.')
.modelParam('id', 'The molecule id',
model=MoleculeModel, destName='molecule',
force=True, paramType='path')
.jsonParam('notebooks', 'List of notebooks', required=True, paramType='body')
)
def add_notebooks(self, molecule, notebooks):
notebooks = notebooks.get('notebooks')
if notebooks is not None:
MoleculeModel().add_notebooks(molecule, notebooks)
@access.user
def conversions(self, output_format, params):
user = self.getCurrentUser()
if output_format not in Molecule.output_formats:
raise RestException('Output output_format not supported.', code=404)
body = self.getBodyJson()
if 'fileId' not in body:
raise RestException('Invalid request body.', code=400)
file_id = body['fileId']
file = self.model('file').load(file_id, user=user)
input_format = file['name'].split('.')[-1]
if input_format not in Molecule.input_formats:
raise RestException('Input format not supported.', code=400)
if file is None:
raise RestException('File not found.', code=404)
contents = functools.reduce(lambda x, y: x + y, self.model('file').download(file, headers=False)())
data_str = contents.decode()
if output_format.startswith('inchi'):
atom_count = 0
if input_format == 'pdb':
atom_count = openbabel.atom_count(data_str, input_format)
else:
atom_count = avogadro.atom_count(data_str, input_format)
if atom_count > 1024:
raise RestException('Unable to generate InChI, molecule has more than 1024 atoms.', code=400)
if input_format == 'pdb':
(inchi, inchikey) = openbabel.to_inchi(data_str, input_format)
else:
sdf = avogadro.convert_str(data_str, input_format, 'sdf')
(inchi, inchikey) = openbabel.to_inchi(sdf, 'sdf')
if output_format == 'inchi':
return inchi
else:
return inchikey
else:
output = ''
mime = 'text/plain'
if input_format == 'pdb':
(output, mime) = openbabel.convert_str(data_str, input_format, output_format)
else:
output = avogadro.convert_str(data_str, input_format, output_format)
def stream():
cherrypy.response.headers['Content-Type'] = mime
yield output
return stream
addModel('Molecule', 'ConversionParams', {
"id": "ConversionParams",
"properties": {
"fileId": {"type": "string", "description": "Girder file id to do conversion on"}
}
})
conversions.description = (
Description('Update a molecule by id.')
.param('format', 'The format to convert to', paramType='path')
.param(
'body',
'Details of molecule data to perform conversion on',
dataType='ConversionParams',
required=True, paramType='body')
.errorResponse('Output format not supported.', 404)
.errorResponse('File not found.', 404)
.errorResponse('Invalid request body.', 400)
.errorResponse('Input format not supported.', code=400))
    @access.public
    def get_format(self, id, output_format, params):
        """Stream the molecule's structure converted to *output_format*."""
        # For now will for force load ( i.e. ignore access control )
        # This will change when we have access controls.
        molecule = self._model.load(id, force=True)
        if output_format not in Molecule.output_formats:
            raise RestException('Format not supported.', code=400)
        data = json.dumps(molecule['cjson'])
        if output_format != 'cjson':
            data = avogadro.convert_str(data, 'cjson', output_format)
        def stream():
            cherrypy.response.headers['Content-Type'] = Molecule.mime_types[output_format]
            yield data
        return stream
    get_format.description = (
        Description('Get molecule in particular format.')
        .param('id', 'The id of the molecule', paramType='path')
        .param('output_format', 'The format to convert to', paramType='path')
        .errorResponse('Output format not supported.', 400))
    @access.public
    def search(self, params):
        """Search molecules by query string, formula, or a cactus identifier."""
        query_string = params.get('q')
        formula = params.get('formula')
        cactus = params.get('cactus')
        if query_string is None and formula is None and cactus is None:
            raise RestException('Either \'q\', \'formula\' or \'cactus\' is required.')
        if query_string is not None:
            try:
                mongo_query = query.to_mongo_query(query_string)
            except query.InvalidQuery:
                raise RestException('Invalid query', 400)
            mols = []
            for mol in self._model.find(query=mongo_query, fields = ['_id', 'inchikey', 'name']):
                mol['id'] = mol['_id']
                del mol['_id']
                mols.append(mol)
            return mols
        elif formula:
            # Search using formula
            return list(self._model.find_formula(formula, getCurrentUser()))
        elif cactus:
            # Disable cert verification for now
            # TODO Ensure we have the right root certs so this just works.
            # NOTE(review): verify=False disables TLS validation for an
            # external service -- security risk, revisit.
            r = requests.get('https://cactus.nci.nih.gov/chemical/structure/%s/sdf' % cactus, verify=False)
            if r.status_code == 404:
                return []
            else:
                r.raise_for_status()
            (inchi, inchikey) = openbabel.to_inchi(r.content.decode('utf8'), 'sdf')
            # See if we already have a molecule
            mol = self._model.find_inchikey(inchikey)
            # Create new molecule
            if mol is None:
                # NOTE(review): r.content is bytes here while the to_inchi
                # call above decoded it first -- confirm avogadro.convert_str
                # accepts bytes input.
                cjson_str = avogadro.convert_str(r.content, 'sdf', 'cjson')
                mol = {
                    'cjson': json.loads(cjson_str),
                    'inchikey': inchikey,
                    'origin': 'cactus'
                }
                user = getCurrentUser()
                if user is not None:
                    mol = self._model.create_xyz(getCurrentUser(), mol, public=True)
            return [mol]
    search.description = (
        Description('Search for molecules using a query string or formula')
        .param('q', 'The query string to use for this search', paramType='query', required=False)
        .param('formula', 'The formula to search for', paramType='query', required=False)
        .param('cactus', 'The identifier to pass to cactus', paramType='query', required=False))
|
Maninder-mike/bant-shuttring-store-renting-system | src/main.py | <gh_stars>1-10
from PyQt5.QtWidgets import QMainWindow, QApplication, QTextEdit, QStyleFactory, QHBoxLayout, QDockWidget, QWidget, \
QListWidget, QVBoxLayout, QAction
from PyQt5.QtGui import QPalette, QColor
from PyQt5.QtCore import Qt
import sys
from tab_widget import TabWidget
from toolbox import ToolBox
from status_bar import StatusBar
from layout import Layout
class Color(QWidget):
    """Solid-color placeholder widget used to sketch out layouts."""

    def __init__(self, color, *args, **kwargs):
        super(Color, self).__init__(*args, **kwargs)
        # Fill the whole widget background with the requested color.
        self.setAutoFillBackground(True)
        pal = self.palette()
        pal.setColor(QPalette.Window, QColor(color))
        self.setPalette(pal)
class MainWindow(QMainWindow):
    """Top-level application window: menus, toolbars, docks and central layout."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setWindowTitle('Bant Shuttering Store Renting System')
        self.setMinimumSize(1000, 600)
        # Build window chrome first, then the central layout.
        self.status_bar()
        self.gui_layout()
        self.menu_bar()
        self.tool_bar()
        self.show()

    def gui_layout(self):
        """Assemble the central widget: toolbox column + tab area + right docks."""
        main_layout = QHBoxLayout()
        layout1 = QVBoxLayout()
        right_layout = QHBoxLayout()
        toolbox = ToolBox()
        layout1.addWidget(toolbox)
        # Colored placeholders below the toolbox (see Color above).
        layout1.addWidget(Color('pink'))
        layout1.addWidget(Color('blue'))
        main_layout.setContentsMargins(0, 0, 0, 0)
        main_layout.setSpacing(0)
        main_layout.addLayout(layout1)
        table = TabWidget()
        main_layout.addWidget(table)
        # NOTE(review): right_up_text_edit()/right_down_text_edit() return
        # None, so addWidget() receives None here (likely a TypeError at
        # runtime — verify); the dock widgets are actually attached inside
        # those methods via addDockWidget().
        right_layout.addWidget(self.right_up_text_edit())
        right_layout.addWidget(self.right_down_text_edit())
        main_layout.addLayout(right_layout)
        widget = QWidget()
        widget.setLayout(main_layout)
        # The dock helpers above already called setCentralWidget(QTextEdit());
        # this call replaces that again with the composed layout.
        self.setCentralWidget(widget)

    def right_up_text_edit(self):
        """Attach the 'Contacts' dock on the right.

        NOTE(review): returns None although its caller passes the result to
        addWidget(); the local `layout` is created but never populated.
        """
        layout = QHBoxLayout()
        items = QDockWidget("Contacts", self)
        listWidget = QListWidget()
        listWidget.addItem('Item1')
        items.setWidget(listWidget)
        items.setFloating(False)
        # NOTE(review): clobbers the central widget — confirm intended.
        self.setCentralWidget(QTextEdit())
        self.addDockWidget(Qt.RightDockWidgetArea, items)
        self.setLayout(layout)

    def right_down_text_edit(self):
        """Attach the 'Payable' dock on the right (same caveats as above)."""
        layout = QHBoxLayout()
        items = QDockWidget("Payable", self)
        listWidget = QListWidget()
        listWidget.addItem('Item1')
        items.setWidget(listWidget)
        items.setFloating(False)
        # NOTE(review): clobbers the central widget — confirm intended.
        self.setCentralWidget(QTextEdit())
        self.addDockWidget(Qt.RightDockWidgetArea, items)
        self.setLayout(layout)

    def menu_bar(self):
        """Populate File/Edit/Window/Help menus (mostly unwired actions)."""
        menu_bar = self.menuBar()
        file_menu = menu_bar.addMenu('File')
        file_menu.addAction('Open')
        file_menu.addAction('Save')
        file_menu.addSeparator()
        # Only Exit has a handler wired up.
        file_menu.addAction('Exit', self.close)
        edit_menu = menu_bar.addMenu('Edit')
        edit_menu.addAction('Undo')
        window_menu = menu_bar.addMenu('Window')
        window_menu.addAction('Themes')
        help_menu = menu_bar.addMenu('Help')
        help_menu.addAction('Info')

    def tool_bar(self):
        """Add Edit and View toolbars with placeholder (unwired) actions."""
        edit_toolbar = self.addToolBar('Edit')
        edit_toolbar.addAction('Undo')
        edit_toolbar.addAction('Redo')
        edit_toolbar.addAction('Cut')
        edit_toolbar.addAction('Copy')
        edit_toolbar.addAction('Paste')
        view_toolbar = self.addToolBar('View')
        view_toolbar.addAction('Themes')
        view_toolbar.addAction('Recent Files')
        view_toolbar.addAction('Location')
        view_toolbar.addAction('Contact Info')

    def status_bar(self):
        """Show a 5-second welcome message in the status bar."""
        bar = self.statusBar()
        bar.showMessage('Welcome to the Application', 5000)
if __name__ == '__main__':
    # Launch the Qt application with the cross-platform Fusion theme.
    application = QApplication(sys.argv)
    application.setStyle(QStyleFactory.create('Fusion'))
    main_window = MainWindow()
    sys.exit(application.exec_())
|
Maninder-mike/bant-shuttring-store-renting-system | src/tab_widget.py | <filename>src/tab_widget.py
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QTabWidget, QFormLayout, QLineEdit, QHBoxLayout
import sys
class MainWindow(QMainWindow):
    """Minimal host window that embeds a TabWidget for standalone testing."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        tabs = TabWidget(self)
        self.table = tabs
        self.setCentralWidget(tabs)
        self.show()
class TabWidget(QTabWidget):
    """Three-tab container; only the first tab currently has content."""

    def __init__(self, *args, **kwargs):
        super(TabWidget, self).__init__(*args, **kwargs)
        # Create the three pages and register them with placeholder titles.
        self.tab1, self.tab2, self.tab3 = QWidget(), QWidget(), QWidget()
        for index, page in enumerate((self.tab1, self.tab2, self.tab3), start=1):
            self.addTab(page, 'Tab {}'.format(index))
        # Populate each page.
        self.tab1UI()
        self.tab2UI()
        self.tab3UI()

    def tab1UI(self):
        """Build the contact-details form on the first tab."""
        form = QFormLayout()
        form.addRow('Name', QLineEdit())
        form.addRow('Address', QLineEdit())
        self.setTabText(0, 'Contact Details')
        self.tab1.setLayout(form)

    def tab2UI(self):
        """Second tab is intentionally empty for now."""
        pass

    def tab3UI(self):
        """Third tab is intentionally empty for now."""
        pass
if __name__ == '__main__':
    # Standalone smoke test for this module.
    application = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(application.exec_())
|
Maninder-mike/bant-shuttring-store-renting-system | setup.py | from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))  # directory containing this setup.py
# Use the README as the long description shown on PyPI.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Packaging metadata for the bssrs (Bant Shuttering Store Renting System) app.
setup(
    name='bssrs',
    version='0.1.0',
    description='A POS Python project',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/maninder-mike',
    author='@maninder_mike',
    author_email='<EMAIL>',
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3 :: Only',
    ],
    keywords='sample setuptools development',
    # When your source code is in a subdirectory under the project root, e.g.
    # `src/`, it is necessary to specify the `package_dir` argument.
    package_dir={'': 'src'},  # Optional
    # You can just specify package directories manually here if your project is
    # simple. Or you can use find_packages().
    #
    # Alternatively, if you just want to distribute a single Python file, use
    # the `py_modules` argument instead as follows, which will expect a file
    # called `my_module.py` to exist:
    #
    # py_modules=["my_module"],
    #
    packages=find_packages(where='src'),
    python_requires='>=3.7, <4',
    # PyQt5 is the only runtime dependency (GUI framework).
    install_requires=[
        'pyqt5'
    ],
    extras_require={
        'dev': ['check-manifest'],
        'test': ['pytest'],
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here.
    package_data={  # Optional
        'sample': ['package_data.dat'],
    },
    data_files=[('my_data', ['data/data_file'])],  # Optional
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # `pip` to create the appropriate form of executable for the target
    # platform.
    #
    # For example, the following would provide a command called `sample` which
    # executes the function `main` from this package when invoked:
    entry_points={  # Optional
        'console_scripts': [
            'sample=sample:main',
        ],
    },
    project_urls={
        # 'Bug Reports': 'https://github.com/pypa/sampleproject/issues',
        # 'Funding': 'https://donate.pypi.org',
        # 'Say Thanks!': 'http://saythanks.io/to/example',
        # 'Source': 'https://github.com/pypa/sampleproject/',
    },
)
|
Maninder-mike/bant-shuttring-store-renting-system | src/time_mangement.py | from datetime import datetime
import calendar
def calculateAge(birthDate):
    """Return the age in whole years for a birth date/datetime.

    Subtracting the boolean accounts for birthdays that have not yet
    occurred this calendar year.

    :param birthDate: object with .year/.month/.day (date or datetime).
    :return: age in completed years.
    :rtype: int
    """
    # Bug fix: the original called datetime.date.today(), but only the
    # `datetime` class is imported (`from datetime import datetime`), so
    # `datetime.date` is the instance method date() and the call raised
    # AttributeError. datetime.today() gives the current moment directly.
    today = datetime.today()
    age = today.year - birthDate.year - ((today.month, today.day) < (birthDate.month, birthDate.day))
    return age
def birthCalendar(year):
    """Return the full formatted text calendar for the given year."""
    year_text = calendar.calendar(year)
    return year_text
def today():
    """Return the current date formatted as DD-MM-YYYY."""
    current = datetime.now()
    return current.strftime('%d-%m-%Y')
# print(calculateAge(date(1992, 7, 13)), 'Years')
# print(birthCalendar(1992))
print(today()) |
Maninder-mike/bant-shuttring-store-renting-system | clc.py | <filename>clc.py<gh_stars>1-10
from PyQt5.QtWidgets import QVBoxLayout, QWidget, QGridLayout, QLineEdit, QApplication, QPushButton, QStyleFactory
class MainWindow(QWidget):
    """A minimal four-function calculator window."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Calculator')
        self.setMaximumSize(500, 300)
        self.setLayout(QVBoxLayout())
        self.result_field = QLineEdit()
        # Digits typed since the last operator, and all tokens entered so far.
        self.temp_nums = []
        self.fin_nums = []
        self.keypad()
        self.show()

    # noinspection PyArgumentList
    def keypad(self):
        """Build the button grid and wire each button to its handler."""
        container = QWidget()
        container.setLayout(QGridLayout())
        btn_result = QPushButton('Enter', clicked=self.func_result)
        btn_clear = QPushButton('Clear', clicked=self.clear_calc)
        btn_0 = QPushButton('0', clicked=lambda: self.num_press('0'))
        btn_1 = QPushButton('1', clicked=lambda: self.num_press('1'))
        btn_2 = QPushButton('2', clicked=lambda: self.num_press('2'))
        btn_3 = QPushButton('3', clicked=lambda: self.num_press('3'))
        btn_4 = QPushButton('4', clicked=lambda: self.num_press('4'))
        btn_5 = QPushButton('5', clicked=lambda: self.num_press('5'))
        btn_6 = QPushButton('6', clicked=lambda: self.num_press('6'))
        btn_7 = QPushButton('7', clicked=lambda: self.num_press('7'))
        btn_8 = QPushButton('8', clicked=lambda: self.num_press('8'))
        btn_9 = QPushButton('9', clicked=lambda: self.num_press('9'))
        btn_add = QPushButton('+', clicked=lambda: self.func_press('+'))
        btn_min = QPushButton('-', clicked=lambda: self.func_press('-'))
        btn_div = QPushButton('/', clicked=lambda: self.func_press('/'))
        btn_mul = QPushButton('*', clicked=lambda: self.func_press('*'))
        # Bug fix: the original connected `lambda: MainWindow.destroy()`,
        # calling an instance method on the class (TypeError when clicked);
        # close this window instead.
        btn_exit = QPushButton('Exit', clicked=self.close)
        container.layout().addWidget(self.result_field, 0, 0, 1, 4)
        container.layout().addWidget(btn_result, 1, 0, 1, 2)
        container.layout().addWidget(btn_clear, 1, 2, 1, 2)
        container.layout().addWidget(btn_9, 2, 0)
        container.layout().addWidget(btn_8, 2, 1)
        container.layout().addWidget(btn_7, 2, 2)
        container.layout().addWidget(btn_add, 2, 3)
        container.layout().addWidget(btn_6, 3, 0)
        container.layout().addWidget(btn_5, 3, 1)
        container.layout().addWidget(btn_4, 3, 2)
        container.layout().addWidget(btn_min, 3, 3)
        container.layout().addWidget(btn_3, 4, 0)
        container.layout().addWidget(btn_2, 4, 1)
        container.layout().addWidget(btn_1, 4, 2)
        container.layout().addWidget(btn_mul, 4, 3)
        container.layout().addWidget(btn_0, 5, 0, 1, 3)
        container.layout().addWidget(btn_div, 5, 3)
        container.layout().addWidget(btn_exit, 6, 0, 1, 4)
        self.layout().addWidget(container)

    def num_press(self, key_number):
        """Append a digit and refresh the display."""
        self.temp_nums.append(key_number)
        temp_string = ''.join(self.temp_nums)
        if self.fin_nums:
            self.result_field.setText(''.join(self.fin_nums) + temp_string)
        else:
            self.result_field.setText(temp_string)

    def func_press(self, operator):
        """Commit the pending digits plus an operator to the expression."""
        temp_string = ''.join(self.temp_nums)
        self.fin_nums.append(temp_string)
        self.fin_nums.append(operator)
        self.temp_nums = []
        self.result_field.setText(''.join(self.fin_nums))

    def func_result(self):
        """Evaluate the entered expression and display `expr=result`."""
        fin_string = ''.join(self.fin_nums) + ''.join(self.temp_nums)
        # eval() is acceptable here only because the string can be built
        # solely from the digit/operator buttons above, never free-form text.
        result_string = eval(fin_string)
        fin_string += '='
        fin_string += str(result_string)
        self.result_field.setText(fin_string)

    def clear_calc(self):
        """Reset the display and both accumulators."""
        self.result_field.clear()
        self.temp_nums = []
        self.fin_nums = []
if __name__ == '__main__':
    # Run the calculator with the Fusion theme.
    application = QApplication([])
    application.setStyle(QStyleFactory.create('Fusion'))
    calculator = MainWindow()
    application.exec_()
|
Maninder-mike/bant-shuttring-store-renting-system | src/window_size.py | <gh_stars>1-10
def normal_window():
    """Return the default window size as a (width, height) tuple.

    The original wrapped the return in an `if True:` with an unreachable
    `else` branch; the dead code has been removed.
    """
    return (500, 500)
Maninder-mike/bant-shuttring-store-renting-system | tests/test_main.py | <reponame>Maninder-mike/bant-shuttring-store-renting-system<gh_stars>1-10
from src.check import add, mul
def test_add():
    # 5 + 5 should give 10.
    result = add(5, 5)
    assert result == 10
def test_mul():
    # 4 * 4 should give 16.
    result = mul(4, 4)
    assert result == 16
|
Maninder-mike/bant-shuttring-store-renting-system | src/menubar.py | from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QVBoxLayout, QMenu, QMenuBar
import sys
class MainWindow(QMainWindow):
    """Minimal host window that embeds the MenuBar stub for testing."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        bar = MenuBar()
        self.menubar = bar
        self.setCentralWidget(bar)
        self.show()
class MenuBar(QWidget):
    """Stub widget meant to hold the application's menus (all no-ops)."""

    def __init__(self, *args, **kwargs):
        super(MenuBar, self).__init__(*args, **kwargs)
        # Each builder below is currently an empty placeholder.
        self.file_menu()
        self.edit_menu()
        self.tool_menu()
        self.window_menu()
        self.help_menu()

    def file_menu(self):
        pass

    def edit_menu(self):
        pass

    def tool_menu(self):
        pass

    def window_menu(self):
        pass

    def help_menu(self):
        pass

    def toggle_menu(self, state):
        # NOTE(review): self.status_bar is not defined on this class, and
        # QWidget has no statusBar(); calling this raises AttributeError —
        # confirm whether this belongs on a QMainWindow subclass.
        if state:
            self.status_bar().show()
        else:
            self.status_bar().hide()
if __name__ == '__main__':
    # Standalone smoke test for this module.
    application = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(application.exec_())
|
Maninder-mike/bant-shuttring-store-renting-system | src/customwidgets.py | from PyQt5.QtWidgets import QWidget, QLabel, QPushButton, QHBoxLayout
class OnOffWidget(QWidget):
    """A labelled On/Off toggle; the active button is highlighted."""

    def __init__(self, name):
        super(OnOffWidget, self).__init__()
        self.name = name        # label text shown next to the buttons
        self.is_on = False      # current toggle state
        self.lbl = QLabel(self.name)
        self.btn_on = QPushButton('On')
        self.btn_off = QPushButton('Off')
        self.hbox = QHBoxLayout()
        self.hbox.addWidget(self.lbl)
        self.hbox.addWidget(self.btn_on)
        self.hbox.addWidget(self.btn_off)
        self.setLayout(self.hbox)
        self.btn_on.clicked.connect(self.on)
        self.btn_off.clicked.connect(self.off)
        self.update_btn_state()

    def off(self):
        """Switch to the off state."""
        self.is_on = False
        self.update_btn_state()

    def on(self):
        """Switch to the on state."""
        self.is_on = True
        self.update_btn_state()

    def update_btn_state(self):
        """Highlight whichever button matches the current state.

        Bug fix: the original tested `if self.btn_on:` — a QPushButton
        object, which is always truthy — so the Off styling never applied.
        The condition now tests the boolean state flag.
        """
        if self.is_on:
            self.btn_on.setStyleSheet('background-color: #4CAF50; color: #fff')
            self.btn_off.setStyleSheet('background-color: none; color: none')
        else:
            self.btn_on.setStyleSheet('background-color: none; color: none')
            self.btn_off.setStyleSheet('background-color: #D32F2F; color: #fff')
|
Maninder-mike/bant-shuttring-store-renting-system | src/layout.py | from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QHBoxLayout, QVBoxLayout
import sys
from toolbox import ToolBox
from tab_widget import TabWidget
class MainWindow(QMainWindow):
    """Minimal host window embedding the Layout stub for testing."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setMinimumSize(500, 500)
        central = Layout()
        self.layout = central
        self.setCentralWidget(central)
        self.show()
class Layout(QWidget):
    """Stub copy of MainWindow.gui_layout from main.py.

    NOTE(review): layout_gui() is never called (there is no __init__ wiring),
    it references self.right_up_text_edit/right_down_text_edit which are not
    defined on this class, and QWidget has no setCentralWidget() — the method
    would fail if invoked. Looks like an unfinished extraction; confirm.
    """

    def layout_gui(self):
        main_layout = QHBoxLayout()
        layout1 = QVBoxLayout()
        right_layout = QHBoxLayout()
        toolbox = ToolBox()
        layout1.addWidget(toolbox)
        main_layout.setContentsMargins(0, 0, 0, 0)
        main_layout.setSpacing(0)
        main_layout.addLayout(layout1)
        table = TabWidget()
        main_layout.addWidget(table)
        right_layout.addWidget(self.right_up_text_edit())
        right_layout.addWidget(self.right_down_text_edit())
        main_layout.addLayout(right_layout)
        widget = QWidget()
        widget.setLayout(main_layout)
        self.setCentralWidget(widget)
if __name__ == '__main__':
    # Standalone smoke test for this module.
    application = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(application.exec_())
|
Maninder-mike/bant-shuttring-store-renting-system | tests/test_database_set.py | <reponame>Maninder-mike/bant-shuttring-store-renting-system
from src.database.database_connect import connect
import sqlite3 |
Maninder-mike/bant-shuttring-store-renting-system | colored_lines.py | from colorama import Fore, Back, Style, init
# Colorama: reset terminal styling automatically after each print.
init(autoreset=True)
def solution(n):
    """Return a green-colored message with the sum of all multiples of 3 or 5 below n.

    Uses arithmetic-progression sums with inclusion-exclusion (multiples of
    15 are counted twice) for O(1) time.

    :param int n: exclusive upper bound.
    :return: colorized result string.
    :rtype: str
    """
    # Renamed the accumulator from `sum`, which shadowed the builtin.
    total = 0
    terms = (n - 1) // 3
    total += (terms * (6 + (terms - 1) * 3)) // 2  # sum of 3, 6, 9, ... (A.P.)
    terms = (n - 1) // 5
    total += (terms * (10 + (terms - 1) * 5)) // 2  # sum of 5, 10, 15, ...
    terms = (n - 1) // 15
    total -= (terms * (30 + (terms - 1) * 15)) // 2  # remove double-counted 15s
    return Fore.GREEN + f'The result is {total}'
if __name__ == "__main__":
print(solution(int(input(Back.RED + 'Put number here: ').strip())))
# print(Fore.BLUE + 'This is Blue')
# print(Fore.RED + 'This is Blue')
# print(Fore.WHITE + 'This is Blue')
#
# print("auto reset")
|
Maninder-mike/bant-shuttring-store-renting-system | src/database/database_connect.py | import sqlite3
def connect():
    """Create books.db (if needed) and ensure the `book` table exists."""
    connection = sqlite3.connect("books.db")
    cursor = connection.cursor()
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS book (id INTEGER PRIMARY KEY, title TEXT, "
        "author TEXT, year INTEGER, isbn INTEGER)"
    )
    connection.commit()
    connection.close()
def insert(title, author, year, isbn):
    """Add one book row; the NULL lets the id auto-increment."""
    connection = sqlite3.connect("books.db")
    cursor = connection.cursor()
    cursor.execute("INSERT INTO book VALUES(NULL,?,?,?,?)", (title, author, year, isbn))
    connection.commit()
    connection.close()
def view():
    """Return every row of the `book` table as a list of tuples."""
    connection = sqlite3.connect("books.db")
    cursor = connection.cursor()
    cursor.execute("SELECT * FROM book")
    all_rows = cursor.fetchall()
    connection.close()
    return all_rows
def search(title="", author="", year="", isbn=""):
conn = sqlite3.connect("books.db")
cur = conn.cursor()
cur.execute("SELECT * FROM book WHERE title = ? OR author = ? OR year = ? "
"OR isbn = ?", (title, author, year, isbn))
rows = cur.fetchall()
conn.close()
return rows
def delete(id):
    """Remove the book row with the given primary key."""
    connection = sqlite3.connect("books.db")
    cursor = connection.cursor()
    cursor.execute("DELETE FROM book WHERE id = ?", (id,))
    connection.commit()
    connection.close()
def update(id, title, author, year, isbn):
    """Overwrite every field of the book row with the given primary key."""
    connection = sqlite3.connect("books.db")
    cursor = connection.cursor()
    cursor.execute(
        "UPDATE book SET title =?, author =?, year =?, isbn =? WHERE id =?",
        (title, author, year, isbn, id),
    )
    connection.commit()
    connection.close()
# Ensure the database and table exist whenever this module is imported or run.
connect()
# insert("another novel", "<NAME>.", 2017, 1234)
# update(2, title="new book", author="DH", year=2005, isbn=5555)
# for x in view():
#     print(x)
|
Maninder-mike/bant-shuttring-store-renting-system | src/status_bar.py | from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget
from PyQt5.Qt import Qt
import sys
class MainWindow(QMainWindow):
    """Minimal host window embedding the StatusBar stub for testing."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setMinimumSize(500, 500)
        widget = StatusBar()
        self.bar = widget
        self.setCentralWidget(widget)
        self.show()
class StatusBar(QWidget):
    """Stub intended to show a welcome message in a status bar.

    NOTE(review): QWidget has no statusBar() method (only QMainWindow does),
    so instantiating this class raises AttributeError — confirm the intended
    base class.
    """

    def __init__(self, *args, **kwargs):
        super(StatusBar, self).__init__(*args, **kwargs)
        self.status_bar()

    def status_bar(self):
        # See class note: self.statusBar() does not exist on QWidget.
        bar = self.statusBar()
        bar.showMessage('Welcome to the Application', 5000)
if __name__ == '__main__':
    # Standalone smoke test for this module.
    application = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(application.exec_())
|
Maninder-mike/bant-shuttring-store-renting-system | src/toolbox.py | <reponame>Maninder-mike/bant-shuttring-store-renting-system
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QToolBox, QLabel, QMainWindow
import sys
class MainWindow(QMainWindow):
    """Minimal host window embedding the ToolBox for standalone testing."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        box = ToolBox(self)
        self.table = box
        self.setCentralWidget(box)
        self.show()
class ToolBox(QWidget):
    """Vertical QToolBox with six empty, numbered placeholder sections."""

    def __init__(self, *args, **kwargs):
        super(ToolBox, self).__init__(*args, **kwargs)
        # self.setStyleSheet('background-color:yellow')
        box_layout = QVBoxLayout()
        box = QToolBox()
        box.setItemToolTip(0, 'This is a tooltip')
        box_layout.addWidget(box)
        # One empty QLabel per section, titled One..Six.
        for section_title in ('One', 'Two', 'Three', 'Four', 'Five', 'Six'):
            box.addItem(QLabel(), section_title)
        self.setLayout(box_layout)
if __name__ == '__main__':
    # Standalone smoke test for this module.
    application = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(application.exec_())
|
VicHug27/web-scraping-challenge | scrape_mars.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
#Import dependencies and setup
import pandas as pd
from bs4 import BeautifulSoup as bs
from splinter import Browser
import requests
import pymongo
from webdriver_manager.chrome import ChromeDriverManager
# In[2]:
# Use webdriver-manager to fetch a matching chromedriver binary, then open a
# visible (non-headless) Chrome window controlled by splinter.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# ### Step1 Scraping
# In[3]:
#NASA Mars News
# Load the NASA news page and parse the rendered HTML with BeautifulSoup.
url = "https://mars.nasa.gov/news/"
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
# In[4]:
# Search for news title
t_results = soup.find_all('div', class_='content_title')
# Search for paragraph text under news titles
p_results = soup.find_all('div', class_='article_teaser_body')
# Extract first title and paragraph, and assign to variables
# NOTE(review): assumes at least one article is present — IndexError otherwise.
title = t_results[0].text
paragraph = p_results[0].text
print(title)
print(paragraph)
# In[5]:
#JPL Mars image
url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
# NOTE(review): the parsed `soup` is never used — the featured image URL is
# hard-coded below instead of being scraped from the page.
featured_image_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/image/featured/mars2.jpg'
print(featured_image_url )
# In[6]:
#Mars Facts
# NOTE(review): `url` is assigned but the literal is passed again below.
url = 'https://space-facts.com/mars/'
# read_html returns every <table> on the page; [0] takes the facts table.
facts = pd.read_html("https://space-facts.com/mars/")[0]
print(facts)
# In[7]:
#Mars Hemispheres
# Navigate to the USGS hemisphere search results (scraping not yet written).
url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url)
# ### Step2 MongoDB and Flask
# In[ ]:
|
mhyeun/emojified-tweets-wall-of-fame | emojified_tweets_wall_of_fame/urls.py | from django.urls import path
from . import views
# Route table for the emojified-tweets app. The bare path and /wall-of-fame
# both serve the wall-of-fame view.
urlpatterns = [
    path("", views.wall_of_fame, name="wall_of_fame"),
    path("wall-of-fame", views.wall_of_fame, name="wall_of_fame"),
    path("wall-of-shame", views.wall_of_shame, name="wall_of_shame"),
    path("health", views.health, name="health"),
    path("authentication", views.authentication, name="authentication"),
    path("signup", views.signup, name="signup"),
    path("emojify", views.emojify, name="emojify"),
    path("emojifytweets", views.emojifytweets, name="emojifytweets"),
    path("like", views.like, name="like"),
    path("dislike", views.dislike, name="dislike"),
    path("logout", views.handle_logout, name="handle_logout"),
    path("about", views.about, name="about"),
    path("all-tweets", views.all_tweets, name="all_tweets"),
]
|
mhyeun/emojified-tweets-wall-of-fame | emojified_tweets_wall_of_fame/views.py | <reponame>mhyeun/emojified-tweets-wall-of-fame<gh_stars>1-10
from django.shortcuts import render, redirect, get_list_or_404
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.hashers import make_password
from django.contrib.auth.decorators import login_required
from datetime import datetime
from .models import Tweet, CustomUser, CustomUserToTweet
import requests
import json
def wall_of_fame(request):
    """Render the ten highest-voted tweets plus the current user's votes."""
    # Let the database limit to 10 rows instead of iterating the whole
    # queryset and breaking (the original fetched every tweet).
    tweets = [
        {
            "content": tweet.content,
            "votes": tweet.votes,
            "poster_id": tweet.poster,
            "id": tweet.pk,
        }
        for tweet in Tweet.objects.all().order_by("-votes")[:10]
    ]
    # Tweets the logged-in user has already voted on (drives button state).
    voted_tweets = []
    if not request.user.is_anonymous:
        for relation in CustomUserToTweet.objects.filter(voter=request.user):
            voted_tweets.append(
                {"id": relation.tweet.pk, "is_upvote": relation.is_upvote}
            )
    return render(
        request,
        "emojified_tweets_wall_of_fame/wall_of_fame.html",
        {
            "tweets": tweets,
            "voted_tweets": voted_tweets,
            "is_user_anonymous": request.user.is_anonymous,
        },
    )
@login_required(login_url="/authentication")
def wall_of_shame(request):
tweets_list = Tweet.objects.all().order_by("votes")
tweets = []
for tweet in tweets_list:
tweet_dict = {
"content": tweet.content,
"votes": tweet.votes,
"poster_id": tweet.poster,
"id": tweet.pk,
}
if len(tweets) >= 10:
break
tweets.append(tweet_dict)
voted_tweets = []
if not request.user.is_anonymous:
custom_user_to_tweets_list = CustomUserToTweet.objects.filter(
voter=request.user
)
for custom_user_to_tweets in custom_user_to_tweets_list:
tweet_dict = {
"id": custom_user_to_tweets.tweet.pk,
"is_upvote": custom_user_to_tweets.is_upvote,
}
voted_tweets.append(tweet_dict)
is_user_anonymous = request.user.is_anonymous
return render(
request,
"emojified_tweets_wall_of_fame/wall_of_fame.html",
{
"tweets": tweets,
"voted_tweets": voted_tweets,
"is_user_anonymous": is_user_anonymous,
},
)
def all_tweets(request):
    """Render every tweet, newest first, plus the current user's votes."""
    tweets = [
        {
            "content": tweet.content,
            "votes": tweet.votes,
            "poster_id": tweet.poster,
            "posted_at": tweet.posted_at,
            "id": tweet.pk,
        }
        for tweet in Tweet.objects.all().order_by("-posted_at")
    ]
    voted_tweets = []
    if not request.user.is_anonymous:
        for relation in CustomUserToTweet.objects.filter(voter=request.user):
            voted_tweets.append(
                {"id": relation.tweet.pk, "is_upvote": relation.is_upvote}
            )
    # Consistency fix: the sibling views pass is_user_anonymous to this same
    # template; the original omitted it here.
    return render(
        request,
        "emojified_tweets_wall_of_fame/wall_of_fame.html",
        {
            "tweets": tweets,
            "voted_tweets": voted_tweets,
            "is_user_anonymous": request.user.is_anonymous,
        },
    )
def health(request):
    """Liveness probe: always returns {"success": true}.

    The parameter was misleadingly named `response` in the original; Django
    passes the incoming request here (positionally, so callers are unaffected).
    """
    success_message = {"success": True}
    return HttpResponse(json.dumps(success_message))
def about(request):
    """Render the static about page."""
    context = {"is_user_anonymous": request.user.is_anonymous}
    return render(request, "emojified_tweets_wall_of_fame/about.html", context)
def signup(request):
    """Register a new user.

    GET renders the signup form; POST validates the submitted fields and,
    on success, creates the user and redirects to the wall of fame.
    """
    error = {"error": True, "fields": [], "message": "Something went wrong."}
    if request.method == "POST":
        username = request.POST["username"]
        password = request.POST["password"]
        password_retry = request.POST["password_retry"]
        email = request.POST["email"]
        # Both password fields must match.
        if password != password_retry:
            error["message"] = "Passwords did not match."
            error["fields"].append("password")
            error["fields"].append("password_retry")
            return render(
                request, "emojified_tweets_wall_of_fame/signup.html", {"error": error}
            )
        # Reject duplicate usernames (missing user is the success path).
        try:
            matching_username = CustomUser.objects.get(username=username)
        except CustomUser.DoesNotExist:
            matching_username = None
        if matching_username is not None:
            error["message"] = "Username already taken."
            error["fields"].append("username")
            return render(
                request, "emojified_tweets_wall_of_fame/signup.html", {"error": error}
            )
        # Hash the password before storing it. (Repairs the corrupted
        # `password=<PASSWORD>)` call in the source; make_password is
        # imported at module level.)
        new_user = CustomUser.objects.create(
            username=username, password=make_password(password), email=email
        )
        new_user.save()
        return redirect("wall_of_fame")
    return render(request, "emojified_tweets_wall_of_fame/signup.html")
def authentication(request):
    """Log a user in; re-render the form with a targeted error on failure."""
    error = {"error": True, "fields": [], "message": "Invalid username or password."}
    if request.method == "POST":
        username = request.POST["username"]
        password = request.POST["password"]
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect("wall_of_fame")
        # Distinguish empty fields from a genuine credential mismatch.
        if username == "" and password == "":
            error["message"] = "Username and password cannot be empty."
            error["fields"] += ["username", "password"]
        elif username == "":
            error["message"] = "Username cannot be empty."
            error["fields"].append("username")
        elif password == "":
            error["message"] = "Password cannot be empty."
            error["fields"].append("password")
        return render(
            request,
            "emojified_tweets_wall_of_fame/authentication.html",
            {"error": error},
        )
    return render(request, "emojified_tweets_wall_of_fame/authentication.html")
@login_required(login_url="/authentication")
def emojify(request):
error = {"error": True, "fields": [], "message": "Invalid username or password."}
TWITTER_API_URL = "http://mhyeun.pythonanywhere.com/emojify-tweets"
if request.method == "POST":
twitter_username = request.POST["twitter_username"]
number_of_tweets = request.POST["number_of_tweets"]
try:
emojified_tweets = requests.get(
TWITTER_API_URL,
params={"username": twitter_username, "tweets": number_of_tweets},
)
emojified_tweets_list = json.loads(emojified_tweets.text)
emojified_tweets_to_add = []
for emojified_tweets in emojified_tweets_list:
tweet = Tweet(content=emojified_tweets, votes=0, poster=request.user)
emojified_tweets_to_add.append(tweet)
Tweet.objects.bulk_create(emojified_tweets_to_add)
return render(
request,
"emojified_tweets_wall_of_fame/emojifytweets.html",
{"emojified_tweets": emojified_tweets_list},
)
except:
error["message"] = "Username is not valid."
error["fields"].append("twitter_username")
return render(
request, "emojified_tweets_wall_of_fame/emojify.html", {"error": error}
)
return render(request, "emojified_tweets_wall_of_fame/emojify.html")
@login_required(login_url="/authentication")
def emojifytweets(request):
return render(request, "emojified_tweets_wall_of_fame/emojifytweets.html")
@login_required(login_url="/authentication")
def like(request):
if request.method == "POST":
next = request.POST["next"]
tweet_id = request.POST["tweet_id"]
tweet = Tweet.objects.get(id=tweet_id)
tweet.votes += 1
relation = CustomUserToTweet.objects.create(
voter=request.user, tweet=tweet, is_upvote=True
)
relation.save()
tweet.save()
return HttpResponseRedirect(next)
@login_required(login_url="/authentication")
def dislike(request):
if request.method == "POST":
next = request.POST["next"]
tweet_id = request.POST["tweet_id"]
tweet = Tweet.objects.get(id=tweet_id)
tweet.votes -= 1
relation = CustomUserToTweet.objects.create(
voter=request.user, tweet=tweet, is_upvote=False
)
relation.save()
tweet.save()
return HttpResponseRedirect(next)
def handle_logout(request):
    """Log the current user out and show the login form."""
    logout(request)
    template = "emojified_tweets_wall_of_fame/authentication.html"
    return render(request, template)
|
mhyeun/emojified-tweets-wall-of-fame | emojified_tweets_wall_of_fame/admin.py | <filename>emojified_tweets_wall_of_fame/admin.py
from django.contrib import admin
from .models import Tweet, CustomUser, CustomUserToTweet
# Expose the app's models in the Django admin site.
admin.site.register(CustomUser)
admin.site.register(Tweet)
admin.site.register(CustomUserToTweet)
|
mhyeun/emojified-tweets-wall-of-fame | emojified_tweets_wall_of_fame/apps.py | from django.apps import AppConfig
class EmojifiedTweetsWallOfFameConfig(AppConfig):
    """Django AppConfig for the emojified_tweets_wall_of_fame app."""

    name = "emojified_tweets_wall_of_fame"
|
mhyeun/emojified-tweets-wall-of-fame | emojified_tweets_wall_of_fame/models.py | from django.db import models
from django.conf import settings
from django.utils.timezone import now
from django.contrib.auth.models import AbstractUser
# Made foreign key relation with no cascade delete since
# we will just replace username with [DELETED]
class CustomUser(AbstractUser):
    """Project user model; extends Django's AbstractUser without changes."""

    pass

    def __str__(self):
        return self.username
class Tweet(models.Model):
    """An emojified tweet with a vote tally."""

    content = models.CharField(max_length=512)
    votes = models.IntegerField()
    # Nullable with SET_NULL so tweets survive poster-account deletion
    # (see the module comment about avoiding cascade deletes).
    poster = models.ForeignKey(
        settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL
    )
    posted_at = models.DateTimeField(default=now)

    def __str__(self):
        return self.content

    def getDescription(self):
        """Return a human-readable summary of the tweet.

        Bug fix: `votes` is an int, so the original `str + int`
        concatenation raised TypeError; it is now converted explicitly.
        """
        return self.content + " with " + str(self.votes) + " votes."
class CustomUserToTweet(models.Model):
    """Join table recording which user voted on which tweet, and how."""

    voter = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    tweet = models.ForeignKey(Tweet, on_delete=models.CASCADE)
    # True for an upvote, False for a downvote.
    is_upvote = models.BooleanField()
|
charlyalizadeh/ESILV_ADSA_Problem | steps/step1/numpyVSlist.py | <reponame>charlyalizadeh/ESILV_ADSA_Problem
import numpy as np
import matplotlib.pyplot as plt
import timeit
class NumpyVSList:
@staticmethod
def compute(operation):
"""Compute the time of an operation on a numpy array and a core_list.
:param str operation: The operation to compute.
:return: Tuple containing the time of the operation on an numpy array and a core list.
:rtype: tuple of float
"""
if operation == "init":
return NumpyVSList.compute_time_init()
elif operation == "reading":
return NumpyVSList.compute_time_reading()
elif operation == "writing":
return NumpyVSList.compute_time_writing()
@staticmethod
def compute_time_init(nb_values=100, repeat=10000):
"""Compare the time of initialization of a numpy array and a core list.
:param int nb_values: The number of values in the iterables.
:param int repeat: The number of times we repeat the experiment.
:return: The time of initialization of the numpy array and the core list.
:rtype: tuple of float
"""
time_numpy_array = timeit.timeit("numpy_array = np.array(range({}))".format(nb_values), number=repeat, setup="import numpy as np") / repeat
time_core_list = timeit.timeit("[i for i in range({})]".format(nb_values), number=repeat) / repeat
return time_numpy_array, time_core_list
@staticmethod
def compute_time_reading(nb_values=100, repeat=10000):
"""Compare the time of reading of a numpy array and a core list.
:param int nb_values: The number of values in the iterables.
:param int repeat: The number of times we repeat the experiment.
:return: The time of reading of the numpy array and the core list.
:rtype: tuple of float
"""
numpy_setup = "import numpy as np\nnumpy_array = np.array(range({}))".format(nb_values)
numpy_test = "for i in numpy_array: pass"
core_setup = "core_list = [i for i in range({})]".format(nb_values)
core_test = "for i in core_list: pass"
time_numpy_array = timeit.timeit(numpy_test, setup=numpy_setup, number=repeat) / repeat
time_core_list = timeit.timeit(core_test, setup=core_setup, number=repeat) / repeat
return time_numpy_array, time_core_list
@staticmethod
def compute_time_writing(nb_values=100, repeat=10000):
"""Compare the time of writing of a numpy array and a core list.
:param int nb_values: The number of values in the iterables.
:param int repeat: The number of times we repeat the experiment.
:return: The time of writing of the numpy array and the core list.
:rtype: tuple of float
"""
numpy_setup = "import numpy as np\nnumpy_array = np.array(range({}))".format(nb_values)
numpy_test = "for i in range(len(numpy_array)): numpy_array[i] = 1"
core_setup = "core_list = [i for i in range({})]".format(nb_values)
core_test = "for i in range(len(core_list)): core_list[i] = 1"
time_numpy_array = timeit.timeit(numpy_test, setup=numpy_setup, number=repeat) / repeat
time_core_list = timeit.timeit(core_test, setup=core_setup, number=repeat) / repeat
return time_numpy_array, time_core_list
@staticmethod
def display_compare_init(nb_values=100, repeat=10000):
    """Print a report comparing initialization times of a numpy array and a core list.

    :param int nb_values: The number of values in the iterables.
    :param int repeat: The number of times we repeat the experiment.
    """
    mean_numpy, mean_list = NumpyVSList.compute_time_init(nb_values, repeat)
    report = [
        "=====RESULT INITIATION=====",
        "Number of values: {}".format(nb_values),
        "Number of repeat: {}".format(repeat),
        "Mean numpy array: {}".format(mean_numpy),
        "Mean core list: {}".format(mean_list),
        "T_numpy - T_core = {}".format(mean_numpy - mean_list),
        "===========================",
    ]
    for line in report:
        print(line)
@staticmethod
def display_compare_reading(nb_values=100, repeat=10000):
    """Display and return the mean reading times of a numpy array and a core list.

    :param int nb_values: The number of values in the iterables.
    :param int repeat: The number of times we repeat the experiment.
    :return: The mean reading times (numpy array, core list).
    :rtype: tuple of float
    """
    # The timing lives in compute_time_reading; the setup/test snippet
    # strings previously duplicated here were never used (dead code) and
    # have been removed.
    time_numpy_array, time_core_list = NumpyVSList.compute_time_reading(nb_values, repeat)
    print("=====RESULT READING=====")
    print("Number of values: {}".format(nb_values))
    print("Number of repeat: {}".format(repeat))
    print("Mean numpy array: {}".format(time_numpy_array))
    print("Mean core list: {}".format(time_core_list))
    print("T_numpy - T_core = {}".format(time_numpy_array - time_core_list))
    print("===========================")
    return time_numpy_array, time_core_list
@staticmethod
def display_compare_writing(nb_values=100, repeat=10000):
    """Display and return the mean writing times of a numpy array and a core list.

    :param int nb_values: The number of values in the iterables.
    :param int repeat: The number of times we repeat the experiment.
    :return: The mean writing times (numpy array, core list).
    :rtype: tuple of float
    """
    # The timing lives in compute_time_writing; the setup/test snippet
    # strings previously duplicated here were never used (dead code) and
    # have been removed.
    time_numpy_array, time_core_list = NumpyVSList.compute_time_writing(nb_values, repeat)
    print("=====RESULT WRITING=====")
    print("Number of values: {}".format(nb_values))
    print("Number of repeat: {}".format(repeat))
    print("Mean numpy array: {}".format(time_numpy_array))
    print("Mean core list: {}".format(time_core_list))
    print("T_numpy - T_core = {}".format(time_numpy_array - time_core_list))
    print("===========================")
    return time_numpy_array, time_core_list
@staticmethod
def compare_all_plot(nb_values=100, repeat=10000):
    """Plot in a bar plot the mean computation times of different operations
    on a numpy array and a core list.

    :param int nb_values: The number of values in the iterables.
    :param int repeat: The number of times we repeat the experiment.
    """
    time_numpy_array = {}
    time_core_list = {}
    for operation in ["init", "reading", "writing"]:
        # Bug fix: no `NumpyVSList.compute` method is defined in this file;
        # dispatch to the matching compute_time_* method instead, and
        # forward nb_values/repeat (previously marked NOT IMPLEMENTED).
        compute = getattr(NumpyVSList, "compute_time_" + operation)
        times = compute(nb_values, repeat)
        time_numpy_array[operation] = times[0]
        time_core_list[operation] = times[1]
    print(time_numpy_array)
    print(time_core_list)
    plt.bar(time_numpy_array.keys(), time_numpy_array.values())
    plt.bar(time_core_list.keys(), time_core_list.values())
    plt.show()
NumpyVSList.compare_all_plot()
|
charlyalizadeh/ESILV_ADSA_Problem | interface/base.py | <gh_stars>0
import curses
import uuid
import os
class App:
    """Thin wrapper owning the curses screen lifecycle.

    :param bool unicode_support: Whether widgets may draw unicode glyphs.
    """

    def __init__(self, unicode_support=False):
        # Make the escape key register immediately instead of after a delay.
        os.environ.setdefault('ESCDELAY', '0')
        self.stdscr = curses.initscr()
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)  # hide the hardware cursor
        self.stdscr.keypad(True)
        self.windows = {}
        self.order = []
        self.deactivated_windows = []
        # Bug fix: the constructor argument was ignored (always False).
        self.unicode_support = unicode_support

    def __del__(self):
        # Restore the terminal to a sane state on teardown.
        curses.nocbreak()
        self.stdscr.keypad(False)
        curses.echo()
        curses.endwin()
class Widget:
    """Base class for every drawable interface element.

    :param coord: (y, x) position of the widget. (Default [0, 0])
    :param id: Optional identifier for the widget. (Default None)
    """

    def __init__(self, coord=None, id=None):
        # Bug fix: the old default `coord=[0, 0]` was a shared mutable
        # default argument; mutating one widget's coord mutated them all.
        self.coord = [0, 0] if coord is None else coord
        # Bug fix: `id` was accepted but silently dropped.
        self.id = id

    def draw(self, app):
        """Draw the common footer; subclasses extend this."""
        height, width = app.stdscr.getmaxyx()
        app.stdscr.addstr(height - 1, 0, "Copyright: © <NAME>, <NAME>")

    def update(self, app):
        """Handle input; subclasses override."""
        pass

    def start(self, app):
        """Run the widget's event loop; subclasses override."""
        pass
|
charlyalizadeh/ESILV_ADSA_Problem | interface/menu.py | import curses
from .base import *
class Menu(Widget):
    """An interactive vertical menu.

    :param coord: (y, x) of the top-left corner of the menu.
    :param texts: Menu entries; the first one acts as a title when `title` is True.
    :param bool title: Whether the first entry is a non-selectable title.
    :param bool center: Whether entries are padded to be centered on the widest one.
    """

    def __init__(self, coord, texts, title=True, center=True):
        super().__init__(coord)
        self.texts = list(texts)
        if center:
            width = len(max(texts, key=len))
            for idx, text in enumerate(texts):
                left = int(width / 2 - len(text) / 2)
                padded = " " * left + text
                self.texts[idx] = padded + " " * (width - len(padded))
        self.title = title
        self.select = 1 if self.title else 0
        self.function = lambda _: None

    def _first_index(self):
        # Index of the first selectable entry (the title is skipped).
        return 1 if self.title else 0

    def _go_up(self):
        # Wrap around to the bottom when already on the first entry.
        if self.select == self._first_index():
            self.select = len(self.texts) - 1
        else:
            self.select -= 1

    def _go_down(self):
        # Wrap around to the top when already on the last entry.
        if self.select == len(self.texts) - 1:
            self.select = self._first_index()
        else:
            self.select += 1

    def bind(self, f):
        """Register the callback invoked with the selected index on Enter."""
        self.function = f

    def _draw_border(self, app):
        pass

    def _draw_text(self, app):
        y, x = self.coord[0], self.coord[1]
        for idx, text in enumerate(self.texts):
            # Highlight the currently selected entry.
            attr = curses.A_STANDOUT if idx == self.select else curses.A_NORMAL
            app.stdscr.addstr(y + idx, x, text, attr)

    def draw(self, app):
        super().draw(app)
        self._draw_border(app)
        self._draw_text(app)

    def update(self, app):
        """Process one key press; return False to leave the menu loop."""
        key = app.stdscr.getch()
        if key == curses.KEY_UP:
            self._go_up()
        elif key == curses.KEY_DOWN:
            self._go_down()
        elif key in (curses.KEY_ENTER, 10, 13):
            return self.function(self.select)
        elif key == 27:  # escape
            return False
        return True

    def start(self, app):
        """Event loop: redraw until update() returns a falsy value."""
        self.draw(app)
        while self.update(app):
            app.stdscr.clear()
            app.stdscr.refresh()
            self.draw(app)
|
charlyalizadeh/ESILV_ADSA_Problem | start.py | <gh_stars>0
from adsaapp import ADSAApp
from interface import App
def main():
    """Build the curses application and hand control to the ADSA interface."""
    adsa_app = ADSAApp(App())
    adsa_app.start()


if __name__ == '__main__':
    main()
|
charlyalizadeh/ESILV_ADSA_Problem | steps/step2/findimpostors.py | <gh_stars>0
import numpy as np
def get_suspects_pairs(suspects, adjacency_matrix, dead_players):
    """Return the pairs of suspects of a game.

    :param suspects: Iterable of suspect players, or the dict produced by
        get_suspects() (only its keys are used).
    :param adjacency_matrix: Matrix of players that have seen each other.
    :param dead_players: Iterable of players that have been eliminated.
    :return: Sorted list of pairs of living players that never saw each other.
    :rtype: list of tuple
    """
    # isinstance instead of `type(...) is dict` so dict subclasses work too.
    if isinstance(suspects, dict):
        suspects = suspects.keys()
    suspects_pairs = set()
    for suspect in suspects:
        for other in range(len(adjacency_matrix[suspect])):
            # Two living players that never saw each other form a suspect pair.
            if (adjacency_matrix[suspect, other] == 0
                    and other not in dead_players
                    and other != suspect):
                suspects_pairs.add((min(other, suspect), max(other, suspect)))
    # Default tuple ordering is (first, second) — same as the old explicit key.
    return sorted(suspects_pairs)
def get_suspects(adjacency_matrix, dead_players):
    """Map each living player who met a dead player to how many dead players they met.

    :param adjacency_matrix: A two-dimensional numpy matrix representing the adjacency matrix.
    :param dead_players: list of dead players.
    :return: A dictionary mapping suspect -> number of dead players met.
    :rtype: dict
    """
    suspects = {}
    for victim in dead_players:
        for player in range(len(adjacency_matrix[victim])):
            # Skip dead players and players who never met this victim.
            if player in dead_players or adjacency_matrix[victim, player] != 1:
                continue
            suspects[player] = suspects.get(player, 0) + 1
    return suspects
|
charlyalizadeh/ESILV_ADSA_Problem | steps/step1/arraywrapper.py | <filename>steps/step1/arraywrapper.py
from .player import Player
class Array:
    """Wrapper around the list type storing Player objects ranked by score."""

    def __init__(self, size=0):
        self.size = size  # logical number of players (may shrink via delete_last)
        self._array = [Player(i, 0) for i in range(self.size)]

    def delete_last(self, n=10):
        """Reduce the value of self.size (drops the `n` last players).

        :param int n: The number which is stripped from `self.size`.
        """
        self.size -= n

    def add_values(self, values):
        """Add `values` element-wise to the players' scores.

        :param iterable values: Score increments, one per player.
        """
        for i in range(self.size):
            if i >= len(values):
                break
            self._array[i].score += values[i]

    def sort(self, alg="merge"):
        """Sort the array in descending score order with the specified algorithm.

        :param str alg: "merge" (built-in Timsort) or "counting"
            ("couting" is still accepted for backward compatibility with
            the historic misspelling).
        """
        if alg == "merge":
            self._array = sorted(self._array, reverse=True)
        # Bug fix: only the misspelled "couting" was recognized before, so
        # asking for "counting" silently did nothing.
        elif alg in ("counting", "couting"):
            self._array = self.counting_sort(self._array)

    def counting_sort(self, array):
        """Return a list sorted in descending score order with counting sort.

        :param iterable array: Iterable which is sorted.
        :return: Sorted list version of `array`.
        :rtype: list
        """
        max_node = max(self._array)
        count = [0] * (max_node.score + 1)
        for node in array:
            count[node.score] += 1
        total = 0
        # Accumulate from the highest score down so the output is descending.
        for i in range(max_node.score, -1, -1):
            count[i], total = total, count[i] + total
        output = [0] * len(self._array)
        for node in self._array:
            output[count[node.score]] = node
            count[node.score] += 1
        return output

    def display_cli(self):
        """Print the players' scores in the console, dash-separated."""
        for node in self._array:
            # Bug fix: Player has no `.value` attribute; print the score.
            print(node.score, end='-')
        print()

    def __getattr__(self, key):
        if key == "nb_player":
            return self.size
        # Bug fix: returning None for every unknown attribute hid errors;
        # raise like a normal attribute lookup failure instead.
        raise AttributeError(key)

    def __iter__(self):
        return self._array.__iter__()

    def __next__(self):
        return self._array.__next__()

    def __str__(self):
        description = ""
        index = 0
        # (Removed a stray debug print of self.size.)
        for node in self._array:
            description += node.__str__() + "\n"
            index += 1
            if index == self.size:
                break
        return description[:-1]

    def __len__(self):
        return self.size
|
def read_coordinates(path):
    """Read two-dimensional coordinates from a file.

    Each line must look like "x,y"; the y value is negated (screen
    coordinates grow downwards).

    :param str path: path to the coordinates file.
    :return: mapping of line index to [x, -y].
    :rtype: dict of int -> list of float
    """
    coordinates = {}
    # `with` guarantees the file handle is closed (the old code leaked it).
    with open(path, "r") as file:
        for index, line in enumerate(file):
            parts = line.split(",")
            coordinates[index] = [float(parts[0]), -float(parts[1])]
    return coordinates
|
charlyalizadeh/ESILV_ADSA_Problem | steps/step1/player.py | <gh_stars>0
class Player:
    """Player implementation for the step 1.

    :param id: unique identifier for the player. (Default None)
    :param score: score of the player. (Default 0)
    """

    def __init__(self, id=None, score=0):
        self.id = id
        self.score = score

    def __lt__(self, other):
        # Players are ordered by score only.
        return self.score < other.score

    def __eq__(self, other):
        # Bug fix: `self.socre` (typo) raised AttributeError on every
        # equality comparison.
        return self.score == other.score

    def __str__(self):
        return "ID: {0}, Score: {1}".format(self.id, self.score)
|
charlyalizadeh/ESILV_ADSA_Problem | steps/graph/unionfind.py | import pandas as pd
class UnionFind:
    """Disjoint-set data structure with path compression and union by size."""

    def __init__(self):
        self.parents = []    # parent index of each internal index
        self.index_map = {}  # element -> internal index
        self.size = []       # size of the tree rooted at each index

    def make_set(self, node):
        """Register `node` as a singleton set (no-op if already known)."""
        if node not in self.index_map.keys():
            index = len(self.parents)
            self.parents.append(index)
            self.index_map[node] = index
            self.size.append(1)

    def make_sets(self, nodes):
        """Register every node in `nodes`."""
        for node in nodes:
            self.make_set(node)

    def _find_root(self, index):
        # Path compression: re-parent every visited index directly onto the root.
        if self.parents[index] != index:
            self.parents[index] = self._find_root(self.parents[index])
        return self.parents[index]

    def find(self, node):
        """Return the root index of the set containing `node`.

        Bug fix: the previous implementation recursed with an internal
        *index* where a *node* was expected, crashing with KeyError for
        any node that was not equal to its own index (e.g. strings).
        """
        return self._find_root(self.index_map[node])

    def union(self, node1, node2):
        """Merge the sets containing `node1` and `node2` (union by size)."""
        root1 = self.find(node1)
        root2 = self.find(node2)
        if root1 == root2:
            return None
        # Attach the smaller tree under the larger one.
        if self.size[root1] < self.size[root2]:
            root1, root2 = root2, root1
        self.parents[root2] = root1
        self.size[root1] += self.size[root2]

    def __str__(self):
        nodes = self.index_map.keys()
        index = [self.index_map[n] for n in nodes]
        sizes = [self.size[i] for i in index]
        parents = [self.parents[i] for i in index]
        df = pd.DataFrame({"element": nodes,
                           "index": index,
                           "size": sizes,
                           "parent": parents
                           })
        return df.__str__()
|
charlyalizadeh/ESILV_ADSA_Problem | adsaapp.py | from interface import *
from steps import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from copy import copy
class ADSAApp():
    """The class managing the interface for the project.

    :param App app: The curses app wrapper where we will draw the interface.
    """

    def __init__(self, app=None):
        if app is None:
            app = App()
        self.app = app
        height, width = self.app.stdscr.getmaxyx()
        self.widgets = {}
        # Main Menu
        texts_main_menu = ["Choose a step:", "Step 1", "Step 2", "Step 3", "Step 4", "Exit"]
        main_menu = Menu(self._get_coord_centered(height, width, texts_main_menu), texts_main_menu, True, True)
        main_menu.bind(lambda x: self.main_menu_function(x))
        self.widgets["main_menu"] = main_menu
        # Step1 Menu (typo fix: "Wich" -> "Which")
        texts_step1 = ["Which datastructure do you want to use ?", "AVL Tree", "Array", "Return"]
        step1_menu = Menu(self._get_coord_centered(height, width, texts_step1), texts_step1, True, True)
        step1_menu.bind(lambda x: self.step1_menu_function(x))
        self.widgets["step1_menu"] = step1_menu

    def main_menu_function(self, index):
        """Dispatch a main-menu selection; return False to leave the menu loop."""
        self.app.stdscr.clear()
        if index == 1:
            self.widgets["step1_menu"].start(self.app)
        elif index == 2:
            self._find_impostors("data/adjacency_matrix.txt")
        elif index == 3:
            self._get_distance("data/graph_crewmates.txt", "data/graph_impostors.txt")
        elif index == 4:
            #self.display_step4()
            self.step4()
        elif index == 5:
            return False
        return True

    def step1_menu_function(self, index):
        """Run the step-1 tournament with the selected data structure."""
        self.app.stdscr.clear()
        # (Removed an unused `game = None` local.)
        if index == 1:
            self._play_game("AVLTree")
        elif index == 2:
            self._play_game("Array")
        return False

    def _play_game(self, datastructure):
        """Simulate a full tournament and display its progress.

        :param str datastructure: "AVLTree" or "Array"; backend storing the players.
        """
        height, width = self.app.stdscr.getmaxyx()
        screen_game = FakeScreen([5, 5], [height - 10, width - 10])
        game = Game(datastructure)
        screen_game.insert_line(f"Game created with {datastructure} to store the players.")
        # Pool phase: three rounds with everyone.
        for i in range(3):
            screen_game.insert_line(f"Playing round {game.round}.")
            screen_game.insert_line(f" ↪Remaining players {game.get_nb_players()}.")
            game.simulate_game()
            game.sort_players()
        screen_game.insert_line(f"END POOL !")
        # Elimination phase: drop the worst players until 10 remain.
        while game.get_nb_players() > 10:
            screen_game.insert_line(f"Playing round {game.round}")
            screen_game.insert_line(f" ↪Remaining players {game.get_nb_players()}.")
            game.simulate_game(True)
            game.sort_players()
            game.delete_last_player()
        screen_game.insert_line(f"FINALS:")
        # Finals: five rounds among the last 10 players.
        for i in range(5):
            screen_game.insert_line(f"Playing round {game.round}")
            screen_game.insert_line(f" ↪Remaining players {game.get_nb_players()}.")
            game.simulate_game(True)
            game.sort_players()
        last_players = game.players.__str__().split('\n')
        if datastructure == "AVLTree":
            # The AVL tree prints ascending; show the ranking best-first.
            last_players = last_players[::-1]
        for i in range(len(last_players)):
            screen_game.insert_line(f"{i + 1}. {last_players[i]}")
        screen_game.start(self.app)

    def _find_impostors(self, filepath):
        """Display the suspects and suspect pairs computed from an adjacency matrix file."""
        height, width = self.app.stdscr.getmaxyx()
        screen_game = FakeScreen([5, 5], [height - 10, width - 10])
        adjacency_matrix = np.genfromtxt(filepath, delimiter=",")
        suspects = get_suspects(adjacency_matrix, [0])
        screen_game.insert_line("Suspects:")
        for key, val in suspects.items():
            screen_game.insert_line(f" {key} is a suspect. He met {val} dead player.")
        suspects_pair = get_suspects_pairs(suspects, adjacency_matrix, [0])
        screen_game.insert_line("")
        screen_game.insert_line("Suspects pair:")
        for pair in suspects_pair:
            screen_game.insert_line(f" {pair[0]} and {pair[1]}")
        screen_game.insert_line("")
        screen_game.insert_line("Press the escape key to continue...")
        screen_game.start(self.app)

    def _get_distance(self, filepath_crewmates, filepath_impostors, position=None):
        """Display Floyd-Warshall distance tables for crewmates and impostors graphs."""
        height, width = self.app.stdscr.getmaxyx()
        screen_game = FakeScreen([5, 5], [height - 10, width - 10])
        pd.set_option('display.max_rows', 500)
        pd.set_option('display.max_columns', 500)
        pd.set_option('display.expand_frame_repr', False)
        names = ["Reactor", "UpperE", "LowerE", "Security", "Electrical",
                 "Medbay", "Storage", "Cafetaria", "Unnamed1", "Unnamed2",
                 "O2", "Weapons", "Shield", "Navigations"]
        graph_crewmates = Graph(0)
        graph_crewmates.import_from_file(filepath_crewmates)
        distances = graph_crewmates.floydWarshall()
        df_crewmates = pd.DataFrame(data=distances, index=names, columns=names)
        lines = df_crewmates.__str__().split("\n")
        screen_game.insert_line("CREWMATES")
        for line in lines:
            screen_game.insert_line(line)
        # The impostors graph has one extra room (the vent corridor).
        names = ["Reactor", "UpperE", "LowerE", "Security", "Electrical",
                 "Medbay", "Storage", "Cafetaria", "Unnamed1", "Unnamed2",
                 "O2", "Weapons", "Shield", "Navigations", "CorridorW"]
        graph_impostors = Graph(0)
        graph_impostors.import_from_file(filepath_impostors)
        distances = graph_impostors.floydWarshall()
        df_impostors = pd.DataFrame(data=distances, index=names, columns=names)
        lines = df_impostors.__str__().split("\n")
        screen_game.insert_line("")
        screen_game.insert_line("IMPOSTORS")
        for line in lines:
            screen_game.insert_line(line)
        screen_game.start(self.app)

    def step4(self):
        """Launch the dedicated step-4 sub application."""
        step4app = Step4App(self.app)
        step4app.start()

    def display_step4(self, adjmatrix_path="data/graph_crewmates.txt", pos_path="data/coordinates.txt"):
        """Legacy step-4 display: show and plot the shortest Hamilton path."""
        graph = Graph(0)
        graph.import_from_file(adjmatrix_path)
        paths = graph.get_all_hamilton_path()
        height, width = self.app.stdscr.getmaxyx()
        screen_game = FakeScreen([5, 5], [height - 10, width - 10])
        screen_game.insert_line(f"Graph with {graph.nb_vertex} vertices and {graph.nb_edge} edges.")
        screen_game.insert_line(f"This graph contains {len(paths)} hamilton paths.")
        # Bug fix: get_shortest_path returns a (path, weight) tuple; the old
        # code passed the whole tuple around as if it were the path.
        shortest_path, _ = graph.get_shortest_path(paths)
        screen_game.insert_line(f"Shortest path -> {shortest_path}")
        screen_game.insert_line("Press the escape key to display the shortest path...")
        screen_game.start(self.app)
        graph.set_path(shortest_path)
        graph.plot(filepos=pos_path)
        plt.show()

    def _get_coord_centered(self, height, width, texts):
        """Return the (y, x) coordinate that centers a block of `texts` on screen."""
        max_length = len(max(texts, key=len))
        y = int(height / 2 - len(texts) / 2)
        x = int(width / 2 - max_length / 2)
        return y, x

    def start(self):
        """Enter the main menu loop."""
        self.widgets["main_menu"].start(self.app)
class Step4App():
    """Interface for step 4: exploring Hamilton paths of the crewmates graph.

    :param App app: The curses app wrapper where we will draw the interface.
    :param str adjmatrix_path: Path to the graph definition file.
    :param str pos_path: Path to the vertex coordinates file used for plotting.
    """

    def __init__(self, app=None, adjmatrix_path="data/graph_crewmates.txt", pos_path="data/coordinates.txt"):
        if app is None:
            app = App()
        self.app = app
        height, width = self.app.stdscr.getmaxyx()
        self.widgets = {}
        self.graph = Graph(0)
        self.graph.import_from_file(adjmatrix_path)
        self.pos_path = pos_path
        # Default: any vertex may start/end the path.
        self.src_vertex = range(self.graph.nb_vertex)
        self.dst_vertex = range(self.graph.nb_vertex)
        # Main Menu (typo fixes: "Minium" -> "Minimum")
        texts_main_menu = ["Choose an option for finding a Hamilton path:",
                          "Minimum between all path",
                          "Minimum at a specific source vertex",
                          "Minimum at a specific destination vertex",
                          "Minimum at a specific source and destination vertices",
                          "Exit"]
        main_menu = Menu(self._get_coord_centered(height, width, texts_main_menu),
                         texts_main_menu,
                         True,
                         True)
        main_menu.bind(lambda choice: self.main_menu(choice))
        self.widgets["main_menu"] = main_menu
        # Source vertex menu
        texts_menu_src_vertex = ["Choose a source vertex:"]
        texts_menu_src_vertex.extend(self.graph.label)
        texts_menu_src_vertex.append("All")
        menu_src_vertex = Menu(self._get_coord_centered(height, width, texts_menu_src_vertex),
                               texts_menu_src_vertex,
                               True,
                               True)
        menu_src_vertex.bind(lambda choice: self.menu_src_vertex(choice))
        self.widgets["menu_src_vertex"] = menu_src_vertex
        # Destination vertex menu (typo fix: "an destination" -> "a destination")
        texts_menu_dst_vertex = ["Choose a destination vertex:"]
        texts_menu_dst_vertex.extend(self.graph.label)
        texts_menu_dst_vertex.append("All")
        menu_dst_vertex = Menu(self._get_coord_centered(height, width, texts_menu_dst_vertex),
                               texts_menu_dst_vertex,
                               True,
                               True)
        menu_dst_vertex.bind(lambda choice: self.menu_dst_vertex(choice))
        self.widgets["menu_dst_vertex"] = menu_dst_vertex

    def main_menu(self, choice):
        """Dispatch a main-menu selection, collecting source/destination first when needed."""
        if choice == 1:
            self.min_hamilton_path()
        if choice == 2:
            self.app.stdscr.clear()
            self.widgets["menu_src_vertex"].start(self.app)
            self.min_hamilton_path()
        if choice == 3:
            self.app.stdscr.clear()
            self.widgets["menu_dst_vertex"].start(self.app)
            self.min_hamilton_path()
        if choice == 4:
            self.app.stdscr.clear()
            self.widgets["menu_src_vertex"].start(self.app)
            self.widgets["menu_dst_vertex"].start(self.app)
            self.min_hamilton_path()

    def min_hamilton_path(self, src_vertex=None, dst_vertex=None):
        """Show (and optionally plot) the shortest Hamilton path between vertex sets.

        :param iterable src_vertex: Allowed starting vertices (defaults to self.src_vertex).
        :param iterable dst_vertex: Allowed ending vertices (defaults to self.dst_vertex).
        """
        self.app.stdscr.clear()
        if src_vertex is None:
            src_vertex = self.src_vertex
        if dst_vertex is None:
            dst_vertex = self.dst_vertex
        graph = self.graph
        paths = graph.get_hamilton_path(src_vertex, dst_vertex)
        height, width = self.app.stdscr.getmaxyx()
        screen_game = FakeScreen([5, 5], [height - 10, width - 10])
        screen_game.insert_line(f"Graph with {graph.nb_vertex} vertices and {graph.nb_edge} edges.")
        screen_game.insert_line(f"Starting vertex index: {src_vertex}")
        screen_game.insert_line(f"Starting vertex label: {[self.graph.label[i] for i in src_vertex]}")
        screen_game.insert_line(f"Ending vertex index: {dst_vertex}")
        screen_game.insert_line(f"Ending vertex label: {[self.graph.label[i] for i in dst_vertex]}")
        if not paths:
            screen_game.insert_line("")
            screen_game.insert_line("No Hamilton paths")
            screen_game.insert_line("Press the escape key to continue...")
            screen_game.start(self.app)
        else:
            screen_game.insert_line("")
            screen_game.insert_line(f"{len(paths)} hamilton paths found.")
            shortest_path, min_weight = graph.get_shortest_path(paths)
            screen_game.insert_line(f"Shortest path index -> {shortest_path}")
            screen_game.insert_line(f"Shortest path label -> {[self.graph.label[i] for i in shortest_path]}")
            screen_game.insert_line(f"Min weight -> {min_weight}")
            screen_game.insert_line("")
            screen_game.insert_line("Press the escape key to display the shortest path...")
            screen_game.start(self.app)
            graph.set_path(shortest_path)
            graph.plot(filepos=self.pos_path)
            plt.show()

    def menu_src_vertex(self, choice):
        """Record the chosen source vertex ("All" maps to every vertex)."""
        if choice == self.graph.nb_vertex + 1:
            self.src_vertex = range(self.graph.nb_vertex)
        else:
            # Menu index 0 is the title, so vertex index = choice - 1.
            self.src_vertex = [choice - 1]

    def menu_dst_vertex(self, choice):
        """Record the chosen destination vertex ("All" maps to every vertex)."""
        if choice == self.graph.nb_vertex + 1:
            self.dst_vertex = range(self.graph.nb_vertex)
        else:
            self.dst_vertex = [choice - 1]

    def _get_coord_centered(self, height, width, texts):
        """Return the (y, x) coordinate that centers a block of `texts` on screen."""
        max_length = len(max(texts, key=len))
        y = int(height / 2 - len(texts) / 2)
        x = int(width / 2 - max_length / 2)
        return y, x

    def start(self):
        """Enter the step-4 main menu loop."""
        self.widgets["main_menu"].start(self.app)
|
charlyalizadeh/ESILV_ADSA_Problem | steps/step1/arrayVSavltree.py | from .gameSimulation import Game
import timeit
def benchmark(repeat=1000):
    """Compare the mean runtime of a full game using Array vs AVLTree storage.

    :param int repeat: Number of game simulations to average over.
    """
    # Bug fix: the old setup string did `from GameSimulation import Game`,
    # which fails (the module is gameSimulation, and it is not on sys.path
    # as a top-level name).  Passing the already-imported Game through
    # `globals` avoids any fragile re-import inside the timed snippet.
    time_array = timeit.timeit("game.start()",
                               setup="game = Game('Array')",
                               globals={"Game": Game},
                               number=repeat) / repeat
    time_avltree = timeit.timeit("game.start()",
                                 setup="game = Game('AVLTree')",
                                 globals={"Game": Game},
                                 number=repeat) / repeat
    print("====RESULT====")
    print("Array mean time: {}".format(time_array))
    print("AVLTree mean time: {}".format(time_avltree))


if __name__ == "__main__":
    # Guarded so importing this module no longer runs a 10000-round benchmark.
    benchmark(10000)
|
charlyalizadeh/ESILV_ADSA_Problem | steps/step2/__init__.py | <reponame>charlyalizadeh/ESILV_ADSA_Problem<gh_stars>0
from .findimpostors import *
|
charlyalizadeh/ESILV_ADSA_Problem | steps/__init__.py | from .graph.graph import Graph
from .step1 import *
from .step2 import *
|
charlyalizadeh/ESILV_ADSA_Problem | steps/step1/game.py | from .avltree import AVLTree
from .arraywrapper import Array
import random
class Game:
    """Implementation of the Game for the step 1.

    :param str datastructure: "Array" or anything else for AVLTree; selects the player storage.
    :param int nb_players: The number of players in the game.
    """

    def __init__(self, datastructure="Array", nb_players=100):
        self.nb_players = nb_players
        self.datastructure = datastructure
        self.players = Array(nb_players) if self.datastructure == "Array" else AVLTree(nb_players)
        self.round = 0

    def delete_last_player(self, n=10):
        """Remove the `n` lowest-ranked players from the game.

        :param int n: The number of players to delete.
        """
        self.players.delete_last(n)

    def sort_players(self):
        """Sort the players by score, dispatching on the chosen data structure.

        The AVLTree backend rebuilds a new tree instead of sorting in place,
        hence the two separate private helpers.
        """
        if self.datastructure == "Array":
            self._sort_players_Array()
        elif self.datastructure == "AVLTree":
            self._sort_players_AVLTree()

    def _sort_players_Array(self):
        """Sort helper for the Array backend."""
        self.players.sort()

    def _sort_players_AVLTree(self):
        """Sort helper for the AVLTree backend: rebuild a fresh tree."""
        rebuilt = AVLTree()
        self.players.copy_nodes(rebuilt)
        self.players = rebuilt

    def _generate_random_score(self):
        """Return 10 random scores in [0, 12] (one lobby's worth).

        NOTE(review): scores are uniform draws and are not necessarily
        feasible under the real game rules.

        :return: A list of integers representing the scores of the players.
        :rtype: list of int
        """
        return random.choices(range(13), k=10)

    def simulate_game(self, rand=False):
        """Simulate one round for every 10-player lobby.

        :param bool rand: If True the scores are shuffled across lobbies
            instead of being dealt lobby by lobby in ranking order.
        """
        scores = []
        for _ in range(int(self.players.nb_player / 10)):
            scores.extend(self._generate_random_score())
        if rand:
            random.shuffle(scores)
        self.players.add_values(scores)
        self.round += 1

    def get_nb_players(self):
        """Return the number of players still in the game."""
        return len(self.players)
|
charlyalizadeh/ESILV_ADSA_Problem | steps/graph/graph.py | <gh_stars>0
from math import inf
import numpy as np
from copy import copy
import networkx as nx
from .unionfind import UnionFind
from utils.utils import read_coordinates
class Graph():
"""
Graph implementation with an adjacency matrix.
:param int nb_vertex: Number of vertices to initialize the graph with. (Default 0)
"""
def __init__(self, nb_vertex=0):
self.adj_matrix = np.ones((nb_vertex, nb_vertex)) * inf
self.label = [i for i in range(nb_vertex)]
for i in range(nb_vertex):
self.adj_matrix[i, i] = 0
def get_edges(self, weight=True):
"""Get the edges from the graph.
:param bool weight: Add the weight in the return tuples if True. (Default True)
:return: A list of tuple, each tuple representing one edge.
:rtype: list
"""
edges = []
for i in range(self.nb_vertex - 1):
for j in range(i + 1, self.nb_vertex):
if self.adj_matrix[i, j] != inf:
if weight:
edges.append((i, j, self.adj_matrix[i, j]))
else:
edges.append((i, j))
return edges
def get_neighbors(self, vertex):
"""Get the neighbors of a vertex.
:param int vertex: The vertex whose neighbors are computed.
:return: A list of integer representing the neighbors of `vertex`.
:rtype: list
"""
neighbors = []
for i in range(self.nb_vertex):
if self.are_neighbors(vertex, i) and vertex != i:
neighbors.append(i)
return neighbors
def are_neighbors(self, vertex1, vertex2):
"""Check if `vertex1` and `vertex2` are neighbors.
:param int vertex1: the index of the first vertex.
:param int vertex2: the index of the second vertex.
:return: `True` if `vertex1` and `vertex2` are neighbors, `False` otherwise.
:rtype: bool
"""
return (self.adj_matrix[vertex1, vertex2], self.adj_matrix[vertex2, vertex1]) != (inf, inf)
def rem_edge(self, edge):
"""Delete an edge from the graph
:param iterable edge: A iterable representing an edge.
"""
i, j = edge
self.adj_matrix[i, j] = inf
self.adj_matrix[j, i] = inf
def rem_edges(self, edges):
"""Delete multiple edges from the graph.
:param iterable edges: An iterable containing the edges to be deleted.
"""
for edge in edges:
self.rem_edge(edge)
def rem_all_edges(self):
"""Delete all the edges from the graph."""
dimension = self.nb_vertex
self.adj_matrix = np.ones((dimension, dimension)) * inf
def add_edge(self, edge, weight=1, by="index"):
"""Add an edge to the graph.
:param iterable edge: An iterable representing the edge to be added.
:param weight: The weight of the edge. (Default 1)
:param str by: The method used to add edge. (Default "index")
"""
if by == "index":
self.adj_matrix[edge[0], edge[1]] = weight
self.adj_matrix[edge[1], edge[0]] = weight
elif by == "label":
index1 = self.label.index(edge[0])
index2 = self.label.index(edge[1])
self.adj_matrix[index1, index2] = weight
self.adj_matrix[index2, index1] = weight
def add_edges(self, edges, weights=None):
"""Add multiple edges to the graph.
:param iterable edges: An iterable containing the edges to be added.
:param iterable weights: An iterable containing the weights of the new edges. (Default None)
"""
if weights is None:
weights = [1] * len(edges)
elif len(weights) != len(edges):
raise ValueError("weights must be the same length as edges.")
for i in range(len(edges)):
self.add_edge(edges[i], weights[i])
def add_vertex(self, label=""):
    """Append an isolated vertex to the graph.

    :param str label: Label for the new vertex — NOTE(review): currently
        unused by this method; callers append to self.label themselves.
    """
    if self.nb_vertex == 0:
        # Bug fix: a single-vertex matrix must be 2-D ([[0]]); the previous
        # 1-D np.array([0]) broke every adj_matrix[i, j] access.
        self.adj_matrix = np.array([[0]])
    elif self.nb_vertex == 1:
        self.adj_matrix = np.array([[0, inf], [inf, 0]])
    else:
        # Grow the matrix by one inf row and one inf column, then zero the
        # new diagonal entry.
        self.adj_matrix = np.vstack((self.adj_matrix, np.ones(self.adj_matrix.shape[1]) * inf))
        self.adj_matrix = np.column_stack((self.adj_matrix, np.ones(self.nb_vertex) * inf))
        shape0, shape1 = self.adj_matrix.shape
        self.adj_matrix[shape0 - 1, shape1 - 1] = 0
def set_label_vertex(self, vertex, label):
"""Set the label of a vertex.
:param int vertex: The index of the vertex whose label.
:param str label: The label of the index.
"""
if vertex >= len(self.label):
raise ValueError("`vertex` doesn't exist in the current graph.")
self.label[vertex] = label
def _import_from_file_specific_index(self, lines):
    """Parse a graph file that begins with an explicit "INDEX" vertex section.

    Expected layout: an "INDEX" header line, one vertex label per line,
    a blank separator line, then "label1,label2,weight" edge lines.

    :param lines: List of raw lines (with trailing newlines) from the file.
    """
    index = 1
    if "INDEX" in lines[0]:
        # Consume vertex labels until the blank separator line.
        while not (lines[index].rstrip("\n").isspace() or lines[index] == "\n"):
            self.add_vertex()
            self.label.append(lines[index].rstrip("\n"))
            index += 1
        # Skip the separator line itself.
        index += 1
    for line in lines[index:]:
        line = line.split(",")
        self.add_edge(line[:2], weight=int(line[2].strip("\n")), by="label")
def _import_from_file(self, lines):
    """Parse "label1,label2,weight" lines, creating vertices on first sight.

    :param lines: List of raw edge lines (with trailing newlines).
    """
    for raw in lines:
        fields = raw.split(",")
        for vertex_label in fields[:2]:
            if vertex_label not in self.label:
                self.add_vertex()
                self.label.append(vertex_label)
        self.add_edge(fields[:2], weight=int(fields[2].strip("\n")), by="label")
def import_from_file(self, filename):
    """Import a graph from a file. (!! Doesn't support same label for two different vertices. !!)

    :param str filename: the name of the file containing the graph data.
    """
    # Reset any existing graph content before loading.
    self.adj_matrix = np.array([])
    self.label.clear()
    with open(filename, "r") as source:
        lines = source.readlines()
    # Files starting with an "INDEX" header carry an explicit vertex section.
    if lines[0].rstrip("\n") == "INDEX":
        self._import_from_file_specific_index(lines)
    else:
        self._import_from_file(lines)
def plot(self, label_as_index=False, position=None, filepos=None):
    """Draw the graph with networkx/matplotlib.

    :param bool label_as_index: Draw vertex indices instead of labels if True.
    :param iterable position: Explicit vertex positions (index -> (x, y)).
    :param str filepos: Path to a coordinates file; takes precedence over `position`.
    """
    G = nx.Graph()
    G.add_nodes_from([(i, {"label": self.label[i]}) for i in range(len(self.label))])
    edges = self.get_edges()
    for edge in edges:
        G.add_edge(edge[0], edge[1], weight=edge[2])
    if filepos is not None:
        pos = read_coordinates(filepos)
    elif position is not None:
        pos = position
    else:
        # Simplified: this branch is only reached when position is None, so
        # the previous inner `if position is None else position` was dead code.
        pos = nx.planar_layout(G)
    nx.draw(G, pos, with_labels=label_as_index, font_weight='bold')
    if not label_as_index:
        nx.draw_networkx_labels(G,
                                pos,
                                dict(zip(range(len(self.label)), self.label)))
    labels = nx.get_edge_attributes(G, 'weight')
    nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
def kruskal(self, inplace=False):
    """Compute a minimum spanning tree via Kruskal's algorithm.

    :param bool inplace: When True, the graph keeps only the spanning-tree edges.
    :return: The edges of the spanning tree and the total weight of the tree.
    :rtype: tuple
    """
    tree_edges, total_weight = self._kruskal()
    if inplace:
        self.rem_all_edges()
        for u, v, w in tree_edges:
            self.add_edge((u, v), w)
    return tree_edges, total_weight
def _kruskal(self):
    """Kruskal core: build the MST edge set with a disjoint-set structure.

    :return: The edges of the spanning tree and the total weight of the tree.
    :rtype: tuple
    """
    components = UnionFind()
    components.make_sets(range(self.nb_vertex))
    chosen = set()
    # Greedily take the lightest edge that connects two distinct components.
    for edge in sorted(self.get_edges(), key=lambda e: e[2]):
        root_u = components.find(edge[0])
        root_v = components.find(edge[1])
        if root_u != root_v:
            chosen.add(edge)
            components.union(root_u, root_v)
    return chosen, sum(e[2] for e in chosen)
def floydWarshall(self):
"""Return the minimal distances between every vertex.
:param matrix: Matrix of adjacency of a graph.
:return: Matrix of minimal distances between every vertex.
"""
self.dist = copy(self.adj_matrix)
for k in range(len(self.adj_matrix)):
for i in range(len(self.adj_matrix)):
for j in range(len(self.adj_matrix)):
self.dist[i][j] = min(self.dist[i][j], self.dist[i][k] + self.dist[k][j])
return self.dist
def get_shortest_path(self, paths):
    """Select the lightest path among `paths`.

    Ties are broken in favour of the earliest path in the sequence.

    :param iterable paths: A non-empty sequence of candidate paths.
    :return: The lightest path and its weight.
    :rtype: tuple
    """
    weights = [self.get_path_weight(p) for p in paths]
    best_index = 0
    for candidate in range(1, len(paths)):
        if weights[candidate] < weights[best_index]:
            best_index = candidate
    return paths[best_index], weights[best_index]
def get_path_weight(self, path):
    """Sum the edge weights along `path`.

    :param iterable path: Sequence of vertex indices; consecutive pairs
        are looked up in the adjacency matrix.
    :return: The weight of `path` (0 for a path of fewer than 2 vertices).
    :rtype: int
    """
    return sum(self.adj_matrix[start, end]
               for start, end in zip(path, path[1:]))
def get_shortest_path_all_hamilton_path(self):
    """Find the shortest Hamilton path over every start/end vertex pair.

    :return: The shortest Hamilton path (with its weight) if such a path
        exists, None otherwise.
    """
    return self.get_shortest_hamilton_path(range(self.nb_vertex),
                                           range(self.nb_vertex))
def get_shortest_hamilton_path(self, starting_vertex=None, ending_vertex=None):
    """Compute the shortest Hamilton path, if such a path exists.

    :param iterable starting_vertex: Allowed starting vertices; defaults
        to every vertex when None.
    :param iterable ending_vertex: Allowed ending vertices; defaults to
        every vertex when None.
    :return: (path, weight) of the shortest Hamilton path, or None when
        the graph has no such path.
    """
    # Bug fix: the original forwarded the None defaults straight to
    # get_hamilton_path, which iterates its arguments and raised
    # TypeError whenever the defaults were used.
    if starting_vertex is None:
        starting_vertex = range(self.nb_vertex)
    if ending_vertex is None:
        ending_vertex = range(self.nb_vertex)
    all_path = self.get_hamilton_path(starting_vertex, ending_vertex)
    if not all_path:
        return None
    return self.get_shortest_path(all_path)
def get_all_hamilton_path(self):
    """List every Hamilton path of the graph.

    :return: All Hamilton paths, from any vertex to any vertex.
    :rtype: list
    """
    every_vertex = range(self.nb_vertex)
    return self.get_hamilton_path(every_vertex, every_vertex)
def get_hamilton_path(self, starting_vertex=None, ending_vertex=None):
    """Enumerate Hamilton paths constrained to the given endpoints.

    :param iterable starting_vertex: Allowed starting vertices.
    :param iterable ending_vertex: Allowed ending vertices.
    :return: Every Hamilton path starting in `starting_vertex` and
        ending in `ending_vertex`.
    :rtype: list
    """
    candidates = []
    for start in starting_vertex:
        self._backtrack_hamilton([start], candidates)
    # Keep only the paths that terminate on an allowed vertex.
    return [candidate for candidate in candidates
            if candidate[-1] in ending_vertex]
def _backtrack_hamilton(self, path, all_path):
    """Recursively extend `path` to enumerate Hamilton paths from path[0].

    :param iterable path: The partial path currently being explored.
    :param iterable all_path: Accumulator collecting complete Hamilton paths.
    """
    if path.count(path[-1]) == 2:
        # The last vertex was already visited: dead end.
        return None
    elif len(path) == self.nb_vertex:
        all_path.append(path)
        # Bug fix: the original fell through and kept recursing after a
        # complete path. Every such branch was immediately rejected by the
        # duplicate check above (all vertices are already used), so the
        # results are unchanged — the wasted recursion is simply removed.
        return None
    for n in self.get_neighbors(path[-1]):
        new_path = path + [n]
        self._backtrack_hamilton(new_path, all_path)
def set_path(self, vertices):
    """Convert the graph to a path graph through `vertices`.

    All existing edges are removed, then the consecutive pairs of
    `vertices` are re-added with their current adjacency-matrix weights.

    :param iterable vertices: Path to be converted to.
    """
    path_edges = list(zip(vertices, vertices[1:]))
    path_weights = [self.adj_matrix[u, v] for u, v in path_edges]
    self.rem_all_edges()
    for path_edge, path_weight in zip(path_edges, path_weights):
        self.add_edge(path_edge, path_weight)
def __getattr__(self, key):
    """Expose derived attributes computed from the adjacency matrix.

    :raise AttributeError: For any name other than the two handled below.
        The original implicitly returned None for unknown names, which
        broke hasattr() and silently masked attribute typos.
    """
    if key == "nb_vertex":
        return self.adj_matrix.shape[0]
    if key == "nb_edge":
        return len(self.get_edges())  # TODO: more efficient solution.
    raise AttributeError(key)
def __str__(self):
    """Return the string form of the adjacency matrix."""
    return str(self.adj_matrix)
|
charlyalizadeh/ESILV_ADSA_Problem | interface/fakescreen.py | import curses
from .base import *
class FakeScreen(Widget):
    """A scrollable text viewport drawn inside a curses screen.

    Renders `self.lines` inside a bordered rectangle of size `dim`,
    scrolling vertically/horizontally through `self.offsets` with the
    arrow keys; Escape (27) leaves the event loop.

    :param tuple coord: (row, col) of the top-left corner.
    :param tuple dim: (height, width) of the widget.
    """
    def __init__(self, coord, dim):
        super().__init__(coord)
        self.dim = dim
        self.lines = []           # displayed text, one string per entry
        self.offsets = [0, 0]     # [vertical, horizontal] scroll offsets
        self.longest_string = 0   # widest line seen; horizontal scroll bound
    def _draw_border(self, app):
        """Draw a highlighted one-character border around the widget."""
        for i in range(self.coord[0], self.coord[0] + self.dim[0] + 1):
            app.stdscr.addstr(i, self.coord[1], " ", curses.A_STANDOUT)
            app.stdscr.addstr(i, self.coord[1] + self.dim[1], " ", curses.A_STANDOUT)
        for i in range(self.coord[1], self.coord[1] + self.dim[1] + 1):
            app.stdscr.addstr(self.coord[0], i, " ", curses.A_STANDOUT)
            app.stdscr.addstr(self.coord[0] + self.dim[0], i, " ", curses.A_STANDOUT)
    def _draw_lines(self, app):
        """Draw the slice of `self.lines` currently visible in the viewport."""
        for i in range(self.offsets[0], self.offsets[0] + self.dim[0] - 1):
            index = i - self.offsets[0]
            if i >= len(self.lines):
                break
            # Clip each line horizontally to the viewport width.
            line = self.lines[i][self.offsets[1]:self.offsets[1] + self.dim[1] - 1]
            app.stdscr.addstr(self.coord[0] + 1 + index, self.coord[1] + 1, line)
    def set_line(self, index, line):
        # Bug fix: the original signature was `set_line(index, line)` —
        # missing `self` — so every `self.…` reference in the body raised
        # NameError when the method was called.
        """Replace the line at `index`, keeping the widest-line bound current."""
        if len(line) > self.longest_string:
            self.longest_string = len(line)
        self.lines[index] = line
    def insert_line(self, line):
        """Append a line, auto-scrolling down once the viewport is full."""
        if len(line) > self.longest_string:
            self.longest_string = len(line)
        if len(self.lines) >= self.dim[0] - 1:
            self.offsets[0] += 1
        self.lines.append(line)
    def _go_up(self):
        """Scroll the content up one line (if any line remains below)."""
        if len(self.lines) - self.offsets[0] >= self.dim[0]:
            self.offsets[0] += 1
    def _go_down(self):
        """Scroll the content down one line (if not already at the top)."""
        if self.offsets[0] > 0:
            self.offsets[0] -= 1
    def _go_right(self):
        """Scroll right while some text is still hidden past the right edge."""
        if self.longest_string - self.offsets[1] > self.dim[1] - 1:
            self.offsets[1] += 1
    def _go_left(self):
        """Scroll left (if not already at the left edge)."""
        if self.offsets[1] > 0:
            self.offsets[1] -= 1
    def draw(self, app):
        """Render border and visible lines onto the app's curses screen."""
        super().draw(app)
        self._draw_border(app)
        self._draw_lines(app)
    def update(self, app):
        """Handle one keypress; return False when the user exits (Escape)."""
        c = app.stdscr.getch()
        if c == curses.KEY_DOWN:
            self._go_up()
        elif c == curses.KEY_UP:
            self._go_down()
        elif c == curses.KEY_LEFT:
            self._go_left()
        elif c == curses.KEY_RIGHT:
            self._go_right()
        elif c == 27:
            return False
        return True
    def start(self, app):
        """Run the draw/update loop until the user exits."""
        self.draw(app)
        while self.update(app):
            app.stdscr.clear()
            app.stdscr.refresh()
            self.draw(app)
|
charlyalizadeh/ESILV_ADSA_Problem | steps/step1/avltree.py | from collections import deque
import copy
from graphviz import Digraph
import random
from .player import Player
class NodePlayer:
    """Represent a node in the AVLTree class.

    :param Player player: The wrapped player; its score is the sort key.
    :param NodePlayer left: The left child. (Default: None)
    :param NodePlayer right: The right child. (Default: None)
    :param int height: The height of the node. (Default: 0)
    """
    def __init__(self, player, left=None, right=None, height=0):
        self.player = player
        self.left = left
        self.right = right
        self.height = height
    def update_height(self):
        """Update the height of a node.

        Assign to self.height the maximum height of its children plus one
        (0 for a leaf).
        """
        if self.left is None and self.right is None:
            self.height = 0
        elif self.left is None:
            self.height = self.right.height + 1
        elif self.right is None:
            self.height = self.left.height + 1
        else:
            self.height = max(self.left.height, self.right.height) + 1
    def get_balance_factor(self):
        """Compute the balance factor of a node.

        The balance factor is right-subtree height minus left-subtree
        height; a missing subtree counts as height -1.

        :return: The balance factor.
        :rtype: int
        """
        if self.left is None and self.right is None:
            return 0
        elif self.left is None:
            return self.right.height - (-1)
        elif self.right is None:
            return -1 - self.left.height
        else:
            return self.right.height - self.left.height
    def get_largest_child(self):
        """Return the child with the largest value.

        :return: The largest child, or None for a leaf.
        :rtype: NodePlayer or None
        """
        if self.left is None and self.right is None:
            return None
        elif self.left is None:
            return self.right
        elif self.right is None:
            return self.left
        else:
            return self.left if self.left.value > self.right.value else self.right
    def __lt__(self, other):
        """Operator overloading of the lesser-than operator.

        :param other: The node which is compared to `self`.
        :return: Whether `self`'s player score is less than `other`'s.
        :rtype: bool
        """
        return self.player.score < other.player.score
    def __eq__(self, other):
        """Operator overloading of the equality operator.

        Bug fix: the original compared `self.player.value`, an attribute
        the wrapped Player does not define (`value` is NodePlayer's alias
        for `player.score`, exposed via __getattr__, not a Player
        attribute) — compare the players' scores directly, mirroring
        __lt__. Note: defining __eq__ leaves NodePlayer unhashable, as in
        the original.

        :param other: The node which is compared to `self`.
        :return: Whether the two nodes hold equal scores.
        :rtype: bool
        """
        return self.player.score == other.player.score
    def __getattr__(self, key):
        """Expose the wrapped player's score and id as `value` and `id`.

        Bug fix: raise AttributeError for any other name — the original
        silently returned None, which broke hasattr() and hid typos.
        """
        if key == "value":
            return self.player.score
        elif key == "id":
            return self.player.id
        raise AttributeError(key)
    def __str__(self):
        return self.player.__str__()
class AVLTree:
    """An AVL tree implementation.

    :param NodePlayer root_node: The root node of the tree.
    :param int nb_node: The total number of nodes in the tree.
    """
    def __init__(self, nb_node=0):
        # Start empty, then seed the tree with `nb_node` zero-score players
        # (ids 0..nb_node-1); insert_nodes keeps self.nb_node in sync.
        self.nb_node = 0
        self.root_node = None
        self.insert_nodes(NodePlayer(Player(i,0)) for i in range(nb_node))
    def _left_rotation(self, node):
        """Apply a left rotation to a node.

        :param NodePlayer node: NodePlayer where we apply the left rotation.
        :raise: AttributeError if node doesn't have a right child.
        :return: The new subtree root (the former right child).
        """
        node_child = node.right
        node_child_subtree = node_child.left
        node_child.left = node
        node.right = node_child_subtree
        # Heights must be refreshed bottom-up: rotated node first.
        node.update_height()
        node_child.update_height()
        return node_child
    def _right_rotation(self, node):
        """Apply a right rotation to a node.

        :param NodePlayer node: NodePlayer where we apply the right rotation.
        :raise: AttributeError if node doesn't have a left child.
        :return: The new subtree root (the former left child).
        """
        node_child = node.left
        node_child_subtree = node_child.right
        node_child.right = node
        node.left = node_child_subtree
        node.update_height()
        node_child.update_height()
        return node_child
    def _insert_node(self, current_node, node):
        """Insert a node in the tree and balance it afterward.

        :param NodePlayer current_node: Parent node.
        :param node: Key that is being inserted.
        :raise: TypeError if we try to insert a different type than self.root_node type or if the type of the tree doesn't support '<' operator.
        :return: A modified current_node with `node` inserted.
        :rtype: NodePlayer
        """
        # Insertion
        if current_node is None:
            current_node = node
        elif node < current_node:
            current_node.left = self._insert_node(current_node.left, node)
        else:
            current_node.right = self._insert_node(current_node.right, node)
        current_node.update_height()
        ## Balance
        balance_factor = current_node.get_balance_factor()
        if balance_factor > 1:
            # Right-heavy: right-left case pre-rotates the child, otherwise
            # a plain left rotation handles the right-right case.
            if node < current_node.right:
                current_node.right = self._right_rotation(current_node.right)
                return self._left_rotation(current_node)
            else:
                return self._left_rotation(current_node)
        elif balance_factor < -1:
            # Left-heavy: left-left case rotates directly, left-right case
            # pre-rotates the child first.
            if node < current_node.left:
                return self._right_rotation(current_node)
            else:
                current_node.left = self._left_rotation(current_node.left)
                return self._right_rotation(current_node)
        return current_node
    def insert_node(self, node):
        """Update some internal variables and call the insert method on node.

        :param node: Value of the node that is being inserted.
        :raise: TypeError if we try to insert a different type than self.root_node type or if the type of the tree doesn't support '<' operator.
        """
        self.nb_node += 1
        if self.root_node is None:
            self.root_node = node
        else:
            self.root_node = self._insert_node(self.root_node, node)
    def insert_nodes(self, nodes):
        """Insert multiple nodes in the tree.

        :param nodes: A list of node to insert in the tree.
        """
        for node in nodes:
            self.insert_node(node)
    def insert_keys(self, keys):
        """Construct NodePlayer instances with values in keys as score and insert them into the tree.

        :param iterable keys: A list of integer representing the score of new nodes inserted.
        """
        for key in keys:
            # Player(id, score): all generated players share id 0.
            self.insert_node(NodePlayer(Player(0, key)))
    def _delete_key(self, current_node, key):
        """Delete a node from an AVL Tree.

        :param NodePlayer current_node: Parent node.
        :param key: Value that is being deleted.
        :return: A modified current_node with `key` deleted or None.
        :rtype: NodePlayer or None
        """
        if current_node is None:
            return current_node
        elif key < current_node.value:
            current_node.left = self._delete_key(current_node.left, key)
        elif key > current_node.value:
            current_node.right = self._delete_key(current_node.right, key)
        else:
            # Node found: a single child (or none) replaces the node directly.
            if current_node.left is None:
                return current_node.right
            elif current_node.right is None:
                return current_node.left
            else:
                # Two children: substitute the in-order successor's value,
                # then delete that successor from the right subtree.
                working_node = current_node.right
                while working_node.left is not None:
                    working_node = working_node.left
                # NOTE(review): this assignment creates an instance
                # attribute `value` that shadows the __getattr__-derived
                # alias for player.score, leaving the wrapped Player
                # unchanged — likely the inconsistency flagged in
                # _add_values_inorder below; confirm before relying on
                # node.player after deletions.
                current_node.value = working_node.value
                current_node.right = self._delete_key(current_node.right, working_node.value)
        current_node.update_height()
        balance_factor = current_node.get_balance_factor()
        if balance_factor > 1:
            # Right-heavy after deletion: right-left vs right-right case.
            if current_node.right.get_balance_factor() < 0:
                current_node.right = self._right_rotation(current_node.right)
                return self._left_rotation(current_node)
            else:
                return self._left_rotation(current_node)
        elif balance_factor < -1:
            # Left-heavy after deletion: left-right vs left-left case.
            if current_node.left.get_balance_factor() > 0:
                current_node.left = self._left_rotation(current_node.left)
                return self._right_rotation(current_node)
            else:
                return self._right_rotation(current_node)
        return current_node
    def delete_key(self, key):
        """Delete a key of an AVL Tree if exist.

        :param key: The key that is deleted.
        :raise: AttributeError if key not in the tree. (But sometimes not, I don't know and don't have the force to search why)

        .. todo::
            Check if key is in the tree
        """
        self.root_node = self._delete_key(self.root_node, key)
        # NOTE(review): decremented unconditionally, even when the key was
        # absent and nothing was removed.
        self.nb_node -= 1
    def delete_last(self, n=10):
        """Delete the n smallest value of the tree.

        :param int n: The number of node to delete.

        ..todo:
            Optimize this function, indeed we can avoid traverse all the left branch every time.
        """
        for i in range(n):
            # The minimum is the leftmost node from the root.
            node = self.root_node
            while node.left is not None:
                node = node.left
            self.delete_key(node.value)
    def _copy_nodes(self, node, new_AVLTree):
        """Insert all the values of the subtree rooted at `node` into another AVLTree.

        In-order traversal; each node's player is duplicated so the trees
        share no NodePlayer instances.

        :param AVLTree new_AVLTree: The AVLTree where we insert the nodes.
        """
        if node is not None:
            self._copy_nodes(node.left, new_AVLTree)
            new_AVLTree.insert_node(NodePlayer(Player(node.player.id, node.player.score)))
            self._copy_nodes(node.right, new_AVLTree)
    def copy_nodes(self, new_AVLTree):
        """Copy all nodes of `self` into a new AVLTree.

        :param AVLTree new_AVLTree: The AVLTree where we insert the nodes.
        """
        self._copy_nodes(self.root_node, new_AVLTree)
    def add_values(self, values, traversal = "inorder"):
        """Set values of the tree in a defined order.

        Set the values of the tree. The length of `values` doesn't need to be equal to the number of nodes in the tree.

        :param iterable values: Values to set.
        :param str traversal: Type of traversal to perform when setting the
            values ("inorder" or "preorder"); any other value is a no-op.
        """
        if traversal == "inorder":
            self._add_values_inorder(values)
        elif traversal == "preorder":
            # NOTE(review): _add_values_preorder is not defined in this
            # class as shown here — this branch would raise AttributeError.
            self._add_values_preorder(values)
    def _add_values_inorder(self, values):
        """Add `values` to the node scores following an in-order traversal.

        Iterative in-order walk with an explicit stack; stops once
        `values` is exhausted.

        :param values: A list of increments applied to the tree nodes' scores.
        """
        s = deque()
        node = self.root_node
        index = 0
        while s or node is not None:
            if node is not None:
                s.append(node)
                node = node.left
            else:
                node = s.pop()
                node.player.score += values[index] # Something wrong with this. Need to clean __getattr__ and __setattr__ in NodePlayer.
                index += 1
                if index >= len(values):
                    break
                node = node.right
    def display_cli(self, data=True, node=None, sep='|', height=False, parent=False, balance=False, index=False, adress=False, id=False):
        """Display the tree in the console.

        :param bool data: Display the height and the number of nodes of the tree if True. (Default: True)
        :param NodePlayer node: The node which subtree is displayed. (Default: None)
        :param str sep: String chosen to seprate nodes. (Default: '|')
        :param bool height: Display the height of each key if True. (Default: False)
        :param bool parent: Display the parent of each key if True. (Default: False)
        :param bool balance: Display the balance factor of each key if True. (Default: False)
        :param bool index: Display the index of each key following a Breadth First search ordering if True. (Default: False)
        :param bool adress: Display the address of each node if True. (Default: False)
        :param bool id: Display the id of each node if True. (Default: False)
        """
        if node is None:
            node = self.root_node
        if node is None:
            return None
        # Breadth-first traversal: `current_list` is the current level,
        # `next_list` collects the children for the next level.
        current_list = deque()
        current_list.append((node, None))
        index_nb = 0
        next_list = deque()
        while current_list:
            while current_list:
                current_node, parent_index = current_list.popleft()
                print(sep,current_node.value,sep, end = '')
                if height:
                    print('<h', current_node.height, '>', sep = '', end = '')
                if parent:
                    print('<p', parent_index, '>', sep = '', end = '')
                if balance:
                    print('<b', current_node.get_balance_factor(), '>', sep = '', end = '')
                if index:
                    print('<i', index_nb, '>', sep = '', end = '')
                if adress:
                    print('<a', current_node, '>', sep = '', end = '')
                if id:
                    print('<id', current_node.id, '>', sep = '', end = '')
                if current_node.left is not None:
                    next_list.append((current_node.left, index_nb))
                if current_node.right is not None:
                    next_list.append((current_node.right, index_nb))
                index_nb += 1
            # One printed line per tree level.
            print()
            current_list = next_list.copy()
            next_list.clear()
        if data:
            print("Height:", self.root_node.height)
            print("Number of nodes:", self.nb_node)
    def plot_graphviz(self, data=True, node=None, sep='|', height=False, parent=False, balance=False, index=False, adress=False, id=False):
        """Construct a graphviz Digraph of the tree.

        :param bool data: Display the height and the number of nodes of the tree if True. (Default: True)
        :param NodePlayer node: The node whose subtree is displayed. (Default: None)
        :param str sep: String chosen to seprate nodes. (Default: '|')
        :param bool height: Display the height of each node if True. (Default: False)
        :param bool parent: Display the parent of each node if True. (Default: False)
        :param bool balance: Display the balance factor of each node if True. (Default: False)
        :param bool index: Display the index of each node following a Breadth First search ordering if True. (Default: False)
        :param bool adress: Display the address of each node if True. (Default: False)
        :param bool id: Display the id of each node if True. (Default: False)
        """
        dot = Digraph()
        if node is None:
            node = self.root_node
        # Same breadth-first walk as display_cli, emitting graphviz
        # nodes/edges keyed by BFS index instead of printing.
        current_list = deque()
        current_list.append((node, None))
        index_nb = 0
        next_list = deque()
        while current_list:
            while current_list:
                current_node, parent_index = current_list.popleft()
                label = str(current_node.value)+'\n'
                if height:
                    label += '<h {}>\n'.format(current_node.height)
                if parent:
                    label += '<p {}>\n'.format(parent_index)
                if balance:
                    label += '<b {}>\n'.format(current_node.get_balance_factor())
                if index:
                    label += '<i {}>\n'.format(index_nb)
                if adress:
                    # NOTE(review): formats the boolean flag `adress`, not the
                    # node — display_cli prints current_node here; likely a bug.
                    label += '<a {}>\n'.format(adress)
                if id:
                    label += '<id {}>\n'.format(current_node.id)
                if current_node.left is not None:
                    next_list.append((current_node.left, index_nb))
                if current_node.right is not None:
                    next_list.append((current_node.right, index_nb))
                dot.node(str(index_nb), label)
                if parent_index is not None:
                    dot.edge(str(parent_index), str(index_nb))
                index_nb += 1
            current_list = next_list.copy()
            next_list.clear()
        return dot
    def __getattr__(self, key):
        # NOTE(review): implicitly returns None for any attribute other
        # than "nb_player", which hides attribute typos from callers.
        if key == "nb_player":
            return self.nb_node
    def _compute_nb_node(self):
        """Method to compute the number of nodes inside the tree."""
        # Iterative in-order count with an explicit stack.
        s = deque()
        nb = 0
        node = self.root_node
        while s or node is not None:
            if node is not None:
                s.append(node)
                node = node.left
            else:
                node = s.pop()
                nb += 1
                node = node.right
        return nb
    def __str__(self):
        # One line per node, in ascending (in-order) score order.
        descrition = ""
        s = deque()
        node = self.root_node
        while s or node is not None:
            if node is not None:
                s.append(node)
                node = node.left
            else:
                node = s.pop()
                descrition += node.__str__() + '\n'
                node = node.right
        # Drop the trailing newline.
        return descrition[:-1]
    def __len__(self):
        return self.nb_node
|
charlyalizadeh/ESILV_ADSA_Problem | interface/__init__.py | <filename>interface/__init__.py
from .base import App, Widget
from .menu import Menu
from .fakescreen import FakeScreen
|
ZeccaLehn/WordWork | WordWork.py | #!/usr/bin/env python
# coding: utf-8
#
# PART 1: WORD FREQUENCIES
#
# In[1]:
# Linux Command from Anaconda (Prints inline): ipython notebook --matplotlib=inline
# NOTE(review): Python 2 script — statement-form `print` and list-returning
# dict.values() slicing below will not run under Python 3 as-is.
import os
HOME = os.environ['HOME'] # Finds computer home on Linux
WORKINGDIR = HOME + '/anaconda/Examples/WordWork' # Points to Anaconda package
WORDINPDIR = WORKINGDIR + '/wc_input' # Points to Input text files
WORDOUTDIR = WORKINGDIR + '/wc_output' # Points to output word counts
# In[2]:
os.chdir(WORDINPDIR) #Sets current directory
Line = open('First.txt', 'r')
print "Example: " + Line.read()
# In[3]:
allFiles = os.listdir(WORDINPDIR) # Stores files in wc_input
print(allFiles)
# In[4]:
# Concatenate the lower-cased contents of every input file.
Lines = ""
for line in allFiles:
    Lines += open(line, 'r').read().lower() #Sets to lower case
print(Lines)
# In[5]:
# Note setup from http://www.nltk.org/install.html
# sudo pip install -U numpy
# Downloaded then ran...
# nltk.download()
import nltk
import nltk.tokenize
from nltk import FreqDist
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+') # Inspired by: <NAME> on StackOverflow
tokens = tokenizer.tokenize(Lines) # Returns a list of words without punctuation
# For setting max frequency
wordFreq = FreqDist(tokens)
maxFreq = wordFreq[FreqDist(tokens).max()] + 1
wordFreq
# In[6]:
import matplotlib
# % matplotlib inline # For plotting in iPython Notebook
# Plots the counts
import matplotlib.pyplot as plt
plt.figure(figsize = [15,5])
plt.bar(range(len(wordFreq)), wordFreq.values(), align="center")
plt.xticks(range(len(wordFreq)), list(wordFreq.keys()))
plt.show()
# In[7]:
## Writes frequency table to 'wc_result.txt' ##
import os
import re
import numpy as np
import pandas as pd
os.chdir(WORDOUTDIR) #Sets current directory
# Removes text if already exists
if os.path.exists('wc_result.txt'):
    os.remove("wc_result.txt")
wc = open('wc_result.txt', 'w')
# Inspired from "bytesoftly.com" & "truppo" on StackOverflow
# One "word: count" line per distinct token, alphabetically sorted.
for key in sorted(wordFreq):
    wc.write('%s\n' % ("%s: %s" % (key, wordFreq[key])))
wc.close()
#Returns key-value table
pd.Series(wordFreq)
#
# PART TWO: RUNNING MEDIANS
#
# In[8]:
allFiles = sorted(allFiles) # Sorts files by name
print(allFiles)
# In[9]:
os.chdir(WORDINPDIR) #Sets current directory
#For setting max frequency
Line = open(allFiles[0], 'r').read() # Reads in first line sorted by file name
tokenizer = RegexpTokenizer(r'\w+') # Inspired by: <NAME> on StackOverflow
firstLine = len(tokenizer.tokenize(Line)) # Counts non-punctuation words in first by sort
print(tokenizer.tokenize(Line))
str(firstLine) + ' Words in first line' # Word count in first sorted line
# In[10]:
# Word count per file, keyed by the file's sorted position.
lineLen = {}
for i in range(len(allFiles)):
    lineLen[i] = len(tokenizer.tokenize(open(allFiles[i], 'r').read()))
#Returns sorted line counts
for key, value in lineLen.items():
    print allFiles[key], value
# In[21]:
# Prints cumulative medians
med = {}
for i in range(len(lineLen.values())):
    med[i] = np.median(lineLen.values()[:i + 1])
for median in med.values():
    print median
# In[34]:
# Saves to med_result.txt
os.chdir(WORDOUTDIR)
# Removes text if already exists
if os.path.exists('med_result.txt'):
    os.remove('med_result.txt')
wc = open('med_result.txt', 'w')
# Inspired from "bytesoftly.com" & "truppo" on StackOverflow
wc.write(str(med.values()))
wc.close()
|
alexandrealbuquerque000/Web-series | código-WebSeries(v3.5)/WebSeries(v3.5).py |
#<NAME>
from datetime import date
import speech_recognition
from gtts import gTTS
from playsound import playsound
import webbrowser
import pyautogui
import time
import sys
import os
import shutil
import pathlib
def presets():
    """Initialise globals (screen size, question table) and open a helper browser tab."""
    global allquestions, width, height
    width, height=pyautogui.size()
    # Question catalogue keyed by a short id; perguntas() looks prompts up here.
    allquestions={ 'reboot':"Deseja reiniciar o programa? ",
                   'Série':"Fale a série desejada :",
                   'rep':"Deseja ativar a reprodução automática? ",
                   'typeofwatch':"Deseja escolher um episódio específico ou prefere continuar de onde parou? ",
                   'temporada':"Digite a temporada desejada: ",
                   'episódio':"Digite o episódio desejado: ",
                   'watchonemoreep':"Deseja ver mais um episódio? ",
                   'maratona':"Deseja maratonar? ",
                   'epsequence':"Digite quantos episódios seguidos deseja ver: ",
                   'timeepisodesinfo2':"Digite quantos minutos têm em cada episódio: ",
                   'rever':"Deseja recomeçar a série? ",
                   'continuarmaratona':"Deseja continuar a maratona?" }
    # Disable pyautogui's fail-safe (mouse-to-corner abort).
    pyautogui.FAILSAFE = False
    navegador=webbrowser.open("https://www.google.com/", autoraise=True)
    # Type a notice into the search box, then switch back to the previous window.
    time.sleep(2.50)
    pyautogui.hotkey("Ctrl","K")
    time.sleep(0.25)
    pyautogui.typewrite("Por favor, apenas feche esta guia quando o programa terminar. Pode falar...esse App ficou top!")
    time.sleep(0.15)
    pyautogui.hotkey("Alt","Tab")
#Função responsável por ouvir e reconhecer a fala:
def ouvir_microfone(msg):
    """Prompt the user with `msg`, listen on the microphone and return the recognized phrase.

    Retries until Google's speech recognizer returns a phrase; recognition
    failures and connectivity errors only print (and optionally speak) a
    message and loop again.
    """
    while True:
        # Enable the microphone to listen to the user.
        microfone = speech_recognition.Recognizer()
        with speech_recognition.Microphone() as source:
            # Use speech_recognition's ambient-noise reduction.
            microfone.adjust_for_ambient_noise(source)
            # Tell the user we are ready to listen.
            print(msg)
            if "speak" in comandvoz:
                cria_audio(msg)
            else:
                pass
            # Store the captured audio.
            audio = microfone.listen(source)
        try:
            # Hand the audio to speech_recognition's recognizer.
            frase = microfone.recognize_google(audio,language='pt-BR')
            # After a few seconds, echo the spoken phrase back.
            print()
            print("Você disse: " +frase)
            if "speak" in comandvoz:
                cria_audio("Você disse: " +frase)
            else:
                pass
            break
        # Speech was not recognized: show a retry message and listen again.
        except speech_recognition.UnknownValueError:
            print()
            print("Desculpe, não entendi.")
            if "speak" in comandvoz:
                cria_audio("Desculpe, não entendi.")
            else:
                pass
            print()
            print("Tente novamente.")
            if "speak" in comandvoz:
                cria_audio("Tente novamente.")
            else:
                pass
            print()
            continue
        except speech_recognition.RequestError:
            print()
            print("Por favor, verifique sua conexão com a internet.")
            print()
            print("Tente novamente.")
            print()
            continue
    return frase
def audiocontfunction():
    """Yield an ever-increasing audio-file counter: 1, 2, 3, ...

    :return: Generator of consecutive integers starting at 1.
    """
    # Bug fix: the original did `audiocont=audiocont+1` without ever
    # initialising `audiocont`, raising UnboundLocalError on the first
    # next() call.
    audiocont = 0
    while True:
        audiocont += 1
        yield audiocont
#Função responsável por reproduzir o que foi dito:
def cria_audio(audio):
    """Synthesize `audio` (Portuguese TTS), save it under ./audiopys and play it.

    Each call writes a new numbered mp3 so earlier files are never
    overwritten while possibly still playing.
    """
    # Bug fix: the original read the local name `audiocontnumbers` before
    # assigning it (UnboundLocalError on every call). A function attribute
    # keeps the per-run counter without relying on globals.
    cria_audio.audiocont = getattr(cria_audio, "audiocont", 0) + 1
    audiocont = cria_audio.audiocont
    tts = gTTS(audio,lang="pt-br")
    # Ensure the output directory exists. The original caught
    # `(OSError) or (FileExistsError)` which evaluates to OSError anyway
    # (FileExistsError is a subclass of OSError).
    try:
        os.mkdir("audiopys")
    except OSError:
        pass
    # Save the audio file.
    try:
        tts.save("audiopys/audiopy"+str(audiocont)+".mp3")
    except PermissionError:
        print()
        print("Verifique se todos os arquivos 'audiopy' anteriores estão apagados.")
        print()
        input("O programa será desligado.")
        sys.exit()
    # Play the audio.
    playsound("audiopys/audiopy"+str(audiocont)+".mp3")
#Função verificadora de números decimais para comando de voz:
def leiafloatvoz(msg):
    """Parse a spoken answer as a float.

    Whitespace is stripped from `msg` before conversion. On failure an
    error message is printed (and spoken when voice feedback is enabled)
    and None is returned so the caller can re-ask the question.
    """
    cleaned = msg.strip().replace(" ", "")
    try:
        return float(cleaned)
    except (ValueError, TypeError, IndexError):
        print()
        print("ERRO:\nDiga apenas números.")
        if "speak" in comandvoz:
            cria_audio("ERRO:\nDiga apenas números.")
        print()
        return None
#Função para ouvir apenas letras:
def leiastrvoz(msg):
    """Validate a spoken answer as letters only.

    Returns the lower-cased, whitespace-free text, or None (after an
    error message, optionally spoken) when it contains anything other
    than letters.
    """
    answer = str(msg.strip().replace(" ", ""))
    if answer.isalpha():
        return answer.lower()
    print()
    print("ERRO:\nDiga apenas letras.")
    if "speak" in comandvoz:
        cria_audio("ERRO:\nDiga apenas letras.")
    print()
    return None
#Função verificadora de números inteiros para comando de voz:
def leiaintvoz(msg):
    """Parse a spoken answer as an integer.

    Whitespace is stripped from `msg` before conversion. On failure an
    error message is printed (and spoken when voice feedback is enabled)
    and None is returned so the caller can re-ask the question.
    """
    cleaned = msg.strip().replace(" ", "")
    try:
        return int(cleaned)
    except (ValueError, TypeError, IndexError):
        print()
        print("ERRO:\nDiga apenas números inteiros.")
        if "speak" in comandvoz:
            cria_audio("ERRO:\nDiga apenas números inteiros.")
        print()
        return None
#Função para ler apenas letras:
def leiastr(msg):
    """Keep prompting until the user types letters only; return them lower-cased."""
    while True:
        print()
        typed = str(input(msg).strip().replace(" ", ""))
        if not typed.isalpha():
            print()
            print("ERRO:\nDigite apenas letras.")
            continue
        return typed.lower()
#Função desligar programa:
def desligar():
    """Thank the user, delete the generated audio files (voice mode only) and exit."""
    print()
    print("Agradecemos por usar o programa! ;)")
    if "speak" in comandvoz:
        cria_audio("Agradecemos por usar o programa! ;)")
        # NOTE(review): cleanup only runs in voice mode — presumably fine,
        # since the audio files are only created in that mode; confirm.
        try:
            shutil.rmtree("audiopys")
        # NOTE(review): `(OSError) or (FileNotFoundError)` evaluates to just
        # OSError; FileNotFoundError is its subclass, so both are caught.
        except (OSError) or (FileNotFoundError):
            pass
    else:
        pass
    input()
    sys.exit()
#Função verificadora de existência de dados:
def verifexist(dados):
    """Fetch line `dados` from the loaded configuration, handling absence.

    Returns the line when present. When the index is out of range the
    user is warned and offered a restart: returns -1 when they answer
    "yes", otherwise the program is shut down.
    """
    try:
        linha = conteudo[dados]
    except IndexError:
        erroarq()
        if "s" in perguntas('reboot', 'str'):
            return -1
        desligar()
    else:
        return linha
#Função verificadora caso numepisodesinfo esteja com dados errados
def leiaintforQ(msg):
try:
limiter4=int(msg)
except (ValueError, TypeError, IndexError):
erroarq()
reboot=perguntas('reboot', 'str')
if "s" in reboot:
return -1
else:
desligar()
else:
return limiter4
#Função para iniciar a reprodução automática:
def startrepauto():
    """Drive the browser with mouse/keyboard automation to start auto-play.

    Timing-critical GUI scripting: every delay scales with the global
    `systime` (system-speed setting), clicks are offset by the calibration
    globals `posx`/`posy`, and screenshots of the window centre are
    compared to detect page changes. Assumes the browser window layout the
    author calibrated against — fragile by nature.
    """
    pyautogui.position()
    time.sleep(10+(systime/1.5))
    # First click target, derived from screen centre plus calibration offsets.
    posxfirstclick=((width/2)+posx)*(72.5/100)
    posyfirstclick=((height/2)-posy)*(165/100)
    # Clamp the click target to the screen bounds.
    if posxfirstclick > width:
        posxfirstclick = width
    elif posxfirstclick < -(width):
        posxfirstclick = -(width)
    else:
        pass
    if posyfirstclick > height:
        posyfirstclick = height
    elif posyfirstclick < -(height):
        posyfirstclick = -(height)
    else:
        pass
    time.sleep(0.5+systime/50)
    pyautogui.hotkey("Ctrl", "1")
    # Park the cursor at the screen edge so it doesn't pollute screenshots.
    pyautogui.moveTo(width, height/2)
    time.sleep(0.5+systime/50)
    # Reference screenshot of the window centre, used below to detect change.
    im1 = pyautogui.screenshot(region=(int((0.95-posxy)*(width/2)), int((0.95-posxy)*(height/2)), int((1.05+posxy)*(width/2)), int((1.05+posxy)*(height/2))))
    time.sleep(0.5+systime/50)
    pyautogui.hotkey("Ctrl", "2")
    time.sleep(2.5+systime/15)
    pyautogui.click((posxfirstclick), (posyfirstclick))
    time.sleep(15+systime)
    pyautogui.middleClick(((width/2)+posx), ((height/1.5)-posy))
    # Open two tabs and jump to tab 4 each time.
    for loop1 in range(2):
        time.sleep(0.5+systime/50)
        pyautogui.hotkey("Ctrl", "t")
        time.sleep(0.5+systime/50)
        pyautogui.hotkey("Ctrl", "4")
    # Close two tabs.
    for loop2 in range(2):
        time.sleep(0.25+systime/50)
        pyautogui.hotkey("Ctrl", "F4")
    time.sleep(0.5+systime/50)
    pyautogui.moveTo(width, height/2)
    # Keep closing tabs until the window centre no longer matches im1
    # (i.e. the page actually changed).
    while pyautogui.screenshot(region=(int((0.95-posxy)*(width/2)), int((0.95-posxy)*(height/2)), int((1.05+posxy)*(width/2)), int((1.05+posxy)*(height/2))))==im1:
        time.sleep(0.75+systime/45)
        pyautogui.hotkey("Ctrl", "F4")
        continue
    else:
        pass
    time.sleep(10+systime/5)
    pyautogui.click(((width/2)+posx), ((height/2.35)-posy))
    time.sleep(10+systime/5)
    pyautogui.doubleClick(((width/2)+posx), ((height/2)-posy))
    time.sleep(0.5+(systime/25))
    pyautogui.moveTo(width, height/2)
    im3=pyautogui.screenshot(region=(int((0.90-posxy)*(width/2)), int((0.945-posxy)*(height/2)), int((1.10+posxy)*(width/2)), int((1.055+posxy)*(height/2))))
    time.sleep(2.5+systime/15)
    pyautogui.moveTo(width, height/2)
    # If the frame is frozen (screenshots identical), press space to resume playback.
    if im3==pyautogui.screenshot(region=(int((0.90-posxy)*(width/2)), int((0.945-posxy)*(height/2)), int((1.10+posxy)*(width/2)), int((1.055+posxy)*(height/2)))):
        pyautogui.press('space')
    else:
        pass
    # Raise the volume.
    pyautogui.press('up', presses=10)
    pyautogui.position()
#Função para abrir nova janela:
def novajanela():
    """Switch to the previous window (Alt+Tab), then open a new browser window (Ctrl+N)."""
    time.sleep(0.25+(systime/50))
    pyautogui.hotkey("Alt","Tab")
    time.sleep(0.5+(systime/100))
    pyautogui.hotkey("Ctrl", "n")
#Função verificadora do tempo/episódio:
def leiatemp(msg):
    """Convert an episode-duration value to a non-negative float.

    Returns the absolute float value of `msg`. On conversion failure the
    user is offered a restart (-1 is returned) or the program shuts down.
    """
    try:
        minutos = float(msg)
    except (ValueError, TypeError, IndexError):
        erroarq()
        if "s" in perguntas('reboot', 'str'):
            return -1
        desligar()
    else:
        if minutos < 0:
            minutos = -minutos
        return minutos
#Função verificadora de números inteiros
def leiaint(msg):
    """Keep prompting until the user types a valid integer; return it."""
    while True:
        print()
        typed = input(msg).strip().replace(" ", "")
        try:
            return int(typed)
        except (ValueError, TypeError, IndexError):
            print()
            print("ERRO:\nDigite apenas números inteiros.")
#Função verificadora de números decimais
def leiafloat(msg):
    """Keep prompting until the user types a valid number; return it as float."""
    while True:
        print()
        typed = input(msg).strip().replace(" ", "")
        try:
            return float(typed)
        except (ValueError, TypeError, IndexError):
            print()
            print("ERRO:\nDigite apenas números.")
#Função para fazer perguntas:
def perguntas(perg, tipo):
    """Ask the question registered under key `perg` and return the typed/spoken answer.

    :param str perg: Key into the global `allquestions` prompt table.
    :param str tipo: Expected answer type: 'str', 'int' or 'float'; any
        other value reads a raw string with input().
    :return: The answer converted to the requested type.
    """
    perg = allquestions[perg]
    pergcopy = perg
    # tipo1: voice validator, tipo2: keyboard reader, tipo3: final converter.
    if tipo == 'str':
        tipo1 = leiastrvoz
        tipo2 = leiastr
        tipo3 = str
    elif tipo == 'int':
        tipo1 = leiaintvoz
        tipo2 = leiaint
        tipo3 = int
    elif tipo == 'float':
        tipo1 = leiafloatvoz
        tipo2 = leiafloat
        tipo3 = float
    else:
        tipo1 = None
        tipo2 = input
        tipo3 = str
    if "s" in comandvoz:
        print()
        # Spoken prompts say "Diga" (say) instead of "Digite" (type).
        perg = ouvir_microfone(perg.replace("Digite", "Diga")).lower()
        if tipo1 != None:
            # Re-ask until the spoken answer passes validation.
            while tipo1(perg) == None:
                perg = str(pergcopy)
                perg = ouvir_microfone(perg.replace("Digite", "Diga")).lower()
    else:
        if tipo2 == input:
            print()
        # Bug fix: the original only called the reader when tipo2 was the
        # bare input() — typed 'str'/'int'/'float' questions never invoked
        # their reader, so the question text itself was converted/returned
        # (crashing for int/float). The leia* readers print their own
        # prompt, so calling them here is the intended flow.
        perg = tipo2(perg)
    if type(perg) == int or type(perg) == float:
        return tipo3(perg)
    else:
        return tipo3(perg.strip())
#Função que pede (se necessário) 'Enter' para prosseguir:
def Enter():
    """In voice mode, pause until the user presses Enter (optionally spoken)."""
    if "s" not in comandvoz:
        return
    print()
    input("Pressione 'Enter' para prosseguir.")
    if "speak" in comandvoz:
        cria_audio("Pressione 'Enter' para prosseguir.")
    print()
def erroarq():
    """Warn the user that the configuration file holds invalid data."""
    print()
    print("Por favor, confira se todos os dados do arquivo estão corretos e desligue/reinicie o programa")
    if "speak" in comandvoz:
        cria_audio("Por favor, confira se todos os dados do arquivo estão corretos e desligue ou reinicie o programa")
def filetest():
    """
    Load the series data file '<Série>.txt' into the global `conteudo`
    (a list of lines). When the file does not exist, report it and either
    return -1 (user asked for a reboot) or shut the program down.
    """
    global conteudo
    try:
        # BUG FIX: the original opened the file and never closed it, and it
        # unconditionally called readlines() even after the error path.
        # `with` guarantees the handle is closed on every path.
        with open(str(Série) + ".txt", 'r') as arquivo:
            conteudo = arquivo.readlines()
    except (FileNotFoundError, IndexError):
        print()
        print("Esse arquivo não existe.")
        if "speak" in comandvoz:
            cria_audio("Esse arquivo não existe.")
        reboot = perguntas('reboot', 'str')
        if "s" in reboot:
            return -1
        desligar()
def numepisodesinfotest():
    """
    Validate and normalise the per-season episode counts read from the
    series file, leaving them in the global `numepisodesinfo` as a list of
    digit strings. Returns -1 when verifexist() already failed.
    """
    global numepisodesinfo
    # verifexist(4) returns the raw episode-count string (or -1 on failure).
    numepisodesinfo=verifexist(4)
    if numepisodesinfo==-1:
        return -1
    numepisodesinfofix=[]  # NOTE(review): assigned but never used.
    # Blank out every character that is not a digit; newlines are kept so the
    # split below still separates the counts.
    for testnumepisodes in numepisodesinfo:
        try:
            int(testnumepisodes)
        except (IndexError, ValueError):
            if '\n' in testnumepisodes:
                pass
            else:
                numepisodesinfo=numepisodesinfo.replace(testnumepisodes, ' ')
    # Split the cleaned string into a list of per-season counts.
    numepisodesinfo=str(numepisodesinfo).split()
def positionstest():
    """
    Parse the click rotation (line 7) and the screen coordinates (line 1)
    from the series file into the globals `posxy`, `posx` and `posy`,
    clamping them to safe ranges. Invalid values fall back to 0.
    """
    global posx, posy, posxy
    try:
        posxy=float(conteudo[6])
    except (IndexError, ValueError, TypeError):
        posxy=0
    else:
        posxy=float(posxy)
        # Clamp to [-80, 80].
        if posxy>80:
            posxy=80
        elif posxy<-80:
            posxy=-80
        else:
            pass
    # Coordinate check: line 1 is expected to contain "X , Y" with a
    # standalone ',' token separating the two values.
    try:
        posx=str(conteudo[0].split()[:conteudo[0].split().index(',') + 1][-2])
        posy=str(conteudo[0].split()[conteudo[0].split().index(',') + 1:][0])
        float(posx)
        float(posy)
    except (ValueError, TypeError, IndexError):
        posx=0
        posy=0
    else:
        posx=float(posx)
        posy=float(posy)
        # Clamp to half the screen size in each direction.
        # NOTE(review): `width`/`height` are globals defined elsewhere -
        # presumably the screen resolution; confirm against presets().
        if posx > width/2:
            posx=width/2
        elif posx < -(width/2):
            posx = -(width/2)
        if posy > height/2:
            posy = height/2
        elif posy < -(height/2):
            posy = -(height/2)
        pass
def systimetest():
    """
    Parse the advanced "system speed" setting into the global `systime`:
    the token following '[' on line 6 of the series file, clamped to
    [0, 100]. Invalid or missing values fall back to 0.
    """
    global systime
    # Advanced configuration (system speed).
    try:
        systime=str(conteudo[5].split()[conteudo[5].split().index('[') + 1:][0])
        systime=float(systime)
    except (ValueError, TypeError, IndexError):
        systime=0
    else:
        if systime<0:
            systime=0
        elif systime>100:
            systime=100
        pass
def timeepisodesinfotest():
    """
    Probe the series file for the minutes-per-episode value (token before
    'min/ep' on line 6) and for the 'x' marker on line 8. On a malformed
    file, report the error and return -1 (reboot) or shut down.
    """
    global timeepisodesinfo1
    try:
        timeepisodesinfo1=str(conteudo[5].split()[:conteudo[5].split().index('min/ep') + 1][0])
        # testeerrox is only a format probe: it raises when 'x' is missing.
        testeerrox=(conteudo[7].split()[:conteudo[7].split().index('x') + 1])
    except (IndexError, ValueError):
        erroarq()
        reboot=perguntas('reboot', 'str')
        if "s" in reboot:
            return -1
        else:
            desligar()
    else:
        pass
def createnumtemporadas():
    """
    Derive the number of seasons (global `numtemporadas`) from the
    per-season episode counts in `numepisodesinfo`. Returns -1 when a count
    fails validation.
    """
    global numtemporadas
    # leiaintforQ is expected to return -1 when a count is not an integer.
    for loop3 in numepisodesinfo:
        loop3=leiaintforQ(loop3)
        if loop3==-1:
            return -1
    numtemporadas=len(numepisodesinfo)
def arqtests():
    """
    Run every series-file validation; return -1 as soon as one of them asks
    for a reboot.
    """
    global URL
    URL=verifexist(1)
    # Short-circuit order matters: numepisodesinfotest() populates the data
    # that createnumtemporadas() consumes.
    if URL==-1 or numepisodesinfotest()==-1 or createnumtemporadas()==-1 or timeepisodesinfotest()==-1 or positionstest()==-1 or systimetest()==-1:
        return -1
# Current date helper.
def dataday():
    """Return today's date formatted as 'DD-MM-YYYY'."""
    # date.today() prints as 'YYYY-MM-DD'; reverse the parts.
    parts = str(date.today()).split("-")
    return "-".join(reversed(parts))
# Program banner.
def initshow():
    """Clear the console (Windows 'cls') and print the program banner."""
    os.system("cls")
    print()
    print("WebSeries Program by Alê")
def firstsconfig():
    """
    First configuration step: initialise the state flags, ask for the
    voice-command preference and the series name, then load and validate the
    series file. Returns -1 when the user asked for a reboot.
    """
    global limiter1, limiter2, comandvoz, Série
    limiter1="reverselimiter1"
    limiter2="x"
    comandvoz=leiastr("Deseja ativar o comando por voz? ")
    # Ask for the series name:
    Série=perguntas('Série', None)
    if filetest()==-1 or arqtests()==-1:
        return -1
def eptemoption():
    """
    Resolve the minutes-per-episode value: use the one parsed from the
    series file (global `timeepisodesinfo1`) when it is numeric, otherwise
    ask the user. The file value is returned as its absolute value.
    """
    global timeepisodesinfo1
    try:
        parsed = float(timeepisodesinfo1)
    except (ValueError, TypeError, IndexError, NameError):
        timeepisodesinfo2 = perguntas('timeepisodesinfo2', 'float')
    else:
        timeepisodesinfo2 = abs(parsed)
    return timeepisodesinfo2
def closeandgoback(tmp):
    """
    Wait out the episode runtime (*tmp* minutes), close the player window
    (Alt+F4) and switch back to the previous window (Alt+Tab).
    """
    minutes = tmp
    time.sleep(float(minutes) * 60)
    pyautogui.hotkey("Alt", "F4")
    # Small grace period scaled by the configured system speed.
    time.sleep(0.25 + (systime / 50))
    pyautogui.hotkey("Alt", "Tab")
# Watch-a-specific-episode mode.
# NOTE(review): original indentation was lost; the control flow below
# (including the while/for-else clauses) was reconstructed from the code's
# semantics - confirm against the original file.
def especificepoption():
    """
    Ask for a season and episode, validate them against the per-season
    episode counts and open the chosen episode in the browser. Afterwards
    optionally auto-close the player and ask whether to watch one more.
    """
    global watchonemoreep, veriftest1, veriftest2
    temporada=0
    while temporada==0:
        veriftest1=0
        temporada=perguntas('temporada', 'int')
        # Locate the episode count for the chosen season: veriftest1 walks
        # the index, veriftest2 ends up as numepisodesinfo[season-1].
        for loop4 in list(range(1, numtemporadas+1)) :
            while True:
                if int(temporada)==loop4:
                    veriftest2=numepisodesinfo[veriftest1]
                    break
                else:
                    veriftest1=veriftest1+1
                    break
        else:
            # for-else always runs (the breaks above only exit the while):
            # reject an out-of-range season and ask again.
            if int(temporada) not in range(numtemporadas+1):
                temporada=0
                print()
                print("Essa temporada não existe")
                if "speak" in comandvoz:
                    cria_audio("Essa temporada não existe")
                else:
                    pass
                print()
                print("Essa série tem",numtemporadas,"temporadas")
                if "speak" in comandvoz:
                    cria_audio("Essa série tem{}temporadas".format(numtemporadas))
                else:
                    pass
        temporada=int(temporada)
        while temporada!=0:
            print()
            print("Essa temporada possui",veriftest2,"episódios")
            if "speak" in comandvoz:
                cria_audio("Essa temporada possui{}episódios".format(veriftest2))
            else:
                pass
            episódio=perguntas('episódio', 'int')
            if episódio in range(int(veriftest2)+1) and episódio!=0:
                # Valid episode: build the URL and open it in the browser.
                formatedurl=str(URL).format(temporada, episódio)
                if 's' in rep:
                    timeepisodesinfo2=eptemoption()
                novajanela()
                navegador=webbrowser.open(formatedurl, autoraise=True)
                if "s" in rep:
                    startrepauto()
                else:
                    pass
                break
            else:
                print()
                print("Esse episódio não existe")
                if "speak" in comandvoz:
                    cria_audio("Esse episódio não existe")
                else:
                    pass
                continue
        else:
            # Season was reset to 0 (invalid): re-ask it in the outer loop.
            continue
    else:
        # A valid episode was opened: optionally wait/close it afterwards
        # and ask whether to watch one more episode.
        if "s" in rep:
            closeandgoback(timeepisodesinfo2)
        else:
            pass
        Enter()
        watchonemoreep=perguntas('watchonemoreep', 'str')
        if "s" in watchonemoreep and "s" in rep:
            novajanela()
            time.sleep(0.5+(systime/25))
            pyautogui.hotkey("Ctrl", "Alt", "Tab");pyautogui.press("right");pyautogui.press("right");pyautogui.press("enter")
        else:
            pass
# Continue-from-last-episode mode: reads the last watched "season x episode"
# marker from the series file, opens the next episode, updates the file and
# drives the optional marathon (auto-play) behaviour.
# NOTE(review): original indentation was lost; the control flow below was
# reconstructed from the code's semantics - confirm against the original file.
def continuepreviewepoption():
    global limiter4, limiter6, continuarmaratona, epsequence
    # limiter3 == "limiter3" means we re-entered from marathon mode: skip the
    # marathon question and keep going.
    if limiter3=="limiter3":
        maratona="x"
        limiter7=1
    else:
        limiter7=0
        maratona=perguntas('maratona', 'str')
    # Marathon requested (or an existing one continuing): ask the batch size.
    if "s" in maratona or "s" in continuarmaratona and limiter6!="limiter6":
        limiter1="limiter1"
        limiter4=1
        epsequence=perguntas('epsequence', 'int')
        if epsequence < 0:
            epsequence=-(epsequence)
        elif epsequence==0:
            epsequence=1
        else:
            pass
    else :
        if limiter7==1:
            pass
        else:
            limiter4=1
            epsequence=1
        pass
    limiter5=0
    veriftest1=0
    arquivo = open(str(Série)+ ".txt", 'r')
    conteudo = arquivo.readlines()
    # Parse the season number from line 8 ("Último episódio visto: S x E"):
    # tokens up to and including the standalone 'x' marker.
    for loop5 in range(numtemporadas+1):
        if str(loop5) in (conteudo[7].split()[:conteudo[7].split().index('x') + 1]):
            temporada = 0 + int(loop5)
            limiter5=1
    # NOTE(review): if no season token matched above, 'temporada' is unbound
    # here and this raises NameError - looks like a latent bug.
    if temporada==0:
        temporada=1
    else:
        pass
    # Find the episode count for this season (numepisodesinfo[season-1]).
    for loop6 in list(range(1, numtemporadas+1)) :
        while not int(temporada)==loop6:
            veriftest1=veriftest1+1
            break
        else:
            veriftest2=numepisodesinfo[veriftest1]
            break
    if temporada not in range(1,numtemporadas+1) and limiter5!=1:
        erroarq()
        reboot=perguntas('reboot', 'str')
        if "s" in reboot:
            return -1
        else:
            desligar()
    # Parse the last-watched episode number: tokens after the 'x' marker.
    for loop7 in range(int(veriftest2)+1):
        limiter5=0
        if str(loop7) in (conteudo[7].split()[conteudo[7].split().index('x') + 1:]):
            episódio = 0 + int(loop7)
            limiter5=1
            break
    else:
        # No episode token matched: fall back to whatever 'episódio' holds.
        try:
            limiter4=int(episódio)
        except (ValueError, TypeError, IndexError, NameError):
            erroarq()
            reboot=perguntas('reboot', 'str')
            if "s" in reboot:
                return -1
            else:
                desligar()
        else:
            pass
    if episódio not in range(1,int(veriftest2)+1) and episódio==0 and limiter5!=1:
        erroarq()
        reboot=perguntas('reboot', 'str')
        if "s" in reboot:
            return -1
        else:
            desligar()
    arquivo.close()
    if 's' in rep or 's' in maratona:
        timeepisodesinfo2=eptemoption()
    # Open the next episode in the browser.
    formatedurl=str(URL).format(temporada, episódio+1)
    episódio=episódio+1
    novajanela()
    navegador=webbrowser.open(formatedurl, autoraise=True)
    if "s" in rep:
        startrepauto()
    else:
        pass
    def endofseries():
        # Rolls 'temporada'/'episódio' over when the season (and possibly the
        # whole series) has ended, rewriting the series file accordingly.
        # NOTE(review): the early 'return -1' below makes the caller's tuple
        # unpacking fail with TypeError - looks like a latent bug.
        nonlocal episódio, temporada
        if episódio==int(numepisodesinfo[temporada-1]):
            temporada=temporada+1
            episódio=0
            # The series is over when the new season exceeds the season count.
            if temporada>numtemporadas:
                aux2=[]
                aux3=[]
                print()
                print("A série acabou")
                if "speak" in comandvoz:
                    cria_audio("A série acabou")
                else:
                    pass
                Enter()
                rever=perguntas('rever', 'str')
                arquivo = open(str(Série)+ ".txt", 'r')
                conteudo=arquivo.readlines()
                arquivo.seek(0)
                if "s" in rever:
                    # Re-watch: strip the "series finished" line (line 10) if
                    # present and reset the last-watched marker to "0 x 0".
                    temporada=1
                    try:
                        conteudo[9]
                    except (IndexError):
                        pass
                    else:
                        arquivo = open(str(Série)+ ".txt", 'r')
                        conteudo = arquivo.readlines()
                        arquivo.close()
                        for aux5 in conteudo:
                            if aux5!=conteudo[9]:
                                arquivo = open(str(Série)+ ".txt", 'w')
                                aux2.append(aux5)
                                arquivo.writelines(aux2)
                        arquivo.truncate()
                        arquivo.close()
                        pass
                    arquivo = open(str(Série)+ ".txt", 'r')
                    conteudo=arquivo.readlines()
                    # Keep everything up to (excluding) the marker line.
                    for aux6 in conteudo:
                        if aux6 != conteudo[7]:
                            arquivo.close()
                            arquivo = open(str(Série)+ ".txt", 'w')
                            aux3.append(aux6)
                            arquivo.writelines(aux3)
                        else:
                            break
                    arquivo.truncate()
                    arquivo.close()
                    arquivo = open(str(Série)+ ".txt", 'r')
                    conteudo=arquivo.readlines()
                    conteudo.append("Último episódio visto: 0 x 0 .".rstrip('\n\n\n'))
                    arquivo = open(str(Série)+ ".txt", 'w')
                    arquivo.writelines(conteudo)
                    arquivo.truncate()
                    arquivo.close()
                    pass
                else:
                    # Not re-watching: append a "series finished" note (once).
                    try:
                        conteudo[9]
                    except (IndexError) :
                        conteudo.append("\n\nA série terminou.")
                        arquivo = open(str(Série)+ ".txt", 'w')
                        arquivo.writelines(conteudo)
                        arquivo.truncate()
                        arquivo.close()
                        pass
                    else:
                        if '\n' in conteudo[9]:
                            conteudo.append("A série terminou.")
                            arquivo = open(str(Série)+ ".txt", 'w')
                            arquivo.writelines(conteudo)
                            arquivo.truncate()
                            arquivo.close()
                            pass
                        else:
                            pass
                    reboot=perguntas('reboot', 'str')
                    if "s" in reboot:
                        return -1
                    else:
                        desligar()
        return episódio, temporada
    episódio, temporada=endofseries()
    def refreshtxtfileinfo():
        # Rewrites the series file so its last line becomes the new
        # "Último episódio visto: S x E (date)" marker.
        nonlocal episódio, temporada
        arquivo = open(str(Série)+ ".txt", 'r')
        conteudo=arquivo.readlines()
        arquivo.seek(0)
        aux1=[]
        for aux4 in conteudo:
            if aux4 != conteudo[7]:
                arquivo.close()
                arquivo = open(str(Série)+ ".txt", 'w')
                aux1.append(aux4)
                arquivo.writelines(aux1)
        arquivo.truncate()
        arquivo.close()
        arquivo = open(str(Série)+ ".txt", 'r')
        conteudo=arquivo.readlines()
        conteudo.append("Último episódio visto: {} x {} . ({}) ".format( temporada , episódio, dataday()).rstrip('\n\n'))
        arquivo = open(str(Série)+ ".txt", 'w')
        arquivo.writelines(conteudo)
        arquivo.truncate()
        arquivo.close()
    def maratonacontroler():
        # Drives marathon mode: keeps auto-playing until the batch of
        # 'epsequence' episodes completes, then asks whether to continue.
        global limiter2, limiter3, limiter4, limiter6, continuarmaratona, watchonemoreep
        nonlocal maratona, timeepisodesinfo2
        while limiter4%epsequence !=0:
            # Mid-batch: wait out the episode, close the player, flag a
            # re-entry (limiter3/limiter6/limiter2) and return to the caller.
            limiter4=limiter4+1
            time.sleep(timeepisodesinfo2*60)
            pyautogui.hotkey("Alt","F4")
            novajanela()
            limiter3="limiter3"
            limiter6="limiter6"
            limiter2="limiter2"
            break
        else:
            while "s" in maratona or limiter2=="limiter2":
                if "s" in rep:
                    closeandgoback(timeepisodesinfo2)
                else:
                    pass
                Enter()
                continuarmaratona=perguntas('continuarmaratona', 'str')
                if "s" in continuarmaratona:
                    if "s" in rep:
                        novajanela()
                        time.sleep(0.5+(systime/25))
                        pyautogui.hotkey("Ctrl", "Alt", "Tab");pyautogui.press("right");pyautogui.press("right");pyautogui.press("enter")
                        pass
                    else:
                        pass
                    limiter3="limiter3"
                    maratona="s"
                    limiter6="continuarmaratona"
                    limiter2="limiter2"
                    break
                else:
                    limiter2="x"
                    maratona="maratona"
                    pass
            else:
                if "maratona"==maratona:
                    watchonemoreep="continuarmaratona"
                    pass
                else:
                    if "s" in rep:
                        closeandgoback(timeepisodesinfo2)
                    else:
                        pass
                    Enter()
                    watchonemoreep=perguntas('watchonemoreep', 'str')
                    if "s" in watchonemoreep and "s" in rep:
                        novajanela()
                        time.sleep(0.5+(systime/25))
                        pyautogui.hotkey("Ctrl", "Alt", "Tab");pyautogui.press("right");pyautogui.press("right");pyautogui.press("enter")
                    else:
                        pass
    refreshtxtfileinfo()
    maratonacontroler()
# Main interaction loop: asks how to watch and dispatches to the chosen mode.
# NOTE(review): original indentation was lost; the while-else clause below was
# reconstructed from the code's semantics - confirm against the original file.
def secondsconfig():
    """
    Run first-time configuration, then keep offering episodes while the user
    answers "s" to "watch one more?". Returns -1 when a reboot is requested.
    """
    global rep, limiter3, limiter6, continuarmaratona, watchonemoreep
    if firstsconfig()==-1:
        return -1
    reboot='n'
    print()
    print("Série:__",Série,"__")
    if "speak" in comandvoz:
        cria_audio("Série: {}".format(Série))
    else:
        pass
    rep=perguntas('rep', 'str')
    watchonemoreep="s"
    limiter3="x"
    limiter6='x'
    continuarmaratona="continuarmaratona"
    while "s" in watchonemoreep:
        # On marathon re-entry (limiter3 set) the previous choice is reused.
        if "limiter3" in limiter3:
            pass
        else:
            typeofwatch=perguntas('typeofwatch', 'str')
        if "ep" in typeofwatch:
            especificepoption()
        else:
            if continuepreviewepoption()==-1:
                return -1
            else:
                pass
    else:
        # Loop ended normally: offer a reboot, otherwise shut the program down.
        if "s" not in reboot:
            reboot=perguntas('reboot', 'str')
            if "s" in reboot:
                return -1
            else:
                desligar()
def run():
    """
    Program entry point: apply presets, then keep restarting the main flow
    whenever secondsconfig() reports a reboot request (-1). The program only
    terminates from inside secondsconfig() via desligar().
    """
    presets()
    # Restart mechanism: '-1' from secondsconfig() means "start over".
    reboot = "s"
    while "s" in reboot:
        initshow()
        if secondsconfig() == -1:
            continue

run()
|
richard-shepherd/crossword-helper | crossword_libs/word_utils/anagram_helper.py | <reponame>richard-shepherd/crossword-helper<gh_stars>0
import itertools
from singleton_decorator import singleton
from collections import defaultdict
from .word_manager import WordManager
from .word_utils import WordUtils
@singleton
class AnagramHelper(object):
    """
    Finds anagrams for words.

    Each word is mapped to an 'anagram key': the product of a prime number
    assigned to each letter. Words which are anagrams of each other share
    the same key, so anagram lookup is a single dictionary access.
    """
    def __init__(self):
        """
        Constructor. Builds the anagram-key -> [words] lookup from every
        word known by the WordManager.
        """
        # We create a mapping of anagram-key -> [words-which-are-anagrams-of-each-other]...
        self._character_prime_map = self._create_character_prime_map()
        self._anagram_lookup = self._create_anagram_lookup()

    def anagrams(self, word, word_lengths=None):
        """
        Yields anagrams of the word passed in, each as a tuple of words.

        You can request that the result is split into words with lengths specified
        in the optional word_lengths parameter. For example:
          anagrams("astronomer", [4, 6]) -> ("moon", "starer")
        """
        # If no word-lengths were specified, we want anagrams for the full
        # length of the word...
        if word_lengths is None:
            word_lengths = [len(word)]

        # All the ways the word's letters can be partitioned into the
        # requested sizes...
        splits = WordUtils.letter_combinations(word, word_lengths)

        # A split only produces results when every one of its parts has at
        # least one anagram in the lookup...
        for split in splits:
            all_words_have_anagrams = True
            anagrams_for_words = []  # One list of candidate words per part of the split
            # NOTE: loop variable renamed from 'word' so the parameter is not shadowed.
            for part in split:
                anagram_key = self._get_anagram_key(part)
                anagrams_for_part = self._anagram_lookup.get(anagram_key, None)
                if anagrams_for_part is None:
                    # There are no anagrams for this part...
                    all_words_have_anagrams = False
                    break
                anagrams_for_words.append(anagrams_for_part)

            if all_words_have_anagrams:
                # We return all combinations (the cross-product) of the words we found...
                for product in itertools.product(*anagrams_for_words):
                    yield product

    def _create_character_prime_map(self):
        """
        Returns a dictionary of character -> prime-number for use with
        anagram key generation.
        """
        return dict(
            a=2, e=3, i=5, o=7, u=11,
            t=13, s=17, h=19, c=23, d=29,
            k=31, l=37, m=41, n=43, j=47,
            p=53, q=59, r=61, g=67, f=71,
            b=73, v=79, w=83, x=89, y=97, z=101)

    def _get_anagram_key(self, word):
        """
        Returns the anagram key for the word specified: the product of the
        primes for its letters. Characters without a prime (digits,
        punctuation, uppercase) are skipped.
        """
        key = 1
        for c in word:
            # BUG FIX: the original indexed the map directly, which raises
            # KeyError for characters outside a-z and made the None-guard
            # below unreachable. .get() returns None for unknown characters.
            character_prime = self._character_prime_map.get(c)
            if character_prime is not None:
                key = key * character_prime
        return key

    def _create_anagram_lookup(self):
        """
        Creates a dictionary of anagram-key -> [words].
        The key is made by mapping each letter to a prime number and multiplying
        these values for each letter in a word. This means that the key for each
        word is unique for the letters in the word - but not by their order. So
        anagrams share the same key.
        """
        anagram_lookup = defaultdict(list)
        # We map each word we have to its anagram key...
        for word in WordManager().get_words():
            anagram_key = self._get_anagram_key(word)
            anagram_lookup[anagram_key].append(word)
        return anagram_lookup
|
richard-shepherd/crossword-helper | crossword_libs/word_utils/word_utils.py | <filename>crossword_libs/word_utils/word_utils.py
import itertools
class WordUtils(object):
    """
    Utility functions for working with words.
    """

    @staticmethod
    def clean_word(word):
        """
        Return a 'clean' form of *word*: alphabetic characters only
        (whitespace and punctuation removed), lower-cased.
        """
        letters_only = "".join(ch for ch in word if str.isalpha(ch))
        return letters_only.lower()

    @staticmethod
    def remove_punctuation(text, convert_to_lowercase=True):
        """
        Remove punctuation from *text*, keeping letters and spaces.
        Lower-cases the result unless convert_to_lowercase is False.
        """
        kept = "".join(ch for ch in text if WordUtils.is_alpha_or_space(ch))
        return kept.lower() if convert_to_lowercase else kept

    @staticmethod
    def remove_letters_from_word(word, letters):
        """
        Remove *letters* from *word*, one occurrence each.
        NOTE 1: only one of each letter is removed: ("hello", "l") -> "helo".
        NOTE 2: *letters* can be a string or any collection of characters.
        """
        remaining = word
        for ch in letters:
            remaining = remaining.replace(ch, "", 1)
        return remaining

    @staticmethod
    def letter_combinations(word, lengths):
        """
        Yield the ways *word*'s letters can be split into parts of the sizes
        in *lengths*. Each result is a list of 'words', e.g.:
          letter_combinations("abcd", [1, 3]) -> ["a", "bcd"], ["b", "acd"], ...
        """
        if len(lengths) == 1:
            # Base case: yield each distinct combination of that many letters.
            target_length = lengths[0]
            seen = set()
            for combo in itertools.combinations(word, target_length):
                # combinations() is unique by letter position, not value, so
                # repeated letters would otherwise produce duplicate words.
                candidate = "".join(combo)
                if candidate in seen:
                    continue
                seen.add(candidate)
                yield [candidate]
        else:
            # Recursive case: take the first length, then split the leftover
            # letters by the remaining lengths.
            seen_remainders = set()
            for first_split in WordUtils.letter_combinations(word, lengths[:1]):
                first_word = first_split[0]
                leftover = WordUtils.remove_letters_from_word(word, first_word)
                if leftover in seen_remainders:
                    continue
                seen_remainders.add(leftover)
                for tail in WordUtils.letter_combinations(leftover, lengths[1:]):
                    yield list(first_split) + list(tail)

    @staticmethod
    def is_alpha_or_space(letter):
        """Return True if *letter* is alphabetic or whitespace."""
        return str.isalpha(letter) or str.isspace(letter)
|
richard-shepherd/crossword-helper | crossword_libs/cryptic_utils/clue.py | <filename>crossword_libs/cryptic_utils/clue.py
from .bits_and_pieces import BitsAndPieces
class Clue(object):
    """
    Parses cryptic clues and provides information on how to solve them.
    """

    def __init__(self, clue):
        """Store the raw clue text."""
        # The original text of the clue...
        self.clue = clue

    @staticmethod
    def parse(clue):
        """
        Parses the clue: prints each phrase found in it together with its
        known abbreviations (both upper-cased).
        """
        matches = BitsAndPieces().bits_and_pieces_from_clue(clue)
        for (phrase, abbreviations) in matches:
            upper_phrase = phrase.upper()
            upper_abbreviations = [x.upper() for x in abbreviations]
            print("{0} -> {1}".format(upper_phrase, upper_abbreviations))
richard-shepherd/crossword-helper | crossword_libs/word_utils/lemma_info.py | <gh_stars>0
from collections import defaultdict
class LemmaInfo(object):
    """
    Holds info related to a lemma - ie, to the root form of a word.
    In particular we hold a map of pos-tags to the word form for them.
    For example, the LemmaInfo for "good" will include:
      JJR -> "better"
      JJT -> "best"
    """

    def __init__(self):
        """Create a lemma record with no word forms yet."""
        # Maps a part-of-speech tag (e.g. "JJR") to the word form (e.g. "better").
        self.word_forms = {}
|
richard-shepherd/crossword-helper | crossword_libs/cryptic_utils/bits_and_pieces.py | import logging
from singleton_decorator import singleton
from collections import defaultdict
from ..word_utils import WordUtils
from ..utils import Utils
@singleton
class BitsAndPieces(object):
    """
    Manages a collection of 'bits and pieces' - ie, substitutions of
    words or phrases for collections of letters. For example:
      lincoln -> abe
      actor -> ham
    """

    def __init__(self):
        """Load the phrase -> [abbreviations] map from the data file."""
        self.abbreviations = self._load_from_file()

    def bits_and_pieces_from_clue(self, clue, require_whole_word_if_length_less_than=4):
        """
        Return the bits and pieces found in *clue* as a list of
        (phrase, [abbreviations]) tuples, e.g. ("sailor", ["tar", "jack"]).
        Short phrases must appear as whole words in the clue; longer phrases
        may appear anywhere in it.
        """
        matches = []
        # Normalise the clue: punctuation removed, lower-cased.
        normalized_clue = WordUtils.remove_punctuation(clue)
        clue_words = normalized_clue.split()
        for (phrase, abbreviations) in self.abbreviations.items():
            if len(phrase) < require_whole_word_if_length_less_than:
                # Short phrase: require a whole-word match.
                found = phrase in clue_words
            else:
                # Long phrase: a substring match anywhere is enough.
                found = phrase in normalized_clue
            if found:
                matches.append((phrase, abbreviations))
        return matches

    def _load_from_file(self):
        """
        Return a map of phrase -> [abbreviations] loaded from the
        bits_and_pieces.txt file. Each line looks like:
          he: His Excellency
        The mapping is inverted so a clue phrase can be looked up, and a
        phrase may map to several abbreviations (sailor -> ab, tar).
        """
        phrase_map = defaultdict(list)
        filename = "bits_and_pieces.txt"
        path = Utils.path_relative_to_module(__file__, filename)
        logging.info("Loading bits-and-pieces from " + path)
        with open(path, "r") as file:
            lines = file.readlines()
        for line in lines:
            tokens = line.split(":")
            if len(tokens) != 2:
                continue
            abbreviation = tokens[0].strip().lower()
            phrase = tokens[1].strip().lower()
            phrase_map[phrase].append(abbreviation)
        return phrase_map
|
richard-shepherd/crossword-helper | crossword_libs/word_utils/word_manager.py | import logging
import nltk
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from collections import defaultdict
from singleton_decorator import singleton
from ..utils import Utils
from .lemma_info import LemmaInfo
from .word_info import WordInfo
from .word_utils import WordUtils
@singleton
class WordManager(object):
    """
    Manages the collection of available words.
    Loads words from a number of sources, including:
    - The Brown tagged-words corpus
    - A file of English words
    Helps find the part of speech (pos) for a word, and creates pos forms of words
    from the lemma (root form) and a pos indicator.
    """
    def __init__(self):
        """
        Constructor. Loads every word source up front, so construction is
        slow; the @singleton decorator ensures it happens only once.
        """
        # Collection of WordInfo, keyed by the word itself...
        self.word_infos = defaultdict(WordInfo)
        # Collection of LemmaInfo, keyed by the lemma string. Lets you look up pos
        # word forms for the lemma...
        self.lemma_infos = defaultdict(LemmaInfo)
        # Converts words to their lemmas...
        self._lemmatizer = WordNetLemmatizer()
        # Loads all words, and finds their pos mappings...
        self._load_all_words()
    def get_words(self, length=None):
        """
        Returns all words of the length requested. Or all words if the length is not specified.
        Note: returns a keys-view when no length is given, a generator otherwise.
        """
        if length is None:
            return self.word_infos.keys()
        else:
            return (word for word in self.word_infos if len(word) == length)
    def get_pos_tags(self, word):
        """
        Returns the collection of pos-tags for the word.
        NOTE(review): word_infos is a defaultdict, so looking up an unknown
        word inserts an empty WordInfo for it as a side effect.
        """
        return self.word_infos[word].pos_tags
    def get_part_of_speech(self, lemma, pos_tag):
        """
        Returns a word corresponding to the lemma and pos-tag specified.
        If we cannot find a word for the pos-tag, we return the lemma itself.
        """
        # We check if we have info for this lemma...
        if lemma not in self.lemma_infos:
            return lemma  # We do not have info for the lemma, so we just return it
        # We have info for this lemma - so we check if we have a form for the
        # pos-tag requested...
        lemma_info = self.lemma_infos[lemma]
        if pos_tag not in lemma_info.word_forms:
            return lemma  # We do not have a word-form for the requested pos-tag, so we return the lemma
        # We have a word-form for the lemma and pos-tag requested...
        return self.lemma_infos[lemma].word_forms[pos_tag]
    def _load_all_words(self):
        """
        Loads a collection of all English words and creates maps:
        - word -> part-of-speech indicators
        - lemma -> (part-of-speech-indicator -> word)
        """
        self._load_words_from_corpus(nltk.corpus.brown)
        self._load_words_from_corpus(nltk.corpus.treebank)
        self._load_words_from_file()
        self._map_lemmas_to_words()
    def _load_words_from_corpus(self, corpus):
        """
        Loads words from a tagged corpus, recording each word's pos-tags.
        """
        logging.info("Loading words from corpus: {0}".format(str(corpus.root)))
        for (word, pos_tag) in corpus.tagged_words():
            # We clean up the word, removing punctuation, whitespace etc...
            clean_word = WordUtils.clean_word(word)
            if clean_word == "": continue
            # We add the word and its tag to the map of word -> pos-tags...
            word_info = self.word_infos[clean_word]
            word_info.pos_tags.add(pos_tag)
    def _load_words_from_file(self):
        """
        Loads words from a file and attempts to infer pos info for them.
        """
        # We read all lines from the file, removing whitespace...
        filename = "words_alpha.txt"
        logging.info("Loading words from {0}".format(filename))
        path = Utils.path_relative_to_module(__file__, filename)
        with open(path, "r") as file:
            words = sorted([x.strip() for x in file.readlines()])
        # We add these words to our collection, if we do not already have them...
        active_first_letter = ""
        for word in words:
            clean_word = WordUtils.clean_word(word)
            if clean_word in self.word_infos: continue  # We already have this word from a different source
            # We log as we process each letter...
            # NOTE(review): clean_word[0] raises IndexError if a line cleans
            # to the empty string - presumably the file contains only
            # alphabetic words; confirm.
            first_letter = clean_word[0]
            if first_letter != active_first_letter:
                active_first_letter = first_letter
                logging.info(".. loading words starting with '{0}'".format(first_letter))
            # We find the pos tags for the word, and store the WordInfo for this word...
            pos_tags = self._infer_pos_tags(clean_word)
            self.word_infos[clean_word].pos_tags = set(pos_tags)
    def _infer_pos_tags(self, word):
        """
        Returns a collection of pos tags for the word passed in.
        """
        results = set()
        # For the moment we just return the main tag provided by nltk. We may later try to do this
        # better, eg by trying the word in various sentences...
        pos_tags = nltk.pos_tag([word])
        pos_tag = pos_tags[0][1]
        results.add(pos_tag)
        return results
    def _map_lemmas_to_words(self):
        """
        Finds the lemma for each (word, pos-tag) we have found and maps the
        lemma to it.
        """
        logging.info("Mapping lemmas to (word, pos-tag).")
        for (word, word_info) in self.word_infos.items():
            for pos_tag in word_info.pos_tags:
                wordnet_pos = self._get_wordnet_pos(pos_tag)
                if wordnet_pos is not None:
                    lemma = self._lemmatizer.lemmatize(word, pos=wordnet_pos)
                    self.lemma_infos[lemma].word_forms[pos_tag] = word
    def _get_wordnet_pos(self, pos_tag):
        """
        Returns a wordnet pos type from the pos-tag passed in.
        We use the wordnet pos types when finding the lemma of a word.
        Returns None for tags with no wordnet equivalent.
        """
        if pos_tag.startswith('J'):
            return wordnet.ADJ
        elif pos_tag.startswith('V'):
            return wordnet.VERB
        elif pos_tag.startswith('N'):
            return wordnet.NOUN
        elif pos_tag.startswith('R'):
            return wordnet.ADV
        else:
            return None
|
richard-shepherd/crossword-helper | crossword_libs/utils/utils.py | import sys
import os
import logging
class Utils(object):
    """
    Utility functions.
    """

    @staticmethod
    def log_to_stdout(level=logging.INFO):
        """
        Configure the root logger to write to stdout at the (optional)
        level specified.
        """
        root_logger = logging.getLogger()
        root_logger.setLevel(level)
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(level)
        stream_handler.setFormatter(
            logging.Formatter('%(asctime)s: %(levelname)s - %(message)s'))
        root_logger.addHandler(stream_handler)

    @staticmethod
    def path_relative_to_module(module_file_path, filename):
        """
        Return a path for *filename* in the same folder as *module_file_path*.
        Callers usually pass __file__ as the module_file_path parameter.
        """
        module_folder = os.path.dirname(module_file_path)
        return os.path.join(module_folder, filename)
richard-shepherd/crossword-helper | crossword_libs/word_utils/words.py | import re
from .anagram_helper import AnagramHelper
from .definition_helper import DefinitionHelper
from .word_manager import WordManager
from .word_utils import WordUtils
class Words(object):
    """
    Provides a fluent interface for looking up words given certain constraints.
    Each filter returns a new Words object, so calls can be chained, e.g.:
      Words().anagrams("rats").match("t...")
    """

    def __init__(self):
        """
        Constructor.
        Defaults the collection of words to all words known by the WordManager.
        Note: the `words` attribute must always be an iterable of strings.
        """
        self.words = WordManager().get_words()

    def __iter__(self):
        """
        Allows the words held by this object to be iterated.
        """
        # BUG FIX: the original returned self.words directly, but __iter__
        # must return an iterator. self.words is often a plain iterable
        # (a list or dict keys view), which made `for w in words_obj` raise
        # TypeError. iter() handles both iterables and iterators.
        return iter(self.words)

    def print(self):
        """
        Prints the collection of words we hold.
        """
        for word in self.words:
            print(word)

    def match(self, pattern):
        """
        Returns words which match the regex pattern supplied (full match).
        """
        result = Words()
        result.words = self._internal_match(pattern)
        return result

    def contains(self, letters):
        """
        Returns words which contain the letters specified.
        """
        result = Words()
        result.words = self._internal_contains(letters)
        return result

    def anagrams(self, word, word_lengths=None):
        """
        Returns anagrams of the word passed in.
        You can request that the result is split into words with lengths specified
        in the optional word_lengths parameter. For example:
          anagrams("astronomer", [4, 6]) -> ["moon", "starer"]
        """
        result = Words()
        # Anagrams come back as tuples of words; join each into one string...
        anagrams = AnagramHelper().anagrams(word, word_lengths)
        result.words = ("".join(anagram) for anagram in anagrams)
        return result

    def definition(self, definition):
        """
        Returns words associated with the definition supplied.
        """
        result = Words()
        result.words = DefinitionHelper.words_for_definition(definition)
        return result

    def length(self, length):
        """
        Returns words filtered to the length specified.
        """
        result = Words()
        result.words = (word for word in self.words if len(word) == length)
        return result

    def _internal_match(self, pattern):
        """
        Returns an iterable of words which fully match the pattern provided.
        """
        # Compile once; fullmatch requires the whole word to match.
        compiled_re = re.compile(pattern)
        for word in self.words:
            if compiled_re.fullmatch(word) is not None:
                yield word

    def _internal_contains(self, letters):
        """
        Returns an iterable of words which contain the letters provided
        (each requested letter at least as many times as requested).
        """
        len_letters = len(letters)
        for word in self.words:
            len_word = len(word)
            # If every requested letter was removable, the word contains them all.
            word_with_letters_removed = WordUtils.remove_letters_from_word(word, letters)
            if len(word_with_letters_removed) == (len_word - len_letters):
                yield word
|
richard-shepherd/crossword-helper | main.py | <filename>main.py
import itertools
from crossword_libs import AnagramHelper
from crossword_libs import BitsAndPieces
from crossword_libs import Clue
from crossword_libs import DefinitionHelper
from crossword_libs import Utils
from crossword_libs import WordManager
from crossword_libs import Words
# Scratchpad entry point: set up logging and exercise the cryptic-clue parser.
# The commented-out blocks below are kept as usage examples of the library.
Utils.log_to_stdout()
Clue.parse("Self righteous sailors are good workers")
# for info in BitsAndPieces().bits_and_pieces_from_clue("Self righteous sailors are good workers"):
#     print(info)
# for word in Words().match(".str.n.m.."):
#     print(word)
# for word in Words().match("b..tle"):
#     print(word)
# for word in Words().anagrams("rats").match("t..."):
#     print(word)
# for word in Words().length(20):
#     print(word)
# Words().definition("rodents").length(5).print()
# Words().definition("rodent").length(5).print()
# for word in DefinitionHelper.words_for_definition("stargazer"):
#     print(word)
# for word in AnagramHelper().anagrams("rats"):
#     print(word)
# print(WordManager().lemma_infos["fast"].word_forms)
# print(WordManager().lemma_infos["table"].word_forms)
# print(WordManager().lemma_infos["astronomer"].word_forms)
# words = WordManager().get_words()
# for word in itertools.islice(words, 0, 10):
#     print(word)
|
richard-shepherd/crossword-helper | crossword_libs/cryptic_utils/__init__.py | <reponame>richard-shepherd/crossword-helper<gh_stars>0
from .bits_and_pieces import BitsAndPieces
from .clue import Clue
|
richard-shepherd/crossword-helper | crossword_libs/word_utils/word_info.py | <reponame>richard-shepherd/crossword-helper<gh_stars>0
class WordInfo(object):
    """
    Per-word metadata record.
    """
    def __init__(self):
        """
        Creates an empty record.
        """
        # Part-of-speech tags (e.g. "NN", "VBD") observed for this word.
        # A fresh set is created per instance so records never share state.
        self.pos_tags = set()
|
richard-shepherd/crossword-helper | crossword_libs/word_utils/__init__.py | <reponame>richard-shepherd/crossword-helper
from .anagram_helper import AnagramHelper
from .definition_helper import DefinitionHelper
from .lemma_info import LemmaInfo
from .word_info import WordInfo
from .word_manager import WordManager
from .word_utils import WordUtils
from .words import Words
|
richard-shepherd/crossword-helper | crossword_libs/__init__.py | from .cryptic_utils import BitsAndPieces
from .cryptic_utils import Clue
from .utils import Utils
from .word_utils import AnagramHelper
from .word_utils import DefinitionHelper
from .word_utils import WordManager
from .word_utils import Words
|
richard-shepherd/crossword-helper | crossword_libs/word_utils/definition_helper.py | from nltk.corpus import wordnet
from .word_manager import WordManager
from .word_utils import WordUtils
class DefinitionHelper(object):
    """
    Finds words which match a crossword definition. For example:
      "rodents" -> "rats", "mice"
    """
    @staticmethod
    def words_for_definition(definition):
        """
        Finds a collection of words from a hint. For example:
          "rodents" -> "rats", "mice" etc

        Yields one word-form per (definition pos-tag, candidate lemma) pair,
        so callers receive forms matching the definition's inflection.
        """
        # We find lemmas for the definition.
        #
        # BUGFIX: _lemmas_for_definition returns a generator, which can only be
        # consumed once. It is iterated once per pos-tag below, so it must be
        # materialized into a list - previously only the first pos-tag ever saw
        # any lemmas; later pos-tags iterated an exhausted generator.
        lemmas = list(DefinitionHelper._lemmas_for_definition(definition))
        # We convert each lemma to the part-of-speech corresponding to the definition.
        # For example, even if the definition was "rodents" (ie, plural) the lemmas
        # we have found will all be singular.
        #
        # We find the pos-tags for the original word (for example, "NNS" for "rodents")
        # and then find the corresponding form for each lemma.
        word_manager = WordManager()
        definition_pos_tags = word_manager.get_pos_tags(definition)
        for definition_pos_tag in definition_pos_tags:
            for lemma in lemmas:
                # We find the part-of-speech for this lemma for the current pos-tag...
                part_of_speech = word_manager.get_part_of_speech(lemma, definition_pos_tag)
                yield part_of_speech
    @staticmethod
    def _lemmas_for_definition(definition):
        """
        Returns an iterable (generator) of unique, cleaned lemma names related
        to the definition: its synsets, synsets similar to them, and all of
        their hyponyms.
        """
        synsets = set()
        # We look up synsets for the word. These are words / concepts with the same meaning.
        # Similar synsets are gathered recursively, three levels deep...
        for synset in wordnet.synsets(definition):
            synsets = synsets.union(DefinitionHelper._find_similar_synsets(synset, 3))
        # We find hyponyms for each synset we've found...
        hyponyms = set()
        for synset in synsets:
            hyponyms = hyponyms.union(DefinitionHelper._get_hyponyms_from_synset(synset))
        synsets = synsets.union(hyponyms)
        # We find all the words from the synsets we've found...
        words = set()
        for synset in synsets:
            words_in_synset = DefinitionHelper._words_from_synset(synset)
            for word_in_synset in words_in_synset:
                # We clean the word, for example, to remove underscores...
                clean_word = WordUtils.clean_word(word_in_synset)
                # We return the word (if we have not already returned it previously)...
                if clean_word in words: continue
                words.add(clean_word)
                yield clean_word
    @staticmethod
    def _find_similar_synsets(synset, similar_to_recursion_level=0):
        """
        Returns a collection of synsets including the original synset provided as
        well as other synsets which are similar to it. The similarity can be done
        recursively on these synsets to the recursion level specified.
        """
        # We add the synset provided to the results...
        results = set()
        results.add(synset)
        if similar_to_recursion_level == 0:
            return results
        # We want to find similar synsets, recursing one level shallower each time...
        for similar_to in synset.similar_tos():
            results = results.union(DefinitionHelper._find_similar_synsets(similar_to, similar_to_recursion_level-1))
        return results
    @staticmethod
    def _words_from_synset(synset):
        """
        Returns the set of words (lemma-names) from the synset passed in.
        """
        words = set()
        # We add the words from the synset...
        for lemma_name in synset.lemma_names():
            words.add(lemma_name)
        return words
    @staticmethod
    def _get_hyponyms_from_synset(synset):
        """
        Returns the collection of all hyponyms for the synset passed in.
        A hyponym is a specific example of the synset, eg "oak" is a hyponym of "tree".
        """
        hyponyms = set()
        # We add all hyponyms (recursively) to the collection of results...
        for hyponym in synset.hyponyms():
            hyponyms.add(hyponym)
            hyponyms = hyponyms.union(DefinitionHelper._get_hyponyms_from_synset(hyponym))
        return hyponyms
|
richard-shepherd/crossword-helper | setup.py | from crossword_libs import BitsAndPieces
from crossword_libs import Utils
from crossword_libs import Words
# Sets up logging...
Utils.log_to_stdout()
# Pre-loads words...
# (constructed purely for its side effects; the instance is discarded)
Words()
# Pre-loads bits-and-pieces...
BitsAndPieces()
|
matthiasmullie/media-search-signal-test | logreg.old.py | <reponame>matthiasmullie/media-search-signal-test
# This is the script that was used to do the first logistic regression based on a ranklib file for MediaSearch
#
# It's no longer relevant, but keeping in the repo just for information's sake
import os
import subprocess
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.feature_selection import RFE
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statsmodels.api as smapi
ranklibFile = 'out/MediaSearch_20210127.tsv'
split = '1'
# Converts the ranklib TSV into a CSV next to it (same name, .csv extension).
subprocess.check_output( ["php", "ranklibToCsv.php", "--ranklibFile=" + ranklibFile, "--split=" + split ] )
# load the data from the csv file
data = pd.read_csv( ranklibFile.replace( '.tsv', '.csv' ), header=0 )
logreg = LogisticRegression(fit_intercept=True, solver='liblinear')
# NAMING CONVENTIONS
#
# X is an array containing the explanatory (independent) variables - i.e. the elasticsearch scores for each search component
# y is an array containing the response (dependent) variable - i.e. the rating for the image
# NOTE(review): the list below is named "dependent_variable_columns" although, statistically,
# these columns are the independent variables; the name is kept unchanged here.
dependent_variable_columns = [
    'descriptions.plain',
    'descriptions',
    'title',
    'title.plain',
    'category',
    'redirect.title',
    'redirect.title.plain',
    'suggest',
    'auxiliary_text',
    'auxiliary_text.plain',
    'text',
    'text.plain',
    'statements'
]
X = data.loc[:, dependent_variable_columns]
y = data.loc[:, data.columns == 'rating']
# First we need to figure out which search signals (i.e. dependent variables) we should use
# If we use them all we get negative weights, which we can't use in elasticsearch, so let's eliminate
# some of them in a sensible manner
#
# Step 1
# See which search signals are statistically significant by fitting a logistic regression model
logit_model=smapi.Logit(y,X)
result=logit_model.fit()
print('CHECKING FOR STATISICAL SIGNIFICANCE')
print('===')
print(result.summary2())
# Here are the results
#                              Results: Logit
# ====================================================================
# Model:              Logit            Pseudo R-squared: 0.081
# Dependent Variable: rating           AIC:              9514.0487
# Date:               2021-03-25 14:13 BIC:              9603.9720
# No. Observations:   7459             Log-Likelihood:   -4744.0
# Df Model:           12               LL-Null:          -5161.7
# Df Residuals:       7446             LLR p-value:      4.1831e-171
# Converged:          1.0000           Scale:            1.0000
# No. Iterations:     5.0000
# --------------------------------------------------------------------
#                      Coef.  Std.Err.    z     P>|z|   [0.025  0.975]
# --------------------------------------------------------------------
# descriptions.plain  -0.0266   0.0290  -0.9196 0.3578 -0.0834  0.0301
# descriptions         0.0545   0.0287   1.8961 0.0579 -0.0018  0.1108
# title                0.0601   0.0110   5.4793 0.0000  0.0386  0.0816
# title.plain          0.0154   0.0115   1.3393 0.1805 -0.0071  0.0379
# category             0.0348   0.0034  10.2257 0.0000  0.0282  0.0415
# redirect.title      -0.0165   0.0190  -0.8685 0.3851 -0.0537  0.0207
# redirect.title.plain 0.0060   0.0193   0.3093 0.7571 -0.0319  0.0438
# suggest             -0.0069   0.0049  -1.4115 0.1581 -0.0166  0.0027
# auxiliary_text      -0.0666   0.0095  -7.0313 0.0000 -0.0852 -0.0481
# auxiliary_text.plain 0.0248   0.0087   2.8370 0.0046  0.0077  0.0419
# text                -0.0701   0.0163  -4.2982 0.0000 -0.1021 -0.0382
# text.plain           0.0415   0.0168   2.4720 0.0134  0.0086  0.0743
# statements           0.0687   0.0063  10.9180 0.0000  0.0564  0.0810
# ====================================================================
#
# Anything with P>|z| < 0.05 is statistically significant, so that suggests we should ignore
# descriptions.plain, title.plain, redirect.title, redirect.title.plain, suggest and possibly
# descriptions
# Step 2
# Check for multicollinearity
# If 2 dependent variables vary in similar ways, then they're likely to not be independent of one another,
# and so we probably shouldn't include both of them. We test for this using variance influence factor AKA VIF
print('CHECKING FOR MULTICOLLINEARITY')
print('===')
vif_data = pd.DataFrame()
vif_data["feature"] = X.columns
vif_data["VIF"] = [variance_inflation_factor(X.values, i)
                   for i in range(len(X.columns))]
print(vif_data)
# Here are the results
#                 feature        VIF
#0     descriptions.plain  32.414879
#1           descriptions  31.941416
#2                  title  30.542579
#3            title.plain  32.308208
#4               category   2.538902
#5         redirect.title  18.091665
#6   redirect.title.plain  18.191382
#7                suggest   9.869167
#8         auxiliary_text  37.705318
#9   auxiliary_text.plain  29.930303
#10                  text  24.548127
#11            text.plain  25.251712
#12            statements   1.122044
#
# Anything with VIF>10 is highly collinear with something else. The obvious thing here is <field> and <field>.plain -
# it's kind of obvious that they're pretty collinear, so let's remove those and re-run
print('CHECKING FOR MULTICOLLINEARITY - SECOND PASS')
print('===')
dependent_variable_columns = [
    #'descriptions.plain',
    'descriptions',
    'title',
    #'title.plain',
    'category',
    'redirect.title',
    #'redirect.title.plain',
    'suggest',
    'auxiliary_text',
    #'auxiliary_text.plain',
    'text',
    #'text.plain',
    'statements'
]
X = data.loc[:, dependent_variable_columns]
vif_data = pd.DataFrame()
vif_data["feature"] = X.columns
vif_data["VIF"] = [variance_inflation_factor(X.values, i)
                   for i in range(len(X.columns))]
print(vif_data)
# Here are the results
#          feature       VIF
#0    descriptions  2.200676
#1           title  9.351241
#2        category  2.524780
#3  redirect.title  1.216832
#4         suggest  6.787788
#5  auxiliary_text  6.199441
#6            text  2.371407
#7      statements  1.119669
#
# Much better ... but really our target for VIF is <2.5
# Step 3
# Reduce dependent variables further
#
# Because we still have some multicollinearity, let's see if we can get rid of some of the fields to reduce it
#
# We can use Recursive Feature Elimination (RFE) to "select those features (columns) in a training dataset that are
# more or most relevant in predicting the target variable"
#
# ATM we have 8 dependent variables, so let's iterate, reducing the number by 1 each time, and checking
# whether we have any negative coefficients, plus some measures of precision/accuracy
for i in range(len(dependent_variable_columns), 1, -1):
    # find the most significant fields
    significantColumns = []
    rfe = RFE(logreg, n_features_to_select=i)
    rfe = rfe.fit(X, y.values.ravel())
    support = dict(zip(list(X.columns), rfe.support_.ravel()))
    for key, value in support.items():
        if value:
            significantColumns.append( key )
    X = data.loc[:, significantColumns]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
    model = logreg.fit(X_train, y_train.values.ravel())
    print('Coefficients')
    print(dict(zip(list(X_train.columns), model.coef_[0])))
    print('Intercept')
    print(model.intercept_[0])
    # See how accurate the model is
    y_pred = logreg.predict(X_test)
    y_pred_p = logreg.predict_proba(X_test)
    print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test)))
    print('Balanced accuracy: {:.4f}'.format(metrics.balanced_accuracy_score(y_test, y_pred)))
    print('Average precision score: {:.4f}'.format(metrics.average_precision_score(y_test, y_pred_p.T[1])))
    print('Brier score loss (smaller is better): {:.4f}'.format(metrics.brier_score_loss(y_test, y_pred_p.T[1])))
    # NOTE(review): the label says "F1 score" but the value printed is
    # average_precision_score on the hard predictions - presumably a
    # copy/paste slip; confirm before reusing this output.
    print('F1 score: {:.4f}'.format(metrics.average_precision_score(y_test, y_pred)))
# The first set of coefficients with all positive values that we have is
# {'descriptions': 0.019320230186222098, 'title': 0.0702949038300864, 'category': 0.05158078808882278,
#  'redirect.title': 0.01060150471482338, 'statements': 0.11098311564161133}
# Intercept: -1.1975600089068401
#
# By a lucky coincidence, this also gives us the best set of accuracy measures, and also gives us all VIFs<2.5
# So let's use these!
matthiasmullie/media-search-signal-test | logreg.py | import getopt
import numpy as np
import os
import pandas as pd
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.feature_selection import RFE
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statsmodels.api as smapi
import subprocess
import sys
from pprint import pprint
ranklibFile = 'out/MediaSearch_20210127.tsv'
trainingDataSize = 0.8
generateNewCsv = True
try:
    opts, args = getopt.getopt(sys.argv[1:],"hx", [ "trainingDataSize=" ])
except getopt.GetoptError:
    print('ERROR: Incorrect options')
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        print('')
        print('Run logistic regression on ' + ranklibFile + ' and output scores and coefficients')
        print('')
        print('The ranklib file is first transformed to a csv file for processing using ranklibToCsv.php')
        print('')
        print('logreg.py -x --trainingDataSize={int}')
        print('')
        print('The option -x skips the transformation to csv, and uses the csv from the last run')
        print('If trainingDataSize is set, the data will be trained on the first (total_rows)*trainingDataSize rows of the csv, and tested on the rows that follow')
        print('')
        sys.exit()
    elif opt == "--trainingDataSize":
        trainingDataSize = float(arg)
    if opt == '-x':
        generateNewCsv = False
if ( generateNewCsv == True ):
    # transform the ranklib file to a csv file for processing
    subprocess.check_output( ["php", "ranklibToCsv.php", "--ranklibFile=" + ranklibFile ] )
# load the data from the csv file
alldata = pd.read_csv( ranklibFile.replace( '.tsv', '.csv' ), header=0 )
trainingData = alldata[:round(len(alldata)*trainingDataSize)]
# NOTE(review): the -h help text says testing happens on "the rows that follow"
# the training split, but the test set is hard-coded to the last 1000 rows
# (which can overlap the training rows when trainingDataSize is large) -
# confirm which behaviour is intended.
testData = alldata[len(alldata) - 1000:]
print('Training on the first ' + str(len(trainingData)) + ' rows of ' + ranklibFile.replace( '.tsv', '.csv' ))
print('Testing on the last ' + str(len(testData)) + ' rows of ' + ranklibFile.replace( '.tsv', '.csv' ))
logreg = LogisticRegression(fit_intercept=True, solver='liblinear')
# NAMING CONVENTIONS
#
# X(_(train|test)) -> array containing the explanatory (independent) variables - i.e. the elasticsearch scores for each search component
# y(_(train|test)) is an array containing the response (dependent) variable - i.e. the rating for the image
# NOTE(review): the list below is named "dependent_variable_columns" although, statistically,
# these columns are the independent variables; the name is kept unchanged here.
y = alldata.loc[:, alldata.columns == 'rating']
y_train = trainingData.loc[:, trainingData.columns == 'rating']
y_test = testData.loc[:, testData.columns == 'rating']
# exclude obviously highly-collinear variables, and use "plain" because not all languages have stemmed fields
dependent_variable_columns = [
    #'descriptions.plain',
    'descriptions',
    'title',
    #'title.plain',
    'category',
    'redirect.title',
    #'redirect.title.plain',
    'suggest',
    'auxiliary_text',
    #'auxiliary_text.plain',
    'text',
    #'text.plain',
    'statements'
]
X = alldata.loc[:, dependent_variable_columns]
X_train = trainingData.loc[:, dependent_variable_columns]
X_test = testData.loc[:, dependent_variable_columns]
# We need to have all positive coefficients for elasticsearch
#
# We can use Recursive Feature Elimination (RFE) to "select those features (columns) in a training dataset that are
# more or most relevant in predicting the target variable"
#
# Use RFE to reduce the number of dependent variables until we get all positive coefficients
#
# Optimise for AVERAGE PRECISION on the test data
bestAP = 0
bestPrecisionAtK = 0
k = 25
bestCoeffs = {}
bestIntercept = 0
for i in range(len(dependent_variable_columns), 1, -1):
    # find the most significant fields
    significantColumns = []
    rfe = RFE(logreg, n_features_to_select=i)
    rfe = rfe.fit(X, y.values.ravel())
    support = dict(zip(list(X.columns), rfe.support_.ravel()))
    for key, value in support.items():
        if value:
            significantColumns.append( key )
    X_train = trainingData.loc[:, significantColumns]
    X_test = testData.loc[:, significantColumns]
    model = logreg.fit(X_train, y_train.values.ravel())
    coeffs = dict(zip(list(X_train.columns), model.coef_[0]))
    y_pred = logreg.predict(X_test)
    # each y_pred_p row has 2 values
    # - 1st value is the probability that the sample should be in class "0" (i.e. it's a bad image)
    # - 2nd value is the probability that the sample should be in class "1" (i.e. it's a good image)
    y_pred_p = logreg.predict_proba(X_test)
    # calculate precision@k
    indices = np.argsort(-np.array(y_pred_p.T[1]))
    y_test_sorted_by_score = [y_test.values.ravel()[i] for i in indices]
    precisionatk = sum([1 if l==1 else 0 for l in y_test_sorted_by_score[:k]])/float(k)
    averagePrecision = metrics.average_precision_score(y_test, y_pred_p.T[1], average="micro")
    if (averagePrecision > bestAP):
        # only accept models whose coefficients are all non-negative, because
        # negative weights cannot be used in elasticsearch (see comment above)
        if ((len([x for x in model.coef_[0] if float(x) < 0])) == 0):
            bestPrecisionAtK = precisionatk
            bestAP = averagePrecision
            bestCoeffs = coeffs
            bestIntercept = model.intercept_[0]
print('Average precision score: {:.4f}'.format(bestAP))
print('Precision@25: {:.4f}'.format(bestPrecisionAtK))
print('Coefficients')
print(bestCoeffs)
print('Intercept')
print(bestIntercept)
|
Jayich-Lab/jax | examples/experiments/ex1_hardware_control.py | <gh_stars>1-10
from artiq.experiment import *
from jax import JaxExperiment, SinaraEnvironment
# __all__ in an experiment module should typically only include the experiment class.
# Specifically, it cannot include the base experiment class.
# ARTIQ discovers experiments by trying to load all objects that are subclasses of
# artiq.experiment.Experiment. If __all__ includes the base experiment class,
# ARTIQ will try to load the base experiment class which results in an error.
__all__ = ["HardwareControl"]
class HardwareControl(JaxExperiment, SinaraEnvironment):
    """Example experiment controlling a DDS and a TTL.

    An experiment must first inherit from a base experiment and then inherit an environment.
    The base experiment most importantly defines the self.run() function. It may contain
    scan structures, and it may use pulse sequences.
    The environment sets up the labrad connection and provides functions for data saving,
    loading parameters, resetting hardware, etc.
    This is a simple experiment that changes a DDS and a TTL, and resets them at the end of
    the experiment. To run this experiment, you need to run the "artiq" labrad server.
    Device names in self.build() can be changed, with the names in self.run_kernel()
    changed correspondingly.
    Before running this experiment, the DDS output should be terminated with a 50 ohm terminator.
    To run this experiment, import this experiment class in a file of the repository that
    artiq_master controls (see ARTIQ manual), and the experiment should show up after
    "scanning repository HEAD" using the experiment explorer in the artiq dashboard.
    """
    def build(self):
        super().build()  # Calls JaxExperiment.build(), which calls SinaraEnvironment.build()
        self.setattr_device("dp_468")  # a AD9910 DDS
        self.setattr_device("ttl4")  # a TTL output
    def prepare(self):
        super().prepare()  # Calls JaxExperiment.prepare(), which calls SinaraEnvironment.prepare()
    def run(self):
        # The previous `except Exception as e: raise e` handler was a no-op
        # re-raise that only obscured tracebacks; try/finally alone gives the
        # same behaviour: exceptions propagate and cleanup always runs.
        try:
            self.turn_off_all_ddses()
            self.run_kernel()  # Runs code on the device.
        finally:
            self.reset_sinara_hardware()  # resets the hardware to pre-experiment state.
            self.disconnect_labrad()  # closes the labrad connection.
    @kernel
    def run_kernel(self):
        self.core.reset()  # resets the core and clears FIFOs.
        self.core.break_realtime()
        self.dp_468.set_att(15.)  # sets DDS attenuation.
        self.core.break_realtime()
        # NOTE(review): self.dds_set_delay_mu is not set in this class -
        # presumably provided by SinaraEnvironment; confirm.
        delay_mu(self.dds_set_delay_mu)
        self.dp_468.set(300*MHz, 0., 0.1)  # sets a DDS.
        self.core.break_realtime()
        self.dp_468.sw.on()  # turns on a DDS.
        self.core.break_realtime()
        self.ttl4.on()  # turns on a TTL.
|
Jayich-Lab/jax | base/applets/jax_applet.py | import os
import asyncio
import threading
import pathlib
from PyQt5 import QtCore
from sipyco import pyon
__all__ = ["JaxApplet"]
class JaxApplet(QtCore.QObject):
    """Base class for all applets.

    LabRAD cannot be run in the main thread as twisted asyncioreactor does not support the
    WindowsProactorEventLoop, which ARTIQ requires. All LabRAD calls need to be done in a
    separate thread.
    """
    @classmethod
    def add_labrad_ip_argument(cls, applet, default_ip="127.0.0.1"):
        """Adds an argument to set the LabRAD IP address to connect to.

        Args:
            applet: artiq.applets.simple.SimpleApplet object.
            default_ip: str, default IP address to connect to.
                Default "127.0.0.1" (local computer).
        """
        # (first parameter renamed self -> cls: this is a classmethod.)
        applet.argparser.add_argument("--ip", type=str, default=default_ip,
                                      help="LabRAD manager IP address to connect to")
    @classmethod
    def add_id_argument(cls, applet, default_id=""):
        """Adds an argument to set the ID of the applet.

        Applet ID is used to assign different config files to instances of a same applet class.

        Args:
            applet: artiq.applets.simple.SimpleApplet object.
            default_id: str, default ID of the applet. Default "".
        """
        applet.argparser.add_argument("--id", type=str, default=default_id,
                                      help="Configuration ID")
    def __init__(self, **kwds):
        super().__init__(**kwds)
        # Event loop for all LabRAD traffic; created lazily in connect_to_labrad().
        self._labrad_loop = None
    def load_config_file(self, module_name, args):
        """Loads the config file content into self.config.

        The config path encodes the module name, LabRAD IP and applet ID, so
        multiple instances of the same applet keep separate configurations.

        Args:
            module_name: str, applet module name.
            args: command line arguments.
        """
        try:
            ip = args.ip
        except AttributeError:
            ip = "none"
        try:
            applet_id = args.id  # local renamed from `id` to avoid shadowing the builtin.
        except AttributeError:
            applet_id = ""
        folder_name = os.path.join(os.path.expanduser('~'), ".jax", "applets")
        # create the folder if it does not exist
        pathlib.Path(folder_name).mkdir(parents=True, exist_ok=True)
        self._config_file_path = os.path.join(folder_name, f"{module_name}_{ip}_{applet_id}.pyon")
        try:
            self.config = pyon.load_file(self._config_file_path)
        except FileNotFoundError:
            # No saved configuration yet; start empty.
            self.config = {}
    def save_config_file(self):
        """Write self.config to the config file."""
        pyon.store_file(self._config_file_path, self.config)
    def connect_to_labrad(self, ip="127.0.0.1"):
        """Connects to labrad in another thread (non-blocking).

        This function should be called by derived classes.
        After the connection finishes, self.labrad_connected will be called.
        The event loop in artiq.applets.simple.SimpleApplet is not compatible with asyncioreactor.
        """
        def worker():
            # A selector-based loop is required: asyncioreactor does not support
            # the Windows proactor loop (see class docstring).
            import selectors
            selector = selectors.SelectSelector()
            self._labrad_loop = asyncio.SelectorEventLoop(selector)
            asyncio.set_event_loop(self._labrad_loop)
            from twisted.internet import asyncioreactor
            asyncioreactor.install(self._labrad_loop)
            self._labrad_loop.create_task(self.labrad_worker(ip))
            self._labrad_loop.run_forever()
        self._labrad_thread = threading.Thread(target=worker)
        self._labrad_thread.start()
    def data_changed(self, data, mods):
        """We don't use ARTIQ dataset manager so this function is not used."""
        pass
    async def labrad_worker(self, ip):
        """Worker to connect to labrad in self._labrad_loop event loop."""
        from pydux.lib.control.clients.connection_asyncio import ConnectionAsyncio
        self.cxn = ConnectionAsyncio()
        await self.cxn.connect(ip)
        await self.labrad_connected()
        while True:  # required for the event loop to keep handling events.
            short_time = 0.01
            await asyncio.sleep(short_time)
    async def labrad_connected(self):
        """Called when the labrad connection self.cxn is set.

        Should be implemented by derived classes.
        """
        raise NotImplementedError("This function must be overriden by derived classes.")
    def run_in_labrad_loop(self, func):
        """Wrapper for an async function to run in self._labrad_loop.

        All code that uses labrad needs to be run in the labrad event loop.

        Args:
            func: async function to control labrad.

        Returns:
            func_ensured_future: sync function.
        """
        def func_ensured_future(*args, **kwargs):
            asyncio.ensure_future(func(*args, **kwargs), loop=self._labrad_loop)
        return func_ensured_future
    def closeEvent(self, event):
        # NOTE(review): QtCore.QObject itself never delivers closeEvent; this
        # hook only fires when a derived applet is also a QWidget - confirm.
        if self._labrad_loop is not None:
            self._labrad_loop.stop()
|
Jayich-Lab/jax | util/ui/fast_plot_trace.py | <gh_stars>1-10
import numpy as _np
import pyqtgraph as _pg
from PyQt5 import QtCore
class _PlotPath(_pg.QtGui.QGraphicsPathItem):
    """Generates a path for x-y plotting data.
    Replaces the expensive work of plotting data by plotting a path. When plotting a large dataset,
    a path can be generated for every n data points. The path can be plotted instead of the
    n data points it replaces.
    """
    def __init__(self, x, y, pen):
        # arrayToQPath builds one QPainterPath from the flattened sample arrays.
        self.path = _pg.arrayToQPath(x.flatten(), y.flatten())
        super().__init__(self.path)
        self.setPen(pen)
    def shape(self):
        # NOTE(review): this override simply delegates to the base class, so it
        # does not actually avoid the expensive QGraphicsPathItem.shape()
        # computation the comment below claims - confirm whether a cheaper
        # shape (e.g. one based on boundingRect) was intended here.
        return super().shape()  # overriden because QGraphicsPathItem.shape is too expensive.
    def boundingRect(self):
        # Delegates to the cached QPainterPath's bounding rectangle.
        return self.path.boundingRect()
class FastPlotTrace(QtCore.QObject):
    """An interface to plot a trace efficiently.

    For a short trace (number of data points < length_to_path), it works the same as
    normal PyQtGraph data plotting. For a long trace, it saves every length_to_path elements
    as a _PlotPath object. The _PlotPath objects can be plotted more efficiently,
    and actual data plotted is short, so appending data is more efficient too.

    Args:
        plot_widget: pyqtgraph.PlotWidget to plot on.
        pen_kwargs: dict or None, pen arguments for the trace; None means
            {"color": "w", "width": 1}. See PyQtGraph documentation for details.
            Avoid using 'width' other than 1 for performance reasons.
        length_to_path: int, number of data points to save as a _PlotPath object.
    """
    # emitted when the trace is updated.
    # argument is the x-axis value of the last data point.
    trace_updated = QtCore.pyqtSignal(float)
    # emitted when the trace is removed.
    trace_removed = QtCore.pyqtSignal()
    def __init__(self, plot_widget, pen_kwargs=None,
                 length_to_path=20000):
        super().__init__()
        if pen_kwargs is None:
            # Default pen; built here instead of as a mutable default argument.
            pen_kwargs = {"color": "w", "width": 1}
        self._plot_widget = plot_widget
        self._pen = _pg.mkPen(**pen_kwargs)
        self._length_to_path = length_to_path
        self._plot_paths = []  # list of _PlotPath.
        self._plot_data_item = None  # actual PlotWidget.plot object.
        self._xdata = None
        self._ydata = None
        # Length plotted at the last update; -1 forces the next update to redraw.
        # (Previously only assigned in remove_trace(), so calling append()/
        # update_trace() before set() raised AttributeError.)
        self._last_length = -1
    def update_trace(self):
        """Updates the trace on the plot."""
        if self._xdata is None or self._ydata is None:
            return
        # length of data to plot.
        # if xdata and ydata do not have the same length,
        # the longer one is not fully plotted.
        len_plot = min([len(self._xdata), len(self._ydata)])
        if len_plot == self._last_length:
            return
        last_x = self._xdata[-1]  # x-coordinate of the last data point.
        while len_plot > self._length_to_path:
            # creates _PlotPath until the length is shorter than self._length_to_path.
            path = _PlotPath(self._xdata[:self._length_to_path],
                             self._ydata[:self._length_to_path],
                             self._pen)
            self._plot_widget.addItem(path)
            self._plot_paths.append(path)
            # slicing at length_to_path-1 keeps the path's last point at the
            # front of the remaining data, so consecutive segments share an endpoint.
            self._xdata = self._xdata[self._length_to_path-1:]
            self._ydata = self._ydata[self._length_to_path-1:]
            len_plot = min([len(self._xdata), len(self._ydata)])
        if len_plot == len(self._xdata):
            xdata = self._xdata
        else:
            xdata = self._xdata[:len_plot]
        if len_plot == len(self._ydata):
            ydata = self._ydata
        else:
            ydata = self._ydata[:len_plot]
        if self._plot_data_item is not None:
            self._plot_data_item.setData(xdata, ydata)
        else:
            self._plot_data_item = self._plot_widget.plot(xdata, ydata, pen=self._pen)
        self._last_length = len_plot
        self.trace_updated.emit(last_x)
    def remove_trace(self):
        """Removes the trace from the plot."""
        for kk in self._plot_paths:
            self._plot_widget.removeItem(kk)
        self._plot_paths = []
        if self._plot_data_item is not None:
            self._plot_widget.removeItem(self._plot_data_item)
            self._plot_data_item = None
        self._last_length = -1
        self.trace_removed.emit()
    def set(self, xdata, ydata):
        """Sets data, clears the existing trace, and plots the new trace.

        Args:
            xdata: np.array, x-axis data.
            ydata: np.array, y-axis data.
        """
        self._xdata = _np.array(xdata)
        self._ydata = _np.array(ydata)
        self.remove_trace()
        self.update_trace()
    def append_x(self, xdata):
        """Appends to the x-axis data."""
        self._xdata = _np.append(self._xdata, _np.array(xdata), axis=0)
    def append_y(self, ydata):
        """Appends to the y-axis data."""
        self._ydata = _np.append(self._ydata, _np.array(ydata), axis=0)
    def append(self, xdata, ydata):
        """Appends to both x-axis and y-axis data, and updates the trace."""
        self.append_x(xdata)
        self.append_y(ydata)
        self.update_trace()
|
Jayich-Lab/jax | tools/applets/dds_channel.py | from PyQt5 import QtWidgets, QtGui, QtCore
from jax.util.ui.dialog_on_top import DialogOnTop
class DDSParameters:
    """Caches the state of one DDS channel and forwards changes to the artiq server.

    Each setter updates the cached value; when `update` is true and the value
    actually changed, the change is also pushed to the hardware via the parent
    applet's labrad loop.
    """
    def __init__(self, parent, channel, cpld, amplitude, att, frequency, phase, state):
        self.parent = parent
        self.channel = channel
        self.cpld = cpld
        self._amplitude = amplitude
        self._att = att
        self._frequency = frequency
        self._phase = phase
        self._state = state

    @property
    def amplitude(self):
        return self._amplitude

    @property
    def att(self):
        return self._att

    @property
    def frequency(self):
        return self._frequency

    @property
    def phase(self):
        return self._phase

    @property
    def state(self):
        return self._state

    def set_amplitude(self, value, update=True):
        if update and value != self._amplitude:
            self._change_dds((self.channel, "amplitude", value))
        self._amplitude = value

    def set_att(self, value, update=True):
        if update and value != self._att:
            self._change_dds((self.channel, "attenuation", value))
        self._att = value

    def set_frequency(self, value, update=True):
        if update and value != self._frequency:
            self._change_dds((self.channel, "frequency", value))
        self._frequency = value

    def set_phase(self, value, update=True):
        if update and value != self._phase:
            self._change_dds((self.channel, "phase", value))
        self._phase = value

    def set_state(self, value, update=True):
        if update and value != self._state:
            # The server encodes on/off as +1. / -1.
            self._change_dds((self.channel, "state", 1. if value else -1.))
        self._state = value

    def _change_dds(self, command):
        # Hardware access must happen in the parent's labrad event loop.
        async def send(self, command):
            await self.parent.artiq.set_dds(command)
        self.parent.run_in_labrad_loop(send)(self, command)
class DDSDetail(DialogOnTop):
    """A pop-up dialog exposing the less frequently used settings (attenuation) for one channel."""
    def __init__(self, dds_parameters, parent=None):
        self.dds_parameters = dds_parameters
        super().__init__(parent)
        self.setWindowTitle(dds_parameters.channel)
        self.initialize_gui()
        self.setup_gui_listeners()

    def initialize_gui(self):
        """Builds the dialog's widgets and lays them out in a grid."""
        layout = QtWidgets.QGridLayout()
        self.setLayout(layout)
        label_font = QtGui.QFont('Arial', 10)
        spinbox_font = QtGui.QFont('Arial', 15)
        cpld_label = QtWidgets.QLabel(f"CPLD: {self.dds_parameters.cpld}")
        cpld_label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                 QtWidgets.QSizePolicy.Fixed)
        cpld_label.setFont(label_font)
        layout.addWidget(cpld_label, 0, 0)
        att_label = QtWidgets.QLabel("Att (dB)")
        att_label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                QtWidgets.QSizePolicy.Fixed)
        att_label.setFont(label_font)
        layout.addWidget(att_label, 1, 0)
        self.att_box = QtWidgets.QDoubleSpinBox()
        self.att_box.setDecimals(1)
        self.att_box.setMinimum(-31.5)
        self.att_box.setMaximum(0.)
        self.att_box.setSingleStep(0.5)
        self.att_box.setFont(spinbox_font)
        self.att_box.setKeyboardTracking(False)
        # The widget displays the negated stored attenuation value.
        self.att_box.setValue(-self.dds_parameters.att)
        layout.addWidget(self.att_box, 2, 0)

    def setup_gui_listeners(self):
        """Connects widget signals to their handlers."""
        self.att_box.valueChanged.connect(self.on_widget_att_changed)

    def on_widget_att_changed(self, val):
        # Negate back: the spinbox shows the negated stored value (see initialize_gui).
        self.dds_parameters.set_att(-val)
class DDSChannel(QtWidgets.QGroupBox):
    """GUI for a DDS channel.

    Widget changes are pushed to dds_parameters (on_widget_* handlers);
    server-originated changes update the widgets with signals blocked
    (on_monitor_* handlers) so they are not echoed back to the DDS.
    """
    def __init__(self, dds_parameters, parent=None):
        self.dds_parameters = dds_parameters
        super().__init__(parent)
        self.initialize_gui()
        self.setup_gui_listeners()
    def initialize_gui(self):
        """Builds the widgets: title, frequency/amplitude boxes, and switch button."""
        titlefont = QtGui.QFont('Arial', 16)
        labelfont = QtGui.QFont('Arial', 10)
        buttonfont = QtGui.QFont('Arial', 15)
        spinboxfont = QtGui.QFont('Arial', 15)
        grid = QtWidgets.QGridLayout()
        self.setLayout(grid)
        self.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
                           QtWidgets.QSizePolicy.Fixed)
        label = QtWidgets.QLabel(self.dds_parameters.channel)
        label.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                            QtWidgets.QSizePolicy.Fixed)
        label.setAlignment(QtCore.Qt.AlignHCenter)
        label.setFont(titlefont)
        grid.addWidget(label, 0, 0, 1, 3)
        label = QtWidgets.QLabel("Frequency (MHz)")
        label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
                            QtWidgets.QSizePolicy.Fixed)
        label.setFont(labelfont)
        grid.addWidget(label, 1, 0)
        label = QtWidgets.QLabel("Amplitude")
        label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
                            QtWidgets.QSizePolicy.Fixed)
        label.setFont(labelfont)
        grid.addWidget(label, 1, 1)
        self.freq_box = QtWidgets.QDoubleSpinBox()
        self.freq_box.setDecimals(3)
        self.freq_box.setMinimum(1.0)
        self.freq_box.setMaximum(500.0)
        self.freq_box.setSingleStep(0.1)
        self.freq_box.setFont(spinboxfont)
        # Only commit on editing-finished/arrows, not on every keystroke.
        self.freq_box.setKeyboardTracking(False)
        MHz_to_Hz = 1.e6
        # dds_parameters stores frequency in Hz; the widget shows MHz.
        self.freq_box.setValue(self.dds_parameters.frequency / MHz_to_Hz)
        grid.addWidget(self.freq_box, 2, 0)
        self.amp_box = QtWidgets.QDoubleSpinBox()
        self.amp_box.setDecimals(5)
        self.amp_box.setMinimum(0.0)
        self.amp_box.setMaximum(1.0)
        self.amp_box.setSingleStep(0.01)
        self.amp_box.setFont(spinboxfont)
        self.amp_box.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                   QtWidgets.QSizePolicy.Preferred)
        self.amp_box.setKeyboardTracking(False)
        self.amp_box.setValue(self.dds_parameters.amplitude)
        grid.addWidget(self.amp_box, 2, 1)
        # Checkable button: "I" = on, "o" = off.
        self.switch_button = QtWidgets.QPushButton("o")
        self.switch_button.setFont(buttonfont)
        self.switch_button.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                         QtWidgets.QSizePolicy.Fixed)
        self.switch_button.setCheckable(True)
        if self.dds_parameters.state:
            self.switch_button.setChecked(self.dds_parameters.state)
            self.set_switch_button_text(self.dds_parameters.state)
        grid.addWidget(self.switch_button, 2, 2)
    def setup_gui_listeners(self):
        """Connects widget signals to their handlers."""
        self.freq_box.valueChanged.connect(self.on_widget_freq_changed)
        self.amp_box.valueChanged.connect(self.on_widget_amp_changed)
        self.switch_button.clicked.connect(self.on_widget_switch_changed)
    def on_widget_freq_changed(self, val):
        """Pushes a user frequency change (MHz in widget -> Hz) to the DDS."""
        MHz_to_Hz = 1.e6
        self.dds_parameters.set_frequency(val * MHz_to_Hz)
    def on_widget_amp_changed(self, val):
        """Pushes a user amplitude change to the DDS."""
        self.dds_parameters.set_amplitude(val)
    def on_widget_switch_changed(self, checked):
        """Pushes a user switch toggle to the DDS and updates the button text."""
        self.dds_parameters.set_state(checked)
        self.set_switch_button_text(checked)
    def on_monitor_freq_changed(self, val):
        """Reflects a server-side frequency change (Hz) without echoing it back."""
        MHz_to_Hz = 1.e6
        self.freq_box.blockSignals(True)
        self.freq_box.setValue(val / MHz_to_Hz)
        self.freq_box.blockSignals(False)
        # update=False: record the value locally without re-sending the command.
        self.dds_parameters.set_frequency(val, False)
    def on_monitor_amp_changed(self, val):
        """Reflects a server-side amplitude change without echoing it back."""
        self.amp_box.blockSignals(True)
        self.amp_box.setValue(val)
        self.amp_box.blockSignals(False)
        self.dds_parameters.set_amplitude(val, False)
    def on_monitor_att_changed(self, val):
        """Records a server-side attenuation change (no widget here; see DDSDetail)."""
        self.dds_parameters.set_att(val, False)
    def on_monitor_switch_changed(self, checked):
        """Reflects a server-side switch change without echoing it back."""
        self.switch_button.blockSignals(True)
        self.switch_button.setChecked(checked)
        self.switch_button.blockSignals(False)
        self.set_switch_button_text(checked)
        self.dds_parameters.set_state(checked, False)
    def set_switch_button_text(self, checked):
        """Shows "I" when the channel is on, "o" when it is off."""
        if checked:
            self.switch_button.setText("I")
        else:
            self.switch_button.setText("o")
    def contextMenuEvent(self, event):
        """Right-click menu with a "Details" entry opening the DDSDetail dialog."""
        menu = QtWidgets.QMenu()
        details_action = menu.addAction("Details")
        action = menu.exec_(self.mapToGlobal(event.pos()))
        if action == details_action:
            self.show_details()
    def show_details(self):
        """Opens the modal detail dialog for this channel."""
        self.details = DDSDetail(self.dds_parameters, self)
        self.details.exec_()
|
Jayich-Lab/jax | base/sequences/sequence.py | from artiq.experiment import *
class Sequence:
    """Base class for pulse sequences.

    Pulse sequences are building blocks of kernel code. A sequence can contain
    other Sequence instances as subsequences. Set required_parameters to a list
    of parameters used in the sequence, and required_subsequences to a list of
    sequences used in the sequence. Both must be fully populated before
    __init__().

    Args:
        exp: experiment instance.
        parameter_group: ParameterGroup, experiment parameters.
    """
    required_parameters = []
    required_subsequences = []

    @classmethod
    def all_required_parameters(cls):
        """Returns all required parameters of this sequence and its subsequences."""
        collected = set(cls.required_parameters)
        for subsequence in cls.required_subsequences:
            collected.update(subsequence.all_required_parameters())
        return list(collected)

    def __init__(self, exp, parameter_group):
        self.exp = exp
        self.p = parameter_group

    @kernel
    def run(self):
        """Override this function to construct the pulse sequence."""
        pass
|
Jayich-Lab/jax | tools/experiments/initialize_sinara.py | from sipyco import pyon
from artiq.experiment import *
from jax import JaxExperiment, SinaraEnvironment
__all__ = ["InitializeSinara"]
class InitializeSinara(JaxExperiment, SinaraEnvironment):
    """Handles device initialization.
    This sets previously initialized devices to their original parameter settings.
    For new uninitialized devices it saves their parameters.
    To use this class, inherit this class in the experiment repository.
    A device is a DDS channel or a TTL channel.
    Parameters can be frequency, phase, amplitude, state, etc.
    This experiment assumes that the sinara hardware has at least a DDS and a TTL board.
    For an uninitialized DDS or TTL device, the experiment assumes its state to be off (low).
    """
    kernel_invariants = {
        "initialized_ddses", "uninitialized_ddses", "initialized_ttls", "uninitialized_ttls"
    }
    def build(self):
        """Sets a low default priority so initialization yields to real experiments."""
        super().build()
        second_lowest_priority = -99  # we use priorities ranging from -100 to 100.
        self.set_default_scheduling(priority=second_lowest_priority, pipeline_name="main")
    def prepare(self):
        """Collects DDS/TTL device objects before running."""
        super().prepare()
        self.get_dds_and_ttls()
    def run(self):
        """Runs the kernel, then notifies the artiq server that initialization finished."""
        self.run_kernel()
        self.cxn.artiq.finish_dds_initialize()
        self.cxn.artiq.finish_ttl_initialize()
        self.disconnect_labrad()
    @host_only
    def get_dds_and_ttls(self):
        """Gets DDS and TTL objects that are initialized and uninitialized.
        Populates self.initialized_ddses, self.uninitialized_ddses,
        self.initialized_ttls, and self.uninitialized_ttls. The first element
        in each of these lists is a placeholder element for determining element type.
        """
        self.urukuls = [self.get_device(kk) for kk in self.devices.urukuls]
        dds_params = pyon.decode(self.cxn.artiq.get_dds_parameters())
        dummy_dds = self.get_device(self.devices.ad9910s[0])
        self.initialized_ddses = [(dummy_dds, [0., 0., 0., 0., 0.])]  # DDSes in artiq server.
        self.uninitialized_ddses = [(dummy_dds, "")]  # DDSes not in artiq server.
        for name in dds_params:
            if name in self.devices.ad9910s:
                self.initialized_ddses.append((self.get_device(name), dds_params[name]))
            else:
                # DDS tracked by the server but not in the device list: drop it.
                self.cxn.artiq.remove_sinara_dds(name)
        for name in self.devices.ad9910s:
            if name not in dds_params:
                self.uninitialized_ddses.append((self.get_device(name), name))
        ttl_params = pyon.decode(self.cxn.artiq.get_ttl_parameters())
        dummy_ttl = self.get_device(self.devices.ttl_outs[0])
        self.initialized_ttls = [(dummy_ttl, 0.)]
        self.uninitialized_ttls = [(dummy_ttl, "")]
        for name in ttl_params:
            if name in self.devices.ttl_outs:
                self.initialized_ttls.append((self.get_device(name), ttl_params[name]))
            else:
                # TTL tracked by the server but not in the device list: drop it.
                self.cxn.artiq.remove_sinara_ttl(name)
        for name in self.devices.ttl_outs:
            if name not in ttl_params:
                self.uninitialized_ttls.append((self.get_device(name), name))
    @kernel
    def run_kernel(self):
        """Initializes boards, restores known device values, and reads unknown ones."""
        self.core.reset()
        for kk in range(len(self.urukuls)):
            self.core.break_realtime()
            self.urukuls[kk].init()
        for kk in range(len(self.initialized_ddses)):
            if kk > 0:  # index 0 is the type-determining placeholder.
                dds, values = self.initialized_ddses[kk]
                self.core.break_realtime()
                dds.init()
                self._set_dds(dds, values)
        for kk in range(len(self.uninitialized_ddses)):
            if kk > 0:  # index 0 is the type-determining placeholder.
                dds, name = self.uninitialized_ddses[kk]
                self.core.break_realtime()
                dds.init()
                self.get_dds(dds, name)
        for kk in range(len(self.initialized_ttls)):
            if kk > 0:  # index 0 is the type-determining placeholder.
                ttl, value = self.initialized_ttls[kk]
                self._set_ttl(ttl, value)
        for kk in range(len(self.uninitialized_ttls)):
            if kk > 0:  # index 0 is the type-determining placeholder.
                ttl, name = self.uninitialized_ttls[kk]
                self.get_ttl(ttl, name)
    @kernel
    def get_dds(self, device, name):
        """Get the DDS values from the sinara hardware.
        It gets the frequency, phase, amplitude, and attenuation from the DDS,
        and assumes that the DDS state is -1. (off).
        """
        self.core.break_realtime()
        frequency, phase, amplitude = device.get()
        self.core.break_realtime()
        attenuation = device.get_att()
        state = -1.
        self.update_dds(name, [frequency, phase, amplitude, attenuation, state])
    @rpc
    def update_dds(self, name: TStr, values: TList(TFloat)):
        """RPC: forwards newly-read DDS values to the artiq labrad server."""
        self.cxn.artiq.update_sinara_dds_value_from_int_experiment(name, values)
    @kernel
    def get_ttl(self, device, name):
        """Assumes that the TTL state is -1. (low)."""
        self.update_ttl(name, -1.)
    @rpc
    def update_ttl(self, name: TStr, value: TFloat):
        """RPC: forwards the assumed TTL state to the artiq labrad server."""
        self.cxn.artiq.update_sinara_ttl_value_from_int_experiment(name, value)
|
Jayich-Lab/jax | util/ui/dialog_on_top.py | <filename>util/ui/dialog_on_top.py
from PyQt5 import QtWidgets, QtCore
class DialogOnTop(QtWidgets.QDialog):
    """Modal QDialog that shows on top of the main window and deletes on close."""

    def __init__(self, parent):
        super().__init__(parent)
        # Drop the "?" context-help button from the title bar.
        flags = self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint
        self.setWindowFlags(flags)
        # Block interaction with the rest of the application while open.
        self.setWindowModality(QtCore.Qt.ApplicationModal)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
|
Jayich-Lab/jax | __init__.py | from jax.base.applets.jax_applet import JaxApplet
from jax.base.applets.real_time_plot_applet import RealTimePlotApplet
from jax.base.environments.jax_environment import JaxEnvironment
from jax.base.environments.sinara_environment import SinaraEnvironment
from jax.base.experiments.jax_experiment import JaxExperiment
from jax.base.experiments.scan import Scan
from jax.base.sequences.sequence import Sequence
|
Jayich-Lab/jax | util/labrad.py | import numpy as _np
from labrad.units import WithUnit, WithDimensionlessUnit
__all__ = ["remove_labrad_units"]
def remove_labrad_units(value):
    """Removes labrad unit from returned value of a labrad server.

    Labrad sends some types of values in WithUnit or WithDimensionlessUnit types,
    which we need to convert to np.array to serialize them or use them in ARTIQ python.
    Tuple and list objects need to be iterated over to remove labrad unit types.
    WithUnit is converted to the base unit first, and then converted to np.array.
    """
    if isinstance(value, (tuple, list)):
        # Recurse into containers, preserving the container type.
        stripped = [remove_labrad_units(element) for element in value]
        return tuple(stripped) if isinstance(value, tuple) else stripped
    if isinstance(value, (WithDimensionlessUnit, WithUnit)):
        value = _np.array(value.inBaseUnits())
        if _np.ndim(value) == 0:
            value = value.item()  # sipyco.pyon interprets numpy scalars as 1d arrays.
    return value
|
Jayich-Lab/jax | util/drift_tracker.py | import time as _t
import numpy as _np
import scipy.constants as _c
from artiq.experiment import *
from jax.util.tree_dict import TreeDict
__all__ = ["DriftTracker"]
class ZeemanTracker:
    """Calculates Zeeman shifts.

    Args:
        Zeeman_dict: dict that includes the following key-value pairs:
            lower_gF: float, Lande g-factor of the lower state.
            upper_gF: float, Lande g-factor of the upper state.
            B: float, magnetic field in T.
    """
    kernel_invariants = {
        "lower_gF", "upper_gF"
    }

    def __init__(self, Zeeman_dict):
        # Copy the relevant entries onto the instance as plain attributes.
        for key in ("lower_gF", "upper_gF", "B"):
            setattr(self, key, Zeeman_dict[key])
class DriftTracker:
    """Converts detuning to DDS frequency.
    Args:
        drift_tracker_dict: dict that includes the following key-value pairs:
            center_frequency: float, frequency of the DDS when the light is resonant.
            detuning_factor: int, factor to convert DDS frequency to detuning.
                Use +/-2 when driving a double-pass AOM, and +/-1 when driving a single-pass AOM.
                The plus(minus) sign is from shifting to higher(lower) frequency.
            center_drift_rate: float, drift rate of center_frequency in Hz/s.
            last_calibration: float, epoch time when the drift tracker was calibrated.
            Zeeman: dict or None, parameters to calculate Zeeman shifts.
                See ZeemanTracker for details. If None, all ZeemanTracker attributes are set to 0.
    """
    kernel_invariants = {
        "detuning_factor", "uB_over_h"
    }
    def __init__(self, drift_tracker_dict):
        self.center_frequency = drift_tracker_dict["center_frequency"]
        self.detuning_factor = drift_tracker_dict["detuning_factor"]
        self.center_drift_rate = drift_tracker_dict["center_drift_rate"]
        self.last_calibration = drift_tracker_dict["last_calibration"]
        if drift_tracker_dict["Zeeman"] is None:
            # define "Zeeman" key so ARTIQ python type checking works when there are instances of
            # DriftTrackers with and without "Zeeman" defined in the dict.
            drift_tracker_dict["Zeeman"] = {
                "upper_gF": 0., "lower_gF": 0., "B": 0.
            }
        self.Zeeman = ZeemanTracker(drift_tracker_dict["Zeeman"])
        # Machine-unit equivalents; populated by sync_time() on the core device.
        self._last_calibration_mu = _np.int64(0)
        self._center_drift_rate_mu = 0.
        # Bohr magneton over the Planck constant (Hz/T).
        self.uB_over_h = _c.physical_constants["Bohr magneton"][0] / _c.h
    @kernel(flags={"fast-math"})
    def sync_time(self, time_now):
        """Syncs wall clock time with core device time.
        Must be called if self.get_frequency_kernel or self.get_Zeeman_frequency_kernel are used.
        NOTE(review): uses self.core, which must be attached by the owning
        experiment -- it is not set in this class.
        Args:
            time_now: float, epoch time now.
        """
        time_after_calibration = time_now - self.last_calibration
        self._last_calibration_mu = now_mu() - self.core.seconds_to_mu(time_after_calibration)
        # time is converted to machine units, frequency is still in Hz.
        self._center_drift_rate_mu = self.center_drift_rate * self.core.mu_to_seconds(1)
    @host_only
    def get_frequency_host(self, detuning):
        """Returns the DDS frequency for a detuning, using host wall-clock time."""
        center_drift = (_t.time() - self.last_calibration) * self.center_drift_rate
        return self.center_frequency + detuning / self.detuning_factor + center_drift
    @kernel(flags={"fast-math"})
    def get_frequency_kernel(self, detuning):
        """Returns the DDS frequency for a detuning, using core device time (needs sync_time)."""
        center_drift = (now_mu() - self._last_calibration_mu) * self._center_drift_rate_mu
        return self.center_frequency + detuning / self.detuning_factor + center_drift
    @host_only
    def get_Zeeman_frequency_host(self, detuning, lower_mF, upper_mF):
        """Returns the DDS frequency with the Zeeman shift added (host version)."""
        Zeeman_shift = ((self.Zeeman.upper_gF * upper_mF - self.Zeeman.lower_gF * lower_mF)
                        * self.uB_over_h * self.Zeeman.B)
        return self.get_frequency_host(detuning + Zeeman_shift)
    @kernel(flags={"fast-math"})
    def get_Zeeman_frequency_kernel(self, detuning, lower_mF, upper_mF):
        """Returns the DDS frequency with the Zeeman shift added (kernel version)."""
        Zeeman_shift = ((self.Zeeman.upper_gF * upper_mF - self.Zeeman.lower_gF * lower_mF)
                        * self.uB_over_h * self.Zeeman.B)
        return self.get_frequency_kernel(detuning + Zeeman_shift)
|
Jayich-Lab/jax | base/experiments/jax_experiment.py | from artiq.experiment import Experiment
class JaxExperiment(Experiment):
    """Base class for all Jayich lab experiments."""
    def build(self):
        super().build()
    def prepare(self):
        """Skips Experiment.prepare() and goes to prepare() defined in the environment."""
        # Removing Experiment.prepare from the class (not the instance) makes the
        # super().prepare() lookup fall through the MRO to the environment mixin.
        # NOTE(review): this mutates the shared Experiment class process-wide;
        # every Experiment subclass is affected after the first call.
        delattr(Experiment, "prepare")
        super().prepare()
|
Jayich-Lab/jax | examples/sequences/state_detect.py | from artiq.experiment import *
from jax import Sequence
__all__ = ["StateDetect"]
class StateDetect(Sequence):
    """An example state detection sequence.

    Turns on the cool and repump DDSes, and counts the PMT output.
    """
    kernel_invariants = {
        "_cool_dds", "_repump_dds", "_cool_drift_tracker", "_repump_drift_tracker"
    }
    required_parameters = [
        ("devices", "cool_dds"),
        ("devices", "repump_dds"),
        ("devices", "pmt_edge_counter"),
        ("state_detect", "cool_detuning"),
        ("state_detect", "cool_amplitude"),
        ("state_detect", "cool_drift_tracker"),
        ("state_detect", "repump_detuning"),
        ("state_detect", "repump_amplitude"),
        ("state_detect", "repump_drift_tracker"),
        ("state_detect", "detect_time"),
    ]
    def __init__(self, exp, parameter_group):
        super().__init__(exp, parameter_group)
        self.setup()
    @host_only
    def setup(self):
        """Initializes devices and sets constants.

        Float number calculation is expensive on the device, so they should be calculated
        in host code if possible.
        """
        self._cool_dds = self.exp.get_device(self.p.devices.cool_dds)
        self._repump_dds = self.exp.get_device(self.p.devices.repump_dds)
        # pmt needs to be accessed in other classes.
        self.pmt = self.exp.get_device(self.p.devices.pmt_edge_counter)
        phase = 0.
        self._cool_pow = self._cool_dds.turns_to_pow(phase)
        self._repump_pow = self._repump_dds.turns_to_pow(phase)
        s = self.p.state_detect
        self._cool_asf = self._cool_dds.amplitude_to_asf(s.cool_amplitude)
        self._repump_asf = self._repump_dds.amplitude_to_asf(s.repump_amplitude)
        self._cool_drift_tracker = self.exp.get_drift_tracker(s.cool_drift_tracker)
        self._repump_drift_tracker = self.exp.get_drift_tracker(s.repump_drift_tracker)
        self._detect_time_mu = self.exp.core.seconds_to_mu(s.detect_time)
        # Bug fix: these two lines previously read "d.cool_detuning" and
        # "d.repump_detuning", but no name "d" exists in this scope (NameError
        # at setup time). The detunings live on "s" (state_detect parameters).
        cool_frequency = self._cool_drift_tracker.get_frequency_host(s.cool_detuning)
        self._cool_ftw = self._cool_dds.frequency_to_ftw(cool_frequency)
        repump_frequency = self._repump_drift_tracker.get_frequency_host(s.repump_detuning)
        self._repump_ftw = self._repump_dds.frequency_to_ftw(repump_frequency)
    @kernel
    def run(self):
        """Runs the detection pulse: DDSes on, gate the PMT counter, DDSes off."""
        self._cool_dds.set_mu(self._cool_ftw, self._cool_pow, self._cool_asf)
        self._repump_dds.set_mu(self._repump_ftw, self._repump_pow, self._repump_asf)
        delay_mu(self.exp.rtio_cycle_mu)
        self._cool_dds.sw.on()
        self._repump_dds.sw.on()
        # counts rising edges.
        self.pmt.gate_rising_mu(self._detect_time_mu)
        self._cool_dds.sw.off()
        self._repump_dds.sw.off()
        delay_mu(self.exp.rtio_cycle_mu)
        # zero the amplitude so nothing is emitted even if a switch is left on.
        self._cool_dds.set_mu(self._cool_ftw, self._cool_pow, 0)
        self._repump_dds.set_mu(self._repump_ftw, self._repump_pow, 0)
|
Jayich-Lab/jax | examples/experiments/ex2_pulse_sequence.py | from sipyco import pyon
from artiq.experiment import *
from jax import JaxExperiment, SinaraEnvironment
from jax.examples.sequences.example2 import Example2
from jax.util.parameter_group import ParameterGroup
from jax.util.drift_tracker import DriftTracker
__all__ = ["PulseSequence"]
class PulseSequence(JaxExperiment, SinaraEnvironment):
    """Example experiment that contains a pulse sequence and demonstrates data saving.
    This is an experiment that runs a Doppler cooling - state detection sequence repeatedly.
    The experiment can be cleanly terminated by the user, and it can handover to another higher
    priority experiment too.
    To run this experiment, you need to run the "artiq" and "vault" labrad servers.
    It also demonstrates how to use "parameter_bank" and "drift_tracker" servers with an
    experiment. The parameter_bank server saves parameters of the experiment, and the
    drift_tracker server provides drift trackers for ion transition frequencies.
    Set USE_PARAMETER_BANK and/or USE_DRIFT_TRACKER to True if the user
    wants to use these servers. Otherwise, mock parameters and drift trackers are used.
    Before running this experiment, the DDSes should be terminated with 50 ohm terminators.
    To run this experiment, import this experiment class in a file of the repository that
    artiq_master controls (see ARTIQ manual), and the experiment should show up after
    "scanning repository HEAD" using the experiment explorer in the artiq dashboard.
    """
    USE_PARAMETER_BANK = False
    USE_DRIFT_TRACKER = False
    def build(self):
        super().build()
        # parameters required by the experiment.
        exp_parameters = [
            ("example2", "num_of_repeats"),
            ("example2", "cool_time"),
            ("example2", "wait_time")
        ]
        self.get_parameter_paths(exp_parameters, [Example2])  # populates self.parameter_paths
    def prepare(self):
        """Loads parameters and drift trackers (real or mock) and builds the sequence."""
        super().prepare()
        if self.USE_PARAMETER_BANK:
            self.save_parameters()
        else:
            self.get_mock_parameters()
        if not self.USE_DRIFT_TRACKER:
            self.get_mock_drift_trackers()
        self.sequence = Example2(self, self.p, self.p.example2.cool_time, self.p.example2.wait_time)
    def run(self):
        """Runs the repeat loop, pausing/stopping cooperatively, and always cleans up."""
        try:
            self.repeats_done = 0  # tracks how many repetitions have been done.
            self.open_file()  # opens up a file for writing data.
            # defines an instance variable that will be used in the kernel.
            # instance variables cannot be defined in the kernel, but can be modified
            # there as long as the variable's type is not changed.
            self.counts_dset_name = ""
            while self.repeats_done < self.p.example2.num_of_repeats:
                # checks if user has stopped the experiment.
                should_stop = self.check_stop_or_do_pause()
                if should_stop:
                    break
                else:
                    self.turn_off_all_ddses()
                    self.run_kernel()
        except Exception as e:
            raise e
        finally:
            self.reset_sinara_hardware()  # resets the hardware to pre-experiment state.
            self.close_file()  # closes the data file.
            self.disconnect_labrad()  # closes the labrad connection.
    def get_mock_parameters(self):
        """Change 'devices' section to set to valid device names."""
        params = {
            "example2": {
                "num_of_repeats": 1000,
                "cool_time": 5*ms,
                "wait_time": 5*us
            },
            "devices": {
                "cool_dds": "dp_468",
                "repump_dds": "dp_1079",
                "pmt_edge_counter": "pmt_counter"
            },
            "state_detect": {
                "cool_detuning": -20*MHz,
                "cool_amplitude": 0.1,
                "cool_drift_tracker": "468",
                "repump_detuning": 20*MHz,
                "repump_amplitude": 0.1,
                "repump_drift_tracker": "1079",
                "detect_time": 1*ms
            },
            "doppler_cool": {
                "cool_detuning": -20*MHz,
                "cool_amplitude": 0.05,
                "cool_drift_tracker": "468",
                "repump_detuning": 20*MHz,
                "repump_amplitude": 0.05,
                "repump_drift_tracker": "1079"
            }
        }
        self.p = ParameterGroup(params)
        self.add_attribute("parameters", pyon.encode(params))
    def get_mock_drift_trackers(self):
        """Builds local DriftTracker objects instead of using the drift_tracker server."""
        self.drift_trackers = {}
        param_468 = {"center_frequency": 260*MHz, "detuning_factor": -2, "center_drift_rate": 0.,
                     "last_calibration": 0., "Zeeman": None}
        self.drift_trackers["468"] = DriftTracker(param_468)
        self.add_attribute("468", pyon.encode(param_468), "drift_trackers")
        param_1079 = {"center_frequency": 105*MHz, "detuning_factor": -2, "center_drift_rate": 0.,
                      "last_calibration": 0., "Zeeman": None}
        self.drift_trackers["1079"] = DriftTracker(param_1079)
        self.add_attribute("1079", pyon.encode(param_1079), "drift_trackers")
    @kernel
    def run_kernel(self):
        """Runs repeats of the pulse sequence, appending counts to the dataset."""
        self.core.reset()
        while self.repeats_done < self.p.example2.num_of_repeats:
            # checks if the experiment should pause or stop. This function takes several ms to run.
            if self.scheduler.check_pause():
                break
            self.core.break_realtime()
            count = self.sequence.run()  # runs the pulse sequence.
            if self.repeats_done == 0:
                # initializes a dataset.
                self.counts_dset_name = self.add_dataset("counts", [count])
            else:
                # appends to the dataset.
                self.append_dataset(self.counts_dset_name, [count])
            self.repeats_done += 1
|
Jayich-Lab/jax | tools/applets/pmt.py | from PyQt5 import QtWidgets, QtCore, QtGui
from artiq.applets.simple import SimpleApplet
from jax import JaxApplet
class PMT(QtWidgets.QWidget, JaxApplet):
    """Applet that controls the "pmt" labrad server and shows the count rate.

    The widget stays disabled until both the "pmt" and "vault" servers are
    connected. Widget changes are forwarded to the server on the labrad loop;
    server-originated changes update the widgets with signals blocked so they
    are not echoed back.
    """
    def __init__(self, args, **kwds):
        super().__init__(**kwds)
        self._dv_on = False
        self._pmt_on = False
        self.set_disable_state()
        self._normal_mode_text = "Normal"
        self._differential_mode_text = "Differential"
        self._pmt_counts_dataset = "pmt.counts_kHz"
        self.initialize_gui()
        # connects to LabRAD in a different thread, and calls self.labrad_connected when finished.
        self.connect_to_labrad(args.ip)
    def set_disable_state(self):
        """Enables the widget only when both the pmt and vault servers are connected."""
        if self._pmt_on and self._dv_on:
            self.setDisabled(False)
        else:
            self.setDisabled(True)
    def initialize_gui(self):
        """Builds the widgets: LCD display, mode combobox, interval box, start button."""
        shell_font = "MS Shell Dlg 2"
        layout = QtWidgets.QGridLayout()
        self.number = QtWidgets.QLCDNumber()
        self.number.setDigitCount(4)
        self.number.setSmallDecimalPoint(True)
        layout.addWidget(self.number, 0, 0)
        mode_label = QtWidgets.QLabel("Mode:")
        mode_label.setAlignment(QtCore.Qt.AlignBottom)
        mode_label.setFont(QtGui.QFont(shell_font, pointSize=12))
        mode_label.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                 QtWidgets.QSizePolicy.Maximum)
        layout.addWidget(mode_label, 1, 0)
        self.mode_combobox = QtWidgets.QComboBox()
        self.mode_combobox.addItem(self._normal_mode_text)
        self.mode_combobox.addItem(self._differential_mode_text)
        self.mode_combobox.setFont(QtGui.QFont(shell_font, pointSize=12))
        self.mode_combobox.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                         QtWidgets.QSizePolicy.Maximum)
        layout.addWidget(self.mode_combobox, 2, 0)
        interval_label = QtWidgets.QLabel("Interval:")
        interval_label.setAlignment(QtCore.Qt.AlignBottom)
        interval_label.setFont(QtGui.QFont(shell_font, pointSize=12))
        interval_label.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                     QtWidgets.QSizePolicy.Maximum)
        layout.addWidget(interval_label, 3, 0)
        self.interval_spinbox = QtWidgets.QDoubleSpinBox()
        self.interval_spinbox.setSuffix(" s")
        self.interval_spinbox.setSingleStep(0.1)
        self.interval_spinbox.setDecimals(2)
        self.interval_spinbox.setFont(QtGui.QFont(shell_font, pointSize=12))
        self.interval_spinbox.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                           QtWidgets.QSizePolicy.Maximum)
        layout.addWidget(self.interval_spinbox, 4, 0)
        self.start_button = QtWidgets.QPushButton("Start")
        self.start_button.setFont(QtGui.QFont(shell_font, pointSize=12))
        self.start_button.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                        QtWidgets.QSizePolicy.Maximum)
        self.start_button.setCheckable(True)
        layout.addWidget(self.start_button, 5, 0)
        self.setLayout(layout)
    async def labrad_connected(self):
        """Called once the labrad connection is established; wires up servers."""
        self.setup_gui_listeners()
        await self.vault_connected()
        await self.pmt_connected()
        await self.setup_cxn_listeners()
    async def vault_connected(self):
        """Subscribes to the shared PMT counts dataset on the vault server."""
        self.dv = self.cxn.get_server("vault")
        self._dv_on = True
        await self.dv.subscribe_to_shared_dataset(self._pmt_counts_dataset)
        SHARED_DATA_CHANGE = 128936
        await self.dv.on_shared_data_change(SHARED_DATA_CHANGE)
        self.dv.addListener(listener=self._data_change, source=None, ID=SHARED_DATA_CHANGE)
        self.set_disable_state()
    async def pmt_connected(self):
        """Reads the current PMT server state and registers its signal listeners."""
        self.pmt = self.cxn.get_server("pmt")
        self._pmt_on = True
        interval_range = await self.pmt.get_interval_range()
        self.interval_spinbox.setRange(*interval_range)
        interval = await self.pmt.get_interval()
        self._set_pmt_interval(interval)
        differential_mode = await self.pmt.is_differential_mode()
        self._set_pmt_mode(differential_mode)
        is_running = await self.pmt.is_running()
        self._set_pmt_state(is_running)
        # Arbitrary unique listener IDs for the server signals.
        NEW_MODE = 128937
        await self.pmt.on_new_mode(NEW_MODE)
        self.pmt.addListener(listener=self._new_pmt_mode, source=None, ID=NEW_MODE)
        NEW_INTERVAL = 128938
        await self.pmt.on_new_interval(NEW_INTERVAL)
        self.pmt.addListener(listener=self._new_pmt_interval, source=None, ID=NEW_INTERVAL)
        FILE_HALF_FULL = 128939
        await self.pmt.on_file_half_full(FILE_HALF_FULL)
        self.pmt.addListener(listener=self._file_half_full, source=None, ID=FILE_HALF_FULL)
        AUTO_NEW_FILE = 128940
        await self.pmt.on_auto_new_file(AUTO_NEW_FILE)
        self.pmt.addListener(listener=self._auto_new_file, source=None, ID=AUTO_NEW_FILE)
        START_STOP = 128941
        await self.pmt.on_start_and_stop(START_STOP)
        self.pmt.addListener(listener=self._on_start_and_stop, source=None, ID=START_STOP)
        self.set_disable_state()
    async def setup_cxn_listeners(self):
        """Re-runs the connect handlers when a server reconnects; disables on disconnect."""
        self.cxn.add_on_connect("pmt", self.run_in_labrad_loop(self.pmt_connected))
        self.cxn.add_on_disconnect("pmt", self.pmt_disconnected)
        self.cxn.add_on_connect("vault", self.run_in_labrad_loop(self.vault_connected))
        self.cxn.add_on_disconnect("vault", self.vault_disconnected)
    def setup_gui_listeners(self):
        """Connects widget signals to their handlers."""
        self.start_button.toggled.connect(self.start_button_toggled)
        self.mode_combobox.currentTextChanged.connect(self.mode_combobox_text_changed)
        self.interval_spinbox.valueChanged.connect(self.interval_spinbox_value_changed)
    def pmt_disconnected(self):
        self._pmt_on = False
        self.set_disable_state()
    def vault_disconnected(self):
        self._dv_on = False
        self.set_disable_state()
    def _data_change(self, signal, value):
        # NOTE(review): value appears to be (_, dataset_name, data) with the
        # displayed count at data[0][0] -- confirm against the vault server.
        if value[1] == self._pmt_counts_dataset:
            self._set_number(value[2][0][0])
    def _new_pmt_mode(self, signal, value):
        self._set_pmt_mode(value)
    def _new_pmt_interval(self, signal, value):
        self._set_pmt_interval(value)
    def _file_half_full(self, signal, value):
        print("PMT file half full")
    def _auto_new_file(self, signal, value):
        print("New PMT file automatically created.")
    def _on_start_and_stop(self, signal, value):
        self._set_pmt_state(value)
    def _set_number(self, counts):
        self.number.display(counts)
    def _set_pmt_interval(self, interval):
        # blockSignals prevents echoing a server-originated change back to the server.
        self.interval_spinbox.blockSignals(True)
        self.interval_spinbox.setValue(interval)
        self.interval_spinbox.blockSignals(False)
    def _set_pmt_mode(self, is_differential_mode):
        self.mode_combobox.blockSignals(True)
        if is_differential_mode:
            self.mode_combobox.setCurrentText(self._differential_mode_text)
        else:
            self.mode_combobox.setCurrentText(self._normal_mode_text)
        self.mode_combobox.blockSignals(False)
    def _set_pmt_state(self, is_running):
        self.start_button.blockSignals(True)
        self.start_button.setChecked(is_running)
        if is_running:
            self.start_button.setText("Stop")
        else:
            self.start_button.setText("Start")
        self.start_button.blockSignals(False)
    def start_button_toggled(self, checked):
        """Starts or stops the PMT server from the labrad event loop."""
        async def _start_button_toggled(self, checked):
            self._set_pmt_state(checked)
            if checked:
                await self.pmt.start()
            else:
                await self.pmt.stop()
        self.run_in_labrad_loop(_start_button_toggled)(self, checked)
    def mode_combobox_text_changed(self, text):
        """Pushes a mode change (normal/differential) to the PMT server."""
        async def _mode_combobox_text_changed(self, text):
            await self.pmt.set_mode(text == self._differential_mode_text)
        self.run_in_labrad_loop(_mode_combobox_text_changed)(self, text)
    def interval_spinbox_value_changed(self, value):
        """Pushes an interval change to the PMT server."""
        async def _interval_spinbox_value_changed(self, value):
            await self.pmt.set_interval(value)
        self.run_in_labrad_loop(_interval_spinbox_value_changed)(self, value)
def main():
    """Launches the PMT applet."""
    pmt_applet = SimpleApplet(PMT)
    # Adds the labrad IP address as a command-line argument.
    PMT.add_labrad_ip_argument(pmt_applet)
    pmt_applet.run()


if __name__ == "__main__":
    main()
|
Jayich-Lab/jax | examples/sequences/example2.py | from artiq.experiment import *
from jax import Sequence
from jax.examples.sequences.doppler_cool import DopplerCool
from jax.examples.sequences.state_detect import StateDetect
__all__ = ["Example2"]
class Example2(Sequence):
    """Pulse sequence for the example 2 experiment.

    Runs Doppler cooling, waits, runs state detection, and returns the PMT count.
    """
    kernel_invariants = {}
    required_subsequences = [
        DopplerCool,
        StateDetect
    ]
    def __init__(self, exp, parameter_group, cool_time, wait_time):
        super().__init__(exp, parameter_group)
        self.setup(cool_time, wait_time)
    @host_only
    def setup(self, cool_time, wait_time):
        """Initializes devices and sets constants.
        Float number calculation is expensive on the device, so they should be calculated
        in host code if possible.
        """
        self._doppler_cool = DopplerCool(self.exp, self.p, cool_time)
        self._state_detect = StateDetect(self.exp, self.p)
        self._wait_time_mu = self.exp.core.seconds_to_mu(wait_time)
    @kernel
    def run(self) -> TInt32:
        """Runs cool -> wait -> detect -> wait and returns the detection count."""
        self._doppler_cool.run()  # runs the Doppler cool sequence.
        delay_mu(self._wait_time_mu)
        self._state_detect.run()  # runs the state detect sequence.
        delay_mu(self._wait_time_mu)
        # collects the counts during state detection.
        # this readout function can be called at any time in the kernel, as long as the RTIO FIFO
        # is not filled. Roughly 100 PMT counts can be stored in the FIFO before readout.
        count = self._state_detect.pmt.fetch_count()
        return count
|
Jayich-Lab/jax | base/environments/jax_environment.py | <filename>base/environments/jax_environment.py
import threading
from sipyco import pyon
from artiq.experiment import *
from jax.util.parameter_group import ParameterGroup
from jax.util.drift_tracker import DriftTracker
__all__ = ["JaxEnvironment"]
class JaxEnvironment(HasEnvironment):
"""Environment for Jayich lab experiments.
Set USE_LABRAD to False in experiments that do not need labrad.
Attributes:
scheduler: ARTIQ scheduler device.
cxn: labrad connection.
dv: labrad vault server.
parameter_paths: list of (collection, parameter), paths of parameters.
Populated when calling self.get_parameter_paths().
p: ParameterGroup, experiment parameters. Populated when calling self.save_parameters().
drift_trackers: a dict of {name: DriftTracker}, drift trackers used by the experiment.
"""
USE_LABRAD = True
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Drift trackers used by the experiment, keyed by name (see class docstring).
        self.drift_trackers = {}
        # Tracks whether a vault data file is currently open for this experiment.
        self._is_dataset_open = False
def _connect_labrad(self):
import labrad
self.cxn = labrad.connect()
try:
self.dv = self.cxn.vault
except Exception as e:
print("Data vault is not connected.")
def _get_experiment_info(self):
"""Gets the current experiment information from the scheduler.
Returns:
(rid, pipeline_name, priority, expid)
"""
rid = self.scheduler.rid
pipeline_name = self.scheduler.pipeline_name
priority = self.scheduler.priority
expid = self.scheduler.expid
return (rid, pipeline_name, priority, expid)
def build(self):
"""Building the experiment.
Called when the experiment is loaded in the scheduler, and before the experiment prepares.
Can be overriden by derived classes.
"""
self.setattr_device("scheduler")
self._dv_lock = threading.Lock()
def prepare(self):
"""Prepare for the experiment.
Called when the experiment is next in the queue to run.
Can be overriden by derived classes.
"""
if self.USE_LABRAD:
self._connect_labrad()
@host_only
def disconnect_labrad(self):
self.cxn.disconnect()
@host_only
def check_stop_or_do_pause(self):
"""Checks if the experiment should stop or pause in the host code.
To check in the kernel if the experiment should stop or pause,
use self.scheduler.check_pause(). The experiment should then exit from kernel and call
this function in the host code.
If a higher priority experiment is queued, it closes the core and yield control.
The function returns when this experiment is back at the top of the queue.
If the user has stopped the experiment, it returns True.
Returns:
should_stop: bool, whether the user has stopped the experiment. If True,
the experiment should be closed gracefully.
"""
status = self.scheduler.get_status()
same_pipeline_higher_priority = False
for exp in status:
same_pipeline = status[exp]["pipeline"] == self.scheduler.pipeline_name
high_priority = status[exp]["priority"] > self.scheduler.priority
if same_pipeline and high_priority:
same_pipeline_higher_priority = True
break
if not self.scheduler.check_pause():
return False
elif same_pipeline_higher_priority:
self.core.close() # close the connection so other experiment can use it
self.scheduler.pause() # yield control to the scheduler
return False
return True
@rpc
def open_file(self):
"""Opens a data file to write to."""
if self._is_dataset_open:
return
rid, pipeline_name, priority, expid = self._get_experiment_info()
self.dv.open(expid["class_name"], True, rid)
self.dv.add_attribute("rid", rid, "scheduler")
self.dv.add_attribute("expid", pyon.encode(expid), "scheduler")
self.dv.add_attribute("pipeline_name", pipeline_name, "scheduler")
self.dv.add_attribute("priority", priority, "scheduler")
self._is_dataset_open = True
@rpc
def close_file(self):
"""Saves and closes a data file to write to."""
if not self._is_dataset_open:
return
self.dv.close()
self._is_dataset_open = False
@rpc
def add_attribute(self, name, value, group_path="/") -> TStr:
"""Adds an attribute.
Args:
name: str, name of the attribute.
value: value of the attribute.
group_path: str, path to the group to save the attribute at. Default "/", file root.
Returns:
str, key of the dataset. Use this key to call set_dataset() or append_dataset().
"""
if not self._is_dataset_open:
self.open_file()
return self.dv.add_attribute(name, value, group_path)
@rpc
def add_dataset(self, name, value, group_path="/datasets", shared=False) -> TStr:
"""Adds a dataset.
Args:
name: str, dataset name.
value: value of the dataset.
group_path: str, path to the group to save the dataset at. Default "/datasets".
shared: bool, make the dataset accessible to other labrad connections. Default False.
Returns:
str, key of the dataset. Use this key to call set_dataset() or append_dataset().
"""
if not self._is_dataset_open:
self.open_file()
return self.dv.add_dataset(name, value, group_path, shared)
@rpc
def add_streaming_dataset(self, name, value, maxshape, rows_stream=1, group_path="/datasets",
shared=False) -> TStr:
"""Adds a streaming dataset that is automatically saved into the file.
Args:
name: str, dataset name.
value: value of the dataset.
maxshape: tuple, maximum shape of the data. Use None for axis that is unlimited.
rows_stream: int, rows of data in the cache when saving to the file. Default 1.
group_path: str, path to the group to save the dataset at. Default "/datasets".
shared: bool, make the dataset accessible to other labrad connections. Default False.
Returns:
str, key of the dataset. Use this key to call set_dataset() or append_dataset().
"""
if not self._is_dataset_open:
self.open_file()
return self.dv.add_streaming_dataset(name, value, maxshape, rows_stream,
group_path, shared)
@rpc(flags={"async"})
def set_dataset(self, dataset_path, value):
"""Sets the value of the dataset or a streaming dataset.
Args:
dataset_path: str, path to the dataset.
value: value of the dataset.
"""
with self._dv_lock:
self.dv.set_dataset(dataset_path, value)
@rpc(flags={"async"})
def append_dataset(self, dataset_path, value):
"""Appends to the value of the dataset or a streaming dataset.
Args:
dataset_path: str, path to the dataset.
value: value to append to the dataset.
"""
with self._dv_lock:
self.dv.append_dataset(dataset_path, value)
@rpc(flags={"async"})
def set_shared_dataset(self, name, value):
"""Sets the value of a shared dataset.
Args:
name: str, shared dataset name. This is the name used in add_dataset()
or add_streaming_dataset() as the argument, but not the dataset path.
value: value of the shared dataset.
"""
with self._dv_lock:
self.dv.set_shared_dataset(name, value)
@rpc(flags={"async"})
def append_shared_dataset(self, name, value):
"""Appends to the value of a shared dataset.
Args:
name: str, shared dataset name. This is the name used in add_dataset()
or add_streaming_dataset() as the argument, but not the dataset path.
value: value to append to the shared dataset.
"""
with self._dv_lock:
self.dv.append_shared_dataset(name, value)
@rpc(flags={"async"})
def delete_shared_dataset(self, name):
"""Deletes the shared dataset.
Args:
name: str, shared dataset name. This is the name used in add_dataset()
or add_streaming_dataset() as the argument, but not the dataset path.
"""
with self._dv_lock:
self.dv.delete_shared_dataset(name)
@host_only
def get_shared_dataset(self, name):
"""Gets the shared dataset.
This function can only be called from host, as the return type is unknown at compile time.
All shared dataset it gets are saved in "/shared" group in the data file to archive.
Args:
name: str, shared dataset name. This is the name used in add_dataset()
or add_streaming_dataset() as the argument, but not the dataset path.
Returns:
dataset value.
"""
if not self._is_dataset_open:
self.open_file()
with self._dv_lock:
value = self.dv.get_shared_dataset(name)
self.add_dataset(name, value, "/shared", False)
return value
@host_only
def get_parameter_paths(self, experiment_parameters=[], pulse_sequence_classes=[]):
"""Populates self.parameter_paths with required parameters.
Args:
experiment_parameters: list of tuples, parameter needed for the experiment.
pulse_sequence_classes: list of classes, pulse sequences classes needed.
"""
self.parameter_paths = experiment_parameters
for pulse_sequence_class in pulse_sequence_classes:
self.parameter_paths.extend(pulse_sequence_class.all_required_parameters())
self.parameter_paths = list(set(self.parameter_paths))
@host_only
def save_parameters(self):
"""Loads all parameters into self.p.
Also saves all parameters to the data file.
"""
from jax.util.labrad import remove_labrad_units
params = {}
params_full = {}
pb = self.cxn.parameter_bank
for collection, name in self.parameter_paths:
if collection not in params:
params[collection] = {}
params_full[collection] = {}
value = pb.get_parsed_value(collection, name)
params[collection][name] = remove_labrad_units(value)
value_full = pb.get_raw_form(collection, name)
params_full[collection][name] = remove_labrad_units(value_full)
self.p = ParameterGroup(params)
self.add_attribute("parameters", pyon.encode(params))
self.add_attribute("parameters_full", pyon.encode(params_full))
@host_only
def get_drift_tracker(self, name):
if not self._is_dataset_open:
self.open_file()
if not hasattr(self, "drift_trackers"):
self.drift_trackers = {}
if name not in self.drift_trackers:
value = self.cxn.drift_tracker.get_drift_tracker(name)
self.drift_trackers[name] = DriftTracker(pyon.decode(value))
self.add_attribute(name, value, "drift_trackers")
return self.drift_trackers[name]
|
Jayich-Lab/jax | examples/sequences/doppler_cool.py | from artiq.experiment import *
from jax import Sequence
__all__ = ["DopplerCool"]
class DopplerCool(Sequence):
    """An example Doppler cooling sequence.

    Drives a cooling DDS and a repump DDS at drift-tracker-corrected
    frequencies for a fixed cooling time.
    """
    # These attributes never change inside a kernel; marking them invariant
    # lets the ARTIQ compiler optimize their accesses.
    kernel_invariants = {
        "_cool_dds", "_repump_dds", "_cool_drift_tracker", "_repump_drift_tracker"
    }
    # (collection, parameter) paths this sequence needs from the parameter bank.
    required_parameters = [
        ("devices", "cool_dds"),
        ("devices", "repump_dds"),
        ("doppler_cool", "cool_detuning"),
        ("doppler_cool", "cool_amplitude"),
        ("doppler_cool", "cool_drift_tracker"),
        ("doppler_cool", "repump_detuning"),
        ("doppler_cool", "repump_amplitude"),
        ("doppler_cool", "repump_drift_tracker")
    ]

    def __init__(self, exp, parameter_group, cool_time):
        """Args:
            exp: the experiment owning this sequence.
            parameter_group: ParameterGroup with the required parameters.
            cool_time: float, cooling duration in seconds.
        """
        super().__init__(exp, parameter_group)
        self.setup(cool_time)

    @host_only
    def setup(self, cool_time):
        """Initializes devices and sets constants used in the pulse sequence.

        Float number calculation is expensive on the device, so they should be
        calculated in host code if possible. All DDS words (ftw/pow/asf) and
        the cooling time in machine units are precomputed here.
        """
        self._cool_dds = self.exp.get_device(self.p.devices.cool_dds)
        self._repump_dds = self.exp.get_device(self.p.devices.repump_dds)
        phase = 0.
        self._cool_pow = self._cool_dds.turns_to_pow(phase)
        self._repump_pow = self._repump_dds.turns_to_pow(phase)
        d = self.p.doppler_cool
        self._cool_asf = self._cool_dds.amplitude_to_asf(d.cool_amplitude)
        self._repump_asf = self._repump_dds.amplitude_to_asf(d.repump_amplitude)
        self._cool_drift_tracker = self.exp.get_drift_tracker(d.cool_drift_tracker)
        self._repump_drift_tracker = self.exp.get_drift_tracker(d.repump_drift_tracker)
        self._cool_time_mu = self.exp.core.seconds_to_mu(cool_time)
        # Absolute frequencies are the configured detunings corrected by the
        # drift trackers at setup time.
        cool_frequency = self._cool_drift_tracker.get_frequency_host(d.cool_detuning)
        self._cool_ftw = self._cool_dds.frequency_to_ftw(cool_frequency)
        repump_frequency = self._repump_drift_tracker.get_frequency_host(d.repump_detuning)
        self._repump_ftw = self._repump_dds.frequency_to_ftw(repump_frequency)

    @kernel
    def run(self):
        """Runs the cooling pulse: program DDSes, gate them on, wait, gate off."""
        # set the values of the DDSes
        self._cool_dds.set_mu(self._cool_ftw, self._cool_pow, self._cool_asf)
        self._repump_dds.set_mu(self._repump_ftw, self._repump_pow, self._repump_asf)
        # wait for a RTIO cycle to reduce likelihood of a collision error (see ARTIQ manual)
        # in a complicated pulse sequence. Then turns on the rf switches of DDSes.
        delay_mu(self.exp.rtio_cycle_mu)
        self._cool_dds.sw.on()
        self._repump_dds.sw.on()
        # wait for cool time before turning off the rf switches.
        delay_mu(self._cool_time_mu)
        self._cool_dds.sw.off()
        self._repump_dds.sw.off()
        # Set amplitudes to 0 to eliminate the DDS signal leakthroughs from the switches.
        delay_mu(self.exp.rtio_cycle_mu)
        self._cool_dds.set_mu(self._cool_ftw, self._cool_pow, 0)
        self._repump_dds.set_mu(self._repump_ftw, self._repump_pow, 0)
|
Jayich-Lab/jax | tools/experiments/pmt.py | import time as _t
from sipyco import pyon
from artiq.experiment import *
from jax import JaxExperiment, SinaraEnvironment
__all__ = ["PMT"]
class PMT(JaxExperiment, SinaraEnvironment):
    """Base class for a background experiment that reads the PMT and sets TTL/DDS parameters.

    Inherit this class in the experiment repository and define the class variables:

    REPUMP_AOM_CHANNELS: list of strs, names of DDSes controlling repump lasers.
        To control ions that have hyperfine structures, multiple repump DDSes
        may be necessary.
    PMT_EDGECOUNTER: str, edgecounter device for PMT input.

    This experiment assumes that the device has at least one AD9910 DDS and at
    least one TTL board.
    """
    REPUMP_AOM_CHANNELS = None
    PMT_EDGECOUNTER = None
    kernel_invariants = {
        "REPUMP_AOM_CHANNELS", "PMT_EDGECOUNTER", "repump_aoms", "pmt_counter", "ad9910s",
        "ttl_outs"
    }

    def build(self):
        """Builds the experiment and grabs all DDS/TTL/edgecounter devices."""
        super().build()
        lowest_priority = -100  # we use priorities ranging from -100 to 100.
        self.set_default_scheduling(priority=lowest_priority, pipeline_name="main")
        if self.REPUMP_AOM_CHANNELS is None:
            raise Exception("REPUMP_AOM_CHANNELS must be defined.")
        self.repump_aoms = [self.get_device(kk) for kk in self.REPUMP_AOM_CHANNELS]
        if self.PMT_EDGECOUNTER is None:
            raise Exception("PMT_EDGECOUNTER must be defined.")
        self.pmt_counter = self.get_device(self.PMT_EDGECOUNTER)
        self._get_all_dds_and_ttl_objects()

    def prepare(self):
        """Prepares the experiment and caches the current repump AOM states."""
        super().prepare()
        self._get_repump_aom_states()

    def run(self):
        """Keeps running until the experiment is stopped by the user."""
        while True:
            should_stop = self.check_stop_or_do_pause()
            if should_stop:
                self.disconnect_labrad()
                break
            else:
                self.run_kernel()

    @host_only
    def _get_all_dds_and_ttl_objects(self):
        """Grabs all AD9910 DDS and TTL output device drivers."""
        self.ad9910s = []
        for kk in self.devices.ad9910s:
            self.ad9910s.append(self.get_device(kk))
        self.ttl_outs = []
        for kk in self.devices.ttl_outs:
            self.ttl_outs.append(self.get_device(kk))

    @host_only
    def _get_repump_aom_states(self):
        """Gets the states of repump AOMs.

        If a repump AOM is set to off, don't turn it on during the
        differential mode sequence.
        """
        dds_params = pyon.decode(self.cxn.artiq.get_dds_parameters())
        self.repump_aom_states = []
        for kk in self.REPUMP_AOM_CHANNELS:
            # The last element of the DDS parameter tuple is the on/off state.
            self.repump_aom_states.append(dds_params[kk][-1])

    @kernel
    def run_kernel(self):
        """Infinite loop of acquiring PMT counts and updating DDSes and TTLs.

        It checks for DDS and TTL changes at least once per loop, and it may
        check more times if it has enough time. The loop ends if a higher
        priority experiment is scheduled or if the user requests stop.
        """
        self.core.reset()
        self.core.break_realtime()
        while True:
            if self.scheduler.check_pause():
                break
            self.update_hardware()
            differential_mode, interval_mu = self.get_pmt_mode_and_interval()
            if interval_mu == 0:  # if the PMT server is not connected.
                continue
            self.core.break_realtime()
            if differential_mode:
                # Differential mode: one counting window with repump lasers
                # off ("low") followed by one with them on ("high").
                for kk in range(len(self.repump_aoms)):
                    # if the repump AOM is off, don't turn on and off the AOM.
                    # the repump AOM stays off for both differential high and
                    # low counting periods.
                    if self.repump_aom_states[kk] > 0.:
                        self.repump_aoms[kk].sw.off()
                t_count = self.pmt_counter.gate_rising_mu(interval_mu)
                at_mu(t_count + self.rtio_cycle_mu)
                for kk in range(len(self.repump_aoms)):
                    if self.repump_aom_states[kk] > 0.:
                        self.repump_aoms[kk].sw.on()
                t_count = self.pmt_counter.gate_rising_mu(interval_mu)
            else:
                t_count = self.pmt_counter.gate_rising_mu(interval_mu)
            # 20 ms of slack before the counting gate closes: keep servicing
            # hardware updates only while more than this remains, so the count
            # readout is not delayed. The slack must be in machine units to be
            # comparable with now_mu(); the previous code compared against the
            # raw float 20*ms (= 0.02), which gave effectively zero slack.
            twenty_ms_mu = self.core.seconds_to_mu(20*ms)
            while t_count > now_mu() + twenty_ms_mu:
                self.update_hardware()
            if differential_mode:
                # fetch_count() returns counts in gate order: repump-off
                # window first, repump-on window second.
                count_low = self.pmt_counter.fetch_count()
                count_high = self.pmt_counter.fetch_count()
            else:
                count_low = 0
                count_high = self.pmt_counter.fetch_count()
            self.save_counts(count_high, count_low)

    @kernel(flags={"fast-math"})
    def update_hardware(self):
        """Checks whether DDS/TTL needs to be updated and updates them."""
        dds_changes = self.get_dds_changes()
        self.core.break_realtime()
        for kk in range(len(dds_changes)):
            if kk > 0:  # index 0 is always the placeholder element.
                index, index_repump, name, values = dds_changes[kk]
                self.update_dds(index, index_repump, name, values)
        ttl_changes = self.get_ttl_changes()
        self.core.break_realtime()
        for kk in range(len(ttl_changes)):
            if kk > 0:  # index 0 is always the placeholder element.
                index, value = ttl_changes[kk]
                self.update_ttl(index, value)
        self.core.break_realtime()

    @rpc
    def get_dds_changes(self) -> TList(TTuple([TInt32, TInt32, TStr, TFloat])):
        """Gets all pending changes for DDSes.

        If there is no change, it must have a placeholder element in the list
        to ensure that ARTIQ python can recognize its type.

        Returns:
            DDS changes. The first element is always a placeholder.
            The second int in the tuple is the index in self.repump_aoms.
            This is used to determine whether repump AOMs should be turned on
            in the differential mode; it is -1 for non-repump DDSes.
        """
        dds_changes = self.cxn.artiq.get_dds_change_queues()
        to_kernel = [(-1, -1, "placeholder", 0.)]
        for kk in dds_changes:
            index = self.devices.ad9910s.index(kk[0])
            try:
                index_repump = self.REPUMP_AOM_CHANNELS.index(kk[0])
            except ValueError:
                index_repump = -1  # not a repump AOM.
            to_kernel.append((index, index_repump, kk[1], kk[2]))
        return to_kernel

    @rpc
    def get_ttl_changes(self) -> TList(TTuple([TInt32, TFloat])):
        """Gets all pending changes for TTLs.

        If there is no change, it must have a placeholder element in the list
        to ensure that ARTIQ python can recognize its type.

        Returns:
            TTL changes. The first element is always a placeholder.
        """
        ttl_changes = self.cxn.artiq.get_ttl_change_queues()
        to_kernel = [(-1, 0.)]
        for kk in ttl_changes:
            index = self.devices.ttl_outs.index(kk[0])
            to_kernel.append((index, kk[1]))
        return to_kernel

    @kernel(flags={"fast-math"})
    def update_dds(self, index: TInt32, index_repump: TInt32, attribute: TStr, value: TFloat):
        """Sets one DDS attribute (frequency/phase/amplitude/attenuation/state)."""
        device = self.ad9910s[index]
        if attribute == "frequency":
            freq, phase, amp = device.get()
            self.core.break_realtime()
            delay_mu(self.dds_set_delay_mu)
            device.set(value, phase, amp)
            self.core.break_realtime()
        elif attribute == "phase":
            freq, phase, amp = device.get()
            self.core.break_realtime()
            delay_mu(self.dds_set_delay_mu)
            device.set(freq, value, amp)
            self.core.break_realtime()
        elif attribute == "amplitude":
            freq, phase, amp = device.get()
            self.core.break_realtime()
            delay_mu(self.dds_set_delay_mu)
            device.set(freq, phase, value)
            self.core.break_realtime()
        elif attribute == "attenuation":
            # get_att_mu() required to correctly set the atts of other DDSes
            # of the same urukul.
            device.get_att_mu()
            self.core.break_realtime()
            device.set_att(value)
            self.core.break_realtime()
        elif attribute == "state":
            is_on = value > 0.
            if is_on:
                device.sw.on()
            else:
                device.sw.off()
            if index_repump >= 0:
                # Remember the new state so differential mode knows whether
                # to toggle this repump AOM.
                self.repump_aom_states[index_repump] = value
            self.core.break_realtime()

    @kernel(flags={"fast-math"})
    def update_ttl(self, index: TInt32, value: TFloat):
        """Sets a TTL output; value > 0 means on."""
        device = self.ttl_outs[index]
        if value > 0.:
            device.on()
        else:
            device.off()
        self.core.break_realtime()

    @rpc
    def get_pmt_mode_and_interval(self) -> TTuple([TBool, TInt64]):
        """Gets PMT differential mode and counting interval.

        Returns:
            (is_differential, interval_mu). interval_mu is 0 when the PMT
            server is not connected or not running.
        """
        try:
            is_differential = self.cxn.pmt.is_differential_mode()
            interval = self.cxn.pmt.get_interval()
            self.interval_ms = interval / ms  # cached for count normalization.
            interval_mu = self.core.seconds_to_mu(interval)
            if not self.cxn.pmt.is_running():
                interval_mu = 0
            return (is_differential, interval_mu)
        except Exception:
            # Deliberate best-effort: the PMT server may be unavailable.
            pass
        return (False, 0)

    @rpc(flags={"async"})
    def save_counts(self, high: TInt32, low: TInt32 = 0):
        """Sends counts (normalized to kHz by the interval) to the PMT server."""
        try:
            self.cxn.pmt.save_counts(_t.time(), high / self.interval_ms, low / self.interval_ms)
        except Exception:
            # Deliberate best-effort: dropping a point is preferable to
            # crashing the background acquisition loop.
            pass
|
Jayich-Lab/jax | util/ui/custom_list_widget.py | from PyQt5 import QtWidgets, QtGui, QtCore
from jax.util.ui.dialog_on_top import DialogOnTop
class CustomListWidget(QtWidgets.QListWidget):
    """QListWidget that can reorder items and hide/show items.

    Do not call self.addItem() or self.addItemWidget() directly.
    Use self.add_item_and_widget() instead.
    """
    # when the items' visibility and order are changed.
    # this signal is only triggered when the visibility and order are updated
    # in the popup window (the "Manage Widgets" dialog).
    visibility_and_order_changed = QtCore.pyqtSignal(dict)

    def __init__(self, parent=None):
        super().__init__(parent)
        # Names of items currently shown, in display order.
        self.visible_items = []
        # Names of items currently hidden.
        self.hidden_items = []
        # {name: _CustomListWidgetItem} for every item ever added.
        self.all_items = {}
        self.setAcceptDrops(False)
        self.setDragEnabled(False)
        self.setFlow(QtWidgets.QListView.LeftToRight)
        self.setResizeMode(QtWidgets.QListView.Adjust)
        self.setViewMode(QtWidgets.QListView.IconMode)

    def add_item_and_widget(self, name, widget, visible=True, sort_index=None, padding=10):
        """Adds an widget to the ListWidget.

        Combines self.addItem() and self.setItemWidget().

        Args:
            name: str, name of the item. Must be unique within this widget.
            widget: QWidget, widget to add to the ListWidget.
            visible: bool, whether the widget should be visible. Default is True.
            sort_index: int, order of the item in the ListWidget. If None, the
                displayed order is the same as the order added. Default is None.
            padding: int, padding between the item and the widget. Default is 10.

        Raises:
            ValueError: if an item with the same name already exists.
        """
        if name in self.all_items:
            raise ValueError(f"{name} already exists in the list widget.")
        # Grow the grid cell to fit the largest widget added so far (plus
        # padding); the grid never shrinks.
        size = widget.sizeHint()
        new_size = QtCore.QSize(size.width() + padding, size.height() + padding)
        if self.gridSize().height() > new_size.height():
            new_size.setHeight(self.gridSize().height())
        if self.gridSize().width() > new_size.width():
            new_size.setWidth(self.gridSize().width())
        self.setGridSize(new_size)
        if sort_index is None:
            sort_index = len(self.all_items)
        item = _CustomListWidgetItem(sort_index)
        item.setSizeHint(widget.sizeHint())
        if visible:
            self.visible_items.append(name)
        else:
            self.hidden_items.append(name)
            item.setHidden(True)
        self.addItem(item)
        self.setItemWidget(item, widget)
        self.all_items[name] = item
        self.sortItems()
        # sortItems() can re-enable dragging; keep it disabled.
        self.setDragEnabled(False)

    def set_visibility_and_order(self, widget_config):
        """Sets the visibility and order of items in the ListWidget based on a config.

        This function should be called after the list widget is fully populated.
        If an item is in the ListWidget but not in the config, it is set to
        visible. If an item is in the config but not in the ListWidget, the
        item in the config is ignored. The config file should be updated with
        the return value of this function.

        Args:
            widget_config: dict,
                {
                    "visible_items": ["name_1", "name_2"],
                    "hidden_items": ["name_3", "name_4"]
                }
                where "name_x" is the name added in add_item_and_widget.
                widget_config can be an empty dict and it would not change the GUI.

        Returns:
            updated_widget_config: dict, widget_config with the missing items
                added and the extra items removed.
        """
        all_items = self.visible_items + self.hidden_items
        self.visible_items = []
        self.hidden_items = []
        if "visible_items" in widget_config:  # adds all visible items in the config
            for name in widget_config["visible_items"]:
                if name in all_items:
                    self.visible_items.append(name)
        if "hidden_items" in widget_config:  # adds all hidden items in the config
            for name in widget_config["hidden_items"]:
                if name in all_items:
                    self.hidden_items.append(name)
        for name in all_items:  # adds items that are not defined in the config
            if name not in self.visible_items and name not in self.hidden_items:
                self.visible_items.append(name)
        self.update_ui()
        return self._get_config()

    def _get_config(self):
        """Returns the current visibility/order config as a plain dict."""
        config = {}
        config["visible_items"] = list(self.visible_items)
        config["hidden_items"] = list(self.hidden_items)
        return config

    def update_ui(self):
        """Applies self.visible_items / self.hidden_items to the Qt items."""
        for name in self.all_items:
            item = self.all_items[name]
            if name in self.visible_items:
                item.setHidden(False)
                item.sort_index = self.visible_items.index(name)
            else:
                item.setHidden(True)
                # Hidden items sort after all visible items.
                item.sort_index = len(self.visible_items) + self.hidden_items.index(name)
        self.sortItems()
        # sortItems() can re-enable dragging; keep it disabled.
        self.setDragEnabled(False)

    def contextMenuEvent(self, event):
        """Override contextMenuEvent to add a custom context menu.

        To add more actions to the context menu, you must copy the code from
        this method and add your own actions.
        """
        menu = QtWidgets.QMenu()
        manage_widgets_action = menu.addAction("Manage Widgets")
        action = menu.exec_(self.mapToGlobal(event.pos()))
        if action == manage_widgets_action:
            self._manage_widgets()

    def _manage_widgets(self):
        """Opens the manage-widgets dialog and applies its result if accepted."""
        dialog = _CustomListWidgetManager(self, self.visible_items, self.hidden_items)
        if dialog.exec_() == QtWidgets.QDialog.Accepted:
            self.visible_items = dialog.visible_items
            self.hidden_items = dialog.hidden_items
            self.update_ui()
            self.visibility_and_order_changed.emit(self._get_config())

    def clear(self):
        """Overloads the parent clear function to properly clear the extra attributes."""
        self.visible_items = []
        self.hidden_items = []
        self.all_items = {}
        super().clear()

    def addItem(self, item):
        """Do not call this directly. Use self.add_item_and_widget() instead."""
        if not isinstance(item, _CustomListWidgetItem):
            raise TypeError("item must be a CustomListWidgetItem.")
        super().addItem(item)

    def addItemWidget(self, item, widget):
        """Do not call this directly. Use self.add_item_and_widget() instead."""
        if not isinstance(item, _CustomListWidgetItem):
            raise TypeError("item must be a CustomListWidgetItem.")
        super().addItemWidget(item, widget)
class _CustomListWidgetManager(DialogOnTop):
    """Select widget visibility and orders.

    A dialog with a two-section tree ("Visible widgets" / "Invisible widgets")
    whose children can be dragged between sections and reordered. On accept,
    self.visible_items and self.hidden_items reflect the tree contents.
    """

    def __init__(self, parent, visible_items, hidden_items):
        super().__init__(parent)
        self.setWindowTitle("Select widget visibility and orders")
        self.visible_items = visible_items
        self.hidden_items = hidden_items
        self.init_ui()

    def build_tree_widget_item(self, name, is_header):
        """Builds one tree item; headers accept drops, children can be dragged."""
        item = QtWidgets.QTreeWidgetItem([name])
        if is_header:
            item.setFlags(item.flags() & ~QtCore.Qt.ItemIsSelectable)  # Disable selection
            item.setFlags(item.flags() & ~QtCore.Qt.ItemIsDragEnabled)  # Disable drag
        else:
            item.setFlags(item.flags() & ~QtCore.Qt.ItemIsDropEnabled)  # Disable drop
        return item

    def init_ui(self):
        """Builds the tree widget and the OK/Cancel buttons."""
        self.tree_widget = QtWidgets.QTreeWidget()
        self.tree_widget.setFont(QtGui.QFont("Arial", 12))
        self.tree_widget.setHeaderHidden(True)
        self.tree_widget.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
        # Disable drop on the header
        self.tree_widget.invisibleRootItem().setFlags(
            self.tree_widget.invisibleRootItem().flags() & ~QtCore.Qt.ItemIsDropEnabled)
        self.visible_items_header = self.build_tree_widget_item("Visible widgets", True)
        self.tree_widget.insertTopLevelItem(0, self.visible_items_header)
        self.tree_widget.expandItem(self.visible_items_header)
        for item in self.visible_items:
            self.visible_items_header.addChild(self.build_tree_widget_item(item, False))
        self.hidden_items_header = self.build_tree_widget_item("Invisible widgets", True)
        self.tree_widget.insertTopLevelItem(1, self.hidden_items_header)
        self.tree_widget.expandItem(self.hidden_items_header)
        for item in self.hidden_items:
            self.hidden_items_header.addChild(self.build_tree_widget_item(item, False))
        self.button_box = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        self.button_box.accepted.connect(self.accept)
        self.button_box.rejected.connect(self.reject)
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.tree_widget)
        layout.addWidget(self.button_box)
        self.setLayout(layout)

    def accept(self) -> None:
        """Updates the visible and hidden items from the tree, then closes."""
        self.visible_items = []
        self.hidden_items = []
        for kk in list(range(self.visible_items_header.childCount())):
            self.visible_items.append(self.visible_items_header.child(kk).text(0))
        for kk in list(range(self.hidden_items_header.childCount())):
            self.hidden_items.append(self.hidden_items_header.child(kk).text(0))
        return super().accept()
class _CustomListWidgetItem(QtWidgets.QListWidgetItem):
    """List item ordered by an explicit index rather than by its text.

    Module-private: don't use this class outside of this module.
    """

    def __init__(self, sort_index):
        super().__init__()
        # Display position; read by QListWidget.sortItems() through __lt__.
        self.sort_index = sort_index

    def __lt__(self, other):
        # Compare by the assigned index so sortItems() honors custom ordering.
        return other.sort_index > self.sort_index
|
Jayich-Lab/jax | tools/applets/plot_pmt.py | <reponame>Jayich-Lab/jax
import numpy as _np
from PyQt5 import QtCore, QtGui, QtWidgets
from artiq.applets.simple import SimpleApplet
from jax import RealTimePlotApplet
class PlotPMT(RealTimePlotApplet):
    """Real-time plot applet for the PMT count rates.

    Plots three traces (high, low, differential) against time, fed by the
    shared datasets "pmt.times" and "pmt.counts_kHz".
    """

    def __init__(self, args, **kwds):
        num_of_traces = 3  # high, low, differential.
        datasets_names = ["pmt.times", "pmt.counts_kHz"]
        xlabel = "Time (s)"
        ylabel = "Counts (kHz)"
        super().__init__(num_of_traces, datasets_names, xlabel, ylabel, ip=args.ip, **kwds)

    async def initialize_datasets(self):
        """Fetches the existing PMT history; falls back to empty traces."""
        try:
            times = await self.dv.get_shared_dataset("pmt.times")
            counts = await self.dv.get_shared_dataset("pmt.counts_kHz")
        except Exception as e:
            # The datasets may not exist yet; start with empty traces.
            print(e)
            times = _np.array([])
            counts = _np.array([[], [], []])
        self.set_data.emit("pmt.times", times)
        self.set_data.emit("pmt.counts_kHz", counts)

    def _set(self, dataset_name, value):
        """Replaces a full dataset: caches times, or sets all count traces."""
        if dataset_name == "pmt.times":
            self._times = value
        elif dataset_name == "pmt.counts_kHz":
            # Transpose from rows of (high, low, diff) to one array per trace.
            value = _np.transpose(value)
            for kk in range(len(value)):
                self.traces[kk].set(self._times, value[kk])

    def _append(self, dataset_name, value):
        """Appends one point: a time to every trace, or counts per trace."""
        if dataset_name == "pmt.times":
            for kk in range(len(self.traces)):
                self.traces[kk].append_x(value)
        elif dataset_name == "pmt.counts_kHz":
            value = _np.transpose(value)
            for kk in range(len(self.traces)):
                self.traces[kk].append_y(value[kk])
                self.traces[kk].update_trace()  # only update the plot when counts are updated.
def main():
    """Entry point: wrap PlotPMT in a SimpleApplet and run it."""
    simple_applet = SimpleApplet(PlotPMT)
    # Register the --ip argument used to reach the labrad host.
    PlotPMT.add_labrad_ip_argument(simple_applet)
    simple_applet.run()


if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.