max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
mlaas/api/score/serializers.py | ayanray-tech/faas | 2 | 12772351 | from flask_restplus import fields
from api.restplus import api
# Swagger model describing one "Model Score" payload; all three fields are
# required strings (enforced by flask_restplus request validation).
model_score = api.model('Model Score', {
    'algorithm': fields.String(required=True, description='Model name'),
    'source_url': fields.String(required=True, description='Source URL'),
    'field_names': fields.String(required=True, description='Field Names'),
})
| 2.078125 | 2 |
music/file_source.py | MaT1g3R/YasenBaka | 6 | 12772352 | from functools import partial
from music.abstract_source import AbstractSource
from music.music_util import file_detail, get_file_info
class FileSource(AbstractSource):
    """
    An audio source from a file.
    """
    __slots__ = ('file_path', 'title')
    def __init__(self, file_path: str):
        """
        :param file_path: the file path.
        """
        # get_file_info returns (title, genre, artist, album, length)
        self.title, genre, artist, album, length = get_file_info(file_path)
        self.file_path = file_path
        # AbstractSource receives a zero-argument callable that produces the
        # detail view for this file when invoked.
        super().__init__(
            partial(file_detail, self.title, genre, artist, album, length)
        )
    def __str__(self):
        return self.title
    def clean_up(self):
        # Drop the slot attributes; the instance is unusable afterwards.
        del self.title
        del self.file_path
    async def true_name(self) -> str:
        """
        See `AbstractSource.true_name`
        """
        return self.file_path
| 2.859375 | 3 |
Lab5_2020/lab5_2_1.py | AlexandrosKyriakakis/StochasticProcesses | 3 | 12772353 | from numpy import random, pi
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# Monte Carlo estimate of the volume of the unit 3-ball V(3) = 4*pi/3 by
# rejection sampling inside the cube (-1, 1)^3, whose volume is 2**3 = 8.
Ntrials, Nhits = 1_000_000, 0
for n in range(Ntrials):
    x, y, z = random.uniform(-1, 1, 3)  # draw 3 samples, each uniformly distributed over (-1,1)
    if x**2 + y**2 + z**2 < 1:
        Nhits += 1
estimate = (2**3) * (Nhits / Ntrials)
actual = 4 * pi / 3
print("Monte Carlo estimator of V(3): %.5f" % estimate)
print("Actual value of V(3) up to 5 decimal digits: %.5f" % actual)
# Relative error = |estimate - actual| / actual; the original printed the
# absolute error while labelling it a relative error in percent.
print("The relative error is %.5f%%" % (100 * abs(estimate - actual) / actual))
| 3.203125 | 3 |
botteryext/binput/inputhandler.py | IvanBrasilico/binput | 0 | 12772354 | '''InputHandler is an extension to add "Input commands" to bottery views
Usage:
On an Application:
app = App()
input = InputHandler(app)
On Patterns:
hang_user_pattern_input = HangUserPattern(input_example)
input.set_hang(hang_user_pattern_input, 'project')
patterns = [
hang_user_pattern_input,
Pattern('project', input_example),
On a View:
# This block will be executed on first call
if not app.input_queue:
app.hang_in(message)
app.input(message, 'name', 'Enter Project Name:')
app.input(message, 'language', 'Enter Project Language: ',
['python2', 'python3'])
return app.print_next_input(message) # To return first message of queue
    # On next calls, this block will be executed
stay, response = app.next_input_queue(message)
if stay:
return response # Contains message from Input Command
# Queue ended, now you could save resulting Project and exit view
app.hang_out(message)
return 'Project created: ' + response # Response contains user entries
'''
from collections import OrderedDict
class InputHandler:
    '''Adds Input Command to views.

    Per user id it keeps an ordered queue of pending prompts
    (``input_queue``) and the answers collected so far (``user_inputs``).
    ``hang`` maps a pattern string to a HangUserPattern-like object that
    keeps the app on the current view while the queue is being consumed.
    '''
    def __init__(self, app):
        # pattern string -> hang object (registered via set_hang)
        self.hang = dict()
        # user id -> OrderedDict mapping name -> (prompt, valid_values)
        self.input_queue = dict()
        # user id -> dict mapping name -> text the user entered
        self.user_inputs = dict()
        self._app = app
    def set_hang(self, hang, hang_pattern):
        '''Registers the hang object to use for ``hang_pattern``.'''
        self.hang[hang_pattern] = hang
    def hang_in(self, message):
        '''Used in conjunction with HangUserPattern. Maintains app on the view'''
        self.hang[message.text].activate_hang(message)
        self.user_inputs[message.user.id] = dict()
    def hang_out(self, message, hang_pattern):
        '''Releases the hang for ``hang_pattern`` and clears this user's answers.'''
        self.hang[hang_pattern].deactivate_hang(message)
        self.user_inputs.pop(message.user.id, None)
    def input(self, message, name, prompt, valid_values=None):
        '''Adds an input message (optionally with allowed answers) to the dict'''
        if not self.input_queue.get(message.user.id, None):
            self.input_queue[message.user.id] = OrderedDict()
            self.user_inputs[message.user.id] = dict()
        user_input_dict = self.input_queue[message.user.id]
        user_input_dict[name] = (prompt, valid_values)
    def print_next_input(self, message):
        '''Returns (and prints) the first pending prompt for this user.'''
        user_input_dict = self.input_queue[message.user.id]
        if not user_input_dict:
            # NOTE(review): `_` (gettext-style translator) is not imported in
            # this module -- presumably installed as a builtin by the
            # framework; confirm, otherwise this line raises NameError.
            return _('No messages on the input command queue')
        actual_prompt, _valid_values = list(user_input_dict.values())[0]
        print(actual_prompt)
        return actual_prompt
    def next_input_queue(self, message):
        '''Validates user input, saves user input.
        Returns a "stay" flag to say if views maintain hang or not
        Returns actual prompt OR
        a dict of name:user_input on end of prompts'''
        user_input_dict = self.input_queue[message.user.id]
        if not user_input_dict:
            return False, _('No messages on the input command queue')
        _actual_prompt, valid_values = list(user_input_dict.values())[0]
        if valid_values:
            if message.text not in valid_values:
                # If validation fail, remain on actual item
                return True, _('Enter a Valid Value: ') + ' '.join(valid_values)
        user_inputs = self.user_inputs[message.user.id]
        name = list(user_input_dict.keys())[0]
        user_inputs[name] = message.text
        # Remove the answered prompt from the front of the queue (FIFO).
        user_input_dict.popitem(last=False)
        if not user_input_dict:
            # Ended! Return what user entered
            return False, user_inputs
        next_prompt, valid_values = list(user_input_dict.values())[0]
        if valid_values:
            next_prompt = next_prompt + ' - ' + ' '.join(valid_values)
        return True, next_prompt
| 2.953125 | 3 |
examples/geofence.py | datwwe/dronecontrol-MAVSDK | 0 | 12772355 | <gh_stars>0
#!/usr/bin/env python3
import asyncio
from mavsdk import Point, Polygon, System
import json
# Load connection settings; expects a JSON object with an "address" key
# (used below as the MAVSDK system address).
with open('./connect.json','r') as f:
    aa = json.load(f)
async def run():
    """Connect to the drone and upload a rectangular inclusion geofence."""
    drone = System()
    await drone.connect(system_address=aa['address'])
    print("Waiting for drone...")
    # Wait for the first connected state before talking to the vehicle.
    async for state in drone.core.connection_state():
        if state.is_connected:
            print(f"Drone discovered with UUID: {state.uuid}")
            break
    lat = 47.3977508
    lon = 8.5456074
    # Four corners of a small box (+/- 0.0001 deg) around (lat, lon).
    p1 = Point(lat - 0.0001, lon - 0.0001)
    p2 = Point(lat + 0.0001, lon - 0.0001)
    p3 = Point(lat + 0.0001, lon + 0.0001)
    p4 = Point(lat - 0.0001, lon + 0.0001)
    polygon = Polygon([p1, p2, p3, p4], Polygon.Type.INCLUSION)
    print("Uploading geofence...")
    await drone.geofence.upload_geofence([polygon])
    print("Geofence uploaded!")
if __name__ == "__main__":
    # asyncio.run() creates, runs and closes the event loop for us; the
    # get_event_loop()/run_until_complete() pattern is deprecated.
    asyncio.run(run())
| 2.515625 | 3 |
ATEMview/LocWindow.py | dwfmarchant/ATEMview | 2 | 12772356 | <gh_stars>1-10
""" ATEMview Location Window """
from PyQt5 import QtCore, QtWidgets
import pyqtgraph as pg
import numpy as np
from .ATEMWidget import ATEMWidget
from .colormaps import jetCM, jetBrush
class LocWidget(ATEMWidget):
    """Map view of sounding locations with an optional misfit/data overlay.

    Shows one scatter point per location, crosshair lines for the selected
    location, and (when 'Show Misfit' is checked) colors the points by the
    quantity chosen in the combo box, with a jet colorbar and two sliders
    controlling the color limits.
    """
    def __init__(self, parent):
        super(LocWidget, self).__init__(parent)
        self.parent = parent
        self.init_ui()
        self.showData = False       # whether to color points by self.data
        self.data = None            # values currently displayed (or None)
        self.tInd = -1              # selected time index
        self.x = None               # plotted point coordinates
        self.y = None
        self.minVal = 1.
        self.maxVal = 1.
        self.cbFormatStr = '{:.2f}' # format used for the colorbar labels
        self.show()
    def init_ui(self):
        """Build the plot, colorbar, combo box, check box and sliders."""
        # Make the background white
        palette = self.palette()
        palette.setColor(self.backgroundRole(), QtCore.Qt.white)
        self.setPalette(palette)
        self.plotWidget = pg.PlotWidget(enableMenu=False)
        # NOTE(review): x is plotted on the bottom axis, so 'Easting' on the
        # left / 'Northing' on the bottom looks swapped -- confirm convention.
        self.plotWidget.setLabel('left', 'Easting', units='m')
        self.plotWidget.setLabel('bottom', 'Northing', units='m')
        self.plotWidget.showGrid(x=True, y=True)
        self.plotWidget.getViewBox().setAspectLocked()
        self.scatter = pg.ScatterPlotItem(pen=None, pxMode=True)
        self.plotWidget.addItem(self.scatter)
        # Dotted crosshair marking the currently selected location.
        self.selectedLocVline = pg.InfiniteLine(angle=90,
                                                movable=False,
                                                pen={'color':'k',
                                                     'width':2,
                                                     'style':QtCore.Qt.DotLine})
        self.plotWidget.addItem(self.selectedLocVline, ignoreBounds=True)
        self.selectedLocHline = pg.InfiniteLine(angle=0,
                                                movable=False,
                                                pen={'color':'k',
                                                     'width':2,
                                                     'style':QtCore.Qt.DotLine})
        self.plotWidget.addItem(self.selectedLocHline, ignoreBounds=True)
        self.plotWidget.scene().sigMouseClicked.connect(self.clickEvent)
        # Colorbar: a 20x256 jet image in a mouse-disabled plot.
        self.colorbarWidget = pg.PlotWidget(enableMenu=False)
        self.colorbarWidget.setMaximumWidth(100)
        self.colorbarWidget.getViewBox().setMouseEnabled(False, False)
        self.colorbarWidget.setXRange(0, 20, padding=0)
        self.colorbarWidget.setYRange(0, 256, padding=0)
        self.colorbarWidget.getAxis('bottom').setPen(None)
        self.colorbarWidget.getAxis('left').setPen(None)
        self.colorbarWidget.setVisible(False)
        self.cbMinLabel = QtWidgets.QLabel()
        self.cbMinLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.cbMinLabel.setText('0.00')
        self.cbMaxLabel = QtWidgets.QLabel()
        self.cbMaxLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.cbMaxLabel.setText('1.00')
        self.colorbar = pg.ImageItem()
        cbData = np.arange(0, 256)[:, np.newaxis].repeat(20, axis=1).T
        self.colorbar.setImage(jetCM[cbData])
        self.colorbarWidget.addItem(self.colorbar)
        self.misfitCheckBox = QtWidgets.QCheckBox('Show Misfit')
        self.misfitCheckBox.toggled.connect(self.toggleMisfit)
        self.selectCombo = QtWidgets.QComboBox()
        self.selectCombo.addItem("Misfit (time)")
        self.selectCombo.addItem("Misfit (total)")
        self.selectCombo.addItem("Observed")
        self.selectCombo.addItem("Predicted")
        self.selectCombo.activated[str].connect(self.changeCombo)
        self.selectCombo.setVisible(False)
        self.titleLabel = QtWidgets.QLabel(self.selectCombo.currentText())
        self.titleLabel.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
        self.titleLabel.setVisible(False)
        # Sliders select the color limits as percentages of the data range.
        self.maxCvalSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.maxCvalSlider.setMaximum(100)
        self.maxCvalSlider.setValue(100)
        self.maxCvalSlider.valueChanged.connect(self.setClim)
        self.maxCvalSlider.setVisible(False)
        self.minCvalSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.minCvalSlider.setMaximum(100)
        self.minCvalSlider.setValue(0)
        self.minCvalSlider.valueChanged.connect(self.updatePlot)
        self.minCvalSlider.setVisible(False)
        cbvLayout = QtWidgets.QVBoxLayout()
        cbvLayout.addWidget(self.cbMaxLabel)
        cbvLayout.addWidget(self.colorbarWidget)
        cbvLayout.addWidget(self.cbMinLabel)
        hLayout = QtWidgets.QHBoxLayout()
        hLayout.addWidget(self.plotWidget)
        hLayout.addLayout(cbvLayout)
        vLayout = QtWidgets.QVBoxLayout(self)
        hMisLayout = QtWidgets.QHBoxLayout()
        hMisLayout.addWidget(self.misfitCheckBox)
        hMisLayout.addWidget(self.selectCombo)
        vLayout.addLayout(hMisLayout)
        vLayout.addWidget(self.titleLabel)
        vLayout.addLayout(hLayout)
        vLayout.addWidget(self.maxCvalSlider)
        vLayout.addWidget(self.minCvalSlider)
    def clickEvent(self, event):
        """Emit a 'closestLoc' selection signal for clicks inside the plot."""
        if self.plotWidget.sceneBoundingRect().contains(event.scenePos()):
            mousePoint = self.plotWidget.getViewBox().mapSceneToView(event.scenePos())
            signal = {'name':'closestLoc',
                      'x':mousePoint.x(),
                      'y':mousePoint.y()}
            self.ChangeSelectionSignal.emit(signal)
        else:
            pass
    @QtCore.pyqtSlot(bool)
    def toggleMisfit(self, show):
        """ Callback that gets fired 'Show Misfit' box is toggled """
        if self.data is not None:
            if show:
                self.colorbarWidget.setVisible(True)
                self.maxCvalSlider.setVisible(True)
                self.minCvalSlider.setVisible(True)
                self.selectCombo.setVisible(True)
                self.titleLabel.setVisible(True)
                self.showData = True
            else:
                # NOTE(review): self.showData is not reset to False here, so
                # unchecking keeps the colored display on -- confirm intended.
                self.colorbarWidget.setVisible(False)
                self.maxCvalSlider.setVisible(False)
                self.minCvalSlider.setVisible(False)
                self.selectCombo.setVisible(False)
                self.titleLabel.setVisible(False)
            self.updatePlot()
    @QtCore.pyqtSlot(str)
    def changeCombo(self, text):
        """Switch the displayed quantity and its colorbar label format."""
        if self.selectCombo.currentText() == "Misfit (time)":
            self.cbFormatStr = "{:.2f}"
        elif self.selectCombo.currentText() == "Misfit (total)":
            self.cbFormatStr = "{:.2f}"
        elif self.selectCombo.currentText() == "Observed":
            self.cbFormatStr = "{:.2e}"
        elif self.selectCombo.currentText() == "Predicted":
            self.cbFormatStr = "{:.2e}"
        self.titleLabel.setText(text)
        self.setData()
        self.updatePlot()
    def updatePlot(self):
        """Redraw the scatter, colored by self.data when enabled."""
        # NOTE(review): bitwise `&` on bools works here but `and` is the
        # intended operator (`&` does not short-circuit).
        if self.showData & (self.data is not None):
            clMin, clMax = self.getClim()
            self.cbMaxLabel.setText(self.cbFormatStr.format(clMax))
            self.cbMinLabel.setText(self.cbFormatStr.format(clMin))
            # Map each value to one of 256 jet brushes via digitize.
            bins = np.linspace(clMin, clMax, 255)
            di = np.digitize(self.data, bins)
            self.scatter.setData(self.x, self.y, pen=None,
                                 brush=jetBrush[di], symbolSize=10.)
        else:
            self.scatter.setData(self.x, self.y, pen=None, brush='k', symbolSize=10.)
    def setAll(self, x, y):
        """Plot every location in black and frame the view with a margin."""
        self.scatter.setData(x, y, pen=None, brush='k', symbolSize=10.)
        self.plotWidget.setXRange(x.min()-100., x.max()+100.)
        self.plotWidget.setYRange(y.min()-100., y.max()+100.)
    def setLocation(self, loc):
        """Move the crosshair to the first row of `loc`."""
        xl = loc.iloc[0].x
        yl = loc.iloc[0].y
        self.selectedLocVline.setPos(xl)
        self.selectedLocHline.setPos(yl)
    def setTime(self, data_times):
        """ Set the displayed misfit data """
        self.tInd = data_times.tInd.iloc[0]
        # Total misfit does not depend on the selected time channel.
        if self.selectCombo.currentText() != "Misfit (total)":
            self.setData()
            self.updatePlot()
    def setData(self):
        """Recompute self.data/self.x/self.y for the selected quantity."""
        data_time = self.parent.data.getTime(self.tInd)
        self.x = data_time.x.values
        self.y = data_time.y.values
        if self.selectCombo.currentText() == "Misfit (time)":
            if data_time.dBdt_Z_pred.any():
                self.data = (data_time.dBdt_Z-data_time.dBdt_Z_pred).abs()/data_time.dBdt_Z_uncert
            else:
                self.data = None
        elif self.selectCombo.currentText() == "Misfit (total)":
            if data_time.dBdt_Z_pred.any():
                # Per-location normalized L2 misfit over all times.
                grp = self.parent.data.df.groupby('locInd')
                l22 = lambda g: np.linalg.norm((g.dBdt_Z - g.dBdt_Z_pred)/g.dBdt_Z_uncert)**2/g.shape[0]
                grp = grp.agg(l22)[['x', 'y', 'dBdt_Z']]
                self.data = grp.dBdt_Z.values
                self.x = self.parent.data.locs.sort_index().x.values
                self.y = self.parent.data.locs.sort_index().y.values
                print(self.data)
            else:
                self.data = None
        elif self.selectCombo.currentText() == "Observed":
            self.data = data_time.dBdt_Z
        elif self.selectCombo.currentText() == "Predicted":
            self.data = data_time.dBdt_Z_pred
        else:
            self.data = None
        if self.data is not None:
            self.minVal = self.data.min()
            self.maxVal = self.data.max()
    def setClim(self):
        """ Set the color limits on the misfit scatter plot """
        lsVal = self.minCvalSlider.value()
        hsVal = self.maxCvalSlider.value()
        # Keep min slider strictly below max slider.
        if lsVal >= hsVal:
            self.minCvalSlider.setValue(hsVal-1)
            lsVal = self.minCvalSlider.value()
        self.updatePlot()
    def getClim(self):
        """Return (clMin, clMax): slider percentages applied to data range."""
        lsVal = self.minCvalSlider.value()
        hsVal = self.maxCvalSlider.value()
        dv = self.data.max()-self.data.min()
        clMin = self.data.min()+dv*lsVal/100.
        clMax = self.data.min()+dv*hsVal/100.
        return clMin, clMax
| 2.296875 | 2 |
HREMGromacs/multidir_setup.py | MauriceKarrenbrock/HREMGromacs | 0 | 12772357 | <gh_stars>0
# -*- coding: utf-8 -*-
#############################################################
# Copyright (c) 2021-2021 <NAME> #
# #
# This software is open-source and is distributed under the #
# MIT License #
#############################################################
"""Module containing the functions to setup the multidir directories for the HREM
"""
import shutil
from pathlib import Path
def make_hrem_multidir_directory(number_of_replicas=8,
                                 dir_name='BATTERY',
                                 exist_ok=False):
    """Create the multidir directory tree used by a HREM run.

    Parameters
    ----------
    number_of_replicas : int, default=8
        how many ``scaled*`` sub-directories to create
    dir_name : str, default=BATTERY
    exist_ok : bool, default=False
        if True no exception is raised when the directory already exists

    Returns
    --------
    pathlib.Path
        the resolved path of the created directory

    Notes
    ----------
    The directory is populated with sub-directories named
    scaled0, ..., scaled(`number_of_replicas` - 1).
    """
    base = Path(dir_name).resolve()
    base.mkdir(parents=True, exist_ok=exist_ok)
    for replica in range(number_of_replicas):
        scaled_dir = base / 'scaled{}'.format(replica)
        scaled_dir.mkdir(parents=True, exist_ok=True)
    return base
def copy_file_in_hrem_multidir(file_name='empty_plumed.dat',
                               dir_name='BATTERY'):
    """Copy a file into every ``scaled*`` directory of a multidir directory.

    If the file does not exist yet it is created empty first -- handy for
    the (often empty) plumed input file a HREM run needs.

    Parameters
    -----------
    file_name : str or path, default=empty_plumed.dat
    dir_name : str or path, default=BATTERY
    """
    source_file = Path(file_name)
    # Create the file (empty) if it is missing, so the copy cannot fail.
    source_file.touch()
    for scaled_dir in Path(dir_name).glob('scaled*'):
        shutil.copy(str(source_file), str(scaled_dir))
def make_multiple_hrem_batteries(number_of_batteries,
                                 replicas_per_battery=8,
                                 plumed_file='empty_plumed.dat',
                                 directory=None):
    """High level helper that creates several independent HREM batteries.

    Often the wanted number of nanoseconds cannot be run inside the
    maximum wall time of an HPC cluster (often 24h), so multiple separate
    runs are executed in parallel.

    The created directories are called BATTERY0, BATTERY1, ... and each
    contains scaled0, scaled1, ...

    Parameters
    ------------
    number_of_batteries : int
        the number of independent HREM runs you want to do
    replicas_per_battery : int, default=8
        the number of replicas, and therefore of ``scaled*`` sub-directories
    plumed_file : str or path, default=empty_plumed.dat
        the plumed input file; created empty if it does not exist
    directory : str or path, optional, default=current working directory
        where the HREM batteries shall be created

    Returns
    ---------
    list(pathlib.Path)
        the paths of the created battery directories (not the sub-directories)

    Notes
    --------
    On a GPU with a 15000 atom system a very conservative estimate is
    15 nanoseconds in 24h; on CPU only, roughly 5 ns on 64 Broadwell cores.
    These are arbitrary and very conservative numbers.
    """
    base_dir = Path.cwd() if directory is None else Path(directory)
    batteries = []
    for battery_index in range(number_of_batteries):
        battery_dir = make_hrem_multidir_directory(
            number_of_replicas=replicas_per_battery,
            dir_name=base_dir / 'BATTERY{}'.format(battery_index),
            exist_ok=True)
        batteries.append(battery_dir)
        copy_file_in_hrem_multidir(file_name=plumed_file,
                                   dir_name=battery_dir)
    return batteries
def make_multidir_mdrun_string_for_hrem(multidir_directories,
                                        gromacs_path='gmx_mpi',
                                        plumed_file='empty_plumed.dat',
                                        deffnm='HREM',
                                        tpr_file='HREM.tpr',
                                        replex=100):
    """Build the gmx mdrun command line for a multidir HREM run.

    Parameters
    -----------
    multidir_directories : list(str) or list(path)
        the multidir directories, ordered: first the reference state,
        last the most scaled replica
    gromacs_path : str, default=gmx_mpi
    plumed_file : str, default=empty_plumed.dat
    deffnm : str, default=HREM
    tpr_file : str, default=HREM.tpr
    replex : int, default=100
        after how many steps a swap shall be attempted

    Returns
    ---------
    str
        something like::

            gmx_mpi mdrun -v -plumed empty_plumed.dat -replex 100 -hrex -dlb no
            -s HREM.tpr -deffnm HREM -multidir BATTERY/scaled0 BATTERY/scaled1
    """
    joined_dirs = ' '.join(str(directory) for directory in multidir_directories)
    return (f'{gromacs_path} mdrun -v -plumed {plumed_file}'
            f' -replex {replex} -hrex -dlb no -s {tpr_file} -deffnm {deffnm}'
            f' -multidir {joined_dirs}')
| 2.609375 | 3 |
plugins/active_directory_ldap/komand_active_directory_ldap/actions/enable_user/action.py | JaredAllen13/insightconnect-plugins | 0 | 12772358 | import insightconnect_plugin_runtime
# Custom imports below
from .schema import EnableUserInput, EnableUserOutput, Input, Output
class EnableUser(insightconnect_plugin_runtime.Action):
    """Action that enables an Active Directory account by distinguished name."""
    def __init__(self):
        super(self.__class__, self).__init__(
            name="enable_user",
            description="Enable a account",
            input=EnableUserInput(),
            output=EnableUserOutput(),
        )
    def run(self, params={}):
        # `True` tells manage_user to enable the account identified by its DN.
        # (The mutable default for `params` is read-only here, so it is safe.)
        return {Output.SUCCESS: self.connection.client.manage_user(params.get(Input.DISTINGUISHED_NAME), True)}
| 2.09375 | 2 |
keymaster/common/model/decrypted_data.py | shiroyuki/spymaster | 0 | 12772359 | <reponame>shiroyuki/spymaster
from dataclasses import dataclass
from typing import List, Any, Dict, Optional
from keymaster.common.model.credential import Credential
from keymaster.common.model.note import Note
@dataclass
class DecryptedData:
    """Decrypted vault payload: the user's credentials and notes."""
    credentials: List[Credential]
    notes: List[Note]
    @staticmethod
    def make(raw_data: Optional[Dict[str, Any]] = None):
        """Build a DecryptedData from an already-decrypted raw dict.

        A missing/empty ``raw_data`` yields empty lists.  Absent
        'credentials'/'notes' keys also default to empty lists (the
        original iterated over ``raw_data.get(...)``, which raises
        TypeError when a key is missing).
        """
        if not raw_data:
            return DecryptedData(credentials=[], notes=[])
        return DecryptedData(
            credentials=[
                Credential(**item)
                for item in raw_data.get('credentials', [])
            ],
            notes=[
                Note(**item)
                for item in raw_data.get('notes', [])
            ],
        )
| 2.53125 | 3 |
No_0018_4 Sum aka four Sum/4_sum.py | coderMaruf/leetcode-1 | 32 | 12772360 | '''
Given an array nums of n integers and an integer target, are there elements a, b, c, and d in nums
such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
Note:
The solution set must not contain duplicate quadruplets.
Example:
Given array nums = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
'''
# allow python featre, type hints
from typing import List
class Solution:
    def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
        """Return all unique quadruplets [a, b, c, d] from nums whose sum
        equals target.

        Classic sorted two-pointer approach: fix indices i < j, then move
        k (from the left) and m (from the right) towards each other.
        Duplicates are skipped at every level so no quadruplet repeats.
        Time O(n^3); extra space O(1) beyond the result list.
        """
        # Preprocessing: sort in place to keep increasing order.
        nums.sort()
        solution = []
        size_of_input = len(nums)
        # size_of_input - 3 avoids boundary crossing; reserves room for j, k, m.
        for i in range(size_of_input - 3):
            # Skip duplicate first elements (dead `pass` before `continue`
            # removed from the original).
            if i > 0 and nums[i - 1] == nums[i]:
                continue
            # size_of_input - 2 reserves room for k, m.
            for j in range(i + 1, size_of_input - 2):
                # Skip duplicate second elements.
                if j > i + 1 and nums[j - 1] == nums[j]:
                    continue
                # k starts at the smallest value after j,
                # m starts at the largest value after j.
                k, m = (j + 1), (size_of_input - 1)
                while k < m:
                    quadruple = [nums[i], nums[j], nums[k], nums[m]]
                    four_sum = sum(quadruple)
                    if four_sum == target:
                        solution.append(quadruple)
                        # Advance both pointers past any duplicates.
                        k += 1
                        while k < m and nums[k - 1] == nums[k]:
                            k += 1
                        m -= 1
                        while k < m and nums[m + 1] == nums[m]:
                            m -= 1
                    elif four_sum > target:
                        # Sum too large: shrink from the right.
                        m -= 1
                        while k < m and nums[m + 1] == nums[m]:
                            m -= 1
                    else:
                        # Sum too small: grow from the left.
                        k += 1
                        while k < m and nums[k - 1] == nums[k]:
                            k += 1
        return solution
## Time Complexity
# O( N^3 )
# Preprocess of sorting input list takes O( N log N )
# Outer for loop takes O( N ) to iterate index i
# Also, inner for loop takes O( N ) to iterate index j
# Next, while loop takes O( N ) to iterate index k, m
# These three loops takes O( N^3 )
## Space Complexity
# O( 1 )
# Preprocess of sorting is in-place, O( 1 ), no need of extra space
# In the nested loop, we use variables for index i, j, k, m
# and a list to store the solution
# These usage are of O( 1 )
def test_bench():
    """Run fourSum on two sample inputs and print each result."""
    cases = [
        ([1, 0, -1, 0, -2, 2], 0),
        ([1, 0, -1, 0, -2, 2], 1),
    ]
    for nums, target in cases:
        quadruplets = Solution().fourSum(nums=nums, target=target)
        print(quadruplets)
    return
# expected output:
'''
[[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]
[[-2, 0, 1, 2], [-1, 0, 0, 2]]
'''
if __name__ == '__main__':
test_bench() | 3.796875 | 4 |
app/Service/Service_Livro.py | MerciaFerreira/Projeto_devweb | 0 | 12772361 | <reponame>MerciaFerreira/Projeto_devweb<gh_stars>0
from app import db
from app.Entidades.Livro import Livro
from app.Service.Service_Autor import ServiceAutor
class ServiceLivro:
    """Service layer with the CRUD operations for ``Livro`` (book) records."""
    @staticmethod
    def save_livro(dados):
        """Create and persist a new book unless its ISBN already exists.

        Returns the created ``Livro``, or ``None`` when the ISBN is taken.
        """
        livro = Livro.query.filter_by(isbn=dados['isbn']).first()
        if not livro:
            novo_livro = Livro(
                isbn=dados['isbn'],
                titulo=dados['titulo'],
                editora=dados['editora'],
                quantidade=dados['quantidade']
            )
            # Resolve each author id and attach it to the new book
            # (plain loop instead of a side-effect list comprehension).
            for autor_id in dados['autores']:
                novo_livro.autores.append(ServiceAutor.get_autor_by_id(autor_id))
            ServiceLivro.save(novo_livro)
            return novo_livro
    @staticmethod
    def get_all_livros():
        """Return every book."""
        livros = Livro.query.all()
        return livros
    @staticmethod
    def get_livro_by_id(id):
        """Return the book with primary key ``id`` (or None)."""
        livro = Livro.query.get(id)
        return livro
    @staticmethod
    def get_livro_by_isbn(isbn):
        """Return the book with the given ISBN (or None)."""
        livro = Livro.query.filter_by(isbn=isbn).first()
        return livro
    @staticmethod
    def get_livros_by_autor(dados):
        """Return all books by the author whose id is ``dados['id']``.

        ``@staticmethod`` added for consistency with the other helpers.
        """
        autor = ServiceAutor.get_autor_by_id(dados['id'])
        livros = Livro.query.with_parent(autor).all()
        return livros
    @staticmethod
    def update_livro(id, dados):
        """Update an existing book in place; returns it (or None if absent)."""
        livro = Livro.query.get(id)
        if livro:
            livro.isbn = dados['isbn']
            livro.titulo = dados['titulo']
            livro.editora = dados['editora']
            livro.quantidade = dados['quantidade']
            db.session.commit()
        return livro
    @staticmethod
    def delete_livro(id):
        """Delete the book with primary key ``id``; returns it (or None)."""
        livro = Livro.query.get(id)
        if livro:
            ServiceLivro.delete(livro)
        return livro
    @staticmethod
    def save(dados):
        """Add ``dados`` to the session and commit."""
        db.session.add(dados)
        db.session.commit()
    @staticmethod
    def delete(dados):
        """Delete ``dados`` from the session and commit."""
        db.session.delete(dados)
        db.session.commit()
#outra forma de fazer mas melhor a que já tá
#linha 18 - 21
# for autor in dados['autores']:
# autor_encontrado = ServiceAutor.get_autor_by_id(autor)
# if autor:
# novo_livro.autores.append(autor_encontrado) | 2.5625 | 3 |
src/wavestate/iirrational/fitters_ZPK/codings_s/cplx_sos_NL_orig.py | wavestate/wavestate-iirrational | 0 | 12772362 | <reponame>wavestate/wavestate-iirrational
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
from ..codings_cmn import (
CodingType,
# Ipi,
# I2pi
)
class CodingSOSnl(CodingType):
    """Nonlinear parameterization of a second order section (SOS).

    The raw parameters ``p_nl_c1``/``p_nl_c2`` are squared wherever they
    are used, so the effective coefficients c1 = p_nl_c1**2 and
    c2 = p_nl_c2**2 are always non-negative; together with ``unstable``
    this restricts the represented roots to one half plane, shifted by
    the minimum bandwidth ``min_BW_Hz``.
    """
    N_parameters = 2
    p_nl_c1 = 0
    p_nl_c2 = 0
    # derivatives are clamped away from zero by this amount
    deriv_deadzone = 1e-4
    # should be a positive number
    min_BW_Hz = 0
    # True false means mirror to respective side
    unstable = False
    # when True only a single real root is represented (1 parameter)
    single_root = False
    def update(self, nl_c1, nl_c2=None):
        """Set the raw (nonlinear) parameters; nl_c2 required unless single_root."""
        if self.single_root:
            self.p_nl_c1 = nl_c1
        else:
            assert nl_c2 is not None
            self.p_nl_c1 = nl_c1
            self.p_nl_c2 = nl_c2
    def reduce(self):
        """Return the parameter list (inverse of update)."""
        if self.single_root:
            return [self.p_nl_c1]
        else:
            return [self.p_nl_c1, self.p_nl_c2]
    @property
    def F_Hz(self):
        """Imaginary part (frequency) of the complex root pair, 0 if real."""
        if self.single_root:
            return 0
        c1 = self.p_nl_c1 ** 2
        c2 = self.p_nl_c2 ** 2
        disc = c1 * c1 - 4 * c2
        if disc > 0:
            return 0
        else:
            return ((-disc) ** 0.5) / 2
    @property
    def gain_effect(self):
        """Sign (+1/-1) contributed by the real roots' half-plane placement."""
        rs = self.roots_r()
        if len(rs) == 0:
            return 1
        elif len(rs) == 1:
            return 1 if rs[0] < 0 else -1
        else:
            if rs[0] < 0:
                return 1 if rs[1] < 0 else -1
            else:
                return 1 if rs[1] > 0 else -1
    def check_dist_limit(self, F_Hz=None, thresh=1):
        """Refresh min_BW_Hz from the system's distance limit; returns its
        derivative (0 when the auto limit is below ``thresh``)."""
        if self.sys.distance_limit_auto >= thresh:
            if F_Hz is None:
                self.min_BW_Hz, D = self.sys.distance_limit(
                    self.F_Hz, with_derivative=True
                )
            else:
                self.min_BW_Hz, D = self.sys.distance_limit(F_Hz, with_derivative=True)
            return D
        return 0
    def transfer(self):
        """Evaluate the section's transfer function on the system's S grid."""
        if self.single_root:
            self.check_dist_limit(F_Hz=0, thresh=2)
            # second order sections (2x roots either real or complex conj)
            c1 = self.p_nl_c1 ** 2
            X = self.sys.Xsf_grid
            if not self.unstable:
                return (c1 + self.min_BW_Hz) + X
            else:
                return -(c1 + self.min_BW_Hz) + X
        else:
            # second order sections (2x roots either real or complex conj)
            c1 = self.p_nl_c1 ** 2
            c2 = self.p_nl_c2 ** 2
            disc = c1 * c1 - 4 * c2
            if disc > 0:
                self.check_dist_limit(F_Hz=0, thresh=2)
            else:
                F_Hz = ((-disc) ** 0.5) / 2
                self.check_dist_limit(F_Hz=F_Hz, thresh=2)
            V = self.min_BW_Hz
            X = self.sys.Xsf_grid
            Xsq = self.sys.Xsf_grid_sq
            # V = 0
            if self.unstable:
                xfer = (c2 + V * (V + c1)) - (X * (c1 + 2 * V) - Xsq)
            else:
                xfer = (c2 + V * (V + c1)) + (X * (c1 + 2 * V) + Xsq)
            return xfer
    @property
    def derivative_deadzoned(self):
        """True when any raw parameter sits inside the derivative deadzone."""
        if abs(self.p_nl_c1) < self.deriv_deadzone:
            return True
        if not self.single_root:
            if abs(self.p_nl_c2) < self.deriv_deadzone:
                return True
        return False
    def derivative(self):
        """Derivatives of transfer() w.r.t. the raw parameters.

        Raw parameters within ``deriv_deadzone`` of zero are clamped to
        +/- deriv_deadzone so the chain-rule factor never vanishes.
        """
        pD_c1 = self.p_nl_c1
        if self.p_nl_c1 > 0:
            if self.p_nl_c1 < self.deriv_deadzone:
                pD_c1 = self.deriv_deadzone
        else:
            if self.p_nl_c1 > -self.deriv_deadzone:
                pD_c1 = -self.deriv_deadzone
        if self.single_root:
            self.check_dist_limit(F_Hz=0, thresh=2)
            if not self.unstable:
                return [2 * pD_c1]
            else:
                return [-2 * pD_c1]
        else:
            c1 = self.p_nl_c1 ** 2
            c2 = self.p_nl_c2 ** 2
            pD_c2 = self.p_nl_c2
            if self.p_nl_c2 > 0:
                if self.p_nl_c2 < self.deriv_deadzone:
                    pD_c2 = self.deriv_deadzone
            else:
                if self.p_nl_c2 > -self.deriv_deadzone:
                    pD_c2 = -self.deriv_deadzone
            disc = c1 * c1 - 4 * c2
            if disc > 0:
                V_D = self.check_dist_limit(F_Hz=0, thresh=2)
                V_D_c1 = 0
                V_D_c2 = 0
            else:
                F_Hz = ((-disc) ** 0.5) / 2
                V_D = self.check_dist_limit(F_Hz=F_Hz, thresh=2)
                V_D_c1 = -2 * pD_c1 * c1 / disc * V_D
                V_D_c2 = -pD_c2 / disc * V_D
            # TODO, the derivative inclusion of V_D_c1,2 isn't tested well, but
            # that is partially because it doesn't matter much
            # (the values computed above are deliberately overwritten here).
            V_D_c1 = 0
            V_D_c2 = 0
            V = self.min_BW_Hz
            X = self.sys.Xsf_grid
            if not self.unstable:
                return [
                    (2 * pD_c1 + 2 * V_D_c1) * X + 2 * V * (pD_c1 + V_D_c1),
                    (2 * X * V_D_c2) + 2 * pD_c2 + V_D_c2 * (2 * V + c1),
                ]
            else:
                return [
                    -(2 * pD_c1 + 2 * V_D_c1) * X
                    + 2 * V * (pD_c1 + V_D_c1)
                    + 2 * pD_c1 * V,
                    -(2 * X * V_D_c2) + 2 * pD_c2 + V_D_c2 * (2 * V + c1),
                ]
    def update_roots(self, r1, r2=None):
        """
        r2, may be unspecified, in which case it is assumed to be nothing, if r1 is real, or otherwise the conjugate of r1

        Returns True when the root(s) could be represented exactly, False
        when they had to be clipped to the min_BW_Hz boundary.
        """
        if r2 is None and r1.imag == 0:
            self.check_dist_limit(F_Hz=0, thresh=1)
            self.single_root = True
            self.N_parameters = 1
            # TODO, fix for S domain
            if self.unstable is not None:
                if r1.real > 0:
                    self.unstable = True
                    r1p = r1.real
                else:
                    self.unstable = False
                    r1p = -r1.real
                if r1p > self.min_BW_Hz:
                    self.p_nl_c1 = (r1p - self.min_BW_Hz) ** 0.5
                    return True
                else:
                    self.p_nl_c1 = 0
                    return False
        else:
            self.single_root = False
            self.N_parameters = 2
            if r1.real > 0:
                self.unstable = True
                r1p = r1.real
            else:
                self.unstable = False
                r1p = -r1.real
            if r1.imag != 0:
                F_Hz = abs(r1.imag)
                self.check_dist_limit(F_Hz=F_Hz, thresh=1)
                if r1p > self.min_BW_Hz:
                    ret = True
                    r1p = r1p - self.min_BW_Hz
                else:
                    ret = False
                    r1p = 0
                # TODO check conjugates
                self.p_nl_c2 = (r1p ** 2 + r1.imag ** 2) ** 0.5
                self.p_nl_c1 = (2 * r1p) ** 0.5
            else:
                self.check_dist_limit(0, thresh=1)
                if r1p > self.min_BW_Hz:
                    ret = True
                    r1p = r1p - self.min_BW_Hz
                else:
                    ret = False
                    r1p = 0
                if self.unstable:
                    if r2 < 0:
                        raise RuntimeError(
                            "Can't share stable/unstable roots in this coding"
                        )
                    r2p = r2
                else:
                    if r2 > 0:
                        raise RuntimeError(
                            "Can't share stable/unstable roots in this coding"
                        )
                    r2p = -r2
                if r2p > self.min_BW_Hz:
                    r2p = r2p - self.min_BW_Hz
                else:
                    ret = False
                    r2 = 0
                self.p_nl_c2 = (r1p * r2p) ** 0.5
                self.p_nl_c1 = (r1p + r2p) ** 0.5
            # print('ROOTS: ', self.roots())
            return ret
        # NOTE(review): only reached when unstable is None in the
        # single-root branch above; returns None in that case.
        return
    def roots(self):
        """All represented roots (one real, two real, or a conjugate pair)."""
        # second order sections (2x roots either real or complex conj)
        c1 = self.p_nl_c1 ** 2
        if self.single_root:
            if not self.unstable:
                return [-c1 - self.min_BW_Hz]
            else:
                return [c1 + self.min_BW_Hz]
        else:
            c2 = self.p_nl_c2 ** 2
            # a = c2, b = c1, c = 1
            disc = c1 * c1 - 4 * c2
            if disc >= 0:
                sqrt_disc = disc ** 0.5
                # pick the numerically stable quadratic root first
                if c1 < 0:
                    r1 = (-c1 + sqrt_disc) / 2
                else:
                    r1 = (-c1 - sqrt_disc) / 2
                r2 = c2 / r1
                if not self.unstable:
                    return [r1 - self.min_BW_Hz, r2 - self.min_BW_Hz]
                else:
                    return [-r1 + self.min_BW_Hz, -r2 + self.min_BW_Hz]
            else:
                sqrt_disc = (-disc) ** 0.5
                if not self.unstable:
                    r1 = (-c1 + sqrt_disc * 1j) / 2 - self.min_BW_Hz
                else:
                    r1 = (+c1 + sqrt_disc * 1j) / 2 + self.min_BW_Hz
                return [r1, r1.conjugate()]
    def roots_r(self):
        """Only the real roots (empty when the pair is complex)."""
        # second order sections (2x roots either real or complex conj)
        c1 = self.p_nl_c1 ** 2
        if self.single_root:
            if not self.unstable:
                return [-c1 - self.min_BW_Hz]
            else:
                return [c1 + self.min_BW_Hz]
        else:
            c2 = self.p_nl_c2 ** 2
            # a = c2, b = c1, c = 1
            disc = c1 * c1 - 4 * c2
            if disc >= 0:
                sqrt_disc = disc ** 0.5
                if c1 < 0:
                    r1 = (-c1 + sqrt_disc) / 2
                else:
                    r1 = (-c1 - sqrt_disc) / 2
                r2 = c2 / r1
                if not self.unstable:
                    return [r1 - self.min_BW_Hz, r2 - self.min_BW_Hz]
                else:
                    return [-r1 + self.min_BW_Hz, -r2 + self.min_BW_Hz]
            else:
                return []
    def roots_c(self):
        """Only the upper-half-plane complex root (empty when roots are real)."""
        # second order sections (2x roots either real or complex conj)
        c1 = self.p_nl_c1 ** 2
        if self.single_root:
            return []
        else:
            c2 = self.p_nl_c2 ** 2
            # a = c2, b = c1, c = 1
            disc = c1 * c1 - 4 * c2
            if disc >= 0:
                return []
            else:
                sqrt_disc = (-disc) ** 0.5
                if not self.unstable:
                    r1 = (-c1 + sqrt_disc * 1j) / 2 - self.min_BW_Hz
                else:
                    r1 = (+c1 + sqrt_disc * 1j) / 2 + self.min_BW_Hz
                return [r1]
| 2.078125 | 2 |
frappe-bench/apps/erpnext/erpnext/patches/v7_2/setup_auto_close_settings.py | Semicheche/foa_frappe_docker | 1 | 12772363 | <filename>frappe-bench/apps/erpnext/erpnext/patches/v7_2/setup_auto_close_settings.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: introduce auto-close settings and retire stale opportunities."""
    # Days of inactivity after which records are auto-closed.
    opportunity_days = 15
    issue_days = 7

    # Selling Settings gains `close_opportunity_after_days`; default it.
    frappe.reload_doc("selling", "doctype", "selling_settings")
    frappe.db.set_value(
        "Selling Settings", "Selling Settings",
        "close_opportunity_after_days", opportunity_days)

    # Retroactively close opportunities stuck in 'Replied' for 15+ days.
    frappe.db.sql("""update `tabOpportunity` set status='Closed' where status='Replied'
        and date_sub(curdate(), interval 15 Day)>modified""")

    # Support Settings doctype gains `close_issue_after_days`; default it.
    frappe.reload_doc("support", "doctype", "support_settings")
    frappe.db.set_value(
        "Support Settings", "Support Settings",
        "close_issue_after_days", issue_days)
utils.py | akloster/jupyter-flightgear | 23 | 12772364 | from ipywidgets.widgets import HTML, Button
from tornado.ioloop import IOLoop
from IPython import display
import time
from traitlets import Bool
class LoopDecorator(object):
    """Runs the wrapped function at a fixed interval on the current Tornado
    IOLoop until the user presses the stop button."""
    def __init__(self, button, interval=1.0):
        # button: a StopButton whose `clicked` flag terminates the loop.
        # interval: target seconds between invocations of the wrapped function.
        self.button = button
        self.interval = interval
    def __call__(self, func):
        # Decorating a function displays the stop button and starts the loop
        # immediately.
        # NOTE(review): this returns None, so the decorated name is rebound
        # to None -- the function remains reachable only via the running loop.
        display.display(self.button)
        self.last_time = None
        self.wrapped = func
        self.iterate()
        return None
    def iterate(self):
        # Stop rescheduling once the user has clicked the button.
        if self.button.clicked:
            return
        loop = IOLoop.current()
        t = time.time()
        self.wrapped()
        # Wait for the remainder of the interval, clamped to [0.01, interval],
        # so a slow wrapped call never produces a negative or oversized delay.
        wait = self.interval
        wait = min(self.interval - time.time()+t , wait)
        wait = max(0.01, wait)
        wait = min(self.interval, wait)
        loop.call_later(wait, self.iterate)
class StopButton(Button):
    """A Button that remembers whether it has been pressed via the
    `clicked` traitlet; pressing it also disables the widget."""

    clicked = Bool(False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.clicked = False
        self.on_click(self._handle_press)

    def _handle_press(self, *args, **kwargs):
        # Grey out the widget and latch the stop flag.
        self.disabled = True
        self.clicked = True

    def loop(self, interval=1.0):
        """Return a LoopDecorator bound to this button."""
        return LoopDecorator(self, interval)
| 3.140625 | 3 |
utils/read_musk_data.py | qiyuangong/ppdpes_core | 0 | 12772365 | <filename>utils/read_musk_data.py
#!/usr/bin/env python
# coding=utf-8
# Read data and read tree fuctions for musk data
try:
from models.numrange import NumRange
from utils.utility import cmp_str
except ImportError:
from ..models.numrange import NumRange
from ..utils.utility import cmp_str
import pickle
import pdb
# Musk: [other(0, 1), f1-f166, class]
# f1-f166 as QID
# class as SA
QI_INDEX = range(2, 168)  # column indices of the quasi-identifier attributes f1-f166
SA_INDEX = -1  # column index of the sensitive attribute (the class label)
__DEBUG = False  # enable extra debug output (unused in this chunk)
def read_tree():
    """read tree from data/tree_*.txt, store them in att_tree
    """
    # One NumRange generalization hierarchy per quasi-identifier column.
    return [read_pickle_file(str(index)) for index in QI_INDEX]
def read_data():
    """
    Read the musk microdata from data/musk.data and return it as a list of
    records [QI values..., SA value].  Side effect: pickles each numeric
    QI attribute's (frequency dict, sorted values) to tmp/ for
    read_pickle_file() to consume.  Python 2 only ('rU' mode, `cmp=`).
    """
    QI_num = len(QI_INDEX)
    data = []
    numeric_dict = []
    for i in range(QI_num):
        # One value->frequency dict per quasi-identifier attribute.
        numeric_dict.append(dict())
    # order categorical attributes in intuitive order
    # here, we use the appear number
    data_file = open('data/musk.data', 'rU')
    for line in data_file:
        line = line.strip('.\n')
        line = line.replace(' ', '')
        temp = line.split(',')
        ltemp = []
        for i in range(QI_num):
            index = QI_INDEX[i]
            try:
                numeric_dict[i][temp[index]] += 1
            except KeyError:
                numeric_dict[i][temp[index]] = 1
            ltemp.append(temp[index])
        ltemp.append(temp[-1])  # sensitive attribute (class) goes last
        data.append(ltemp)
    # pickle numeric attributes and get NumRange
    # NOTE(review): data_file is never closed -- consider a `with` block.
    for i in range(QI_num):
        static_file = open('tmp/musk_' + str(QI_INDEX[i]) + '_static.pickle', 'wb')
        sort_value = list(numeric_dict[i].keys())
        sort_value.sort(cmp=cmp_str)  # Python 2 only: `cmp=` removed in Python 3
        pickle.dump((numeric_dict[i], sort_value), static_file)
        static_file.close()
    return data
def read_pickle_file(att_name):
"""
read pickle file for numeric attributes
return numrange object
"""
try:
static_file = open('tmp/musk_' + att_name + '_static.pickle', 'rb')
(numeric_dict, sort_value) = pickle.load(static_file)
except:
print "Pickle file not exists!!"
static_file.close()
result = NumRange(sort_value, numeric_dict)
return result
| 2.734375 | 3 |
T07-28/program.py | maa76/SSof-Project1920 | 2 | 12772366 | a = source()
if True:
b = a + 3 * sanitizer2(y)
else:
b = sanitizer(a)
sink(b) | 2.28125 | 2 |
tests/data_generation.py | drabenoro/geomstats | 0 | 12772367 | <filename>tests/data_generation.py
import itertools
import pytest
import geomstats.backend as gs
def better_squeeze(array):
    """Drop the leading axis when it is a singleton; otherwise return as-is."""
    is_single = len(array) == 1
    return gs.squeeze(array, axis=0) if is_single else array
class TestData:
    """Class for TestData objects: wraps raw test inputs in pytest params."""

    def generate_tests(self, smoke_test_data, random_test_data=None):
        """Wrap test data with corresponding markers.

        Parameters
        ----------
        smoke_test_data : list
            Test data that will be marked as smoke.  Each entry is expected
            to be a dict of keyword test inputs.
        random_test_data : list
            Test data that will be marked as random.  Entries may be dicts
            or plain tuples of positional inputs.
            Optional, default: None (treated as an empty list).

        Returns
        -------
        _: list
            Tests.
        """
        # None sentinel avoids the mutable-default-argument pitfall.
        if random_test_data is None:
            random_test_data = []
        tests = []
        if smoke_test_data:
            smoke_tests = [
                pytest.param(*data.values(), marks=pytest.mark.smoke)
                for data in smoke_test_data
            ]
            tests += smoke_tests
        if random_test_data:
            random_tests = [
                pytest.param(*data.values(), marks=pytest.mark.random)
                if isinstance(data, dict)
                else pytest.param(*data, marks=pytest.mark.random)
                for data in random_test_data
            ]
            tests += random_tests
        return tests
class ManifoldTestData(TestData):
    """Class for ManifoldTestData: data to test manifold properties."""

    def _random_point_belongs_data(
        self,
        smoke_space_args_list,
        smoke_n_points_list,
        space_args_list,
        n_points_list,
        belongs_atol=gs.atol,
    ):
        """Generate data to check that a random point belongs to the manifold.

        Parameters
        ----------
        smoke_space_args_list : list
            List of spaces' args on which smoke tests will run.
        smoke_n_points_list : list
            Integers representing the numbers of points on which smoke tests will run.
        space_args_list : list
            List of spaces' (manifolds') args on which randomized tests will run.
        n_points_list : list
            List of integers as numbers of points on which randomized tests will run.
        belongs_atol : float
            Absolute tolerance for the belongs function.
        """
        smoke_data = [
            dict(space_args=space_args, n_points=n_points, belongs_atol=belongs_atol)
            for space_args, n_points in zip(smoke_space_args_list, smoke_n_points_list)
        ]
        random_data = [
            dict(space_args=space_args, n_points=n_points, belongs_atol=belongs_atol)
            for space_args, n_points in zip(space_args_list, n_points_list)
        ]
        return self.generate_tests(smoke_data, random_data)

    def _projection_belongs_data(
        self, space_args_list, shape_list, n_samples_list, belongs_atol=gs.atol
    ):
        """Generate data to check that a point projected on a manifold belongs to the manifold.

        Parameters
        ----------
        space_args_list : list
            List of spaces' args on which tests will run.
        shape_list : list
            List of shapes of the random data that is generated, and projected.
        n_samples_list : list
            List of integers for the number of random data is generated, and projected.
        belongs_atol : float
            Absolute tolerance for the belongs function.
        """
        # Samples are drawn in the ambient space; the consuming test is
        # expected to project them before checking membership.
        random_data = [
            dict(
                space_args=space_args,
                data=gs.random.normal(size=(n_samples,) + shape),
                belongs_atol=belongs_atol,
            )
            for space_args, shape, n_samples in zip(
                space_args_list, shape_list, n_samples_list
            )
        ]
        return self.generate_tests([], random_data)

    def _to_tangent_is_tangent_data(
        self,
        space_cls,
        space_args_list,
        shape_list,
        n_vecs_list,
        is_tangent_atol=gs.atol,
    ):
        """Generate data to check that to_tangent returns a tangent vector.

        Parameters
        ----------
        space_cls : Manifold
            Class of the space, i.e. a child class of Manifold.
        space_args_list : list
            List of spaces' args on which tests will run.
        shape_list : list
            List of shapes of the random vectors generated, and projected.
        n_vecs_list : list
            List of integers for the number of random vectors generated, and projected.
        is_tangent_atol : float
            Absolute tolerance for the is_tangent function.
        """
        random_data = []
        for space_args, shape, n_vecs in zip(space_args_list, shape_list, n_vecs_list):
            space = space_cls(*space_args)
            vec = gs.random.normal(size=(n_vecs,) + shape)
            # A single random base point is shared by the whole batch of vectors.
            base_point = space.random_point()
            random_data.append(
                dict(
                    space_args=space_args,
                    vec=vec,
                    base_point=base_point,
                    is_tangent_atol=is_tangent_atol,
                )
            )
        return self.generate_tests([], random_data)
class OpenSetTestData(ManifoldTestData):
    """Test data for manifolds that are open subsets of an ambient vector space."""

    def _to_tangent_is_tangent_in_ambient_space_data(
        self, space_cls, space_args_list, shape_list, is_tangent_atol=gs.atol
    ):
        """Generate data to check that tangent vectors are in ambient space's tangent space.

        Parameters
        ----------
        space_cls : Manifold
            Class of the space, i.e. a child class of Manifold.
        space_args_list : list
            Arguments to pass to constructor of the manifold.
        shape_list : list
            List of shapes of the random data that is generated, and projected.
        is_tangent_atol : float
            Absolute tolerance for the is_tangent function.
        """
        random_data = [
            dict(
                space_args=space_args,
                vector=gs.random.normal(size=shape),
                # shape[0] base points: one per generated vector in the batch.
                base_point=space_cls(*space_args).random_point(shape[0]),
                is_tangent_atol=is_tangent_atol,
            )
            for space_args, shape in zip(space_args_list, shape_list)
        ]
        return self.generate_tests([], random_data)
class LevelSetTestData(ManifoldTestData):
    """Test data for embedded manifolds with intrinsic/extrinsic coordinates."""

    def _extrinsic_intrinsic_composition_data(
        self, space_cls, space_args_list, n_samples_list
    ):
        """Generate data to check that changing coordinate system twice gives back the point.

        Assumes that random_point generates points in extrinsic coordinates.

        Parameters
        ----------
        space_cls : Manifold
            Class of the space, i.e. a child class of Manifold.
        space_args_list : list
            Arguments to pass to constructor of the manifold.
        n_samples_list : list
            List of number of extrinsic points to generate.
        """
        random_data = [
            dict(
                space_args=space_args,
                point_extrinsic=space_cls(*space_args).random_point(n_samples),
            )
            for space_args, n_samples in zip(space_args_list, n_samples_list)
        ]
        return self.generate_tests([], random_data)

    def _intrinsic_extrinsic_composition_data(self, space_args_list, n_samples_list):
        """Generate data to check that changing coordinate system twice gives back the point.

        Assumes that the first elements in space_args is the dimension of the space.

        Parameters
        ----------
        space_args_list : list
            Arguments to pass to constructor of the manifold.
        n_samples_list : list
            List of number of intrinsic points to generate.
        """
        # NOTE(review): `(n_samples,) + space_args[0]` requires space_args[0]
        # to be a tuple (an intrinsic shape); if it is an int dimension as the
        # docstring says, this raises TypeError -- confirm the convention.
        random_data = [
            dict(
                space_args=space_args,
                point_intrinsic=gs.random.normal(size=(n_samples,) + space_args[0]),
            )
            for space_args, n_samples in zip(space_args_list, n_samples_list)
        ]
        return self.generate_tests([], random_data)
class LieGroupTestData(ManifoldTestData):
    """Test data for Lie groups: group exp/log inversion at several base points."""

    def _exp_log_composition_data(
        self,
        group_cls,
        group_args_list,
        shape_list,
        n_samples_list,
        rtol=gs.rtol,
        atol=gs.atol,
    ):
        """Generate data to check that group exponential and logarithm are inverse.

        Parameters
        ----------
        group_cls : LieGroup
            Class of the group, i.e. a child class of LieGroup.
        group_args_list : list
            Arguments to pass to constructor of the Lie group.
        shape_list : list
            List of shapes of the random tangent vectors to generate.
        n_samples_list : list
            List of number of points and tangent vectors to generate.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = []
        for group_args, shape, n_samples in zip(
            group_args_list, shape_list, n_samples_list
        ):
            group = group_cls(*group_args)
            # Exercise both a generic random base point and the identity.
            for base_point in [group.random_point(), group.identity]:
                tangent_vec = group.to_tangent(
                    gs.random.normal(size=(n_samples,) + shape), base_point
                )
                random_data.append(
                    dict(
                        group_args=group_args,
                        tangent_vec=tangent_vec,
                        base_point=base_point,
                        rtol=rtol,
                        atol=atol,
                    )
                )
        return self.generate_tests([], random_data)

    def _log_exp_composition_data(
        self, group_cls, group_args_list, n_samples_list, rtol=gs.rtol, atol=gs.atol
    ):
        """Generate data to check that group logarithm and exponential are inverse.

        Parameters
        ----------
        group_cls : LieGroup
            Class of the group, i.e. a child class of LieGroup.
        group_args_list : list
            List of arguments to pass to constructor of the Lie group.
        n_samples_list : list
            List of number of points and tangent vectors to generate.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = []
        for group_args, n_samples in zip(group_args_list, n_samples_list):
            group = group_cls(*group_args)
            # Exercise both a generic random base point and the identity.
            for base_point in [group.random_point(), group.identity]:
                point = group.random_point(n_samples)
                random_data.append(
                    dict(
                        group_args=group_args,
                        point=point,
                        base_point=base_point,
                        rtol=rtol,
                        atol=atol,
                    )
                )
        return self.generate_tests([], random_data)
class VectorSpaceTestData(ManifoldTestData):
    """Test data for vector spaces: properties of their bases."""

    def _basis_belongs_data(self, space_args_list, belongs_atol=gs.atol):
        """Generate data to check that basis elements belong to vector space.

        Parameters
        ----------
        space_args_list : list
            List of arguments to pass to constructor of the vector space.
        belongs_atol : float
            Absolute tolerance of the belongs function.
        """
        random_data = [
            dict(space_args=space_args, belongs_atol=belongs_atol)
            for space_args in space_args_list
        ]
        return self.generate_tests([], random_data)

    def _basis_cardinality_data(self, space_args_list):
        """Generate data to check that the number of basis elements is the dimension.

        Parameters
        ----------
        space_args_list : list
            List of arguments to pass to constructor of the vector space.
        """
        random_data = [dict(space_args=space_args) for space_args in space_args_list]
        return self.generate_tests([], random_data)
class MatrixLieAlgebraTestData(VectorSpaceTestData):
    """Test data for matrix Lie algebras: basis/matrix representation round trips."""

    def _basis_representation_matrix_representation_composition_data(
        self, space_cls, space_args_list, n_samples_list, rtol=gs.rtol, atol=gs.atol
    ):
        """Generate data to check that changing coordinates twice gives back the point.

        Parameters
        ----------
        space_cls : LieAlgebra
            Class of the space, i.e. a child class of LieAlgebra.
        space_args_list : list
            Arguments to pass to constructor of the manifold.
        n_samples_list : list
            List of numbers of samples to generate.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = [
            dict(
                space_args=space_args,
                matrix_rep=space_cls(*space_args).random_point(n_samples),
                rtol=rtol,
                atol=atol,
            )
            for space_args, n_samples in zip(space_args_list, n_samples_list)
        ]
        return self.generate_tests([], random_data)

    def _matrix_representation_basis_representation_composition_data(
        self, space_cls, space_args_list, n_samples_list, rtol=gs.rtol, atol=gs.atol
    ):
        """Generate data to check that changing coordinates twice gives back the point.

        Parameters
        ----------
        space_cls : LieAlgebra
            Class of the space, i.e. a child class of LieAlgebra.
        space_args_list : list
            Arguments to pass to constructor of the LieAlgebra.
        n_samples_list : list
            List of numbers of samples to generate.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = [
            dict(
                space_args=space_args,
                basis_rep=space_cls(*space_args).basis_representation(
                    space_cls(*space_args).random_point(n_samples)
                ),
                rtol=rtol,
                atol=atol,
            )
            for space_args, n_samples in zip(space_args_list, n_samples_list)
        ]
        return self.generate_tests([], random_data)
class ConnectionTestData(TestData):
    """Test data for affine connections: exp/log shapes, membership, geodesics."""

    def _exp_shape_data(
        self, connection_args_list, space_list, shape_list, n_samples_list
    ):
        """Generate data to check that exp returns an array of the expected shape.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        shape_list : list
            List of shapes for random data to generate.
        n_samples_list : list
            List of number of random data to generate.
        """
        random_data = []
        for connection_args, space, tangent_shape, n_samples in zip(
            connection_args_list, space_list, shape_list, n_samples_list
        ):
            base_point = space.random_point(n_samples)
            tangent_vec = space.to_tangent(
                gs.random.normal(size=(n_samples,) + tangent_shape), base_point
            )
            # All four batching combinations: (1, 1), (1, n), (n, 1), (n, n);
            # only the (1, 1) case yields an unbatched result shape.
            n_points_list = itertools.product([1, n_samples], [1, n_samples])
            expected_shape_list = [space.shape] + [(n_samples,) + space.shape] * 3
            for (n_tangent_vecs, n_base_points), expected_shape in zip(
                n_points_list, expected_shape_list
            ):
                random_data.append(
                    dict(
                        connection_args=connection_args,
                        tangent_vec=better_squeeze(tangent_vec[:n_tangent_vecs]),
                        base_point=better_squeeze(base_point[:n_base_points]),
                        expected_shape=expected_shape,
                    )
                )
        return self.generate_tests([], random_data)

    def _log_shape_data(self, connection_args_list, space_list, n_samples_list):
        """Generate data to check that log returns an array of the expected shape.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        n_samples_list : list
            List of number of random data to generate.
        """
        random_data = []
        for connection_args, space, n_samples in zip(
            connection_args_list, space_list, n_samples_list
        ):
            base_point = space.random_point(n_samples)
            point = space.random_point(n_samples)
            # Same four batching combinations as in _exp_shape_data.
            n_points_list = itertools.product([1, n_samples], [1, n_samples])
            expected_shape_list = [space.shape] + [(n_samples,) + space.shape] * 3
            for (n_points, n_base_points), expected_shape in zip(
                n_points_list, expected_shape_list
            ):
                random_data.append(
                    dict(
                        connection_args=connection_args,
                        point=better_squeeze(point[:n_points]),
                        base_point=better_squeeze(base_point[:n_base_points]),
                        expected_shape=expected_shape,
                    )
                )
        return self.generate_tests([], random_data)

    def _exp_belongs_data(
        self,
        connection_args_list,
        space_list,
        shape_list,
        n_samples_list,
        belongs_atol=gs.atol,
    ):
        """Generate data to check that exp gives a point on the manifold.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        shape_list : list
            List of shapes for random data to generate.
        n_samples_list : list
            List of number of random data to generate.
        belongs_atol : float
            Absolute tolerance for the belongs function.
        """
        random_data = []
        for connection_args, space, shape, n_tangent_vecs in zip(
            connection_args_list, space_list, shape_list, n_samples_list
        ):
            base_point = space.random_point()
            tangent_vec = space.to_tangent(
                gs.random.normal(size=(n_tangent_vecs,) + shape), base_point
            )
            random_data.append(
                dict(
                    connection_args=connection_args,
                    space=space,
                    tangent_vec=tangent_vec,
                    base_point=base_point,
                    belongs_atol=belongs_atol,
                )
            )
        return self.generate_tests([], random_data)

    def _log_is_tangent_data(
        self, connection_args_list, space_list, n_samples_list, is_tangent_atol=gs.atol
    ):
        """Generate data to check that log gives a tangent vector.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        n_samples_list : list
            List of number of random data to generate.
        is_tangent_atol : float
            Absolute tolerance for the is_tangent function.
        """
        random_data = []
        for connection_args, space, n_samples in zip(
            connection_args_list, space_list, n_samples_list
        ):
            point = space.random_point(n_samples)
            base_point = space.random_point()
            random_data.append(
                dict(
                    connection_args=connection_args,
                    space=space,
                    point=point,
                    base_point=base_point,
                    is_tangent_atol=is_tangent_atol,
                )
            )
        return self.generate_tests([], random_data)

    def _geodesic_ivp_belongs_data(
        self,
        connection_args_list,
        space_list,
        shape_list,
        n_points_list,
        belongs_atol=gs.atol,
    ):
        """Generate data to check that connection geodesics belong to manifold.

        The geodesic is defined by an initial value problem: an initial
        point and an initial tangent vector.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        shape_list : list
            List of shapes for random data to generate.
        n_points_list : list
            List of number of times on the geodesics.
        belongs_atol : float
            Absolute tolerance for the belongs function.
        """
        random_data = []
        for connection_args, space, n_points, shape in zip(
            connection_args_list, space_list, n_points_list, shape_list
        ):
            initial_point = space.random_point()
            initial_tangent_vec = space.to_tangent(
                gs.random.normal(size=shape), initial_point
            )
            random_data.append(
                dict(
                    connection_args=connection_args,
                    space=space,
                    n_points=n_points,
                    initial_point=initial_point,
                    initial_tangent_vec=initial_tangent_vec,
                    belongs_atol=belongs_atol,
                )
            )
        return self.generate_tests([], random_data)

    def _geodesic_bvp_belongs_data(
        self,
        connection_args_list,
        space_list,
        n_points_list,
        belongs_atol=gs.atol,
    ):
        """Generate data to check that connection geodesics belong to manifold.

        The geodesic is defined by a boundary value problem: an initial
        point and an end point.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        n_points_list : list
            List of number of points on the geodesics.
        belongs_atol : float
            Absolute tolerance for the belongs function.
        """
        random_data = []
        for connection_args, space, n_points in zip(
            connection_args_list,
            space_list,
            n_points_list,
        ):
            initial_point = space.random_point()
            end_point = space.random_point()
            random_data.append(
                dict(
                    connection_args=connection_args,
                    space=space,
                    n_points=n_points,
                    initial_point=initial_point,
                    end_point=end_point,
                    belongs_atol=belongs_atol,
                )
            )
        return self.generate_tests([], random_data)

    def _log_exp_composition_data(
        self,
        connection_args_list,
        space_list,
        n_samples_list,
        rtol=gs.rtol,
        atol=gs.atol,
    ):
        """Generate data to check that logarithm and exponential are inverse.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        n_samples_list : list
            List of number of random data to generate.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = []
        for connection_args, space, n_samples in zip(
            connection_args_list, space_list, n_samples_list
        ):
            point = space.random_point(n_samples)
            base_point = space.random_point()
            random_data.append(
                dict(
                    connection_args=connection_args,
                    point=point,
                    base_point=base_point,
                    rtol=rtol,
                    atol=atol,
                )
            )
        return self.generate_tests([], random_data)

    def _exp_log_composition_data(
        self,
        connection_args_list,
        space_list,
        shape_list,
        n_samples_list,
        rtol=gs.rtol,
        atol=gs.atol,
    ):
        """Generate data to check that exponential and logarithm are inverse.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        shape_list : list
            List of shapes for random data to generate.
        n_samples_list : list
            List of number of random data to generate.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = []
        for connection_args, space, shape, n_samples in zip(
            connection_args_list, space_list, shape_list, n_samples_list
        ):
            base_point = space.random_point()
            tangent_vec = space.to_tangent(
                gs.random.normal(size=(n_samples,) + shape), base_point
            )
            random_data.append(
                dict(
                    connection_args=connection_args,
                    tangent_vec=tangent_vec,
                    base_point=base_point,
                    rtol=rtol,
                    atol=atol,
                )
            )
        return self.generate_tests([], random_data)

    def _exp_ladder_parallel_transport_data(
        self,
        connection_args_list,
        space_list,
        shape_list,
        n_samples_list,
        n_rungs_list,
        alpha_list,
        scheme_list,
        rtol=gs.rtol,
        atol=gs.atol,
    ):
        """Generate data to check that end point of ladder matches exponential.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        shape_list : list
            List of shapes for random data to generate.
        n_samples_list : list
            List of number of random tangent vectors to generate.
        n_rungs_list : list
            List of number of rungs for the ladder.
        alpha_list : list
            List of exponents for the scaling of the vector to transport.
        scheme_list : list
            List of ladder schemes to test.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = []
        for (connection_args, space, shape, n_samples, n_rungs, alpha, scheme,) in zip(
            connection_args_list,
            space_list,
            shape_list,
            n_samples_list,
            n_rungs_list,
            alpha_list,
            scheme_list,
        ):
            base_point = space.random_point()
            tangent_vec = space.to_tangent(
                gs.random.normal(size=(n_samples,) + shape), base_point
            )
            # Single transport direction shared by the whole batch.
            direction = space.to_tangent(gs.random.normal(size=shape), base_point)
            random_data.append(
                dict(
                    connection_args=connection_args,
                    direction=direction,
                    tangent_vec=tangent_vec,
                    base_point=base_point,
                    scheme=scheme,
                    n_rungs=n_rungs,
                    alpha=alpha,
                    rtol=rtol,
                    atol=atol,
                )
            )
        return self.generate_tests([], random_data)

    def _exp_geodesic_ivp_data(
        self,
        connection_args_list,
        space_list,
        shape_list,
        n_samples_list,
        n_points_list,
        rtol=gs.rtol,
        atol=gs.atol,
    ):
        """Generate data to check that end point of geodesic matches exponential.

        Parameters
        ----------
        connection_args_list : list
            List of argument to pass to constructor of the connection.
        space_list : list
            List of manifolds on which the connection is defined.
        shape_list : list
            List of shapes for random data to generate.
        n_samples_list : list
            List of number of random data to generate.
        n_points_list : list
            List of number of times on the geodesics.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = []
        for connection_args, space, shape, n_samples, n_points in zip(
            connection_args_list,
            space_list,
            shape_list,
            n_samples_list,
            n_points_list,
        ):
            base_point = space.random_point()
            tangent_vec = space.to_tangent(
                gs.random.normal(size=(n_samples,) + shape), base_point
            )
            random_data.append(
                dict(
                    connection_args=connection_args,
                    n_points=n_points,
                    tangent_vec=tangent_vec,
                    base_point=base_point,
                    rtol=rtol,
                    atol=atol,
                )
            )
        return self.generate_tests([], random_data)
class RiemannianMetricTestData(ConnectionTestData):
    """Test data for Riemannian metrics: distance symmetry, transport isometry."""

    def _squared_dist_is_symmetric_data(
        self,
        metric_args_list,
        space_list,
        n_points_a_list,
        n_points_b_list,
        rtol=gs.rtol,
        atol=gs.atol,
    ):
        """Generate data to check that the squared geodesic distance is symmetric.

        Parameters
        ----------
        metric_args_list : list
            List of arguments to pass to constructor of the metric.
        space_list : list
            List of spaces on which the metric is defined.
        n_points_a_list : list
            List of number of points A to generate on the manifold.
        n_points_b_list : list
            List of number of points B to generate on the manifold.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = []
        for metric_args, space, n_points_a, n_points_b in zip(
            metric_args_list, space_list, n_points_a_list, n_points_b_list
        ):
            point_a = space.random_point(n_points_a)
            point_b = space.random_point(n_points_b)
            random_data.append(
                dict(
                    metric_args=metric_args,
                    point_a=point_a,
                    point_b=point_b,
                    rtol=rtol,
                    atol=atol,
                )
            )
        return self.generate_tests([], random_data)

    def _parallel_transport_ivp_is_isometry_data(
        self,
        metric_args_list,
        space_list,
        shape_list,
        n_samples_list,
        is_tangent_atol=gs.atol,
        rtol=gs.rtol,
        atol=gs.atol,
    ):
        """Generate data to check that parallel transport is an isometry.

        Transport along a geodesic defined by an initial direction
        (initial value problem).

        Parameters
        ----------
        metric_args_list : list
            List of arguments to pass to constructor of the metric.
        space_list : list
            List of spaces on which the metric is defined.
        shape_list : list
            List of shapes for random data to generate.
        n_samples_list : list
            List of number of random data to generate.
        is_tangent_atol : float
            Absolute tolerance for the is_tangent function.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = []
        for metric_args, space, shape, n_samples in zip(
            metric_args_list, space_list, shape_list, n_samples_list
        ):
            base_point = space.random_point()
            tangent_vec = space.to_tangent(
                gs.random.normal(size=(n_samples,) + shape), base_point
            )
            # Single transport direction shared by the whole batch.
            direction = space.to_tangent(gs.random.normal(size=shape), base_point)
            random_data.append(
                dict(
                    metric_args=metric_args,
                    space=space,
                    tangent_vec=tangent_vec,
                    base_point=base_point,
                    direction=direction,
                    is_tangent_atol=is_tangent_atol,
                    rtol=rtol,
                    atol=atol,
                )
            )
        return self.generate_tests([], random_data)

    def _parallel_transport_bvp_is_isometry_data(
        self,
        metric_args_list,
        space_list,
        shape_list,
        n_samples_list,
        is_tangent_atol=gs.atol,
        rtol=gs.rtol,
        atol=gs.atol,
    ):
        """Generate data to check that parallel transport is an isometry.

        Transport along a geodesic joining two points
        (boundary value problem).

        Parameters
        ----------
        metric_args_list : list
            List of arguments to pass to constructor of the metric.
        space_list : list
            List of spaces on which the metric is defined.
        shape_list : list
            List of shapes for random data to generate.
        n_samples_list : list
            List of number of random data to generate.
        is_tangent_atol : float
            Absolute tolerance for the is_tangent function.
        rtol : float
            Relative tolerance to test this property.
        atol : float
            Absolute tolerance to test this property.
        """
        random_data = []
        for metric_args, space, tangent_shape, n_tangent_vecs in zip(
            metric_args_list, space_list, shape_list, n_samples_list
        ):
            base_point = space.random_point()
            tangent_vec = space.to_tangent(
                gs.random.normal(size=(n_tangent_vecs,) + tangent_shape), base_point
            )
            end_point = space.random_point()
            random_data.append(
                dict(
                    metric_args=metric_args,
                    space=space,
                    tangent_vec=tangent_vec,
                    base_point=base_point,
                    end_point=end_point,
                    is_tangent_atol=is_tangent_atol,
                    rtol=rtol,
                    atol=atol,
                )
            )
        return self.generate_tests([], random_data)
| 2.609375 | 3 |
pi/scripts/cpu_et_all.py | bergloman/gadgetWiring | 1 | 12772368 | <reponame>bergloman/gadgetWiring
import os
import sys
import os.path
import datetime
# Return CPU temperature as a character string (Raspberry Pi `vcgencmd`).
def getCPUtemperature():
    raw = os.popen('vcgencmd measure_temp').readline()
    return raw.replace("temp=", "").replace("'C\n", "")
# Return RAM information (unit=kb) in a list:
# [0] total RAM, [1] used RAM, [2] free RAM
def getRAMinfo():
    output = os.popen('free')
    output.readline()  # skip the header row
    # The second line ("Mem: ...") carries the numbers we want.
    return output.readline().split()[1:4]
# Return % of CPU used by user as a character string
def getCPUuse():
    pipe = os.popen("top -n1 | awk '/Cpu\(s\):/ {print $2}'")
    return str(pipe.readline().strip())
# Return information about disk space as a list (unit included):
# [0] total disk space, [1] used disk space,
# [2] remaining disk space, [3] percentage of disk used
def getDiskSpace():
    output = os.popen("df -h /")
    output.readline()  # skip the header row
    # The second line describes the root filesystem.
    return output.readline().split()[1:5]
#########################
# CPU informatiom
CPU_temp = getCPUtemperature()
CPU_usage = getCPUuse()
print CPU_temp
print CPU_usage
# RAM information
# Output is in kb, here I convert it in Mb for readability
RAM_stats = getRAMinfo()
RAM_total = round(int(RAM_stats[0]) / 1000,1)
RAM_used = round(int(RAM_stats[1]) / 1000,1)
RAM_free = round(int(RAM_stats[2]) / 1000,1)
print RAM_total
print RAM_used
print RAM_free
# Disk information
DISK_stats = getDiskSpace()
DISK_total = DISK_stats[0]
DISK_free = DISK_stats[1]
DISK_perc = DISK_stats[3]
print DISK_total
print DISK_free
print DISK_perc
now_str = datetime.datetime.now().isoformat()
file_name = sys.argv[1]
if not(os.path.exists(file_name)):
hs = open(file_name, "w")
hs.write("now,cpu_temp,cpu_usage,ram_total,ram_used,ram_free,disk_total,disk_free,disk_perc\n")
hs.close()
hs = open(file_name, "a")
hs.write(now_str + ",")
hs.write(str(CPU_temp) + ",")
hs.write(str(CPU_usage) + ",")
hs.write(str(RAM_total) + ",")
hs.write(str(RAM_used) + ",")
hs.write(str(RAM_free) + ",")
hs.write(str(DISK_total) + ",")
hs.write(str(DISK_free) + ",")
hs.write(str(DISK_perc) + "\n")
hs.close()
| 3.046875 | 3 |
Projects/Opencv/read&write&showvideo.py | ankita080208/Hacktoberfest | 1 | 12772369 | import cv2
cap = cv2.VideoCapture(0)  # default camera (device index 0)
# DIVX-encoded AVI writer at 20 fps, 640x480.
# NOTE(review): the writer frame size must match the camera's actual frame
# size or the output file will be unplayable -- confirm the device delivers
# 640x480.
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.avi',fourcc,20.0,(640,480))
# cap.isOpened() is True while the camera (or file) is available.
while cap.isOpened():
    ret,frame=cap.read()  # ret: whether a frame was grabbed; frame: the image
    if ret:
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # optional grayscale
        # cv2.imshow('frame',frame)
        out.write(frame)  # record the frame to output.avi
        cv2.imshow('video',frame)
        # Quit on 'q'; waitKey(100) also paces the display loop (~10 fps).
        if cv2.waitKey(100) & 0xFF == ord('q'):
            break
    else:
        # Camera stopped delivering frames.
        break
# Release the camera and writer, then close all display windows.
cap.release()
out.release()
cv2.destroyAllWindows()
| 2.875 | 3 |
tests/test_pool.py | ndevenish/drmaa_futures | 0 | 12772370 | <filename>tests/test_pool.py<gh_stars>0
# coding: utf-8
"""Test the pool launching and management capabilities"""
import pytest
from collections import namedtuple
from drmaa_futures.master import bind_to_endpoint
# If we can't import drmaa (e.g. no good environment for it), then
# skip any tests in this file
try:
    import drmaa
except RuntimeError:
    # drmaa raises RuntimeError (not ImportError) when no DRMAA library is
    # configured in the environment; mark the whole module as skipped.
    pytestmark = pytest.mark.skip()

# (socket, endpoint) pair yielded by the `server` fixture below.
ServerInfo = namedtuple("ServerInfo", ["socket", "endpoint"])
@pytest.fixture
def server(url=None):
    """Basic server for clients to connect to.

    Yields a ServerInfo(socket, endpoint); the REP socket and its context
    are torn down after the test.
    """
    # Bug fix: zmq was used below but never imported anywhere in this
    # module (NameError on first use); import it here.  Consider moving
    # this to the top-level import block.
    import zmq

    # NOTE(review): pytest resolves fixture parameters as fixture requests,
    # so this `url` default is likely never honored -- confirm intent.
    url = url or "tcp://127.0.0.1:5555"
    c = zmq.Context()
    socket = c.socket(zmq.REP)
    socket.RCVTIMEO = 500  # receive timeout in ms so tests never hang
    endpoint = bind_to_endpoint(socket)
    try:
        yield ServerInfo(socket, endpoint)
    finally:
        # Always clean up the socket and context, even if the test fails.
        socket.close()
        c.term()
@pytest.fixture
def pool(server):
    # Placeholder fixture: will eventually build a worker pool on top of the
    # running ``server`` fixture.  Currently yields nothing.
    pass
def test_a():
    # Placeholder test: keeps the module collectable while real pool tests
    # are being written.
    pass
src/CheckersBot.py | kartikkukreja/blog-codes | 182 | 12772371 | <gh_stars>100-1000
from copy import deepcopy
from time import time
# Global Constants
MaxUtility = 1e9  # utility magnitude treated as a guaranteed win/loss
IsPlayerBlack = True  # side this program plays; overwritten in __main__ from input
MaxAllowedTimeInSeconds = 9.5  # per-move time budget for iterative deepening
MaxDepth = 100  # upper bound on iterative-deepening search depth
class CheckersState:
    """A checkers position: the board grid, the side to move, and the hop
    sequence that produced this position."""

    def __init__(self, grid, blackToMove, moves):
        self.grid = grid  # 8x8 lists: 'b'/'w' pawns, 'B'/'W' kings, '_' empty
        self.blackToMove = blackToMove
        self.moves = moves  # Hops taken by a disc to reach the current state

    # This just checks for whether or not all pieces of a player have been
    # eliminated.  It does not check for whether a player has a move or not.
    # In that case, there will be no successors for that player and alpha
    # beta search will return Min/Max Utility.
    def isTerminalState(self):
        blackSeen, whiteSeen = False, False
        for row in self.grid:  # BUGFIX: previously read the module-level ``grid``
            for cell in row:
                if cell == 'b' or cell == 'B': blackSeen = True
                elif cell == 'w' or cell == 'W': whiteSeen = True
                if blackSeen and whiteSeen: return False
        self.isLoserBlack = whiteSeen
        return True

    def getTerminalUtility(self):
        """Utility from this program's point of view: +MaxUtility on a win."""
        return MaxUtility if IsPlayerBlack != self.isLoserBlack else -MaxUtility

    def getSuccessors(self):
        """Generate all legal successor states.  Jumps are mandatory: if any
        jump exists, only jump successors are returned."""
        def getSteps(cell):
            # Pawns step toward the far side; kings (uppercase) get both sets.
            whiteSteps = [(-1, -1), (-1, 1)]
            blackSteps = [(1, -1), (1, 1)]
            steps = []
            if cell != 'b': steps.extend(whiteSteps)
            if cell != 'w': steps.extend(blackSteps)
            return steps

        def generateMoves(board, i, j, successors):
            # Simple (non-capturing) diagonal moves from square (i, j).
            for step in getSteps(board[i][j]):
                x, y = i + step[0], j + step[1]
                if x >= 0 and x < 8 and y >= 0 and y < 8 and board[x][y] == '_':
                    boardCopy = deepcopy(board)
                    boardCopy[x][y], boardCopy[i][j] = boardCopy[i][j], '_'
                    # A pawn is promoted when it reaches the last row
                    if (x == 7 and self.blackToMove) or (x == 0 and not self.blackToMove):
                        boardCopy[x][y] = boardCopy[x][y].upper()
                    successors.append(CheckersState(boardCopy, not self.blackToMove, [(i, j), (x, y)]))

        def generateJumps(board, i, j, moves, successors):
            # Depth-first enumeration of (multi-)jump sequences starting at
            # (i, j).  Captures are performed in place, recursed on, and undone.
            jumpEnd = True
            for step in getSteps(board[i][j]):
                x, y = i + step[0], j + step[1]
                if x >= 0 and x < 8 and y >= 0 and y < 8 and board[x][y] != '_' and board[i][j].lower() != board[x][y].lower():
                    xp, yp = x + step[0], y + step[1]
                    if xp >= 0 and xp < 8 and yp >= 0 and yp < 8 and board[xp][yp] == '_':
                        board[xp][yp], save = board[i][j], board[x][y]
                        board[i][j] = board[x][y] = '_'
                        previous = board[xp][yp]
                        # A pawn is promoted when it reaches the last row
                        if (xp == 7 and self.blackToMove) or (xp == 0 and not self.blackToMove):
                            board[xp][yp] = board[xp][yp].upper()
                        moves.append((xp, yp))
                        generateJumps(board, xp, yp, moves, successors)
                        moves.pop()
                        board[i][j], board[x][y], board[xp][yp] = previous, save, '_'
                        jumpEnd = False
            if jumpEnd and len(moves) > 1:
                successors.append(CheckersState(deepcopy(board), not self.blackToMove, deepcopy(moves)))

        player = 'b' if self.blackToMove else 'w'
        successors = []
        # generate jumps (mandatory when available)
        for i in range(8):  # range instead of xrange: valid on both Py2 and Py3
            for j in range(8):
                if self.grid[i][j].lower() == player:
                    generateJumps(self.grid, i, j, [(i, j)], successors)
        if len(successors) > 0: return successors
        # generate simple moves
        for i in range(8):
            for j in range(8):
                if self.grid[i][j].lower() == player:
                    generateMoves(self.grid, i, j, successors)
        return successors
def piecesCount(state):
    """Material balance from this program's point of view.

    A pawn counts 1.0 and a king 1.5; the result is positive when the side
    we are playing (``IsPlayerBlack``) is ahead on material.
    """
    weights = {'b': 1.0, 'B': 1.5, 'w': -1.0, 'W': -1.5}
    balance = 0.0
    for row in state.grid:
        for cell in row:
            balance += weights.get(cell, 0.0)
    # ``balance`` is black-minus-white; flip the sign when playing white.
    return balance if IsPlayerBlack else -balance
def iterativeDeepeningAlphaBeta(state, evaluationFunc):
    # Iterative-deepening alpha-beta search: repeatedly deepen the search
    # until the time budget (MaxAllowedTimeInSeconds) runs out, returning the
    # move list of the best successor found.  ``evaluationFunc`` scores
    # non-terminal leaf positions.
    startTime = time()
    def alphaBetaSearch(state, alpha, beta, depth):
        def maxValue(state, alpha, beta, depth):
            # Our turn: maximize; cut off when val >= beta.
            val = -MaxUtility
            for successor in state.getSuccessors():
                val = max(val, alphaBetaSearch(successor, alpha, beta, depth))
                if val >= beta: return val
                alpha = max(alpha, val)
            return val
        def minValue(state, alpha, beta, depth):
            # Opponent's turn: minimize; cut off when val <= alpha.
            # Note depth is only decremented here, so one "depth" unit covers
            # a full ply (both players moving once).
            val = MaxUtility
            for successor in state.getSuccessors():
                val = min(val, alphaBetaSearch(successor, alpha, beta, depth - 1))
                if val <= alpha: return val
                beta = min(beta, val)
            return val
        if state.isTerminalState(): return state.getTerminalUtility()
        # Out of depth or out of time: fall back to the static evaluation.
        if depth <= 0 or time() - startTime > MaxAllowedTimeInSeconds: return evaluationFunc(state)
        return maxValue(state, alpha, beta, depth) if state.blackToMove == IsPlayerBlack else minValue(state, alpha, beta, depth)
    bestMove = None
    # Deepen one level at a time; the deepest completed (or partially
    # completed) search determines the returned move.
    for depth in xrange(1, MaxDepth):
        if time() - startTime > MaxAllowedTimeInSeconds: break
        val = -MaxUtility
        for successor in state.getSuccessors():
            score = alphaBetaSearch(successor, -MaxUtility, MaxUtility, depth)
            if score > val:
                val, bestMove = score, successor.moves
    return bestMove
if __name__ == '__main__':
    # Python 2 entry point.  Input: player colour, board size, then the rows
    # of the board.  Output: number of hops, then each visited square.
    player = raw_input()
    boardSize = int(raw_input())
    grid = []
    for i in xrange(boardSize):
        grid.append(raw_input())
    IsPlayerBlack = player[0] == 'b'
    state = CheckersState([list(row.rstrip()) for row in grid], IsPlayerBlack, [])
    move = iterativeDeepeningAlphaBeta(state, piecesCount)
    print len(move) - 1
    for step in move:
        print step[0], step[1]
| 2.65625 | 3 |
localgraphclustering/algorithms/__init__.py | vishalbelsare/LocalGraphClustering | 106 | 12772372 | <gh_stars>100-1000
from .acl_list import acl_list
from .eig2_nL import eig2_nL, eig2nL_subgraph
from .fista_dinput_dense import fista_dinput_dense
from .sweepcut import sweepcut
| 1.023438 | 1 |
examples/decompose_fmri_stability.py | johnbanq/modl | 135 | 12772373 | # Author: <NAME>
# License: BSD
import warnings
from nilearn.input_data import NiftiMasker
warnings.filterwarnings("ignore", category=DeprecationWarning)
import os
from os.path import expanduser, join
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from joblib import Memory, dump
from joblib import Parallel, delayed
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from modl.datasets import fetch_adhd
from modl.decomposition.fmri import fMRIDictFact
from modl.decomposition.stability import mean_amari_discrepency
from modl.plotting.fmri import display_maps
from nilearn.datasets import fetch_atlas_smith_2009
from modl.utils.system import get_cache_dirs
# --- hyper-parameters for the online dictionary-learning decomposition ---
batch_size = 200
learning_rate = .92
method = 'masked'
step_size = 0.01
reduction_ = 8  # stochastic subsampling reduction factor
alpha = 1e-3  # sparsity penalty
n_epochs = 4
verbose = 15
n_jobs = 70  # parallel workers for the stability runs
smoothing_fwhm = 6
components_list = [20, 40, 80, 120, 200, 300, 500]  # dictionary sizes to compare
n_runs = 20  # random restarts per dictionary size
# Smith 2009 20-component RSN atlas (unused below -- presumably a leftover
# initialization option; TODO confirm).
dict_init = fetch_atlas_smith_2009().rsn20
# ADHD resting-state data: hold out 2 subjects for scoring.
dataset = fetch_adhd(n_subjects=40)
data = dataset.rest.values
train_data, test_data = train_test_split(data, test_size=2, random_state=0)
train_imgs, train_confounds = zip(*train_data)
test_imgs, test_confounds = zip(*test_data)
mask = dataset.mask
mem = Memory(location=get_cache_dirs()[0])
masker = NiftiMasker(mask_img=mask).fit()
def fit_single(train_imgs, test_imgs, n_components, random_state):
    # Fit one dictionary of ``n_components`` spatial maps on the training
    # images and return (components, held-out score).  Uses the module-level
    # hyper-parameters, mask, cache and ``train_confounds``.
    dict_fact = fMRIDictFact(smoothing_fwhm=smoothing_fwhm,
                             method=method,
                             step_size=step_size,
                             mask=mask,
                             memory=mem,
                             memory_level=2,
                             verbose=verbose,
                             n_epochs=n_epochs,
                             n_jobs=1,
                             random_state=random_state,
                             n_components=n_components,
                             positive=True,
                             learning_rate=learning_rate,
                             batch_size=batch_size,
                             reduction=reduction_,
                             alpha=alpha,
                             callback=None,
                             )
    dict_fact.fit(train_imgs, confounds=train_confounds)
    score = dict_fact.score(test_imgs)
    return dict_fact.components_, score
def fit_many_runs(train_imgs, test_imgs, components_list, n_runs=10, n_jobs=1):
    """Fit dictionaries for every size in ``components_list`` over ``n_runs``
    random restarts and keep the most stable configuration.

    Returns ``(discrepencies, var_discrepencies, components)`` where
    ``components`` are the best-scoring maps of the most stable size.
    """
    random_states = check_random_state(0).randint(0, int(1e7), size=n_runs)

    cached_fit = mem.cache(fit_single)
    res = Parallel(n_jobs=n_jobs)(delayed(cached_fit)(
        train_imgs, test_imgs, n_components, random_state)
                                  for n_components in components_list
                                  for random_state in random_states
                                  )
    components, scores = zip(*res)
    shape = (len(components_list), len(random_states))
    components = np.array(components).reshape(shape).tolist()
    scores = np.array(scores).reshape(shape).tolist()

    discrepencies = []
    var_discrepencies = []
    best_components = []
    for n_components, these_components, these_scores in zip(components_list,
                                                            components,
                                                            scores):
        discrepency, var_discrepency = mean_amari_discrepency(
            these_components)
        best_estimator = these_components[np.argmin(these_scores)]
        # BUGFIX: the variance was previously appended to *both* lists,
        # silently discarding the mean discrepency.
        discrepencies.append(discrepency)
        var_discrepencies.append(var_discrepency)
        best_components.append(best_estimator)
    discrepencies = np.array(discrepencies)
    var_discrepencies = np.array(var_discrepencies)
    best_components = np.array(best_components)
    # Most stable number of components = smallest mean Amari discrepency.
    components = best_components[np.argmin(discrepencies)]
    return discrepencies, var_discrepencies, components
# Run the stability analysis and persist the components, the discrepency
# curves, and the two summary figures under ``output_dir``.
output_dir = expanduser('~/output_drago4/modl/fmri_stability2')
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
discrepencies, var_discrepencies, components = fit_many_runs(
    train_imgs, test_imgs,
    components_list,
    n_jobs=n_jobs,
    n_runs=n_runs)
# Project the selected components back to a Nifti image and save them.
components_img = masker.inverse_transform(components)
components_img.to_filename(
    join(output_dir, 'components.nii.gz'))
dump((components_list, discrepencies, var_discrepencies),
     join(output_dir, 'discrepencies.pkl'))
fig = plt.figure()
display_maps(fig, components_img)
plt.savefig(join(output_dir, 'components.pdf'))
# Discrepency-vs-size curve with a +/- variance band.
fig, ax = plt.subplots(1, 1)
ax.fill_between(components_list, discrepencies - var_discrepencies,
                discrepencies + var_discrepencies, alpha=0.5)
ax.plot(components_list, discrepencies, marker='o')
ax.set_xlabel('Number of components')
ax.set_ylabel('Mean Amari discrepency')
sns.despine(fig)
fig.suptitle('Stability selection using DL')
plt.savefig(join(output_dir, 'discrepencies.pdf'))
| 1.984375 | 2 |
programs/pgm10_10.py | danielsunzhongyuan/python_practice | 0 | 12772374 | #
# This file contains the Python code from Program 10.10 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by <NAME>.
#
# Copyright (c) 2003 by <NAME>, P.Eng. All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm10_10.txt
#
class AVLTree(BinarySearchTree):
    def balance(self):
        """Rebalance this subtree.

        Recomputes the cached height, then applies the single or double
        rotation dictated by which child is too heavy and which way that
        child itself leans.
        """
        self.adjustHeight()
        factor = self.balanceFactor
        if factor > 1:
            # Left-heavy: LL rotation when the left child leans left,
            # otherwise an LR double rotation.
            rotate = self.doLLRotation if self._left.balanceFactor > 0 else self.doLRRotation
            rotate()
        elif factor < -1:
            # Right-heavy: mirror image of the cases above.
            rotate = self.doRRRotation if self._right.balanceFactor < 0 else self.doRLRotation
            rotate()
    # ...
| 3.15625 | 3 |
tests/test_states.py | cgogolin/strawberryfields | 0 | 12772375 | <reponame>cgogolin/strawberryfields<filename>tests/test_states.py
"""
Unit tests for :class:`strawberryfields.backends.states`.
"""
import os
import sys
import signal
import unittest
import numpy as np
from numpy import pi
from scipy.special import factorial
from scipy.stats import multivariate_normal
import strawberryfields as sf
from strawberryfields.ops import *
from strawberryfields.utils import *
from strawberryfields import backends
from defaults import BaseTest, FockBaseTest, GaussianBaseTest
from strawberryfields.backends.shared_ops import rotation_matrix as R
# Shared test parameters: coherent amplitude, squeezing magnitude and phase.
a = 0.3+0.1j
r = 0.23
phi = 0.123
def wigner(grid, mu, cov):
    """Evaluate the Gaussian Wigner function with mean ``mu`` and covariance
    ``cov`` at the phase-space points in ``grid``."""
    return multivariate_normal(mu, cov, allow_singular=True).pdf(grid)
class BackendStateCreation(BaseTest):
    """Tests for creating full and reduced states directly on the backend."""
    num_subsystems = 3

    def test_full_state_creation(self):
        state = self.circuit.state(modes=None)
        self.assertEqual(state.num_modes, 3)
        self.assertEqual(state.hbar, self.hbar)
        self.assertEqual(state.mode_names, {0: 'q[0]', 1: 'q[1]', 2: 'q[2]'})
        self.assertEqual(state.mode_indices, {'q[0]': 0, 'q[1]': 1, 'q[2]': 2})
        if isinstance(self.backend, backends.BaseFock):
            # (removed a leftover debug print of self.D)
            self.assertEqual(state.cutoff_dim, self.D)

    def test_reduced_state_creation(self):
        # Request only modes 0 and 2; names/indices must be remapped.
        state = self.circuit.state(modes=[0, 2])
        self.assertEqual(state.num_modes, 2)
        self.assertEqual(state.mode_names, {0: 'q[0]', 1: 'q[2]'})
        self.assertEqual(state.mode_indices, {'q[0]': 0, 'q[2]': 1})

    def test_reduced_state_fidelity(self):
        # Tracing out other modes must leave mode 0 a coherent state.
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_coherent_state(a, 0)
        self.circuit.prepare_squeezed_state(r, phi, 1)
        state = self.circuit.state(modes=[0])
        f = state.fidelity_coherent([a])
        self.assertAllAlmostEqual(f, 1, delta=self.tol)

    def test_reduced_state_fock_probs(self):
        # Fock probabilities of reduced mode 0 must match |<n|alpha>|^2.
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_coherent_state(a, 0)
        self.circuit.prepare_squeezed_state(r, phi, 1)
        state = self.circuit.state(modes=[0])
        probs = np.array([state.fock_prob([i]) for i in range(self.D)]).T
        ref_state = np.array([np.exp(-0.5*np.abs(a)**2)*a**n/np.sqrt(factorial(n)) for n in range(self.D)])
        ref_probs = np.tile(np.abs(ref_state) ** 2, self.bsize)
        self.assertAllAlmostEqual(probs.flatten(), ref_probs.flatten(), delta=self.tol)
class FrontendStateCreation(BaseTest):
    """Tests for creating states through the frontend Engine API."""
    num_subsystems = 3
    def setUp(self):
        super().setUp()
        # This API version of sf.Engine returns (engine, register).
        self.eng, q = sf.Engine(self.num_subsystems, hbar=self.hbar)
    def test_full_state_creation(self):
        self.eng.reset()
        q = self.eng.register
        with self.eng:
            Coherent(a) | q[0]
            Squeezed(r, phi) | q[1]
        state = self.eng.run(backend=self.backend_name, cutoff_dim=self.D)
        self.assertEqual(state.num_modes, 3)
        self.assertEqual(state.hbar, self.hbar)
        self.assertEqual(state.mode_names, {0: 'q[0]', 1: 'q[1]', 2: 'q[2]'})
        self.assertEqual(state.mode_indices, {'q[0]': 0, 'q[1]': 1, 'q[2]': 2})
class BaseStateMethods(BaseTest):
    """Tests for methods shared by all base state classes."""
    num_subsystems = 2

    def test_mean_photon(self):
        # For a coherent state |alpha>, the mean photon number is |alpha|^2.
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_coherent_state(a, 0)
        state = self.circuit.state()
        mean_photon = state.mean_photon(0)
        self.assertAllAlmostEqual(mean_photon, np.abs(a)**2, delta=self.tol)

    def test_rdm(self):
        # Reduced density matrix of mode 0 must equal |alpha><alpha|.
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_coherent_state(a, 0)
        self.circuit.prepare_coherent_state(0.1, 1)
        state = self.circuit.state()
        rdm = state.reduced_dm(0, cutoff=self.D)
        ket = coherent_state(a, fock_dim=self.D)
        rdm_exact = np.outer(ket, ket.conj())
        if self.batched:
            # BUGFIX: np.tile returns a new array -- the tiled reference was
            # previously discarded, leaving the batched branch a no-op.
            rdm_exact = np.tile(rdm_exact, [self.bsize, 1])
        self.assertAllAlmostEqual(rdm, rdm_exact, delta=self.tol)
class BaseFockStateMethods(FockBaseTest):
    """Tests for methods specific to Fock-basis state classes."""
    num_subsystems = 2
    def test_ket(self):
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.displacement(a, 0)
        state = self.circuit.state()
        if self.args.mixed:
            # Mixed backends cannot expose a state vector.
            self.assertEqual(state.is_pure, False)
            return
        else:
            self.assertEqual(state.is_pure, True)
        ket = state.ket()
        ket0exact = coherent_state(a, fock_dim=self.D)
        ket0 = np.sum(ket,axis=-1)
        if self.batched:
            # NOTE(review): np.tile returns a new array, so this result is
            # discarded and the batched branch has no effect -- confirm
            # whether ``ket0`` or ``ket0exact`` was meant to be reassigned.
            np.tile(ket0, [self.bsize, 1])
        self.assertAllAlmostEqual(ket0, ket0exact, delta=self.tol)
class BaseGaussianStateMethods(GaussianBaseTest):
    """Tests for methods specific to Gaussian state classes."""
    num_subsystems = 2
    # can push values higher in Gaussian backend
    a = 1+0.5j
    r = 2
    phi = -0.5
    def test_coherent_methods(self):
        # Mode 0 (coherent) should report is_coherent; mode 1 (squeezed) not.
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_coherent_state(self.a, 0)
        self.circuit.prepare_squeezed_state(self.r, self.phi, 1)
        state = self.circuit.state()
        coherent_check = []
        for i in range(2):
            coherent_check.append(state.is_coherent(i))
        alpha_list = state.displacement()
        self.assertAllEqual(coherent_check, [True, False])
        self.assertAllAlmostEqual(alpha_list, [self.a, 0.], delta=self.tol)
    def test_squeezing_methods(self):
        # Mirror of the above for the squeezing predicates and parameters.
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_coherent_state(self.a, 0)
        self.circuit.prepare_squeezed_state(self.r, self.phi, 1)
        state = self.circuit.state()
        squeezing_check = []
        for i in range(2):
            squeezing_check.append(state.is_squeezed(i))
        z_list = np.array(state.squeezing())
        self.assertAllEqual(squeezing_check, [False, True])
        self.assertAllAlmostEqual(z_list, [[0.,0.], [self.r, self.phi]], delta=self.tol)
class QuadExpectation(BaseTest):
    """Tests for quadrature expectation values and variances."""
    num_subsystems = 1
    def test_vacuum(self):
        # Vacuum: <x_phi> = 0 and Var(x_phi) = hbar/2 for any angle.
        state = self.circuit.state()
        res = np.array(state.quad_expectation(0, phi=pi/4)).T
        res_exact = np.tile(np.array([0, self.hbar/2.]), self.bsize)
        self.assertAllAlmostEqual(res.flatten(), res_exact.flatten(), delta=self.tol)
    def test_squeezed_coherent(self):
        # Displaced squeezed state: compare against the analytic mean and
        # variance of the rotated quadrature x_qphi.
        qphi = 0.78
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_displaced_squeezed_state(a, r, phi, 0)
        state = self.circuit.state()
        res = np.array(state.quad_expectation(0, phi=qphi)).T
        xphi_mean = (a.real*np.cos(qphi) + a.imag*np.sin(qphi)) * np.sqrt(2*self.hbar)
        xphi_var = (np.cosh(2*r) - np.cos(phi-2*qphi)*np.sinh(2*r)) * self.hbar/2
        res_exact = np.tile(np.array([xphi_mean, xphi_var]), self.bsize)
        self.assertAllAlmostEqual(res.flatten(), res_exact.flatten(), delta=self.tol)
class WignerSingleMode(BaseTest):
    """Compare single-mode Wigner functions against analytic Gaussians."""
    num_subsystems = 1
    batched = False
    bsize = 1
    # wigner parameters: the phase-space grid the functions are sampled on
    xmin = -5
    xmax = 5
    dx = 0.1
    xvec = np.arange(xmin, xmax, dx)
    X, P = np.meshgrid(xvec, xvec)
    grid = np.empty(X.shape + (2,))
    grid[:, :, 0] = X
    grid[:, :, 1] = P
    def test_vacuum(self):
        if self.batched:
            return
        state = self.circuit.state()
        W = state.wigner(0, self.xvec, self.xvec)
        # exact wigner function: Gaussian with covariance (hbar/2) * I
        mu = [0,0]
        cov = np.identity(2)*self.hbar/2.
        Wexact = wigner(self.grid, mu, cov)
        self.assertAllAlmostEqual(W, Wexact, delta=self.tol)
    def test_squeezed_coherent(self):
        if self.batched:
            return
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_coherent_state(a, 0)
        self.circuit.squeeze(r*np.exp(1j*phi), 0)
        state = self.circuit.state()
        W = state.wigner(0, self.xvec, self.xvec)
        rot = R(phi/2)
        # exact wigner function: displaced, rotated squeezed Gaussian
        alpha = a*np.cosh(r) - np.conjugate(a)*np.exp(1j*phi)*np.sinh(r)
        mu = np.array([alpha.real, alpha.imag])*np.sqrt(2*self.hbar)
        cov = np.diag([np.exp(-2*r), np.exp(2*r)])
        cov = np.dot(rot, np.dot(cov, rot.T))*self.hbar/2.
        Wexact = wigner(self.grid, mu, cov)
        self.assertAllAlmostEqual(W, Wexact, delta=self.tol)
class WignerTwoMode(BaseTest):
    """Compare per-mode Wigner functions of a two-mode squeezed preparation."""
    num_subsystems = 2
    # wigner parameters: the phase-space grid the functions are sampled on
    xmin = -5
    xmax = 5
    dx = 0.1
    xvec = np.arange(xmin, xmax, dx)
    X, P = np.meshgrid(xvec, xvec)
    grid = np.empty(X.shape + (2,))
    grid[:, :, 0] = X
    grid[:, :, 1] = P
    def test_two_mode_squeezed(self):
        if self.batched:
            return
        # Opposite squeezing on each mode; each marginal stays Gaussian.
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_squeezed_state(r, 0, 0)
        self.circuit.prepare_squeezed_state(-r, 0, 1)
        state = self.circuit.state()
        W0 = state.wigner(0, self.xvec, self.xvec)
        W1 = state.wigner(1, self.xvec, self.xvec)
        # exact wigner function for each mode (swapped variances)
        mu = np.array([0, 0])*np.sqrt(2*self.hbar)
        cov = np.diag([np.exp(-2*r), np.exp(2*r)])*self.hbar/2
        W0exact = wigner(self.grid, mu, cov)
        cov = np.diag([np.exp(2*r), np.exp(-2*r)])*self.hbar/2
        W1exact = wigner(self.grid, mu, cov)
        self.assertAllAlmostEqual(W0, W0exact, delta=self.tol)
        self.assertAllAlmostEqual(W1, W1exact, delta=self.tol)
class InitialStateFidelityTests(BaseTest):
    """Fidelity tests."""
    num_subsystems = 2
    def test_vacuum(self):
        state = self.circuit.state()
        self.assertAllAlmostEqual(state.fidelity_vacuum(), 1, delta=self.tol)
    def test_coherent_fidelity(self):
        # Preparing and displacing should both give unit fidelity with the
        # reference coherent state on each mode.
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_coherent_state(a, 0)
        self.circuit.displacement(a, 1)
        state = self.circuit.state()
        if isinstance(self.backend, backends.BaseFock):
            in_state = coherent_state(a, basis='fock', fock_dim=self.D)
        else:
            in_state = coherent_state(a, basis='gaussian')
        self.assertAllAlmostEqual(state.fidelity(in_state, 0), 1, delta=self.tol)
        self.assertAllAlmostEqual(state.fidelity(in_state, 1), 1, delta=self.tol)
        self.assertAllAlmostEqual(state.fidelity_coherent([a,a]), 1, delta=self.tol)
    def test_squeezed_fidelity(self):
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_squeezed_state(r, phi, 0)
        self.circuit.squeeze(r*np.exp(1j*phi), 1)
        state = self.circuit.state()
        if isinstance(self.backend, backends.BaseFock):
            in_state = squeezed_state(r, phi, basis='fock', fock_dim=self.D)
        else:
            in_state = squeezed_state(r, phi, basis='gaussian')
        self.assertAllAlmostEqual(state.fidelity(in_state, 0), 1, delta=self.tol)
        self.assertAllAlmostEqual(state.fidelity(in_state, 1), 1, delta=self.tol)
    def test_squeezed_coherent_fidelity(self):
        self.circuit.reset(pure=self.kwargs['pure'])
        self.circuit.prepare_displaced_squeezed_state(a, r, phi, 0)
        self.circuit.squeeze(r*np.exp(1j*phi), 1)
        self.circuit.displacement(a, 1)
        state = self.circuit.state()
        if isinstance(self.backend, backends.BaseFock):
            in_state = displaced_squeezed_state(a, r, phi, basis='fock', fock_dim=self.D)
        else:
            in_state = displaced_squeezed_state(a, r, phi, basis='gaussian')
        self.assertAllAlmostEqual(state.fidelity(in_state, 0), 1, delta=self.tol)
        self.assertAllAlmostEqual(state.fidelity(in_state, 1), 1, delta=self.tol)
# Coherent-state amplitudes swept by the Fock-probability tests below.
mag_alphas = np.linspace(0, .8, 4)
phase_alphas = np.linspace(0, 2 * np.pi, 7, endpoint=False)
class FockProbabilities(BaseTest):
    """Per-Fock-state probability tests for single-mode Gaussian states."""
    num_subsystems = 1
    def test_prob_fock_gaussian(self):
        """Tests that probabilities of particular Fock states |n> are correct for a gaussian state."""
        for mag_alpha in mag_alphas:
            for phase_alpha in phase_alphas:
                self.circuit.reset(pure=self.kwargs['pure'])
                alpha = mag_alpha * np.exp(1j * phase_alpha)
                # Reference Poissonian Fock distribution of |alpha>.
                ref_state = np.array([np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(factorial(n)) for n in range(self.D)])
                ref_probs = np.abs(ref_state) ** 2
                self.circuit.prepare_coherent_state(alpha, 0)
                state = self.circuit.state()
                for n in range(self.D):
                    prob_n = state.fock_prob([n])
                    self.assertAllAlmostEqual(prob_n, ref_probs[n], delta=self.tol)
class AllFockProbsSingleMode(FockBaseTest):
    """Full-Fock-basis probability tests for single-mode pure states."""
    num_subsystems = 1
    def test_all_fock_probs_pure(self):
        """Tests that the numeric probabilities in the full Fock basis are correct for a one-mode pure state."""
        for mag_alpha in mag_alphas:
            for phase_alpha in phase_alphas:
                self.circuit.reset(pure=True)
                alpha = mag_alpha * np.exp(1j * phase_alpha)
                ref_state = np.array([np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(factorial(n)) for n in range(self.D)])
                ref_probs = np.abs(ref_state) ** 2
                self.circuit.prepare_coherent_state(alpha, 0)
                state = self.circuit.state()
                probs = state.all_fock_probs().flatten()
                ref_probs = np.tile(ref_probs, self.bsize)
                self.assertAllAlmostEqual(probs, ref_probs, delta=self.tol)
class AllFockProbsTwoMode(FockBaseTest):
    """Fock-basis probability tests for two-mode states."""
    num_subsystems = 2

    def test_prob_fock_state_nongaussian(self):
        """Tests that probabilities of particular Fock states |n> are correct for a nongaussian state."""
        for mag_alpha in mag_alphas:
            for phase_alpha in phase_alphas:
                self.circuit.reset(pure=self.kwargs['pure'])
                alpha = mag_alpha * np.exp(1j * phase_alpha)
                ref_state = np.array([np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(factorial(n)) for n in range(self.D)])
                ref_probs = np.abs(ref_state) ** 2
                self.circuit.prepare_coherent_state(alpha, 0)
                self.circuit.prepare_fock_state(self.D // 2, 1)
                state = self.circuit.state()
                for n in range(self.D):
                    prob_n = state.fock_prob([n, self.D // 2])
                    self.assertAllAlmostEqual(prob_n, ref_probs[n], delta=self.tol)

    def test_all_fock_state_probs(self):
        """Tests that the numeric probabilities in the full Fock basis are correct for a two-mode gaussian state."""
        for mag_alpha in mag_alphas:
            for phase_alpha in phase_alphas:
                self.circuit.reset(pure=self.kwargs['pure'])
                alpha = mag_alpha * np.exp(1j * phase_alpha)
                ref_state1 = np.array([np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(factorial(n)) for n in range(self.D)])
                ref_state2 = np.array([np.exp(-0.5 * np.abs(-alpha) ** 2) * (-alpha) ** n / np.sqrt(factorial(n)) for n in range(self.D)])
                ref_state = np.outer(ref_state1, ref_state2)
                ref_probs = np.abs(np.reshape(ref_state ** 2, -1))
                ref_probs = np.tile(ref_probs, self.bsize)
                self.circuit.prepare_coherent_state(alpha, 0)
                self.circuit.prepare_coherent_state(-alpha, 1)
                state = self.circuit.state()
                # The assertion does not depend on any Fock index; it was
                # previously repeated D*D times inside a redundant double
                # loop, followed by a dead ``if mag_alpha == 0.: pass``.
                probs = state.all_fock_probs().flatten()
                self.assertAllAlmostEqual(probs, ref_probs, delta=self.tol)
if __name__ == '__main__':
    # run the tests in this file by loading each test case class explicitly
    suite = unittest.TestSuite()
    tests = [
        BackendStateCreation,
        FrontendStateCreation,
        BaseStateMethods,
        BaseFockStateMethods,
        BaseGaussianStateMethods,
        QuadExpectation,
        WignerSingleMode,
        WignerTwoMode,
        InitialStateFidelityTests,
        # FockProbabilities,
        # AllFockProbsSingleMode,
        # AllFockProbsTwoMode
    ]
    for t in tests:
        ttt = unittest.TestLoader().loadTestsFromTestCase(t)
        suite.addTests(ttt)
    unittest.TextTestRunner().run(suite)
| 2.34375 | 2 |
launch/main.py | haraisao/rtmlaunch | 0 | 12772376 | <reponame>haraisao/rtmlaunch<gh_stars>0
#
# Simple Launcher for OpenHRI and OpenRTM-aist
# Released under the MIT license
# Copyright(C) 2018 <NAME>, All rights reserved.
#
from __future__ import print_function
import sys
import os
import time
import subprocess
import re
import traceback
import rtcpkg as pkg
import rtc_handle as rh
import rtc_handle_tool as rth
__name_server__ = None  # Popen handle for a locally spawned omniNames, if any
__system_editor__ = None  # Popen handle for RTSystemEditorRCP, if any
# Root directory searched for RTC packages: RTC_PKG_HOME, then HOME (POSIX),
# then HOMEPATH (Windows).
__rtc_home__ = os.getenv('RTC_PKG_HOME')
if __rtc_home__ is None:
    __rtc_home__ = os.getenv('HOME')
if __rtc_home__ is None:
    __rtc_home__ = os.getenv('HOMEPATH')
#
#
def rtse():
global __system_editor__
if __system_editor__ :
if __system_editor__.poll() is not None:
__system_editor__ = subprocess.Popen('RTSystemEditorRCP')
else:
print("RTSystemEditor is already running...")
else:
__system_editor__ = subprocess.Popen('RTSystemEditorRCP')
#
#
def eSEAT_cmd(fname=""):
    """Build the shell command line that runs eSEAT on ``fname``.

    Returns an empty string when the eSEAT installation cannot be located.
    """
    res = ""
    _eSEAT_path_ = pkg.findFiles(__rtc_home__, ['eSEAT.py', 'manifest.xml'])
    if _eSEAT_path_ :
        if fname:
            # BUGFIX: findFile() may return None for a missing file, which
            # previously crashed the string concatenation below.
            fname = findFile(fname) or fname
        res = 'python '+ os.path.join(_eSEAT_path_ ,'eSEAT.py -v ') +fname
    else:
        print ('eSEAT not found.')
    return res
#
#
def openrtp():
    # Locate the platform-specific RT System Editor binary and wrap it in a
    # ProcessManager (not started yet; call .run() on the result).
    if os.name == 'posix':
        file = findFile('openrtp')
    else:
        file = findFile('RTSystemEditorRCP.exe')
    return ProcessManager(file)
#
#
def findFile(fname, top=None):
    """Search for ``fname`` under ``top`` (defaults to the RTC home) and
    return its full path, or None when the file cannot be located."""
    root = __rtc_home__ if top is None else top
    directory = pkg.findFile(root, fname)
    return os.path.join(directory, fname) if directory else None
def findFile2(pat, top=None):
    """Like findFile(), but matches ``pat`` as a pattern via pkg.findFile2()."""
    return pkg.findFile2(__rtc_home__ if top is None else top, pat)
#
#
def terminateNameServer():
    # Stop the omniNames process started by this module, if any.
    global __name_server__
    if __name_server__:
        if __name_server__.poll() == 1:
            # NOTE(review): poll() is None while running and the exit status
            # afterwards; comparing to 1 only matches that one exit code --
            # confirm intent.
            __name_server__ = None
        else:
            __name_server__.terminate()
            __name_server__ = None
#
#
def killNameServer():
    # Prefer terminating our own child name server; otherwise fall back to a
    # system-wide kill of omniNames.exe (Windows taskkill).
    if __name_server__ :
        terminateNameServer()
    else:
        os.system("taskkill /F /IM omniNames.exe")
#
#
class RtcMgr(object):
    """Facade over an RtmEnv naming-service session: lists RT components,
    inspects their ports, and connects/disconnects them."""
    #
    #
    def __init__(self, argv=[]):
        # NOTE(review): mutable default argument; harmless only as long as
        # ``argv`` is never mutated.
        self.rtm_env = None
        self.argv = argv
        self.object_list={}
        self.root={}
        self.initNS()
        self.name_space = self.rtm_env.name_space['localhost']
        self.update()
        rth.NS = self.name_space
    #
    #
    def initNS(self):
        # Create the RTM environment; if no name server is reachable, spawn
        # omniNames and retry once.
        global __name_server__
        try:
            self.rtm_env = rh.RtmEnv(self.argv)
        except:
            __name_server__ = subprocess.Popen('omniNames')
            self.rtm_env = rh.RtmEnv(self.argv)
        return self.rtm_env
    #
    def update(self):
        # Refresh the cached object list from the name server.
        try:
            self.name_space.list_obj()
        except:
            import traceback
            traceback.print_exc()
            print ("Error in update object list")
    #
    def get_handle_names(self, pat=None):
        # Names of all registered handles, optionally filtered by regex.
        res = []
        self.update()
        for name in self.name_space.rtc_handles.keys():
            if pat is None or re.search(pat, name):
                res.append(name)
        return res
    def get_handle(self, pat=None):
        # Handle objects whose names match the regex (all when pat is None).
        res = []
        self.update()
        for name in self.name_space.rtc_handles.keys():
            if pat is None or re.search(pat, name):
                res.append(self.name_space.rtc_handles[name])
        return res
    def get_rtc_handles(self, pat=None):
        # Mapping of name -> handle for components matching ``pat``.
        self.update()
        if pat is None: pat='.rtc'
        return self.name_space.find_handles(pat)
    def get_inports(self, pat=None):
        # "<rtc-name>:<port>" strings for the single RTC matching ``pat``;
        # None (after a warning) when the match is not unique.
        if pat is None: pat='.rtc'
        rtcs = self.name_space.find_handles(pat)
        if len(rtcs) == 1:
            name, handle = rtcs.items()[0]
            return [ name+":"+p for p in handle.inports]
        else:
            print("Too many rtcs: ", rtcs.keys())
            return None
    def get_outports(self, pat=None):
        # Same as get_inports() but for output ports.
        if pat is None: pat='.rtc'
        rtcs = self.name_space.find_handles(pat)
        if len(rtcs) == 1:
            name, handle = rtcs.items()[0]
            return [ name+":"+p for p in handle.outports]
        else:
            print("Too many rtcs: ", rtcs.keys())
            return None
    def get_services(self, pat=None):
        # Same as get_inports() but for service ports.
        if pat is None: pat='.rtc'
        rtcs = self.name_space.find_handles(pat)
        if len(rtcs) == 1:
            name, handle = rtcs.items()[0]
            return [ name+":"+p for p in handle.services]
        else:
            print("Too many rtcs: ", rtcs.keys())
            return None
    def find_available_connections(self, rtcs):
        # Delegate connection discovery to the name space's manager.
        return self.name_space.connection_manager.find_available_connections(rtcs)
    def connect_ports(self, ports):
        return self.name_space.connection_manager.connect_ports(ports)
    def disconnect_ports(self, ports):
        return self.name_space.connection_manager.disconnect_ports(ports)
    def get_port_info(self, name):
        # Summarize the ports of the single handle matching ``name`` as
        # {'in': [...], 'out': [...], 'service': [...]}; None if ambiguous.
        hlist=self.get_handle(name)
        if len(hlist) == 1:
            h=hlist[0]
            res={}
            res['in'] =[]
            res['out']=[]
            res['service']=[]
            for x in h.inports:
                res['in'].append("%s:%s" % (x, h.inports[x].data_type))
            for x in h.outports:
                res['out'].append("%s:%s" % (x, h.outports[x].data_type))
            for x in h.services:
                res['service'].append("%s" % (x, ))
            return res
        else:
            print(self.get_handle_names(name))
            return None
#
#
class ProcessManager(object):
    """Thin wrapper around subprocess.Popen with optional file lookup and a
    (vestigial) pid-file convention."""
    #
    #
    def __init__(self, fname, stderr=None, stdout=None, stdin=None, find=None):
        # When ``find`` is truthy, resolve ``fname`` through findFile()
        # before storing the command line.
        self.popen = None
        self.env = None
        self.f_out = stdout
        self.f_err = stderr
        self.f_in = stdin
        if find :
            fname=findFile(fname)
            print("Filename is ", fname)
        if fname:
            self.setFile( fname )
    #
    #
    def setFile(self, fname):
        # Split the command string into argv form and derive the pid-file
        # name from the executable's basename.
        self.exec_file_name = fname.split()
        self.pid_file = os.path.basename(fname)+".pid"
    #
    #
    def run(self):
        # NOTE(review): the pid file is never actually written (the code
        # below is commented out), so this existence check is effectively
        # always False -- confirm whether pid-file tracking should return.
        if os.path.exists(self.pid_file):
            print("Process %s is already running..." % self.exec_file_name)
            return
        try:
            self.popen = subprocess.Popen(self.exec_file_name, env=self.env,
                      stdout=self.f_out, stdin=self.f_in, stderr=self.f_err)
            #if not self.popen.poll() :
            #  with open(self.pid_name, "w") as f:
            #    f.write(self.popen.pid)
            #    f.close()
        except:
            import traceback
            traceback.print_exc()
            print("Error in run()")
            pass
    #
    #
    def remove_pid_file(self):
        # Best-effort removal of the pid file; missing file is ignored.
        try:
            os.remove(self.pid_file)
        except:
            pass
    #
    #
    def shutdown(self):
        # Terminate the child if it is still running.  NOTE(review): poll()
        # returns None while running and the exit code afterwards, so the
        # second check skips pid-file cleanup for a clean (0) exit.
        if self.popen and (not self.popen.poll()) :
            self.popen.terminate()
        if self.popen.poll():
            self.remove_pid_file()
class CmdProcess(ProcessManager):
    # ProcessManager variant that launches the command in a new Windows
    # console window via ``cmd.exe /c start``.
    def __init__(self, fname):
        ProcessManager.__init__(self, fname, find=True)
        self.cmd=["C:\\Windows\\System32\\cmd.exe", "/c", "start" ]
    def run(self):
        # Same pid-file guard as the base class; see the NOTE there -- the
        # pid file is never actually created.
        if os.path.exists(self.pid_file):
            print("Process %s is already running..." % self.exec_file_name)
            return
        try:
            print(self.cmd + self.exec_file_name)
            self.popen = subprocess.Popen(self.cmd + self.exec_file_name, env=self.env,
                      stdout=self.f_out, stdin=self.f_in, stderr=self.f_err)
            #if not self.popen.poll() :
            #  with open(self.pid_name, "w") as f:
            #    f.write(self.popen.pid)
            #    f.close()
        except:
            import traceback
            traceback.print_exc()
            print("Error in run()")
            pass
#
#
#
class eSEAT(ProcessManager):
    # ProcessManager specialized for running eSEAT on a .seatml file.
    def __init__(self, fname):
        ProcessManager.__init__(self, "")
        self.eSEAT_path = ""
        # Default the extension to .seatml when none is given.
        name, ext = os.path.splitext(fname)
        if not ext : extn = ".seatml"
        else: extn = ext
        self.seatml = "".join([name, extn])
        self.find_eSEAT()
        if self.eSEAT_path:
            self.seatml = self.find_seatml(self.seatml)
    def find_eSEAT(self):
        # Locate the eSEAT installation under the RTC home directory.
        global __rtc_home__
        self.eSEAT_path = pkg.findFiles(__rtc_home__, ['eSEAT.py', 'manifest.xml'])
        if self.eSEAT_path :
            self.eSEAT_path = os.path.join(self.eSEAT_path ,'eSEAT.py')
            print("eSEAT_path = " + self.eSEAT_path)
    def find_seatml(self, fname):
        # Resolve the seatml file path; fall back to the name as given.
        res = findFile(fname)
        if res:
            print ("Seatml File = "+ res)
            return res
        else:
            print ("Seatml not found: "+fname)
            return fname
    def run(self, opt = "-v"):
        # Compose the "python eSEAT.py <opt> <seatml>" command and launch it.
        self.setFile( "python "+self.eSEAT_path + " "+opt+" "+ self.seatml)
        ProcessManager.run(self)
| 2.015625 | 2 |
terrascript/matchbox/__init__.py | hugovk/python-terrascript | 4 | 12772377 | # terrascript/matchbox/__init__.py
import terrascript
class matchbox(terrascript.Provider):
    """Terraform ``matchbox`` provider; all behaviour is inherited from
    terrascript.Provider."""
    pass
| 1.1875 | 1 |
config/deploy/scheme2.py | bbc/intra-chroma-attentionCNN | 7 | 12772378 | <reponame>bbc/intra-chroma-attentionCNN<filename>config/deploy/scheme2.py
scheme = 2  # attention scheme identifier selected by this deploy config
deploy_path = "/work/marcb/Intra_Chroma/deploy"  # where deployment artefacts are written
config = "config/norelu_sa_mult_out.py"  # model configuration file to deploy
| 1.0625 | 1 |
liquid_extra/filters/additional.py | jg-rp/liquid-extra | 1 | 12772379 | """Some additional filters that don't belong to any specific category."""
import json
from typing import Any
from typing import Optional
from typing import Mapping
from liquid.context import get_item
from liquid.filter import liquid_filter
from liquid.filter import with_context
from liquid.filter import with_environment
from liquid import Environment
from liquid import Context
class JSON:
    """Serialize objects as a JSON (JavaScript Object Notation) formatted string.

    Args:
        default: A 'default' function passed to json.dumps.  It is invoked
            whenever the JSONEncoder does not know how to serialize an object.
    """

    name = "json"

    def __init__(self, default: Any = None):
        self.default = default

    @liquid_filter
    def __call__(self, obj: object) -> str:
        serialized = json.dumps(obj, default=self.default)
        return serialized
@with_context
@with_environment
class Translate:
    """Replace translation keys with strings for the current locale.

    Tries to read the locale from the current template context, falling back to
    "default" if the key "locale" does not exist.

    Args:
        locales: A mapping of locale name to translation key mapping. If locales
            is `None`, the default, the translation key will be returned unchanged.
    """

    name = "t"

    def __init__(self, locales: Optional[Mapping[str, Mapping[str, object]]] = None):
        self.locales: Mapping[str, Mapping[str, object]] = locales or {}

    @liquid_filter
    def __call__(
        self,
        key: object,
        *,
        context: Context,
        environment: Environment,
        **kwargs: Any,
    ) -> str:
        # Current locale comes from the render context ("default" if unset).
        locale = context.resolve("locale", default="default")
        translations: Mapping[str, object] = self.locales.get(locale, {})
        key = str(key)
        # Dotted keys address nested mappings; fall back to the key itself.
        path = key.split(".")
        val = get_item(translations, *path, default=key)  # type: ignore
        # The looked-up string may itself be a Liquid template.
        return environment.from_string(val).render(**kwargs)
| 2.390625 | 2 |
remi/db/engine.py | Qu4n7r01d/remi | 1 | 12772380 | <reponame>Qu4n7r01d/remi<gh_stars>1-10
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from remi.core.constant import Client
async def dispose_all_engines():
    """Dispose every module-level async engine (currently just the config engine)."""
    await async_config_engine.dispose()
# SQLite database files live under the client's data directory.
async_engine_scheme = f"sqlite+aiosqlite:///{Client.DATA_PATH}"
async_config_engine = create_async_engine(f"{async_engine_scheme}/config.sqlite", future=True)
# Session factory for the config database; objects stay usable after commit.
async_config_session = sessionmaker(
    async_config_engine,
    expire_on_commit=False,
    class_=AsyncSession,
    autoflush=True,
)
| 2.15625 | 2 |
core/utils/states/mailing_everyone.py | AKurmazov/hoteluni_bot | 2 | 12772381 | <reponame>AKurmazov/hoteluni_bot<filename>core/utils/states/mailing_everyone.py
from aiogram.dispatcher.filters.state import State, StatesGroup
class MailingEveryoneDialog(StatesGroup):
    """aiogram FSM state group for the "mail everyone" dialog."""
    # Waiting for the user to enter the message text to broadcast.
    enter_message = State()
| 1.328125 | 1 |
from_config/run_test.py | astrockragh/IceCube | 0 | 12772382 | <filename>from_config/run_test.py
import os, sys, tqdm, json, shutil, glob
import os.path as osp
from tensorflow.keras.backend import clear_session
import tensorflow as tf
# gpu_devices = tf.config.list_physical_devices('GPU')
# if len(gpu_devices) > 0:
#     print("GPU detected")
#     tf.config.experimental.set_memory_growth(gpu_devices[0], True)
# Experiment folder is the first CLI argument; silence most TF logging.
exp_folder = str(sys.argv[1])
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
##########################################################
# Loop over JSON files and train models #
##########################################################
# Generate list over experiments to run
from dev.utils import list_experiments, clean_done, check_dataset
from dev.train_script import train_model
clean_done(exp_folder)
exp_folder, exp_list = list_experiments(exp_folder)
print(f"Starting process with {len(exp_list)} experiments")
print(exp_list)
# Loop over the experiments
for i, experiment in enumerate(exp_list):
    # Load construction dictionary from json file
    with open(osp.join(exp_folder, experiment)) as file:
        construct_dict = json.load(file)
    # Override the config for a quick smoke test: tiny dataset, few epochs.
    # (experiment[:-5] strips the ".json" suffix.)
    construct_dict['experiment_name']=experiment[:-5]
    construct_dict['data_params']['n_data']=1e3
    construct_dict['experiment']='test'
    construct_dict['run_params']['epochs']=5
    construct_dict['run_params']['val_epoch']=2
    print(f"Starting experiment from {experiment[:-5]}")
    epochexit=train_model(construct_dict)
    clear_session()  # free Keras/TF graph state between experiments
# if SHUTDOWN == True:
#     os.system("shutdown -h")
# Create a script to go through and test the performance
# test_model(model = construct_dict['Experiment'], data = instructions_to_dataset_name(construct_dict))
# We can setup a shutdown maybe
#os.system("shutdown -h 5")
| 2.109375 | 2 |
printevenrange.py | maurendeviia/pythoncharmers | 37 | 12772383 | <filename>printevenrange.py<gh_stars>10-100
# Print the even numbers between start and end (inclusive), space-separated.
start, end = 4, 19
# Begin at the first even number >= start and step by 2, so no parity test is needed.
first_even = start + (start % 2)
for num in range(first_even, end + 1, 2):
    print(num, end=" ")
| 3.65625 | 4 |
seaplotlib/decorators.py | mvdbeek/seaplotlib | 0 | 12772384 | from functools import wraps
from .helper import abline
from . import plt
def can_set_title(function):
    """Decorator: accept an optional ``title`` keyword and apply it to the current axes."""
    @wraps(function)
    def wrapper(*args, **kwargs):
        requested_title = kwargs.pop('title', None)
        result = function(*args, **kwargs)
        if requested_title:
            plt.gca().set_title(requested_title)
        return result
    return wrapper
def can_set_xlabel(function):
    """Decorator: accept an optional ``xlabel`` keyword and apply it to the current axes."""
    @wraps(function)
    def wrapper(*args, **kwargs):
        requested_label = kwargs.pop('xlabel', None)
        result = function(*args, **kwargs)
        if requested_label:
            plt.gca().set_xlabel(requested_label)
        return result
    return wrapper
def can_set_ylabel(function):
    """Decorator: accept an optional ``ylabel`` keyword and apply it to the current axes."""
    @wraps(function)
    def wrapper(*args, **kwargs):
        requested_label = kwargs.pop('ylabel', None)
        result = function(*args, **kwargs)
        if requested_label:
            plt.gca().set_ylabel(requested_label)
        return result
    return wrapper
def can_create_figure(function):
    """Decorator: when the caller did not pass an ``ax`` keyword, create a fresh
    figure/axes (honoring an optional ``figsize`` keyword) and inject it."""
    @wraps(function)
    def create_figure(*args, **kwargs):
        if not kwargs.get('ax'):
            figsize = kwargs.pop('figsize', None)
            _, kwargs['ax'] = plt.subplots(figsize=figsize)
        return function(*args, **kwargs)
    return create_figure
def can_set_equal_scale(function):
    """Decorator: accept an optional ``equal_scale`` keyword; when truthy, expand
    both axes of the current plot to the same (min, max) range after the call.

    Bug fix: the set_xlim/set_ylim calls used to live inside ``assert``
    statements, so running under ``python -O`` (which strips asserts) silently
    skipped the rescaling.  The calls are now unconditional.
    """
    @wraps(function)
    def set_equal_scale(*args, **kwargs):
        equal_scale = kwargs.pop('equal_scale', False)
        r = function(*args, **kwargs)
        if equal_scale:
            ax = plt.gca()
            xlim = ax.get_xlim()
            ylim = ax.get_ylim()
            # Shared range covering both axes.
            new_min = min(xlim[0], ylim[0])
            new_max = max(xlim[1], ylim[1])
            ax.set_xlim(new_min, new_max)
            ax.set_ylim(new_min, new_max)
        return r
    return set_equal_scale
def can_add_abline(function):
    """Decorator: accept an optional ``abline`` keyword; when truthy, draw a
    reference line via the imported ``abline`` helper after the wrapped call."""
    @wraps(function)
    def _abline(*args, **kwargs):
        add_abline = kwargs.pop('abline', False)
        r = function(*args, **kwargs)
        if add_abline:
            abline()
        return r
    return _abline
def tight_layout(function):
    """Decorator: call ``plt.tight_layout()`` after the wrapped function returns."""
    @wraps(function)
    def wrapper(*args, **kwargs):
        result = function(*args, **kwargs)
        plt.tight_layout()
        return result
    return wrapper
| 2.53125 | 3 |
main.py | Waz0x/codingame_discord_bot | 6 | 12772385 | import os
import json
import codingame
import discord
from discord.ext import commands
# Load bot configuration and the JSON "database" from disk.
with open("./config/config.json", "r") as cjson:
    config = json.load(cjson)
with open("./config/db.json", "r") as dbjson:
    db = json.load(dbjson)
# NOTE(review): `db` is loaded but never attached to the bot here — confirm a cog uses it.
intents = discord.Intents.default()
bot = commands.Bot(command_prefix=config["prefix"], intents=intents)
# Expose the config and an async CodinGame client to the cogs via the bot object.
bot.config = config
bot.codingame_client = codingame.Client(is_async=True)
# Load every Python module in ./cogs as an extension (strip the ".py" suffix).
for file in os.listdir('cogs'):
    if file.endswith('.py'):
        bot.load_extension(f"cogs.{file[:-3]}")
bot.run(config["token"])  # blocking: starts the event loop
Scheduler/dailyMission.py | louisyoungx/stock-api | 1 | 12772386 | import time
import datetime
from Status.logList import log
from Message.sendEmail import send_email
from Message.sendMessage import send_message
from Scheduler.dataAnalysis import analysis
from Update.getData import getCurrentData_torxiong
def getTime():
    """Return the current local time as a ``time.struct_time``.

    Useful fields (per the ``time`` module docs): tm_sec 0-59, tm_min 0-59,
    tm_hour 0-23, tm_mday 1-31, tm_mon 1-12, tm_year (full year),
    tm_wday 0-6 with Monday == 0, tm_yday 1-366, tm_isdst DST flag.
    """
    # time.localtime() with no argument already uses the current time.
    return time.localtime()
def dormancy(to_time):
    """Placeholder — never implemented; the sleeping is done by min_sleep instead."""
    pass
def min_sleep(startTime, endTime):
    """Sleep until *endTime*; both arguments are "YYYY-MM-DD HH:MM" strings.

    Despite the name (it originally computed minutes, see the commented-out
    lines), the function sleeps for the exact number of seconds between the
    two timestamps and returns True on wake-up.
    """
    # Normalize the format by appending a seconds field.
    startTime1 = startTime + ':00'
    endTime1 = endTime + ':00'
    # Parse both endpoints.
    startTime2 = datetime.datetime.strptime(startTime1, "%Y-%m-%d %H:%M:%S")
    endTime2 = datetime.datetime.strptime(endTime1, "%Y-%m-%d %H:%M:%S")
    # .seconds only covers the H/M/S part of the delta (ignores whole days);
    # kept from the original code but unused below.
    seconds = (endTime2 - startTime2).seconds
    # .total_seconds() is the accurate difference, including days.
    total_seconds = (endTime2 - startTime2).total_seconds()
    # print(total_seconds)
    # mins = total_seconds / 60
    # Log message (Chinese): "(cruise thread): about to sleep, resuming work at {}".
    log.update("(子线程:巡航模块):即将休眠,将于{}重新工作".format(endTime2))
    time.sleep(total_seconds)
    return True
# return int(mins)
# if __name__ == "__main__":
# startTime_1 = '2019-07-28 00:00'
# endTime_1 = '2019-07-29 00:00'
# fenNum = minNums(startTime_1, endTime_1)
# print(fenNum)
def time_in_work():
    """Classify "now" against market trading hours.

    Returns True during a trading session (09:30-11:30 or 13:00-15:00),
    the string "REST" during the midday break (11:30-13:00), and False
    otherwise.  All comparisons are strict, matching the original logic.
    """
    now = datetime.datetime.now()
    day = str(now.date())
    def _at(hhmm):
        # Build a datetime for today's date at the given "H:MM" wall-clock time.
        return datetime.datetime.strptime(day + hhmm, '%Y-%m-%d%H:%M')
    if _at('9:30') < now < _at('11:30'):
        return True
    if _at('13:00') < now < _at('15:00'):
        return True
    if _at('11:30') < now < _at('13:00'):
        return "REST"
    return False
def daily_tracking(stock_code, mins):  # stock code and polling interval in minutes
    """One scheduling pass: poll & analyse during trading hours, otherwise
    sleep until the next relevant time point.  Called in a loop by timing().

    Bug fix: the workday test was ``now.tm_wday - 1 < 5``, which also counts
    Saturday (tm_wday == 5) as a workday and made the Saturday branch below
    unreachable.  Corrected to ``now.tm_wday < 5`` (Monday..Friday).
    """
    now = time.localtime(time.time())
    log.update("(子线程:巡航模块):今日任务初始化成功")
    if now.tm_wday < 5:  # weekday; tm_wday: 0 == Monday ... 6 == Sunday
        log.update("(子线程:巡航模块):当前为工作日")
        if time_in_work() == "REST":  # midday break: sleep until 13:00
            log.update("(子线程:巡航模块):当前已到午休时间")
            now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
            end_time = "{}-{}-{} 13:00".format(now.tm_year, now.tm_mon, now.tm_mday)
            min_sleep(now_str_time, end_time)
        elif time_in_work():  # trading session: poll data every `mins` minutes
            log.update("(子线程:巡航模块):当前为正常交易时间")
            log.update("(子线程:巡航模块):启用数据获取模块Update.getData与数据分析模块Scheduler.dataAnalyse")
            log.update("(子线程:巡航模块):当前持续监测中,数据获取频率:{}分钟/次".format(mins))
            while time_in_work():
                data = getCurrentData_torxiong(stock_code)
                analysis_result = analysis(data)
                if analysis_result:
                    send_message(analysis_result)
                # if email_title:
                #     send_email(email_title, email_html)
                time.sleep(60 * mins)  # sleep between polls
        elif now.tm_hour > 15:  # well after close: re-check every 5 minutes
            log.update("(子线程:巡航模块):结束休眠,正在等待开盘")
            time.sleep(60 * 5)
        elif now.tm_hour < 9:  # before open: sleep until 9:30 today
            log.update("(子线程:巡航模块):等待开市")
            now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
            end_time = "{}-{}-{} 9:30".format(now.tm_year, now.tm_mon, now.tm_mday)
            min_sleep(now_str_time, end_time)
        else:  # market closed for today: sleep until 9:30 tomorrow
            log.update("(子线程:巡航模块):已休市")
            now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
            # TODO(review): tm_mday+1 / tm_mday+2 overflow at month end; use datetime arithmetic.
            end_time = "{}-{}-{} 9:30".format(now.tm_year, now.tm_mon, now.tm_mday+1)
            min_sleep(now_str_time, end_time)
    else:  # weekend
        if now.tm_wday == 5:  # Saturday: sleep until Monday 9:30
            log.update("(子线程:巡航模块):今日为周六")
            now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
            end_time = "{}-{}-{} 9:30".format(now.tm_year, now.tm_mon, now.tm_mday+2)
            min_sleep(now_str_time, end_time)
        if now.tm_wday == 6:  # Sunday: sleep until Monday 9:30
            log.update("(子线程:巡航模块):今日为周日")  # fixed: previously logged "周五" (Friday)
            now_str_time = "{}-{}-{} {}:{}".format(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min)
            end_time = "{}-{}-{} 9:30".format(now.tm_year, now.tm_mon, now.tm_mday+1)
            min_sleep(now_str_time, end_time)
def timing(stock_code):
    """Run daily_tracking for *stock_code* forever with a 5-minute polling interval."""
    while True:
        daily_tracking(stock_code, 5)
| 2.515625 | 3 |
braincards/settings.py | snirp/braincards-OLD | 0 | 12772387 | <filename>braincards/settings.py
import os
from dotenv import load_dotenv
# Load sensitive environment-specific settings from .env file
# These do *not* override existing System environment variables by default.
# To override, use load_dotenv(override=True)
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.getenv("BRAINCARDS_SECRET_KEY")
# NOTE(review): DEBUG is the raw env string — any non-empty value (even "False")
# is truthy in boolean checks; confirm this is intended.
DEBUG = os.getenv("BRAINCARDS_DEBUG")
ALLOWED_HOSTS = []
# NOTE(review): WSGI_APPLICATION is assigned again below with the identical
# value (after ROOT_URLCONF); one of the two assignments is redundant.
WSGI_APPLICATION = 'braincards.wsgi.application'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
# 'allauth.socialaccount.providers.facebook',
# https://developers.google.com/identity/sign-in/web/sign-in
# https://console.developers.google.com/apis
'allauth.socialaccount.providers.google',
# 'allauth.socialaccount.providers.twitter',
'ordered_model',
'users',
'cards',
'rest_framework',
'rest_framework.authtoken',
# 'rest_auth',
# 'rest_auth.registration',
]
AUTH_USER_MODEL = 'users.User'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'braincards.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'braincards.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv("BRAINCARDS_POSTGRES_NAME"),
'USER': os.getenv("BRAINCARDS_POSTGRES_USER"),
'PASSWORD': os.getenv("BRAINCARDS_POSTGRES_PASSWORD"),
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
SITE_ID = 1
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Amsterdam'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
REST_AUTH_SERIALIZERS = {
'LOGIN_SERIALIZER': 'users.serializers.LoginSerializer',
'USER_DETAILS_SERIALIZER': 'users.serializers.UserDetailsSerializer',
# 'TOKEN_SERIALIZER': 'path.to.custom.TokenSerializer',
}
REST_AUTH_REGISTER_SERIALIZERS = {
'REGISTER_SERIALIZER': 'users.serializers.RegisterSerializer',
}
# AllAuth registration settings
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = 'optional'
SOCIALACCOUNT_EMAIL_VERIFICATION = ACCOUNT_EMAIL_VERIFICATION
SOCIALACCOUNT_PROVIDERS = {}
# Email settings
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_PASSWORD = os.getenv("BRAINCARDS_EMAIL_HOST_PASSWORD")
EMAIL_HOST_USER = os.getenv("BRAINCARDS_EMAIL_HOST_USER")
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
]
} | 1.78125 | 2 |
gui_app/utils/SystemUtil.py | cloudconductor/cloud_conductor_gui | 0 | 12772388 | from ..utils import ApiUtil
from ..utils import StringUtil
from ..utils.ApiUtil import Url
def get_system_list(code, token, project_id=None):
    """Fetch the list of systems from the API.

    Args:
        code: passed through to ApiUtil.requestGet (semantics defined there).
        token: auth token sent as ``auth_token``.
        project_id: optional project filter.
    """
    url = Url.systemList
    data = {
        'auth_token': token,
        'project_id': project_id,
    }
    # Renamed the local from ``list`` (which shadowed the builtin) to ``systems``.
    systems = ApiUtil.requestGet(url, code, data)
    return systems
def get_system_list2(code, token, project_id=None):
    """Return the systems as a list of {'id': str, 'name': str} dicts."""
    systems = get_system_list(code, token, project_id)
    return [{'id': str(system.get('id')), 'name': system.get('name')}
            for system in systems]
def get_system_detail(code, token, id):
    """GET one system's detail; returns None when any argument is empty,
    otherwise the detail dict with null entries stripped."""
    if StringUtil.isEmpty(code):
        return None
    if StringUtil.isEmpty(token):
        return None
    if StringUtil.isEmpty(id):
        return None
    url = Url.systemDetail(id, Url.url)
    data = {
        'auth_token': token,
    }
    system = ApiUtil.requestGet(url, code, data)
    return StringUtil.deleteNullDict(system)
def create_system(code, token, project_id, form):
    """POST *form* to create a system; injects auth_token and project_id into the form."""
    url = Url.systemCreate
    form = StringUtil.deleteNullDict(form)
    form['auth_token'] = token
    form['project_id'] = project_id
    # NOTE(review): deleteNullDict is applied twice (above and below); the second
    # call also strips auth_token/project_id when they are null.
    system = ApiUtil.requestPost(url, code, StringUtil.deleteNullDict(form))
    return system
def edit_system(code, token, id, form):
    """PUT *form* (nulls stripped, auth_token added) to update system *id*."""
    url = Url.systemEdit(id, Url.url)
    data = StringUtil.deleteNullDict(form)
    data['auth_token'] = token
    system = ApiUtil.requestPut(url, code, data)
    return system
def get_system_delete(code, token, id):
    """DELETE system *id*; silently returns None when any argument is empty."""
    if StringUtil.isEmpty(code):
        return None
    if StringUtil.isEmpty(token):
        return None
    if StringUtil.isEmpty(id):
        return None
    url = Url.systemDelete(id, Url.url)
    data = {'auth_token': token}
    ApiUtil.requestDelete(url, code, data)
| 2.453125 | 2 |
main.py | MatejMecka/reddit_stellar_tip_bot | 0 | 12772389 | <gh_stars>0
import os
from praw import Reddit
from praw.models import Submission, Subreddit, Comment
from dotenv import load_dotenv
from stellar_sdk import Keypair, Asset, exceptions
import logging
import sqlite3
import mysql.connector
import sys
# Disabled logging setup kept for reference:
"""
logging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
"""
load_dotenv()
# Reddit credentials
CLIENT = os.getenv("CLIENT_ID")
SECRET = os.getenv("CLIENT_SECRET")
USERNAME = os.getenv("USERNAME")
# Restored: this lookup had been mangled to "<PASSWORD>" by the dataset's
# anonymization; the surrounding lines make os.getenv the clear intent.
PASSWORD = os.getenv("PASSWORD")
# MariaDB
USE_SQLITE3 = os.getenv("USE_SQLITE3")
MARIA_DB_HOST = os.getenv("MARIA_DB_HOST")
MARIA_DB_USER = os.getenv("MARIA_DB_USER")
MARIA_DB_PASSWORD = os.getenv("MARIA_DB_PASSWORD")
MARIA_DB_PORT = os.getenv("MARIA_DB_PORT")
MARIA_DB_DATABASE = os.getenv("MARIA_DB_DATABASE")
# Website
SIGNING_URL = os.getenv("SIGNING_URL")
# Open either a local SQLite file or a MariaDB/MySQL connection.
if USE_SQLITE3 == "True":
    conn = sqlite3.connect('accounts.db')
else:
    try:
        conn = mysql.connector.connect(
            user=MARIA_DB_USER,
            password=MARIA_DB_PASSWORD,  # restored from anonymized "<PASSWORD>"
            host=MARIA_DB_HOST,
            port=int(MARIA_DB_PORT),
            database=MARIA_DB_DATABASE
        )
    except Exception as e:
        print(f"Error connecting to Mysql Database: {e}")
        sys.exit(1)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS accounts (id INTEGER AUTO_INCREMENT PRIMARY KEY, username text, account text)''')
c.execute('''CREATE TABLE IF NOT EXISTS to_notify (id INTEGER AUTO_INCREMENT PRIMARY KEY, person_to_be_notified text, persons_account text, amount int, asset_name text, asset_issuer text)''')
conn.commit()
# Create the reddit object instance using Praw
reddit = Reddit(
    user_agent="Stellar Tip Bot v0.0.1",
    client_id=CLIENT,
    client_secret=SECRET,
    username=USERNAME,
    password=PASSWORD,
)
def statementForDB(statement):
    """Adapt a '?'-parameterized SQL statement to the active backend (MySQL uses %s)."""
    mysql_backend = USE_SQLITE3 == "False"
    return statement.replace('?', '%s') if mysql_backend else statement
def create_account(username, public_key):
    """Register/replace *username*'s Stellar wallet address and notify pending tippers.

    Returns a human-readable status string in every case.
    """
    # Validate the Stellar public key before touching the database.
    try:
        Keypair.from_public_key(public_key)
    except exceptions.Ed25519PublicKeyInvalidError:
        return "The provided Public Key is invalid!"
    # Create the account row, replacing any previous address for this user.
    try:
        statement = "REPLACE INTO accounts (username, account) VALUES(?,?)"
        c.execute(statementForDB(statement), (str(username), str(public_key)))
        conn.commit()
    except Exception as e:
        print(f"ERROR: {str(e)}")
        return f"There was an error creating your account: {str(e)}"
    # Look up anyone waiting to tip this user.
    rows = []  # bug fix: previously unbound (NameError) when the SELECT below failed
    try:
        statement = "SELECT person_to_be_notified, amount, id from to_notify WHERE persons_account=?"
        c.execute(statementForDB(statement), (str(username), ))
        rows = c.fetchall()
    except Exception as e:
        print(f"ERROR: {str(e)}")
    # Notify every pending tipper.  (Bug fix: the original guard `rows is []`
    # was always False — identity comparison against a fresh list literal.)
    for row in rows:
        try:
            reddit.redditor(row[0]).message(f'{username} opened an account!', f'The person in the subject setup their Stellar Wallet! Visit the following url to proceed with tipping: {SIGNING_URL}/payment?user={username}&amount={row[1]}.')
            statement = "DELETE FROM to_notify WHERE id=?"
            c.execute(statementForDB(statement), (int(row[2]), ))
            conn.commit()
        except Exception as e:
            print(f"Error deleting from table: {str(e)}")
    return "Account has been succesfully created!"
def payment(user, amount, original_poster, asset_code=None, asset_issuer=None):
    """Build a tip/signing URL for *user*, or queue a notification if they have
    no wallet yet.  Returns a human-readable status/instruction string."""
    # Normalize reddit username prefixes ("/u/", "u/", "/U/").
    user = user.replace('/u/', '').replace('u/', '').replace('/U/', '')
    # Look up the recipient's Stellar address.
    try:
        statement = "SELECT account from accounts WHERE username=?"
        c.execute(statementForDB(statement), (str(user), ))
        public_key = c.fetchone()
    except Exception as e:
        print(f"ERROR with Payment: {str(e)}")
        return f"There was an error finding the account for the recepient: {str(e)}"
    # Validate a custom asset when one is given; default to native XLM.
    if asset_code == None and asset_issuer == None:
        asset_name = "XLM"
    else:
        try:
            asset = Asset(asset_code, asset_issuer)
            asset_name = asset_code
        except Exception as e:
            return f"There was an error processing the custom asset: {str(e)}"
    if public_key is None:
        # Recipient has no wallet yet: message them and queue the tip for later.
        reddit.redditor(user).message(f'{original_poster} wants to tip you!', f'Hey there! The user in the subject wants to tip you {amount} {asset_name}. In order to accept the tip create an account by replying to this message with: `setaddress [ADDRESS]` where `[ADDRESS]` is the Stellar Wallet where you want to receive the tip.')
        try:
            statement = "INSERT INTO to_notify (person_to_be_notified, persons_account, amount, asset_name, asset_issuer) VALUES(?,?,?,?,?)"
            c.execute(statementForDB(statement), (str(original_poster), str(user), int(amount), str(asset_name), str(asset_issuer)))
            conn.commit()
        except Exception as e:
            print(f"ERROR with Payment: {str(e)}")
        return "The user does not have a Stellar Account setten up with me. They have been notified you want to tip them."
    else:
        # Native XLM uses the direct payment page; custom assets use a claimable balance.
        if asset_name == "XLM":
            url = f"{SIGNING_URL}/payment?user={user}&amount={amount}"
        else:
            url = f"{SIGNING_URL}/create-claimable-balance?user={user}&amount={amount}&asset_name={asset_name}&asset_issuer={asset_issuer}"
        return f"Hi there! In order to tip the following person visit the following page: {url}"
def main():
    """Stream the bot's reddit inbox forever and dispatch help/tip/setaddress commands."""
    print('Started!')
    try:
        # Parse messages that it receives
        for mention in reddit.inbox.stream():
            if mention.new:
                mention.mark_read()
                print(f"{mention.author} - {mention.body}")
                # Parse commands: drop a leading bot-name token when present.
                message = mention.body.split(' ')
                if 'tipbot_stellar' in message[0]:
                    message.pop(0)
                command = message[0].lower()
                arguments = message [1::]
                print(command)
                if command == "help":
                    mention.reply("""
Hello! This are the commands I currently support:
`help` <- You ran this! 😎
`tip [USER] [AMOUNT]` <- Pay a certain reddit user `[AMOUNT]` XLM
`tip [USER] [AMOUNT] [ASSET_NAME] [ASSET_ISSUER]` <- Creates a Claimable balance for a certain asset to a user
`setAddress [STELLAR_ADDRESS]` <- Set your Stellar Public Key so others can tip you.
The bot uses [Albedo](https://albedo.link/) for signing transactions so make sure you have it installed.
""")
                    continue
                if command == "tip":
                    # 2 args = native XLM tip; 4 args = custom asset tip.
                    if len(arguments) == 4:
                        message = payment(arguments[0], arguments[1], mention.author, arguments[2], arguments[3])
                    elif len(arguments) == 2:
                        message = payment(arguments[0], arguments[1], mention.author)
                    else:
                        message = 'Invalid number of arguments'
                    mention.reply(message)
                    continue
                if command == "setaddress":
                    message = create_account(mention.author, arguments[0])
                    print(message)
                    mention.reply(message)
                    continue
                else:
                    # NOTE(review): this `else` binds only to the `setaddress` check;
                    # unknown commands fall through to here after the earlier `continue`s.
                    mention.reply("The command you wrote does not exist. Try replying with `help`")
    except Exception as e:
        print(str(e))
| 2.546875 | 3 |
CargarArchivo.py | yopablo017/PROYECTO2LF | 0 | 12772390 | <filename>CargarArchivo.py
from tkinter import *
from tkinter import messagebox as MessageBox
# Module-level parser state: the lower-case scalars describe the grammar
# currently being read; the upper-case lists accumulate results for every
# accepted grammar (Cantidad1/2/3 store the grammar ordinal for each entry).
noterminales=""
terminales=""
terminalinicial=""
contador=0
Terminales=[]
Noterminales=[]
Terminalinicial=[]
Titulos=[]
Produccion=[]
Cantidad1=[]
Cantidad2=[]
Cantidad3=[]
Validos=[]       # ordinals of grammars that passed the checks
NoValidos=[]     # ordinals of rejected grammars
ContadorCantidad=0
Validar=False
conta=0
def analizar(cadena):
    """Dispatch one input line of a grammar file.

    Line protocol (inferred from the checks below — confirm against the file
    format): the first line of a grammar is its name; lines containing ';'
    carry the alphabet; lines containing '>' are productions; a line with '*'
    terminates the grammar and triggers accept/reject bookkeeping.
    """
    global conta
    global contador
    global ContadorCantidad
    global Validar
    final = cadena[-1]  # NOTE(review): unused
    if conta == 0:
        # First line of a new grammar: bump the grammar ordinal, record its name.
        ContadorCantidad += 1
        Nombre(cadena)
    paso2 = ";" in cadena
    if paso2 == True:
        Automatas(cadena)
    paso3 = ">" in cadena
    if paso3 == True:
        Producciones(cadena)
    paso4 = "*" in cadena
    if paso4 == True:
        # End of grammar: keep it when the production checks passed, else roll back.
        if Validar == True:
            Validos.append(ContadorCantidad)
        else:
            Eliminar()
            NoValidos.append(ContadorCantidad)
        # Reset per-grammar state (conta becomes 0 after the increment below).
        conta = -1
        contador = 0
        Validar = False
    conta += 1
def Nombre(cadena):
    """Record *cadena* as the title of the grammar currently being parsed."""
    global ContadorCantidad
    Titulos.append(cadena)
    Cantidad1.append(ContadorCantidad)
def Automatas(cadena):
    """Parse the alphabet line "nonterminals;terminals;startsymbol" into module state."""
    global ContadorCantidad
    global terminales
    global noterminales
    global terminalinicial
    Alfabeto = cadena.split(";")
    noterminales = Alfabeto[0]
    terminales = Alfabeto[1]
    terminalinicial = Alfabeto[2]
    Terminales.append(terminales)
    Noterminales.append(noterminales)
    Terminalinicial.append(terminalinicial)
    Cantidad2.append(ContadorCantidad)
def Producciones(cadena):
    """Count terminal/non-terminal symbols on the right-hand side of a production
    (the text after '>') and set the module flag ``Validar`` accordingly; record
    the numbered production string.

    Bug fix: the last branch assigned ``Validos = True``, which only created a
    dead local shadowing the global ``Validos`` list; per the pattern of the
    sibling branches the intent is ``Validar = True``.
    """
    global ContadorCantidad
    global contador
    global terminales
    global noterminales
    global terminalinicial
    global Validar
    palabra = ""
    contadorterminales = 0
    contadornoterminales = 0
    paso1 = True
    paso2 = False
    Paso3 = False  # NOTE(review): unused
    for i in cadena:
        caracter = i
        if paso1 == True:
            # Skip everything up to and including the '>' separator.
            if caracter == ">":
                paso2 = True
                paso1 = False
            else:
                pass
        elif paso2 == True:
            # Space-separated tokens after '>': classify each against the alphabets.
            if caracter == " ":
                ter = palabra in terminales
                noter = palabra in noterminales
                if ter == True:
                    contadorterminales += 1
                elif noter == True:
                    contadornoterminales += 1
                palabra = ""
            else:
                palabra = palabra + caracter
    print(contadorterminales)
    print(contadornoterminales)
    if contadorterminales == 2 and contadornoterminales == 1:
        Validar = True
    elif contadorterminales == 4 and contadornoterminales == 1:
        Validar = True
    elif contadorterminales == 3 and contadornoterminales == 1:
        Validar = True
    elif contadorterminales >= 0 and contadornoterminales <= 1:
        Validar = True  # bug fix: was `Validos = True` (dead local)
    Cantidad3.append(ContadorCantidad)
    contador += 1
    Produccion.append(str(contador) + ": " + cadena)
def Eliminar():
    """Roll back everything recorded for the current grammar (it was classified
    as regular) and warn the user via a message box."""
    global ContadorCantidad
    ultimo = Titulos[-1]
    MessageBox.showwarning("Alerta","La gramatica "+ultimo+" es Regular asi que no se agregara.")
    # Pop every entry recorded for this grammar from each parallel list.
    tamaño1 = Cantidad1.count(ContadorCantidad)
    for i in range(0, tamaño1):
        Titulos.pop()
        Cantidad1.pop()
    tamaño2 = Cantidad2.count(ContadorCantidad)
    for i in range(0, tamaño2):
        Noterminales.pop()
        Terminalinicial.pop()
        Terminales.pop()
        Cantidad2.pop()
    tamaño3 = Cantidad3.count(ContadorCantidad)
    for i in range(0, tamaño3):
        Produccion.pop()
Cantidad3.pop() | 3.28125 | 3 |
PYTHON/PasswordGen.py | ayushyado/HACKTOBERFEST2021-2 | 125 | 12772391 | <reponame>ayushyado/HACKTOBERFEST2021-2<filename>PYTHON/PasswordGen.py
import string
import secrets

# Character pool: letters, digits, and punctuation.
characters = list(string.ascii_letters + string.digits + "!@#$%^&*()")

def password_gen(length=None):
    """Generate, print, and return a random password.

    Args:
        length: password length; when None (the default, preserving the
            original interactive behavior) the user is prompted for it.

    Returns:
        The generated password string, so callers can reuse it.

    Uses ``secrets`` rather than ``random``: passwords are security-sensitive
    and ``random`` is not cryptographically strong.  The pre/post shuffles of
    the original added nothing, since secrets.choice picks uniformly.
    """
    if length is None:
        length = int(input("Password length: "))
    password = [secrets.choice(characters) for _ in range(length)]
    result = "".join(password)
    print(result)
    return result

if __name__ == "__main__":
    # Guarded so importing this module no longer triggers the interactive prompt.
    password_gen()
| 3.859375 | 4 |
custom/funcs.py | sergicastellasape/transformers | 0 | 12772392 | <gh_stars>0
import torch
from transformers import *
from custom.transformer_sentence import TransformerSentence
from tqdm import tqdm
import re
def load_dataset(txt_path=None,
                 return_embeddings=False,
                 MODEL=None,
                 TOKENIZER=None):
    """Split the text file at *txt_path* into sentences and embed each one.

    Args:
        txt_path: path to a plain-text file (required; pass as a keyword).
        return_embeddings: when True, also return the stacked first-layer and
            last-layer embedding tensors.
        MODEL / TOKENIZER: optional overrides; default to SciBERT, loaded
            lazily on first call.  (Bug fix: the defaults used to be
            ``BertModel.from_pretrained(...)`` evaluated at *import* time,
            which loaded the model even when the caller supplied their own.)

    Returns:
        (sentences, sentence_objects) or, with return_embeddings=True,
        (sentences, sentence_objects, initial_embeddings, context_embeddings).
    """
    if txt_path is None:
        raise ValueError("txt_path must be specified as a named argument! "
                         "E.g. txt_path=../dataset/yourfile.txt")
    if MODEL is None:
        MODEL = BertModel.from_pretrained('scibert-scivocab-uncased')
    if TOKENIZER is None:
        TOKENIZER = BertTokenizer.from_pretrained('scibert-scivocab-uncased')
    # Read input sequences from the .txt file and split them into sentences.
    with open(txt_path) as f:
        text = f.read()
    sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
    # Drop a possible empty string produced by the split.  (Bug fix: was a
    # bare `except:` that swallowed every exception.)
    try:
        sentences.remove('')
    except ValueError:
        pass
    list_SentenceObj, ALL_INITIAL_EMBEDDINGS, ALL_CONTEXT_EMBEDDINGS = [], [], []
    for raw_sentence in tqdm(sentences):
        SentenceObj = TransformerSentence(raw_sentence,
                                          model=MODEL,
                                          tokenizer=TOKENIZER)
        SentenceObj.write_summary(print_tokens=False)
        list_SentenceObj.append(SentenceObj)
        # First (index 0) and last (index -1) layer states for this sentence.
        ALL_INITIAL_EMBEDDINGS.append(SentenceObj.summary['states'][0, :, :])
        ALL_CONTEXT_EMBEDDINGS.append(SentenceObj.summary['states'][-1, :, :])
    ALL_INITIAL_EMBEDDINGS = torch.cat(ALL_INITIAL_EMBEDDINGS, dim=0)
    ALL_CONTEXT_EMBEDDINGS = torch.cat(ALL_CONTEXT_EMBEDDINGS, dim=0)
    if return_embeddings:
        return sentences, list_SentenceObj, ALL_INITIAL_EMBEDDINGS, ALL_CONTEXT_EMBEDDINGS
    else:
        return sentences, list_SentenceObj
| 2.546875 | 3 |
cldfspec/commands/component_readmes.py | cldf/cldf | 23 | 12772393 | """
Create the component-specific README files by concatenating `description.md` and
a generated description of the metadata.
"""
import json
from pycldf.terms import Terms
from csvw.metadata import Table
from cldfspec.util import REPO_DIR
def run(args):
    """For every component metadata JSON, write README.md = description.md + generated column table."""
    for p in REPO_DIR.joinpath('components').glob('*/*.json'):
        readme = p.parent.joinpath('description.md').read_text(encoding='utf8')
        cols = table2markdown(Table.fromvalue(json.loads(p.read_text(encoding='utf8'))))
        p.parent.joinpath('README.md').write_text(readme + '\n' + cols, encoding='utf8')
def cardinality(col, term):
    """Return the cardinality label for *col*: the ontology term's cardinality
    when available, else the column's dc:extent, else 'unspecified'.

    The asserts cross-check that the declared cardinality agrees with the
    separator spec; the 'x'/'y'/'z' messages are just markers identifying
    which invariant failed.
    """
    #
    # FIXME: move to pycldf
    #
    res = None
    if term:
        # Make sure, cardinality is consistent with the ontology:
        tcol = term.to_column()
        res = term.cardinality
        assert (res == 'multivalued' and tcol.separator) or \
               (res == 'singlevalued' and not tcol.separator) or \
               (res is None), 'y'
        assert bool(col.separator) == bool(tcol.separator), 'x'
    # Make sure, cardinality is consistent with separator spec:
    card = col.common_props.get('dc:extent')
    assert (card == 'multivalued' and col.separator) or \
           (card == 'singlevalued' and not col.separator) or \
           (card is None), 'z'
    return res or card or 'unspecified'
def colrow(col, pk, TERMS):
    """Render one markdown table row (name | datatype | cardinality | description) for *col*."""
    dt = '`{}`'.format(col.datatype.base if col.datatype else 'string')
    if col.separator:
        dt = 'list of {} (separated by `{}`)'.format(dt, col.separator)
    desc = col.common_props.get('dc:description', '').replace('\n', ' ')
    term = None
    if col.propertyUrl:
        term = TERMS.by_uri.get(col.propertyUrl.uri)
    card = cardinality(col, term)
    # Fall back to the ontology term's comment when the column has no description.
    if (not desc) and term:
        desc = term.comment(one_line=True)
    pk = pk or []
    if col.name in pk:
        desc = (desc + '<br>') if desc else desc
        desc += 'Primary key'
    if term and term.references:
        desc = (desc + '<br>') if desc else desc
        desc += 'References {}'.format(term.references)
    return ' | '.join([
        '[{}]({})'.format(col.name, col.propertyUrl)
        if col.propertyUrl else '`{}`'.format(col.name),
        dt,
        card,
        desc,
    ])
def table2markdown(table):
    """Render a CLDF component table's schema as a markdown section with a column table."""
    res = []
    # Section header: "## [Component](conformsTo-URL): `file.csv`".
    res.append('## [{}]({}): `{}`\n'.format(
        table.common_props['dc:conformsTo'].split('#')[1],
        table.common_props['dc:conformsTo'],
        table.url.string,
    ))
    if table.common_props.get('dc:description'):
        res.append(table.common_props['dc:description'] + '\n')
    res.append('Name/Property | Datatype | Cardinality | Description')
    res.append(' --- | --- | --- | --- ')
    TERMS = Terms(REPO_DIR / 'terms.rdf')
    for col in table.tableSchema.columns:
        res.append(colrow(col, table.tableSchema.primaryKey, TERMS))
    return '\n'.join(res)
| 2.453125 | 2 |
src/initialize_constraints.py | petersiemen/P3_Implement_SLAM | 0 | 12772394 | <reponame>petersiemen/P3_Implement_SLAM<filename>src/initialize_constraints.py
import numpy as np
def initialize_constraints(N, num_landmarks, world_size):
    """Build the initial omega/xi constraint system for Graph SLAM.

    N             -- number of time steps (poses)
    num_landmarks -- number of landmarks
    world_size    -- side length of the square world

    Returns (omega, xi): a square constraint matrix of size
    2*(N + num_landmarks) and a matching column vector, seeded with
    the robot's known starting position at the centre of the world.
    """
    # One x-row and one y-row per pose and per landmark.
    dim = 2 * (N + num_landmarks)
    omega = np.zeros((dim, dim))
    xi = np.zeros((dim, 1))

    # Anchor the initial pose (x0, y0) with full confidence (strength 1)
    # at the centre of the world.
    center = world_size / 2.
    omega[0, 0] = 1
    omega[1, 1] = 1
    xi[0] = center
    xi[1] = center

    return omega, xi
| 3.5625 | 4 |
scripts/count_words.py | Mitrius/TransformerBasedWikification | 0 | 12772395 | #!/home/mitrius/anaconda3/bin/python
from sys import argv

if __name__ == "__main__":
    # Count the distinct whitespace-separated tokens appearing in the
    # second comma-separated field of each line of the input file
    # (path given as the first command-line argument).
    unique_tokens = set()
    with open(argv[1], "r") as source:
        for record in source:
            field = record.split(',')[1].strip()
            unique_tokens.update(field.split(' '))
    print(len(unique_tokens))
| 3.609375 | 4 |
ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/transformations/multipleOperatorComparison/data/out_3.py | JetBrains-Research/ast-transformations | 8 | 12772396 | <gh_stars>1-10
# NOTE(review): judging by its path (ast-transformations .../data/out_3.py),
# this appears to be expected-output fixture data for an AST transformation
# test (multiple-operator comparison splitting) -- the logic is intentionally
# artificial. Presumably it should be regenerated by the transformation
# rather than edited by hand; confirm before changing any expression here.
x = input()
y = input()
z = input()
flag = z % (1 + 1) == 0 and (1 < x and x < 123) or (1 > y and y > x and x > y and y < 123)


def identity(var):
    # Returns its argument unchanged.
    return var


if x ^ y == 1 or (
    x % 2 == 0 and (3 > x and x <= 3 and 3 <= y and y > z and z >= 5) or (
        identity(-1) + hash('hello') < 10 + 120 and 10 + 120 < hash('world') - 1)):
    print(x, y, z)
| 3.515625 | 4 |
demo/config.sample.py | Souukou/ZhetaokeSDK | 20 | 12772397 | APPKEY = "" # appkey,从折淘客获取
SID = # sid,从折淘客获取,与每个淘宝联盟账户对应
PID = "" # 推广位PID,在淘宝联盟中创建,需要是网站的PID
ALIPAY_KEY = "" # 代付安全密匙,在折淘客中设置
| 1.210938 | 1 |
app.py | Renaud17/HSE-KPI | 0 | 12772398 | <filename>app.py
from db import *
from get import *
from subprocess import call
import streamlit as st
import pandas as pd
import datetime
import re
import base64
from datetime import datetime,date
from datetime import datetime, timedelta
from pandas import DataFrame
from io import BytesIO
import xlsxwriter
import plotly.express as px
from PIL import Image
import streamlit.components.v1 as components
from responses import *
from bot import *
from bot import LemTokens,Normalize,Normalize,get_text,load_doc,intent,response,intent,bot_initialize
#imglog = Image.open('logo.jpg')
#st.set_page_config(page_title='HSEbot-KPI', page_icon=imglog, initial_sidebar_state='expanded', layout='wide')# layout="wide"
@st.cache(allow_output_mutation=True)
def to_excel(df):
    """Serialize *df* to an in-memory xlsx workbook and return its raw bytes."""
    buffer = BytesIO()
    excel_writer = pd.ExcelWriter(buffer, engine='xlsxwriter')
    df.to_excel(excel_writer, sheet_name='Sheet1')
    excel_writer.save()  # flush the workbook into the in-memory buffer
    return buffer.getvalue()
@st.cache(allow_output_mutation=True)
def get_table_download_link(df):
    """Build an HTML anchor that downloads *df* as an xlsx attachment.

    in: dataframe
    out: href string (data-URI with the base64-encoded workbook)
    """
    xlsx_bytes = to_excel(df)
    b64 = base64.b64encode(xlsx_bytes)  # b64 looks like b'...'
    # .decode() turns b'abc' into 'abc' for embedding in the href
    return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="extract.xlsx">Votre fichier excel</a>'
#pour verifier le type d'entrée
def inputcheck(inputext):
    """Validate that *inputext* can be parsed as an integer.

    Returns the value converted to int. On invalid input, shows a
    Streamlit error message and halts the script run via st.stop().
    """
    try:
        return int(inputext)
    # FIX: the original used a bare `except:`, which also swallows
    # SystemExit/KeyboardInterrupt; int() only raises these two here.
    except (TypeError, ValueError):
        st.error("Veillez à ne saisir qu'un nombre")
        st.stop()
# Security
#passlib,hashlib,bcrypt,scrypt
import hashlib
@st.cache(allow_output_mutation=True)
def make_hashes(password):
    """Return the hex-encoded SHA-256 digest of *password*."""
    digest = hashlib.sha256(password.encode())
    return digest.hexdigest()
@st.cache(allow_output_mutation=True)
def check_hashes(password, hashed_text):
    """Return *hashed_text* if *password* hashes to it, else False."""
    if make_hashes(password) != hashed_text:
        return False
    return hashed_text
def main():
#couleur du select box
def style():
st.markdown("""<style>
div[data-baseweb="select"]> div {
background-color: yellow;
}
div[role="listbox"] ul {
background-color:white;
}</style>""", unsafe_allow_html=True)
#couleur button
primaryColor = st.get_option("theme.primaryColor")
s = f"""
<style>
div.stButton > button:first-child {{text-shadow:0px 1px 0px #2f6627;font-size:15px; background-color: #71f9ed;border: 5px solid {primaryColor}; border-radius:5px 5px 5px 5px; }}
<style>
"""
st.markdown(s, unsafe_allow_html=True)
#masquer le menu streamlit
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
menu = ["Accueil", "Connexion","Inscription"] #
choice = st.sidebar.selectbox("Menu",menu)
if choice == "Accueil":
components.html("""
<style>
* {box-sizing: border-box}
body {font-family: Verdana, sans-serif; margin:0}
.mySlides {display: none}
img {vertical-align: middle;}
/* Slideshow container */
.slideshow-container {
max-width: 1000px;
position: relative;
margin: auto;
}
/* Next & previous buttons */
.prev, .next {
cursor: pointer;
position: absolute;
top: 50%;
width: auto;
padding: 16px;
margin-top: -22px;
color: white;
font-weight: bold;
font-size: 18px;
transition: 0.6s ease;
border-radius: 0 3px 3px 0;
user-select: none;
}
/* Position the "next button" to the right */
.next {
right: 0;
border-radius: 3px 0 0 3px;
}
/* On hover, add a black background color with a little bit see-through */
.prev:hover, .next:hover {
background-color: rgba(0,0,0,0.8);
}
/* Caption text */
.text {
color: #f2f2f2;
font-size: 15px;
padding: 8px 12px;
position: absolute;
bottom: 8px;
width: 100%;
text-align: center;
}
/* Number text (1/3 etc) */
.numbertext {
color: #f2f2f2;
font-size: 12px;
padding: 8px 12px;
position: absolute;
top: 0;
}
/* The dots/bullets/indicators */
.dot {
cursor: pointer;
height: 15px;
width: 15px;
margin: 0 2px;
background-color: #bbb;
border-radius: 50%;
display: inline-block;
transition: background-color 0.6s ease;
}
.active, .dot:hover {
background-color: #717171;
}
/* Fading animation */
.fade {
-webkit-animation-name: fade;
-webkit-animation-duration: 1.5s;
animation-name: fade;
animation-duration: 1.5s;
}
@-webkit-keyframes fade {
from {opacity: .4}
to {opacity: 1}
}
@keyframes fade {
from {opacity: .4}
to {opacity: 1}
}
/* On smaller screens, decrease text size */
@media only screen and (max-width: 300px) {
.prev, .next,.text {font-size: 11px}
}
</style>
</head>
<body>
<div class="slideshow-container">
<div class="mySlides fade">
<div class="numbertext">1 / 3</div>
<img src="https://cdn.shopify.com/s/files/1/2382/6729/products/SP124958.jpg?v=1536179866" style="width:100%;border-radius:5px;">
<div class="text"></div>
</div>
<div class="mySlides fade">
<div class="numbertext">2 / 3</div>
<img src="https://www.hsetrain.org/images/slide1.jpg" style="width:100%;border-radius:5px;">
<div class="text"></div>
</div>
<div class="mySlides fade">
<div class="numbertext">3 / 3</div>
<img src="https://www.spc.com.sg/wp-content/uploads/2015/11/banner-community-society-hse.jpg" style="width:100%;border-radius:5px;">
<div class="text"></div>
</div>
<a class="prev" onclick="plusSlides(-1)">❮</a>
<a class="next" onclick="plusSlides(1)">❯</a>
</div>
<br>
<div style="text-align:center">
<span class="dot" onclick="currentSlide(1)"></span>
<span class="dot" onclick="currentSlide(2)"></span>
<span class="dot" onclick="currentSlide(3)"></span>
</div>
<script>
var slideIndex = 1;
showSlides(slideIndex);
function plusSlides(n) {
showSlides(slideIndex += n);
}
function currentSlide(n) {
showSlides(slideIndex = n);
}
function showSlides(n) {
var i;
var slides = document.getElementsByClassName("mySlides");
var dots = document.getElementsByClassName("dot");
if (n > slides.length) {slideIndex = 1}
if (n < 1) {slideIndex = slides.length}
for (i = 0; i < slides.length; i++) {
slides[i].style.display = "none";
}
for (i = 0; i < dots.length; i++) {
dots[i].className = dots[i].className.replace(" active", "");
}
slides[slideIndex-1].style.display = "block";
dots[slideIndex-1].className += " active";
}
</script>
""")
html_temp = """
<div style="background-color:#464e5f;padding:10px;border-radius:10px;margin:3px;">
<h1 style="font-family: 'BadaBoom BB', sans-serif;color:white;text-align:center;"><b>HSE KPI RECORDER & HSEbot</b></h1>
</div>
"""
#components.html(html_temp)
st.markdown(html_temp, unsafe_allow_html = True)
st.markdown("✨ **Elle est une application d'analyse et de suivi des indicateurs de performance HSE dotée d'une intelligence artificielle pour identifier et prevenir les risques et dangers au travail.**")
st.markdown("✨ **Vous pouvez ajouter; modifier; supprimer et visualiser vos données avec des graphes.**")
st.markdown("✨ **Vous pouvez aussi téléchager vos données selon des intervalles de date.**")
st.markdown("✨ **HSEbot vous permet de discuter de manière inter-active avec une intelligence artificielle qui vous donne des conseils de prévention sur les risques au chantier.**")
image_BOT = """
<center><img src="https://www.trainingjournal.com/sites/www.trainingjournal.com/files/styles/original_-_local_copy/entityshare/23924%3Fitok%3DKw_wPH9G" alt="HSEBOT" height="150" width="200"></center>
"""
col1, col2, col3 = st.beta_columns([1,10,1])
with col2:
st.markdown(image_BOT, unsafe_allow_html = True)
#st.image("https://www.trainingjournal.com/sites/www.trainingjournal.com/files/styles/original_-_local_copy/entityshare/23924%3Fitok%3DKw_wPH9G",width=400,)
#Bot HSE
user_input = get_text()
response = bot_initialize(user_input)
st.text_area("HSEBot:", value=response, height=200, max_chars=None, key=None)
elif choice == "Connexion":
st.subheader("Section Connexion")
email = st.sidebar.text_input("Email")
password = st.sidebar.text_input("Mot de passe",type='password')
if st.sidebar.checkbox("Connexion"):
# if password == '<PASSWORD>':
create_table()
hashed_pswd = make_hashes(password)
result = login_user(email,check_hashes(password,hashed_pswd))
if result:
st.success("Connecté en tant que {}".format(email))
#task = st.selectbox("Task",["Add Post","Analytics","Profiles"])
task = ""
if task == "":
st.subheader("")
image_temp ="""
<div style="background-color:#464e5f;padding:10px;border-radius:5px;margin:10px;">
<img src="https://1tpecash.fr/wp-content/uploads/elementor/thumbs/Renaud-Louis-osf6t5lcki4q31uzfafpi9yx3zp4rrq7je8tj6p938.png" alt="Avatar" style="vertical-align: middle;width: 50px;height: 50px;border-radius: 50%;" >
<br/>
<p style="color:white;text-align:justify">Bienvenue ! Je vous souhaite une bonne expérience, ce travail est le fruit de mes expériences en tant que Manager HSE et Data scientist vos avis à propos sont les bienvenues.</p>
</div>
"""
title_temp = """
<div style="background-color:#464e5f;padding:10px;border-radius:5px;margin:10px;">
<h1 style ="color:white;text-align:center;"> GESTION DES INDICATEURS HSE </h1>
</div>
"""
st.markdown(image_temp, unsafe_allow_html = True)
st.markdown(title_temp, unsafe_allow_html = True)
#st.markdown('### GESTION DES INDICATEURS HSE')
style()
choix = st.selectbox("", ["AJOUTER", "AFFICHER", "METTRE À JOUR", "SUPPRIMER"])
if choix == "AJOUTER":
st.subheader("AJOUTER DES DONNÉES")
col1, col2= st.beta_columns(2)
with col1:
st.subheader("CIBLE A ENREGISTRER")
style()
cible = st.selectbox('', ['Accueil sécurité','Briefing de sécurité( TBM)','Non conformité','Changements enregistrés','Anomalies','Analyse des risques réalisés(JSA)','Incident & Accident',"Audit-Inspection-Exercice d'urgence"])
#connexion à l'interface et recupération des données
if cible == 'Accueil sécurité':
with col1:
Nbre_Arrivant =inputcheck(st.text_input("Nombre Arrivant",value=0))
Nbre_induction = inputcheck(st.text_input("Nombre d'induction",value=0))
IDD=email
with col2:
st.subheader("DATE ET NOM DU CHANTIER")
Date = st.date_input("Date")
Chantier = st.text_input("Chantier")
button1=st.button("AJOUTER LES DÉTAILS")
if button1:
add_Accueil(IDD,Chantier,Nbre_Arrivant,Nbre_induction,Date)
st.success("AJOUTÉ AVEC SUCCÈS: {}".format(Chantier))
elif cible == 'Briefing de sécurité( TBM)':
with col1:
Nbre_chantier =inputcheck(st.text_input("Nombre de chantier",value=0))
Nbre_TBM = inputcheck(st.text_input("Nombre de TBM",value=0))
IDD=email
with col2:
st.subheader("DATE ET NOM DU CHANTIER")
Date = st.date_input("Date")
Chantier = st.text_input("Chantier")
button2=st.button("AJOUTER LES DÉTAILS")
if button2:
add_TBM(IDD,Chantier,Nbre_chantier,Nbre_TBM,Date)
st.success("AJOUTÉ AVEC SUCCÈS: {}".format(Chantier))
elif cible == 'Non conformité':
with col1:
NCR = inputcheck(st.text_input("Nombre de Non conformité remontée",value=0,key=0))
FNCR = inputcheck(st.text_input("Nombre de fiche de Non conformité remontée",value=0,key=1))
NCC = inputcheck(st.text_input("Nombre de Non conformité cloturée",value=0,key=2))
FNCC= inputcheck(st.text_input("Nombre de fiche de Non conformité cloturée",value=0, key=3))
IDD=email
with col2:
st.subheader("DATE ET NOM DU CHANTIER")
Date = st.date_input("Date")
Chantier = st.text_input("Chantier")
button3=st.button("AJOUTER LES DÉTAILS")
if button3:
add_NC(IDD,Chantier,NCR,FNCR,NCC,FNCC,Date)
st.success("AJOUTÉ AVEC SUCCÈS: {}".format(Chantier))
elif cible == "Changements enregistrés":
with col1:
NCH = inputcheck(st.text_input("Nombre de Changement enregistrés",value=0))
FNCH = inputcheck(st.text_input("Nombre de fiche de Changements enregistrés",value=0))
NCHC = inputcheck(st.text_input("Nombre de Changements cloturés",value=0))
FNCHC= inputcheck(st.text_input("Nombre de fiche de Changements suivis et cloturés",value=0))
IDD=email
with col2:
st.subheader("DATE ET NOM DU CHANTIER")
Date = st.date_input("Date")
Chantier = st.text_input("Chantier")
button4=st.button("AJOUTER LES DÉTAILS")
if button4:
add_Changements(IDD,Chantier,NCH,FNCH,NCHC,FNCHC,Date)
st.success("AJOUTÉ AVEC SUCCÈS: {}".format(Chantier))
elif cible == "Anomalies":
with col1:
NA = inputcheck(st.text_input("Nombre d'Anomalies Remontées",value=0))
FNA = inputcheck(st.text_input("Nombre de fiche d'Anomalies Remontées",value=0))
NAC = inputcheck(st.text_input("Nombre d' Anomalies cloturés",value=0))
FNAC = inputcheck(st.text_input("Nombre de fiche de Anomalies Corrigées",value=0))
IDD=email
with col2:
st.subheader("DATE ET NOM DU CHANTIER")
Date = st.date_input("Date")
Chantier = st.text_input("Chantier")
button5=st.button("AJOUTER LES DÉTAILS")
if button5:
add_Anomalies(IDD,Chantier,NA,FNA,NAC,FNAC,Date)
st.success("AJOUTÉ AVEC SUCCÈS: {}".format(Chantier))
elif cible == "Analyse des risques réalisés(JSA)":
with col1:
NAct = inputcheck(st.text_input("Nombre d'Activite",value=0))
NJSA = inputcheck(st.text_input("Nombre de fiche JSA",value=0))
IDD=email
with col2:
st.subheader("DATE ET NOM DU CHANTIER")
Date = st.date_input("Date")
Chantier = st.text_input("Chantier")
button6=st.button("AJOUTER LES DÉTAILS")
if button6:
add_JSA(IDD,Chantier,NAct,NJSA,Date)
st.success("AJOUTÉ AVEC SUCCÈS: {}".format(Chantier))
elif cible == "Incident & Accident":
with col1:
AAA = inputcheck(st.text_input("Accident Avec Arrêt",value=0))
NJP = inputcheck(st.text_input("Nombre de jours perdus",value=0))
ASA = inputcheck(st.text_input("Accident Sans Arrêt",value=0))
AT = inputcheck(st.text_input("Nombre d'accident de trajet",value=0))
NInc = inputcheck(st.text_input("Incident",value=0))
IDD=email
with col2:
st.subheader("DATE ET NOM DU CHANTIER")
Date = st.date_input("Date")
Chantier = st.text_input("Chantier")
button7=st.button("AJOUTER LES DÉTAILS")
if button7:
add_Incident_Accident(IDD,Chantier,NInc,AAA,ASA,AT,NJP,Date)
st.success("AJOUTÉ AVEC SUCCÈS: {}".format(Chantier))
elif cible == "Audit-Inspection-Exercice d'urgence":
with col1:
AC= inputcheck(st.text_input("Nombre d'audit",value=0))
VC= inputcheck(st.text_input("Nombre de Visite Conjointe",value=0))
NEU= inputcheck(st.text_input("Nombre d'exercice d'urgence",value=0))
SMPAR= inputcheck(st.text_input("Sensibilisation au modes de prévention des activités à risques",value=0))
PR= inputcheck(st.text_input("Procedures réalisées",value=0))
IE= inputcheck(st.text_input("Inspections Environnementales",value=0))
IDD=email
with col2:
st.subheader("DATE ET NOM DU CHANTIER")
Date = st.date_input("Date")
Chantier = st.text_input("Chantier")
button8=st.button("AJOUTER LES DÉTAILS")
if button8:
add_Audit(IDD,Chantier,AC,VC,NEU,SMPAR,PR,IE,Date)
st.success("AJOUTÉ AVEC SUCCÈS: {}".format(Chantier))
#visualisation des données
elif choix == "AFFICHER":
st.subheader("AFFICHEZ VOS DONNÉES")
st.warning("Si vous faites des enregistrements à une date antérieure à celle de votre inscription veuillez spécifier l'intervalle de date, car l'affichage des données est par défaut à partir de votre jour d'inscription.")
ACCUEIL_exp= st.beta_expander("ACCUEIL SECURITÉ")
with ACCUEIL_exp:
df_Accueil = pd.DataFrame(view_Accueil(), columns=["id","IDD","Chantier","Nbre_Arrivant","Nbre_induction","Date"])
#pour voir uniquement les donnée de l'user connecté
IDD2 = email.strip('][').split(', ')
#ACCUEIL
@st.cache
def Accueil_2(df_Accueil: pd.DataFrame) -> pd.DataFrame:
df_Accueil2 = df_Accueil[(df_Accueil["IDD"].isin(IDD2))]
return df_Accueil2.loc[1:, ["id","Chantier","Nbre_Arrivant","Nbre_induction","Date"]]
# Pour empêcher l'affichage d'erreur en cas de donnée vide
try:
df_Accueil1 = Accueil_2(df_Accueil)
except:
st.error("Nous ne pouvons afficher, car vous n'avez pas de donnée enregistrée.")
st.stop()
df_Accueil1['Date'] = pd.to_datetime(df_Accueil1['Date']).apply(lambda x: x.date())
df_Accueil1.sort_values(by=['Date'], inplace=True)
#intervalle de date
st.write('SELECTIONNEZ UN INTERVALLE DE DATE POUR VOTRE GRILLE')
try:
miny= st.date_input('MinDate',min(df_Accueil1['Date']))
maxy= st.date_input('MaxDate',max(df_Accueil1['Date']))
except:
st.error("Nous ne pouvons afficher, car vous n'avez pas au moins deux dates enregistrées.")
st.stop()
#filtrage par chantier
splitted_df_Accueil1 = df_Accueil1['Chantier'].str.split(',')
unique_vals1 = list(dict.fromkeys([y for x in splitted_df_Accueil1 for y in x]).keys())
filtrechantier = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals1,key=0)
mask = (df_Accueil1['Date'] > miny) & (df_Accueil1['Date'] <= maxy) & (df_Accueil1['Chantier'] == filtrechantier)
df_filter1=df_Accueil1.loc[mask]
st.dataframe(df_filter1)
st.text("*Nbre_Arrivant: Nombre d'arrivant\n*Nbre_induction: Nombre d'induction")
if st.button("Télécharger",key=0):
st.markdown(get_table_download_link(df_filter1), unsafe_allow_html=True)
#figure
df_filter1['Nbre_Arrivant'] = pd.to_numeric(df_filter1['Nbre_Arrivant'])
df_filter1['Nbre_induction'] = pd.to_numeric(df_filter1['Nbre_induction'])
Objectf_fixé= df_filter1['Nbre_Arrivant'].sum()
Objectif_atteint = df_filter1['Nbre_induction'].sum()
df_filter1_df = pd.DataFrame(columns=["Nombre d'arrivant", "Nombre d'induction"])
df_filter1_df.at[0, "Nombre d'arrivant"] = Objectf_fixé
df_filter1_df.at[0, "Nombre d'induction"] = Objectif_atteint
df_filter1_df_melt = pd.melt(df_filter1_df)
df_filter1_df_melt.columns = ['variable', 'valeur']
st.dataframe(df_filter1_df_melt)
fig = px.bar(df_filter1_df_melt, x = 'variable', y = 'valeur',color="variable")
st.plotly_chart(fig, use_container_width=True)
BRIEFING_exp= st.beta_expander("BRIEFING DE SÉCURITÉ( TBM)")
with BRIEFING_exp:
#TMB
df_TBM = pd.DataFrame(view_TBM(), columns=["id","IDD","Chantier","Nbre_chantier","Nbre_TBM","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def TBM_2(df_TBM: pd.DataFrame) -> pd.DataFrame:
df_TBM2 = df_TBM[(df_TBM["IDD"].isin(IDD2))]
return df_TBM2.loc[1:, ["id","Chantier","Nbre_chantier","Nbre_TBM","Date"]]
# Pour empêcher l'affichage d'erreur en cas de donnée vide
try:
df_TBM1 = TBM_2(df_TBM)
except:
st.error("Nous ne pouvons afficher, car vous n'avez pas de donnée enregistrée.")
st.stop()
df_TBM1['Date'] = pd.to_datetime(df_TBM1['Date']).apply(lambda x: x.date())
df_TBM1.sort_values(by=['Date'], inplace=True)
#intervalle de date
st.write('SELECTIONNEZ UN INTERVALLE DE DATE POUR VOTRE GRILLE')
try:
miny= st.date_input('MinDate',min(df_TBM1['Date']),key=0)
maxy= st.date_input('MaxDate',max(df_TBM1['Date']),key=0)
except:
st.error("Nous ne pouvons afficher, car vous n'avez pas au moins deux dates enregistrées.")
st.stop()
#filtrage par chantier
splitted_df_TBM1 = df_TBM1['Chantier'].str.split(',')
unique_vals2 = list(dict.fromkeys([y for x in splitted_df_TBM1 for y in x]).keys())
filtrechantier2 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals2,key=1)
mask = (df_TBM1['Date'] > miny) & (df_TBM1['Date'] <= maxy) & (df_TBM1['Chantier'] == filtrechantier2)
df_filter2=df_TBM1.loc[mask]
st.dataframe(df_filter2)
st.text("*Nbre_chantier: Nombre de chantier\n*Nbre_TBM: Nombre de TBM")
if st.button("Télécharger", key=1):
st.markdown(get_table_download_link(df_filter2), unsafe_allow_html=True)
#figure
df_filter2['Nbre_chantier'] = pd.to_numeric(df_filter2['Nbre_chantier'])
df_filter2['Nbre_TBM'] = pd.to_numeric(df_filter2['Nbre_TBM'])
Objectf_fixé2= df_filter2['Nbre_chantier'].sum()
Objectif_atteint2 = df_filter2['Nbre_TBM'].sum()
df_filter2_df = pd.DataFrame(columns=["Nombre de chantier", "Nombre de TBM"])
df_filter2_df.at[0, "Nombre de chantier"] = Objectf_fixé2
df_filter2_df.at[0, "Nombre de TBM"] = Objectif_atteint2
df_filter2_df_melt = pd.melt(df_filter2_df)
df_filter2_df_melt.columns = ['variable', 'valeur']
st.dataframe(df_filter2_df_melt)
figTBM = px.bar(df_filter2_df_melt, x = 'variable', y = 'valeur',color="variable")
st.plotly_chart(figTBM, use_container_width=True)
CONFORMITÉ_exp= st.beta_expander("NON CONFORMITÉ")
with CONFORMITÉ_exp:
#NON CONFORMITÉ
df_NC = pd.DataFrame(view_NC(), columns=["id","IDD","Chantier","NCR","FNCR","NCC","FNCC","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def NC_2(df_NC: pd.DataFrame) -> pd.DataFrame:
df_NC2 = df_NC[(df_NC["IDD"].isin(IDD2))]
return df_NC2.loc[1:, ["id","Chantier","NCR","FNCR","NCC","FNCC","Date"]]
# Pour empêcher l'affichage d'erreur en cas de donnée vide
try:
df_NC1 = NC_2(df_NC)
except:
st.error("Nous ne pouvons afficher, car vous n'avez pas de donnée enregistrée.")
st.stop()
df_NC1['Date'] = pd.to_datetime(df_NC1['Date']).apply(lambda x: x.date())
df_NC1.sort_values(by=['Date'], inplace=True)
#intervalle de date
st.write('SELECTIONNEZ UN INTERVALLE DE DATE POUR VOTRE GRILLE')
try:
miny= st.date_input('MinDate',min(df_NC1['Date']),key=1)
maxy= st.date_input('MaxDate',max(df_NC1['Date']),key=1)
except:
st.error("Nous ne pouvons afficher car vous n'avez pas aumoins deux dates enrégistrées.")
st.stop()
#filtrage par chantier
splitted_df_NC1 = df_NC1['Chantier'].str.split(',')
unique_vals3 = list(dict.fromkeys([y for x in splitted_df_NC1 for y in x]).keys())
filtrechantier3 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals3,key=2)
mask = (df_NC1['Date'] > miny) & (df_NC1['Date'] <= maxy) & (df_NC1['Chantier'] == filtrechantier3)
df_filter3=df_NC1.loc[mask]
st.dataframe(df_filter3)
st.text("*NCR: Non conformité remontée\n*FNCR: Nombre de fiche de Non conformité remontée\n*NCC: Nombre de Non conformité cloturée\n*FNCC:Nombre de fiche de Non conformité cloturée")
if st.button("Télécharger", key=2):
st.markdown(get_table_download_link(df_filter3), unsafe_allow_html=True)
#figure
df_filter3['NCR'] = pd.to_numeric(df_filter3['NCR'])
df_filter3['NCC'] = pd.to_numeric(df_filter3['NCC'])
df_filter3['FNCR'] = pd.to_numeric(df_filter3['FNCR'])
df_filter3['FNCC'] = pd.to_numeric(df_filter3['FNCC'])
Objectf_fixe3 = df_filter3['NCR'].sum()
Objectif_atteint3 = df_filter3['NCC'].sum()
Objectf_fixe4= df_filter3['FNCR'].sum()
Objectif_atteint4 = df_filter3['FNCC'].sum()
df_filter3_df1 = pd.DataFrame(columns=["NCR", "NCC"])
df_filter3_df2 = pd.DataFrame(columns=["FNCR", "FNCC"])
df_filter3_df1.at[0, "NCR"] = Objectf_fixe3
df_filter3_df1.at[0, "NCC"] = Objectif_atteint3
df_filter3_df2.at[0, "FNCR"] = Objectf_fixe4
df_filter3_df2.at[0, "FNCC"] = Objectif_atteint4
df_filter3_df_melt1 = pd.melt(df_filter3_df1)
df_filter3_df_melt2 = pd.melt(df_filter3_df2)
df_filter3_df_melt1.columns = ['variable', 'valeur']
df_filter3_df_melt2.columns = ['variable', 'valeur']
st.dataframe(df_filter3_df_melt1)
st.dataframe(df_filter3_df_melt2)
figNC1 = px.bar(df_filter3_df_melt1, x = 'variable', y = 'valeur',color="variable")
figNC2 = px.bar(df_filter3_df_melt2, x = 'variable', y = 'valeur',color="variable")
st.plotly_chart(figNC1, use_container_width=True)
st.plotly_chart(figNC2, use_container_width=True)
CHANGEMENTS_exp= st.beta_expander("CHANGEMENTS ENREGISTRÉS")
with CHANGEMENTS_exp:
#CHANGEMENTS
df_Changements = pd.DataFrame(view_Changements(), columns=["id","IDD","Chantier","NCH","FNCH","NCHC","FNCHC","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def Changements_2(df_Changements: pd.DataFrame) -> pd.DataFrame:
df_Changements2 = df_Changements[(df_Changements["IDD"].isin(IDD2))]
return df_Changements2.loc[1:, ["id","Chantier","NCH","FNCH","NCHC","FNCHC","Date"]]
# Pour empêcher l'affichage d'erreur en cas de donnée vide
try:
df_Changements1 = Changements_2(df_Changements)
except:
st.error("Nous ne pouvons afficher, car vous n'avez pas de donnée enregistrée.")
st.stop()
df_Changements1['Date'] = pd.to_datetime(df_Changements1['Date']).apply(lambda x: x.date())
df_Changements1.sort_values(by=['Date'], inplace=True)
#intervalle de date
st.write('SELECTIONNEZ UN INTERVALLE DE DATE POUR VOTRE GRILLE')
try:
miny= st.date_input('MinDate',min(df_Changements1['Date']),key=2)
maxy= st.date_input('MaxDate',max(df_Changements1['Date']),key=2)
except:
st.error("Nous ne pouvons afficher car vous n'avez pas aumoins deux dates enrégistrées")
st.stop()
#filtrage par chantier
splitted_df_Changements1 = df_Changements1['Chantier'].str.split(',')
unique_vals4 = list(dict.fromkeys([y for x in splitted_df_Changements1 for y in x]).keys())
filtrechantier4 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals4,key=3)
mask = (df_Changements1['Date'] > miny) & (df_Changements1['Date'] <= maxy) & (df_Changements1['Chantier'] == filtrechantier4)
df_filter4=df_Changements1.loc[mask]
st.dataframe(df_filter4)
st.text("*NCH: Nombre de Changement enregistrés\n*FNCH: Nombre de fiche de Changements enregistrés\n*NCHC: Nombre de Changements cloturés\n*FNCHC:Nombre de fiche de Changements suivis et cloturés")
if st.button("Télécharger", key=3):
st.markdown(get_table_download_link(df_filter4), unsafe_allow_html=True)
#figure
df_filter4['NCH'] = pd.to_numeric(df_filter4['NCH'])
df_filter4['NCHC'] = pd.to_numeric(df_filter4['NCHC'])
df_filter4['FNCH'] = pd.to_numeric(df_filter4['FNCH'])
df_filter4['FNCHC'] = pd.to_numeric(df_filter4['FNCHC'])
Objectf_fixe4 = df_filter4['NCH'].sum()
Objectif_atteint4 = df_filter4['NCHC'].sum()
Objectf_fixe5= df_filter4['FNCH'].sum()
Objectif_atteint5 = df_filter4['FNCHC'].sum()
df_filter4_df1 = pd.DataFrame(columns=["NCH", "NCHC"])
df_filter4_df2 = pd.DataFrame(columns=["FNCH", "FNCHC"])
df_filter4_df1.at[0, "NCH"] = Objectf_fixe4
df_filter4_df1.at[0, "NCHC"] = Objectif_atteint4
df_filter4_df2.at[0, "FNCH"] = Objectf_fixe5
df_filter4_df2.at[0, "FNCHC"] = Objectif_atteint5
df_filter4_df_melt1 = pd.melt(df_filter4_df1)
df_filter4_df_melt2 = pd.melt(df_filter4_df2)
df_filter4_df_melt1.columns = ['variable', 'valeur']
df_filter4_df_melt2.columns = ['variable', 'valeur']
st.dataframe(df_filter4_df_melt1)
st.dataframe(df_filter4_df_melt2)
figCH1 = px.bar(df_filter4_df_melt1, x = 'variable', y = 'valeur',color="variable")
figCH2 = px.bar(df_filter4_df_melt2, x = 'variable', y = 'valeur',color="variable")
st.plotly_chart(figCH1, use_container_width=True)
st.plotly_chart(figCH2, use_container_width=True)
ANOMALIES_exp= st.beta_expander("ANOMALIES")
with ANOMALIES_exp:
#ANOMALIES
df_Anomalies = pd.DataFrame(view_Anomalies(), columns=["id","IDD","Chantier","NA","FNA","NAC","FNAC","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def Anomalies_2(df_Anomalies: pd.DataFrame) -> pd.DataFrame:
df_Anomalies2 = df_Anomalies[(df_Anomalies["IDD"].isin(IDD2))]
return df_Anomalies2.loc[1:, ["id","Chantier","NA","FNA","NAC","FNAC","Date"]]
# Pour empêcher l'affichage d'erreur en cas de donnée vide
try:
df_Anomalies1 = Anomalies_2(df_Anomalies)
except:
st.error("Nous ne pouvons afficher, car vous n'avez pas de donnée enregistrée.")
st.stop()
df_Anomalies1['Date'] = pd.to_datetime(df_Anomalies1['Date']).apply(lambda x: x.date())
df_Anomalies1.sort_values(by=['Date'], inplace=True)
#intervalle de date
st.write('SELECTIONNEZ UN INTERVALLE DE DATE POUR VOTRE GRILLE')
try:
miny= st.date_input('MinDate',min(df_Anomalies1['Date']),key=3)
maxy= st.date_input('MaxDate',max(df_Anomalies1['Date']),key=3)
except:
st.error("Nous ne pouvons afficher car vous n'avez pas aumoins deux dates enrégistrées")
st.stop()
#filtrage par chantier
splitted_df_Anomalies1 = df_Anomalies1['Chantier'].str.split(',')
unique_vals5 = list(dict.fromkeys([y for x in splitted_df_Anomalies1 for y in x]).keys())
filtrechantier5 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals5,key=4)
mask = (df_Anomalies1['Date'] > miny) & (df_Anomalies1['Date'] <= maxy) & (df_Anomalies1['Chantier'] == filtrechantier5)
df_filter5=df_Anomalies1.loc[mask]
st.dataframe(df_filter5)
st.text("*NA: Nombre d'anomalies enregistrés\n*FNA: Nombre de fiche d'anomalies enregistrés\n*NAC: Nombre d'anomalies Corrigées\n*FNAC:Nombre de fiche d'anomalies Corrigées")
if st.button("Télécharger", key=4):
st.markdown(get_table_download_link(df_filter5), unsafe_allow_html=True)
#figure
df_filter5['NA'] = pd.to_numeric(df_filter5['NA'])
df_filter5['NAC'] = pd.to_numeric(df_filter5['NAC'])
df_filter5['FNA'] = pd.to_numeric(df_filter5['FNA'])
df_filter5['FNAC'] = pd.to_numeric(df_filter5['FNAC'])
Objectf_fixe5 = df_filter5['NA'].sum()
Objectif_atteint5 = df_filter5['NAC'].sum()
Objectf_fixe6= df_filter5['FNA'].sum()
Objectif_atteint6 = df_filter5['FNAC'].sum()
df_filter5_df1 = pd.DataFrame(columns=["NA", "NAC"])
df_filter5_df2 = pd.DataFrame(columns=["FNA", "FNAC"])
df_filter5_df1.at[0, "NA"] = Objectf_fixe5
df_filter5_df1.at[0, "NAC"] = Objectif_atteint5
df_filter5_df2.at[0, "FNA"] = Objectf_fixe6
df_filter5_df2.at[0, "FNAC"] = Objectif_atteint6
df_filter5_df_melt1 = pd.melt(df_filter5_df1)
df_filter5_df_melt2 = pd.melt(df_filter5_df2)
df_filter5_df_melt1.columns = ['variable', 'valeur']
df_filter5_df_melt2.columns = ['variable', 'valeur']
st.dataframe(df_filter5_df_melt1)
st.dataframe(df_filter5_df_melt2)
figNA1 = px.bar(df_filter5_df_melt1, x = 'variable', y = 'valeur',color="variable")
figNA2 = px.bar(df_filter5_df_melt2, x = 'variable', y = 'valeur',color="variable")
st.plotly_chart(figNA1, use_container_width=True)
st.plotly_chart(figNA2, use_container_width=True)
# --- Job Safety Analysis (JSA) section: filtered grid, download link, summary chart ---
ANALYSE_exp= st.beta_expander("ANALYSE DES RISQUES RÉALISÉS(JSA)")
with ANALYSE_exp:
    # JSA records; restrict to the logged-in user's rows only.
    df_JSA = pd.DataFrame(view_JSA(), columns=["id","IDD","Chantier","NAct","NJSA","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def JSA_2(df_JSA: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_JSA2 = df_JSA[(df_JSA["IDD"].isin(IDD2))]
        return df_JSA2.loc[1:, ["id","Chantier","NAct","NJSA","Date"]]
    # Guard against an empty table (no data recorded yet).
    try:
        df_JSA1 = JSA_2(df_JSA)
    except:
        st.error("Nous ne pouvons afficher, car vous n'avez pas de donnée enregistrée.")
        st.stop()
    df_JSA1['Date'] = pd.to_datetime(df_JSA1['Date']).apply(lambda x: x.date())
    df_JSA1.sort_values(by=['Date'], inplace=True)
    # Date-range pickers (defaults: the data's own min/max dates).
    st.write('SELECTIONNEZ UN INTERVALLE DE DATE POUR VOTRE GRILLE')
    try:
        miny= st.date_input('MinDate',min(df_JSA1['Date']),key=4)
        maxy= st.date_input('MaxDate',max(df_JSA1['Date']),key=4)
    except:
        st.error("Nous ne pouvons afficher, car vous n'avez pas au moins deux dates enregistrées.")
        st.stop()
    # Filter by site (chantier).
    splitted_df_JSA1 = df_JSA1['Chantier'].str.split(',')
    unique_vals6 = list(dict.fromkeys([y for x in splitted_df_JSA1 for y in x]).keys())
    filtrechantier6 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals6,key=5)
    # NOTE(review): lower bound is exclusive (> miny) but upper is inclusive
    # (<= maxy) — confirm this asymmetry is intended (same pattern file-wide).
    mask = (df_JSA1['Date'] > miny) & (df_JSA1['Date'] <= maxy) & (df_JSA1['Chantier'] == filtrechantier6)
    df_filter6=df_JSA1.loc[mask]
    st.dataframe(df_filter6)
    st.text("*NAct: Nombre d'activité\n*NJSA: Analyse des risques réalisés")
    if st.button("Télécharger", key=5):
        st.markdown(get_table_download_link(df_filter6), unsafe_allow_html=True)
    # Figure: total activities vs. total completed JSAs.
    df_filter6['NAct'] = pd.to_numeric(df_filter6['NAct'])
    df_filter6['NJSA'] = pd.to_numeric(df_filter6['NJSA'])
    Objectf_fixé6= df_filter6['NAct'].sum()
    Objectif_atteint6 = df_filter6['NJSA'].sum()
    df_filter6_df = pd.DataFrame(columns=["NAct", "NJSA"])
    df_filter6_df.at[0, "NAct"] = Objectf_fixé6
    df_filter6_df.at[0, "NJSA"] = Objectif_atteint6
    df_filter6_df_melt = pd.melt(df_filter6_df)
    df_filter6_df_melt.columns = ['variable', 'valeur']
    st.dataframe(df_filter6_df_melt)
    figJSA = px.bar(df_filter6_df_melt, x = 'variable', y = 'valeur',color="variable")
    st.plotly_chart(figJSA, use_container_width=True)
# --- Incidents & accidents section: filtered grid, download link, summary chart ---
INCIDENT_exp= st.beta_expander("INCIDENT & ACCIDENT")
with INCIDENT_exp:
    # Incident/accident records; restrict to the logged-in user's rows only.
    df_IA = pd.DataFrame(view_Incident_Accident(), columns=["id","IDD","Chantier","NInc","AAA","ASA","AT","NJP","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def IA_2(df_IA: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_IA = df_IA[(df_IA["IDD"].isin(IDD2))]
        return df_IA.loc[1:, ["id","Chantier","NInc","AAA","ASA","AT","NJP","Date"]]
    # Guard against an empty table (no data recorded yet).
    try:
        df_IA1 = IA_2(df_IA)
    except:
        st.error("Nous ne pouvons afficher, car vous n'avez pas de donnée enregistrée.")
        st.stop()
    df_IA1['Date'] = pd.to_datetime(df_IA1['Date']).apply(lambda x: x.date())
    df_IA1.sort_values(by=['Date'], inplace=True)
    # Date-range pickers.
    st.write('SELECTIONNEZ UN INTERVALLE DE DATE POUR VOTRE GRILLE')
    try:
        miny= st.date_input('MinDate',min(df_IA1['Date']),key=5)
        maxy= st.date_input('MaxDate',max(df_IA1['Date']),key=5)
    except:
        st.error("Nous ne pouvons afficher car vous n'avez pas aumoins deux dates enrégistrées")
        st.stop()
    # Filter by site (chantier).
    splitted_df_IA1 = df_IA1['Chantier'].str.split(',')
    unique_vals7 = list(dict.fromkeys([y for x in splitted_df_IA1 for y in x]).keys())
    filtrechantier7 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals7,key=6)
    mask = (df_IA1['Date'] > miny) & (df_IA1['Date'] <= maxy) & (df_IA1['Chantier'] == filtrechantier7)
    df_filter7=df_IA1.loc[mask]
    st.dataframe(df_filter7)
    st.text("*NInc: Incident\n*AAA: Accident avec arrêt\n*ASA: Accident sans arrêt\n*AT:Accident de trajet\n*NJP:Nombre de jours perdus")
    if st.button("Télécharger", key=6):
        st.markdown(get_table_download_link(df_filter7), unsafe_allow_html=True)
    # Figure: totals per indicator (incident, accidents with/without leave, commute, days lost).
    df_filter7['NInc'] = pd.to_numeric(df_filter7['NInc'])
    df_filter7['AAA'] = pd.to_numeric(df_filter7['AAA'])
    df_filter7['ASA'] = pd.to_numeric(df_filter7['ASA'])
    df_filter7['AT'] = pd.to_numeric(df_filter7['AT'])
    df_filter7['NJP'] = pd.to_numeric(df_filter7['NJP'])
    Objectf_fixe6 = df_filter7['NInc'].sum()
    Objectf_fixe7 = df_filter7['AAA'].sum()
    Objectf_fixe8= df_filter7['ASA'].sum()
    Objectf_fixe9= df_filter7['AT'].sum()
    Objectf_fixe10 = df_filter7['NJP'].sum()
    df_filter7_df1 = pd.DataFrame(columns=["NInc","AAA","ASA","AT","NJP"])
    df_filter7_df1.at[0, "NInc"] = Objectf_fixe6
    df_filter7_df1.at[0, "AAA"] = Objectf_fixe7
    df_filter7_df1.at[0, "ASA"] = Objectf_fixe8
    df_filter7_df1.at[0, "AT"] = Objectf_fixe9
    df_filter7_df1.at[0, "NJP"] = Objectf_fixe10
    df_filter7_df_melt1 = pd.melt(df_filter7_df1)
    df_filter7_df_melt1.columns = ['variable', 'valeur']
    st.dataframe(df_filter7_df_melt1)
    figIA = px.bar(df_filter7_df_melt1, x = 'variable', y = 'valeur',color="variable")
    st.plotly_chart(figIA, use_container_width=True)
# --- Audits / joint visits / prevention / inspections: filtered grid + summary chart ---
AUDIT_exp= st.beta_expander("AUDIT CHANTIER; VISITE CONJOINTE; PRÉVENTION ET INSPECTION")
with AUDIT_exp:
    # Audit records; restrict to the logged-in user's rows only.
    df_Audit = pd.DataFrame(view_Audit(), columns=["id","IDD","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def Audit_2(df_Audit: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_Audit = df_Audit[(df_Audit["IDD"].isin(IDD2))]
        return df_Audit.loc[1:, ["id","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"]]
    # Guard against an empty table (no data recorded yet).
    try:
        df_Audit1 = Audit_2(df_Audit)
    except:
        st.error("Nous ne pouvons afficher, car vous n'avez pas de donnée enregistrée.")
        st.stop()
    df_Audit1['Date'] = pd.to_datetime(df_Audit1['Date']).apply(lambda x: x.date())
    df_Audit1.sort_values(by=['Date'], inplace=True)
    # Date-range pickers.
    st.write('SELECTIONNEZ UN INTERVALLE DE DATE POUR VOTRE GRILLE')
    try:
        miny= st.date_input('MinDate',min(df_Audit1['Date']),key=6)
        maxy= st.date_input('MaxDate',max(df_Audit1['Date']),key=6)
    except:
        st.error("Nous ne pouvons afficher car vous n'avez pas aumoins deux dates enrégistrées")
        st.stop()
    # Filter by site (chantier).
    splitted_df_Audit1 = df_Audit1['Chantier'].str.split(',')
    unique_vals8 = list(dict.fromkeys([y for x in splitted_df_Audit1 for y in x]).keys())
    filtrechantier8 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals8,key=7)
    mask = (df_Audit1['Date'] > miny) & (df_Audit1['Date'] <= maxy) & (df_Audit1['Chantier'] == filtrechantier8)
    df_filter8=df_Audit1.loc[mask]
    st.dataframe(df_filter8)
    st.text("*AC: Audit Chantier\n*VC:Visite conjointe\n*NEU:Nombre d'exercice d'urgence\n*SMPAR:Sensibilisation au modes de prévention des activités à risques\n*NPR:Nombre de procedures réalisées\n*IE:Inspections Environne-mentales")
    if st.button("Télécharger", key=7):
        st.markdown(get_table_download_link(df_filter8), unsafe_allow_html=True)
    # Figure: totals per indicator.
    df_filter8['AC'] = pd.to_numeric(df_filter8['AC'])
    df_filter8['VC'] = pd.to_numeric(df_filter8['VC'])
    df_filter8['NEU'] = pd.to_numeric(df_filter8['NEU'])
    df_filter8['SMPAR'] = pd.to_numeric(df_filter8['SMPAR'])
    df_filter8['NPR'] = pd.to_numeric(df_filter8['NPR'])
    df_filter8['IE'] = pd.to_numeric(df_filter8['IE'])
    Objectf_fixe12 = df_filter8['AC'].sum()
    Objectf_fixe13 = df_filter8['VC'].sum()
    Objectf_fixe14= df_filter8['NEU'].sum()
    Objectf_fixe15= df_filter8['SMPAR'].sum()
    Objectf_fixe16 = df_filter8['NPR'].sum()
    Objectf_fixe17 = df_filter8['IE'].sum()
    df_filter8_df1 = pd.DataFrame(columns=["AC", "VC","NEU","SMPAR","NPR","IE"])
    df_filter8_df1.at[0, "AC"] = Objectf_fixe12
    df_filter8_df1.at[0, "VC"] = Objectf_fixe13
    df_filter8_df1.at[0, "NEU"] = Objectf_fixe14
    df_filter8_df1.at[0, "SMPAR"] = Objectf_fixe15
    df_filter8_df1.at[0, "NPR"] = Objectf_fixe16
    df_filter8_df1.at[0, "IE"] = Objectf_fixe17
    df_filter8_df_melt1 = pd.melt(df_filter8_df1)
    df_filter8_df_melt1.columns = ['variable', 'valeur']
    st.dataframe(df_filter8_df_melt1)
    figAC1 = px.bar(df_filter8_df_melt1, x = 'variable', y = 'valeur',color="variable")
    st.plotly_chart(figAC1, use_container_width=True)
#Modification
# --- "Update" menu branch: edit a row of the "Accueil sécurité" (safety induction) table ---
elif choix == "METTRE À JOUR":
    st.subheader("MODIFIER DES DONNÉES")
    with st.beta_expander("ACCUEIL SECURITÉ"):
        st.markdown('### DONNÉE ACTUELLE')
        df_Accueil = pd.DataFrame(view_Accueil(), columns=["id","IDD","Chantier","Nbre_Arrivant","Nbre_induction","Date"])
        # Restrict to the logged-in user's rows only.
        IDD2 = email.strip('][').split(', ')
        @st.cache
        def Accueil_2(df_Accueil: pd.DataFrame) -> pd.DataFrame:
            """Keep only the current user's rows and drop the IDD column."""
            df_Accueil2 = df_Accueil[(df_Accueil["IDD"].isin(IDD2))]
            return df_Accueil2.loc[1:, ["id","Chantier","Nbre_Arrivant","Nbre_induction","Date"]]
        df_Accueil1 = Accueil_2(df_Accueil)
        # Filter by site (chantier).
        splitted_df_Accueil1 = df_Accueil1['Chantier'].str.split(',')
        unique_vals1 = list(dict.fromkeys([y for x in splitted_df_Accueil1 for y in x]).keys())
        filtrechantier = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals1,key=9)
        mask = (df_Accueil1['Chantier'] == filtrechantier)
        df_filter1=df_Accueil1.loc[mask]
        st.dataframe(df_filter1)
        # Pick the row to edit and pre-fill the form with its current values.
        idval = list(df_filter1['id'])
        selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À MODIFIER", idval)
        name_result = get_id_Accueil(selected_id)
        if name_result:
            id = name_result[0][0]
            Chantier = name_result[0][2]
            NArrivant = name_result[0][3]
            Ninduction = name_result[0][4]
            col1, col2= st.beta_columns(2)
            with col1:
                st.subheader("CIBLE À MODIFIER")
            with col1:
                # inputcheck() validates/coerces the text entries.
                new_NArrivant =inputcheck(st.text_input("Nombre Arrivant",NArrivant))
                new_Ninduction = inputcheck(st.text_input("Nombre d'induction",Ninduction))
                id=selected_id
            with col2:
                st.subheader("NOM DU CHANTIER")
                new_Chantier = st.text_input("Chantier",Chantier)
            button1=st.button("MODIFIER LES DÉTAILS")
            if button1:
                edit_Accueil(new_Chantier,new_NArrivant,new_Ninduction,id)
                st.success("MODIFIÉ AVEC SUCCÈS: {}".format(new_Chantier))
        # Re-query and redisplay the grid so the edit is visible immediately.
        st.markdown('### DONNÉE MODIFIÉE')
        df_Accueil = pd.DataFrame(view_Accueil(), columns=["id","IDD","Chantier","Nbre_Arrivant","Nbre_induction","Date"])
        IDD2 = email.strip('][').split(', ')
        @st.cache
        def Accueil_2(df_Accueil: pd.DataFrame) -> pd.DataFrame:
            """Keep only the current user's rows and drop the IDD column."""
            df_Accueil2 = df_Accueil[(df_Accueil["IDD"].isin(IDD2))]
            return df_Accueil2.loc[1:, ["id","Chantier","Nbre_Arrivant","Nbre_induction","Date"]]
        df_Accueil1 = Accueil_2(df_Accueil)
        # Filter by site (chantier).
        splitted_df_Accueil1 = df_Accueil1['Chantier'].str.split(',')
        unique_vals1 = list(dict.fromkeys([y for x in splitted_df_Accueil1 for y in x]).keys())
        filtrechantier = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals1,key=10)
        mask = (df_Accueil1['Chantier'] == filtrechantier)
        df_filter1=df_Accueil1.loc[mask]
        st.dataframe(df_filter1)
# --- Edit a row of the "TBM" (toolbox-meeting / safety briefing) table ---
with st.beta_expander("BRIEFING DE SÉCURITÉ( TBM)"):
    st.markdown('### DONNÉE ACTUELLE')
    df_TBM = pd.DataFrame(view_TBM(), columns=["id","IDD","Chantier","Nbre_chantier","Nbre_TBM","Date"])
    # Restrict to the logged-in user's rows only.
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def TBM_2(df_TBM: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_TBM2 = df_TBM[(df_TBM["IDD"].isin(IDD2))]
        return df_TBM2.loc[1:, ["id","Chantier","Nbre_chantier","Nbre_TBM","Date"]]
    df_TBM1 = TBM_2(df_TBM)
    # Filter by site (chantier).
    splitted_df_TBM1 = df_TBM1['Chantier'].str.split(',')
    unique_vals2 = list(dict.fromkeys([y for x in splitted_df_TBM1 for y in x]).keys())
    filtrechantier2 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals2,key=100)
    mask = (df_TBM1['Chantier'] == filtrechantier2)
    df_filter2=df_TBM1.loc[mask]
    st.dataframe(df_filter2)
    # Pick the row to edit and pre-fill the form with its current values.
    idval = list(df_filter2['id'])
    selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À MODIFIER", idval,key=9)
    name_result = get_id_TBM(selected_id)
    if name_result:
        id = name_result[0][0]
        Chantier = name_result[0][2]
        NChantier = name_result[0][3]
        NTBM = name_result[0][4]
        col1, col2= st.beta_columns(2)
        with col1:
            st.subheader("CIBLE À MODIFIER")
        with col1:
            new_NChantier =inputcheck(st.text_input("Nombre Arrivant",NChantier,key=0))
            new_NTBM = inputcheck(st.text_input("Nombre d'induction",NTBM,key=1))
            id=selected_id
        with col2:
            st.subheader("NOM DU CHANTIER")
            new_Chantier = st.text_input("Chantier",Chantier,key=2)
        button1=st.button("MODIFIER LES DÉTAILS",key=0)
        if button1:
            edit_TBM(new_Chantier,new_NChantier,new_NTBM,id)
            st.success("MODIFIÉ AVEC SUCCÈS: {}".format(new_Chantier))
    # Re-query and redisplay the grid so the edit is visible immediately.
    st.markdown('### DONNÉE MODIFIÉE')
    df_TBM = pd.DataFrame(view_TBM(), columns=["id","IDD","Chantier","Nbre_chantier","Nbre_TBM","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def TBM_2(df_TBM: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_TBM2 = df_TBM[(df_TBM["IDD"].isin(IDD2))]
        return df_TBM2.loc[1:, ["id","Chantier","Nbre_chantier","Nbre_TBM","Date"]]
    df_TBM1 = TBM_2(df_TBM)
    # Filter by site (chantier).
    splitted_df_TBM1 = df_TBM1['Chantier'].str.split(',')
    unique_vals2 = list(dict.fromkeys([y for x in splitted_df_TBM1 for y in x]).keys())
    filtrechantier2 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals2,key=101)
    mask = (df_TBM1['Chantier'] == filtrechantier2)
    df_filter2=df_TBM1.loc[mask]
    st.dataframe(df_filter2)
# --- Edit a row of the "Non conformité" (non-conformance) table ---
with st.beta_expander("NON CONFORMITÉ"):
    st.markdown('### DONNÉE ACTUELLE')
    df_NC = pd.DataFrame(view_NC(), columns=["id","IDD","Chantier","NCR","FNCR","NCC","FNCC","Date"])
    # Restrict to the logged-in user's rows only.
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def NC_2(df_NC: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_NC2 = df_NC[(df_NC["IDD"].isin(IDD2))]
        return df_NC2.loc[1:, ["id","Chantier","NCR","FNCR","NCC","FNCC","Date"]]
    df_NC1 = NC_2(df_NC)
    # Filter by site (chantier).
    splitted_df_NC1 = df_NC1['Chantier'].str.split(',')
    unique_vals3 = list(dict.fromkeys([y for x in splitted_df_NC1 for y in x]).keys())
    filtrechantier3 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals3,key=21)
    mask = (df_NC1['Chantier'] == filtrechantier3)
    df_filter3=df_NC1.loc[mask]
    st.dataframe(df_filter3)
    # Pick the row to edit and pre-fill the form with its current values.
    idval = list(df_filter3['id'])
    selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À MODIFIER", idval,key=0)
    name_result = get_id_NC(selected_id)
    if name_result:
        id = name_result[0][0]
        Chantier = name_result[0][2]
        NCR = name_result[0][3]
        FNCR = name_result[0][4]
        NCC = name_result[0][5]
        FNCC = name_result[0][6]
        col1, col2= st.beta_columns(2)
        with col1:
            st.subheader("CIBLE À MODIFIER")
        with col1:
            new_NCR = inputcheck(st.text_input("Nombre de Non conformité remontée",NCR,key=0))
            new_FNCR = inputcheck(st.text_input("Nombre de fiche de Non conformité remontée",FNCR,key=1))
            new_NCC = inputcheck(st.text_input("Nombre de Non conformité cloturée",NCC,key=2))
            new_FNCC= inputcheck(st.text_input("Nombre de fiche de Non conformité cloturée",FNCC, key=3))
        with col2:
            st.subheader("NOM DU CHANTIER")
            new_Chantier = st.text_input("Chantier",Chantier,key=4)
        button1=st.button("MODIFIER LES DÉTAILS",key=1)
        if button1:
            edit_NC(new_Chantier,new_NCR,new_FNCR,new_NCC,new_FNCC,id)
            st.success("MODIFIÉ AVEC SUCCÈS: {}".format(new_Chantier))
    # Re-query and redisplay the grid so the edit is visible immediately.
    st.markdown('### DONNÉE MODIFIÉE')
    df_NC = pd.DataFrame(view_NC(), columns=["id","IDD","Chantier","NCR","FNCR","NCC","FNCC","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def NC_2(df_NC: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_NC2 = df_NC[(df_NC["IDD"].isin(IDD2))]
        return df_NC2.loc[1:, ["id","Chantier","NCR","FNCR","NCC","FNCC","Date"]]
    df_NC1 = NC_2(df_NC)
    # Filter by site (chantier).
    splitted_df_NC1 = df_NC1['Chantier'].str.split(',')
    unique_vals3 = list(dict.fromkeys([y for x in splitted_df_NC1 for y in x]).keys())
    filtrechantier3 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals3,key=22)
    mask = (df_NC1['Chantier'] == filtrechantier3)
    df_filter3=df_NC1.loc[mask]
    st.dataframe(df_filter3)
# --- Edit a row of the "Changements" (recorded changes) table ---
with st.beta_expander("CHANGEMENTS ENREGISTRÉS"):
    st.markdown('### DONNÉE ACTUELLE')
    df_Changements = pd.DataFrame(view_Changements(), columns=["id","IDD","Chantier","NCH","FNCH","NCHC","FNCHC","Date"])
    # Restrict to the logged-in user's rows only.
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def Changements_2(df_Changements: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_Changements2 = df_Changements[(df_Changements["IDD"].isin(IDD2))]
        return df_Changements2.loc[1:, ["id","Chantier","NCH","FNCH","NCHC","FNCHC","Date"]]
    df_Changements1 = Changements_2(df_Changements)
    # Filter by site (chantier).
    splitted_df_Changements1 = df_Changements1['Chantier'].str.split(',')
    unique_vals4 = list(dict.fromkeys([y for x in splitted_df_Changements1 for y in x]).keys())
    filtrechantier4 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals4,key=12)
    mask = (df_Changements1['Chantier'] == filtrechantier4)
    df_filter4=df_Changements1.loc[mask]
    st.dataframe(df_filter4)
    # Pick the row to edit and pre-fill the form with its current values.
    idval = list(df_filter4['id'])
    selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À MODIFIER", idval,key=1)
    name_result = get_id_Changements(selected_id)
    if name_result:
        id = name_result[0][0]
        Chantier = name_result[0][2]
        NCH = name_result[0][3]
        FNCH = name_result[0][4]
        NCHC = name_result[0][5]
        FNCHC = name_result[0][6]
        col1, col2= st.beta_columns(2)
        with col1:
            st.subheader("CIBLE À MODIFIER")
        with col1:
            new_NCH = inputcheck(st.text_input("Nombre de Changement enregistrés",NCH))
            new_FNCH = inputcheck(st.text_input("Nombre de fiche de Changements enregistrés",FNCH))
            new_NCHC = inputcheck(st.text_input("Nombre de Changements cloturés",NCHC))
            new_FNCHC= inputcheck(st.text_input("Nombre de fiche de Changements suivis et cloturés",FNCHC))
        with col2:
            st.subheader("NOM DU CHANTIER")
            # BUG FIX: this was assigned to `Chantier`, leaving `new_Chantier`
            # undefined and crashing edit_Changements() below with a NameError.
            new_Chantier = st.text_input("Chantier",Chantier,key=3)
        button1=st.button("MODIFIER LES DÉTAILS",key=3)
        if button1:
            edit_Changements(new_Chantier,new_NCH,new_FNCH,new_NCHC,new_FNCHC,id)
            st.success("MODIFIÉ AVEC SUCCÈS: {}".format(new_Chantier))
    # Re-query and redisplay the grid so the edit is visible immediately.
    st.markdown('### DONNÉE MODIFIÉE')
    df_Changements = pd.DataFrame(view_Changements(), columns=["id","IDD","Chantier","NCH","FNCH","NCHC","FNCHC","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def Changements_2(df_Changements: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_Changements2 = df_Changements[(df_Changements["IDD"].isin(IDD2))]
        return df_Changements2.loc[1:, ["id","Chantier","NCH","FNCH","NCHC","FNCHC","Date"]]
    df_Changements1 = Changements_2(df_Changements)
    # Filter by site (chantier).
    splitted_df_Changements1 = df_Changements1['Chantier'].str.split(',')
    unique_vals4 = list(dict.fromkeys([y for x in splitted_df_Changements1 for y in x]).keys())
    filtrechantier4 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals4,key=13)
    mask = (df_Changements1['Chantier'] == filtrechantier4)
    df_filter4=df_Changements1.loc[mask]
    st.dataframe(df_filter4)
# --- Edit a row of the "Anomalies" table ---
with st.beta_expander("ANOMALIES"):
    st.markdown('### DONNÉE ACTUELLE')
    df_Anomalies = pd.DataFrame(view_Anomalies(), columns=["id","IDD","Chantier","NA","FNA","NAC","FNAC","Date"])
    # Restrict to the logged-in user's rows only.
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def Anomalies_2(df_Anomalies: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_Anomalies2 = df_Anomalies[(df_Anomalies["IDD"].isin(IDD2))]
        return df_Anomalies2.loc[1:, ["id","Chantier","NA","FNA","NAC","FNAC","Date"]]
    df_Anomalies1 = Anomalies_2(df_Anomalies)
    # Filter by site (chantier).
    splitted_df_Anomalies1 = df_Anomalies1['Chantier'].str.split(',')
    unique_vals5 = list(dict.fromkeys([y for x in splitted_df_Anomalies1 for y in x]).keys())
    filtrechantier5 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals5,key=14)
    mask = (df_Anomalies1['Chantier'] == filtrechantier5)
    df_filter5=df_Anomalies1.loc[mask]
    st.dataframe(df_filter5)
    # Pick the row to edit and pre-fill the form with its current values.
    idval = list(df_filter5['id'])
    selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE A MODIFIER", idval,key=4)
    name_result = get_id_Anomalies(selected_id)
    if name_result:
        id = name_result[0][0]
        Chantier = name_result[0][2]
        NA = name_result[0][3]
        FNA = name_result[0][4]
        NAC = name_result[0][5]
        FNAC = name_result[0][6]
        col1, col2= st.beta_columns(2)
        with col1:
            st.subheader("CIBLE À MODIFIER")
        with col1:
            new_NA = inputcheck(st.text_input("Nombre d'Anomalies Remontées",NA))
            new_FNA = inputcheck(st.text_input("Nombre de fiche d'Anomalies Remontées",FNA))
            new_NAC = inputcheck(st.text_input("Nombre d' Anomalies cloturés",NAC))
            new_FNAC = inputcheck(st.text_input("Nombre de fiche de Anomalies Corrigées",FNAC))
        with col2:
            st.subheader("NOM DU CHANTIER")
            new_Chantier = st.text_input("Chantier",Chantier,key=5)
        button1=st.button("MODIFIER LES DÉTAILS",key=5)
        if button1:
            edit_Anomalies(new_Chantier,new_NA,new_FNA,new_NAC,new_FNAC,id)
            st.success("MODIFIÉ AVEC SUCCÈS: {}".format(new_Chantier))
    # Re-query and redisplay the grid so the edit is visible immediately.
    st.markdown('### DONNÉE MODIFIÉE')
    df_Anomalies = pd.DataFrame(view_Anomalies(), columns=["id","IDD","Chantier","NA","FNA","NAC","FNAC","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def Anomalies_2(df_Anomalies: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_Anomalies2 = df_Anomalies[(df_Anomalies["IDD"].isin(IDD2))]
        return df_Anomalies2.loc[1:, ["id","Chantier","NA","FNA","NAC","FNAC","Date"]]
    df_Anomalies1 = Anomalies_2(df_Anomalies)
    # Filter by site (chantier).
    splitted_df_Anomalies1 = df_Anomalies1['Chantier'].str.split(',')
    unique_vals5 = list(dict.fromkeys([y for x in splitted_df_Anomalies1 for y in x]).keys())
    filtrechantier5 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals5,key=144)
    # BUG FIX: recompute the mask for the freshly selected site; previously the
    # stale mask from the "current data" grid was reused, so this grid ignored
    # the selectbox directly above.
    mask = (df_Anomalies1['Chantier'] == filtrechantier5)
    df_filter5=df_Anomalies1.loc[mask]
    st.dataframe(df_filter5)
# --- Edit a row of the "JSA" (job safety analysis) table ---
with st.beta_expander("ANALYSE DES RISQUES RÉALISÉS(JSA)"):
    st.markdown('### DONNÉE ACTUELLE')
    df_JSA = pd.DataFrame(view_JSA(), columns=["id","IDD","Chantier","NAct","NJSA","Date"])
    # Restrict to the logged-in user's rows only.
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def JSA_2(df_JSA: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_JSA2 = df_JSA[(df_JSA["IDD"].isin(IDD2))]
        return df_JSA2.loc[1:, ["id","Chantier","NAct","NJSA","Date"]]
    df_JSA1 = JSA_2(df_JSA)
    # Filter by site (chantier).
    splitted_df_JSA1 = df_JSA1['Chantier'].str.split(',')
    unique_vals6 = list(dict.fromkeys([y for x in splitted_df_JSA1 for y in x]).keys())
    filtrechantier6 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals6,key=166)
    mask = (df_JSA1['Chantier'] == filtrechantier6)
    df_filter6=df_JSA1.loc[mask]
    st.dataframe(df_filter6)
    # Pick the row to edit and pre-fill the form with its current values.
    idval = list(df_filter6['id'])
    selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À MODIFIER", idval,key=6)
    name_result = get_id_JSA(selected_id)
    if name_result:
        id = name_result[0][0]
        Chantier = name_result[0][2]
        NAct = name_result[0][3]
        NJSA = name_result[0][4]
        col1, col2= st.beta_columns(2)
        with col1:
            st.subheader("CIBLE À MODIFIER")
        with col1:
            new_NAct = inputcheck(st.text_input("Nombre d'Activite",NAct))
            new_NJSA = inputcheck(st.text_input("Nombre de fiche JSA",NJSA))
        with col2:
            st.subheader("NOM DU CHANTIER")
            new_Chantier = st.text_input("Chantier",Chantier,key=6)
        button1=st.button("MODIFIER LES DÉTAILS",key=6)
        if button1:
            edit_JSA(new_Chantier,new_NAct,new_NJSA,id)
            st.success("MODIFIÉ AVEC SUCCÈS: {}".format(new_Chantier))
    # Re-query and redisplay the grid so the edit is visible immediately.
    st.markdown('### DONNÉE MODIFIÉE')
    df_JSA = pd.DataFrame(view_JSA(), columns=["id","IDD","Chantier","NAct","NJSA","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def JSA_2(df_JSA: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_JSA2 = df_JSA[(df_JSA["IDD"].isin(IDD2))]
        return df_JSA2.loc[1:, ["id","Chantier","NAct","NJSA","Date"]]
    df_JSA1 = JSA_2(df_JSA)
    # Filter by site (chantier).
    splitted_df_JSA1 = df_JSA1['Chantier'].str.split(',')
    unique_vals6 = list(dict.fromkeys([y for x in splitted_df_JSA1 for y in x]).keys())
    filtrechantier6 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals6,key=177)
    mask = (df_JSA1['Chantier'] == filtrechantier6)
    df_filter6=df_JSA1.loc[mask]
    st.dataframe(df_filter6)
# --- Edit a row of the "Incident & Accident" table ---
with st.beta_expander("INCIDENT & ACCIDENT"):
    st.markdown('### DONNÉE ACTUELLE')
    df_IA = pd.DataFrame(view_Incident_Accident(), columns=["id","IDD","Chantier","NInc","AAA","ASA","AT","NJP","Date"])
    # Restrict to the logged-in user's rows only.
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def IA_2(df_IA: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_IA = df_IA[(df_IA["IDD"].isin(IDD2))]
        return df_IA.loc[1:, ["id","Chantier","NInc","AAA","ASA","AT","NJP","Date"]]
    df_IA1 = IA_2(df_IA)
    # Filter by site (chantier).
    splitted_df_IA1 = df_IA1['Chantier'].str.split(',')
    unique_vals7 = list(dict.fromkeys([y for x in splitted_df_IA1 for y in x]).keys())
    filtrechantier7 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals7,key=18)
    mask = (df_IA1['Chantier'] == filtrechantier7)
    df_filter7=df_IA1.loc[mask]
    st.dataframe(df_filter7)
    # Pick the row to edit and pre-fill the form with its current values.
    idval = list(df_filter7['id'])
    selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À MODIFIER", idval,key=7)
    name_result = get_id_Incident_Accident(selected_id)
    if name_result:
        id = name_result[0][0]
        Chantier = name_result[0][2]
        NInc = name_result[0][3]
        AAA = name_result[0][4]
        ASA = name_result[0][5]
        AT = name_result[0][6]
        NJP = name_result[0][7]
        col1, col2= st.beta_columns(2)
        with col1:
            st.subheader("CIBLE À MODIFIER")
        with col1:
            new_AAA = inputcheck(st.text_input("Accident Avec Arrêt",AAA))
            new_NJP = inputcheck(st.text_input("Nombre de jours perdus",NJP))
            new_ASA = inputcheck(st.text_input("Accident Sans Arrêt",ASA))
            new_AT = inputcheck(st.text_input("Nombre d'accident de trajet",AT))
            new_NInc = inputcheck(st.text_input("Incident",NInc))
        with col2:
            st.subheader("NOM DU CHANTIER")
            new_Chantier = st.text_input("Chantier",Chantier,key=7)
        button1=st.button("MODIFIER LES DÉTAILS",key=7)
        if button1:
            edit_Incident_Accident(new_Chantier,new_NInc,new_AAA,new_ASA,new_AT,new_NJP,id)
            st.success("MODIFIÉ AVEC SUCCÈS: {}".format(new_Chantier))
    # Re-query and redisplay the grid so the edit is visible immediately.
    # CONSISTENCY FIX: this section heading was missing here although every
    # sibling "update" section shows it before the refreshed grid.
    st.markdown('### DONNÉE MODIFIÉE')
    df_IA = pd.DataFrame(view_Incident_Accident(), columns=["id","IDD","Chantier","NInc","AAA","ASA","AT","NJP","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def IA_2(df_IA: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_IA = df_IA[(df_IA["IDD"].isin(IDD2))]
        return df_IA.loc[1:, ["id","Chantier","NInc","AAA","ASA","AT","NJP","Date"]]
    df_IA1 = IA_2(df_IA)
    # Filter by site (chantier).
    splitted_df_IA1 = df_IA1['Chantier'].str.split(',')
    unique_vals7 = list(dict.fromkeys([y for x in splitted_df_IA1 for y in x]).keys())
    filtrechantier7 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals7,key=19)
    mask = (df_IA1['Chantier'] == filtrechantier7)
    df_filter7=df_IA1.loc[mask]
    st.dataframe(df_filter7)
# --- Edit a row of the "Audit" table ---
with st.beta_expander("AUDIT CHANTIER; VISITE CONJOINTE; PRÉVENTION ET INSPECTION"):
    st.markdown('### DONNÉE ACTUELLE')
    df_Audit = pd.DataFrame(view_Audit(), columns=["id","IDD","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"])
    # Restrict to the logged-in user's rows only.
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def Audit_2(df_Audit: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_Audit = df_Audit[(df_Audit["IDD"].isin(IDD2))]
        return df_Audit.loc[1:, ["id","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"]]
    df_Audit1 = Audit_2(df_Audit)
    # Filter by site (chantier).
    splitted_df_Audit1 = df_Audit1['Chantier'].str.split(',')
    unique_vals8 = list(dict.fromkeys([y for x in splitted_df_Audit1 for y in x]).keys())
    filtrechantier8 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals8,key=20)
    mask = (df_Audit1['Chantier'] == filtrechantier8)
    df_filter8=df_Audit1.loc[mask]
    st.dataframe(df_filter8)
    # Pick the row to edit and pre-fill the form with its current values.
    idval = list(df_filter8['id'])
    selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À MODIFIER", idval,key=8)
    name_result = get_id_Audit(selected_id)
    if name_result:
        id = name_result[0][0]
        Chantier = name_result[0][2]
        AC = name_result[0][3]
        VC = name_result[0][4]
        NEU = name_result[0][5]
        SMPAR = name_result[0][6]
        NPR = name_result[0][7]
        IE = name_result[0][8]
        col1, col2= st.beta_columns(2)
        with col1:
            st.subheader("CIBLE À MODIFIER")
        with col1:
            new_AC= inputcheck(st.text_input("Nombre d'audit",AC))
            new_VC= inputcheck(st.text_input("Nombre de Visite Conjointe",VC))
            new_NEU= inputcheck(st.text_input("Nombre d'exercice d'urgence",NEU))
            new_SMPAR= inputcheck(st.text_input("Sensibilisation au modes de prévention des activités à risques",SMPAR))
            new_NPR= inputcheck(st.text_input("Procedures réalisées",NPR))
            new_IE= inputcheck(st.text_input("Inspections Environnementales",IE))
        with col2:
            st.subheader("NOM DU CHANTIER")
            # CONSISTENCY FIX: pre-fill with the row's current site name like
            # every sibling section did (the default value was missing here).
            new_Chantier = st.text_input("Chantier",Chantier,key=808)
        button1=st.button("MODIFIER LES DÉTAILS",key=8)
        if button1:
            # BUG FIX: the call previously passed an undefined `new_ID` as the
            # first argument, raising a NameError on submit. The signature now
            # matches the other edit_* calls: fields first, row id last.
            edit_Audit(new_Chantier,new_AC,new_VC,new_NEU,new_SMPAR,new_NPR,new_IE,id)
            st.success("MODIFIÉ AVEC SUCCÈS: {}".format(new_Chantier))
    # Re-query and redisplay the grid so the edit is visible immediately.
    st.markdown('### DONNÉE MODIFIÉE')
    df_Audit = pd.DataFrame(view_Audit(), columns=["id","IDD","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def Audit_2(df_Audit: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_Audit = df_Audit[(df_Audit["IDD"].isin(IDD2))]
        return df_Audit.loc[1:, ["id","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"]]
    df_Audit1 = Audit_2(df_Audit)
    # Filter by site (chantier).
    splitted_df_Audit1 = df_Audit1['Chantier'].str.split(',')
    unique_vals8 = list(dict.fromkeys([y for x in splitted_df_Audit1 for y in x]).keys())
    filtrechantier8 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals8,key=211)
    mask = (df_Audit1['Chantier'] == filtrechantier8)
    df_filter8=df_Audit1.loc[mask]
    st.dataframe(df_filter8)
# --- "Delete" menu branch: remove a row of the "Accueil sécurité" table ---
elif choix == "SUPPRIMER":
    st.subheader("SUPPRIMER DES DONNÉES")
    with st.beta_expander("ACCUEIL SECURITÉ"):
        st.markdown('### DONNÉE ACTUELLE')
        df_Accueil = pd.DataFrame(view_Accueil(), columns=["id","IDD","Chantier","Nbre_Arrivant","Nbre_induction","Date"])
        # Restrict to the logged-in user's rows only.
        IDD2 = email.strip('][').split(', ')
        @st.cache
        def Accueil_2(df_Accueil: pd.DataFrame) -> pd.DataFrame:
            """Keep only the current user's rows and drop the IDD column."""
            df_Accueil2 = df_Accueil[(df_Accueil["IDD"].isin(IDD2))]
            return df_Accueil2.loc[1:, ["id","Chantier","Nbre_Arrivant","Nbre_induction","Date"]]
        df_Accueil1 = Accueil_2(df_Accueil)
        # Filter by site (chantier).
        splitted_df_Accueil1 = df_Accueil1['Chantier'].str.split(',')
        unique_vals1 = list(dict.fromkeys([y for x in splitted_df_Accueil1 for y in x]).keys())
        filtrechantier = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals1,key=22)
        mask = (df_Accueil1['Chantier'] == filtrechantier)
        df_filter1=df_Accueil1.loc[mask]
        st.dataframe(df_filter1)
        # Pick the row to delete by its id.
        idval = list(df_filter1['id'])
        selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À SUPPRIMER", idval, key=10)
        name_delete = get_id_Accueil(selected_id)
        if name_delete:
            id = name_delete[0][0]
            if st.button("SUPPRIMER"):
                delete_data_Accueil(id)
                st.warning("SUPPRIMER: '{}'".format(name_delete))
        # Re-query and redisplay the grid so the deletion is visible immediately.
        st.markdown('### DONNÉE MODIFIÉE')
        df_Accueil = pd.DataFrame(view_Accueil(), columns=["id","IDD","Chantier","Nbre_Arrivant","Nbre_induction","Date"])
        IDD2 = email.strip('][').split(', ')
        @st.cache
        def Accueil_2(df_Accueil: pd.DataFrame) -> pd.DataFrame:
            """Keep only the current user's rows and drop the IDD column."""
            df_Accueil2 = df_Accueil[(df_Accueil["IDD"].isin(IDD2))]
            return df_Accueil2.loc[1:, ["id","Chantier","Nbre_Arrivant","Nbre_induction","Date"]]
        df_Accueil1 = Accueil_2(df_Accueil)
        # Filter by site (chantier).
        splitted_df_Accueil1 = df_Accueil1['Chantier'].str.split(',')
        unique_vals1 = list(dict.fromkeys([y for x in splitted_df_Accueil1 for y in x]).keys())
        filtrechantier = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals1,key=23)
        mask = (df_Accueil1['Chantier'] == filtrechantier)
        df_filter1=df_Accueil1.loc[mask]
        st.dataframe(df_filter1)
# --- Delete a row of the "TBM" (safety briefing) table ---
with st.beta_expander("BRIEFING DE SÉCURITÉ( TBM)"):
    st.markdown('### DONNÉE ACTUELLE')
    df_TBM = pd.DataFrame(view_TBM(), columns=["id","IDD","Chantier","Nbre_chantier","Nbre_TBM","Date"])
    # Restrict to the logged-in user's rows only.
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def TBM_2(df_TBM: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_TBM2 = df_TBM[(df_TBM["IDD"].isin(IDD2))]
        return df_TBM2.loc[1:, ["id","Chantier","Nbre_chantier","Nbre_TBM","Date"]]
    df_TBM1 = TBM_2(df_TBM)
    # Filter by site (chantier).
    splitted_df_TBM1 = df_TBM1['Chantier'].str.split(',')
    unique_vals2 = list(dict.fromkeys([y for x in splitted_df_TBM1 for y in x]).keys())
    filtrechantier2 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals2,key=24)
    mask = (df_TBM1['Chantier'] == filtrechantier2)
    df_filter2=df_TBM1.loc[mask]
    st.dataframe(df_filter2)
    # Pick the row to delete by its id.
    idval = list(df_filter2['id'])
    selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À SUPPRIMER", idval,key=41)
    name_delete = get_id_TBM(selected_id)
    if name_delete:
        id = name_delete[0][0]
        if st.button("SUPPRIMER",key=0):
            # BUG FIX: previously the fetched row tuple `name_delete` was passed
            # instead of the row id (cf. the correct delete_data_Accueil(id)
            # call in the Accueil section), so the row was never deleted.
            delete_data_TBM(id)
            st.warning("SUPPRIMER: '{}'".format(name_delete))
    # Re-query and redisplay the grid so the deletion is visible immediately.
    st.markdown('### DONNÉE MODIFIÉE')
    df_TBM = pd.DataFrame(view_TBM(), columns=["id","IDD","Chantier","Nbre_chantier","Nbre_TBM","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def TBM_2(df_TBM: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_TBM2 = df_TBM[(df_TBM["IDD"].isin(IDD2))]
        return df_TBM2.loc[1:, ["id","Chantier","Nbre_chantier","Nbre_TBM","Date"]]
    df_TBM1 = TBM_2(df_TBM)
    # Filter by site (chantier).
    splitted_df_TBM1 = df_TBM1['Chantier'].str.split(',')
    unique_vals2 = list(dict.fromkeys([y for x in splitted_df_TBM1 for y in x]).keys())
    filtrechantier2 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals2,key=35)
    mask = (df_TBM1['Chantier'] == filtrechantier2)
    df_filter2=df_TBM1.loc[mask]
    st.dataframe(df_filter2)
# --- Delete a row of the "Non conformité" table ---
with st.beta_expander("NON CONFORMITÉ"):
    st.markdown('### DONNÉE ACTUELLE')
    df_NC = pd.DataFrame(view_NC(), columns=["id","IDD","Chantier","NCR","FNCR","NCC","FNCC","Date"])
    # Restrict to the logged-in user's rows only.
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def NC_2(df_NC: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_NC2 = df_NC[(df_NC["IDD"].isin(IDD2))]
        return df_NC2.loc[1:, ["id","Chantier","NCR","FNCR","NCC","FNCC","Date"]]
    df_NC1 = NC_2(df_NC)
    # Filter by site (chantier).
    splitted_df_NC1 = df_NC1['Chantier'].str.split(',')
    unique_vals3 = list(dict.fromkeys([y for x in splitted_df_NC1 for y in x]).keys())
    filtrechantier3 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals3,key=25)
    mask = (df_NC1['Chantier'] == filtrechantier3)
    df_filter3=df_NC1.loc[mask]
    st.dataframe(df_filter3)
    # Pick the row to delete by its id.
    idval = list(df_filter3['id'])
    selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À SUPPRIMER", idval,key=12)
    name_delete = get_id_NC(selected_id)
    if name_delete:
        id = name_delete[0][0]
        if st.button("SUPPRIMER",key=1):
            # BUG FIX: previously the fetched row tuple `name_delete` was passed
            # instead of the row id (cf. delete_data_Accueil(id)), so the row
            # was never deleted.
            delete_data_NC(id)
            st.warning("SUPPRIMER: '{}'".format(name_delete))
    # Re-query and redisplay the grid so the deletion is visible immediately.
    st.markdown('### DONNÉE MODIFIÉE')
    df_NC = pd.DataFrame(view_NC(), columns=["id","IDD","Chantier","NCR","FNCR","NCC","FNCC","Date"])
    IDD2 = email.strip('][').split(', ')
    @st.cache
    def NC_2(df_NC: pd.DataFrame) -> pd.DataFrame:
        """Keep only the current user's rows and drop the IDD column."""
        df_NC2 = df_NC[(df_NC["IDD"].isin(IDD2))]
        return df_NC2.loc[1:, ["id","Chantier","NCR","FNCR","NCC","FNCC","Date"]]
    df_NC1 = NC_2(df_NC)
    # Filter by site (chantier).
    splitted_df_NC1 = df_NC1['Chantier'].str.split(',')
    unique_vals3 = list(dict.fromkeys([y for x in splitted_df_NC1 for y in x]).keys())
    filtrechantier3 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals3,key=26)
    mask = (df_NC1['Chantier'] == filtrechantier3)
    df_filter3=df_NC1.loc[mask]
    st.dataframe(df_filter3)
with st.beta_expander("CHANGEMENTS ENREGISTRÉS"):
st.markdown('### DONNÉE ACTUELLE')
df_Changements = pd.DataFrame(view_Changements(), columns=["id","IDD","Chantier","NCH","FNCH","NCHC","FNCHC","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def Changements_2(df_Changements: pd.DataFrame) -> pd.DataFrame:
df_Changements2 = df_Changements[(df_Changements["IDD"].isin(IDD2))]
return df_Changements2.loc[1:, ["id","Chantier","NCH","FNCH","NCHC","FNCHC","Date"]]
df_Changements1 = Changements_2(df_Changements)
#filtrage par chantier
splitted_df_Changements1 = df_Changements1['Chantier'].str.split(',')
unique_vals4 = list(dict.fromkeys([y for x in splitted_df_Changements1 for y in x]).keys())
filtrechantier4 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals4,key=27)
mask = (df_Changements1['Chantier'] == filtrechantier4)
df_filter4=df_Changements1.loc[mask]
st.dataframe(df_filter4)
idval = list(df_filter4['id'])
selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À SUPPRIMER", idval,key=13)
name_delete = get_id_Changements(selected_id)
if name_delete:
id = name_delete[0][0]
if st.button("SUPPRIMER",key=2):
delete_data_Changements(name_delete)
st.warning("SUPPRIMER: '{}'".format(name_delete))
st.markdown('### DONNÉE MODIFIÉE')
df_Changements = pd.DataFrame(view_Changements(), columns=["id","IDD","Chantier","NCH","FNCH","NCHC","FNCHC","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def Changements_2(df_Changements: pd.DataFrame) -> pd.DataFrame:
df_Changements2 = df_Changements[(df_Changements["IDD"].isin(IDD2))]
return df_Changements2.loc[1:, ["id","Chantier","NCH","FNCH","NCHC","FNCHC","Date"]]
df_Changements1 = Changements_2(df_Changements)
#filtrage par chantier
splitted_df_Changements1 = df_Changements1['Chantier'].str.split(',')
unique_vals4 = list(dict.fromkeys([y for x in splitted_df_Changements1 for y in x]).keys())
filtrechantier4 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals4,key=28)
mask = (df_Changements1['Chantier'] == filtrechantier4)
df_filter4=df_Changements1.loc[mask]
st.dataframe(df_filter4)
with st.beta_expander("ANOMALIES"):
st.markdown('### DONNÉE ACTUELLE')
df_Anomalies = pd.DataFrame(view_Anomalies(), columns=["id","IDD","Chantier","NA","FNA","NAC","FNAC","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def Anomalies_2(df_Anomalies: pd.DataFrame) -> pd.DataFrame:
df_Anomalies2 = df_Anomalies[(df_Anomalies["IDD"].isin(IDD2))]
return df_Anomalies2.loc[1:, ["id","Chantier","NA","FNA","NAC","FNAC","Date"]]
df_Anomalies1 = Anomalies_2(df_Anomalies)
#filtrage par chantier
splitted_df_Anomalies1 = df_Anomalies1['Chantier'].str.split(',')
unique_vals5 = list(dict.fromkeys([y for x in splitted_df_Anomalies1 for y in x]).keys())
filtrechantier5 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals5,key=29)
mask = (df_Anomalies1['Chantier'] == filtrechantier5)
df_filter5=df_Anomalies1.loc[mask]
st.dataframe(df_filter5)
idval = list(df_filter5['id'])
selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À SUPPRIMER", idval,key=14)
name_delete = get_id_Anomalies(selected_id)
if name_delete:
id = name_delete[0][0]
if st.button("SUPPRIMER",key=3):
delete_data_Anomalies(name_delete)
st.warning("SUPPRIMER: '{}'".format(name_delete))
st.markdown('### DONNÉE MODIFIÉE')
df_Anomalies = pd.DataFrame(view_Anomalies(), columns=["id","IDD","Chantier","NA","FNA","NAC","FNAC","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def Anomalies_2(df_Anomalies: pd.DataFrame) -> pd.DataFrame:
df_Anomalies2 = df_Anomalies[(df_Anomalies["IDD"].isin(IDD2))]
return df_Anomalies2.loc[1:, ["id","Chantier","NA","FNA","NAC","FNAC","Date"]]
df_Anomalies1 = Anomalies_2(df_Anomalies)
#filtrage par chantier
splitted_df_Anomalies1 = df_Anomalies1['Chantier'].str.split(',')
unique_vals5 = list(dict.fromkeys([y for x in splitted_df_Anomalies1 for y in x]).keys())
filtrechantier5 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals5,key=30)
mask = (df_Anomalies1['Chantier'] == filtrechantier5)
df_filter5=df_Anomalies1.loc[mask]
st.dataframe(df_filter5)
with st.beta_expander("ANALYSE DES RISQUES RÉALISÉS(JSA)"):
st.markdown('### DONNÉE ACTUELLE')
df_JSA = pd.DataFrame(view_JSA(), columns=["id","IDD","Chantier","NAct","NJSA","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def JSA_2(df_JSA: pd.DataFrame) -> pd.DataFrame:
df_JSA2 = df_JSA[(df_JSA["IDD"].isin(IDD2))]
return df_JSA2.loc[1:, ["id","Chantier","NAct","NJSA","Date"]]
df_JSA1 = JSA_2(df_JSA)
#filtrage par chantier
splitted_df_JSA1 = df_JSA1['Chantier'].str.split(',')
unique_vals6 = list(dict.fromkeys([y for x in splitted_df_JSA1 for y in x]).keys())
filtrechantier6 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals6,key=31)
mask = (df_JSA1['Chantier'] == filtrechantier6)
df_filter6=df_JSA1.loc[mask]
st.dataframe(df_filter6)
idval = list(df_filter6['id'])
selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À SUPPRIMER", idval,key=15)
name_delete = get_id_JSA(selected_id)
if name_delete:
id = name_delete[0][0]
if st.button("SUPPRIMER",key=4):
delete_data_JSA(name_delete)
st.warning("SUPPRIMER: '{}'".format(name_delete))
st.markdown('### DONNÉE MODIFIÉE')
df_JSA = pd.DataFrame(view_JSA(), columns=["id","IDD","Chantier","NAct","NJSA","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def JSA_2(df_JSA: pd.DataFrame) -> pd.DataFrame:
df_JSA2 = df_JSA[(df_JSA["IDD"].isin(IDD2))]
return df_JSA2.loc[1:, ["id","Chantier","NAct","NJSA","Date"]]
df_JSA1 = JSA_2(df_JSA)
#filtrage par chantier
splitted_df_JSA1 = df_JSA1['Chantier'].str.split(',')
unique_vals6 = list(dict.fromkeys([y for x in splitted_df_JSA1 for y in x]).keys())
filtrechantier6 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals6,key=32)
mask = (df_JSA1['Chantier'] == filtrechantier6)
df_filter6=df_JSA1.loc[mask]
st.dataframe(df_filter6)
with st.beta_expander("INCIDENT & ACCIDENT"):
st.markdown('### DONNÉE ACTUELLE')
df_IA = pd.DataFrame(view_Incident_Accident(), columns=["id","IDD","Chantier","NInc","AAA","ASA","AT","NJP","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def IA_2(df_IA: pd.DataFrame) -> pd.DataFrame:
df_IA = df_IA[(df_IA["IDD"].isin(IDD2))]
return df_IA.loc[1:, ["id","Chantier","NInc","AAA","ASA","AT","NJP","Date"]]
df_IA1 = IA_2(df_IA)
#filtrage par chantier
splitted_df_IA1 = df_IA1['Chantier'].str.split(',')
unique_vals7 = list(dict.fromkeys([y for x in splitted_df_IA1 for y in x]).keys())
filtrechantier7 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals7,key=33)
mask = (df_IA1['Chantier'] == filtrechantier7)
df_filter7=df_IA1.loc[mask]
st.dataframe(df_filter7)
idval = list(df_filter7['id'])
selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À SUPPRIMER", idval,key=16)
name_delete = get_id_Incident_Accident(selected_id)
if name_delete:
id = name_delete[0][0]
if st.button("SUPPRIMER",key=5):
delete_data_Incident_Accident(name_delete)
st.warning("SUPPRIMER: '{}'".format(name_delete))
st.markdown('### DONNÉE MODIFIÉE')
df_IA = pd.DataFrame(view_Incident_Accident(), columns=["id","IDD","Chantier","NInc","AAA","ASA","AT","NJP","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def IA_2(df_IA: pd.DataFrame) -> pd.DataFrame:
df_IA = df_IA[(df_IA["IDD"].isin(IDD2))]
return df_IA.loc[1:, ["id","Chantier","NInc","AAA","ASA","AT","NJP","Date"]]
df_IA1 = IA_2(df_IA)
#filtrage par chantier
splitted_df_IA1 = df_IA1['Chantier'].str.split(',')
unique_vals7 = list(dict.fromkeys([y for x in splitted_df_IA1 for y in x]).keys())
filtrechantier7 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals7,key=34)
mask = (df_IA1['Chantier'] == filtrechantier7)
df_filter7=df_IA1.loc[mask]
st.dataframe(df_filter7)
with st.beta_expander("AUDIT CHANTIER; VISITE CONJOINTE; PRÉVENTION ET INSPECTION"):
st.markdown('### DONNÉE ACTUELLE')
df_Audit = pd.DataFrame(view_Audit(), columns=["id","IDD","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def Audit_2(df_Audit: pd.DataFrame) -> pd.DataFrame:
df_Audit = df_Audit[(df_Audit["IDD"].isin(IDD2))]
return df_Audit.loc[1:, ["id","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"]]
df_Audit1 = Audit_2(df_Audit)
#filtrage par chantier
splitted_df_Audit1 = df_Audit1['Chantier'].str.split(',')
unique_vals8 = list(dict.fromkeys([y for x in splitted_df_Audit1 for y in x]).keys())
filtrechantier8 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals8,key=355)
mask = (df_Audit1['Chantier'] == filtrechantier8)
df_filter8=df_Audit1.loc[mask]
st.dataframe(df_filter8)
idval = list(df_filter8['id'])
selected_id = st.selectbox("SELECTIONEZ l'ID DE LA LIGNE À SUPPRIMER", idval,key=17)
name_delete = get_id_Audit(selected_id)
if name_delete:
id = name_delete[0][0]
if st.button("SUPPRIMER",key=6):
delete_data_Audit(name_delete)
st.warning("SUPPRIMER: '{}'".format(name_delete))
st.markdown('### DONNÉE MODIFIÉE')
df_Audit = pd.DataFrame(view_Audit(), columns=["id","IDD","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"])
IDD2 = email.strip('][').split(', ')
@st.cache
def Audit_2(df_Audit: pd.DataFrame) -> pd.DataFrame:
df_Audit = df_Audit[(df_Audit["IDD"].isin(IDD2))]
return df_Audit.loc[1:, ["id","Chantier","AC","VC","NEU","SMPAR","NPR","IE","Date"]]
df_Audit1 = Audit_2(df_Audit)
#filtrage par chantier
splitted_df_Audit1 = df_Audit1['Chantier'].str.split(',')
unique_vals8 = list(dict.fromkeys([y for x in splitted_df_Audit1 for y in x]).keys())
filtrechantier8 = st.selectbox('AFFICHEZ VOTRE GRILLE EN FONCTION DU CHANTIER', unique_vals8,key=36)
mask = (df_Audit1['Chantier'] == filtrechantier8)
df_filter8=df_Audit1.loc[mask]
st.dataframe(df_filter8)
else:
st.warning("Veuillez-vous enregistrer")
# NOTE(review): original indentation lost in the dump; statements kept
# byte-for-byte, only comments added/translated to English.
# "Inscription" branch: create a new account, then seed two rows per KPI
# table so the dashboards have data from day one.
elif choice == "Inscription":
st.subheader("Créer un nouveau compte")
new_user = st.text_input("Email")
new_password = st.text_input("<PASSWORD> de passe",type='password')
if st.button("Inscription"):
# validate the email input
regex = '^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$'
if(re.search(regex, new_user)):
new_user
else:
st.error("Email non valide")
st.stop()
create_table()
add_userdata(new_user,make_hashes(new_password))
# initialise the application's database rows for the new user
# (embedded here; unrelated to the login code itself)
IDD=new_user
Chantier=0
NArrivant=0
Ninduction=0
NChantier=0
NTBM=0
NCR=0
FNCR=0
NCC=0
FNCC=0
NCH=0
FNCH=0
NCHC=0
FNCHC=0
NA=0
FNA=0
NAC=0
FNAC=0
NAct=0
NJSA=0
NInc=0
AAA=0
ASA=0
AT=0
NJP=0
AC=0
VC=0
NEU=0
SMPAR=0
NPR=0
IE=0
# seed dates: the day before yesterday (Date) and yesterday (Date2)
T1=(datetime.now() - timedelta(1)).strftime('%Y-%m-%d')
T2=(datetime.now() - timedelta(2)).strftime('%Y-%m-%d')
Date=T2
Date2=T1
# NOTE(review): both Accueil seed rows below use Date2, while every other
# table is seeded with Date then Date2 — the first insert probably
# intended Date. Confirm before changing.
c.execute('INSERT INTO Accueil(IDD,Chantier,NArrivant,Ninduction,Date) VALUES (%s,%s,%s,%s,%s)',(IDD,Chantier,NArrivant,Ninduction,Date2))
conn.commit()
c.execute('INSERT INTO Accueil(IDD,Chantier,NArrivant,Ninduction,Date) VALUES (%s,%s,%s,%s,%s)',(IDD,Chantier,NArrivant,Ninduction,Date2))
conn.commit()
c.execute('INSERT INTO TBM(IDD,Chantier,NChantier,NTBM,Date) VALUES (%s,%s,%s,%s,%s)',(IDD,Chantier,NChantier,NTBM,Date))
conn.commit()
c.execute('INSERT INTO TBM(IDD,Chantier,NChantier,NTBM,Date) VALUES (%s,%s,%s,%s,%s)',(IDD,Chantier,NChantier,NTBM,Date2))
conn.commit()
c.execute('INSERT INTO NC(IDD,Chantier,NCR,FNCR,NCC,FNCC,Date) VALUES (%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,NCR,FNCR,NCC,FNCC,Date))
conn.commit()
c.execute('INSERT INTO NC(IDD,Chantier,NCR,FNCR,NCC,FNCC,Date) VALUES (%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,NCR,FNCR,NCC,FNCC,Date2))
conn.commit()
c.execute('INSERT INTO Changements(IDD,Chantier,NCH,FNCH,NCHC,FNCHC,Date) VALUES (%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,NCH,FNCH,NCHC,FNCHC,Date))
conn.commit()
c.execute('INSERT INTO Changements(IDD,Chantier,NCH,FNCH,NCHC,FNCHC,Date) VALUES (%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,NCH,FNCH,NCHC,FNCHC,Date2))
conn.commit()
c.execute('INSERT INTO Anomalies(IDD,Chantier,NA,FNA,NAC,FNAC,Date) VALUES (%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,NA,FNA,NAC,FNAC,Date))
conn.commit()
c.execute('INSERT INTO Anomalies(IDD,Chantier,NA,FNA,NAC,FNAC,Date) VALUES (%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,NA,FNA,NAC,FNAC,Date2))
conn.commit()
c.execute('INSERT INTO JSA(IDD,Chantier,NAct,NJSA,Date) VALUES (%s,%s,%s,%s,%s)',(IDD,Chantier,NAct,NJSA,Date))
conn.commit()
c.execute('INSERT INTO JSA(IDD,Chantier,NAct,NJSA,Date) VALUES (%s,%s,%s,%s,%s)',(IDD,Chantier,NAct,NJSA,Date2))
conn.commit()
c.execute('INSERT INTO Incident_Accident(IDD,Chantier,NInc,AAA,ASA,AT,NJP,Date) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,NInc,AAA,ASA,AT,NJP,Date))
conn.commit()
c.execute('INSERT INTO Incident_Accident(IDD,Chantier,NInc,AAA,ASA,AT,NJP,Date) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,NInc,AAA,ASA,AT,NJP,Date2))
conn.commit()
c.execute('INSERT INTO Audit(IDD,Chantier,AC,VC,NEU,SMPAR,NPR,IE,Date) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,AC,VC,NEU,SMPAR,NPR,IE,Date))
conn.commit()
c.execute('INSERT INTO Audit(IDD,Chantier,AC,VC,NEU,SMPAR,NPR,IE,Date) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)',(IDD,Chantier,AC,VC,NEU,SMPAR,NPR,IE,Date2))
conn.commit()
#### end of database initialisation
st.success("Votre compte a été créé avec succès")
st.info("Allez au menu de connexion pour vous connecter")
# page footer: logo, author avatar and contact details in the sidebar
col1, col2, col3 = st.beta_columns([1,6,1])
with col2:
st.image("http://cabinetnpm.com/wp-content/uploads/2020/02/t%C3%A9l%C3%A9chargement.png",width=200,)
image_ren ="""
<img src="https://1tpecash.fr/wp-content/uploads/elementor/thumbs/Renaud-Louis-osf6t5lcki4q31uzfafpi9yx3zp4rrq7je8tj6p938.png" alt="Avatar" style="vertical-align: middle;width: 100px;height: 100px;border-radius: 50%;" >
"""
st.sidebar.markdown(image_ren, unsafe_allow_html = True)
st.sidebar.markdown('**Auteur: <NAME>**')
st.sidebar.markdown('Email:<EMAIL>')
st.sidebar.markdown('[Linkedin](https://www.linkedin.com/in/dahou-renaud-louis-8958599a/)')
st.sidebar.warning('Pour tester HSE KPI RECORDER et faire des enregistrements, allez dans menu- connexion et mettez les informations de connexion ou inscrivez-vous si vous êtes nouveau.') # demo login -> Email:<EMAIL>  Password:lyne18
if __name__ == '__main__':
main()
| 2.65625 | 3 |
uni_class.py | suvojitd79/University-Database-using-python | 0 | 12772399 |
def main():
    """Interactive in-memory student database (insert / search / delete).

    Fixes over the original:
    - ``details()`` crashed with ``TypeError`` because it did
      ``print(...) + self.result()`` (print returns None).
    - the unused ``decision()`` helper built a dict of *called* functions,
      which would have executed every action (and NameError'd on the
      missing ``delete``); it has been removed and replaced by a real
      ``delete()`` wired to menu option 3 (which previously re-ran search).
    - menu option 4 ("exit") fell through to the invalid-option prompt;
      it now exits as advertised.
    - ``y is '1'`` identity comparisons replaced with ``==``; the
      ``ord(x1)`` re-prompt no longer crashes on empty/multi-char input.
    """

    class student:
        # Shared list of records; each record is the instance __dict__
        # (keys: name, id, cgpa).
        std = []

        def __init__(self, name, id, cgpa):
            self.name = name
            self.id = id
            self.cgpa = cgpa

        def showId(self):
            """Return the student's id."""
            return self.id

        def result(self):
            """Print a progress remark based on the cgpa."""
            if self.cgpa > 8.5:
                print("Great score")
            elif 7 < self.cgpa < 8.5:
                print("Keep it up")
            else:
                print("Not gonna pass")

        def details(self):
            """Print the full record followed by the progress remark."""
            # BUG FIX: the original added print()'s None return to
            # result(); call them sequentially instead.
            print(f'STUDENT ID: {self.id}\nSTUDENT NAME: {self.name}\nCGPA: {self.cgpa}\nPROGRESS REPORT:', end='')
            self.result()

    def insert():
        """Prompt for one student record and append it to student.std."""
        x = input('Enter the name of the student: ')
        y = input('Enter the id of the student: ')
        z = input('Enter the cgpa of the student: ')
        # keep asking until the cgpa is an integer below 10 (kept from
        # the original; fractional cgpas are still rejected here)
        while not (z.isdigit() and int(z) < 10):
            z = input('Enter a correct cgpa')
        if float(z) < 5:
            print(f'Hey {x}, You better work on your studies')
        data = student(x, y, float(z))
        student.std.append(data.__dict__)
        print(f'id no {y} has been added')

    def search():
        """Look a student up by id and print name and cgpa."""
        found = 0
        try:
            x = input('Enter your id: ')
            for data in student.std:
                if x == data['id']:
                    print('NAME: ' + data['name'])
                    print('CGPA: ' + str(data['cgpa']))
                    found = 1
            if found == 0:
                print('Data not found')
        except Exception:
            print('Ooops!Error')

    def delete():
        """Remove every record matching the entered id (new: option 3
        previously just re-ran search)."""
        x = input('Enter the id to delete: ')
        before = len(student.std)
        student.std[:] = [d for d in student.std if d['id'] != x]
        if len(student.std) < before:
            print(f'id no {x} has been deleted')
        else:
            print('Data not found')

    while True:
        y = input('Press 1 if you want to insert data\nPress 2 if you want to search data\nPress 3 if you want to delete a data\npress 4 if you want to exit\n')
        if y == '1':
            insert()
            print(student.std)
        elif y == '2':
            search()
        elif y == '3':
            delete()
        elif y == '4':
            # exit, as the menu promises
            break
        else:
            x1 = input('INVALID OPTION.PRESS * TO CONTINUE OR ELSE TO EXIT :')
            if x1 != '*':
                break


if __name__ == '__main__':
    main()
| 3.84375 | 4 |
send_sms.py | apthagowda97/areca_price_alerter | 0 | 12772400 | <reponame>apthagowda97/areca_price_alerter
import os
from twilio.rest import Client
from twilio.http.http_client import TwilioHttpClient
import areca_price
from dotenv import load_dotenv
def send_sms(body: str):
    """Send *body* as an SMS through Twilio, honouring HTTP(S) proxies."""
    proxies = {"http": os.getenv("HTTP_PROXY"), "https": os.getenv("HTTPS_PROXY")}
    client = Client(http_client=TwilioHttpClient(proxy=proxies))
    client.messages.create(
        from_=os.getenv("TWILIO_SMS_FROM"),
        body=body,
        status_callback=os.getenv("TWILIO_WEBHOOK"),
        to=os.getenv("TWILIO_SMS_TO"),
    )
def main():
    """Entry point: fetch the SHIVAMOGGA price report and SMS it, if any."""
    load_dotenv()
    message = areca_price.get_body(city="SHIVAMOGGA")
    if message is not None:
        send_sms(body=message)


if __name__ == "__main__":
    main()
| 2.625 | 3 |
gen_luh2.py | ricardog/raster-project | 1 | 12772401 | #!/usr/bin/env python
import numpy as np
import numpy.ma as ma
import os
import rasterio
import rasterio.warp as rwarp
import subprocess
import pdb
import projections.poly as poly
from projections.rasterset import Raster, RasterSet
import projections.reproject as reproj
from projections.simpleexpr import SimpleExpr
from projections.utils import luh2_dir, luh2_prefix, luh2_scenarios, \
lui_model_dir, outfn
def luh2_states(ssp):
    """Return the GDAL NetCDF URI of the LUH2 states file for *ssp*.

    Scenario directories are prefixed (e.g. LUH2_v2f_SSP…); the historical
    run lives in a plain 'historical' directory.
    """
    dname = ssp if ssp == 'historical' else luh2_prefix() + ssp.upper()
    return 'netcdf:' + os.path.join(luh2_dir(), dname, 'states.nc')
def luh2_secd(ssp):
    """Return the NetCDF URI of the derived secondary-land file for *ssp*.

    BUG FIX: the original read ``'netcdf:' + .outfn(...)`` — a stray leading
    dot that made the module fail to import (SyntaxError).
    """
    return 'netcdf:' + outfn('luh2', 'secd-%s.nc' % ssp)
def luh2_secd_types():
    """Names of the six secondary-land layers: age (young/intermediate/
    mature) crossed with forest/non-forest, forest first."""
    return ['secd%s%s' % (age, fnf)
            for fnf in ('f', 'n')
            for age in ('y', 'i', 'm')]
def luh2_types(ssp, year):
    """Return a dict mapping layer name -> Raster for every LUH2 state
    variable, plus the derived secondary-age layers, of *ssp* at *year*."""
    res = {}
    assert ssp in luh2_scenarios()
    path = luh2_states(ssp)
    # Band index: historical files start at year 850, scenarios at 2015.
    if ssp == 'historical':
        assert year >= 850 and year < 2015
        bidx = year - 849
    else:
        assert year >= 2015
        bidx = year - 2014
    state_names = ('primf', 'primn', 'secdf', 'secdn', 'pastr', 'range',
                   'urban', 'c3ann', 'c3per', 'c4ann', 'c4per', 'c3nfx')
    for name in state_names:
        res[name] = Raster(name, '%s:%s' % (path, name), bidx)
    for name in luh2_secd_types():
        res[name] = Raster(name, '%s:%s' % (luh2_secd(ssp), name), bidx)
    return res
def rset_add(rasters, name, expr):
    """Register a SimpleExpr layer computing *expr* under *name* in the
    raster dict (mutates *rasters* in place)."""
    rasters[name] = SimpleExpr(name, expr)
def luh2_rasterset(scenario, year):
    """Build the RasterSet of LUH2 layers plus the aggregate land-use
    expressions used downstream.

    BUG FIX: the original ignored its arguments and always loaded
    ('historical', 1999); the parameters are now passed through.  The only
    caller in this file passes exactly those values, so its behaviour is
    unchanged.
    """
    rset = luh2_types(scenario, year)
    # Aggregate expressions over the raw LUH2 state layers.
    rset_add(rset, 'perennial', 'c3per + c4per')
    rset_add(rset, 'annual', 'c3ann + c4ann')
    rset_add(rset, 'nitrogen', 'c3nfx')
    rset_add(rset, 'rangelands', 'range')
    rset_add(rset, 'secondaryf', 'secdyf + secdif + secdmf')
    rset_add(rset, 'secondaryn', 'secdyn + secdin + secdmn')
    rset_add(rset, 'secondary',
             'secdyf + secdif + secdmf + secdyn + secdin + secdmn')
    return RasterSet(rset)
def process_lu(rcp_lu, comps, luh2, mask=None):
    """Split the RCP land-use intensity raster *rcp_lu* across its LUH2
    component layers *comps*, write the per-component rasters, and run the
    R recalibration script for each component.

    *luh2* is the RasterSet from luh2_rasterset(); *mask*, when given, is
    OR-ed into the reprojected intensity bands (e.g. an ice/water mask).
    """
    # BUG FIX: the two outfn() calls below had corrupted string literals
    # ("%s.tif'" and "lu-%s.tif'" were missing their opening quote),
    # restored from the matching patterns used elsewhere in this function.
    rcp_lui_ds = rasterio.open(outfn('lui', '%s.tif' % rcp_lu))
    rcp_lui_data = rcp_lui_ds.read(masked=True)
    rcp_lu_ds = rasterio.open(outfn('lu', 'rcp', 'hyde', '%s_1999.tif' % rcp_lu))
    rcp_lu_data = rcp_lu_ds.read(masked=True)
    # Convert intensity totals into per-cell fractions of the land use.
    rcp_lui_data /= rcp_lu_data
    meta, data = reproj.reproject2(rcp_lui_ds, rcp_lui_data, (0.25, 0.25),
                                   rwarp.Resampling.mode)
    lu_meta = meta.copy()
    lu_meta.update({'count': 1})
    count = meta['count']
    if mask is not None:
        for idx in range(count):
            np.logical_or(data[idx].mask, mask, data[idx].mask)
    # Share of each LUH2 component within the RCP land-use class.
    arrays = (luh2.eval(what)[0] for what in comps)
    shares = ma.array(tuple(arrays), fill_value=-9999)
    total = shares.sum(axis=0)
    #fract = shares / total
    fract = ma.empty_like(shares)
    for idx in range(shares.shape[0]):
        fract[idx] = ma.where(total == 0, 0, shares[idx] / total)
    for idx, lu in enumerate(comps):
        with rasterio.open(outfn('luh2', '%s.tif' % lu), 'w', **meta) as dst:
            # NOTE(review): multiplying by both fract[idx] and shares[idx]
            # looks like it double-weights the component — confirm intent.
            xxx = data * fract[idx] * shares[idx]
            dst.write(xxx.filled(meta['nodata']), indexes=range(1, count + 1))
        with rasterio.open(outfn('luh2', 'lu-%s.tif' % lu), 'w', **lu_meta) as dst:
            dst.write(shares[idx].filled(meta['nodata']), indexes=1)
        cmd = [os.path.join(os.getcwd(), 'lu-recalibrate.R'),
               '-m', lui_model_dir(),
               '--hpd', outfn('luh2', 'gluds00ag-full.tif'),
               '-u', outfn('luh2', 'un_subregions-full.tif'),
               '--mask',
               'netcdf:%s/staticData_quarterdeg.nc:icwtr' % luh2_dir(),
               '--lu', outfn('luh2', 'lu-%s.tif' % lu),
               '--lui', outfn('luh2', '%s.tif' % lu),
               '-o', outfn('luh2', '%s-recal.tif' % lu),
               '-t', rcp_lu]
        subprocess.check_output(cmd, shell=False)
def main(scenario='historical', year=1999):
    """Split the 1999 RCP land-use/intensity layers onto the LUH2 types."""
    # BUG FIX: the original called data_root(), which is never imported in
    # this module (NameError at runtime); build the path from luh2_dir()
    # exactly as the icwtr URI in process_lu() does.
    static = os.path.join(luh2_dir(), 'staticData_quarterdeg.nc')
    icewtr = rasterio.open('netcdf:%s:icwtr' % static)
    # Boolean mask of permanent ice / water cells.
    icewtr_mask = ma.where(icewtr.read(1) == 1.0, True, False)
    # How to allocate current land use types
    rcp = {'cropland': ['perennial', 'annual', 'nitrogen'],
           'pasture': ['pastr', 'range'],
           'primary': ['primf', 'primn'],
           'secondary': ['secdyf', 'secdif', 'secdmf',
                         'secdyn', 'secdin', 'secdmn'],
           'urban': ['urban']}
    luh2 = luh2_rasterset(scenario, year)
    for rcp_lu in rcp:
        process_lu(rcp_lu, rcp[rcp_lu], luh2, mask=icewtr_mask)


if __name__ == '__main__':
    main('historical', 1999)
| 2.03125 | 2 |
Protheus_WebApp/Modules/SIGAGFE/GFEA014TESTCASE.py | 98llm/tir-script-samples | 17 | 12772402 | <filename>Protheus_WebApp/Modules/SIGAGFE/GFEA014TESTCASE.py
from tir import Webapp
import unittest
class GFEA014(unittest.TestCase):
    """TIR UI test for Protheus routine GFEA014 (SIGAGFE module)."""

    @classmethod
    def setUpClass(inst):
        # Log into the SIGAGFE module and open routine GFEA014 once for
        # the whole suite.
        inst.oHelper = Webapp()
        inst.oHelper.Setup('SIGAGFE','03/11/2020','T1','D MG 01 ','78')
        inst.oHelper.Program('GFEA014')

    def test_GFEA014_CT001(self):
        """CRUD cycle on item type 9999: include, alter, view, delete."""
        # include record 9999
        self.oHelper.SetButton('Incluir')
        self.oHelper.SetValue('Tipo de Item', '9999')
        self.oHelper.SetValue('Descricao ', 'TIPO DE ITENS PARA TESTE AUTOMACAO')
        self.oHelper.SetButton('Confirmar')
        self.oHelper.SetButton('Fechar')
        # alter its description
        self.oHelper.SearchBrowse(' 9999')
        self.oHelper.SetButton('Alterar')
        self.oHelper.SetValue('Descricao ', 'TIPO DE ITENS PARA TESTE AUTOMACAO - ALTERACAO')
        self.oHelper.SetButton('Confirmar')
        self.oHelper.SetButton('Fechar')
        # view the record
        self.oHelper.SearchBrowse(' 9999')
        self.oHelper.SetButton('Visualizar')
        self.oHelper.SetButton('Fechar')
        # delete it and assert the whole sequence succeeded
        self.oHelper.SearchBrowse(' 9999')
        self.oHelper.SetButton('Outras Ações','Excluir')
        self.oHelper.SetButton('Confirmar')
        self.oHelper.SetButton('Fechar')
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        inst.oHelper.TearDown()


if __name__ == '__main__':
    unittest.main()
| 2.4375 | 2 |
dicom_tools/highlight_color.py | carlomt/dicom_tools | 7 | 12772403 | import numpy as np
def highlight_color(data, colorrange, verbose=False):
    """Highlight voxels whose first channel lies strictly inside the range
    given by *colorrange* ("min:max") by writing a reference value into
    channel 2.  *data* (a ... x channels ndarray) is modified in place and
    returned.

    Fixes over the original: the loops used Python-2 ``xrange`` (NameError
    on Python 3) and ``counter`` was never incremented even though it was
    reported; this vectorised version fixes both.
    """
    if verbose:
        print("highlight_color")
    valMin = float(colorrange.split(":")[0])
    valMax = float(colorrange.split(":")[1])
    if valMin > valMax:
        print("warning valMin > valMax")
    # For low thresholds mark with the brightest channel-0 value,
    # otherwise with 0 (behaviour kept from the original).
    referenceValue = 0
    if valMin < 300:
        referenceValue = data[..., 0].max()
    if verbose:
        print("required minimum:", valMin)
        print("required maximum:", valMax)
    selected = (data[..., 0] > valMin) & (data[..., 0] < valMax)
    data[selected, 2] = referenceValue
    counter = int(selected.sum())
    if verbose:
        print("highlight_color returning")
        print(counter, "pixels modified")
    return data
| 2.90625 | 3 |
Food Blog Backend/blog.py | oxxio/hyperskill-projects-python | 0 | 12772404 | import sqlite3
import sys
def get_pk_key(databasename, name, value):
    """Return the primary key of *value* in table *name*, or 0 if absent.

    *name* selects the table ("meals", "measures", "ingredients" or
    "recipes").  For "meals", *value* is the menu number 1-4 which is first
    translated to the meal name; for "ingredients" a LIKE '%value%' match
    is used (kept from the original).

    SECURITY FIX: lookups now use parameterized queries instead of
    interpolating the (user supplied) value into the SQL text.
    """
    # menu option -> meal name, as printed by the interactive prompt
    meal_names = {1: "breakfast", 2: "brunch", 3: "lunch", 4: "supper"}

    connect_sqlite3 = sqlite3.connect(databasename)
    try:
        cursor_sqlite3 = connect_sqlite3.cursor()
        if name == "meals":
            cursor_sqlite3.execute(
                "SELECT meal_id FROM meals WHERE meal_name = ?",
                (meal_names.get(int(value), ""),))
        elif name == "measures":
            cursor_sqlite3.execute(
                "SELECT measure_id FROM measures WHERE measure_name = ?",
                (value,))
        elif name == "ingredients":
            cursor_sqlite3.execute(
                "SELECT ingredient_id FROM ingredients WHERE ingredient_name like ?",
                ("%{}%".format(value),))
        elif name == "recipes":
            cursor_sqlite3.execute(
                "SELECT recipe_id FROM recipes WHERE recipe_name = ?",
                (value,))
        else:
            # BUG FIX: the original hit UnboundLocalError here; fail loudly.
            raise ValueError("unknown table: %r" % name)
        results = cursor_sqlite3.fetchone()
    finally:
        connect_sqlite3.close()
    # row found -> its primary key, otherwise 0
    return results[0] if results is not None else 0
def validate_parameters(measure, ingredient, databasename):
    """Return 1 when both *measure* and *ingredient* resolve to existing
    rows in *databasename*; print a message and return 0 otherwise."""
    if get_pk_key(databasename, "measures", measure) == 0:
        print("The measure is not conclusive!")
        return 0
    if get_pk_key(databasename, "ingredients", ingredient) == 0:
        print("The ingredient is not conclusive!")
        return 0
    return 1
def insert_value(name, databasename, value1, value2, meals):
    """Insert one row into table *name*; for recipes also record where it
    is served (*meals*) and interactively prompt for ingredient quantities.

    NOTE(review): the source indentation was lost in the dump — the nesting
    below (the serve loop and the quantity prompt inside the recipes
    branch) is reconstructed; confirm against the original.
    NOTE(review): the INSERTs build SQL with str.format on user-supplied
    values — SQL injection risk; should be parameterized.
    """
    try:
        connect = sqlite3.connect(databasename)
        connect.execute("PRAGMA foreign_keys = 1")
        cursor = connect.cursor()
        sqlite_insert_with_param = ""
        if name == "measures":
            sqlite_insert_with_param = "INSERT INTO measures (measure_name) VALUES ('{}');".format(value1)
        if name == "ingredients":
            sqlite_insert_with_param = "INSERT INTO ingredients (ingredient_name) VALUES ('{}');".format(value1)
        if name == "meals":
            sqlite_insert_with_param = "INSERT INTO meals (meal_name) VALUES ('{}');".format(value1)
        if name == "recipes":
            sqlite_insert_with_param = "INSERT INTO recipes (recipe_name, recipe_description) VALUES ('{}', '{}');"\
                .format(value1, value2)
        result = cursor.execute(sqlite_insert_with_param).lastrowid
        connect.commit()
        if name == "recipes":
            for meal in meals:
                # serve
                meal_id = get_pk_key(databasename, "meals", meal)
                sqlite_insert_with_param = "INSERT INTO serve (meal_id, recipe_id) VALUES ({}, {});"\
                    .format(meal_id, result)
                cursor.execute(sqlite_insert_with_param)
                connect.commit()
            # prompt ingredient quantities until the user presses enter
            while True:
                input_value = input("Input quantity of ingredient <press enter to stop>:").split()
                if input_value == "" or len(input_value) == 0:
                    break
                # 2 tokens: quantity + ingredient (no measure)
                if len(input_value) == 2:
                    quantity = input_value[0]
                    measure = ""
                    ingredient = input_value[1]
                # 3 tokens: quantity + measure + ingredient
                if len(input_value) == 3:
                    quantity = input_value[0]
                    measure = input_value[1]
                    ingredient = input_value[2]
                if validate_parameters(measure, ingredient, databasename) == 0:
                    continue
                measure_id = get_pk_key(databasename, "measures", measure)
                ingredients_id = get_pk_key(databasename, "ingredients", ingredient)
                #quantity
                sqlite_insert_with_param = "INSERT INTO quantity (quantity, recipe_id, measure_id, ingredient_id) " \
                    "VALUES ({}, {}, {}, {});".format(quantity, result, measure_id, ingredients_id)
                print(" ### quantity " + sqlite_insert_with_param)
                cursor.execute(sqlite_insert_with_param)
                connect.commit()
    except sqlite3.Error as error:
        print("Failed to insert Python variable into sqlite table", error)
    finally:
        if connect:
            connect.close()
def populate_db(data, databasename):
    """Seed *databasename* with every value of the table->values dict *data*."""
    for table, values in data.items():
        for value in values:
            insert_value(table, databasename, value, "", "")
def create_db(arg):
    """Drop and recreate every table of the food-blog schema in the SQLite
    database at path *arg* (measures, ingredients, meals, recipes, serve,
    quantity)."""
    statements = (
        "DROP TABLE IF EXISTS measures;",
        "CREATE TABLE IF NOT EXISTS measures (measure_id integer not null primary key autoincrement, measure_name text unique);",
        "DROP TABLE IF EXISTS ingredients;",
        "CREATE TABLE IF NOT EXISTS ingredients (ingredient_id integer not null primary key autoincrement, ingredient_name text not null unique);",
        "DROP TABLE IF EXISTS meals;",
        "CREATE TABLE IF NOT EXISTS meals (meal_id integer not null primary key autoincrement, meal_name text not null unique);",
        "DROP TABLE IF EXISTS recipes;",
        "CREATE TABLE IF NOT EXISTS recipes (recipe_id integer not null primary key autoincrement, recipe_name text not null, recipe_description text);",
        "DROP TABLE IF EXISTS serve;",
        "CREATE TABLE IF NOT EXISTS serve (serve_id integer not null primary key autoincrement, meal_id integer not null, recipe_id integer not null, FOREIGN KEY(meal_id) REFERENCES meals(meal_id), FOREIGN KEY(recipe_id) REFERENCES recipes(recipe_id));",
        "DROP TABLE IF EXISTS quantity ;",
        "CREATE TABLE IF NOT EXISTS quantity (quantity_id integer not null primary key autoincrement, quantity integer not null, recipe_id integer not null, measure_id integer not null, ingredient_id integer not null, FOREIGN KEY(recipe_id) REFERENCES recipes(recipe_id), FOREIGN KEY(measure_id) REFERENCES measures(measure_id), FOREIGN KEY(ingredient_id) REFERENCES ingredients(ingredient_id));",
    )
    try:
        connect = sqlite3.connect(arg)
        connect.execute("PRAGMA foreign_keys = 1")
        cursor = connect.cursor()
        for stmt in statements:
            cursor.execute(stmt)
        connect.commit()
    except sqlite3.Error as error:
        print("Failed to insert Python variable into sqlite table", error)
    finally:
        if connect:
            connect.close()
def check_recipes(database, ingredients, meals):
    """Print the recipes that are served at one of *meals* and contain
    every ingredient in *ingredients*.

    SECURITY FIX: the original interpolated the command-line supplied
    ingredient/meal names straight into the SQL text; both queries now use
    parameterized ``IN (?, ...)`` clauses.
    """
    connect = sqlite3.connect(database)
    cursor = connect.cursor()

    # An empty filter list yields a clause matching nothing, mirroring the
    # original's comparison against an empty string.
    meal_ph = ",".join("?" * len(meals)) if meals else "''"
    ing_ph = ",".join("?" * len(ingredients)) if ingredients else "''"

    # recipes served at any of the requested meals
    cursor.execute(
        "SELECT recipes.recipe_id, recipes.recipe_name FROM recipes "
        "WHERE recipes.recipe_id IN ("
        "SELECT serve.recipe_id FROM serve, meals "
        "WHERE serve.meal_id = meals.meal_id AND meals.meal_name IN (%s))" % meal_ph,
        tuple(meals))
    recipes_list = cursor.fetchall()

    # (recipe_id, ingredient_name) rows for the requested ingredients
    cursor.execute(
        "SELECT quantity.recipe_id, ingredients.ingredient_name "
        "FROM quantity, ingredients "
        "WHERE quantity.ingredient_id = ingredients.ingredient_id "
        "AND ingredients.ingredient_name in (%s)" % ing_ph,
        tuple(ingredients))
    ingredients_list = cursor.fetchall()

    # A recipe qualifies when it has one matched row per requested
    # ingredient (assumes one quantity row per recipe/ingredient pair,
    # as in the original).
    wanted = len(ingredients)
    matched_ids = [rid for rid, _name in ingredients_list]
    recipes = [name for rid, name in recipes_list
               if matched_ids.count(rid) == wanted]

    if len(recipes) > 0:
        print("Recipes selected for you: " + " and ".join(recipes))
    else:
        print("There are no such recipes in the database.")
    connect.close()
# CLI driver.  Two modes:
#   blog.py DB --ingredients=a,b --meals=x,y  -> query matching recipes
#   blog.py DB                                -> (re)create + seed the DB,
#                                                then interactively add recipes
if __name__ == "__main__":
    # seed values for the three static tables
    data = {"meals": ("breakfast", "brunch", "lunch", "supper"),
            "ingredients": ("milk", "cacao", "strawberry", "blueberry", "blackberry", "sugar"),
            "measures": ("ml", "g", "l", "cup", "tbsp", "tsp", "dsp", "")}
    args = sys.argv
    if len(args) == 4:
        # database name
        database = str(args[1])
        # --ingredients
        if args[2].split("=")[0] == "--ingredients":
            ingredients = args[2].split("=")[1].split(",")
            #print("ingredients : " + str(ingredients))
        # --meals
        if args[3].split("=")[0] == "--meals":
            meals = args[3].split("=")[1].split(",")
            #print("meals : " + str(meals))
        # NOTE(review): ingredients/meals are unbound (NameError) if the
        # flags above are misspelled — confirm intended behaviour.
        check_recipes(database, ingredients, meals)
    elif len(args) == 2:
        database = str(args[1])
        create_db(database)
        populate_db(data, database)
        print("Pass the empty recipe name to exit.")
        # interactive recipe-entry loop; empty name or description exits
        while True:
            print("Recipe name:")
            name = input()
            if name == "":
                break
            print("Recipe description:")
            description = input()
            if description == "":
                break
            print("1) breakfast 2) brunch 3) lunch 4) supper")
            print("When the dish can be served:")
            serve = input().split()
            insert_value("recipes", database, name, description, serve)
nash.py | girving/poker | 10 | 12772405 | '''Exact Nash equilibrium computation given a payoff matrix'''
from __future__ import division
from numpy import *
'''We implement a fairly unoptimized version of the simplex method for linear programming following
Wikipedia: http://en.wikipedia.org/wiki/Simplex_method.  Thus, our tableau has the structure
    T = [[1,-cB.T,-cD.T,zB]
        ,[0,    1,    D, b]]
where B and N are the ordered lists of basis and nonbasis variables, respectively.  Note that we
store the identity matrix explicitly, which is lazy.
One note: since we always operate on linear programs in canonical form, where the only inequality
constraints are x>=0, the feasible solutions we traverse always have m-n zeros and n (potential)
nonzeros, where the constraint matrix A in A x = b is m by n.  Thus, the core state of the simplex
method is the set of n possibly nonzero variables B, called the set of basis variables.
'''
class Unbounded(OverflowError):
    '''Raised when the linear program's objective is unbounded below.'''
    pass
class Infeasible(ArithmeticError):
    '''Raised when the linear program has no feasible point.'''
    pass
def solve_tableau(T,B,N):
    '''Pivot the canonical tableau T in place until no reduced cost is positive.

    B and N are the index arrays of basis and nonbasis variables; both are
    updated in place as variables enter/leave the basis.  Raises Unbounded if
    an improving direction has no blocking constraint.'''
    # Verify that T has an identity matrix where we expect it to be
    k = len(T)-len(B) # 1 for phase 2, 2 for phase 1
    assert all(T[k:,k+B]==eye(len(B),dtype=T.dtype))
    # Price out the basis variables
    T[0] -= dot(T[k:].T,T[0,k+B])
    assert all(T[0,k+B]==0)
    # Solve the tableau
    while 1:
        # Assert that we're sitting on a feasible basic solution
        assert all(T[k:,-1]>=0)
        # Pick a variable to enter the basis using Dantzig's rule
        enter = argmax(T[0,k+N])
        c = k+N[enter]
        if T[0,c]<=0:
            break # We've hit a local optimum, which is therefore a global optimum
        # Pick a variable to leave via the minimum-ratio test
        leavings, = nonzero(T[k:,c]>0)
        if not len(leavings):
            raise Unbounded('unbounded linear program')
        rows = k+leavings
        leave = leavings[argmin(T[rows,-1]/T[rows,c])]
        r = k+leave
        # Perform the pivot: normalize the pivot row, eliminate the column elsewhere
        T[r] /= T[r,c]
        rows, = nonzero(arange(len(T))!=r)
        T[rows] -= T[rows,c].reshape(-1,1).copy()*T[r]
        # Update the sets of basis and nonbasis variables
        N[enter],B[leave] = B[leave],N[enter]
def simplex_method(c,A,b):
    '''Minimize dot(c,x) s.t. Ax = b, x >= 0 using the simplex method, and return dot(c,x),x.
    If c,A,b are fractions, the result is exact.

    Raises Infeasible if no feasible point exists and Unbounded if the
    objective is unbounded below.'''
    # Phase 1: Add slack variables to get an initial canonical tableau, and solve
    (n,m),dtype = A.shape,A.dtype
    assert c.shape==(m,)
    assert b.shape==(n,)
    T = vstack([hstack([1,zeros(m+1,dtype),-ones(n,dtype),0]).reshape(1,-1),
                hstack([0,1,-c,zeros(n+1,dtype)]).reshape(1,-1),
                hstack([zeros((n,2),dtype),A,eye(n,dtype=dtype),b.reshape(-1,1)])])
    N = arange(m)
    B = m+arange(n)
    solve_tableau(T,B,N)
    # Check for infeasibility
    if T[0,-1]<0:
        raise Infeasible('infeasible linear program')
    # Verify that the auxiliary slack variables are nonbasic. This is not always the
    # case--they could be basic but just happen to be zero--but we'll deal with that later.
    assert B.max()<m
    # Phase 2: remove the now zero auxiliary variables
    T = T[1:,hstack([1+arange(m+1),-1])]
    assert T.shape==(1+n,1+m+1)
    N = N[nonzero(N<m)[0]]
    # Solve our new canonical tableau
    solve_tableau(T,B,N)
    # Basic variables take the last-column values; all nonbasic variables are zero.
    x = zeros(m,dtype)
    x[B] = T[1:,-1]
    return T[0,-1],x
def zero_sum_nash_equilibrium_side(payoff):
    '''Alice chooses the row and maximizes, Bob chooses the column and minimizes.
    Given Alice's payoff matrix, we compute Alice's optimal payoff and (mixed) strategy.
    See http://en.wikipedia.org/wiki/Zero_sum_game for details.'''
    assert payoff.ndim==2
    M = payoff.T
    # Shift/rescale so every entry is strictly positive, as the LP formulation
    # below requires; `or 1` guards against division by zero when all entries
    # are equal.  An affine change of the payoffs does not change the optimal
    # mixed strategy.
    M = M - M.min()
    M = M/(M.max() or 1)+1
    assert all(M>0)
    # We want to minimize sum(u) s.t. Mu >= 1, u >= 0. Let M be m by n.
    # Adding m positive slack variables s, this is
    # min 1_n . u s.t. Mu = 1_m + s, [u,s] >= 0
    # min 1_n . u s.t. Mu - s = 1_m, [u,s] >= 0
    # min hstack(1_n,0_m) . [u,s] s.t. hstack(M,-eye(m)) stack(u,s) = 1_m, [u,s] >= 0
    # Our linear program is now in standard form.
    m,n = M.shape
    dtype = M.dtype
    f,u = simplex_method(hstack([ones(n,dtype),zeros(m,dtype)]),hstack([M,-eye(m,dtype=dtype)]),ones(m,dtype))
    u = u[:n]
    # Normalize u into a probability distribution over pure strategies.
    u /= sum(u)
    return u
def zero_sum_nash_equilibrium(payoff):
    '''Return (value, alice_strategy, bob_strategy) for the zero-sum game where
    the row player (Alice) maximizes and the column player (Bob) minimizes.'''
    row_strategy = zero_sum_nash_equilibrium_side(payoff)
    col_strategy = zero_sum_nash_equilibrium_side(-payoff.T)
    game_value = dot(payoff, col_strategy).max()
    return game_value, row_strategy, col_strategy
| 3.390625 | 3 |
examples/rtt.py | dia38/pylink | 0 | 12772406 | <reponame>dia38/pylink
# -*- coding: utf-8 -*-
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Example RTT terminal.
#
# This module creates an interactive terminal with the target using RTT.
#
# Usage: rtt target_device
# Author: <NAME>
# Date: October 11, 2017
# Copyright: 2017 Square, Inc.
import pylink
import sys
import time
from builtins import input
try:
import thread
except ImportError:
import _thread as thread
def read_rtt(jlink):
    """Poll JLink RTT buffer #0 at ~10 Hz and echo everything to stdout.

    Runs for as long as the JLink reports a live connection.  Bytes are
    converted to characters and written without added newlines, since the
    target controls its own line endings; stdout is flushed after each chunk
    because the 1024-byte reads need not line up with newline boundaries.
    On any error the main thread is interrupted before re-raising.

    Args:
      jlink (pylink.JLink): The JLink to read.

    Raises:
      Exception on error.
    """
    try:
        while jlink.connected():
            data = jlink.rtt_read(0, 1024)
            if data:
                text = "".join(chr(byte) for byte in data)
                sys.stdout.write(text)
                sys.stdout.flush()
            time.sleep(0.1)
    except Exception:
        print("IO read thread exception, exiting...")
        thread.interrupt_main()
        raise
def write_rtt(jlink):
    """Writes keyboard input to JLink RTT buffer #0.

    This method is a loop that blocks waiting on stdin. When enter is pressed,
    LF and NUL bytes are added to the input and transmitted as a byte list.

    If the JLink is disconnected, it will exit gracefully. If any other
    exceptions are raised, they will be caught and re-raised after interrupting
    the main thread.

    Args:
      jlink (pylink.JLink): The JLink to write to.

    Raises:
      Exception on error.
    """
    try:
        while jlink.connected():
            # FIX: renamed local that shadowed the builtin `bytes`, and
            # dropped the unused `bytes_written` assignment.
            payload = list(bytearray(input(), "utf-8") + b"\x0A\x00")
            jlink.rtt_write(0, payload)
    except Exception:
        print("IO write thread exception, exiting...")
        thread.interrupt_main()
        raise
def main(target_device):
    """Creates an interactive terminal to the target via RTT.

    The main loop opens a connection to the JLink, and then connects
    to the target device. RTT is started, the number of buffers is presented,
    and then two worker threads are spawned: one for read, and one for write.

    The main loops sleeps until the JLink is either disconnected or the
    user hits ctrl-c.

    Args:
      target_device (string): The target CPU to connect to.

    Returns:
      Always returns ``0`` or a JLinkException.

    Raises:
      JLinkException on error.
    """
    jlink = pylink.JLink()
    print("connecting to JLink...")
    jlink.open()
    print("connecting to %s..." % target_device)
    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
    jlink.connect(target_device)
    print("connected, starting RTT...")
    jlink.rtt_start()
    # Retry until buffer counts are available; rtt_get_num_*_buffers raises
    # JLinkRTTException until then, so poll at 10 Hz.
    while True:
        try:
            num_up = jlink.rtt_get_num_up_buffers()
            num_down = jlink.rtt_get_num_down_buffers()
            print("RTT started, %d up bufs, %d down bufs." % (num_up, num_down))
            break
        except pylink.errors.JLinkRTTException:
            time.sleep(0.1)
    try:
        # One daemon-style worker per direction; the main thread just waits.
        thread.start_new_thread(read_rtt, (jlink,))
        thread.start_new_thread(write_rtt, (jlink,))
        while jlink.connected():
            time.sleep(1)
        print("JLink disconnected, exiting...")
    except KeyboardInterrupt:
        # Raised either by the user or by a worker's interrupt_main().
        print("ctrl-c detected, exiting...")
        pass
if __name__ == "__main__":
sys.exit(main(sys.argv[1]))
| 2.5625 | 3 |
icevision/models/mmdet/models/retinanet/backbones/resnet_fpn.py | bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit | 580 | 12772407 | __all__ = [
"resnet50_caffe_fpn_1x",
"resnet50_fpn_1x",
"resnet50_fpn_2x",
"resnet101_caffe_fpn_1x",
"resnet101_fpn_1x",
"resnet101_fpn_2x",
"resnext101_32x4d_fpn_1x",
"resnext101_32x4d_fpn_2x",
"resnext101_64x4d_fpn_1x",
"resnext101_64x4d_fpn_2x",
]
from icevision.imports import *
from icevision.models.mmdet.utils import *
class MMDetRetinanetBackboneConfig(MMDetBackboneConfig):
    """MMDetBackboneConfig specialization that pins model_name to "retinanet"."""
    def __init__(self, **kwargs):
        super().__init__(model_name="retinanet", **kwargs)
# Every config below pairs an mmdetection config file with its pretrained
# COCO checkpoint; 1x/2x refer to the training schedule length.
base_config_path = mmdet_configs_path / "retinanet"
base_weights_url = "http://download.openmmlab.com/mmdetection/v2.0/retinanet"
# --- ResNet-50 backbones ---
resnet50_caffe_fpn_1x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_r50_caffe_fpn_1x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth",
)
resnet50_fpn_1x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_r50_fpn_1x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth",
)
resnet50_fpn_2x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_r50_fpn_2x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth",
)
# --- ResNet-101 backbones ---
resnet101_caffe_fpn_1x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_r101_caffe_fpn_1x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth",
)
resnet101_fpn_1x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_r101_fpn_1x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth",
)
resnet101_fpn_2x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_r101_fpn_2x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth",
)
# --- ResNeXt-101 backbones ---
resnext101_32x4d_fpn_1x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_x101_32x4d_fpn_1x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth",
)
resnext101_32x4d_fpn_2x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_x101_32x4d_fpn_2x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth",
)
resnext101_64x4d_fpn_1x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_x101_64x4d_fpn_1x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth",
)
resnext101_64x4d_fpn_2x = MMDetRetinanetBackboneConfig(
    config_path=base_config_path / "retinanet_x101_64x4d_fpn_2x_coco.py",
    weights_url=f"{base_weights_url}/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth",
)
| 1.421875 | 1 |
reverseWords_01b.py | robertbyers1111/python | 0 | 12772408 | <reponame>robertbyers1111/python
#!/usr/bin/python
from reverseWords_01a import Sentence
# Demo: print the sentence, apply reverse_words() (presumably reverses the
# word order in place -- see reverseWords_01a), and print the result.
s = Sentence('abc def xyz')
s.show()
s.reverse_words()
s.show()
| 3.40625 | 3 |
docs/source/conftest.py | chaaklau/pycantonese | 124 | 12772409 | """Test code snippets embedded in the docs.
Reference: https://sybil.readthedocs.io/en/latest/use.html#pytest
"""
from doctest import NORMALIZE_WHITESPACE
from os import chdir, getcwd
from shutil import rmtree
from tempfile import mkdtemp
import pytest
from sybil import Sybil
from sybil.parsers.doctest import DocTestParser
from sybil.parsers.skip import skip
@pytest.fixture(scope="module")
def tempdir():
path = mkdtemp()
cwd = getcwd()
try:
chdir(path)
yield path
finally:
chdir(cwd)
rmtree(path)
# Sybil makes pytest collect *.rst files and run the doctest examples inside
# them, with the `tempdir` fixture available to every example.
pytest_collect_file = Sybil(
    parsers=[DocTestParser(optionflags=NORMALIZE_WHITESPACE), skip],
    pattern="*.rst",
    fixtures=["tempdir"],
).pytest()
| 2.453125 | 2 |
setup.py | rcarmo/miniredis | 1 | 12772410 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2013, <NAME>
Description: Experimental Cython compile script
License: MIT (see LICENSE.md for details)
"""
import os, sys
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
from glob import glob
# NOTE(review): this guard never fires, because build_ext is already imported
# unconditionally above -- a missing Cython raises there first.  Python 2
# print statement; the bare except is also overly broad.
try:
    from Cython.Distutils import build_ext
except:
    print "You don't seem to have Cython installed"
    sys.exit(1)
def scandir(dir, files=None):
    """Recursively collect dotted module names for every .py file under *dir*.

    :param dir: directory to walk.
    :param files: optional accumulator list, appended to and returned.
    :return: list of dotted module names (path separators replaced by '.').
    """
    # BUG FIX: the original used a mutable default argument (files=[]),
    # so results accumulated across separate top-level calls.
    if files is None:
        files = []
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isfile(path) and path.endswith(".py"):
            files.append(path.replace(os.path.sep, ".")[:-3])
        elif os.path.isdir(path):
            scandir(path, files)
    return files
def makeExtension(extName):
    """Build a Cython Extension for the dotted module name *extName*,
    mapping it back to its .py source path."""
    source_path = extName.replace(".", os.path.sep) + ".py"
    return Extension(
        extName,
        [source_path],
        include_dirs = ["."],
        extra_compile_args = ["-O3", "-Wall"],
        extra_link_args = ['-g'],
        libraries = [],
    )
extNames = scandir("miniredis")
extensions = [makeExtension(name) for name in extNames]
setup(
name = "miniredis",
packages = "miniredis",
ext_modules=extensions,
cmdclass = {'build_ext': build_ext},
setup_requires=['nose'],
test_suite='nose.main',
)
| 2.109375 | 2 |
CCC/CCC '13 J3 - From 1987 to 2013.py | Joon7891/Competitive-Programming | 2 | 12772411 | def valid(a):
a = str(a)
num = set()
for char in a:
num.add(char)
return len(a) == len(num)
# Read the starting year, then print the next year whose digits are all distinct.
n = int(input())
n += 1
while True:
    if valid(n):
        print(n)
        break
    else:
        n += 1
| 3.46875 | 3 |
test/test_handlers.py | charles-x-chen/pyaem | 17 | 12772412 | import pyaem
import unittest
class TestHandlers(unittest.TestCase):
    """Tests for pyaem.handlers: each handler must raise a PyAemException
    carrying the expected code, message, and the original response dict."""
    def test_auth_fail(self):
        """401 responses raise with a fixed authentication-failure message."""
        response = {
            'http_code': 401,
            'body': 'some body'
        }
        try:
            pyaem.handlers.auth_fail(response)
            self.fail('An exception should have been raised')
        except pyaem.PyAemException as exception:
            self.assertEqual(exception.code, 401)
            self.assertEqual(exception.message, 'Authentication failed - incorrect username and/or password')
            self.assertEqual(exception.response, response)
    def test_method_not_allowed(self):
        """405 responses raise with the message extracted from the body's <title>."""
        response = {
            'http_code': 405,
            'body': '<html><body><title>some error message</title></body></html>'
        }
        try:
            pyaem.handlers.method_not_allowed(response)
            self.fail('An exception should have been raised')
        except pyaem.PyAemException as exception:
            self.assertEqual(exception.code, 405)
            self.assertEqual(exception.message, 'some error message')
            self.assertEqual(exception.response, response)
    def test_unexpected(self):
        """Unhandled codes raise with a dump of the http code and body."""
        response = {
            'http_code': 500,
            'body': 'some unexpected server error'
        }
        try:
            pyaem.handlers.unexpected(response)
            self.fail('An exception should have been raised')
        except pyaem.PyAemException as exception:
            self.assertEqual(exception.code, 500)
            self.assertEqual(
                exception.message, 'Unexpected response\nhttp code: 500\nbody:\nsome unexpected server error')
            self.assertEqual(exception.response, response)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 3.09375 | 3 |
plot/plot.py | firiceguo/my-pycuda | 0 | 12772413 | <filename>plot/plot.py
import matplotlib.pyplot as plt
def getData(file):
    '''
    Get the x-axis and time usage information from log.
    input file - log format: 'IMAGE_W:1:Time:0.048934:ms'
    output: x is a list of IMAGE_W, time is a list of microsecond.

    The first run of digits on a line is the IMAGE_W value; the next number
    (possibly with a decimal point) is the time.  Lines that do not contain
    two numbers are skipped.
    '''
    import re
    x = []
    time = []
    # BUG FIX: the original opened the file and never closed it, and its
    # hand-rolled character scanner raised IndexError on malformed lines.
    with open(file, 'r') as f:
        for line in f:
            match = re.search(r'(\d+)\D*(\d+(?:\.\d+)?)', line)
            if match:
                x.append(int(match.group(1)))
                time.append(float(match.group(2)))
    return (x, time)
if __name__ == '__main__':
    # Kernel radius is used only for the plot title and output file name;
    # the logs below are assumed to have been produced with this radius.
    radius = 10
    ReddFile = "../log/Redundant.log"
    FilterFile = "../log/filter.log"
    NaiveFile = "../log/naive.log"
    # SerialFile = "../log/serial.log"
    ReddX, ReddTime = getData(ReddFile)
    FilterX, FilterTime = getData(FilterFile)
    NaiveX, NaiveTime = getData(NaiveFile)
    # SerialX, SetialTime = getData(SerialFile)
    # plt.plot(SerialX, SetialTime, 'k', label="Setial Time")
    # Overlay all three implementations on one time-vs-size plot.
    plt.plot(NaiveX, NaiveTime, 'r', label="Naive")
    plt.plot(FilterX, FilterTime, 'g', label="Separable Filter")
    plt.plot(ReddX, ReddTime, 'b', label="Redundant Boundary")
    plt.xlabel("Image radius")
    plt.ylabel("Time(ms)")
    plt.title("Time usage(Kernel Radius = " + str(radius) + ")")
    plt.grid(True)
    plt.legend(loc='upper left')
    # plt.show()
    plt.savefig("out-r" + str(radius) + ".png")
    plt.close("all")
| 3.140625 | 3 |
qtcurate/exceptions.py | quantxt/qtcurate-sdk-python | 0 | 12772414 | class QtConnectionError(Exception):
pass
class QtRestApiError(Exception):
    """Problem with authentication against the REST API."""
    pass
class QtFileTypeError(Exception):
    """Invalid type of file."""
    pass
class QtArgumentError(Exception):
    """Invalid argument error."""
    pass
class QtVocabularyError(Exception):
    """Vocabulary-related error."""
    pass
class QtModelError(Exception):
    """Model-related error."""
    pass
class QtJobError(Exception):
    """Job-related error."""
    pass
simulator/units/cavalry.py | ClementJ18/aoe | 0 | 12772415 | <reponame>ClementJ18/aoe
import objects
class Templars(objects.Unit):
    """Cavalry unit with the "zeal" and "plain charge" abilities."""
    def __init__(self):
        super().__init__(
            name = "Templar Knights",
            attack = 200,
            defense = 200,
            range = 1,
            movement = 10,
            vision = 7,
            type = objects.UnitType.cavalry,
            abilities = ["zeal", "plain charge"]
        )
class EliteTemplars(Templars):
    """Templar Knights upgrade: same abilities, higher combat stats."""
    def __init__(self):
        super().__init__()
        self.name = "Elite Templar Knights"
        self.attack = self.defense = 250
class LightCav(objects.Unit):
    """Base melee cavalry with the "plain charge" ability; upgrade line is
    Knights -> Cavaliers -> Paladins."""
    def __init__(self):
        super().__init__(
            name = "Light Cavalry",
            attack = 150,
            defense = 150,
            range = 1,
            movement = 10,
            vision = 7,
            type = objects.UnitType.cavalry,
            abilities = ["plain charge"]
        )
class Knights(LightCav):
    """Second tier of the LightCav upgrade line."""
    def __init__(self):
        super().__init__()
        self.name = "Knights"
        self.attack = self.defense = 200
class Cavaliers(Knights):
    """Third tier of the LightCav upgrade line."""
    def __init__(self):
        super().__init__()
        self.name = "Cavaliers"
        self.attack = self.defense = 250
class Paladins(Cavaliers):
    """Final tier of the LightCav upgrade line."""
    def __init__(self):
        super().__init__()
        self.name = "Paladins"
        self.attack = self.defense = 300
class Camels(objects.Unit):
    """Camel rider with the "scared horses" and "desert charge" abilities."""
    def __init__(self):
        super().__init__(
            # CONSISTENCY FIX: every other unit class passes its display name
            # to Unit; Camels (and Mameluks) omitted it.
            name = "Camels",
            attack = 200,
            defense = 200,
            range = 1,
            movement = 7,
            vision = 7,
            type = objects.UnitType.cavalry,
            abilities = ["scared horses", "desert charge"]
        )
class HeavyCamels(Camels):
    """Camels upgrade with higher combat stats."""
    def __init__(self):
        super().__init__()
        self.name = "Heavy Camels"
        self.attack = self.defense = 250
class Mameluks(objects.Unit):
    """Fast camel-line unit with the "scared horses" and "desert charge" abilities."""
    def __init__(self):
        super().__init__(
            # CONSISTENCY FIX: every other unit class passes its display name
            # to Unit; Mameluks (and Camels) omitted it.
            name = "Mameluks",
            attack = 250,
            defense = 200,
            range = 1,
            movement = 10,
            vision = 7,
            type = objects.UnitType.cavalry,
            abilities = ["scared horses", "desert charge"]
        )
class EliteMameluks(Mameluks):
    """Mameluks upgrade with higher combat stats."""
    def __init__(self):
        super().__init__()
        self.name = "Elite Mameluks"
        self.attack = self.defense = 300
class ScoutCav(objects.Unit):
    """Cheap reconnaissance cavalry: weak stats but the widest vision (10)."""
    def __init__(self):
        super().__init__(
            name = "Scout Cavalry",
            attack = 100,
            defense = 100,
            range = 1,
            movement = 10,
            vision = 10,
            type = objects.UnitType.cavalry
        )
class WarElephants(objects.Unit):
    """Slow, high-defense cavalry-class unit with the "cause fear" ability."""
    def __init__(self):
        super().__init__(
            name = "Persian War Elephant",
            attack = 200,
            defense = 250,
            range = 1,
            movement = 7,
            vision = 7,
            type = objects.UnitType.cavalry,
            abilities = ["cause fear"]
        )
SS/offer17_printNumbers.py | MTandHJ/leetcode | 0 | 12772416 |
from typing import List
class Solution:
    def printNumbers(self, n:int) -> List[int]:
        # NOTE(review): despite the List[int] annotation, this draft returns a
        # single string -- `res` collects every n-digit string including
        # leading zeros (e.g. '00'..'99' for n=2) and the final join
        # concatenates them all.  The second Solution class below supersedes
        # this implementation.
        def dfs(x:int):
            # Once all n positions are filled, record the current digit string.
            if x == n:
                res.append(''.join(num))
                return
            for i in range(10):
                num[x] = str(i)
                dfs(x+1)
        num = ['0'] * n
        res = []
        dfs(0)
        return ''.join(res)
class Solution:
    def printNumbers(self, n: int) -> List[int]:
        """Return every positive integer with at most ``n`` decimal digits,
        i.e. [1, 2, ..., 10**n - 1], built by depth-first digit enumeration
        (the string-based approach generalizes to big-number variants)."""
        results: List[int] = []

        def extend(pos: int, digits: List[str], width: int) -> None:
            # `digits` already holds `pos` digits; emit once `width` is reached.
            if pos == width:
                results.append(int(''.join(digits)))
                return
            for ch in '0123456789':
                digits.append(ch)
                extend(pos + 1, digits, width)
                # Backtrack so the buffer can take the next digit.
                digits.pop()

        # Enumerate by total width; the leading digit is never zero.
        for width in range(1, n + 1):
            for lead in '123456789':
                extend(1, [lead], width)
        return results
if __name__ == '__main__':
    ins = Solution()
    n = 2
    # FIX: use the configured `n` instead of the hard-coded literal 2.
    print(ins.printNumbers(n))
bach/bach/sql_model.py | objectiv/objectiv-analytics | 23 | 12772417 | """
Copyright 2021 Objectiv B.V.
"""
import typing
from typing import Dict, TypeVar, Tuple, List, Optional, Mapping, Hashable, Union
from sqlalchemy.engine import Dialect
from bach.expression import Expression, get_variable_tokens, VariableToken
from bach.types import value_to_dtype, get_series_type_from_dtype
from sql_models.util import quote_identifier
from sql_models.model import CustomSqlModelBuilder, SqlModel, Materialization, SqlModelSpec
from sql_models.constants import NotSet, not_set
T = TypeVar('T', bound='SqlModelSpec')
TBachSqlModel = TypeVar('TBachSqlModel', bound='BachSqlModel')
if typing.TYPE_CHECKING:
from bach.dataframe import DtypeNamePair
class BachSqlModel(SqlModel[T]):
    """
    SqlModel with meta information about the columns that it produces.
    This additional information needs to be specifically set at model instantiation, it cannot be deduced
    from the sql.
    The column information is not used for sql generation, but can be used by other code
    interacting with the models. The information is not reflected in the `hash`, as it doesn't matter for
    the purpose of sql generation.
    """
    def __init__(
        self,
        model_spec: T,
        placeholders: Mapping[str, Hashable],
        references: Mapping[str, 'SqlModel'],
        materialization: Materialization,
        materialization_name: Optional[str],
        column_expressions: Dict[str, Expression],
    ) -> None:
        """
        Similar to :py:meth:`SqlModel.__init__()`. With one additional parameter: column_expressions,
        a mapping between the names of the columns and expressions
        that this model's query will return in the correct order.
        """
        self._column_expressions = column_expressions
        super().__init__(
            model_spec=model_spec,
            placeholders=placeholders,
            references=references,
            materialization=materialization,
            materialization_name=materialization_name,
        )
    @property
    def columns(self) -> Tuple[str, ...]:
        """ Columns returned by the query of this model, in order."""
        return tuple(self._column_expressions.keys())
    @property
    def column_expressions(self) -> Dict[str, Expression]:
        """ Mapping containing the expression used per column."""
        return self._column_expressions
    def copy_override(
        self: TBachSqlModel,
        *,
        model_spec: T = None,
        placeholders: Mapping[str, Hashable] = None,
        references: Mapping[str, 'SqlModel'] = None,
        materialization: Materialization = None,
        materialization_name: Union[Optional[str], NotSet] = not_set,
        column_expressions: Dict[str, Expression] = None
    ) -> TBachSqlModel:
        """
        Create a copy of self, with any explicitly given fields overridden.

        Similar to super class's implementation, but adds optional 'columns' parameter
        """
        # `materialization_name` may legitimately be None, hence the `not_set`
        # sentinel (rather than None) means "keep the current value".
        materialization_name_value = (
            self.materialization_name if materialization_name is not_set else materialization_name
        )
        return self.__class__(
            model_spec=self.model_spec if model_spec is None else model_spec,
            placeholders=self.placeholders if placeholders is None else placeholders,
            references=self.references if references is None else references,
            materialization=self.materialization if materialization is None else materialization,
            materialization_name=materialization_name_value,
            column_expressions=self.column_expressions if column_expressions is None else column_expressions
        )
    @classmethod
    def from_sql_model(cls, sql_model: SqlModel, column_expressions: Dict[str, Expression]) -> 'BachSqlModel':
        """ From any SqlModel create a BachSqlModel with the given column definitions. """
        return cls(
            model_spec=sql_model.model_spec,
            placeholders=sql_model.placeholders,
            references=sql_model.references,
            materialization=sql_model.materialization,
            materialization_name=sql_model.materialization_name,
            column_expressions=column_expressions,
        )
    @classmethod
    def _get_placeholders(
        cls,
        dialect: Dialect,
        variables: Dict['DtypeNamePair', Hashable],
        expressions: List[Expression],
    ) -> Dict[str, str]:
        """Build the placeholder-name -> sql-literal mapping for only those
        variables actually referenced by `expressions`."""
        filtered_variables = filter_variables(variables, expressions)
        return get_variable_values_sql(dialect, filtered_variables)
class SampleSqlModel(BachSqlModel):
    """
    A custom SqlModel that simply does select * from a table. In addition to that, this class stores an
    extra property: previous.
    The previous property is not used in the generated sql at all, but can be used to track a previous
    SqlModel. This is useful for how we implemented sampling, as that effectively inserts a sql-model in the
    graph that has no regular reference to the previous node in the graph. By storing the previous node
    here, we can later still reconstruct what the actual previous node was with some custom logic.
    See the DataFrame.sample() implementation for more information
    """
    def __init__(
        self,
        model_spec: T,
        placeholders: Mapping[str, Hashable],
        references: Mapping[str, 'SqlModel'],
        materialization: Materialization,
        materialization_name: Optional[str],
        column_expressions: Dict[str, Expression],
        previous: BachSqlModel,
    ) -> None:
        """Same as BachSqlModel.__init__, plus `previous`: the node the sample
        was taken from (not referenced by the generated sql)."""
        self.previous = previous
        super().__init__(
            model_spec=model_spec,
            placeholders=placeholders,
            references=references,
            materialization=materialization,
            materialization_name=materialization_name,
            column_expressions=column_expressions,
        )
    def copy_override(
        self: 'SampleSqlModel',
        *,
        model_spec: T = None,
        placeholders: Mapping[str, Hashable] = None,
        references: Mapping[str, 'SqlModel'] = None,
        materialization: Materialization = None,
        materialization_name: Union[Optional[str], NotSet] = not_set,
        column_expressions: Dict[str, Expression] = None,
        previous: BachSqlModel = None
    ) -> 'SampleSqlModel':
        """
        Similar to super class's implementation, but adds optional 'previous' parameter
        """
        # `not_set` sentinel: None is a valid materialization_name value.
        materialization_name_value = \
            self.materialization_name if materialization_name is not_set else materialization_name
        return self.__class__(
            model_spec=self.model_spec if model_spec is None else model_spec,
            placeholders=self.placeholders if placeholders is None else placeholders,
            references=self.references if references is None else references,
            materialization=self.materialization if materialization is None else materialization,
            materialization_name=materialization_name_value,
            column_expressions=self.column_expressions if column_expressions is None else column_expressions,
            previous=self.previous if previous is None else previous
        )
    @staticmethod
    def get_instance(
        *,
        dialect: Dialect,
        table_name: str,
        previous: BachSqlModel,
        column_expressions: Dict[str, Expression],
        name: str = 'sample_node',
    ) -> 'SampleSqlModel':
        """ Helper function to instantiate a SampleSqlModel """
        sql = 'SELECT * FROM {table_name}'
        return SampleSqlModel(
            model_spec=CustomSqlModelBuilder(sql=sql, name=name),
            placeholders={'table_name': quote_identifier(dialect, table_name)},
            references={},
            materialization=Materialization.CTE,
            materialization_name=None,
            column_expressions=column_expressions,
            previous=previous
        )
class CurrentNodeSqlModel(BachSqlModel):
    """BachSqlModel for a DataFrame's current node: one select statement with
    optional distinct/where/group-by/having/order-by/limit clauses over a
    previous node (referenced as 'prev')."""
    @staticmethod
    def get_instance(
        *,
        dialect: Dialect,
        name: str,
        column_names: Tuple[str, ...],
        column_exprs: List[Expression],
        distinct: bool,
        where_clause: Optional[Expression],
        group_by_clause: Optional[Expression],
        having_clause: Optional[Expression],
        order_by_clause: Optional[Expression],
        limit_clause: Expression,
        previous_node: BachSqlModel,
        variables: Dict['DtypeNamePair', Hashable],
    ) -> 'CurrentNodeSqlModel':
        """Build a CurrentNodeSqlModel. Optional clause expressions may be
        None, in which case the corresponding sql fragment is left empty."""
        columns_str = ', '.join(expr.to_sql(dialect) for expr in column_exprs)
        distinct_stmt = ' distinct ' if distinct else ''
        where_str = where_clause.to_sql(dialect) if where_clause else ''
        group_by_str = group_by_clause.to_sql(dialect) if group_by_clause else ''
        having_str = having_clause.to_sql(dialect) if having_clause else ''
        order_by_str = order_by_clause.to_sql(dialect) if order_by_clause else ''
        limit_str = limit_clause.to_sql(dialect) if limit_clause else ''
        # {{prev}} is a model-reference placeholder resolved via `references` below.
        sql = (
            f"select {distinct_stmt}{columns_str} \n"
            f"from {{{{prev}}}} \n"
            f"{where_str} \n"
            f"{group_by_str} \n"
            f"{having_str} \n"
            f"{order_by_str} \n"
            f"{limit_str} \n"
        )
        # Add all references found in the Expressions to self.references
        nullable_expressions = [where_clause, group_by_clause, having_clause, order_by_clause, limit_clause]
        all_expressions = column_exprs + [expr for expr in nullable_expressions if expr is not None]
        references = construct_references({'prev': previous_node}, all_expressions)
        return CurrentNodeSqlModel(
            model_spec=CustomSqlModelBuilder(sql=sql, name=name),
            placeholders=BachSqlModel._get_placeholders(dialect, variables, all_expressions),
            references=references,
            materialization=Materialization.CTE,
            materialization_name=None,
            # NOTE: `name` inside this comprehension is the column name
            # (comprehension scope), not the model name parameter.
            column_expressions={name: expr for name, expr in zip(column_names, column_exprs)},
        )
def construct_references(
        base_references: Mapping[str, 'SqlModel'],
        expressions: List['Expression']
) -> Dict[str, 'SqlModel']:
    """
    Create a dictionary of references consisting of the base_references and all references found in the
    expressions.
    Will raise an exception if there are references with the same name that reference different models.
    """
    combined: Dict[str, SqlModel] = {}
    for expression in expressions:
        found = expression.get_references()
        _check_reference_conflicts(combined, found)
        combined.update(found)
    # base_references take precedence, but must not conflict either.
    _check_reference_conflicts(base_references, combined)
    combined.update(base_references)
    return combined
def _check_reference_conflicts(left: Mapping[str, 'SqlModel'], right: Mapping[str, 'SqlModel']) -> None:
"""
Util function: Check that two dicts with references don't have conflicting values.
"""
for ref_name, model in right.items():
if left.get(ref_name) not in (None, model):
# This should never happen, if other code doesn't mess up.
# We have this check as a backstop assertion to fail early
raise Exception(f'Encountered reference {ref_name} before, but with a different value: '
f'{left.get(ref_name)} != {model}')
def filter_variables(
        variable_values: Dict['DtypeNamePair', Hashable],
        filter_expressions: List['Expression']
) -> Dict['DtypeNamePair', Hashable]:
    """
    Util function: Return a copy of the variable_values, with only the variables for which there is a
    VariableToken in the filter_expressions.
    """
    tokens = get_variable_tokens(filter_expressions)
    wanted = {token.dtype_name for token in tokens}
    return {key: val for key, val in variable_values.items() if key in wanted}
def get_variable_values_sql(
        dialect: Dialect,
        variable_values: Dict['DtypeNamePair', Hashable]
) -> Dict[str, str]:
    """
    Take a dictionary with variable_values and return a dict with the full variable names and the values
    as sql.
    The sql assumes it will be used as values for SqlModels's placeholders. i.e. It will not be format
    escaped, unlike if it would be used directly into SqlModel.sql in which case it would be escaped twice.
    The sql will be proper sql tho, with identifier, strings, etc. properly quoted and escaped.
    :param variable_values: Mapping of variable to value.
    :return: Dictionary mapping full variable name to sql literal
    :raises Exception: if a value's dtype doesn't match the dtype it was registered with.
    """
    result = {}
    for dtype_name, value in variable_values.items():
        dtype, name = dtype_name
        value_dtype = value_to_dtype(value)
        if dtype != value_dtype:  # should never happen
            # BUG FIX: the exception was previously constructed but never
            # raised, silently letting mismatched dtypes through.
            raise Exception(f'Dtype of value {value}, {value_dtype} does not match registered dtype {dtype}')
        placeholder_name = VariableToken.dtype_name_to_placeholder_name(dtype=dtype, name=name)
        series_type = get_series_type_from_dtype(dtype)
        expr = series_type.value_to_literal(dialect=dialect, value=value, dtype=dtype)
        double_escaped_sql = expr.to_sql(dialect)
        # to_sql output is escaped for two rounds of str.format; unwrap both.
        sql = double_escaped_sql.format().format()
        result[placeholder_name] = sql
    return result
| 2.4375 | 2 |
server/crud/protocol.py | lab-grid/labflow | 4 | 12772418 | <gh_stars>1-10
import casbin
from sqlalchemy.orm import Session, Query
from typing import Optional, List
from functools import reduce
from fastapi import HTTPException
from authorization import check_access
from server import Auth0ClaimsPatched
from database import (
filter_by_plate_label,
filter_by_reagent_label,
filter_by_sample_label,
Protocol,
ProtocolVersion,
Run,
RunVersion,
fix_plate_markers_protocol,
)
from api.utils import paginatify
def all_protocols(db: Session, include_archived=False) -> Query:
    """Return a query over protocols, excluding soft-deleted ones unless include_archived is set."""
    protocols = db.query(Protocol)
    if include_archived:
        return protocols
    return protocols.filter(Protocol.is_deleted != True)
def crud_get_protocols(
    item_to_dict,
    enforcer: casbin.Enforcer,
    db: Session,
    current_user: Auth0ClaimsPatched,
    protocol: Optional[int] = None,
    run: Optional[int] = None,
    plate: Optional[str] = None,
    reagent: Optional[str] = None,
    sample: Optional[str] = None,
    creator: Optional[str] = None,
    archived: Optional[bool] = None,
    page: Optional[int] = None,
    per_page: Optional[int] = None,
) -> List[dict]:
    """
    List protocols the current user may read, optionally filtered.

    Each filter argument (protocol id, run id, plate/reagent/sample label, creator)
    contributes one sub-query; the result is the INTERSECTION of all sub-queries.
    When no filter is given, all non-archived protocols are returned. Results are
    access-checked per protocol via the casbin enforcer, then paginated and
    serialized with item_to_dict.
    """
    protocols_queries = []
    # Filter by protocol id.
    if protocol:
        protocols_queries.append(
            all_protocols(db, archived)\
                .filter(Protocol.id == protocol)
        )
    # Filter by a run that used a version of the protocol.
    if run:
        protocols_queries.append(
            all_protocols(db, archived)\
                .join(ProtocolVersion, ProtocolVersion.protocol_id == Protocol.id)\
                .join(Run, Run.protocol_version_id == ProtocolVersion.id)\
                .filter(Run.id == run)
        )
    # Filter by a plate label appearing in a run of the protocol.
    if plate:
        run_version_query = all_protocols(db, archived)\
            .join(ProtocolVersion, ProtocolVersion.protocol_id == Protocol.id)\
            .join(Run, Run.protocol_version_id == ProtocolVersion.id)\
            .join(RunVersion, RunVersion.id == Run.version_id)
        protocols_subquery = filter_by_plate_label(run_version_query, plate)
        protocols_queries.append(protocols_subquery)
    # Filter by a reagent label appearing in a run of the protocol.
    if reagent:
        run_version_query = all_protocols(db, archived)\
            .join(ProtocolVersion, ProtocolVersion.protocol_id == Protocol.id)\
            .join(Run, Run.protocol_version_id == ProtocolVersion.id)\
            .join(RunVersion, RunVersion.id == Run.version_id)
        protocols_subquery = filter_by_reagent_label(run_version_query, reagent)
        protocols_queries.append(protocols_subquery)
    # Filter by a sample label appearing in a run of the protocol.
    if sample:
        run_version_query = all_protocols(db, archived)\
            .join(ProtocolVersion, ProtocolVersion.protocol_id == Protocol.id)\
            .join(Run, Run.protocol_version_id == ProtocolVersion.id)\
            .join(RunVersion, RunVersion.id == Run.version_id)
        protocols_subquery = filter_by_sample_label(run_version_query, sample)
        protocols_queries.append(protocols_subquery)
    # Filter by the user who created the protocol.
    if creator:
        protocols_queries.append(
            all_protocols(db, archived)\
            # .filter(Protocol.id == protocol)\
            .filter(Protocol.created_by == creator)
        )
    # Add a basic non-deleted items query if no filters were specified.
    if len(protocols_queries) == 0:
        protocols_queries.append(all_protocols(db, archived))
    # Only return the intersection of all queries.
    protocols_query = reduce(lambda a, b: a.intersect(b), protocols_queries)
    # Per-protocol casbin access check; `protocol.current` presumably excludes
    # protocols without a current version -- confirm against the Protocol model.
    return paginatify(
        items_label='protocols',
        items=[
            protocol
            for protocol
            in protocols_query.distinct().order_by(Protocol.created_on.desc())
            if check_access(enforcer, user=current_user.username, path=f"/protocol/{str(protocol.id)}", method="GET") and protocol and protocol.current
        ],
        item_to_dict=lambda protocol: item_to_dict(fix_plate_markers_protocol(db, protocol)),
        page=page,
        per_page=per_page,
    )
def crud_get_protocol(
    item_to_dict,
    enforcer: casbin.Enforcer,
    db: Session,
    current_user: Auth0ClaimsPatched,
    protocol_id: int,
    version_id: Optional[int] = None,
) -> dict:
    """
    Fetch a single protocol as a dict, after a casbin access check.

    When version_id is given, the protocol is returned as of that specific historical
    version; otherwise the current protocol is returned (with plate markers fixed up).

    :raises HTTPException: 403 when access is denied, 404 when the protocol (or the
        requested version) does not exist or has been soft-deleted.
    """
    if not check_access(enforcer, user=current_user.username, path=f"/protocol/{str(protocol_id)}", method="GET"):
        raise HTTPException(status_code=403, detail='Insufficient Permissions')

    if version_id:
        # BUG FIX: query through the injected session (`db.query`) instead of the
        # Flask-SQLAlchemy style `ProtocolVersion.query`, and join Protocol explicitly
        # so the `Protocol.id` filter does not produce an implicit cross join.
        protocol_version = db.query(ProtocolVersion)\
            .join(Protocol, ProtocolVersion.protocol_id == Protocol.id)\
            .filter(ProtocolVersion.id == version_id)\
            .filter(Protocol.id == protocol_id)\
            .first()
        if (not protocol_version) or protocol_version.protocol.is_deleted:
            raise HTTPException(status_code=404, detail='Protocol Not Found')
        return item_to_dict(protocol_version.protocol)

    protocol = db.query(Protocol).get(protocol_id)
    if (not protocol) or protocol.is_deleted:
        raise HTTPException(status_code=404, detail='Protocol Not Found')
    return item_to_dict(fix_plate_markers_protocol(db, protocol))
| 2 | 2 |
basic/blog/models.py | mrmonkington/django-basic-apps | 1 | 12772419 | <gh_stars>1-10
import datetime
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.db.models import permalink
from django.contrib.auth.models import User
from django.template.defaultfilters import truncatewords_html
from taggit.managers import TaggableManager
from django_markup.fields import MarkupField
from django_markup.markup import formatter
from basic.blog.managers import PublicManager
from basic.blog import settings
from basic.inlines.parser import inlines
class Category(models.Model):
    """Category model: a named, slugged grouping that blog posts can belong to."""
    title = models.CharField(_('title'), max_length=100)
    slug = models.SlugField(_('slug'), unique=True)  # used to build category URLs

    class Meta:
        verbose_name = _('category')
        verbose_name_plural = _('categories')
        db_table = 'blog_categories'
        ordering = ('title',)

    def __unicode__(self):
        # Python 2 style display name (this app predates the __str__ convention).
        return u'%s' % self.title

    @permalink
    def get_absolute_url(self):
        # Reverse the named URL pattern for this category's detail page.
        return ('blog_category_detail', None, {'slug': self.slug})
class Post(models.Model):
    """Post model: a blog entry with markup-rendered body/tease and a draft/public workflow."""
    STATUS_CHOICES = (
        (1, _('Draft')),
        (2, _('Public')),
    )
    title = models.CharField(_('title'), max_length=200)
    # Slug only needs to be unique per publish date because URLs include the date.
    slug = models.SlugField(_('slug'), unique_for_date='publish', max_length=100)
    author = models.ForeignKey(User, blank=True, null=True)
    # Markup language used to render body/tease (markdown by default).
    markup = MarkupField(default='markdown')
    body = models.TextField(_('body'), )
    # Cached HTML produced from `body`; regenerated on every save().
    body_rendered = models.TextField(editable=True, blank=True, null=True)
    tease = models.TextField(_('tease'), blank=True)
    # Cached HTML produced from `tease`; regenerated on every save().
    tease_rendered = models.TextField(editable=True, blank=True, null=True)
    visits = models.IntegerField(_('visits'), default=0, editable=False)
    status = models.IntegerField(_('status'), choices=STATUS_CHOICES, default=2)
    allow_comments = models.BooleanField(_('allow comments'), default=True)
    publish = models.DateTimeField(_('publish'), default=datetime.datetime.now)
    created = models.DateTimeField(_('created'), auto_now_add=True)
    modified = models.DateTimeField(_('modified'), auto_now=True)
    categories = models.ManyToManyField(Category, blank=True)
    tags = TaggableManager(blank=True)
    # NOTE(review): PublicManager presumably restricts default queries to
    # published posts -- confirm in basic.blog.managers.
    objects = PublicManager()

    class Meta:
        verbose_name = _('post')
        verbose_name_plural = _('posts')
        db_table = 'blog_posts'
        ordering = ('-publish',)
        get_latest_by = 'publish'

    def __unicode__(self):
        return u'%s' % self.title

    def save(self, *args, **kwargs):
        """Render inlines, markup and (optionally) Smartypants into the *_rendered fields."""
        # Inlines must be rendered before markup in order to properly preserve
        # whitespace
        self.body_rendered = inlines(self.body)
        self.tease_rendered = inlines(self.tease)
        # Render the markup and save it in the body_rendered field.
        self.body_rendered = mark_safe(formatter(self.body_rendered, filter_name=self.markup))
        self.tease_rendered = mark_safe(formatter(self.tease_rendered, filter_name=self.markup))
        # Run the body and tease through Smartypants, if enabled.
        if settings.BLOG_SMARTYPANTS:
            self.body_rendered = mark_safe(formatter(self.body_rendered, filter_name='smartypants'))
            self.tease_rendered = mark_safe(formatter(self.tease_rendered, filter_name='smartypants'))
        # Call the real save.
        super(Post, self).save(*args, **kwargs)

    @permalink
    def get_absolute_url(self):
        # Date-based URL; month is zero-padded via strftime to match the URL pattern.
        return ('blog_detail', None, {
            'year': self.publish.year,
            'month': self.publish.strftime('%m'),
            'day': self.publish.day,
            'slug': self.slug
        })

    @property
    def excerpt(self):
        """
        Return the excerpt of a post, respecting the auto excerpt settings,
        with a link to continue reading if appropriate.
        """
        # Create the link to continue reading the full post.
        continue_link = """
            <p class="continue">
                <a href="%s" title="Continue reading this post">%s</a>
            </p>
        """ % (self.get_absolute_url(), settings.BLOG_CONTINUE)
        excerpt = self.tease_rendered
        # If auto excerpts are enabled and the post does not have a tease,
        # truncate the body and set that to the tease.
        if settings.BLOG_AUTOEXCERPTS and not self.tease:
            excerpt = truncatewords_html(self.body_rendered,
                settings.BLOG_AUTOEXCERPTS)
            # If the auto excerpt is the same as the full body, set the
            # continue link to an empty string so that it is not displayed.
            if excerpt == self.body_rendered:
                continue_link = ""
        # If there is an excerpt, return it followed by the continue link.
        if excerpt:
            return "%s %s" % (excerpt, mark_safe(continue_link))
        # If we're still here, there is no excerpt.
        return False

    def get_previous_post(self):
        # Navigate only among public posts (status >= 2).
        return self.get_previous_by_publish(status__gte=2)

    def get_next_post(self):
        return self.get_next_by_publish(status__gte=2)
class BlogRoll(models.Model):
    """Other blogs you follow."""
    name = models.CharField(max_length=100)
    # NOTE(review): URLField's verify_exists argument was removed in later Django
    # releases -- this code targets an old Django version; confirm before upgrading.
    url = models.URLField(verify_exists=False)
    sort_order = models.PositiveIntegerField(default=0)
    description = models.TextField(max_length=500, blank=True)
    relationship = models.CharField(max_length=200, blank=True)  # presumably an XFN rel string -- confirm

    class Meta:
        ordering = ('sort_order', 'name',)
        verbose_name = _('blog roll')
        verbose_name_plural = _('blog roll')

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        # External link: the followed blog's own URL rather than an internal page.
        return self.url
| 2.078125 | 2 |
packages/v8env/vendor/whatwg-streams/reference-implementation/web-platform-tests/css/tools/apiclient/apiclient/apiclient.py | GagnDeep/v8-isolates | 0 | 12772420 | # coding=utf-8
#
# Copyright © 2013 Hewlett-Packard Development Company, L.P.
#
# This work is distributed under the W3C® Software License [1]
# in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# [1] http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
#
# Process URI templates per http://tools.ietf.org/html/rfc6570
import base64
import collections
import contextlib
import json
import urllib
import urllib2
import urlparse
import UserString

import uritemplate
class MimeType(UserString.MutableString):
    """
    Mutable string representing a MIME type of the form "type/subtype+structure"
    (e.g. "application/home+json"), where subtype and structure are optional.
    Assigning to the component properties rewrites the underlying string data.

    NOTE(review): UserString.MutableString is Python 2 only (removed in Python 3);
    this module targets Python 2.
    """
    def __init__(self, mimeType):
        UserString.MutableString.__init__(self, mimeType)
        self._type = None
        self._subtype = None
        self._structure = None
        # Split "type/rest"; without a '/', the whole string is treated as the type.
        slashIndex = mimeType.find('/')
        if (-1 < slashIndex):
            self._type = mimeType[:slashIndex]
            mimeType = mimeType[slashIndex + 1:]
            # Split "subtype+structure"; without a '+', the remainder is the structure.
            plusIndex = mimeType.find('+')
            if (-1 < plusIndex):
                self._subtype = mimeType[:plusIndex]
                self._structure = mimeType[plusIndex + 1:]
            else:
                self._structure = mimeType
        else:
            self._type = mimeType

    def _update(self):
        # Rebuild the underlying string data from the three components.
        if (self._structure):
            if (self._subtype):
                self.data = self._type + '/' + self._subtype + '+' + self._structure
            else:
                self.data = self._type + '/' + self._structure
        else:
            self.data = self._type

    def set(self, type, structure, subtype = None):
        """Replace all three components at once and resync the string data."""
        self._type = type
        self._subtype = subtype
        self._structure = structure
        self._update()

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value
        self._update()

    @property
    def subtype(self):
        return self._subtype

    @subtype.setter
    def subtype(self, value):
        self._subtype = value
        self._update()

    @property
    def structure(self):
        return self._structure

    @structure.setter
    def structure(self, value):
        self._structure = value
        self._update()
class APIResponse(object):
    """Wrapper around a urllib2 response: HTTP status, headers and (JSON-decoded) body."""
    def __init__(self, response):
        self.status = response.getcode() if (response) else 0
        self.headers = response.info() if (response) else {}
        # Only read the body on success; other statuses leave data as None.
        self.data = response.read() if (200 == self.status) else None
        # Eagerly decode JSON payloads (preserving key order); leave raw text on failure.
        if (self.data and
            (('json' == self.contentType.structure) or ('json-home' == self.contentType.structure))):
            try:
                self.data = json.loads(self.data, object_pairs_hook = collections.OrderedDict)
            except Exception:
                pass

    @property
    def contentType(self):
        contentType = self.headers.get('content-type') if (self.headers) else None
        # BUG FIX: guard against a missing Content-Type header -- MimeType(None)
        # would raise an AttributeError while parsing.
        if (not contentType):
            return MimeType('')
        # Strip any parameters (e.g. "; charset=utf-8") before parsing the type.
        return MimeType(contentType.split(';')[0])

    @property
    def encoding(self):
        """Charset from the Content-Type parameters, defaulting to utf-8."""
        contentType = self.headers.get('content-type') if (self.headers) else None
        if (contentType and (';' in contentType)):
            encoding = contentType.split(';', 1)[1]
            if ('=' in encoding):
                return encoding.split('=', 1)[1].strip()
        return 'utf-8'
class APIHints(object):
    """Parsed 'hints' member of a json-home resource entry (allowed methods and formats)."""
    def __init__(self, data):
        if ('allow' in data):
            self.httpMethods = [verb.upper() for verb in data['allow'] if verb]
        else:
            self.httpMethods = ['GET']
        self.formats = {}
        # 'formats' applies to the representation-bearing safe/idempotent methods.
        generalFormats = [MimeType(fmt) for fmt in data.get('formats', [])]
        if (generalFormats):
            for verb in ('GET', 'PUT'):
                if (verb in self.httpMethods):
                    self.formats[verb] = generalFormats
        if (('PATCH' in self.httpMethods) and ('accept-patch' in data)):
            self.formats['PATCH'] = [MimeType(fmt) for fmt in data['accept-patch']]
        if (('POST' in self.httpMethods) and ('accept-post' in data)):
            self.formats['POST'] = [MimeType(fmt) for fmt in data['accept-post']]
        # TODO: ranges from 'accept-ranges'; preferece tokens from 'accept-prefer';
        # preconditions from 'precondition-req'; auth from 'auth-req'
        self.ranges = None
        self.preferences = None
        self.preconditions = None
        self.auth = None
        self.docs = data.get('docs')
        self.status = data.get('status')
class APIResource(object):
    """One resource from the API home document: a URI template plus its variables and hints."""
    def __init__(self, baseURI, uri, variables = None, hints = None):
        try:
            resolvedURI = urlparse.urljoin(baseURI, uri)
            self.template = uritemplate.URITemplate(resolvedURI)
            if (not variables):
                # No declared variables: expose the template's own variables, unresolved.
                self.variables = {name: '' for name in self.template.variables}
            else:
                self.variables = {name: urlparse.urljoin(baseURI, variables[name]) for name in variables}
            self.hints = hints
        except Exception as e:
            # Malformed template/variables: degrade to an empty, unusable resource.
            self.template = uritemplate.URITemplate('')
            self.variables = {}
            self.hints = None
class APIClient(object):
    """
    Generic REST client driven by an API home document (json-home, or a plain
    JSON object mapping resource names to URIs) fetched from baseURI.

    The get/post/put/patch/delete helpers expand a named resource's URI
    template with keyword arguments and issue the request. Optional HTTP
    Basic credentials and per-resource Accept versions are supported.
    """
    # BUG FIX: the `password` default was a corrupted placeholder token; it must be None.
    def __init__(self, baseURI, version = None, username = None, password = None):
        self._baseURI = baseURI
        self.defaultVersion = version
        self.defaultAccept = 'application/json'
        self.username = username
        self.password = password
        self._resources = {}    # apiKey (absolute URI) -> APIResource
        self._versions = {}     # apiKey -> preferred API version string
        self._accepts = {}      # apiKey -> preferred Accept mime type
        self._loadHome()

    @property
    def baseURI(self):
        return self._baseURI

    def _loadHome(self):
        """Fetch and parse the home document, populating self._resources."""
        home = self._callURI('GET', self.baseURI, 'application/home+json, application/json-home, application/json')
        if (home):
            if ('application/json' == home.contentType):
                # Simple format: an object mapping resource names to URIs.
                for name in home.data:
                    apiKey = urlparse.urljoin(self.baseURI, name)
                    self._resources[apiKey] = APIResource(self.baseURI, home.data[name])
            elif (('application/home+json' == home.contentType) or
                  ('application/json-home' == home.contentType)):
                # json-home format: 'resources' entries with href/href-template,
                # href-vars and hints.
                resources = home.data.get('resources')
                if (resources):
                    for name in resources:
                        apiKey = urlparse.urljoin(self.baseURI, name)
                        data = resources[name]
                        uri = data['href'] if ('href' in data) else data.get('href-template')
                        variables = data.get('href-vars')
                        hints = APIHints(data['hints']) if ('hints' in data) else None
                        self._resources[apiKey] = APIResource(self.baseURI, uri, variables, hints)

    def relativeURI(self, uri):
        """Return uri relative to baseURI when it is underneath it, else unchanged."""
        if (uri.startswith(self.baseURI)):
            relative = uri[len(self.baseURI):]
            if (relative.startswith('/') and not self.baseURI.endswith('/')):
                relative = relative[1:]
            return relative
        return uri

    @property
    def resourceNames(self):
        return [self.relativeURI(apiKey) for apiKey in self._resources]

    def resource(self, name):
        return self._resources.get(urlparse.urljoin(self.baseURI, name))

    def addResource(self, name, uri):
        """Register a resource manually (e.g. one not advertised in the home document)."""
        resource = APIResource(self.baseURI, uri)
        apiKey = urlparse.urljoin(self.baseURI, name)
        self._resources[apiKey] = resource

    def _accept(self, resource):
        """Build an Accept header value for the given resource key, honouring its version."""
        # BUG FIX: the body referenced an undefined name `api`; the parameter is `resource`.
        version = self._versions.get(resource)
        if (not version):
            version = self.defaultVersion
        return ('application/' + version + '+json, application/json') if (version) else 'application/json'

    def _callURI(self, method, uri, accept, payload = None, payloadType = None):
        """Perform one HTTP request; returns an APIResponse, or None on any failure."""
        try:
            request = urllib2.Request(uri, data = payload, headers = { 'Accept' : accept })
            if (self.username and self.password):
                request.add_header('Authorization', b'Basic ' + base64.b64encode(self.username + b':' + self.password))
            if (payload and payloadType):
                request.add_header('Content-Type', payloadType)
            request.get_method = lambda: method
            with contextlib.closing(urllib2.urlopen(request)) as response:
                return APIResponse(response)
        except Exception as e:
            # Best effort: any network or HTTP error is reported as None.
            pass
        return None

    def _call(self, method, name, arguments, payload = None, payloadType = None):
        """Resolve a resource by name, expand its URI template and issue the request."""
        apiKey = urlparse.urljoin(self.baseURI, name)
        resource = self._resources.get(apiKey)
        if (resource):
            uri = resource.template.expand(**arguments)
            if (uri):
                version = self._versions.get(apiKey) if (apiKey in self._versions) else self.defaultVersion
                # BUG FIX: _accepts is a dict; it was previously invoked as a callable.
                accept = MimeType(self._accepts[apiKey] if (apiKey in self._accepts) else self.defaultAccept)
                if (version):
                    accept.subtype = version
                return self._callURI(method, uri, accept, payload, payloadType)
        return None

    def setVersion(self, name, version):
        apiKey = urlparse.urljoin(self.baseURI, name)
        self._versions[apiKey] = version

    def setAccept(self, name, mimeType):
        apiKey = urlparse.urljoin(self.baseURI, name)
        self._accepts[apiKey] = mimeType

    def get(self, name, **kwargs):
        return self._call('GET', name, kwargs)

    def post(self, name, payload = None, payloadType = None, **kwargs):
        return self._call('POST', name, kwargs, payload, payloadType)

    def postForm(self, name, payload = None, **kwargs):
        # Requires `import urllib` at module level (urlencode lives there in Python 2).
        return self._call('POST', name, kwargs, urllib.urlencode(payload), 'application/x-www-form-urlencoded')

    def postJSON(self, name, payload = None, **kwargs):
        return self._call('POST', name, kwargs, json.dumps(payload), 'application/json')

    def put(self, name, payload = None, payloadType = None, **kwargs):
        return self._call('PUT', name, kwargs, payload, payloadType)

    def patch(self, name, patch = None, **kwargs):
        return self._call('PATCH', name, kwargs, json.dumps(patch), 'application/json-patch')

    def delete(self, name, **kwargs):
        return self._call('DELETE', name, kwargs)
| 2.09375 | 2 |
src/main.py | chezyn/ir-sender | 1 | 12772421 | <filename>src/main.py
from irsender.irsender import IrSender

# Script entry point: construct the IR sender and hand control over to it.
irsender = IrSender()
irsender.start()
| 1.132813 | 1 |
tests/orgmode.py | thetomcraig/HPI | 0 | 12772422 | <gh_stars>0
import my.notes.orgmode as orgmode
def test():
    # Smoke test: querying notes tagged 'python' should return a handful of results.
    tagged = orgmode.query().query_all(lambda node: node.with_tag('python'))
    assert len(tagged) > 5
| 1.859375 | 2 |
utils/keymaster.py | samadhicsec/threatware | 0 | 12772423 | <gh_stars>0
#!/usr/bin/env python3
"""
Utility methods for data.key.Key
"""
import logging
import re
from data import find
from data.key import key as Key
import utils.logging
logger = logging.getLogger(utils.logging.getLoggerName(__name__))
def get_section_for_key(row_key:Key) -> Key:
    """
    Walk up the parent chain from 'row_key' and return the first key (the key itself
    or an ancestor) that carries a "section" property, or None when no such key exists.
    """
    if row_key is None:
        return None

    candidate = row_key
    while candidate.getProperty("section") is None:
        parent = candidate.getProperty("parentKey")
        if parent is None:
            return None
        candidate = parent

    return candidate
def get_data_tag_for_key(row_key:Key) -> str:
    """
    Walk up the parent chain from 'row_key' and return the first '*-data' tag found on
    the key itself or an ancestor, or None. Logs a warning when a key carries several
    such tags (a key should belong to a single data section).
    """
    if row_key is None:
        return None

    def _data_tags(key):
        # A '-data' suffix marks the tag of the data section a key belongs to.
        # (str.endswith replaces the previous anchored re.search, same semantics.)
        return [tag for tag in key.getTags() if tag.endswith("-data")]

    sectionKey = row_key
    # Compute the tag list once per visited key instead of twice per loop iteration.
    data_tag_list = _data_tags(sectionKey)
    while not data_tag_list and sectionKey.getProperty("parentKey") is not None:
        sectionKey = sectionKey.getProperty("parentKey")
        data_tag_list = _data_tags(sectionKey)

    if len(data_tag_list) > 0:
        if len(data_tag_list) > 1:
            logger.warning(f"The key '{sectionKey}' should not be members of multiple data sections i.e. '{data_tag_list}'")
        return data_tag_list[0]
    return None
def get_row_identifier_for_key(row_key:Key):
    """
    Return the key acting as row identifier for 'row_key': the key itself when it is
    tagged "row-identifier", otherwise its "rowID" property, otherwise None.
    """
    if row_key is None:
        return None
    if row_key.hasTag("row-identifier"):
        return row_key
    rowID = row_key.getProperty("rowID")
    if rowID is not None:
        return rowID
    return None
def get_row_identifiers_for_key(row_key:Key):
    """
    Return the (name, value) pair of the row identifier associated with 'row_key',
    or (None, None) when the key has no row identifier.
    """
    if row_key is None:
        return None, None

    # When row_key itself is the row-identifier location, its "rowID" property is not
    # set (that would be a circular reference), so fall back to the key itself.
    identifier = row_key.getProperty("rowID")
    if identifier is None and row_key.hasTag("row-identifier"):
        identifier = row_key
    if identifier is None:
        return None, None
    return identifier.name, identifier.getProperty("value")
def get_column_name_for_key(row_key:Key) -> str:
    """Return the "colname" property of 'row_key' (the table column the key belongs to)."""
    return row_key.getProperty("colname")
# def get_row_identifier_key(row_key:Key):
# row_identifier_key, row_identifier_value = get_row_identifiers_for_key(row_key)
# parentKeys = []
# if row_identifier_key is None:
# tableKey = row_key
# # Navigate up the parents until we hit a table section
# while (tableKey.getProperty("section") is None) and (tableKey.getProperty("parentKey") is not None):
# parentKeys.append(tableKey)
# tableKey = tableKey.getProperty("parentKey")
# # Navigate back down, noting the row for the table
# for row in tableKey.getProperty("value"):
# def find_child_row(row, index):
# if isinstance(row, dict):
# row = [row]
# for row_entry in row:
# for row_key, row_value in row_entry.items():
# if row_key is parentKeys[index]:
# if index == 0:
# return True
# return find_child_row(row_value, index - 1)
# return False
# success = find_child_row(row, -1)
# if success:
# row_identifier_key = find.key_with_tag(row, "row-identifier")
# return row_identifier_key
# return None
| 2.78125 | 3 |
rebal.py | djevans071/Reba | 0 | 12772424 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 13:02:10 2017
@author: psamtik071
"""
# create rebalancing datasets from trips datasets and save to rebaltrips\
from workflow.data import *
import os
# NOTE(review): these defaults are immediately shadowed by the loop variables below.
month = 3
year = 2017
#trips = trip_data(year, month)
#columns to shift by
#shift_cols = ['stop_id','stop_time', 'stop_long', 'stop_lat', 'stop_name']
'''
these functions are moved to workflow/data.py
def shift_cond(bike_df):
    bike_df[shift_cols] = bike_df[shift_cols].shift(1)
    return bike_df[bike_df.start_id != bike_df.stop_id]
def rebal_from_trips(trips):
    trips = trips.sort_values(['bike_id', 'start_time'])
    rebal_df = trips.groupby('bike_id').apply(shift_cond).dropna()
    rebal_df.index = rebal_df.index.droplevel('bike_id')
    return rebal_df
'''
# track rebalancing events
#(REMEMBER THAT REBAL EVENTS USE STOP_TIME AS THE STARTING TIME)
to_folder = 'rebals/'
#rebal_from_trips(trip_data(year, month)).to_csv(to_folder + to_filename,
#    index = None)
# Extract one rebalancing CSV per month (Python 2 script: xrange / print statements).
# Existing output files are kept, so the extraction can be resumed safely.
for year in xrange(2017,2018):
    for month in xrange(1,3):
        to_filename = '{}{:02}-rebal-data.csv'.format(year, month)
        path = to_folder + to_filename
        print "extracting rebalancing trips to {}".format(path)
        if os.path.exists(path):
            print "{} already exists".format(path)
            pass
        else:
            try:
                rebal_from_trips(trip_data(year, month)).to_csv(path, index = None)
            except IOError:
                # NOTE(review): the IOError most likely comes from the source trip data,
                # but the message reports the OUTPUT path -- confirm and clarify.
                print '{} does not exist'.format(path)
| 2.875 | 3 |
main_natselect.py | JLMadsen/TetrisAI | 1 | 12772425 | """
Tetris in Python for Natural Selection
"""
from nat_selection.agent import Agent as NatAgent
from nat_selection.model import Model
import time
from enviorment.tetris import Tetris
# Shared game environment used by main() and the shutdown handler below;
# the options presumably disable the reduced grid/shape modes -- confirm in Tetris.
env = Tetris({'reduced_grid': 0, 'reduced_shapes': 0}, 'Genetic algorithm')
def main():
    """Play Tetris forever using a fixed, pre-trained genetic-algorithm candidate."""
    agent = NatAgent(cores=4)
    generations = 1000
    # Training is disabled; re-enable the next line to evolve a new candidate
    # instead of using the hard-coded weight vector below.
    #candidate = agent.train(generations)
    candidate = Model([-0.8995652940240592, 0.06425443268253492, -0.3175211096545741, -0.292974392382306])
    while True:
        score = 0
        state, reward, done, info = env.reset()
        while not done:
            # candidate.best presumably returns the action sequence for the chosen
            # placement -- confirm against nat_selection.model.Model.
            action = candidate.best(env)
            for a in action:
                env.render()
                #time.sleep(0.1)
                state, reward, done, info = env.step(a)
                score += reward
if __name__ == "__main__":
    # Run until interrupted (Ctrl+C); always release the environment's resources.
    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        env.quit()
| 3.359375 | 3 |
maker/SQL_Query.py | PrinceS17/Crypcy | 0 | 12772426 | <gh_stars>0
#Begin Simple SQl Queries
#@author <NAME>
import sqlite3
from django.db import connection
from .sql_operation import *
# search crypto-currencies based on prefix
# prefix: pref
def search_by_prefix(pref):
    """
    Return crypto-currencies (joined with their metrics) whose name starts with `pref`.

    SECURITY FIX: the prefix is bound as a query parameter instead of being
    %-interpolated into the SQL string, closing the SQL-injection hole.
    """
    with connection.cursor() as cursor:
        cursor.execute('''SELECT cr.logo, cr.id, cr.name, me.price, me.volume, me.supply,me.utility
        FROM maker_cryptocurrency as cr, maker_metric as me
        WHERE cr.id=me.crypto_currency_id
        AND cr.name LIKE %s''', [pref + '%'])
        res = dictfetchall(cursor)
    return res
# sort by price, ascending
def sort_by_price():
    """Return all crypto-currencies joined with their metrics, ordered by price ascending."""
    with connection.cursor() as cursor:
        cursor.execute('''SELECT cr.logo, cr.id, cr.name, me.price, me.volume, me.supply,me.utility
        FROM maker_cryptocurrency cr, maker_metric me where cr.id=
        me.crypto_currency_id
        ORDER BY price''')
        res=dictfetchall(cursor)
    return res
#sort by volume, descending
def sort_by_volume():
    """Return all crypto-currencies joined with their metrics, ordered by volume descending."""
    with connection.cursor() as cursor:
        cursor.execute('''SELECT cr.logo, cr.id, cr.name, me.price, me.volume, me.supply,me.utility
        FROM maker_cryptocurrency cr, maker_metric me where cr.id=
        me.crypto_currency_id
        ORDER BY volume DESC''')
        res=dictfetchall(cursor)
    return res
#sort by supply, descending
def sort_by_supply():
    """Return all crypto-currencies joined with their metrics, ordered by supply descending."""
    with connection.cursor() as cursor:
        cursor.execute('''SELECT cr.logo, cr.id, cr.name, me.price, me.volume, me.supply,me.utility
        FROM maker_cryptocurrency cr, maker_metric me where cr.id=
        me.crypto_currency_id
        ORDER BY supply DESC''')
        res=dictfetchall(cursor)
    return res
#sort by utility, descending
def sort_by_utility():
    """Return all crypto-currencies joined with their metrics, ordered by utility descending."""
    with connection.cursor() as cursor:
        cursor.execute('''SELECT cr.logo, cr.id, cr.name, me.price, me.volume, me.supply,me.utility
        FROM maker_cryptocurrency cr, maker_metric me where cr.id=
        me.crypto_currency_id
        ORDER BY utility DESC''')
        res=dictfetchall(cursor)
    return res
#----end <NAME>
| 2.65625 | 3 |
Code/PeformancePoint.py | ChimieleCode/OpenSees_Script | 0 | 12772427 | <reponame>ChimieleCode/OpenSees_Script<filename>Code/PeformancePoint.py
from os import listdir, write
from numpy.lib.function_base import disp
import openseespy.opensees as ops
import time
import csv
import math
import numpy as np
from ModelOptions import field_factor
from PostProcessing.Damping import computeDamping
from ModelBuilder import buildModel
import matplotlib.pyplot as plt
# ---------------------------------------------------------------------------------------------------------------------------
# <NAME>ttri
# ---------------------------------------------------------------------------------------------------------------------------
# Limit states that skip the iterative performance-point refinement below.
dont_iterate = ['SLD']
pushover_step = 0.001   # displacement increment used by the pushover analyses
tollerance = 0.002      # convergence tolerance on the performance-point residual
g = 9.81                # gravity acceleration
# Read the response spectra from CSV: rows alternate between periods (even rows)
# and spectral accelerations (odd rows); the first cell of each row is the case label.
spectras = {}
cases = []
with open('Input\Spectras.csv') as csvfile:
    data = csv.reader(csvfile)
    for i, row in enumerate(data):
        array = []
        # Even row: start a new spectrum dictionary for this case
        if (i % 2) == 0:
            spectra = {}
        for j, value in enumerate(row):
            # The first value is the case label / index
            if j == 0:
                index = value
            elif value == '':
                break
            else:
                array.append(float(value))
        # Store the row in the nested dictionary: periods on even rows, accelerations on odd rows
        if (i % 2) == 0:
            spectra['T'] = np.array(array)
        else:
            spectra['Sa'] = np.array(array)
        # Only on odd rows is the (now complete) spectrum saved
        if (i % 2) == 1:
            spectras[index] = spectra
            cases.append(index)
# Compute the displacement spectra: Sd = Sa * g * T^2 / (4 * pi^2)
for case in cases:
    spectras[case]['Sd'] = spectras[case]['Sa'] * g * spectras[case]['T']**2 / (4 * math.pi**2)
# ---------------------------------------------------------------------------------------------------------------------------
# Inizio la definizione della prima Push
# ---------------------------------------------------------------------------------------------------------------------------
from ImportFromJson import frame
from ControlNode import controlNode
from BasicFunctions.NodeFunctions import nodeGrid
from BasicFunctions.InelasticShape import inelasticShape, getEffectiveMass
from Classes.PushPullAnalysis import PushPullAnalysis
m = frame.m  # number of floors above ground (lateral forces are applied at floors 1..m)
n = frame.n  # number of bays (there are n + 1 base nodes)
first_pushover = PushPullAnalysis(points = [0.7], step = pushover_step, pattern = inelasticShape(frame))
# Set up the recorders
base_nodes = []
for i in range (n + 1): # Collect the base nodes that need a reaction recorder
    base_nodes.append(nodeGrid(i, 0))
# Base reactions
ops.recorder('Node', '-file', 'Output\PerformancePoint\Base_Reactions.out', '-time', '-node', *base_nodes, '-dof', 1, 'reaction')
# Displacement of the control node
ops.recorder('Node', '-file', 'Output\PerformancePoint\Control_Disp.out', '-time', '-node', controlNode(), '-dof', 1, 'disp')
print(f'-o-o-o- Performance Point: Prima Pushover -o-o-o-')
# Start the stopwatch
tStart = round(time.time() * 1000)
# Define the analysis parameters
direzione = first_pushover.points[0]/abs(first_pushover.points[0])
spostamento = first_pushover.points[0]
step = direzione * first_pushover.step
total_steps = round(spostamento/step)
# Define the lateral load pattern
ops.pattern('Plain', 1, 1)
for j in range(m): # j from 0 to m - 1
    try:
        ops.load(nodeGrid(0, j + 1), direzione * first_pushover.pattern[j], 0., 0.) # Apply force j at floor j + 1 (floor 0 is skipped)
    except:
        print('Errore: Il numero di forze del pattern differisce dal numero di piani fuori terra')
# Analysis options
ops.constraints('Transformation')
ops.numberer('RCM')
ops.system('BandGen')
ops.test('NormDispIncr', 0.000001, 100)
ops.algorithm('NewtonLineSearch', True, False, False, False, 0.8, 1000, 0.1, 10)
ops.integrator('DisplacementControl', controlNode(), 1, step)
ops.analysis('Static')
# Run the analysis
ops.record()
ops.analyze(total_steps)
# Performance info
tStop = round(time.time() * 1000)
totalTime = (tStop - tStart)/1000
print(f'-o-o-o- Analisi conclusa Prima Pushover dopo {totalTime} sec -o-o-o- ')
# Clean up recorders and tear the model down before the next analysis
ops.remove('recorders')
ops.reset()
ops.wipeAnalysis()
ops.wipe()
# ---------------------------------------------------------------------------------------------------------------------------
# Leggo la curva
# ---------------------------------------------------------------------------------------------------------------------------
# Assemble the capacity (pushover) curve from the recorder output files
pushover_curve = {
    'Base Reactions' : [],
    'Displacements' : []
}
with open('Output\PerformancePoint\Base_Reactions.out') as csvfile:
    data = csv.reader(csvfile, delimiter=' ')
    for row in data:
        base_reaction = 0
        # Column 0 is the pseudo-time; sum the reactions of all base nodes
        for i, value in enumerate(row):
            if i == 0:
                continue
            else:
                base_reaction += float(row[i])
        # Flip the sign so that the base shear is positive in the push direction
        pushover_curve['Base Reactions'].append(-base_reaction)
with open('Output\PerformancePoint\Control_Disp.out', mode = 'r') as csvfile:
    data = csv.reader(csvfile, delimiter=' ')
    for row in data:
        pushover_curve['Displacements'].append(float(row[1]))
effective_mass = getEffectiveMass(frame)
pushover_curve['Base Reactions'] = np.array(pushover_curve['Base Reactions'])
pushover_curve['Displacements'] = np.array(pushover_curve['Displacements'])
# Convert base shear to acceleration: Vb / (M_eff / r * g) -- presumably the
# pseudo-acceleration of the equivalent SDOF system, confirm frame.r semantics.
pushover_curve['Accelerations'] = pushover_curve['Base Reactions'] / (effective_mass/frame.r * g)
# Intersect the capacity curve with each demand spectrum to obtain the
# first-trial performance point for every case
performance_points = {}
for case in cases:
    from Performance.Functions import intersection
    displacement, acceleration = intersection(
        pushover_curve['Displacements'],
        pushover_curve['Accelerations'],
        spectras[case]['Sd'],
        spectras[case]['Sa']
    )
    performance_points[case] = {
        'Sd' : displacement,
        'Vb' : acceleration * effective_mass * g,
        'Sa' : acceleration,
        'eta' : 1  # spectral reduction factor, initialised to 1 -- refined in the iteration below
    }
# ---------------------------------------------------------------------------------------------------------------------------
# ITER
# ---------------------------------------------------------------------------------------------------------------------------
for removal in dont_iterate:
cases.remove(removal)
for case in cases:
residual = 1
iteration = 1
while residual > tollerance and iteration <= 20:
if iteration == 1:
performance_points[case]['Sd'] = performance_points[case]['Sd'] * 0.6
cycle_disp = float(performance_points[case]['Sd'])
buildModel()
# Definisco il pattern di spinta
ops.pattern('Plain', 1, 1)
for j in range(m): # j da 0 a m - 1
try:
ops.load(nodeGrid(0, j + 1), direzione * first_pushover.pattern[j], 0., 0.) # Applico la forza j al piano j + 1 poiché salto il piano 0
except:
print('Errore: Il numero di forze del pattern differisce dal numero di piani fuori terra')
push_pull_ciclica = PushPullAnalysis(points = [cycle_disp, -cycle_disp, cycle_disp], step = pushover_step, pattern = inelasticShape(frame))
# Reazione alla base
ops.recorder('Node', '-file', 'Output\Pushover\Pushover_Base_Reactions.out', '-time', '-node', *base_nodes, '-dof', 1, 'reaction')
# Spostamento del nodo di Controllo
ops.recorder('Node', '-file', 'Output\Pushover\Pushover_Control_Disp.out', '-time', '-node', controlNode(), '-dof', 1, 'disp')
print(f'-o-o-o- Performing PushPull iter:{iteration} -o-o-o-')
# Parte il cronometro
tStart = round(time.time() * 1000)
# Definisco i Parametri
direzione = push_pull_ciclica.points[0]/abs(push_pull_ciclica.points[0])
spostamento = push_pull_ciclica.points[0]
step = direzione * push_pull_ciclica.step
total_steps = round(spostamento/step)
# ---------------------------------------------------------------------------------------------------------------------------
# Opzioni di analisi
# ---------------------------------------------------------------------------------------------------------------------------
ops.constraints('Transformation')
ops.numberer('RCM')
ops.system('BandGen')
ops.test('NormDispIncr', 0.000001, 100)
ops.algorithm('NewtonLineSearch', True, False, False, False, 0.8, 1000, 0.1, 10)
ops.integrator('DisplacementControl', controlNode(), 1, step)
ops.analysis('Static')
# ---------------------------------------------------------------------------------------------------------------------------
# Mando l'analisi
# ---------------------------------------------------------------------------------------------------------------------------
ops.record()
ops.analyze(total_steps)
# Info sulle Performance
tStop = round(time.time() * 1000)
totalTime = (tStop - tStart)/1000
print(f'-o-o-o- Analisi conclusa 1/{len(push_pull_ciclica.points)} dopo {totalTime} sec -o-o-o- ')
# ---------------------------------------------------------------------------------------------------------------------------
# Punti successivi
# ---------------------------------------------------------------------------------------------------------------------------
if len(push_pull_ciclica.points) > 1:
for v in range(1,len(push_pull_ciclica.points)):
print(f'-o-o-o- Analisi Push-Pull {v + 1}/{len(push_pull_ciclica.points)} -o-o-o-')
# Parte il cronometro
tStart = round(time.time() * 1000)
# Definisco i Parametri
try:
direzione = abs(push_pull_ciclica.points[v] - push_pull_ciclica.points[v - 1]) / (push_pull_ciclica.points[v] - push_pull_ciclica.points[v - 1])
spostamento = push_pull_ciclica.points[v] - push_pull_ciclica.points[v - 1]
step = direzione * push_pull_ciclica.step
total_steps = round(spostamento/step)
except:
print('Errore nella definizione dei punti: controllare che non ci siano punti successivi di egual spostamento')
break
# Opzioni di Analisi
ops.numberer('RCM')
ops.system('BandGen')
ops.test('NormDispIncr', 0.000001, 100)
ops.algorithm('NewtonLineSearch', True, False, False, False, 0.8, 1000, 0.1, 10)
ops.integrator('DisplacementControl', controlNode(), 1, step)
ops.analysis('Static')
# Mando l'analisi
ops.record()
ops.analyze(total_steps)
# Info sulle Performance
tStop = round(time.time() * 1000)
totalTime = (tStop - tStart)/1000
print(f'-o-o-o- Analisi conclusa {v + 1}/{len(push_pull_ciclica.points)} dopo {totalTime} sec -o-o-o- ')
ops.remove('recorders')
ops.reset()
ops.wipeAnalysis()
ops.wipe()
# ---------------------------------------------------------------------------------------------------------------------------
# Computa damping e riduci spettro
# ---------------------------------------------------------------------------------------------------------------------------
damping_points = computeDamping()
damping = damping_points[0][1]
old_eta = performance_points[case]['eta']
performance_points[case]['eta'] = (7/(2 + 2 + damping * 100))**field_factor
spectras[case]['Sd'] = spectras[case]['Sd'] * performance_points[case]['eta']/old_eta
spectras[case]['Sa'] = spectras[case]['Sa'] * performance_points[case]['eta']/old_eta
# ---------------------------------------------------------------------------------------------------------------------------
# Nuova intersezione
# ---------------------------------------------------------------------------------------------------------------------------
displacement, acceleration = intersection(
pushover_curve['Displacements'],
pushover_curve['Accelerations'],
spectras[case]['Sd'],
spectras[case]['Sa']
)
old_displacement = performance_points[case]['Sd']
performance_points[case] = {
'Sd' : displacement,
'Vb' : acceleration * effective_mass * g,
'Sa' : acceleration,
'eta' : performance_points[case]['eta']
}
residual = abs(performance_points[case]['Sd'] - old_displacement)
print(residual)
iteration += 1
# ---------------------------------------------------------------------------------------------------------------------------
# Results to CSV
# ---------------------------------------------------------------------------------------------------------------------------
# Spettri
with open('Output\PerformancePoint\spectras.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
raw_data = []
cases.insert(0,*dont_iterate)
for case in cases:
raw_data.append(spectras[case]['Sd'])
raw_data.append(spectras[case]['Sa'])
writer.writerows(zip(*raw_data))
# Pushover
with open('Output\PerformancePoint\pushover.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
raw_data = [pushover_curve['Displacements'], pushover_curve['Accelerations']]
writer.writerows(zip(*raw_data))
# PP
with open('Output\PerformancePoint\performance_point.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
raw_data = []
for case in cases:
raw_data.append(
[
performance_points[case]['Sd'],
performance_points[case]['Sa'],
performance_points[case]['Vb'],
performance_points[case]['eta']
]
)
writer.writerows(zip(*raw_data))
# ---------------------------------------------------------------------------------------------------------------------------
# Graph
# ---------------------------------------------------------------------------------------------------------------------------
# Push
plt.plot(
pushover_curve['Displacements'],
pushover_curve['Accelerations'],
label = 'Pushover',
color = '0.5',
linestyle = '-',
linewidth = 1
)
for case in cases:
spectras[case]['Sd'] = spectras[case]['Sd'].tolist()
spectras[case]['Sa'] = spectras[case]['Sa'].tolist()
spectras[case]['Sd'].append(spectras[case]['Sd'][-1])
spectras[case]['Sa'].append(0)
plt.plot(
spectras[case]['Sd'],
spectras[case]['Sa'],
label = case,
color = '0.2',
linestyle = '--',
linewidth = 1
)
plt.plot(
performance_points[case]['Sd'],
performance_points[case]['Sa'],
color = 'r',
linestyle = '',
marker = 'o',
markersize = 4
)
# Titoli Assi
plt.ylabel('Sa [g]')
plt.xlabel('Sd [m]')
# Titolo Grafico
plt.title(f'Performance Points')
# Mostra Legenda e Griglia
plt.legend()
# Imposta i valori limite degli assi
plt.ylim(ymin = 0, ymax = 1)
plt.xlim(xmin = 0, xmax = 0.5)
plt.grid(
True,
linestyle = '--'
)
plt.savefig('Figures\Performance.png')
plt.clf()
| 2.171875 | 2 |
tf2onnx/version.py | lucienwang1009/tensorflow-onnx | 1 | 12772428 | <filename>tf2onnx/version.py
# Release version of the package.
version = '1.6.0'
# Git commit hash the release was built from.
git_version = '82f805f8fe7d2fa91e6ca9d39e153712f6887fec'
| 1.09375 | 1 |
views_cart.py | PDXCodeCoop/ecommerce | 0 | 12772429 | <gh_stars>0
from __future__ import division

import copy

import stripe
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template.context import RequestContext

from models import *
from views_utils import *
#Adds an item to the cart
def addToCart(request):
    """
    Add the POSTed product (with any selected accessories and options) to
    the session cart, then redirect to checkout.

    The line is only added when the product is available (preorder,
    "unlimited" status, or in stock) and an identical line (ignoring
    quantity) is not already present in the cart.
    """
    cart = request.session.get('cart', [])
    if request.method == "POST":
        if 'item_id' in request.POST:
            product_id = request.POST['item_id']
            product = get_object_or_404(Product, pk=product_id)
            if product.preorder or product.status() == "unlimited" or int(product.stock) >= 1:
                for accessory_pk in getPostList(request, "accessories"):
                    try:
                        # Validate the accessory exists. get_object_or_404
                        # raises Http404 -- the original caught
                        # Product.DoesNotExist, which it never raises.
                        get_object_or_404(Product, pk=accessory_pk)
                    except (Http404, Product.DoesNotExist):
                        return HttpResponseRedirect(reverse('store:checkout'))
                    # NOTE(review): the original also stock-checked each
                    # accessory into an unused local list; that dead code
                    # was removed (it had no observable effect).
                newCartItem = {
                    u"product_id": product.pk,
                    u"quantity": 1,
                    u"accessories": getPostList(request, "accessories"),
                    u"options": getPostList(request, "options"),
                }
                # Only reached for an available product: the duplicate check
                # previously ran unconditionally and raised NameError when
                # the product was unavailable (newCartItem was undefined).
                if not findDuplicateDictInList(newCartItem, cart, ['quantity']):
                    cart.append(newCartItem)
                request.session['cart'] = cart
    return HttpResponseRedirect(reverse('store:checkout'))
#Changes the quantity in the cart
def changeQuantity(request):
    """
    Set the quantity of a cart line from POST data, clamped by the stock
    limits of the product and each of its accessories.

    The line is removed when the product (or an accessory) no longer
    exists, or when the clamped quantity ends up negative.
    """
    cart = request.session.get('cart', [])
    if request.method == "POST":
        item_id = int(getPostValue(request, "item_id"))
        item = cart[item_id]
        try:
            product = get_object_or_404(Product, pk=item['product_id'])
            # Clamp the requested quantity to the product's limit...
            item['quantity'] = product.set_limit(int(getPostValue(request, "quantity")))
            if item['accessories'] is not None:
                # ...and further clamp it by every accessory's limit.
                for accessory_pk in item['accessories']:
                    accessory = get_object_or_404(Product, pk=accessory_pk)
                    item['quantity'] = accessory.set_limit(item['quantity'])
            if item['quantity'] < 0:
                del cart[item_id]
        except (Http404, Product.DoesNotExist):
            # get_object_or_404 raises Http404; the original only caught
            # Product.DoesNotExist, so stale lines were never removed.
            del cart[item_id]
    request.session['cart'] = cart
    return HttpResponseRedirect(reverse('store:checkout'))
def delete(request, product_id):
    """Remove the cart line at index *product_id*; drop any coupon once the cart empties."""
    cart = request.session.get('cart', [])
    cart.pop(int(product_id))
    if not cart and 'coupon' in request.session:
        del request.session['coupon']
    request.session['cart'] = cart
    return HttpResponseRedirect(reverse('store:checkout'))
| 2.171875 | 2 |
pddl_parser_2/pond/utils.py | QuMuLab/action-reachability-via-deadend-detection | 4 | 12772430 | <reponame>QuMuLab/action-reachability-via-deadend-detection
from .predicate import Predicate
"""
General purpose utilities
"""
import re
def get_contents(fname: str) -> str:
    """
    Return the contents of the given file with comments removed.

    PDDL comments (a ';' up to the end of the line, including any leading
    whitespace) are stripped and the result is whitespace-trimmed.
    """
    # Context manager guarantees the handle is closed even if read() fails
    # (the original open/read/close leaked on error).
    with open(fname, "r") as fp:
        contents = fp.read()
    return re.sub(r"\s*;(.*?)\n", "\n", contents).strip()
class PDDL_Utils:
    """
    Collection of general-purpose utilities used for parsing PDDL files.
    """

    @staticmethod
    def apply_type(item_list, t):
        """Assign type *t* to the trailing run of still-untyped items in *item_list* (in place)."""
        for idx in reversed(range(len(item_list))):
            if isinstance(item_list[idx], tuple):
                # Already typed: everything before it was typed earlier too.
                break
            item_list[idx] = (item_list[idx], t)

    @staticmethod
    def read_type(node):
        """Read the (possibly typed) item list for the given parse-tree node."""
        items = []
        children = node.children
        idx = 0
        while idx < len(children):
            name = children[idx].name
            if name == "-":
                # "-" is followed by the type of the preceding items.
                PDDL_Utils.apply_type(items, children[idx + 1].name)
                idx += 2
            else:
                items.append(name)
                idx += 1
        # Give the default type to anything left untyped.
        PDDL_Utils.apply_type(items, Predicate.OBJECT)
        return items
| 2.609375 | 3 |
integralpsychology/settings/production.py | djangulo/integralpsychology.life | 0 | 12772431 | from .base import *
from integralpsychology.secrets import SECRET_KEY
DEBUG = False
try:
from .local import *
except ImportError:
pass
| 1.046875 | 1 |
custom_components/wort_des_tages/sensor_const.py | Ludy87/astra_germany_wort_des_tages | 0 | 12772432 | from .const import *
SENSOR_TYPES = {
ATTR_WDT_WORD: [ # state
'Wort des Tages',
'mdi:book-open-variant',
None,
],
ATTR_WDT_ORIGIN: [ # origin
'Herkunft',
'mdi:map-marker-star-outline',
None,
],
ATTR_WDT_MEANING: [ # meaning
'Bedeutung',
'mdi:script-text-play-outline',
None,
],
ATTR_WDT_SPELLING: [ # spelling
'Worttrennung',
'mdi:format-text-wrapping-clip',
None,
],
ATTR_WDT_WORD_FREQUENCY: [ # word_frequency
'Häufigkeit',
'hass:eye',
None,
],
ATTR_WDT_LAST_UPDATED: [ # last_updated
'Update Datum',
'mdi:update',
None,
],
ATTR_WDT_CURRENT_TIME: [ # current_time
'Update Zeit',
'mdi:update',
None,
],
}
| 1.09375 | 1 |
test.py | BailiShanghai/pywzl-bin2c_array | 1 | 12772433 | #!/usr/bin/env python
#
# Copyright 2001-2004 by <NAME>. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the standalone Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests thread safety.
Copyright (C) 2001-2004 <NAME>. All Rights Reserved.
"""
import logging, logging.handlers, threading, random
import _thread
# Report errors raised inside handlers' emit() calls instead of hiding them.
logging.raiseExceptions = 1

# Stress-test parameters: worker thread count and records per worker.
NUM_THREADS = 10
LOOP_COUNT = 10000

# (level, format string, level word) triples; doLog() inserts the thread
# number as the %3d argument.
LOG_MESSAGES = [
    (logging.DEBUG, "%3d This is a %s message", "debug"),
    (logging.INFO, "%3d This is an %s message", "informational"),
    (logging.WARNING, "%3d This is a %s message", "warning"),
    (logging.ERROR, "%3d This is an %s message", "error"),
    (logging.CRITICAL, "%3d This is a %s message", "critical"),
]

# Logger names at several hierarchy depths, chosen at random per record.
LOG_NAMES = ["A", "A.B", "A.B.C", "A.B.C.D"]
def doLog(num):
    """Worker body: emit LOOP_COUNT randomly-chosen log records as thread *num*."""
    logger = logging.getLogger('')
    logger.info("*** thread %s started (%d)", _thread.get_ident(), num)
    for _ in range(LOOP_COUNT):
        logger = logging.getLogger(random.choice(LOG_NAMES))
        a = random.choice(LOG_MESSAGES)
        args = a[0:2] + (num,) + a[2:]
        # Logger.log expects (level, msg, *args); the original passed the
        # whole tuple as the level argument, raising a TypeError.
        logger.log(*args)
def test():
    """Spawn NUM_THREADS workers that log concurrently to a file and a socket."""
    f = logging.Formatter("%(asctime)s %(levelname)-9s %(name)-8s %(thread)5s %(message)s")
    root = logging.getLogger('')
    root.setLevel(logging.DEBUG)
    # File handler ('w' truncates any previous run's log).
    h = logging.FileHandler('thread.log', 'w')
    root.addHandler(h)
    h.setFormatter(f)
    # Also forward every record to a local socket-based log server.
    h = logging.handlers.SocketHandler('localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    root.addHandler(h)
    threads = []
    for i in range(NUM_THREADS):
        # len(threads) doubles as the worker's sequential id.
        threads.append(threading.Thread(target=doLog, args=(len(threads),)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
# Allow running the stress test directly as a script.
if __name__ == "__main__":
    test()
| 2.046875 | 2 |
external-import/kaspersky/src/kaspersky/master_yara/importer.py | opencti-platform/connectors | 0 | 12772434 | <filename>external-import/kaspersky/src/kaspersky/master_yara/importer.py<gh_stars>0
"""Kaspersky Master YARA importer module."""
import itertools
from datetime import datetime
from typing import Any, List, Mapping, Optional, Tuple
from kaspersky.client import KasperskyClient
from kaspersky.importer import BaseImporter
from kaspersky.master_yara.builder import YaraRuleGroupBundleBuilder
from kaspersky.models import Yara, YaraRule
from kaspersky.utils import (
YaraRuleUpdater,
convert_yara_rules_to_yara_model,
datetime_to_timestamp,
datetime_utc_now,
is_current_weekday_before_datetime,
timestamp_to_datetime,
)
from pycti import OpenCTIConnectorHelper # type: ignore
from stix2 import Bundle, Identity, MarkingDefinition # type: ignore
from stix2.exceptions import STIXError # type: ignore
class MasterYaraImporter(BaseImporter):
    """Kaspersky Master YARA importer."""
    # State key under which the timestamp of the latest fetch is persisted.
    _LATEST_MASTER_YARA_TIMESTAMP = "latest_master_yara_timestamp"
    def __init__(
        self,
        helper: OpenCTIConnectorHelper,
        client: KasperskyClient,
        author: Identity,
        tlp_marking: MarkingDefinition,
        update_existing_data: bool,
        master_yara_fetch_weekday: Optional[int],
        master_yara_include_report: bool,
        master_yara_report_type: str,
        master_yara_report_status: int,
    ) -> None:
        """Initialize Kaspersky Master YARA importer."""
        super().__init__(helper, client, author, tlp_marking, update_existing_data)
        self.master_yara_fetch_weekday = master_yara_fetch_weekday
        self.master_yara_include_report = master_yara_include_report
        self.master_yara_report_type = master_yara_report_type
        self.master_yara_report_status = master_yara_report_status
        # Tracks which YARA rules already exist in OpenCTI.
        self.yara_rule_updater = YaraRuleUpdater(self.helper)
    def run(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
        """Run importer.

        Fetches the Master YARA (respecting the configured fetch weekday),
        filters out already-known rules, groups the new rules by report and
        sends one STIX bundle per group. Returns the new connector state.
        """
        self._info(
            "Running Kaspersky Master YARA importer (update data: {0}, include report: {1})...", # noqa: E501
            self.update_existing_data,
            self.master_yara_include_report,
        )
        latest_master_yara_timestamp = state.get(self._LATEST_MASTER_YARA_TIMESTAMP)
        if latest_master_yara_timestamp is None:
            latest_master_yara_datetime = None
        else:
            latest_master_yara_datetime = timestamp_to_datetime(
                latest_master_yara_timestamp
            )
        # Honour the optional "only fetch on this weekday" configuration.
        master_yara_fetch_weekday = self.master_yara_fetch_weekday
        if master_yara_fetch_weekday is not None:
            if not is_current_weekday_before_datetime(
                master_yara_fetch_weekday, latest_master_yara_datetime
            ):
                self._info("It is not time to fetch the Master YARA yet.")
                return self._create_state(latest_master_yara_datetime)
        yara = self._fetch_master_yara()
        yara_rules = yara.rules
        yara_rule_count = len(yara_rules)
        self._info(
            "Master YARA with {0} rules...",
            yara_rule_count,
        )
        # Keep only rules not already present in OpenCTI.
        new_yara_rules = self.yara_rule_updater.update_existing(yara.rules)
        new_yara_rule_count = len(new_yara_rules)
        self._info(
            "{0} new YARA rules...",
            new_yara_rule_count,
        )
        grouped_yara_rules = self._group_yara_rules_by_report(new_yara_rules)
        group_count = len(grouped_yara_rules)
        self._info(
            "{0} YARA rule groups...",
            group_count,
        )
        for group, rules in grouped_yara_rules:
            self._info("YARA rule group: ({0}) {1}", len(rules), group)
        failed_count = 0
        for yara_rule_group in grouped_yara_rules:
            result = self._process_yara_rule_group(yara_rule_group)
            if not result:
                failed_count += 1
        success_count = group_count - failed_count
        self._info(
            "Kaspersky Master YARA importer completed (imported: {0}, total: {1})",
            success_count,
            group_count,
        )
        return self._create_state(datetime_utc_now())
    @classmethod
    def _create_state(cls, latest_datetime: Optional[datetime]) -> Mapping[str, Any]:
        """Build the connector state mapping from the latest fetch datetime."""
        if latest_datetime is None:
            return {}
        return {
            cls._LATEST_MASTER_YARA_TIMESTAMP: datetime_to_timestamp(latest_datetime)
        }
    def _fetch_master_yara(self) -> Yara:
        """Download the APT Master YARA and parse it into the Yara model."""
        report_group = "apt"
        master_yara = self.client.get_master_yara(report_group)
        return convert_yara_rules_to_yara_model(master_yara, imports_at_top=True)
    @staticmethod
    def _group_yara_rules_by_report(
        yara_rules: List[YaraRule],
    ) -> List[Tuple[str, List[YaraRule]]]:
        """Group rules by their (stripped) report name; rules without a report share key ""."""
        def _key_func(item: YaraRule) -> str:
            if item.report is not None:
                return item.report.strip()
            return ""
        groups = []
        # groupby only groups adjacent items, so sort by the same key first.
        sorted_yara_rules = sorted(yara_rules, key=_key_func)
        for key, group in itertools.groupby(sorted_yara_rules, key=_key_func):
            groups.append((key, list(group)))
        return groups
    def _process_yara_rule_group(
        self, yara_rule_group: Tuple[str, List[YaraRule]]
    ) -> bool:
        """Build and send the bundle for one rule group; False on build failure."""
        self._info("Processing YARA rule group {0}...", yara_rule_group[0])
        yara_rule_group_bundle = self._create_yara_rule_group_bundle(yara_rule_group)
        if yara_rule_group_bundle is None:
            return False
        # with open(f"yara_rule_group_bundle_{yara_rule_group[0]}.json", "w") as f:
        #     f.write(yara_rule_group_bundle.serialize(pretty=True))
        self._send_bundle(yara_rule_group_bundle)
        return True
    def _create_yara_rule_group_bundle(
        self, yara_rule_group: Tuple[str, List[YaraRule]]
    ) -> Optional[Bundle]:
        """Build a STIX bundle for the rule group; None when STIX creation fails."""
        author = self.author
        object_markings = [self.tlp_marking]
        source_name = self._source_name()
        confidence_level = self._confidence_level()
        include_report = self.master_yara_include_report
        report_type = self.master_yara_report_type
        report_status = self.master_yara_report_status
        bundle_builder = YaraRuleGroupBundleBuilder(
            yara_rule_group[0],
            yara_rule_group[1],
            author,
            object_markings,
            source_name,
            confidence_level,
            include_report,
            report_type,
            report_status,
        )
        try:
            return bundle_builder.build()
        except STIXError as e:
            self._error(
                "Failed to build YARA rule bundle for '{0}': {1}",
                yara_rule_group[0],
                e,
            )
            return None
| 2.078125 | 2 |
src/level_three/peculiar_balance.py | peterlin1/google-foobar | 0 | 12772435 | <reponame>peterlin1/google-foobar
class PeculiarBalance:
    """
    Google foobar "Peculiar Balance" challenge.

    A scale holds an object of mass x units on its left pan; the only
    available weights have masses 1, 3, 9, 27, ... (one per power of 3).
    answer(x) returns, for each weight from smallest to largest, where to
    put it so the scale balances: "L" (left pan, next to the object),
    "R" (right pan) or "-" (unused). The last element is never "-".
    x is a positive integer no larger than 1000000000.
    """

    @staticmethod
    def answer(x):
        """
        Peculiar Balance, solved via the balanced-ternary expansion of x.

        Parameters
        ----------
        x : int

        Returns
        -------
        ret : list

        Examples
        --------
        >>> PeculiarBalance().answer(2)
        ['L', 'R']

        >>> PeculiarBalance().answer(8)
        ['L', '-', 'R']

        >>> PeculiarBalance().answer(345)
        ['-', 'R', 'L', 'R', 'R', 'R']
        """
        if x == 0:
            return ['-']
        placements = []
        while x:
            remainder = x % 3
            if remainder == 0:
                # This power-of-3 weight is not needed.
                placements.append('-')
            elif remainder == 1:
                # Weight goes on the right pan, opposite the object.
                placements.append('R')
            else:
                # Digit 2 is rewritten as 3 - 1 in balanced ternary: the
                # weight joins the object on the left and one carries over.
                placements.append('L')
                x += 1
            x //= 3
        return placements
| 3.734375 | 4 |
yandex/yandex2016_a_a.py | knuu/competitive-programming | 1 | 12772436 | P, A, B = map(int, input().split())
if P >= A+B:
print(P)
elif B > P:
print(-1)
else:
print(A+B)
| 3.34375 | 3 |
lambdas/mail sender/code.py | PKopel/IOT-Smartband | 0 | 12772437 | import json
import smtplib
from socket import gaierror
import datetime as dt
# Mailtrap SMTP connection settings.
port = 2525
smtp_server = "smtp.mailtrap.io"
login = "eb<PASSWORD>" # paste your login generated by Mailtrap
password = "<PASSWORD>" # paste your password generated by Mailtrap
# Fixed sender/recipient addresses for alert mails.
sender = "<EMAIL>"
receiver = "<EMAIL>"
# RFC 822 header template; {receiver}/{sender} are str.format placeholders.
const = """\
Subject: Received alarm
To: {receiver}
From: {sender}
"""
# Timestamp of the last alert e-mail (used for rate limiting); initialised
# far in the past so the first alarm is always delivered.
last_send = dt.datetime(2000, 1, 1)
def send_mail(msg: str):
    """Deliver *msg* via the configured SMTP server; report (never raise) failures."""
    try:
        with smtplib.SMTP(smtp_server, port) as connection:
            connection.login(login, password)
            connection.sendmail(sender, receiver, msg)
            print('Sent')
    except (gaierror, ConnectionRefusedError):
        print('Failed to connect to the server. Bad connection settings?')
    except smtplib.SMTPServerDisconnected:
        print('Failed to connect to the server. Wrong user/password?')
    except smtplib.SMTPException as e:
        print('SMTP error occurred: ' + str(e))
def handle_json(msg):
    """
    Parse an incoming JSON measurement and e-mail an alert when it carries
    an "alarm" key, rate-limited to at most one mail per 60 seconds.
    """
    structure = json.loads(msg)
    global last_send
    current_time = dt.datetime.now()
    # total_seconds() gives the full elapsed time; the original used
    # .seconds, which is only the sub-day component and wraps every 24 h,
    # silently suppressing alarms at the wrong moments.
    seconds_from_last_call = (current_time - last_send).total_seconds()
    is_alarm = "alarm" in structure
    if (not is_alarm) or seconds_from_last_call < 60:
        return
    last_send = current_time
    time = dt.datetime.strptime(structure["time"][:-3], "%Y-%m-%dT%H:%M:%S.%f").strftime("%Y-%m-%d %H:%M:%S")
    # Fill in the To/From placeholders of the header template; the original
    # sent the literal "{receiver}"/"{sender}" text in the headers.
    header = const.format(receiver=receiver, sender=sender)
    message = header + "Alarm send by " + str(structure["uid"]) + " at " + time + \
              "\nPulse: " + str(int(structure["pulse"])) + \
              "\nTemperature: " + str(round(structure["temp"], 2))
    send_mail(message)
def lambda_handler(event, context):
    """AWS Lambda entry point: forward the event to handle_json and echo it back."""
    print(event)
    payload = json.dumps(event)
    handle_json(payload)
    return {
        'statusCode': 200,
        'body': payload
    }
| 2.890625 | 3 |
ffws/test/parser/test_LightCycler480.py | mojaie/flashflood-workspace-sample | 0 | 12772438 | #
# (C) 2014-2017 <NAME>
# Licensed under the MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import unittest
from ffws.parser import LightCycler480 as lc
class TestLightCycler480(unittest.TestCase):
    """Tests for the LightCycler480 raw export parser."""
    def test_file_loader(self):
        """file_loader exposes plate metadata and per-well values.

        NOTE(review): relies on the fixture file
        ./raw/instruments/LightCycler480.txt being present.
        """
        data = lc.file_loader("./raw/instruments/LightCycler480.txt")
        plate1 = data["plates"][0]
        self.assertEqual(plate1["plateId"], "Plate1")
        self.assertEqual(plate1["layerIndex"], 0)
        # Some wells parse to the literal string "NaN" rather than a float.
        self.assertEqual(plate1["wellValues"][0], "NaN")
        self.assertEqual(plate1["wellValues"][3], "NaN")
        self.assertEqual(plate1["wellValues"][22], 9.83E-1)
        self.assertEqual(plate1["wellValues"][361], 1.98E3)
        self.assertEqual(plate1["wellValues"][383], "NaN")
| 2.40625 | 2 |
pybrowser/elements/file.py | abranjith/pybrowser | 22 | 12772439 | __author__ = 'Ranjith'
import os
from .utils import find_elements_for_element
from .actions import Action
from ..common_utils import get_user_home_dir
from ..exceptions import InvalidArgumentError
from ..downloader import download_url
class File(Action):
    """
    Action wrapper around file-related page elements: <input type="file">
    uploads and download links.
    """
    def __init__(self, driver, locator=None, element=None, wait_time=10, visible=False):
        super().__init__(driver, locator, element, wait_time, visible)
        # Populated by the download callback once a transfer finishes.
        self._downloaded_files = None
        self._is_download_complete = False

    def upload(self, filename=""):
        """Type *filename* into the underlying file input. Returns self."""
        if not os.path.isfile(filename):
            # Include the offending path in the message (the original text
            # did not say which value was invalid), matching download()'s
            # error style.
            raise InvalidArgumentError(f"{filename} is not a file. Please provide valid filename for upload")
        self.element.send_keys(filename)
        return self

    #TODO: requires work - link not found, more testing
    def download(self, directory=None, as_filename=None, asynch=True, unzip=False, del_zipfile=False, add_to_ospath=False):
        """
        Download this element's href (or every child <a> link when the
        element itself has no href) into *directory* (defaults to the
        user's home). Returns self; progress is observable through
        is_download_complete / downloaded_files.
        """
        # Reset completion state for this new batch of downloads.
        self._is_download_complete = False
        self._downloaded_files = None
        if not directory:
            directory = get_user_home_dir()
        if not os.path.isdir(directory):
            raise InvalidArgumentError(f"{directory} is not a directory. Please provide valid directory for download")
        link = self.href
        if link:
            self._download_file(link, directory, as_filename, asynch, unzip, del_zipfile, add_to_ospath)
        else:
            links = self._get_child_links()
            #TODO: not a good solution, think of a better way to resolve this
            for l in links:
                # as_filename only makes sense for a single link, so it is
                # dropped when downloading every child link.
                self._download_file(l, directory, None, asynch, unzip, del_zipfile, add_to_ospath)
        return self

    def _download_file(self, link, directory, as_filename, asynch, unzip, del_zipfile, add_to_ospath):
        """Kick off one download; failures are deliberately swallowed (best effort)."""
        try:
            download_url(url=link, to_dir=directory, download_filename=as_filename, asynch=asynch,
                         unzip=unzip, del_zipfile=del_zipfile, add_to_ospath=add_to_ospath, callback=self._callback)
        except Exception:
            # Best-effort download: errors are intentionally ignored.
            # TODO(review): consider surfacing/logging the failure.
            pass

    def _get_child_links(self):
        """Return all <a> descendants of this element."""
        child_links_xpath = "xpath=.//a"
        return find_elements_for_element(self.element, child_links_xpath)

    @property
    def is_download_complete(self):
        """True once the download callback has fired for this batch."""
        return self._is_download_complete

    @property
    def downloaded_files(self):
        """Files reported by completed downloads (None until the first callback)."""
        return self._downloaded_files

    def _callback(self, files):
        """Invoked by the downloader when a transfer completes."""
        self._is_download_complete = True
        if self._downloaded_files:
            if files:
                self._downloaded_files += files
        else:
            self._downloaded_files = files
| 2.375 | 2 |
multitask_benchmark/datasets_generation/graph_algorithms.py | Michaelvll/pna | 249 | 12772440 | <reponame>Michaelvll/pna<filename>multitask_benchmark/datasets_generation/graph_algorithms.py
import math
from queue import Queue
import numpy as np
def is_connected(A):
    """
    :param A:np.array the adjacency matrix
    :return:bool whether the graph is connected or not
    """
    n = A.shape[0]
    # Work on a boolean reachability matrix that includes self-loops.
    # Without them, powers of a bipartite adjacency matrix (e.g. a single
    # edge: [[0,1],[1,0]]) never become all-positive, so connected graphs
    # were misreported as disconnected; a single-node graph was also
    # misreported. With self-loops, squaring k times covers all paths of
    # length up to 2**k, and 2**ceil(log2(n)) >= n-1 suffices.
    reach = (A > 0) | np.eye(n, dtype=bool)
    for _ in range(int(math.ceil(math.log2(max(n, 2))))):
        # Boolean "matrix squaring": (i, j) becomes reachable iff some k
        # links i->k and k->j; ints avoid overflow ambiguity of bool dot.
        reach = np.dot(reach.astype(int), reach.astype(int)) > 0
    return bool(reach.all())
def identity(A, F):
    """
    :param A:np.array the adjacency matrix (unused; kept for a uniform signature)
    :param F:np.array the nodes features
    :return:F unchanged
    """
    return F
def first_neighbours(A):
    """
    :param A:np.array the adjacency matrix
    :return: for each node, the number of nodes reachable in 1 hop
    """
    # Count strictly-positive entries per column (edge weights > 0 count as edges).
    return np.count_nonzero(np.asarray(A) > 0, axis=0)
def second_neighbours(A):
    """
    :param A:np.array the adjacency matrix
    :return: for each node, the number of nodes reachable in no more than 2 hops
    """
    adj = (A > 0).astype(int)
    # adj covers 1-hop paths, adj @ adj covers 2-hop paths.
    reach = adj + np.dot(adj, adj)
    # A node does not count as its own neighbour.
    np.fill_diagonal(reach, 0)
    return np.sum(reach > 0, axis=0)
def kth_neighbours(A, k):
    """
    :param A:np.array the adjacency matrix
    :return: for each node, the number of nodes reachable in k hops
    """
    adj = (A > 0).astype(int)
    # Accumulate adj + adj^2 + ... + adj^k via acc_t = acc_{t-1} @ adj + adj.
    acc = np.zeros_like(adj)
    for _ in range(k):
        acc = np.dot(acc, adj) + adj
    # A node does not count as its own neighbour.
    np.fill_diagonal(acc, 0)
    return np.sum(acc > 0, axis=0)
def map_reduce_neighbourhood(A, F, f_reduce, f_map=None, hops=1, consider_itself=False):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: for each node, map its neighbourhood with f_map, and reduce it with f_reduce
    """
    features = F if f_map is None else f_map(F)
    adj = np.array(A) > 0
    # reach[i, j] > 0  <=>  j lies within `hops` steps of i.
    reach = np.zeros(adj.shape)
    for _ in range(hops):
        reach = np.dot(reach, adj) + adj
    # Diagonal decides whether a node belongs to its own neighbourhood.
    np.fill_diagonal(reach, 1 if consider_itself else 0)
    mask = reach > 0
    return np.array([f_reduce(features[mask[row]]) for row in range(adj.shape[0])])
def max_neighbourhood(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: for each node, the maximum in its neighbourhood (the node itself included)
    """
    return map_reduce_neighbourhood(A, F, np.max, consider_itself=True)
def min_neighbourhood(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: for each node, the minimum in its neighbourhood (the node itself included)
    """
    return map_reduce_neighbourhood(A, F, np.min, consider_itself=True)
def std_neighbourhood(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: for each node, the standard deviation of its neighbourhood (the node itself included)
    """
    return map_reduce_neighbourhood(A, F, np.std, consider_itself=True)
def mean_neighbourhood(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: for each node, the mean of its neighbourhood (the node itself included)
    """
    return map_reduce_neighbourhood(A, F, np.mean, consider_itself=True)
def local_maxima(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: for each node, whether it is the maximum in its neighbourhood (itself included)
    """
    return F == map_reduce_neighbourhood(A, F, np.max, consider_itself=True)
def graph_laplacian(A):
    """
    :param A:np.array the adjacency matrix
    :return: the (unweighted) graph Laplacian: degree on the diagonal, -1 for each edge
    """
    degrees = np.sum(A > 0, axis=0)
    laplacian = (A > 0) * -1
    np.fill_diagonal(laplacian, degrees)
    return laplacian
def graph_laplacian_features(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: the graph Laplacian of A applied to the feature vector/matrix F
    """
    laplacian = graph_laplacian(A)
    return np.matmul(laplacian, F)
def isomorphism(A1, A2, F1=None, F2=None):
    """
    Takes two adjacency matrices (A1,A2) and (optionally) two lists of integer node
    features, and runs Weisfeiler-Lehman colour refinement. WL is a necessary but not
    sufficient condition, so false positives might arise; a False answer is always correct.
    :param A1: adj_matrix, N*N numpy matrix
    :param A2: adj_matrix, N*N numpy matrix
    :param F1: node_values, numpy array of size N
    :param F2: node_values, numpy array of size N
    :return: isomorphic: False when the two graphs are provably not isomorphic, True when they probably are.
    """
    N = A1.shape[0]
    if (F1 is None) ^ (F2 is None):
        raise ValueError("either both or none between F1,F2 must be defined.")
    if F1 is None:
        # Featureless graphs: start every node from the same colour.
        F1 = np.ones(N, int)
        F2 = np.ones(N, int)
    else:
        # Feature multisets must match for any isomorphism to exist.
        if not np.array_equal(np.sort(F1), np.sort(F2)):
            return False
        # BUG FIX: `F1.dtype()` called a non-callable attribute, so the old code
        # raised TypeError whenever features were supplied at all.
        if not np.issubdtype(np.asarray(F1).dtype, np.integer):
            raise NotImplementedError('Still have to implement this')

    def mapping(F):
        # Affine re-colouring, kept inside a large prime field to avoid overflow.
        return (F * 234 + 133) % 1000000007

    def adjacency_hash(F):
        # Order-independent polynomial hash of a neighbourhood's colour multiset.
        F = np.sort(F)
        b = 257
        h = 0
        for f in F:
            h = (b * h + f) % 1000000007
        return h

    # N refinement rounds: recolour each node by its closed neighbourhood's colours,
    # then compare the colour multisets of the two graphs.
    for _ in range(N):
        F1 = map_reduce_neighbourhood(A1, F1, adjacency_hash, f_map=mapping, consider_itself=True, hops=1)
        F2 = map_reduce_neighbourhood(A2, F2, adjacency_hash, f_map=mapping, consider_itself=True, hops=1)
        if not np.array_equal(np.sort(F1), np.sort(F2)):
            return False
    return True
def count_edges(A):
    """
    :param A:np.array the adjacency matrix
    :return: the number of edges in the graph
    """
    # assumes a symmetric 0/1 adjacency matrix — TODO confirm for weighted graphs
    total = np.sum(A)
    return total / 2
def is_eulerian_cyclable(A):
    """
    :param A:np.array the adjacency matrix
    :return: whether the graph has an eulerian cycle (connected and every degree even)
    """
    if not is_connected(A):
        return False
    odd_degree_count = np.count_nonzero(first_neighbours(A) % 2 == 1)
    return odd_degree_count == 0
def is_eulerian_percorrible(A):
    """
    :param A:np.array the adjacency matrix
    :return: whether the graph has an eulerian path (connected with exactly 0 or 2 odd-degree nodes)
    """
    if not is_connected(A):
        return False
    odd_degree_count = np.count_nonzero(first_neighbours(A) % 2 == 1)
    return odd_degree_count in [0, 2]
def map_reduce_graph(A, F, f_reduce):
    """
    :param A:np.array the adjacency matrix (unused; kept for a uniform labelling interface)
    :param F:np.array the nodes features
    :return: the node features reduced to a single graph-level value by f_reduce
    """
    return f_reduce(F)
def mean_graph(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: the graph-level mean of the node features
    """
    return map_reduce_graph(A, F, f_reduce=np.mean)
def max_graph(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: the graph-level maximum of the node features
    """
    return map_reduce_graph(A, F, f_reduce=np.max)
def min_graph(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: the graph-level minimum of the node features
    """
    return map_reduce_graph(A, F, f_reduce=np.min)
def std_graph(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array the nodes features
    :return: the graph-level standard deviation of the node features
    """
    return map_reduce_graph(A, F, f_reduce=np.std)
def has_hamiltonian_cycle(A):
    """
    :param A:np.array the adjacency matrix
    :return:bool whether the graph has an hamiltonian cycle

    Exhaustive backtracking over paths anchored at vertex 0 (exponential time;
    intended for the small graphs this module targets).
    """
    # BUG FIX: the old `A += np.transpose(A)` mutated the caller's array in place
    # (and corrupted plain list-of-lists inputs by extending the list).
    # Symmetrize into a fresh boolean matrix instead.
    A = np.asarray(A)
    A = (A + A.T) > 0
    V = A.shape[0]

    def ham_cycle_loop(pos):
        # Try to extend the partial path path[0..pos-1] to a full cycle.
        if pos == V:
            # All vertices placed: a cycle exists iff the last one links back to the start.
            if A[path[pos - 1]][path[0]]:
                return True
            else:
                return False
        for v in range(1, V):
            if A[path[pos - 1]][v] and not used[v]:
                path[pos] = v
                used[v] = True
                if ham_cycle_loop(pos + 1):
                    return True
                # Backtrack.
                path[pos] = -1
                used[v] = False
        return False

    used = [False] * V
    path = [-1] * V
    path[0] = 0  # cycles are rotation-invariant, so fixing the start is safe
    return ham_cycle_loop(1)
def all_pairs_shortest_paths(A, inf_sub=math.inf):
    """
    Floyd-Warshall all-pairs shortest paths.
    :param A:np.array the adjacency matrix (0 means "no edge")
    :param inf_sub: the placeholder value to use for pairs which are not connected
    :return:np.array all pairs shortest paths
    """
    # BUG FIX: force a float matrix; assigning math.inf into an integer array
    # raised OverflowError for integer adjacency matrices.
    A = np.array(A, dtype=float)
    N = A.shape[0]
    for i in range(N):
        for j in range(N):
            if A[i][j] == 0:
                A[i][j] = math.inf  # missing edge -> unreachable until relaxed
            if i == j:
                A[i][j] = 0  # distance to itself is always 0
    for k in range(N):
        for i in range(N):
            for j in range(N):
                A[i][j] = min(A[i][j], A[i][k] + A[k][j])
    A = np.where(A == math.inf, inf_sub, A)
    return A
def diameter(A):
    """
    :param A:np.array the adjacency matrix
    :return: the diameter of the graph (maximum finite shortest-path distance;
             unreachable pairs are treated as -1)
    """
    # The total edge weight is an upper bound on any finite path length,
    # so anything above it must be the "unreachable" placeholder.
    total_weight = np.sum(A)
    distances = all_pairs_shortest_paths(A)
    distances = np.where(distances < total_weight + 1, distances, -1)
    return np.max(distances)
def eccentricity(A):
    """
    :param A:np.array the adjacency matrix
    :return: per-node eccentricity (distance to the farthest reachable node;
             unreachable pairs are treated as -1)
    """
    total_weight = np.sum(A)  # upper bound on any finite path length
    distances = all_pairs_shortest_paths(A)
    distances = np.where(distances < total_weight + 1, distances, -1)
    return np.max(distances, axis=0)
def sssp_predecessor(A, F):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array one-hot vector marking the designated source node
    :return: matrix P where P[v][u] == 1 means u is v's parent on a shortest path to the source
    """
    assert (np.sum(F) == 1)
    assert (np.max(F) == 1)
    source = np.argmax(F)
    n = A.shape[0]
    parents = np.zeros(A.shape)
    visited = np.zeros(n)
    frontier = Queue()
    frontier.put(source)
    visited[source] = 1
    # Standard BFS: the first time a node is reached, record which node reached it.
    while not frontier.empty():
        node = frontier.get()
        for neighbour in range(n):
            if A[node][neighbour] > 0 and visited[neighbour] == 0:
                visited[neighbour] = 1
                parents[neighbour][node] = 1
                frontier.put(neighbour)
    return parents
def max_eigenvalue(A):
    """
    :param A:np.array the adjacency matrix
    :return: the eigenvalue of A with the largest absolute value (real part only;
             for symmetric A the spectrum is real)
    """
    eigenvalues, _ = np.linalg.eig(A)
    dominant = np.argmax(np.absolute(eigenvalues))
    return eigenvalues[dominant].real
def max_eigenvalues(A, k):
    """
    :param A:np.array the adjacency matrix
    :param k:int the number of eigenvalues to be selected
    :return: the k greatest (by absolute value) eigenvalues of A, real parts only
    """
    eigenvalues, _ = np.linalg.eig(A)
    order = sorted(range(len(eigenvalues)), key=lambda idx: -np.absolute(eigenvalues[idx]))
    return eigenvalues[order[:k]].real
def max_absolute_eigenvalues(A, k):
    """
    :param A:np.array the adjacency matrix
    :param k:int the number of eigenvalues to be selected
    :return: the absolute values of the k greatest (by absolute value) eigenvalues of A
    """
    top_eigenvalues = max_eigenvalues(A, k)
    return np.absolute(top_eigenvalues)
def max_absolute_eigenvalues_laplacian(A, n):
    """
    :param A:np.array the adjacency matrix
    :param n:int the number of eigenvalues to be selected
    :return: the absolute values of the n greatest (by absolute value) eigenvalues of A's Laplacian
    """
    laplacian = graph_laplacian(A)
    return np.absolute(max_eigenvalues(laplacian, n))
def max_eigenvector(A):
    """
    :param A:np.array the adjacency matrix
    :return: the eigenvector paired with the largest-by-absolute-value eigenvalue
             (real part only; for symmetric A the eigenvectors are real)
    """
    eigenvalues, eigenvectors = np.linalg.eig(A)
    dominant = np.argmax(np.absolute(eigenvalues))
    return eigenvectors[:, dominant].real
def spectral_radius(A):
    """
    :param A:np.array the adjacency matrix
    :return: the spectral radius of A, i.e. the largest absolute eigenvalue
    """
    return np.abs(max_eigenvalue(A))
def page_rank(A, F=None, iter=64):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array with initial weights. If None, uniform initialization will happen.
    :param iter: log2 of length of power iteration
    :return: for each node, its pagerank
    """
    # BUG FIX: cast to float up front; the in-place row normalization below
    # raised TypeError for integer adjacency matrices.
    A = np.array(A, dtype=float)
    # normalize A rows -> row-stochastic transition matrix
    A /= A.sum(axis=1)[:, np.newaxis]
    # power iteration by repeated squaring: afterwards A == A_0 ** (2**iter)
    for _ in range(iter):
        A = np.matmul(A, A)
    # generate prior distribution
    if F is None:
        F = np.ones(A.shape[-1])
    else:
        # float cast for the same reason as above (integer priors crashed on /=)
        F = np.array(F, dtype=float)
    # normalize prior
    F /= np.sum(F)
    # compute limit distribution
    return np.matmul(F, A)
def tsp_length(A, F=None):
    """
    :param A:np.array the adjacency matrix
    :param F:np.array determining which nodes are to be visited. If None, all of them are.
    :return: the length of the Traveling Salesman Problem shortest solution

    Held-Karp bitmask dynamic programming over the selected target nodes,
    run on the all-pairs shortest-path closure of A (so the "tour" may pass
    through non-target nodes). O(2^T * T^2) time for T targets.
    """
    A = all_pairs_shortest_paths(A)
    N = A.shape[0]
    if F is None:
        F = np.ones(N)
    targets = np.nonzero(F)[0]
    T = targets.shape[0]
    S = (1 << T)
    # dp[mask][u] = cheapest walk visiting exactly the targets in `mask`, ending at target u.
    dp = np.zeros((S, T))

    def popcount(x):
        # Kernighan bit-count: clears the lowest set bit each iteration.
        b = 0
        while x > 0:
            x &= x - 1
            b += 1
        return b

    # Process masks in order of increasing popcount so sub-masks are ready first.
    msks = np.argsort(np.vectorize(popcount)(np.arange(S)))
    # Base cases: masks with at most one bit set. dp stays 0 where the end node
    # is in the mask, and is impossible (inf) where it is not.
    for i in range(T + 1):
        for j in range(T):
            if (1 << j) & msks[i] == 0:
                dp[msks[i]][j] = math.inf
    for i in range(T + 1, S):
        msk = msks[i]
        for u in range(T):
            if (1 << u) & msk == 0:
                # u is not part of this subset, so it cannot be the endpoint.
                dp[msk][u] = math.inf
                continue
            cost = math.inf
            for v in range(T):
                if v == u or (1 << v) & msk == 0:
                    continue
                # Extend the best walk over msk \ {u} ending at v by the edge v -> u.
                cost = min(cost, dp[msk ^ (1 << u)][v] + A[targets[v]][targets[u]])
            dp[msk][u] = cost
    # Best walk covering every target, regardless of where it ends.
    # NOTE(review): this is the shortest Hamiltonian *path* over the targets,
    # not a closed tour (no return edge is added) — confirm that is intended.
    return np.min(dp[S - 1])
def get_nodes_labels(A, F):
    """
    Takes the adjacency matrix and the list of nodes features (and a list of algorithms) and returns
    a set of labels for each node
    :param A: adj_matrix, N*N numpy matrix
    :param F: node_values, numpy array of size N
    :return: labels: KxN numpy matrix where K is the number of labels for each node
    """
    per_node_labels = [
        identity(A, F),
        map_reduce_neighbourhood(A, F, np.mean, consider_itself=True),
        map_reduce_neighbourhood(A, F, np.max, consider_itself=True),
        map_reduce_neighbourhood(A, F, np.std, consider_itself=True),
        first_neighbours(A),
        second_neighbours(A),
        eccentricity(A),
    ]
    # Stack as (labels, nodes) then swap to (nodes, labels).
    return np.swapaxes(np.stack(per_node_labels), 0, 1)
def get_graph_labels(A, F):
    """
    Takes the adjacency matrix and the list of nodes features (and a list of algorithms) and returns
    a set of labels for the whole graph
    :param A: adj_matrix, N*N numpy matrix
    :param F: node_values, numpy array of size N
    :return: labels: numpy array of size K where K is the number of labels for the graph
    """
    graph_level_labels = [diameter(A)]
    return np.asarray(graph_level_labels)
| 3.296875 | 3 |
05/05.py | teagles/teagles-pc | 0 | 12772441 | <reponame>teagles/teagles-pc<gh_stars>0
#!/usr/bin/env python
#http://www.pythonchallenge.com/pc/def/peak.html
import sys
import urllib2
import pickle
PICKLE_FILE = "http://www.pythonchallenge.com/pc/def/banner.p"
def printMatrix(matrix):
    """Render a run-length encoded matrix to stdout.

    Each row is a sequence of (char, count) pairs; every row ends in a newline.
    """
    for line in matrix:
        for char, num in line:
            sys.stdout.write(char * num)
        sys.stdout.write('\n')
# 05/05.py
def main(args=None):
    """Download the pickled banner matrix and render it to stdout."""
    if args is None:
        args = sys.argv[1:]
    payload = urllib2.urlopen(PICKLE_FILE).read()
    banner = pickle.loads(payload)
    printMatrix(banner)


if __name__ == '__main__':
    main()
| 3.390625 | 3 |
creds_example.py | kellerjustin/rpi-examples | 0 | 12772442 | PASSWORD = "<PASSWORD>"
TO = ["<EMAIL>",
"<EMAIL>"]
FROM = "<EMAIL>"
| 1.15625 | 1 |
tests/test_newlines.py | MKuranowski/aiocsv | 19 | 12772443 | <reponame>MKuranowski/aiocsv
import aiofiles
import pytest
from aiocsv import AsyncDictReader, AsyncReader
# CSV dialect overrides shared by every test in this module.
DIALECT_PARAMS = {"escapechar": "$", "lineterminator": "\n"}
# Fixture file containing newlines embedded inside quoted/escaped fields.
FILENAME = "tests/newlines.csv"
HEADER = ["field1", "field2", "field3"]
# Expected parsed rows (excluding the header), in file order.
READ_VALUES = [
    ["hello", 'is it "me"', "you're\nlooking for"],
    ["this is going to be", "another\nbroken row", "this time with escapechar"],
    ["and now it's both quoted\nand", "with", "escape char"]
]
@pytest.mark.asyncio
async def test_newline_read():
    """AsyncReader must keep newlines embedded in quoted/escaped fields intact."""
    async with aiofiles.open(FILENAME, mode="r", encoding="ascii", newline="") as af:
        parsed_rows = [row async for row in AsyncReader(af, **DIALECT_PARAMS)]
    assert parsed_rows == [HEADER] + READ_VALUES
@pytest.mark.asyncio
async def test_newline_dict_read():
    """AsyncDictReader must map each parsed row onto the header fields."""
    async with aiofiles.open(FILENAME, mode="r", encoding="ascii", newline="") as af:
        parsed_rows = [row async for row in AsyncDictReader(af, **DIALECT_PARAMS)]
    for actual_row, expected_values in zip(parsed_rows, READ_VALUES):
        assert actual_row == dict(zip(HEADER, expected_values))
src/githubclient/prfile.py | Preocts/githubclient | 1 | 12772444 | <reponame>Preocts/githubclient<gh_stars>1-10
"""
CLI Controls for creates a PR with a file
Author: Preocts <Preocts#8196>
"""
from __future__ import annotations
import argparse
import logging
import os
import pathlib
import sys
from datetime import datetime
from typing import Any
from typing import Dict
from typing import List
from typing import MutableMapping
from typing import NamedTuple
from typing import Optional
from typing import Sequence
import colorama
import toml
from colorama import Fore
from githubclient.repoactions import RepoActions
# Project home, shown in the CLI help epilog.
REPO_URL = "https://github.com/Preocts/githubclient"
# Per-directory cache of the last-used repo settings.
CONFIG_FILE = ".default_config.toml"
CWD = pathlib.Path.cwd()
# Timestamp-based defaults so every run proposes a unique branch/PR.
DEFAULT_NEW_BRANCH = datetime.now().strftime("%Y%m%d.%H%M%S")
DEFAULT_TITLE = f"{DEFAULT_NEW_BRANCH} - Automated PR request"
DEFAULT_MESSAGE = f"{DEFAULT_NEW_BRANCH} - Automated PR request"
class PromptValues(NamedTuple):
    """Dataclass to hold prompt values"""

    # Name of the branch the pull request will be created from.
    new_branch: str
    # Pull request title.
    title: str
    # Pull request body/message.
    message: str
class RepoConfig(NamedTuple):
    """Immutable bundle of repository and user settings for repo actions."""

    reponame: str = ""
    ownername: str = ""
    username: str = ""
    useremail: str = ""
    usertoken: str = ""
    basebranch: str = ""

    def to_toml(self) -> Dict[str, Any]:
        """Return the config as a nested dict under the top-level 'repo' key."""
        return {"repo": dict(self._asdict())}

    @classmethod
    def from_toml(cls, toml_in: MutableMapping[str, Any]) -> RepoConfig:
        """Build a config from a parsed TOML mapping; missing keys default to ''."""
        repo = toml_in.get("repo", {})
        return cls(**{field: repo.get(field, "") for field in cls._fields})
def cli_parser(args: Optional[Sequence[str]] = None) -> argparse.Namespace:
    """Build the argument parser and parse *args* (sys.argv when None)."""
    parser = argparse.ArgumentParser(
        prog="prfiles",
        description="Add files to a repo on a unique branch and create a Pull Request.",
        epilog=f"Check it. {REPO_URL}",
    )
    parser.add_argument(
        "filenames",
        type=str,
        nargs="*",
        help="One, or more, files to be added to the pull request (utf-8 encoding)",
    )
    # Simple string-valued options, declared in display order.
    string_options = [
        ("--reponame", "Set name of target repo (https://github.com/[owner name]/[repo name])"),
        ("--ownername", "Set repo's owner name (https://github.com/[owner name]/[repo name])"),
        ("--username", "Set your GitHub user name"),
        ("--useremail", "Set your GitHub email for pull requests"),
        ("--basebranch", "Set the base branch from which pull requests will be created"),
        ("--usertoken", "Set the developer auth-token (must have 'public_repo' access)"),
    ]
    for flag, help_text in string_options:
        parser.add_argument(flag, type=str, help=help_text)
    # Boolean switches.
    flag_options = [
        ("--draft", "draft", "Submit pull request as a draft."),
        ("--debug", "debug", "Turns internal logging level to DEBUG."),
    ]
    for flag, dest, help_text in flag_options:
        parser.add_argument(flag, dest=dest, action="store_true", help=help_text)
    return parser.parse_args() if args is None else parser.parse_args(args)
def main_cli() -> None:
    """Console-script entry point: parse args, run main, exit with its status."""
    colorama.init(autoreset=True)
    raise SystemExit(main(cli_parser()))
def main(args: argparse.Namespace) -> int:
    """Main CLI process

    Merges saved and CLI config, prompts for anything missing, and creates the
    pull request. Returns 0 on success, 1 otherwise.
    """
    logging.basicConfig(level="DEBUG" if args.debug else "ERROR")
    config = fill_config(load_config(CONFIG_FILE, args))
    # Persist the merged settings for the next run.
    # NOTE(review): the config is saved before the file check below, so a failed
    # run still updates the cached settings — confirm that is intended.
    save_config(CONFIG_FILE, config)
    if not all_files_exist(args.filenames):
        raise FileNotFoundError(f"Unable to find files: {args.filenames}")
    prompt_values = run_user_prompt()
    # RepoActions reads the token/user name from the environment.
    inject_env_secrets(config)
    result = create_pull_request(prompt_values, config, args.filenames, args.draft)
    if result:
        opt_word = " draft " if args.draft else " "
        print(f"{Fore.GREEN}Pull request{opt_word}created: ", end="")
        print(result)
    else:
        print(f"{Fore.RED}Something went wrong...")
    return 0 if result else 1
def get_input(prompt: str) -> str:
    """Get user input

    Thin wrapper around input(); presumably kept separate so tests can
    monkeypatch it — TODO confirm.
    """
    return input(prompt)
def run_user_prompt() -> PromptValues:
    """Allow user to update values or abort

    Interactive loop: shows the current branch/title/message, lets the user
    edit title or message, submit, or abort (which exits the process).
    """
    new_branch = DEFAULT_NEW_BRANCH
    title = DEFAULT_TITLE
    message = DEFAULT_MESSAGE
    input_prompt = "set (t)itle, set (m)essage, (s)ubmit, (a)bort (t/m/s/a)? "
    uinput = ""
    while uinput != "s":
        # Re-display the current values on every pass.
        print(f"\n{Fore.GREEN}New Branch: {Fore.WHITE}{new_branch}")
        print(f"{Fore.GREEN}PR Title : {Fore.WHITE}{title}")
        print(f"{Fore.GREEN}PR Message: {Fore.WHITE}{message}")
        print("-" * 20)
        uinput = get_input(input_prompt).lower()
        if uinput == "a":
            # Abort the whole program.
            sys.exit(1)
        elif uinput == "t":
            title = get_input("Enter new title: ")
        elif uinput == "m":
            message = get_input("Enter new message: ")
    # Empty strings fall back to the timestamp-based defaults.
    return PromptValues(
        new_branch=new_branch if new_branch else DEFAULT_NEW_BRANCH,
        title=title if title else DEFAULT_TITLE,
        message=message if message else DEFAULT_MESSAGE,
    )
def save_config(filename: str, config: RepoConfig) -> None:
    """Serialize *config* as TOML into *filename* under the working directory."""
    target = pathlib.Path(CWD / filename)
    with open(target, "w") as toml_out:
        toml.dump(config.to_toml(), toml_out)
def load_config(filename: str, args: argparse.Namespace) -> RepoConfig:
    """Load the cached TOML config (empty config when absent), then overlay any
    values the user supplied on the command line."""
    try:
        with open(pathlib.Path(CWD / filename), "r") as toml_in:
            config = RepoConfig.from_toml(toml.load(toml_in))
    except FileNotFoundError:
        config = RepoConfig()
    # Argparse option names match the RepoConfig field names one-to-one.
    merged = {
        field: getattr(config, field) if getattr(args, field) is None else getattr(args, field)
        for field in RepoConfig._fields
    }
    return RepoConfig(**merged)
def fill_config(config: RepoConfig) -> RepoConfig:
    """Prompt the user for every config value that is still empty."""
    completed = {
        key: value if value else input(f"Enter {key}: ")
        for key, value in config._asdict().items()
    }
    return RepoConfig(**completed)
def all_files_exist(files: List[str]) -> bool:
    """Return True iff *files* is non-empty and every listed path exists."""
    if not files:
        return False
    return all(pathlib.Path(name).exists() for name in files)
def inject_env_secrets(config: RepoConfig) -> None:
    """Export the GitHub auth token and user name into the environment,
    where RepoActions reads them from."""
    os.environ["GITHUB_AUTH_TOKEN"] = config.usertoken
    os.environ["GITHUB_USER_NAME"] = config.username
def create_pull_request(
    prompt_values: PromptValues,
    config: RepoConfig,
    filenames: List[str],
    draft: bool = False,
) -> str:
    """Create pull request with indicated files, returns url on success

    Low-level Git object flow: branch -> blobs -> tree -> commit -> ref update
    -> pull request.
    """
    client = RepoActions(config.ownername, config.reponame)
    # New branch off the configured base branch.
    branch = client.create_branch(config.basebranch, prompt_values.new_branch)
    # One blob per file; pair each blob sha with its filename for the tree.
    blobs = [client.create_blob(content).sha for content in load_files(filenames)]
    blob_names = [(sha, filename) for sha, filename in zip(blobs, filenames)]
    tree = client.create_blob_tree(branch.sha, blob_names)
    commit = client.create_commit(
        author_name=config.username,
        author_email=config.useremail,
        branch_sha=branch.sha,
        tree_sha=tree.sha,
        message=prompt_values.message,
    )
    # Point the new branch at the freshly-created commit.
    client.update_reference(prompt_values.new_branch, commit.sha)
    pull_request = client.create_pull_request(
        new_branch=prompt_values.new_branch,
        base_branch=config.basebranch,
        pr_title=prompt_values.title,
        pr_body=prompt_values.message,
        draft=draft,
    )
    return pull_request.html_url
def load_files(filenames: List[str]) -> List[str]:
    """Read each file as UTF-8 text and return the contents in the same order."""
    contents: List[str] = []
    for name in filenames:
        with open(name, "r", encoding="utf-8") as handle:
            contents.append(handle.read())
    return contents
# Allow running this module directly as a script.
if __name__ == "__main__":
    main_cli()
| 2.5625 | 3 |
populate_db.py | BabouZ17/open-food-facts | 0 | 12772445 | <gh_stars>0
#! /usr/bin/env python
#-*- coding: utf-8 -*-
"""
Script performered to populate the database before the program can
be used to searched among the OpenFoodFacts Database.
"""
from api_manager import ApiManager
from constants import MIN_PRODUCTS_PER_CATEGORY, DATABASE, HOST, USER, PASSWORD
from mysql.connector import errorcode
import mysql.connector
def create_db(cursor, cnx):
    """
    Create the database if it does not exist

    Then selects it as the active database on the connection. Failures are
    reported to stdout rather than raised.
    """
    try:
        cursor.execute(
            "CREATE DATABASE IF NOT EXISTS {} DEFAULT CHARACTER SET 'utf8'".format(DATABASE)
        )
    except mysql.connector.Error as err:
        print("Oups, failed in creating {} with error: {}".format(DATABASE, err))
    try:
        # Switch the connection to the (possibly just created) database.
        cnx.database = DATABASE
        print("Database is ready.")
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Issue with the DATABASE")
        else:
            print(err)
def populate_tables(cursor):
    """
    SQL Instruction to generate the tables

    Creates the category/product/substitute tables plus the two join tables,
    then applies the index and foreign-key ALTERs. Errors are printed, not raised.
    """
    tables = {}
    alterings = {}
    tables['category'] = (
        " CREATE TABLE IF NOT EXISTS `category` ("
        " `id` INT(10) UNSIGNED AUTO_INCREMENT,"
        " `name` TEXT NOT NULL,"
        " `url` TEXT,"
        " `tag` VARCHAR(100),"
        " PRIMARY KEY (`id`)"
        " )ENGINE=InnoDB")
    tables['product'] = (
        " CREATE TABLE IF NOT EXISTS `product` ("
        " `id` INT(10) UNSIGNED AUTO_INCREMENT,"
        " `name` TEXT NOT NULL,"
        " `description` TEXT,"
        " `stores` VARCHAR(100),"
        " `link` TEXT,"
        " `grade` VARCHAR(20),"
        " `fat` VARCHAR(10),"
        " `sugars` VARCHAR(10),"
        " `salt` VARCHAR(10),"
        " `saturated_fat` VARCHAR(10),"
        " PRIMARY KEY (`id`)"
        " )ENGINE=InnoDB")
    # Index on product.name, needed by the substitutes_products FK below.
    alterings['product'] = (
        " ALTER TABLE `product` "
        " ADD INDEX `name_index` (`name`)"
    )
    tables['substitute'] = (
        " CREATE TABLE IF NOT EXISTS `substitute` ("
        " `id` INT(10) UNSIGNED AUTO_INCREMENT,"
        " `name` VARCHAR(45) NOT NULL,"
        " `description` VARCHAR(45),"
        " PRIMARY KEY (`id`)"
        " )ENGINE=InnoDB")
    # Many-to-many link between categories and products.
    tables['categories_products'] = (
        " CREATE TABLE IF NOT EXISTS `categories_products` ("
        " `category_id` INT(10) UNSIGNED,"
        " `product_id` INT(10) UNSIGNED"
        " )ENGINE=InnoDB")
    alterings['categories_products'] = (
        " ALTER TABLE `categories_products` "
        " ADD CONSTRAINT `fk_category` FOREIGN KEY (`category_id`)"
        " REFERENCES `category` (`id`) ON DELETE CASCADE,"
        " ADD CONSTRAINT `fk_product` FOREIGN KEY (`product_id`)"
        " REFERENCES `product` (`id`) ON DELETE CASCADE"
    )
    tables['substitutes_products'] = (
        " CREATE TABLE IF NOT EXISTS `substitutes_products` ("
        " `substitute_id` INT(10) UNSIGNED,"
        " `product_name` TEXT NOT NULL"
        " )ENGINE=InnoDB")
    # NOTE(review): `fk_product_name` references product.name, a TEXT column;
    # MySQL requires a length-bounded/indexed referenced key — confirm this
    # ALTER actually succeeds against a real server.
    alterings['substitutes_products'] = (
        " ALTER TABLE `substitutes_products` "
        " ADD CONSTRAINT `fk_substitute` FOREIGN KEY (`substitute_id`)"
        " REFERENCES `substitute` (`id`) ON DELETE CASCADE,"
        " ADD CONSTRAINT `fk_product_name` FOREIGN KEY (`product_name`)"
        " REFERENCES `product` (`name`) ON DELETE CASCADE"
    )
    # Create every table first; the ALTERs below depend on them existing.
    for name, ddl in tables.items():
        try:
            print("Creating table {}: ".format(name), end='')
            cursor.execute(ddl)
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
                print("already exists.")
            else:
                print(err.msg)
        else:
            print("Table added.")
    for name, ddl in alterings.items():
        try:
            print("Altering table {}: ".format(name), end='')
            cursor.execute(ddl)
        except mysql.connector.Error as err:
            print(err.msg)
        else:
            print("Table altered.")
def main():
    """
    Main part

    Creates the schema, then walks the OpenFoodFacts categories and inserts
    each sufficiently-populated category, its products, and the link rows.
    """
    ### Create Databases and tables ###
    cnx = mysql.connector.connect(user=USER, password=PASSWORD, host=HOST)
    cursor = cnx.cursor()
    create_db(cursor, cnx)
    populate_tables(cursor)

    ### Fetch the data from the API ###
    api_manager = ApiManager()
    categories = api_manager.categories()
    for category in categories:
        if category['products'] > MIN_PRODUCTS_PER_CATEGORY:
            ### Category ###
            cursor.execute("INSERT INTO `category` (name, url, tag) VALUES (%s, %s, %s)"
                           , (category['name'], category['url'], category['id']))
            category_index = cursor.lastrowid
            print("Category added ")
            cnx.commit()

            ### Products ###
            products = api_manager.category_products(category['id'])
            for product in products:
                try:
                    print(product['product_name'])
                    cursor.execute("INSERT INTO `product` (name, description, stores, grade, link, fat, sugars, salt, saturated_fat) \
                        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (product['product_name'], product['generic_name_fr'], product['stores'], \
                        product['nutrition_grades'], product['link'], product['nutrient_levels']['fat'], product['nutrient_levels']['sugars'], \
                        product['nutrient_levels']['salt'], product['nutrient_levels']['saturated-fat']))
                except KeyError:
                    # Product lacks a required field: skip it entirely.
                    # BUG FIX: previously the link-row insert below still ran after a
                    # KeyError, reusing a stale product_index (or raising NameError
                    # when the very first product of a run failed).
                    continue
                product_index = cursor.lastrowid
                print("Product added ")
                ### Mutual table ###
                cursor.execute("INSERT INTO `categories_products` (category_id, product_id) \
                    VALUES (%s, %s)", (category_index, product_index))
                cnx.commit()


if __name__ == '__main__':
    main()
| 2.796875 | 3 |
bids_app/models.py | daniel-afana/Bids | 0 | 12772446 | from django.db import models
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import ValidationError
class Channel(models.Model):
    """An advertising channel together with the bid types it accepts."""

    name = models.CharField(max_length=50)
    slug = models.CharField(max_length=50)
    # Closed set of supported pricing models (stored value, display value).
    BID_TYPES_CHOICES = (
        ("CPC", "CPC"),
        ("CPM", "CPM"),
        ("CPA", "CPA"),
        ("CPV", "CPV"),
        ("CPI", "CPI"),
    )
    # Postgres array of the bid types this channel accepts.
    bid_types = ArrayField(
        models.CharField(choices=BID_TYPES_CHOICES, max_length=3, default="CPM"),
    )

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Normalize to upper case so Campaign.save() can compare case-insensitively.
        self.bid_types = list(map(str.upper, self.bid_types))
        super(Channel, self).save(*args, **kwargs)
class Campaign(models.Model):
    """An ad campaign running on one Channel with a single bid."""

    name = models.CharField(max_length=50)
    channel = models.ForeignKey(Channel, on_delete=models.CASCADE)
    # NOTE(review): blank=True without null=True means an empty form value is
    # stored as NULL-less float — confirm the intended empty-value handling.
    bid = models.FloatField(blank=True)
    bid_type = models.CharField(max_length=3)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Uppercase, then enforce that the bid type is one the channel supports.
        # NOTE(review): validation in save() (not clean()) bypasses form-level
        # error reporting — confirm that is intended.
        self.bid_type = self.bid_type.upper()
        if self.bid_type not in self.channel.bid_types:
            raise ValidationError("Please, specify one of the bid types that belongs to the selected channel: {}"
                                  .format(self.channel.bid_types))
        super(Campaign, self).save(*args, **kwargs)
| 2.28125 | 2 |
ahd2fhir/mappers/ahd_to_medication.py | miracum/ahd2fhir | 3 | 12772447 | from fhir.resources.codeableconcept import CodeableConcept
from fhir.resources.coding import Coding
from fhir.resources.identifier import Identifier
from fhir.resources.medication import Medication, MedicationIngredient
from fhir.resources.meta import Meta
from fhir.resources.quantity import Quantity
from fhir.resources.ratio import Ratio
from structlog import get_logger
from ahd2fhir.utils.fhir_utils import sha256_of_identifier
# Module-level structlog logger.
log = get_logger()

# Medizininformatik-Initiative medication-module profile URL, attached to
# every Medication resource this mapper produces.
MEDICATION_PROFILE = (
    "https://www.medizininformatik-initiative.de/"
    + "fhir/core/modul-medikation/StructureDefinition/Medication"
)
def get_medication_from_annotation(annotation):
    """Map an AHD medication annotation to a FHIR Medication resource.

    Builds the coding from the annotation's first drug, attaches strength and
    dose form when present, and derives a deterministic resource id from the
    identifier. Returns None when the first drug has no ingredient.

    NOTE(review): only drugs[0] is considered; assumes every annotation carries
    at least one drug entry — confirm upstream guarantees.
    """
    medication = Medication.construct()
    drug = annotation["drugs"][0]

    if drug.get("ingredient") is None:
        return None

    # Medication Meta
    medication.meta = Meta.construct()
    medication.meta.profile = [MEDICATION_PROFILE]

    # Medication Code: choose the terminology system from the annotation source.
    codes = []
    if "Abdamed-Averbis" in str(drug["ingredient"]["source"]):
        system = "http://fhir.de/CodeSystem/dimdi/atc"
        # Abdamed concept ids can bundle several ATC codes separated by '-'.
        codes = str(drug["ingredient"]["conceptId"]).split("-")
    elif "RxNorm" in str(drug["ingredient"]["source"]):
        system = "http://www.nlm.nih.gov/research/umls/rxnorm"
        codes.append(str(drug["ingredient"]["conceptId"]))
    else:
        system = ""

    med_code = CodeableConcept.construct()
    med_code.coding = []
    for code in codes:
        med_coding = Coding.construct()
        med_coding.system = system
        med_coding.display = drug["ingredient"]["dictCanon"]
        med_coding.code = code
        med_code.coding.append(med_coding)
    medication.code = med_code

    # Medication Ingredient
    ingredient = MedicationIngredient.construct()
    medication.ingredient = [ingredient]
    ingredient.itemCodeableConcept = CodeableConcept.construct()
    ingredient.itemCodeableConcept.coding = [Coding()]
    ingredient.itemCodeableConcept.coding[0].display = drug["ingredient"]["dictCanon"]
    ingredient.itemCodeableConcept.coding[0].system = system

    # Identifier system derived from the annotation type (dots become dashes).
    medication_identifier_system = (
        "https://fhir.miracum.org/nlp/identifiers/"
        + f"{annotation['type'].replace('.', '-').lower()}"
    )
    medication.identifier = [Identifier()]
    medication.identifier[0].value = drug["ingredient"]["dictCanon"]
    medication.identifier[0].system = medication_identifier_system
    # Resource id is a stable hash of the identifier; it is re-derived below
    # each time more detail (strength, dose form) is folded into the value.
    medication.id = sha256_of_identifier(medication.identifier[0])

    # Without a usable strength, stop here with the ingredient-only resource.
    if (
        "strength" not in drug
        or drug["strength"] is None
        or "value" not in drug["strength"]
        or "unit" not in drug["strength"]
        or drug["strength"]["value"] is None
        or drug["strength"]["unit"] is None
    ):
        return medication

    strength = Ratio.construct()
    numerator = Quantity.construct()
    numerator.value = drug["strength"]["value"]
    numerator.unit = drug["strength"]["unit"]
    strength.numerator = numerator
    # Fold strength into the identifier so distinct strengths get distinct ids.
    medication.identifier[0].value = (
        drug["ingredient"]["dictCanon"]
        + "_"
        + str(drug["strength"]["value"])
        + drug["strength"]["unit"]
    )
    medication.id = sha256_of_identifier(medication.identifier[0])

    # Without a dose form, return the strength-only variant (ratio denominator
    # is left unset in that case).
    if "doseForm" not in annotation or annotation["doseForm"] is None:
        return medication

    denominator = Quantity.construct()
    denominator.value = 1
    denominator.unit = annotation["doseForm"]["dictCanon"]
    strength.denominator = denominator
    ingredient.strength = strength
    # Fold the dose form into the identifier as well.
    medication.identifier[0].value = (
        drug["ingredient"]["dictCanon"]
        + "_"
        + str(drug["strength"]["value"])
        + drug["strength"]["unit"]
        + "_"
        + annotation["doseForm"]["dictCanon"]
    )
    medication.id = sha256_of_identifier(medication.identifier[0])
    return medication
| 2.328125 | 2 |
promtimer/util.py | aartamonau/promtimer | 0 | 12772448 | <reponame>aartamonau/promtimer
#!/usr/bin/env python3
#
# Copyright (c) 2020 Couchbase, Inc All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import atexit
import time
from os import path
import urllib.request
import logging
# Repository root, resolved relative to this file's location.
ROOT_DIR = path.join(path.dirname(__file__), '..')


def get_root_dir():
    """Return the repository root directory path."""
    return ROOT_DIR
def index(alist, predicate):
    """Return the position of the first element satisfying *predicate*, or -1."""
    return next((pos for pos, item in enumerate(alist) if predicate(item)), -1)
def kill_node(process):
    """Best-effort kill of a child process.

    A process that has already exited raises OSError, which is deliberately
    swallowed (used as an atexit cleanup in start_process).
    """
    try:
        process.kill()
    except OSError:
        pass
def start_process(args, log_filename, cwd=None):
    """Spawn *args* as a child process.

    stdout/stderr are appended to *log_filename* (discarded when None), and the
    child is registered for kill-on-exit.
    :return: the subprocess.Popen handle
    """
    if log_filename is not None:
        # NOTE(review): the file handle opened here is never closed in the
        # parent, so one handle leaks per started process — confirm whether
        # that matters for promtimer's usage pattern.
        log_file = open(log_filename, 'a')
    else:
        log_file = subprocess.DEVNULL
    process = subprocess.Popen(args,
                               stdin=None,
                               cwd=cwd,
                               stdout=log_file,
                               stderr=log_file)
    # Ensure the child does not outlive this process.
    atexit.register(lambda: kill_node(process))
    return process
def poll_processes(processes, count=-1):
    """Poll the given processes until one of them terminates.

    :param count: maximum number of polling rounds; negative means "forever"
    :return: the exit code of the first terminated process, or None when the
             round limit is reached first
    """
    rounds = 0
    while count < 0 or rounds < count:
        for proc in processes:
            status = proc.poll()
            if status is not None:
                return status
        time.sleep(0.1)
        rounds += 1
def retry_get_url(url, retries):
    """GET *url*, retrying up to *retries* times with a 0.5s pause between tries.

    :return: the raw response body (bytes), or None if every attempt failed
    """
    req = urllib.request.Request(url=url, data=None)
    success = False
    get = None
    while (not success) and (retries > 0):
        try:
            get = urllib.request.urlopen(req).read()
            success = True
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the retry loop
            # impossible to interrupt.
            logging.debug('Attempting connection to {}, retrying... {} retries left'.format(url, retries))
            retries -= 1
            time.sleep(0.5)
    return get
| 2.078125 | 2 |
microtbs_rl/utils/plotter.py | alex-petrenko/simple-reinforcement-learning | 8 | 12772449 | """
Plot the training progress data collected by the Monitor.
"""
import csv
import matplotlib.pyplot as plt
from microtbs_rl.utils.common_utils import *
from microtbs_rl.utils.monitor import Monitor
# Palette cycled over experiments when plotting: experiment i gets COLORS[i].
# NOTE(review): 'lightpurple' is not a standard matplotlib color name; plotting
# more than 20 experiments would hit it and raise — confirm/replace.
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'black', 'purple', 'pink',
          'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
          'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue', 'yellow']

logger = logging.getLogger(os.path.basename(__file__))
def main():
    """Plot mean reward over training steps for the selected experiments.

    Reads each experiment's Monitor CSV (step, mean_reward), downsamples long
    runs to ~200 points, and draws all curves on one figure.
    """
    init_logger(os.path.basename(__file__))

    # Experiment groups kept around for quick switching; only `experiments`
    # below is actually plotted.
    _ = [
        get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'dqn_v0'),
        get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'dqn_v3_inception'),
        get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'a2c_v0'),
        get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'a2c_v1'),
        get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'a2c_v2_inception'),
        get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'a2c_v4_10steps_097'),
        get_experiment_name('MicroTbs-CollectWithTerrain-v0', 'openai_dqn'),
    ]
    experiments = [
        get_experiment_name('MicroTbs-CollectWithTerrain-v1', 'a2c_v0'),
        get_experiment_name('MicroTbs-CollectWithTerrain-v1', 'a2c_v1'),
        get_experiment_name('MicroTbs-CollectWithTerrain-v2', 'a2c_v5'),
    ]
    _ = [
        get_experiment_name('MicroTbs-CollectPartiallyObservable-v1', 'a2c_v2'),
        get_experiment_name('MicroTbs-CollectPartiallyObservable-v1', 'a2c_v3'),
        get_experiment_name('MicroTbs-CollectPartiallyObservable-v3', 'a2c_v5'),
    ]

    for i in range(len(experiments)):
        experiment = experiments[i]
        x, y = [], []
        stats_filename = Monitor.stats_filename(experiment)
        with open(stats_filename) as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                x.append(int(row[0]))
                y.append(float(row[1]))

        # Downsample to roughly 200 points so long runs stay readable.
        skip_coeff = max(1, len(x) // 200)
        x_filtered, y_filtered = [], []
        for j in range(len(x)):
            if j % skip_coeff == 0:
                x_filtered.append(x[j])
                y_filtered.append(y[j])
        x = x_filtered
        y = y_filtered

        logger.info('Plotting %s...', experiment)
        plt.plot(x, y, color=COLORS[i], label=experiment)

    plt.title('Reward over time')
    plt.xlabel('Training step (batch #)')
    plt.ylabel('Mean reward')
    plt.legend()
    plt.tight_layout()
    plt.show()


if __name__ == '__main__':
    sys.exit(main())
test/unit/config/test_reload_config.py | rikeshi/galaxy | 1,085 | 12772450 | <filename>test/unit/config/test_reload_config.py
import pytest
from galaxy import config
from galaxy.config import BaseAppConfiguration
from galaxy.config import reload_config_options
from galaxy.config.schema import AppSchema
# Names of the config options used throughout these tests: two reloadable,
# two non-reloadable. ('nonrelodable1' typo fixed; all uses go through N1.)
R1, R2, N1, N2 = 'reloadable1', 'reloadable2', 'nonreloadable1', 'nonreloadable2'  # config options

# Minimal schema mapping: every option declares a default; reloadable options
# are flagged explicitly (absence of the flag means non-reloadable).
MOCK_SCHEMA = {
    R1: {'reloadable': True, 'default': 1},
    R2: {'reloadable': True, 'default': 2},
    N1: {'default': 3},
    N2: {'default': 4},
}
def get_schema(app_mapping):
    """Wrap *app_mapping* in the nesting structure that AppSchema expects."""
    app_section = {'mapping': app_mapping}
    return {'mapping': {'_': app_section}}
@pytest.fixture
def mock_init(monkeypatch):
    """Patch schema loading so BaseAppConfiguration is built from MOCK_SCHEMA."""
    monkeypatch.setattr(AppSchema, '_read_schema', lambda self, path: get_schema(MOCK_SCHEMA))
    monkeypatch.setattr(BaseAppConfiguration, '_load_schema', lambda self: AppSchema(None, '_'))
def test_update_property(mock_init, monkeypatch):
    # This also covers adding a property: an option absent from the config file
    # is set to its default value, so adding a reloadable option to the file is
    # equivalent to modifying that option's value.
    # Simulated edits to the config file: R2 and N1 modified.
    monkeypatch.setattr(config, 'read_properties_from_file', lambda _: {R1: 1, R2: 42, N1: 99})
    appconfig = BaseAppConfiguration()

    # Before reload: all options hold their defaults.
    for option, default in ((R1, 1), (R2, 2), (N1, 3)):
        assert getattr(appconfig, option) == default

    reload_config_options(appconfig)

    assert getattr(appconfig, R1) == 1   # unchanged
    assert getattr(appconfig, R2) == 42  # reloadable option picked up the edit
    assert getattr(appconfig, N1) == 3   # edited, but non-reloadable: kept as-is
def test_overwrite_reloadable_attribute(mock_init, monkeypatch):
    # Similar to test_update_property, but the attribute is overwritten before
    # reloading. This can happen if a config property is modified AFTER being
    # loaded from schema or kwargs: e.g. load `foo`, then in a later step of
    # GalaxyAppConfiguration initialization do `foo = resolve_path(foo, bar)`.
    # Now `foo` no longer holds the value that was initially loaded, and if it
    # is reloadable it would be reset to its default as soon as the config file
    # changes. To prevent that, values read from the modified file are compared
    # against the `_raw_config` dict; this test ensures that works correctly.
    # Simulated edit to the config file: R2 modified.
    monkeypatch.setattr(config, 'read_properties_from_file', lambda _: {R1: 1, R2: 42})
    appconfig = BaseAppConfiguration()

    assert getattr(appconfig, R1) == 1
    assert getattr(appconfig, R2) == 2

    # Overwrite R1 after initial load, then reload.
    setattr(appconfig, R1, 99)
    assert getattr(appconfig, R1) == 99

    reload_config_options(appconfig)

    assert getattr(appconfig, R1) == 99  # overwritten value survives the reload
    assert getattr(appconfig, R2) == 42  # reloadable option picked up the edit
def test_cant_delete_property(mock_init, monkeypatch):
    # A property is never deleted on reload: we cannot tell whether it was
    # originally set to a default, loaded from a config file, an env var, etc.
    # So when a property is removed from the config file, it is left untouched.
    # Simulated edits to the config file: R2 and N2 deleted.
    monkeypatch.setattr(config, 'read_properties_from_file', lambda _: {R1: 1, N1: 3})
    appconfig = BaseAppConfiguration()

    defaults = ((R1, 1), (R2, 2), (N1, 3), (N2, 4))
    for option, default in defaults:
        assert getattr(appconfig, option) == default

    reload_config_options(appconfig)

    # Nothing changed: deleted options are retained, the rest were not edited.
    for option, default in defaults:
        assert getattr(appconfig, option) == default
| 2.328125 | 2 |