text stringlengths 8 6.05M |
|---|
import torch
import rlkit.torch.pytorch_util as ptu
import numpy as np
def fetch_preprocessing(obs,
                        actions=None,
                        normalizer=None,
                        robot_dim=10,
                        object_dim=15,
                        goal_dim=3,
                        zero_state_preprocessing_fnx=False,
                        lop_state_dim=3,
                        mask=None,
                        return_combined_state=True):
    """
    For Fetch robotics gym environment. Takes a flattened state and processes it into batched, normalized objects.

    Assumed row layout of `obs` (after lopping): robot state (nR) |
    nB object states (nFb each) | nB goals (nFg each); nB is inferred
    from the row width.

    :param obs: N x (nR + nB * nFb)
    :param actions: optional N x nA tensor; broadcast over blocks and
        concatenated to the left of the shared (robot) state.
    :param normalizer: optional object exposing normalize_all(states, actions).
    :param robot_dim: nR
    :param object_dim: nFb
    :param goal_dim: nFg
    :param zero_state_preprocessing_fnx: Zero out state for testing.
    :param lop_state_dim: trailing dims (gripper position) chopped off first.
    :param mask: optional N x nB 0/1 tensor zeroing out padding blocks.
    :param return_combined_state: if True return one concatenated tensor,
        otherwise the (shared, object+goal) pair.
    :return: N x nB x (nFb + nR). If in QValueCPA, concats actions to the left of the shared return
    """
    # Collapse a singleton middle dimension: N x 1 x D -> N x D.
    if len(obs.shape) == 3:
        obs = obs.squeeze(1)
    if lop_state_dim:
        obs = obs.narrow(1, 0, obs.size(1)-lop_state_dim) # Chop off the final 3 dimension of gripper position
    batch_size, environment_state_length = obs.size()
    if actions is not None:
        action_dim = actions.size(-1)
    else:
        action_dim = 0
    if zero_state_preprocessing_fnx:
        obs = torch.zeros(batch_size, environment_state_length).to(ptu.device)
    # Infer the number of blocks from the flattened width.
    nB = (environment_state_length - robot_dim) / (object_dim + goal_dim)
    assert nB.is_integer(), (nB, environment_state_length, robot_dim, object_dim, goal_dim) # TODO: this checks if the lopped state still breaks down into the right object dimensions. The only worry here is whether the obs was messed up at the start of the function, e.g. the samples from the replay buffer incorrectly put the lopped state somewwhere.
    nB = int(nB)
    if mask is None:
        # NOTE(review): uses ptu.get_device() here but ptu.device above —
        # confirm both exist in this rlkit version and agree.
        mask = torch.ones(obs.shape[0], nB).to(ptu.get_device())
    kwargs_state_length = robot_dim + object_dim * nB + goal_dim * nB
    assert kwargs_state_length == environment_state_length, F"{kwargs_state_length} != {environment_state_length}"
    # N x nR. From index 0 to shared dim per sample, we have the robot_state
    robot_state_flat = obs.narrow(1, 0, robot_dim)
    # assert (state_length - shared_dim - goal_state_dim) % block_feature_dim == 0, state_length - shared_dim - goal_state_dim
    # N x (nB x nFb)
    flattened_objects = obs.narrow(1, robot_dim, object_dim * nB)
    # -> N x nB x nFb
    batched_objects = flattened_objects.view(batch_size, nB, object_dim)
    # N x (nB x nFg) # TODO: perhaps add lop state dim
    flattened_goals = obs.narrow(1, robot_dim + nB * object_dim, nB * goal_dim)
    # -> N x nB x nFg
    batched_goals = flattened_goals.view(batch_size, nB, goal_dim)
    # Sanity check: the three slices must tile the observation exactly.
    assert torch.eq(torch.cat((
        robot_state_flat.view(batch_size, -1),
        batched_objects.view(batch_size, -1),
        batched_goals.view(batch_size, -1)), dim=1),
        obs).all()
    # Broadcast robot_state
    # -> N x nB x nR
    batch_shared = robot_state_flat.unsqueeze(1).expand(-1, nB, -1)
    # Concatenate with block_state
    # N x nB x (nFb + nR)
    # output_state = torch.cat((block_state, robot_state), dim=2)
    # return output_state
    # We can just consider the goals to be part of the block state, so we concat them together
    batch_objgoals = torch.cat((batched_objects, batched_goals), dim=-1)
    # clone() before multiplying: expand() returns a non-writable view.
    batch_shared = batch_shared.clone() * mask.unsqueeze(-1).expand_as(batch_shared)
    batch_objgoals = batch_objgoals.clone() * mask.unsqueeze(-1).expand_as(batch_objgoals)
    # assert torch.unique(batch_shared, dim=1).shape == torch.Size([batch_size, 1, robot_dim]), (
    #     torch.unique(batch_shared, dim=1).shape, torch.Size([batch_size, 1, robot_dim]))
    if normalizer is not None:
        # Flatten the block dimension so the normalizer sees one
        # (robot, object, goal) row per block, then restore the batching.
        robot_singleobj_singlegoal = torch.cat((batch_shared, batch_objgoals), dim=-1).view(batch_size * nB, robot_dim + object_dim + goal_dim)
        # Single objects means, we flatten the nB dimension
        norm_singlerobot_singleobj_singlegoal, norm_actions = normalizer.normalize_all(robot_singleobj_singlegoal, actions)
        # Set these two variables to be the normalized versions
        norm_singlerobot, norm_singleobj_singlegoal = torch.split(norm_singlerobot_singleobj_singlegoal, [robot_dim, object_dim + goal_dim], dim=-1)
        # Turn single objects back into batches of nB objects
        norm_batchobjgoals = norm_singleobj_singlegoal.contiguous().view(batch_size, nB, object_dim + goal_dim)
        norm_batchshared = norm_singlerobot.contiguous().view(batch_size, nB, robot_dim)
        # assert torch.unique(norm_batchshared, dim=1).shape == torch.Size([batch_size, 1, robot_dim]), (torch.unique(norm_batchshared, dim=1).shape, torch.Size([batch_size, 1, robot_dim]))
        batch_shared = norm_batchshared
        batch_objgoals = norm_batchobjgoals
        actions = norm_actions
    if actions is not None:
        # Broadcast the action over blocks, prepend it to the shared state.
        batch_shared = torch.cat((actions.unsqueeze(1).expand(-1, nB, -1), batch_shared), dim=-1)
        assert batch_shared.shape == torch.Size([batch_size, nB, robot_dim + action_dim]), (batch_shared.shape, torch.Size([batch_size, nB, robot_dim + action_dim]))
    if return_combined_state:
        batched_combined_state = torch.cat((batch_shared, batch_objgoals), dim=-1)
        return batched_combined_state
    else:
        return batch_shared, batch_objgoals
def invert_fetch_preprocessing(batched_shared,
                               batched_objects_and_goals,
                               robot_dim=10,
                               object_dim=15,
                               goal_dim=3,
                               num_blocks=None,
                               **kwargs):
    """Invert fetch_preprocessing: collapse per-block tensors to a flat obs.

    :param batched_shared: shared (robot) state replicated per block
    :param batched_objects_and_goals: per-block object+goal features
    :param num_blocks: nB, the number of blocks in the batch
    :return: N x (nShared + nB * (nObject + nGoal)) tensor
    """
    n_samples = batched_shared.size(0)
    per_block_shared = batched_shared.view(n_samples, num_blocks, batched_shared.size(-1))
    # Every block carries an identical copy of the shared state, so
    # dropping all but the first block loses no information.
    assert (per_block_shared[:, 0, :].unsqueeze(1) == per_block_shared).all(), per_block_shared
    shared_flat = per_block_shared[:, 0, :]
    objgoal_width = num_blocks * (object_dim + goal_dim)
    individual_flat = batched_objects_and_goals.contiguous().view(n_samples, objgoal_width)
    # Shared state first, then all object/goal features, flattened.
    return torch.cat((shared_flat, individual_flat), dim=-1)
def get_masks(curr_num_blocks, max_num_blocks, path_len, keepdim=False):
    """Build a 0/1 mask marking which of max_num_blocks slots hold real blocks.

    :param curr_num_blocks: number of live blocks (leading 1s)
    :param max_num_blocks: total slots; the trailing slots are zero padding
    :param path_len: >1 gives a (path_len, max_num_blocks) array, else 1-D
    :param keepdim: prepend a leading singleton axis to the result
    :return: float ndarray of ones followed by zeros
    """
    assert curr_num_blocks <= max_num_blocks
    n_pad = int(max_num_blocks - curr_num_blocks)
    if path_len > 1:
        live = np.ones((path_len, curr_num_blocks))
        dead = np.zeros((path_len, n_pad))
        masks = np.concatenate((live, dead), axis=1)
    else:
        masks = np.concatenate((np.ones(curr_num_blocks), np.zeros(n_pad)))
    if keepdim:
        masks = np.expand_dims(masks, axis=0)
    return masks
def pad_obs(obs, key, key_sizes, max_num_blocks=None, curr_num_blocks=None):
    """
    Pad the -1 dimension of `obs` out to `max_num_blocks` block slots,
    filling the new columns with the sentinel -999.

    :param obs: ndarray, 1-D (promoted to 2-D) or 2-D
    :param key: observation key; "goal" keys carry a trailing 3-dim state
    :param key_sizes: mapping key -> per-block feature width
    :param max_num_blocks: target number of block slots
    :param curr_num_blocks: blocks currently present in obs
    :return: padded 2-D ndarray
    """
    if len(obs.shape) == 1:
        obs = np.expand_dims(obs, 0)
    extra_blocks = int(max_num_blocks - curr_num_blocks)
    assert extra_blocks >= 0, extra_blocks
    pad_cols = extra_blocks * key_sizes[key]
    if "goal" in key:
        # Goal entries end with a 3-dim lopped gripper state that must stay
        # last: detach it, pad the body, then re-append it.
        tail = obs[:, -3:].copy()
        body = obs[:, :-3].copy()
        body = np.pad(body,
                      ((0, 0), (0, pad_cols)),
                      "constant", constant_values=((0, 0), (0, -999)))
        # No-op when max_num_blocks == curr_num_blocks.
        padded_obs = np.concatenate((body, tail), axis=-1).copy()
    else:
        if len(obs.shape) == 2:
            padded_obs = np.pad(obs, ((0, 0), (0, pad_cols)),
                                "constant", constant_values=((0, 0), (0, -999)))
        elif len(obs.shape) == 1:
            # NOTE(review): unreachable — obs was promoted to 2-D above;
            # branch kept verbatim from the original.
            padded_obs = np.pad(obs, ((0, pad_cols)),
                                "constant", constant_values=((0, -999)))
        else:
            raise NotImplementedError
    return padded_obs
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def cost(x, y, w):
    """Mean squared error of the linear hypothesis h(x) = w * x.

    :param x: input samples
    :param y: targets, indexed in step with x
    :param w: hypothesis weight
    :return: sum((w*x_i - y_i)^2) / len(x)
    """
    total = sum((w * xi - y[i]) ** 2 for i, xi in enumerate(x))
    return total / len(x)
# Toy dataset: h(x) = w*x fits y = x exactly, so cost is minimal at w = 1.
x = [1, 2, 3]
y = [1, 2, 3]
# Cost at a few sample weights (0 at w = 1).
print(cost(x, y, -1))
print(cost(x, y, 0))
print(cost(x, y, 1))
print(cost(x, y, 2))
print(cost(x, y, 3))
# color, marker and line style matter
# plt.plot(x, y, 'ro--')
# plt.show()
# Sweep w over [-3, 5] and plot the (w, cost) curve point by point.
for w in np.linspace(-3, 5, 50):
    c = cost(x, y, w)
    print(w, c)
    plt.plot(w, c, "ro")
plt.show()
|
import unittest
import numpy as np
from pathlib import Path
from pdb2sql import many2sql
from pdb2sql import pdb2sql
from .utils import CaptureOutErr
from . import pdb_folder
class TestMany2SQL(unittest.TestCase):
    """Smoke tests for the many2sql interface of pdb2sql."""

    def setUp(self):
        """Collect two 1AK4 decoy PDB files and matching table names."""
        decoys = [
            Path(pdb_folder, '1AK4', '1AK4_5w.pdb'),
            Path(pdb_folder, '1AK4', '1AK4_10w.pdb'),
        ]
        self.pdbs = decoys
        self.tablenames = [str(p) for p in decoys]

    def test_init_from_files(self):
        """many2sql accepts file paths."""
        many2sql(self.pdbs, tablenames=self.tablenames)

    def test_init_from_pdb_data(self):
        """many2sql accepts raw PDB line data."""
        databases = [pdb2sql(path) for path in self.pdbs]
        raw_data = [db.sql2pdb() for db in databases]
        many2sql(raw_data, tablenames=self.tablenames)

    def test_init_from_sql(self):
        """many2sql accepts pre-built pdb2sql instances."""
        many2sql([pdb2sql(path) for path in self.pdbs],
                 tablenames=self.tablenames)

    def test_call(self):
        """Calling the instance filters rows (here: chain A)."""
        db = many2sql(self.pdbs, tablenames=self.tablenames)
        db(chainID='A')

    def test_get_all(self):
        """get_all fetches columns from every table."""
        db = many2sql(self.pdbs, tablenames=self.tablenames)
        db.get_all('x,y,z', chainID='A')

    def test_get_intersection(self):
        """get_intersection fetches columns common to all tables."""
        db = many2sql(self.pdbs, tablenames=self.tablenames)
        db.get_intersection('x,y,z')

    def test_intersect(self):
        """intersect builds the intersection database."""
        db = many2sql(self.pdbs, tablenames=self.tablenames)
        db.intersect()
if __name__ == '__main__':
unittest.main() |
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
import time
import matplotlib.pyplot as plt
import sys
import itertools
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
import anfis
from membership import TrapezoidalMembFunc, make_trap_mfs, make_bell_mfs, BellMembFunc, Zero, make_zero
from experimental import train_anfis, test_anfis
def abc(a, b, c):
    """Run the trained ANFIS model on one (a, b, c) sample.

    Loads 'anfis_model.npy' from the working directory on every call and
    returns the model's scalar prediction as a Python float.
    """
    # NOTE(review): reloading the model per call is wasteful — hoist the
    # torch.load if this is ever called repeatedly.
    model = torch.load('anfis_model.npy')
    x = torch.tensor([[a, b, c]])
    # Fix: dropped a leftover debug print(type(x)); the returned float is unchanged.
    return model(x).item()
abc(0.0, 0.0, 0.0)
|
from __future__ import absolute_import
import os
import sys
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.base')
# go from /src/apps/workers to /src
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Make the project root importable before Django settings are touched.
sys.path.insert(0, BASE_DIR)
#os.chdir(BASE_DIR)
from django.conf import settings # noqa
app = Celery('brains')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
# Discover tasks.py modules in every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
#coding: utf-8
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import uuid
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# BigQuery schema for the dispersion.base table: one "name:TYPE" entry per
# CSV column. Every field is loaded as STRING; typing happens downstream.
TABLE_SCHEMA = (
    'idkey:STRING, '
    'fecha:STRING, '
    'ANO:STRING, '
    'MES:STRING, '
    'NOMBRE_GERENTE:STRING, '
    'CEDULA:STRING, '
    'NOMBRE:STRING, '
    'CARGO:STRING, '
    'COMISION:STRING, '
    'COSTO:STRING, '
    'INGRESO:STRING, '
    'ASEGURA:STRING, '
    'META_ASEGURA:STRING, '
    'OCUPACION:STRING, '
    'META_RECAUDO:STRING, '
    'EJECUCION_RECAUDO:STRING, '
    'OPERACION:STRING, '
    'CENTRO_DE_COSTOS:STRING, '
    'CATEGORIA:STRING, '
    'TIPO_DE_OPERACION:STRING, '
    'CC_TEAM_LEADER:STRING, '
    'NOMBRE_TEAM_LEADER:STRING, '
    'CC_EJECUTIVO:STRING, '
    'NOMBRE_EJECUTIVO:STRING, '
    'CUMPLIMIENTO:STRING, '
    'AHT:STRING, '
    'ASEGURAMIENTO:STRING, '
    'PRODUCTIVIDAD_EFECTIVIDAD:STRING, '
    'NIVEL_DESEMPENO:STRING, '
    'CLASIFICACION:STRING, '
    'CLASIFICACION_BINARIZADA:STRING, '
    'NO_CLASIFICADOS_BINARIZADO:STRING, '
    'SOBRESALIENTE_B:STRING, '
    'OBJETIVO_B:STRING, '
    'FO_BINARIZADO:STRING, '
    'DEFICIENTE_B:STRING, '
    'CLASIFICA:STRING, '
    'NO_CLASIFICA:STRING '
)
# (last entry deliberately has no trailing comma in the schema string)
class formatearData(beam.DoFn):
    """DoFn that turns one ';'-separated CSV line into a BigQuery row dict."""

    # CSV columns, in file order (indices 0-35 of the split line).
    _FIELDS = (
        'ANO', 'MES', 'NOMBRE_GERENTE', 'CEDULA', 'NOMBRE', 'CARGO',
        'COMISION', 'COSTO', 'INGRESO', 'ASEGURA', 'META_ASEGURA',
        'OCUPACION', 'META_RECAUDO', 'EJECUCION_RECAUDO', 'OPERACION',
        'CENTRO_DE_COSTOS', 'CATEGORIA', 'TIPO_DE_OPERACION',
        'CC_TEAM_LEADER', 'NOMBRE_TEAM_LEADER', 'CC_EJECUTIVO',
        'NOMBRE_EJECUTIVO', 'CUMPLIMIENTO', 'AHT', 'ASEGURAMIENTO',
        'PRODUCTIVIDAD_EFECTIVIDAD', 'NIVEL_DESEMPENO', 'CLASIFICACION',
        'CLASIFICACION_BINARIZADA', 'NO_CLASIFICADOS_BINARIZADO',
        'SOBRESALIENTE_B', 'OBJETIVO_B', 'FO_BINARIZADO', 'DEFICIENTE_B',
        'CLASIFICA', 'NO_CLASIFICA',
    )

    def __init__(self, mifecha):
        super(formatearData, self).__init__()
        self.mifecha = mifecha  # load date stamped onto every row

    def process(self, element):
        """Split one CSV line and emit a single row dict."""
        values = element.split(';')
        row = {'idkey': str(uuid.uuid4()),  # synthetic primary key
               'fecha': self.mifecha}
        # Explicit indexing (not zip) so a short line still raises
        # IndexError, exactly as the original element-by-element code did.
        row.update({name: values[i] for i, name in enumerate(self._FIELDS)})
        return [row]
def run(archivo, mifecha):
    """Build and launch the Beam pipeline.

    Reads the CSV at `archivo` (header skipped), formats each line with
    formatearData(mifecha) and appends the rows to the BigQuery table
    dispersion.base. Returns a fixed status string.
    """
    gcs_path = "gs://ct-dispersion" # bucket root
    gcs_project = "contento-bi"
    # DirectRunner locally; DataflowRunner when on the production host.
    mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
    pipeline = beam.Pipeline(runner=mi_runer, argv=[
        "--project", gcs_project,
        "--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
        "--temp_location", ("%s/dataflow_files/temp" % gcs_path),
        "--output", ("%s/dataflow_files/output" % gcs_path),
        "--setup_file", "./setup.py",
        "--max_num_workers", "10",
        "--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
        # "--num_workers", "30",
        # "--autoscaling_algorithm", "NONE"
        ])
    # lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181206 1100.csv", skip_header_lines=1)
    #lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-bancolombia/info-segumiento/BANCOLOMBIA_INF_SEG_20181129 0800.csv", skip_header_lines=1)
    lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
    transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
    # lines | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
    # transformed | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_seg", file_name_suffix='.csv',shard_name_template='')
    #transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/info-segumiento/info_carga_banco_seg",file_name_suffix='.csv',shard_name_template='')
    transformed | 'Escritura a BigQuery base' >> beam.io.WriteToBigQuery(
        gcs_project + ":dispersion.base",
        schema=TABLE_SCHEMA,
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
        )
    # transformed | 'Borrar Archivo' >> FileSystems.delete('gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT')
    # 'Eliminar' >> FileSystems.delete (["archivos/Info_carga_avon.1.txt"])
    jobObject = pipeline.run()
    # jobID = jobObject.job_id()
    # NOTE(review): DataflowRunner jobs are asynchronous — this returns
    # before the job actually finishes.
    return ("Corrio Full HD")
|
# Generated by Django 3.0.5 on 2020-05-27 21:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: update BookInstance.due_back field options
    (help text; field stays nullable/blankable)."""

    dependencies = [
        ('catalog', '0006_auto_20200527_1543'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bookinstance',
            name='due_back',
            field=models.DateField(blank=True, help_text='Enter a date between now and 4 weeks (default 3).', null=True),
        ),
    ]
|
import re
from six.moves.urllib.parse import urljoin
import requests
class RobotsTxt(object):
    """Lazily fetched, memoized view of a site's robots.txt."""

    def __init__(self, base_url, verify_ssl=True):
        self._url = urljoin(base_url, 'robots.txt')
        self._verify_ssl = verify_ssl
        self._response = None  # cached HTTP response, filled on first use

    @property
    def url(self):
        """Absolute URL of the robots.txt resource."""
        return self._url

    def _fetch(self):
        """Fetch robots.txt once and cache the response."""
        if self._response is None:
            self._response = requests.get(self.url, verify=self._verify_ssl)
        return self._response

    @property
    def status_code(self):
        """HTTP status code of the (possibly cached) fetch."""
        return self._fetch().status_code

    @property
    def directives(self):
        """Parsed directives of the fetched robots.txt body."""
        return self.parse_directives(self._fetch().text)

    @staticmethod
    def parse_directives(text):
        """Parse robots.txt text into a tuple of (field, value) pairs.

        Full-line comments are dropped and inline comments stripped.
        Blank lines are kept as '' markers (they delimit record groups);
        non-blank lines without a ':' are skipped entirely.
        """
        parsed = []
        for raw_line in text.splitlines():
            entry = raw_line.strip()
            if entry.startswith('#'):
                continue
            entry = re.sub(r'[\s]+#.*', '', entry)
            if not entry:
                parsed.append('')
                continue
            pieces = entry.split(':', 1)
            if len(pieces) == 2:
                field, value = (part.strip() for part in pieces)
                parsed.append((field, value))
        return tuple(parsed)
|
#the immutable tuple list of simple foods:
def main():
    """Print each entry of the immutable tuple of simple foods, one per line."""
    simple_foods = ('raw fish', 'nuts', 'berries', 'tree bark')
    print('\n'.join(simple_foods))
if __name__ == '__main__':
    main()
class ListNode:
    """Node of a singly linked list (LeetCode-style stub)."""
    def __init__(self, val=0, next=None):
        # `next` deliberately shadows the builtin to match the platform stub.
        self.val = val
        self.next = next
class Solution:
    def removeElements(self, head: ListNode, val: int) -> ListNode:
        """Return a fresh list with every node equal to `val` dropped.

        Note: kept nodes are newly allocated, leaving the input list
        untouched (same as the original implementation).
        """
        new_head = None
        tail = None
        current = head
        while current:
            if current.val != val:
                copied = ListNode(current.val)
                if tail is None:
                    new_head = copied
                else:
                    tail.next = copied
                tail = copied
            current = current.next
        return new_head
|
from django.contrib import admin
from flatblocks.models import FlatBlock
from flatblocks.settings import ADMIN_SHOW_ALL_SITES_BLOCKS
# Expose a site column filter only when cross-site block editing is enabled.
if ADMIN_SHOW_ALL_SITES_BLOCKS:
    list_filter = ['site', ]
else:
    list_filter = []
class FlatBlockAdmin(admin.ModelAdmin):
    """Admin for FlatBlock; optionally shows blocks from every site."""
    ordering = ['slug', ]
    list_display = ('slug', 'header', 'content', 'site')
    search_fields = ('slug', 'header', 'content')
    # module-level value computed above
    list_filter = list_filter
    def queryset(self, request):
        """
        Returns a QuerySet of all model instances that can be edited by the
        admin site. This is used by changelist_view.
        Copied from Django ModelAdmin with minimal changes

        NOTE(review): `queryset` / `get_query_set` are the pre-Django-1.6
        names (now `get_queryset`) — confirm the Django version in use.
        """
        if ADMIN_SHOW_ALL_SITES_BLOCKS:
            qs = self.model.all_objects.get_query_set()
        else:
            qs = self.model._default_manager.get_query_set()
        # TODO: this should be handled by some parameter to the ChangeList.
        ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)
        if ordering:
            qs = qs.order_by(*ordering)
        return qs
admin.site.register(FlatBlock, FlatBlockAdmin)
|
__author__ = 'Alberto Nieto'
__version__ = "0.1.0"
# from gis_utils import *
# from general_utils import *
# from geoalloc_utils import *
# from tradearea_utils import *
import gis_utils
import general_utils
import data_utils |
class Solution:
    def eraseOverlapIntervals(self, intervals):
        """Minimum number of intervals to remove so none overlap.

        Greedy: sort by right endpoint, count the maximum set of mutually
        non-overlapping intervals; everything else must be removed.

        :param intervals: list of [start, end] pairs
        :return: number of removals (0 for an empty input)
        """
        if not intervals:
            return 0
        intervals.sort(key=lambda interval: interval[1])
        kept = 1
        reach = intervals[0][1]
        # Fix: the original loop started at index 0 and compared the first
        # interval against itself, overcounting zero-length intervals.
        for start, end in intervals[1:]:
            if reach <= start:
                kept += 1
                reach = end
        return len(intervals) - kept
class Solution:
    def eraseOverlapIntervals(self, intervals: "List[List[int]]") -> int:
        """Minimum number of intervals to remove so none overlap.

        Greedy on the left endpoint: keep the interval with the smaller
        right end whenever two overlap.

        Fixes: the bare `List` annotation raised NameError at class-creation
        time (typing.List was never imported) — quoting defers evaluation;
        also dropped a leftover debug print.
        """
        if not intervals:
            return 0
        intervals.sort(key=lambda interval: interval[0])
        prev_end = intervals[0][1]
        removals = 0
        for start, end in intervals[1:]:
            if start < prev_end:
                # Overlap: drop one, keeping the shorter reach.
                removals += 1
                prev_end = min(prev_end, end)
            else:
                prev_end = end
        return removals
|
import re
# NOTE: this snippet is Python 2 (print statements throughout).
# Finding Patterns in Text
patterns = ['this', 'that']
text = 'Does this text match the patterns'
for pattern in patterns:
    print 'Looking for "%s" in "%s" ->' % (pattern, text),
    if re.search(pattern, text):
        print 'found a match'
    else:
        print 'no match'
'''
Looking for "this" in "Does this text match the patterns" -> found a match
Looking for "that" in "Does this text match the patterns" -> no match
'''
pattern = 'this'
match = re.search(pattern, text)
s = match.start()
e = match.end()
print 'Found "%s" in "%s" from %d to %d (%s)' % \
    (match.re.pattern, match.string, s, e, text[s:e])
'''
Found "this" in "Does this text match the patterns" from 5 to 9 (this)
'''
# Multiple Matches
text = 'abbaaabbbbaaaaa'
pattern = 'ab'
# findall() returns all matched substrings
for match in re.findall(pattern, text):
    print 'Found "%s"' % match
'''
Found "ab"
Found "ab"
'''
# finditer() returns an iterator that produces Match
for match in re.finditer(pattern, text):
    s = match.start()
    e = match.end()
    print 'Found "%s" at %d:%d' % (match.re.pattern, s, e)
'''
Found "ab" at 0:2
Found "ab" at 5:7
'''
# Compiling Expressions
# return a RegexObject
regex = re.compile('this')
text = 'Does this text match the pattern?'
print 'Looking for "%s" in "%s" ->' % (regex.pattern, text)
if regex.search(text):
    print 'found a match!'
else:
    print 'no match'
# Repetition
text = 'abbaaabbbbaaaaa'
# greedy
match = re.search('ab*', text)
print text[match.start() : match.end()]
# abb
# greedines can be turned off by following the repetition instruction with ?
match = re.search('ab*?', text)
print text[match.start() : match.end()]
# a
# NOTE(review): mutable default argument; harmless here because `patterns`
# is only read, but patterns=None would be the safe idiom.
def test_patterns(text, patterns = []):
    print text
    for pattern in patterns:
        print 'Matching "%s"' % pattern
        for match in re.finditer(pattern, text):
            s = match.start()
            e = match.end()
            print ' %2d : %2d = %s' % (s, e - 1, text[s:e])
# Anchoring
test_patterns('This is some text -- with punctuation.',
    [r'^\w+', # word at start of string
    r'\w+\S*$', # word at end of string, with optional punctuation
    r'\bt\w+', # 't' at start of word
    r'\w+t\b' # 't' at end of word
    ])
'''
This is some text -- with punctuation.
Matching "^\w+"
 0 : 3 = This
Matching "\w+\S*$"
 26 : 37 = punctuation.
Matching "\bt\w+"
 13 : 16 = text
Matching "\w+t\b"
 13 : 16 = text
'''
|
from flask import url_for, render_template, flash
from flask_login import login_user, current_user
from werkzeug.security import check_password_hash
from werkzeug.utils import redirect
import logging
import app.forms
from util.logutils import loghelpers
from app.models import User
# from main import app, logger
logger = logging.getLogger(__name__)
@loghelpers.log_decorator()
def processform(form: app.forms.Loginform):
    """Pull (email, password) out of a validated login form.

    :param form: the submitted Loginform
    :return: (email, password) tuple of the entered credentials
    """
    email = form.email.data
    password = form.password.data
    logger.info(f"{email=}")
    # Security fix: the plaintext password was previously written to the
    # log; record only that it was received.
    logger.info("password=<redacted>")
    return email, password
@loghelpers.log_decorator()
def login():
    """Render and process the login form.

    On valid POST: look the user up by email; on password match, log them
    in and redirect to the post list; a wrong password flashes and falls
    through to re-render; an unknown email flashes and redirects to
    registration. Otherwise renders the login template.
    """
    # NOTE(review): local import — presumably to dodge a circular import
    # with the app package; confirm.
    from app.forms import Loginform
    form = Loginform()
    if form.validate_on_submit():
        logger.info("POST")
        email, password = processform(form)
        logger.info(f"query user {email=}")
        user = User.query.filter_by(email=email).first()
        if user:
            logger.info("found user, check password")
            if check_password_hash(user.password, password):
                logger.info("password ok, login user")
                login_user(user)
                url = url_for("get_all_posts")
                logger.info(f"redirect: {url=}")
                return redirect(url)
            else:
                # Falls through to re-render the form with the flash message.
                logger.error("wrong password")
                flash("wrong password, try again")
        else:
            logger.error("user not found")
            flash("email not found, please register")
            url = url_for("register")
            logger.info(f"redirect: {url=}")
            return redirect(url)
    return render_template(
        "login.html",
        form=form,
        # loggedin=current_user.is_authenticated
    )
|
import unittest
from test.test_float import NAN
class ConditionalTests(unittest.TestCase):
    """Tutorial 'tests' exercising if/elif/else.

    They read from stdin and print rather than assert, so they are
    interactive demos, not real unit tests.
    """
    def test_if_else(self):
        a = input("please input a number")
        # Non-numeric input falls back to 0 (note: '0' also maps to 0,
        # because int('0') is falsy and the `or` branch fires).
        a = (a.isdigit() and int(a)) or 0
        if (a == 4):
            print("a is 4")
        else:
            print("a:%d isn't 4" % a)
    def test_if_elif_else(self):
        a = input("please input a number")
        # Non-numeric input falls back to NAN (imported from CPython's
        # internal float test module), so the final else can trigger.
        a = (a.isdigit() and int(a)) or NAN
        if (a == 10):
            print("a:%d is 10" % a)
        elif (a > 10):
            print("a:%d is greater than 10" % a)
        elif (a < 10):
            print("a:%d is less than 10" % a)
        else:
            print("something is wrong")
|
'''
Manage the pipeline : reading the logs, parsing them and generating stats.
'''
import os
import time
from monilog.parser import Parser
from monilog.statistics import Statistics
from monilog.utils import init_logger
HIGH_TRAFFIC_DUR = 2*60
STAT_DUR = 10
MAX_IDLE_TIME = 5*60
class MonilogPipeline:
    '''
    Read logs and generate statistics.

    Tails the log file, parses each line, emits stats every STAT_DUR
    seconds, and raises/recovers a high-traffic alert over a
    HIGH_TRAFFIC_DUR sliding window.

    Args:
        file (str): The file with the logs to monitor.
        threshold (int): Max traffic entries for the past 2 mn.
        stop (bool): Whether to stop the monitoring.
    '''
    def __init__(self,
                 file='/tmp/access.log',
                 threshold=10):
        self.file = file
        self.threshold = threshold
        self.stop = False

    def stop_monitoring(self):
        '''
        To call when the monitoring app should be stopped.
        '''
        self.stop = True

    def run(self):
        '''
        Run the monitoring pipeline until stopped, or until the log file
        has been idle for more than MAX_IDLE_TIME seconds.
        '''
        parser = Parser()
        get_stats = Statistics(STAT_DUR)
        alert = False
        high_traffic_nb = 0
        traffic_buffer = []
        if not os.path.exists(self.file):
            # Give the producer a moment to create the file; open() below
            # still raises if it never appears.
            time.sleep(1)
        # Fix: os.O_NONBLOCK was being passed as open()'s *buffering*
        # argument — os.O_* flags only apply to os.open().
        file = open(self.file, 'r')
        try:
            stat_time = time.time()
            high_traffic_time = time.time()
            start_idle_time = None
            idle_duration = 0
            logger = init_logger()
            # NOTE(review): this loop busy-spins while the file is idle;
            # consider a short sleep when readline() returns ''.
            while not self.stop:
                line = file.readline()
                if not line:
                    # No new data: track how long we have been idle.
                    if not start_idle_time:
                        start_idle_time = time.time()
                    else:
                        idle_duration = time.time() - start_idle_time
                    if idle_duration > MAX_IDLE_TIME:
                        logger.info(
                            'Stopping monitoring : Logging app not used for %d s.\n'
                            % (int(idle_duration))
                        )
                        self.stop = True
                else:
                    start_idle_time = None
                    idle_duration = 0
                    try:
                        parsed_line = parser(line)
                    except Exception:
                        # Unparsable line: skip it (was a bare except).
                        #logger.warning(f"There was an error parsing : {line}")
                        continue
                    traffic_buffer.append(
                        parsed_line
                    )
                    high_traffic_nb += 1
                if time.time() - stat_time >= STAT_DUR:
                    logger.info('\n'+get_stats(traffic_buffer))
                    stat_time = time.time()
                    traffic_buffer = []
                if time.time() - high_traffic_time >= HIGH_TRAFFIC_DUR:
                    if high_traffic_nb/HIGH_TRAFFIC_DUR > self.threshold and not alert:
                        alert = True
                        logger.warning(
                            "High traffic generated an alert - hits = %f, triggered at %s.\n"
                            % (
                                high_traffic_nb/HIGH_TRAFFIC_DUR,
                                time.strftime('%d/%b/%Y %H:%M:%S')
                            )
                        )
                    elif high_traffic_nb/HIGH_TRAFFIC_DUR <= self.threshold and alert:
                        # Fix: clear the flag; otherwise the recovery
                        # message repeated on every window forever.
                        alert = False
                        logger.info(
                            "The high traffic alert is recovered at %s.\n"
                            % (time.strftime('%d/%b/%Y %H:%M:%S'))
                        )
                    high_traffic_time = time.time()
                    high_traffic_nb = 0
        finally:
            # Fix: the file handle was previously leaked.
            file.close()
|
import requests
import feedparser
from typing import List
from readability import Document
from .story import Story
from .util import PlacementPreference
from .storyprovider import StoryProvider
class RSSFeedStoryProvider(StoryProvider):
    """Story provider that builds readable stories from an RSS feed's links."""
    def __init__(self, rss_path: str, limit: int = 5) -> None:
        # cap on how many feed entries are turned into stories
        self.limit = limit
        self.feed_url = rss_path
    def get_stories(self, limit: int = 5) -> List[Story]:
        """Fetch the feed, download each entry's page, and extract stories.

        NOTE(review): the `limit` parameter is ignored — the effective cap
        is min(self.limit, len(entries)); confirm whether callers rely on
        passing a per-call limit.
        """
        feed = feedparser.parse(self.feed_url)
        limit = min(self.limit, len(feed.entries))
        stories = []
        for entry in feed.entries[:limit]:
            if "link" in entry.keys():
                req = requests.get(entry["link"])
                if not req.ok:
                    # Best-effort: skip entries whose page can't be fetched.
                    print(f"Honk! Couldn't grab content for {self.feed_url}")
                    continue
                doc = Document(req.content)
                # NOTE(review): assumes a 'www.<name>.<tld>' link; the
                # second dotted component becomes the byline — fragile for
                # bare domains.
                source = entry["link"].split(".")[1]
                stories.append(
                    Story(doc.title(), body_html=doc.summary(), byline=source)
                )
        return stories
|
from django.shortcuts import render
from .forms import BlogDetail
from .models import Blog
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, DeleteView, UpdateView, ListView, DetailView
@method_decorator(login_required, name = "dispatch")
class createBlog(CreateView):
    """Authenticated view creating a Blog from BlogDetail, stamping the
    logged-in user on it before saving."""
    form_class = BlogDetail
    template_name = 'myblog/create.html'
    success_url = '/accounts/profile/'
    def form_valid(self, form):
        # Save manually so the owner can be attached first.
        self.object = form.save(commit=False)
        self.object.user = self.request.user
        self.object.save()
        return super().form_valid(form)
class DeleteBlog(DeleteView):
    """Delete a blog post owned by the requesting user."""
    # reuse the form's model (Blog) rather than naming it twice
    model = BlogDetail.Meta.model
    success_url = '/myblog/allblog/'
    def get_queryset(self):
        # NOTE(review): filters on `author` while createBlog assigns
        # `.user` — confirm which field name the Blog model actually has.
        return Blog.objects.filter(
            author = self.request.user
        )
    def get(self, request, *args, **kwargs):
        # Deletes on plain GET (skips the confirmation page).
        # NOTE(review): a GET with side effects bypasses CSRF protection —
        # confirm this is intended.
        return self.post(request, *args, **kwargs)
class UpdatedBlog(UpdateView):
    """Edit an existing Blog (looked up via the `id` URL kwarg)."""
    form_class = BlogDetail
    pk_url_kwarg = 'id'
    success_url = '/accounts/profile/'
    model = Blog
    template_name = 'myblog/update.html'
    def form_valid(self, form):
        # Re-stamp the requesting user as owner before saving.
        self.object = form.save(commit=False)
        self.object.user = self.request.user
        self.object.save()
        return super().form_valid(form)
class AllBlog(ListView):
    """List every Blog, exposed to the template as `data`."""
    model = Blog
    template_name = 'myblog/index.html'
    context_object_name ='data'
def detail(request, pk):
    """Render a single Blog (by primary key) in the post_detail template.

    NOTE(review): Blog.objects.get raises DoesNotExist for a bad pk;
    get_object_or_404 would turn that into a 404 instead of a 500.
    """
    user_obj = Blog.objects.get(id=pk)
    return render(request, 'myblog/post_detail.html', context={
        'data':user_obj
    })
|
# Read an inclusive range [m, n] and label each value by sign.
n=int(input("enter upper range"))
m=int(input("enter lower range"))
for x in range(m,n+1):
    if x>=0:
        # zero is reported on the positive side
        print("positives are:",x)
    else:
        print("negatives are:",x)
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 10:07:06 2019
@author: HP
"""
import math
def Det(x, n):
    """Determinant of the n x n matrix `x` (list of lists) by Laplace
    expansion along the first row.

    Fixes two bugs in the original: the cofactor sign used math.pow(1, i)
    (always +1) instead of (-1)**i, and the minor was built with
    NumPy-style tuple indexing (x[1:n, cols]), which raises TypeError on
    plain lists.

    :param x: square matrix as a list of equal-length lists
    :param n: order of the matrix
    :return: the determinant (int or float, matching the entries)
    """
    if len(x[0]) == 1:
        return x[0][0]
    total = 0
    for i in range(n):
        # Minor: drop row 0 and column i.
        minor = [row[:i] + row[i + 1:] for row in x[1:n]]
        total += (-1) ** i * x[0][i] * Det(minor, n - 1)
    return total
x=[[1,2,3],[4,5,6],[7,8,9]]
print(Det(x,3)) |
from util.subprocess import register
reg = register()
f = open('code.txt', 'w') # 'w' truncates the file before writing
f.write(reg.getCombinNumber()) # write() accepts str only
f.close()
# sample output: F4:8E:38:99:38:28BFEBFBFF000306C3WD-WCC6Z6DFR1S6/5257GD2/CN70163654032M/
from distutils.core import setup, Extension
# distutils build script for the `_system` C extension module.
module1 = Extension('_system',
                    sources=['_system.c'])
setup(name='_system',
      version='0.1',
      description='Basic system commands',
      ext_modules=[module1])
|
# -*- coding: utf-8 -*-
# @Author: Fallen
# @Date: 2020-04-19 14:36:33
# @Last Modified by: Fallen
# @Last Modified time: 2020-04-19 15:37:29
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-18 16:59:44
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
import re
# 1. Match an integer or a decimal (positive or negative)
'-?\d+(\.\d+)?'
# 2. Match a year-month-day date such as 2018-12-6
'\d{4}([\-|\/|.])\d{1,2}\1\d{1,2}' # \1 backreferences group 1: the same separator must repeat
'\d{4}-\d{1,2}-\d{1,2}'
## a more precise version
'[1-9]\d{3}([\-|\/|.])(1[0-2]|0?[1-9])\1([12]\d|3[01]|0?[1-9])'
# 3. Match a QQ number
# first digit non-zero, 5-10 digits long
'[1-9]\d{4,9}'
# 4. An 11-digit mobile phone number
# starts with 1, second digit 3-9, 11 digits in total
'1[3-9]\d{9}'
# 5. A user password of length 8-10: digits, letters and underscores
'^\w{8,10}$'
# 6. A captcha: 4 characters of digits and letters
'[0-9a-zA-Z]{4}'
# 7. Match an email address
# the next two attempts are wrong
# '\w+((-\w+)|(\.\w+))*\@[0-9a-zA-Z]+([\.-+|](0-9a-zA-Z)+)*\.[0-9a-zA-Z]+)'
# '(\w+([+.-]*\w+)*\@\w+([+.-]*\w+)*\.([+.-]*\w+)+'
# email rules:
# before the @ there must be content: letters (any case), digits, underscore, hyphen, dot
# between the @ and the last dot: letters, digits, dot, hyphen; two dots may not be adjacent
# after the last dot: only letters/digits, 2 to 6 characters long
# [-\w.]+@([-\da-zA-Z]+\.)+[a-zA-Z\d]{2,6}
# 8. From a string like
a = '<a>wahaha</a><b>banana</b><h1>qqxing</h1>'
# (above),
# 1) extract the contents wahaha, banana, qqxing
# 2) extract the tag names a, b, h1
import re
ret = re.findall('<(\w+)>(\w+)</(\w+)>',a)
print(ret)
# 1)
ret = re.findall('<\w+>(\w+)</\w+>',a)
print(ret)
# 2)
ret = re.findall('<(\w+)>\w+</\w+>',a)
print(ret)
# 9.
x = '1-2*((60-30+(-40/5)(9-25/3+7/399/42998+10568/14))-(-43)/(16-3*2))'
# 1) match the innermost parentheses and the expressions inside them
# '\(-?(\d+[+\-*/]*)*\)'
ret = re.findall('\(-?(?:\d+[+\-*/]*)*\)',x)
ret2 = re.findall('\([^()]+\)',x) # better: a parenthesis pair containing no nested parentheses
print(ret) #['(-40/5)', '(9-25/3+7/399/42998+10568/14)', '(-43)', '(16-3*2)']
print(ret2)
# 10. From an expression like 9-25/3+7/399/42998+10568/14, match the first multiplication or division (left to right)
x = '9-25/3+7/399/42998+10568/14'
ret = re.search('[1-9]\d*[*|/]-?[1-9]\d*',x)
print(ret.group())
## what about decimals?
'\d+(\.\d+)+[*|/]-?\d(\.\d+)+'
# 11. Match an English article title, e.g. The Voice Of China
'([A-Z][a-z]+ ?)+'
# 12. Match a URL
'(http)?s?:?(\\\\)?(www\.)?(.*?).(com|cn|net)(/.*)*'
# 13. From lianjia.com listings, extract title, layout and area; results like:
#[('金台路交通部部委楼南北大三居带客厅 单位自持物业', '3室1厅', '91.22平米'), ('西山枫林 高楼层南向两居 户型方正 采光好', '2室1厅', '94.14平米')]
import requests
import re
import json
def getPage(url):
    """Download `url` and return the response body as text."""
    return requests.get(url).text
def parsePage(s):
    """Yield one dict per listing found in the page HTML `s`.

    Each dict has the keys title, region, style, size, direction and
    decoration, extracted with named regex groups. Matches are echoed to
    stdout, exactly as before.
    """
    listing_re = re.compile(
        '<a class="title" href=.*?data-el="ershoufang">(?P<title>.*?)</a>.*?<div class="info">(?P<region>.*?)<span>/</span>(?P<style>.*?)<span>/</span>(?P<size>.*?)<span>/</span>(?P<direction>.*?)<span>/</span>(?P<decoration>.*?)</div>', re.S)
    for found in listing_re.finditer(s):
        print(found.group())
        yield {
            "title": found.group("title"),
            "region": found.group("region"),
            "style": found.group("style"),
            "size": found.group("size"),
            "direction": found.group("direction"),
            "decoration": found.group("decoration"),
        }
def main():
    """Crawl one Lianjia result page and append each parsed listing to the
    'lianjia' file as one JSON object per line."""
    url = r'https://lf.lianjia.com/ershoufang/rs%E5%9B%BA%E5%AE%89%E5%8E%BF/'
    response_html = getPage(url)
    ret = parsePage(response_html)
    # Fix: the file handle was never closed; the context manager
    # guarantees it even if parsing raises.
    with open('lianjia', mode='a', encoding='utf-8') as f:
        for obj in ret:
            print(obj)
            data = json.dumps(obj, ensure_ascii=False)
            f.write(data + "\n")
if __name__ == '__main__':
    main()
# 14. From an expression like 9-25/3+7/399/42998+10*568/14, match every multiplication or division
s = '9-25/3+7/399/42998+10*568/14'
ret = re.findall('([1-9]\d*[\*|\/][1-9]\d*?)[+\-*/]?',s)
print(ret) #['25/3', '7/3', '99/4', '10*5', '68/1']
|
# Read year, month, day (the prompts are Korean: 년=year, 월=month, 일=day).
year,month,day = int(input("년 : ")), int(input("월 : ")), int(input("일 : "))
#2008 % 10 => 8
# if str(year - month + day)[-1] == '0':
# Prints "great year!" if (year - month + day) ends in 0, otherwise "so-so".
if (year - month + day) % 10 == 0:
    print("올해 대박")
else:
    print("그럭저럭")
|
'''Write the above solution in a function which takes take numbers and return the bigger number
[topic covered: function]'''
# Read the three candidate numbers from the user as floats.
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
num3 = float(input("Enter third number: "))
def largest(n1, n2, n3):
    """Return the largest of the three numbers."""
    return max(n1, n2, n3)
# Report the largest of the three numbers entered above.
print(largest(num1,num2,num3))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url,include
from .views import inicio,contato
# Django URL routes: site root -> inicio view, /contato/ -> contato view.
urlpatterns = [
    url(r'^$', inicio,name='inicio'),
    url(r'^contato/$', contato,name='contato'),
]
|
# Usage: $ python3 get_comment_ratio.py /home/kevin/Desktop/sac-data/stats output.csv
# python3 get_comment_ratio.py <merged_files> <output_path>
#
# Merges all the extracted contribution per tag data into one single file.
__author__ = 'kevin'
import sys
import csv
import os
# RQ 1: Generate a csv file for each project with: file, release, if its SAC, LOC
# Column order for the summary CSV written by main().
csv_header = ['project',
              'mean SAC', 'mean non SAC', "mean change",
              'pVal']
def mean(l):
    """Arithmetic mean of *l*; NaN for an empty sequence."""
    if not l:
        return float('nan')
    return float(sum(l)) / len(l)
def main(argv):
    """Summarize per-project SumCyclomatic means for SAC vs non-SAC files.

    argv[1]: directory of per-project stats CSVs; argv[2]: output CSV path.
    A file is SAC when its top single-developer contribution is >= 90 percent.
    """
    data_dir, output_file = argv[1], argv[2]
    summary_rows = []
    for data_file in os.listdir(data_dir):
        with open(os.path.join(data_dir, data_file), newline="") as csv_file:
            # Relevant input columns:
            #   top_single_dev_contribution_knowledge_percent, SumCyclomatic
            records = [
                (float(row['top_single_dev_contribution_knowledge_percent']),
                 float(row['SumCyclomatic']) if row['SumCyclomatic'] else 0)
                for row in csv.DictReader(csv_file)
            ]
        complexity_sac = [cyc for pct, cyc in records if pct >= 90]
        complexity_non_sac = [cyc for pct, cyc in records if pct < 90]
        mean_sac = mean(complexity_sac)
        mean_non_sac = mean(complexity_non_sac)
        summary_rows.append({
            'project': data_file,
            'mean SAC': round(mean_sac, 2),
            'mean non SAC': round(mean_non_sac, 2),
            'mean change': round(mean_non_sac - mean_sac, 2),
            'pVal': "TO CALCULATE",
        })
    with open(output_file, 'w', newline='') as output:
        writer = csv.DictWriter(output, csv_header)
        writer.writeheader()
        writer.writerows(summary_rows)
if __name__ == "__main__":
    main(sys.argv)
|
"""
This is a game of BlackJack.
"""
from random import randint
# Here we should make some Objects(classes) and Functions for the BJ game
# First define the class Account with all its attributes
class Account:
    """A simple betting account holding a cash balance."""

    def __init__(self, balance=0):
        self.balance = balance

    def add_funds(self, funds_amt=0):
        """Add funds and return the new balance."""
        self.balance += funds_amt
        return self.balance

    def place_bet(self, bet_amt):
        """Keep prompting until the bet fits the balance, then confirm it."""
        while bet_amt > self.balance:
            print(f'Not enough cash\nBalance: {self.balance}')
            # Bug fix: the original formatted a module-level `balance` variable
            # here instead of this account's own balance.
            bet_amt = int(input(f'Place your bet (Balance: {self.balance}) : '))
        print(f'A bet of {bet_amt} $ was placed')

    def loosing_bet(self, bet_amt):
        """Deduct a lost bet; the balance never drops below zero."""
        self.balance -= bet_amt
        if self.balance < 0:
            self.balance = 0
        return self.balance

    def winning_bet(self, bet_amt):
        """Credit a won bet at 2:1 and return the new balance."""
        self.balance += bet_amt * 2
        return self.balance

    def tie_bet(self):
        """A push: the balance is unchanged."""
        return self.balance
# Try making a class for both players, i.e. the house and the player
class House:
    """Dealer's hand for the blackjack game (initial_draw relies on the module-level card_names)."""
    def __init__(self, house_hand = []):
        # NOTE(review): mutable default argument — every House() created without
        # an argument would share the same list; confirm no caller relies on this.
        self.house_hand = house_hand
    def initial_draw(self, stack):
        """Draw the dealer's two starting cards and print the face-up one."""
        self.house_hand = []
        # NOTE(review): `draw` is a card VALUE taken from the stack but is then
        # reused as an INDEX in stack[draw] / stack.pop(draw), so the card kept
        # and the card removed generally differ — looks like a bug; verify.
        draw = stack[randint(0,51)]
        self.house_hand.append(stack[draw])
        stack.pop(draw)
        draw = stack[randint(0,50)]
        self.house_hand.append(stack[draw])
        stack.pop(draw)
        house_cards = []
        for (i, number) in enumerate(self.house_hand):
            house_cards.append(card_names[str(self.house_hand[i])])  # uses global card_names
        print('House: ' + str(house_cards[0]))
        house_hand = self.house_hand
        return stack, house_hand
    def show_hand(self, card_names):
        """Print only the dealer's face-up (first) card."""
        house_cards = []
        for (i, number) in enumerate(self.house_hand):
            house_cards.append(card_names[str(self.house_hand[i])])
        print('House: ' + str(house_cards[0]))
    def show_hand_final(self, card_names):
        """Print the dealer's full hand at the end of the round."""
        house_cards = []
        house_cards_string = ''
        for (i, number) in enumerate(self.house_hand):
            house_cards.append(card_names[str(self.house_hand[i])])
            house_cards_string += card_names[str(self.house_hand[i])] + ' '
        print('House: ' + str(house_cards_string))
class Player:
    """Player's hand for the blackjack game (initial_draw relies on the module-level card_names)."""
    def __init__(self, card_sum = 0, player_hand = []):
        # NOTE(review): mutable default argument for player_hand — shared across
        # no-arg instances; confirm no caller relies on this.
        self.card_sum = card_sum
        self.player_hand = player_hand
    def initial_draw(self, stack):
        """Draw the player's two starting cards and print them."""
        self.card_sum = 0
        self.player_hand = []
        # NOTE(review): same value-used-as-index pattern as House.initial_draw —
        # the appended card and the popped card can differ; verify intent.
        draw = stack[randint(0, 49)]
        self.player_hand.append(stack[draw])
        stack.pop(draw)
        draw = stack[randint(0, 48)]
        self.player_hand.append(stack[draw])
        stack.pop(draw)
        player_cards = []
        player_cards_string = ''
        for (i, number) in enumerate(self.player_hand):
            player_cards.append(card_names[str(self.player_hand[i])])  # uses global card_names
            player_cards_string += card_names[str(self.player_hand[i])] + ' '
        print('You: ' + player_cards_string)
        player_hand = self.player_hand
        return stack, player_hand
    def new_card(self, stack, counter):
        """Draw one more card for the player ("hit") and return the stack."""
        draw = stack[randint(0, counter)]
        self.player_hand.append(stack[draw])
        stack.pop(draw)
        return stack
    def show_hand(self, card_names):
        """Print the player's full hand."""
        player_cards = []
        player_cards_string = ''
        for (i, number) in enumerate(self.player_hand):
            player_cards.append(card_names[str(self.player_hand[i])])
            player_cards_string += card_names[str(self.player_hand[i])] + ' '
        print('You: ' + player_cards_string)
# Then, define functions for the game itself
def bust_check(house_cards, player_cards, card_values):
    """Score both hands and report who (if anyone) is bust.

    One ace (card code 14) per hand may be downgraded from 11 to 1.
    Returns (bust, house_total, player_total) where bust is
    0 = nobody bust, 1 = player bust, 2 = house bust.
    """
    house_total = sum(card_values[str(card)] for card in house_cards)
    player_total = sum(card_values[str(card)] for card in player_cards)
    # Soft-hand adjustment: count a single ace as 1 instead of 11 when over 21.
    if house_total > 21 and 14 in house_cards:
        house_total -= 10
    if house_total > 21:
        return 2, house_total, player_total
    if player_total > 21 and 14 in player_cards:
        player_total -= 10
    if player_total > 21:
        return 1, house_total, player_total
    return 0, house_total, player_total
def final_draw(house_hand, player_hand, card_values, stack, counter):
    """Dealer keeps drawing while behind the player and nobody is bust.

    Returns (bust, house_total, player_total) exactly as bust_check does.
    """
    bust, house_cards_values, player_cards_values = bust_check(house_hand, player_hand, card_values)
    while player_cards_values > house_cards_values and bust == 0:
        # NOTE(review): `draw` is a card value reused as an index (same pattern
        # as House.initial_draw) — appended card and popped card can differ.
        draw = stack[randint(0, counter)]
        house_hand.append(stack[draw])
        house_cards_values += card_values[str(stack[draw])]
        stack.pop(draw)
        counter -= 1
        bust, house_cards_values, player_cards_values = bust_check(house_hand, player_hand, card_values)
    return bust, house_cards_values, player_cards_values
def win_check(house_cards_values, player_cards_values):
    """Decide the round winner from the two hand totals.

    Returns 'House', 'Player' or 'Tie'.  Bug fix: the original tested
    `house >= player` first, which swallowed equal totals and made the
    tie branch unreachable; equality is now checked first so the caller's
    'Tie' handling (refund the bet) actually runs.
    """
    if house_cards_values == player_cards_values:
        return 'Tie'
    if house_cards_values > player_cards_values:
        return 'House'
    return 'Player'
def start_rerun():
    """Reset the module-level deck to a fresh 52-card stack (values 2..14, four of each)."""
    global stack
    stack = [value for value in range(2, 15)] * 4
# Deck lookup tables: card code -> display name / blackjack value.
card_names = {'2':'2', '3':'3', '4':'4', '5':'5', '6':'6', '7':'7', '8':'8', '9':'9', '10':'10', '11':'J', '12':'Q', '13':'K','14':'A'}
card_values = {'2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, '11':10, '12':10, '13':10, '14':11}
stack = list(range(2,15))*4
# Here we code the actual game
player_account = Account()
balance = player_account.add_funds(100)
# NOTE(review): House/Player receive the whole stack as their first positional
# argument (house_hand / card_sum) — looks unintended; initial_draw resets the
# attributes anyway, but verify.
house = House(stack)
player = Player(stack)
run = 1
while run == 1:
    # Ask the player for his bet and safe under variable
    bet_amt = int(input(f'Place your bet (Balance: {balance} $) : '))
    player_account.place_bet(bet_amt)
    # Some random function deals two cards to the dealer and two to the player and prints
    # the cards (1 of the dealer and both of the player)
    stack, house_hand = house.initial_draw(stack)
    stack, player_hand = player.initial_draw(stack)
    # The game asks for an input of the player weather the Player wants another card or not
    # if yes: it deals another card and prints the cards
    # Then it checks for the sum of the cards and returns BUST in case the sum is above 21
    # expect if one of the cards is an Ace
    counter = 47
    bust = 0
    while bust == 0:
        new_card = input('Do you want another card (Y/N)? ')
        if new_card == 'Y' or new_card == 'y' or new_card == 'yes' or new_card == 'Yes' or new_card == '+' or new_card == '1':
            stack = player.new_card(stack, counter)
            counter -= 1
            bust, house_cards_values, player_cards_values = bust_check(house_hand, player_hand, card_values)
            if bust == 0:
                house.show_hand(card_names)
                player.show_hand(card_names)
            else:
                pass
        else:
            break
    if bust == 0:
        # Player stood without busting: dealer draws, then settle the bet.
        bust, house_cards_values, player_cards_values = final_draw(house_hand, player_hand, card_values, stack, counter)
        house.show_hand_final(card_names)
        player.show_hand(card_names)
        if bust == 2:
            print('The house has bust, you win!')
            balance = player_account.winning_bet(bet_amt)
            print(f'Your new balance is {balance} $')
        else:
            win = win_check(house_cards_values, player_cards_values)
            if win == 'House':
                print('The house wins!')
                balance = player_account.loosing_bet(bet_amt)
                print(f'Your new balance is {balance} $')
            elif win == 'Player':
                print('You win!')
                balance = player_account.winning_bet(bet_amt)
                print(f'Your new balance is {balance} $')
            elif win == 'Tie':
                print("It's a tie!")
                balance = player_account.tie_bet()
                print(f'Your balance is still {balance} $')
            else:
                print('Whoops, there is an error!')
    elif bust == 1:
        house.show_hand_final(card_names)
        player.show_hand(card_names)
        print('BUST!')
        balance = player_account.loosing_bet(bet_amt)
        print(f'Your new balance is {balance} $')
    elif bust == 2:
        print('The house has bust, you win!')
        balance = player_account.winning_bet(bet_amt)
    else:
        print('error')
    rerun = input('You want to keep playing (Y/N)? ')
    if rerun == 'Y' or rerun == 'y' or rerun == 'yes' or rerun == 'Yes' or rerun == '+' or rerun == '1':
        start_rerun()
    else:
        print(f'You leave the table with {balance} $')
        run = 0
        print('Goodbye! :)')
|
import cv2 as cv
# Load the image unchanged (flag -1 == cv.IMREAD_UNCHANGED, keeps alpha channel).
src = cv.imread("D:/python_file/Opencv3_study_file/images/PT_Picture.jpg",-1)
cv.namedWindow("NO.1 image",cv.WINDOW_AUTOSIZE)
cv.imshow("NO.1 image",src)
# Block until any key is pressed, then close all OpenCV windows.
cv.waitKey(0)
cv.destroyAllWindows()
print("Hi,Python!")
# Python Coroutines and Tasks.
# Coroutines declared with async/await syntax is the preferred way of writing asyncio applications.
#
# To actually run a coroutine, asyncio provides three main mechanisms:
#
# > The asyncio.run() function to run the top-level entry point "main()" function.
# > Awaiting on a coroutine.
# > The asyncio.create_task() function to run coroutines concurrently as asyncio Tasks.
# asyncio.as_completed(aws, *, loop=None, timeout=None):
# Run awaitable objects in the aws set concurrently. Return an iterator of Future objects. Each Future object returned represents the earliest result from the
# set of the remaining awaitables.
#
# Raises asyncio.TimeoutError if the timeout occurs before all Futures are done.
#
# Example:
#
# NOTE(review): illustrative snippet only — `await` is valid solely inside an
# `async def`, and `as_completed` / `aws` are not defined in this file.
for f in as_completed(aws):
    earliest_result = await f
    # ...
|
import argparse
import datetime
import logging
import platform
import aq.aq_external as aq
import loading.data_loading as dl
from aq.aq_description import Fact
from jsm.jsm_analysis import FactBase, search_norris
from gui.graph_gen import generate_graph
# Accepted values for the --loglevel CLI option (lowercase logging level names).
log_levels = ['debug', 'info', 'warning', 'error']
def parse_args():
    """Parse the AQJSM miner command line.

    Returns (args, comment) where `comment` is the list of unrecognized
    arguments from parse_known_args().
    """
    argparser = argparse.ArgumentParser(description='AQJSM causal relations miner',
                                        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                        fromfile_prefix_chars='@')
    argparser.add_argument(dest='datafile')
    argparser.add_argument('-l', '--loglevel', choices=log_levels, default='info',
                           help='Logging level')
    # Defaults are plain ints now; the original passed strings ('3', '30') and
    # relied on argparse applying type= to string defaults.
    argparser.add_argument('-s', '--reasonsize', type=int, default=3,
                           help='Maximum size of causes for filtering')
    argparser.add_argument('-u', '--univer', type=int, default=30,
                           help='Maximum size of the set of class properties')
    argparser.add_argument('-c', '--classid', type=int, required=True,
                           help='Index of class column in data file (starting from 0)')
    argparser.add_argument('-n', '--nominaldata',
                           help='Data string of information about nominal columns in format: <col_id1>:<nom1>,<nom2>,...;<col_id2>:<nom1>...')
    args, comment = argparser.parse_known_args()
    return args, comment
def configure_logger():
    """Attach console and file handlers to the root logger and log platform info.

    NOTE(review): reads the module-level `args` assigned in the __main__ block,
    so it must be called after parse_args() — confirm call order stays that way.
    """
    rootLogger = logging.getLogger()
    logFormatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', datefmt='%H:%M:%S')
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    # Log file is written with cp1251 encoding (Cyrillic Windows code page).
    fileHandler = logging.FileHandler('aqjsm.log', encoding='cp1251')
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)
    rootLogger.setLevel(args.loglevel.upper())
    logging.info('OS: {0}, date: {1}'.format(platform.platform(), datetime.datetime.now().strftime("%Y-%m-%d")))
if __name__ == "__main__":
    # CLI setup and logging.
    args, comment = parse_args()
    configure_logger()
    logging.info(args)
    logging.info(comment)
    max_universe_size = args.univer
    max_reason_length = args.reasonsize
    class_index = args.classid
    nominal_data = args.nominaldata
    # Load the dataset and the designated class column.
    data, class_column = dl.load_data(args.datafile, class_index, nominal_data)
    logging.info('Data file {0}: {2} columns, {3} objects, class column is "{1}"'.format(args.datafile,
                                                                                         dl.column_names[class_index],
                                                                                         *reversed(data.shape)))
    logging.debug('\n\t'.join(['"{0}": {1}'.format(key, dl.column_ranges[key]) for key in sorted(dl.column_ranges)]))
    # Build AQ class descriptions, then mine JSM hypotheses per class value.
    class_descriptions = aq.run_aq(data, class_column, dl.column_names)
    for desc in class_descriptions.values():
        desc.build(max_universe_size)
    logging.info('\n'.join([str(class_descriptions[d]) for d in class_descriptions]))
    for klass in data[class_column].unique():
        logging.info('\n' * 3 + '*' * 5 + 'Start search reasons for class {0}'.format(klass) + '*' * 5)
        logging.info('Start search reasons for class property {0}'.format(klass))
        fb = FactBase(Fact(class_index, {klass}, 'class'))
        fb.build(data, class_descriptions[klass])
        fb.clear()
        all_hypothesis = {}
        def _search_in_fb(data_fb, target):
            # Run Norris search over the fact base and log reasons no longer
            # than max_reason_length, ordered by generator support.
            hypotheses = search_norris(data_fb)
            reasons = []
            for hyp in hypotheses:
                if hyp.value.count() <= max_reason_length:
                    reasons.append((hyp.generator.count(),
                                    [data_fb.properties[i] for i in range(len(hyp.value)) if
                                     hyp.value[i]]))
            if reasons:
                reasons.sort(key=lambda x: x[0], reverse=True)
                logging.info('\tFound {0} reasons for {1}:\n\t'.format(len(reasons), target) + '\n\t'.join(
                    ['[{0}]: '.format(q) + ' & '.join([str(f) for f in r]) for q, r in reasons]))
            else:
                logging.debug('\tWas not found reasons for {0}'.format(target))
            all_hypothesis[target] = hypotheses
        # pr = cProfile.Profile()
        # pr.enable()
        # with PyCallGraph(output=GraphvizOutput()):
        _search_in_fb(fb, 'class ' + klass)
        # pr.disable()
        # pr.print_stats(sort="calls")
        # exit()
        for prop in class_descriptions[klass].properties:
            logging.info('Start search reasons for property {0}'.format(prop))
            fb = FactBase(prop)
            fb.build(data, class_descriptions[klass])
            fb.clear()
            _search_in_fb(fb, prop)
    #generate_graph(all_hypothesis, 'cause_net.html')
|
# -*- coding: utf-8 -*-
from app.models.meta import metadata, Base
from app.utils import Enum
from sqlalchemy import Table, Column, Integer, String
from sqlalchemy.orm import mapper
from sqlalchemy.orm.exc import NoResultFound
from web import config
import collections
import web
# SQLAlchemy classic-mapping table for users; mapped onto User at module bottom.
users_table = Table("USERS", metadata,
                    Column("id", Integer, primary_key=True, nullable=False),
                    Column("first_name", String(20), nullable=False),
                    Column("last_name", String(20), nullable=False),
                    Column("pseudonym", String(20), nullable=False),
                    Column("email", String(50), unique=True, nullable=False),
                    # NOTE(review): 32-char password column suggests an MD5 hex
                    # digest is stored — confirm before changing hashing.
                    Column("password", String(32), nullable=False),
                    Column("level", Integer, nullable=False)
                    )
class User(Base):
    """Application user; `level` encodes the privilege tier (see Levels)."""
    # Describes the different user levels : the value in the database corresponds to LevelComponent.value
    BaseLevels = Enum(["DISABLED", "GUEST", "CORE", "ADMIN"])
    LevelComponent = collections.namedtuple("LevelComponent", ["description", "value"])
    Levels = {
        BaseLevels.DISABLED : LevelComponent("Inactif", 0),
        BaseLevels.GUEST : LevelComponent("Guest", 1),
        BaseLevels.CORE : LevelComponent("Fondateur", 2),
        BaseLevels.ADMIN : LevelComponent("Admin", 3)
    }
    def __init__(self, first_name=None, last_name=None, pseudonym=None, email=None, password=None, level=None) :
        self.first_name = first_name
        self.last_name = last_name
        self.pseudonym = pseudonym
        self.email = email
        self.password = password
        self.level = level
    @classmethod
    def all(cls, order_by_clause=None):
        """ Overrides the default all method to guarantee the order by """
        # NOTE(review): `im_func` exists only on Python 2; under Python 3 this
        # would need Base.all.__func__ — confirm the target runtime.
        return Base.all.im_func(User, order_by_clause=order_by_clause or User.email) #@UndefinedVariable
    def __repr__(self) :
        return "<User(%s,%s,%s,%s,%s,%s)>" % (self.first_name, self.last_name, self.pseudonym, self.email, self.password, self.level)
    def __eq__(self, other):
        # Horrible hack to bypass FormAlchemy controls (!?)
        if isinstance(other, type):
            return False
        return self.first_name == other.first_name \
            and self.last_name == other.last_name \
            and self.pseudonym == other.pseudonym \
            and self.email == other.email \
            and self.password == other.password \
            and self.level == other.level
    def check_level(self, base_level):
        # True when this user's numeric level is at least the required one.
        return self.level >= self.Levels[base_level].value
    @property
    def admin(self):
        """True for ADMIN-level users."""
        return self.check_level(self.BaseLevels.ADMIN)
    @property
    def active(self):
        """True for any non-disabled user (GUEST or above)."""
        return self.check_level(self.BaseLevels.GUEST)
    @staticmethod
    def get_user(email):
        """Return the User with this email, or None when absent or email is falsy."""
        if not email:
            return None
        try:
            user = config.orm.query(User).filter(User.email == email).one()
        except NoResultFound:
            user = None
        return user
# Classic (imperative) SQLAlchemy mapping of User onto the USERS table.
mapper(User, users_table)
web.debug("[MODEL] Successfully mapped User class")
|
'''
Created on May 18th, 2018
author: Julian Weisbord
sources:
description: Create a data set by overlaying Time magazine covers onto random images.
'''
import os
import sys
import random
import glob
import cv2
import numpy as np
# Target sizes (pixels): backgrounds are resized to 600x600, overlays to 150x150.
BACKGROUND_WIDTH = 600
BACKGROUND_HEIGHT = 600
OVERLAY_WIDTH = 150
OVERLAY_HEIGHT = 150
def create_dataset(background_images, overlay_images):
    """Overlay each cover image onto the background at the same index and save to ./train_data.

    :param background_images: directory of background images
    :param overlay_images: directory of overlay (magazine cover) images
    """
    if not os.path.isdir("./train_data"):
        os.mkdir("./train_data")
    ovr_images = glob.glob(overlay_images + '/*')
    bkg_images = glob.glob(background_images + '/*')
    for pos, bkg_img in enumerate(bkg_images):
        # Normalize both images to float32 in [0, 1] before blending.
        bkg_image = cv2.imread(bkg_img)
        bkg_image = cv2.resize(bkg_image, (BACKGROUND_WIDTH, BACKGROUND_HEIGHT), 0, 0, cv2.INTER_LINEAR)
        bkg_image = bkg_image.astype(np.float32)
        bkg_image = np.multiply(bkg_image, 1.0 / 255.0)
        cur_overlay = cv2.imread(ovr_images[pos])
        cur_overlay = cv2.resize(cur_overlay, (OVERLAY_WIDTH, OVERLAY_HEIGHT), 0, 0, cv2.INTER_LINEAR)
        cur_overlay = cur_overlay.astype(np.float32)
        cur_overlay = np.multiply(cur_overlay, 1.0 / 255.0)
        rows, cols, channels = cur_overlay.shape
        # Randomly place the overlay.  Bug fix: randx indexes the first (row /
        # height) axis and randy the second (column / width) axis; the original
        # used width for rows and `rows` for both bounds, which only worked
        # because the images happen to be square.
        randx = random.randint(0, BACKGROUND_HEIGHT - rows)
        randy = random.randint(0, BACKGROUND_WIDTH - cols)
        overlay = cv2.addWeighted(bkg_image[randx:randx + rows, randy:randy + cols], 0, cur_overlay, 1, 0)
        bkg_image[randx:randx + rows, randy:randy + cols] = overlay
        # Scale back to [0, 255] for writing.
        final_img = np.multiply(bkg_image, 255.0)
        cv2.imwrite("./train_data/overlay{}.jpg".format(pos), final_img)
def main():
    """CLI entry point: validate argv and launch dataset creation."""
    if len(sys.argv) != 3:
        print("Usage: python3 create_dataset.py <background image folder path> <overlay image folder path>")
        exit()
    create_dataset(sys.argv[1], sys.argv[2])
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mysql.connector
from sgmllib import SGMLParser
from HTMLParser import HTMLParser
import urllib2
import os
from Site import *
from Card import *
from Edition import *
from Price import *
# Default edition page used as the scraping entry point.
url="http://www.magiccorporation.com/gathering-cartes-edition-170-tenebres-sur-innistrad.html"
def findColor(url):
    """Map a micro colour-icon URL to its colour name; '' for unknown URLs."""
    colors = {
        '/images/magic/couleurs/micro/incolor.gif': 'incolor',
        '/images/magic/couleurs/micro/white.gif': 'white',
        '/images/magic/couleurs/micro/blue.gif': 'blue',
        '/images/magic/couleurs/micro/red.gif': 'red',
        '/images/magic/couleurs/micro/green.gif': 'green',
        '/images/magic/couleurs/micro/black.gif': 'black',
        '/images/magic/couleurs/micro/multicolor.gif': 'multicolor',
    }
    return colors.get(url, '')
def findLevel(url):
    """Map a rarity-icon URL to its numeric level (0..3); None for unknown URLs."""
    levels = {
        '/images/magic/rarete/icon/common.gif': 0,
        '/images/magic/rarete/icon/uncommon.gif': 1,
        '/images/magic/rarete/icon/rare.gif': 2,
        '/images/magic/rarete/icon/mystic_rare.gif': 3,
    }
    return levels.get(url)
def findCost(value):
    """Build a cost string from icon paths: one character (the 5th from the end) per path."""
    return ''.join(item[len(item) - 5] for item in value)
class CardParser(HTMLParser):
    """Parses a magiccorporation.com card page and updates the Card record (Python 2 code).

    State flags track which section of the page is currently open (image block,
    VO characteristics block, price table) so handle_data knows where text belongs.
    """
    def __init__(self,idCard):
        HTMLParser.__init__(self)
        self.html_table_editions=False
        self.image=False
        self.caracteristiqueVO=False
        self.caracteristiqueVF=False
        self.readPrice=False
        self.price=0.0
        self.card=Card()
        self.card.load(idCard)
        self.card.capa=""
        self.readNameCost=False
        self.readCost=False
        self.readCapa=False
        self.endCost=False
        self.imgCost=[]
        self.brCounter=0
        self.tdCounter=0
        self.listPrice=[]
        self.nameCost=""
    def handle_starttag(self, tag, attrs):
        #print "Encountered an start tag :", tag
        # Section detection keys off wrapper div inline styles (widths).
        if tag=='div':
            if len(attrs)>0:
                if self.caracteristiqueVO==True and attrs[0][0]=='class' and attrs[0][1]=='block_content':
                    self.readCost=True
            for i in attrs:
                if i[0]=='style':
                    if i[1]=='width: 225px':
                        self.image=True
                    if i[1]=='width: 30%':
                        self.caracteristiqueVO=True
                    if i[1]=='width: 27%':
                        # NOTE(review): self.price doubles as a numeric field
                        # (init 0.0) and a boolean section flag here — verify.
                        self.price=True
        if tag=='img' and self.readCost==True and self.endCost==False:
            # Mana-cost icons: collect their URLs for findCost().
            self.imgCost.append(attrs[0][1]);
        if tag=='img' and self.image==True:
            self.image=False
            for i in attrs:
                if i[0]=='src':
                    self.card.image=i[1]
                    #print i[1]
        if tag=='tr':
            self.tdCounter=0
            self.nameCost=""
        if tag=='td' and self.price==True:
            # Price table: columns 1-3 carry the edition/name, column 4 the price.
            self.tdCounter+=1
            if self.tdCounter==1 or self.tdCounter==2 or self.tdCounter==3:
                self.readNameCost=True
            else:
                self.readNameCost=False
            if self.tdCounter==4:
                self.readPrice=True
            else:
                self.readPrice=False
        if tag=='br' and self.caracteristiqueVO:
            self.brCounter+=1
    def handle_endtag(self, tag):
        #print "Encountered an end tag :", tag
        if tag=='tbody' and self.price==True:
            self.price=False
        if tag=='br' and self.readCost==True:
            # First <br> after the cost icons ends the cost section.
            self.readCost=False
            self.endCost=True
            self.readCapa=True
        if tag=='div' and self.readCapa==True:
            self.caracteristiqueVO=False
            self.card.cost=findCost(self.imgCost)
        if tag=='html':
            # End of page: persist prices and the card record.
            priceTest = Price(self.card.idCard,self.listPrice)
            priceTest.update("magicCorpo",priceTest.toCSV())
            #print "price mini ",
            #print self.listPrice
            self.card.update()
    def handle_data(self, data):
        # Text between the 1st and 4th <br> of the VO block is type then ability.
        if self.caracteristiqueVO and self.brCounter==1:
            #print data
            self.card.type=data
        if self.caracteristiqueVO and self.brCounter>1 and self.brCounter<4:
            #print data
            self.card.capa=self.card.capa+" "+data
        if self.readNameCost:
            self.nameCost=self.nameCost+" "+data
            self.readNameCost=False
        if self.readPrice:
            #print self.nameCost
            self.listPrice.append((self.nameCost,self.extract_price(data)))
            self.readPrice=False
    def extract_price(self, value):
        """Strip the trailing 2 characters (currency) and return the price as float."""
        price=0.0
        price=value[0:len(value)-2]
        return float(price)
    def extract_name(self, value):
        print value
        return None
    def price_min(self,price):
        """Return the smallest element of the price list."""
        minPrice=price[0]
        for i in price:
            if i<minPrice:
                minPrice=i
        return minPrice
class EditionParser(HTMLParser):
    """Parses a magiccorporation.com edition page: one card per row of the editions table (Python 2 code)."""
    def __init__(self,idEdition):
        HTMLParser.__init__(self)
        self.html_table_editions=False
        self.tdCounter=0
        self.card=Card()
        self.card.idEdition=idEdition
    def handle_starttag(self, tag, attrs):
        #print "Encountered an start tag :", tag
        # NOTE(review): attrs[3][1] assumes the editions table always carries at
        # least 4 attributes with class in the 4th slot — fragile; verify markup.
        if tag=='table':
            if attrs[3][1]=="html_table editions":
                self.html_table_editions=True
        if self.html_table_editions==True:
            if tag=="td":
                self.tdCounter=self.tdCounter+1
            # Column 1: colour icon; column 2: rarity icon; column 4: card link.
            if tag=="img" and self.tdCounter==1:
                self.card.color=findColor(attrs[0][1])
            if tag=="img" and self.tdCounter==2:
                self.card.level=findLevel(attrs[0][1])
            if tag=="a" and self.tdCounter==4:
                self.card.url='http://www.magiccorporation.com/'+attrs[0][1]
    def handle_endtag(self, tag):
        #print "Encountered an end tag :", tag
        if tag=='table' and self.html_table_editions==True:
            self.html_table_editions=False
        if self.html_table_editions:
            if tag=='tr':
                # End of a table row: store the accumulated card.
                self.tdCounter=0
                self.card.record()
    def handle_data(self, data):
        #print data
        if self.html_table_editions:
            if self.tdCounter==0:
                print " "
            if self.tdCounter==4:
                self.card.nameVO=data
                print "Name ",
                print data,
            if self.tdCounter==5:
                self.card.nameVF=data
            if self.tdCounter==7:
                self.card.type=data
            if self.tdCounter==8:
                # Parse "force/endurance" text like "3/4" or "*/2".
                strong=0
                endu=0
                position=0
                switch=False
                for i in data:
                    if i=="*":
                        if switch:
                            endu=0
                        else:
                            strong=0
                    elif i=="/":
                        switch=True
                        position=0
                    else:
                        try:
                            # NOTE(review): this accumulates digits most-
                            # significant-last (digit * 10**position), which
                            # reverses multi-digit numbers — verify intent.
                            if switch:
                                endu=endu*10**position+int(i)
                            else:
                                strong=strong*10**position+int(i)
                        except:
                            pass
                    position=position+1
                print " Force : ",
                print strong,
                print ", endu ",
                print endu,
                self.card.force=strong
                self.card.endu=endu
if __name__ == "__main__":
    print "//////////////////////////////////////////////"
    print "/// Enregistre les editions ///"
    print "//////////////////////////////////////////////"
    # Disabled edition-scraping pass, kept as an inert string block.
    """
    edition=Edition()
    allEdition=edition.all()
    for temp in allEdition:
        parser = EditionParser(temp[0])
        urlSite='http://www.magiccorporation.com/'+temp[1]
        print urlSite
        site=Site(urlSite)
        parser.feed(site.html)
    parser=CardParser(5)
    site=Site("http://www.magiccorporation.com/gathering-cartes-view-29823-benevolent-bodyguard.html")
    parser.feed(site.html)
    """
    # Re-scrape every card already present in the database.
    try:
        conn = mysql.connector.connect(host="localhost",user="root",password="magicpswd", database="magic")
        cursor = conn.cursor()
        cursor.execute("SELECT id, url FROM card")
        rows = cursor.fetchall()
        for i in rows:
            print i[0],
            print i[1]
            try:
                parser=CardParser(i[0])
                site=Site(i[1])
                parser.feed(site.html)
            except Exception as e:
                print("Erreur")
                print e
    except Exception as e:
        print("Erreur")
        print e
|
#!/usr/bin/env python2.7
import math
import numpy as np
#pylint: disable=C0301,C0111,W0603,W0613
# Per-article LinUCB state, keyed by article id (populated by set_articles).
ARTICLE_FEATURES = {}
# Dimensionality of the user feature vector.
USER_FEATURES_DIM = 6
# Article id returned by the most recent reccomend() call.
LAST_RECOMMENDATION = None
# User feature vector from the most recent reccomend() call.
LAST_USER = None
# Exploration weight of the UCB confidence term.
ALPHA = 3
# Evaluator will call this function and pass the article features.
# Check evaluator.py description for details.
def set_articles(art):
    """Initialise LinUCB state for every article: design matrix m, reward vector b, weights w."""
    for art_key, features in art.items():
        ARTICLE_FEATURES[art_key] = {
            'features': features,
            'm': np.identity(USER_FEATURES_DIM),
            'b': np.zeros(USER_FEATURES_DIM),
            'w': np.zeros(USER_FEATURES_DIM),
            'updated': False,
        }
# This function will be called by the evaluator.
# Check task description for details.
def update(reward):
    """Fold the observed reward for the last recommendation into that article's LinUCB state.

    reward == -1 means "no feedback for this event" and is ignored;
    reward == 1 additionally credits the reward vector b.
    """
    if reward == -1:
        return
    article = ARTICLE_FEATURES[LAST_RECOMMENDATION]
    article['m'] += np.outer(LAST_USER, LAST_USER)
    # Bug fix: the original set ARTICLE_FEATURES['updated'] — a stray top-level
    # key — instead of flagging the recommended article, so calculate_ucb never
    # saw the flag and never refreshed the cached weight vector.
    article['updated'] = True
    if reward == 1:
        article['b'] += LAST_USER
# This function will be called by the evaluator.
# Check task description for details.
def reccomend(timestamp, user_features, articles):
    """Pick the article with the highest UCB score for this user.

    (Name spelling kept as-is: the evaluator calls this exact symbol.)
    Records the user vector and chosen article in module globals so that the
    subsequent update() call can credit the right article.
    """
    global LAST_RECOMMENDATION, LAST_USER
    LAST_USER = np.array(user_features)
    LAST_RECOMMENDATION = max(articles, key=calculate_ucb)
    return LAST_RECOMMENDATION
def calculate_ucb(art_id):
    """LinUCB score for one article: w . x + ALPHA * sqrt(x^T M^-1 x) for the last user x.

    Lazily refreshes the cached weight vector w = M^-1 b when the article's
    state was flagged as updated since the last scoring.
    """
    current_article = ARTICLE_FEATURES[art_id]
    if current_article['updated']:
        current_article['w'] = np.linalg.solve(current_article['m'],
                                               current_article['b'])
        current_article['updated'] = False
    ucb = np.inner(current_article['w'], LAST_USER)
    # Confidence half-width of the estimate for this user/article pair.
    ucb += ALPHA*math.sqrt(np.inner(LAST_USER, np.linalg.solve(current_article['m'], LAST_USER)))
    return ucb
|
#!/bin/python
import logging
import os
import time
from typing import List, Dict
import boto3
import botocore
from botocore.config import Config
# Root logger for the Lambda, configured once per container at INFO level.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def get_enabled_standard_subscriptions(standards, account_id, security_hub_client):
    """Return the enabled-standards response for *account_id*.

    The generic ARNs from describe_standards are rewritten into
    account-scoped subscription ARNs before calling get_enabled_standards.
    """
    region = os.environ["AWS_REGION"]
    subscription_arns = []
    for standard in standards["Standards"]:
        arn = standard["StandardsArn"]
        # Inject the account id into both global and regional ARN shapes.
        arn = arn.replace(":::", "::" + account_id + ":")
        arn = arn.replace(":" + region + "::", ":" + region + ":" + account_id + ":")
        subscription_arns.append(arn)
    return security_hub_client.get_enabled_standards(
        StandardsSubscriptionArns=subscription_arns
    )
def get_controls(enabled_standards, security_hub_client):
    """Return {StandardsArn: [controls]} for every enabled standard subscription."""
    controls = {}
    for subscription in enabled_standards["StandardsSubscriptions"]:
        response = security_hub_client.describe_standards_controls(
            StandardsSubscriptionArn=subscription["StandardsSubscriptionArn"]
        )
        controls[subscription["StandardsArn"]] = response["Controls"]
    return controls
class SecurityStandardUpdateError(Exception):
    """Raised when a security standard subscription update fails."""
    pass
# boto3 clients cached at module level so warm Lambda invocations reuse them.
administrator_security_hub_client = None
sts_client = None
# Defaults used when enabling/disabling controls in member accounts.
DISABLED_REASON = "Control disabled in the SecurityHub administrator account."
DISABLED = "DISABLED"
ENABLED = "ENABLED"
def lambda_handler(event, context):
    """Mirror the administrator account's Security Hub standard/control configuration into one member account.

    event["account"] names the member account; a cross-account role (template
    in env var MemberRole) is assumed to obtain a member-scoped client.
    Returns {"statusCode": 200|500, "account": ..., ["error": ...]}.
    """
    logger.info(event)
    try:
        # set variables and boto3 clients
        config = Config(
            retries = {
                'max_attempts': 23,
                'mode': 'standard'
            }
        )
        administrator_account_id = context.invoked_function_arn.split(":")[4]
        member_account_id = event["account"]
        role_arn = os.environ["MemberRole"].replace("<accountId>", member_account_id)
        global sts_client
        if not sts_client:
            sts_client = boto3.client("sts")
        assumed_role_object = sts_client.assume_role(
            RoleArn=role_arn, RoleSessionName="SecurityHubUpdater"
        )
        credentials = assumed_role_object["Credentials"]
        # Member-account client built from the assumed-role credentials.
        member_security_hub_client = boto3.client(
            "securityhub",
            aws_access_key_id=credentials["AccessKeyId"],
            aws_secret_access_key=credentials["SecretAccessKey"],
            aws_session_token=credentials["SessionToken"],
            config=config,
        )
        # Optimization - no need to reinitilize the administrator security hub client for every instance of this Lambda function
        global administrator_security_hub_client
        if not administrator_security_hub_client:
            administrator_security_hub_client = boto3.client("securityhub", config=config)
        # Get standard subscription controls
        standards = administrator_security_hub_client.describe_standards()
        administrator_enabled_standards = get_enabled_standard_subscriptions(
            standards, administrator_account_id, administrator_security_hub_client
        )
        member_enabled_standards = get_enabled_standard_subscriptions(
            standards, member_account_id, member_security_hub_client
        )
        logger.info("Update Account %s", member_account_id)
        # Update standard subscriptions in member account
        standards_updated = update_standard_subscription(
            administrator_enabled_standards,
            member_enabled_standards,
            member_security_hub_client,
        )
        if standards_updated:
            logger.info("Fetch enabled standards again.")
            member_enabled_standards = get_enabled_standard_subscriptions(
                standards, member_account_id, member_security_hub_client
            )
        # Get Controls
        admin_controls = get_controls(
            administrator_enabled_standards, administrator_security_hub_client
        )
        member_controls = get_controls(
            member_enabled_standards, member_security_hub_client
        )
        # Get exceptions.  NOTE(review): get_exceptions is defined elsewhere in
        # this module; expected shape: {"Disabled": ..., "Enabled": ...,
        # "DisabledReason": {...}} — confirm against its definition.
        exceptions = get_exceptions(event)
        logger.debug("Exceptions: %s", str(exceptions))
        # Disable/enable the controls in member account
        update_member(
            admin_controls, member_controls, member_security_hub_client, exceptions
        )
    except botocore.exceptions.ClientError as error:
        logger.error(error)
        return {"statusCode": 500, "account": member_account_id, "error": str(error)}
    return {"statusCode": 200, "account": member_account_id}
def update_member(
    admin_controls, member_controls, member_security_hub_client, exceptions
):
    """
    Align the member account's control statuses with the administrator's.

    admin_controls / member_controls map a standard key to the list of its
    controls; exceptions is the dict produced by get_exceptions() and takes
    precedence over plain mirroring of the administrator's configuration.
    """
    for standard_key, admin_standard_controls in admin_controls.items():
        member_standard_controls = member_controls.get(standard_key)
        if member_standard_controls is None:
            # Standard not present in the member account; nothing to update.
            continue
        # Bug fix: the original zip()-ed both control lists positionally,
        # which silently mis-pairs (or drops) controls when the lists differ
        # in order or length; match explicitly by ControlId instead.
        member_by_id = {
            control["ControlId"]: control for control in member_standard_controls
        }
        for admin_control in admin_standard_controls:
            member_control = member_by_id.get(admin_control["ControlId"])
            if member_control is None:
                continue
            logger.info(admin_control)
            logger.info(member_control)
            # Check for exceptions first
            if admin_control["ControlId"] in exceptions["Disabled"]:
                if member_control["ControlStatus"] != DISABLED:
                    # Disable control in member account
                    update_control_status(
                        member_control,
                        member_security_hub_client,
                        DISABLED,
                        disabled_reason=exceptions["DisabledReason"][
                            admin_control["ControlId"]
                        ],
                    )
            elif admin_control["ControlId"] in exceptions["Enabled"]:
                if member_control["ControlStatus"] != ENABLED:
                    # Enable control in member account
                    update_control_status(
                        member_control, member_security_hub_client, ENABLED
                    )
            elif (
                admin_control["ControlStatus"]
                != member_control["ControlStatus"]
            ):
                # Mirror the SecurityHub admin account's configuration.
                update_control_status(
                    member_control,
                    member_security_hub_client,
                    admin_control["ControlStatus"],
                )
def update_control_status(member_control, client, new_status, disabled_reason=None):
    """
    Push a single control's status change to the member account.

    Security Hub requires a DisabledReason whenever a control is disabled;
    fall back to the module-wide DISABLED_REASON when none is supplied.
    """
    request = {
        "StandardsControlArn": member_control["StandardsControlArn"],
        "ControlStatus": new_status,
    }
    if new_status == DISABLED:
        request["DisabledReason"] = disabled_reason or DISABLED_REASON
    client.update_standards_control(**request)
def _wait_for_standards_transition(client, verb):
    """
    Poll get_enabled_standards() until every subscription is READY or
    INCOMPLETE.

    :param verb: "enabled" or "disabled" — used only in log/error messages.
    :raises SecurityStandardUpdateError: when any subscription reports FAILED.
    """
    while True:
        response = client.get_enabled_standards()
        subscription_statuses = [
            subscription["StandardsStatus"]
            for subscription in response["StandardsSubscriptions"]
        ]
        if all(status in ("READY", "INCOMPLETE") for status in subscription_statuses):
            if "INCOMPLETE" in subscription_statuses:
                # Bug fix: the disable branch originally logged "could not be
                # enabled completely"; the verb now matches the operation.
                logger.warning(
                    "Standard could not be %s completely. Some controls may not be available: %s",
                    verb,
                    str(response["StandardsSubscriptions"]),
                )
            return
        if "FAILED" in subscription_statuses:
            logger.error(
                "Standard could not be %s: %s",
                verb,
                str(response["StandardsSubscriptions"]),
            )
            raise SecurityStandardUpdateError(
                "Security standard could not be "
                + verb
                + ": "
                + str(response["StandardsSubscriptions"])
            )
        logger.info("Wait until standards are %s...", verb)
        time.sleep(1)
def update_standard_subscription(
    administrator_enabled_standards, member_enabled_standards, client
):
    """
    Update security standards to reflect state in administrator account.

    :return: True when at least one standard was enabled or disabled.
    """
    # Sets give O(1) membership tests below.
    admin_standard_arns = {
        standard["StandardsArn"]
        for standard in administrator_enabled_standards["StandardsSubscriptions"]
    }
    member_standard_arns = {
        standard["StandardsArn"]
        for standard in member_enabled_standards["StandardsSubscriptions"]
    }
    standards = client.describe_standards()["Standards"]
    standard_to_be_enabled = []
    standard_to_be_disabled = []
    for standard in standards:
        arn = standard["StandardsArn"]
        if arn in admin_standard_arns and arn not in member_standard_arns:
            # Enabled in admin but not in member -> enable it.
            standard_to_be_enabled.append({"StandardsArn": arn})
        elif arn not in admin_standard_arns and arn in member_standard_arns:
            # Disabling needs the member's subscription ARN, matched via the
            # standard identifier (third path segment from the end).
            for subscription in member_enabled_standards["StandardsSubscriptions"]:
                if (
                    subscription["StandardsArn"].split("/")[-3]
                    == arn.split("/")[-3]
                ):
                    standard_to_be_disabled.append(
                        subscription["StandardsSubscriptionArn"]
                    )
    standards_changed = False
    if standard_to_be_enabled:
        logger.info("Enable standards: %s", str(standard_to_be_enabled))
        client.batch_enable_standards(
            StandardsSubscriptionRequests=standard_to_be_enabled
        )
        _wait_for_standards_transition(client, "enabled")
        logger.info("Standards enabled")
        standards_changed = True
    if standard_to_be_disabled:
        logger.info("Disable standards: %s", str(standard_to_be_disabled))
        client.batch_disable_standards(
            StandardsSubscriptionArns=standard_to_be_disabled
        )
        _wait_for_standards_transition(client, "disabled")
        logger.info("Standards disabled")
        standards_changed = True
    return standards_changed
def get_exceptions(event):
    """
    Extract the exceptions that apply to the processed account from *event*.

    Returns a dict with "Disabled" / "Enabled" control-id lists and a
    "DisabledReason" mapping. A control listed as both enabled AND disabled
    is a conflict and is left to the administrator's configuration.
    """
    account_id = event["account"]
    configured = event["exceptions"]
    result = {"Disabled": [], "Enabled": [], "DisabledReason": dict()}
    for control, spec in configured.items():
        disabled = False
        enabled = False
        try:
            disabled = account_id in spec["Disabled"]
        except KeyError:
            logger.info('%s: No "Disabled" exceptions.', control)
        try:
            enabled = account_id in spec["Enabled"]
        except KeyError:
            logger.info('%s: No "Enabled" exceptions.', control)
        try:
            result["DisabledReason"][control] = spec["DisabledReason"]
        except KeyError as error:
            # A reason is mandatory for every configured exception.
            logger.error('%s: No "DisabledReason".', control)
            raise error
        if enabled and disabled:
            logger.warning(
                "%s: Conflict - exception states that this control should be enabled AND disabled. Fallback to SecurityHub Administrator configuration.",
                control,
            )
        elif disabled:
            result["Disabled"].append(control)
        elif enabled:
            result["Enabled"].append(control)
    return result
|
import modules.peripherals.heap as heap
import tests.peripheral_tests.myio_testing as myio
#tests heap made of churches
def test_heap_church():
    """Heap built from church buildings must pop totals in ascending order."""
    buildingList = myio.input('documentation/TN7_Test.xlsx')[1]
    building_heap = heap.heap()
    building_heap.heapify(buildingList)
    a = []
    while not building_heap.isEmpty():
        a.append(building_heap.pop().getTotal())
    # Bug fix: `b = a` aliased the same list, so b.sort() also sorted `a`
    # and the assertion below could never fail. Compare against a sorted
    # copy instead.
    b = sorted(a)
    assert a == b
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# from sklearn import preprocessing
# 값차이가 상당히 크기 때문에 정규화가 필수
# 정규화된 데이터
# Logistic regression on the diabetes dataset using the legacy TF1
# graph/session API (tf.placeholder, tf.Session) — requires TensorFlow 1.x.
# Normalization matters because the raw feature ranges differ widely;
# the input CSV is assumed to be already normalized.
data = np.loadtxt("../../../data/diabetes1.csv", skiprows=1, delimiter=",", dtype=np.float32)
print(data)
# All columns except the last are features; the last column is the label.
x_data = data[:, :-1]
y_data = data[:, -1:]
print(x_data.shape)
print(y_data.shape)
# Model parameters: 8 input features -> 1 logit.
W = tf.Variable(tf.random_uniform([8, 1]))
b = tf.Variable(tf.random_uniform([1]))
# W = tf.get_variable(name='w1', shape=[8, 1],
#                     initializer=tf.contrib.layers.xavier_initializer())
# b = tf.get_variable(name='b1', shape=[1],
#                     initializer=tf.contrib.layers.xavier_initializer())
X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])
z = tf.matmul(X, W) + b
hx = tf.sigmoid(z)
# Binary cross-entropy loss, written out explicitly.
cost = tf.reduce_mean(Y * (-tf.log(hx)) + (1-Y) * (-tf.log(1-hx)))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train = optimizer.minimize(cost)
# Hard 0/1 predictions by thresholding the sigmoid output at 0.5.
predicted = tf.cast(hx > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Full-batch gradient descent; log the cost every 100 steps.
for i in range(10000):
    _t, _c = sess.run([train, cost], feed_dict={X: x_data, Y: y_data})
    if not i % 100:
        print(i, _c)
print(sess.run(W))
print(sess.run(b))
print(sess.run(predicted, feed_dict={X: x_data}))
print(sess.run(accuracy, feed_dict={X: x_data, Y: y_data}))
# At most two nodes can be roots of a minimum-height tree (provable by
# contradiction): a leaf used as root always yields a height >= that of its
# neighbour, so repeatedly trim the current leaves layer by layer; whatever
# remains (1 or 2 nodes — the centroids) gives the minimum height.
# Bug fix: `List` and `defaultdict` were used without being imported,
# which raised NameError as soon as the class body was evaluated.
from collections import defaultdict
from typing import List
class Solution:
    def findMinHeightTrees(self, n: int, edges: List[List[int]]) -> List[int]:
        """Return the root node(s) minimising tree height (the centroids)."""
        in_degree, connect = [0] * n, defaultdict(list)
        for a, b in edges:
            in_degree[a] += 1
            in_degree[b] += 1
            connect[a].append(b)
            connect[b].append(a)
        # Current leaves; degree <= 1 also covers the single-node tree.
        nodes = [i for i, v in enumerate(in_degree) if v <= 1]
        while n > 2:
            n -= len(nodes)
            nxt = []
            for node in nodes:
                for other in connect[node]:
                    in_degree[other] -= 1
                    if in_degree[other] == 1:
                        nxt.append(other)
            # Trim this layer of leaves and continue inward.
            nodes = nxt
        return nodes
|
from sklearn.feature_extraction.text import TfidfVectorizer
# list of text documents
# list of text documents
text = ["The quick brown fox jumped over the lazy dog.",
        "The dog.",
        "The fox"]
# create the transform
vectorizer = TfidfVectorizer()
# tokenize and build vocab
vectorizer.fit(text)
# summarize the learned vocabulary and inverse document frequencies
print(vectorizer.vocabulary_)
print(vectorizer.idf_)
# encode the first document
vector = vectorizer.transform([text[0]])
# summarize encoded vector
print(vector.shape)
# Bug fix: the original line `print(vector.toarray()` was missing its
# closing parenthesis — a SyntaxError that prevented the script from running.
print(vector.toarray())
|
"""Module that allows Redis to be used as cache. Useful when running on Heroku or such platforms without persistent
file storage.
"""
from redis import Redis
from config import CACHE_URL, CACHE_TTL
from util.caching.caching import CacheAPI
from util.logger import logger
__author__ = 'MePsyDuck'
class RedisCache(CacheAPI):
    def __init__(self):
        """Create a new Redis instance when a new object for this class is created.
        """
        self.redis = Redis.from_url(CACHE_URL)
        # Lazy %-style logging instead of string concatenation.
        logger.info('Connected to Redis at %s', CACHE_URL)
    def _exists(self, key):
        """Method to check if `key` exists in redis cache.
        :param key: The `key` to be checked in redis cache.
        :return: `True` if `key` exists in redis cache, `False` otherwise.
        """
        # Bug fix: the original fell through and returned None for missing
        # keys; return an explicit bool for both outcomes.
        return bool(self.redis.exists(key))
    def _set(self, key):
        """Method to set `key` with an empty value in redis.
        Key expires after CACHE_TTL hours (`ex` is in seconds).
        NOTE(review): the original docstring said "days" but the code
        multiplies by 60 * 60 (hours) — confirm CACHE_TTL's intended unit.
        :param key: The `key` (thing_id) to be added to redis cache.
        """
        self.redis.set(name=key, value='', ex=CACHE_TTL * 60 * 60)
|
# -*- coding: utf-8 -*-
"""
LeetCode 34.
Given an array of integers sorted in ascending order, find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
For example,
Given [5, 7, 7, 8, 8, 10] and target value 8,
return [3, 4].
"""
def searchRange(nums, target):
    """
    Find the first and last index of target in the sorted list nums.

    time: O(logn)
    space: O(1)
    :type nums: List[int]
    :type target: int
    :rtype: List[int] -- [first, last], or [-1, -1] when target is absent
    """
    # Bug fix: the original scanned linearly left/right from a binary-search
    # hit, which degenerates to O(n) when target has many duplicates and
    # breaks the documented O(log n) bound. Two binary searches keep it
    # logarithmic for every input.
    from bisect import bisect_left, bisect_right
    lo = bisect_left(nums, target)
    if lo == len(nums) or nums[lo] != target:
        return [-1, -1]
    return [lo, bisect_right(nums, target) - 1]
# Demo: 6 is absent from the sample list, so this prints [-1, -1].
lst = [5, 7, 7, 8, 8, 10]
print(searchRange(lst, 6))
|
import napalm
from pprint import pprint as pp
from time import sleep
# Connect to each IOS device, show interfaces, merge a loopback-667 config,
# commit it, and show interfaces again to confirm the change.
driver = napalm.get_network_driver('ios')
list_of_devices = ['ios-xe-mgmt-latest.cisco.com']
for device in list_of_devices:
    # NOTE(review): credentials are hard-coded (Cisco DevNet sandbox
    # defaults); move them to env vars/secrets for anything beyond lab use.
    connection = driver(hostname=device, username='developer', password='C1sco12345', optional_args={'port': 8181})
    connection.open()
    pp(connection.get_interfaces())
    print('\n \n')
    # Stage a candidate config adding loopback 667, show the diff, apply it.
    configuration =('interface loopback 667')
    connection.load_merge_candidate(config=configuration)
    print(connection.compare_config())
    connection.commit_config()
    pp(connection.get_interfaces())
    print('\n \n')
    connection.close()
|
from typing import List
from leetcode import TreeNode, test, new_tree, sorted_list
def path_sum(root: TreeNode, target: int) -> List[List[int]]:
    """Collect every root-to-leaf path whose node values sum to target."""
    if not root:
        return []
    trail = []
    found = []

    def walk(node: TreeNode, remaining: int) -> None:
        # Keep the current root-to-node path on an explicit stack.
        trail.append(node)
        if node.left or node.right:
            if node.left:
                walk(node.left, remaining - node.val)
            if node.right:
                walk(node.right, remaining - node.val)
        else:
            # Leaf: record the path when the remaining sum matches its value.
            if node.val == remaining:
                found.append([n.val for n in trail])
        trail.pop()

    walk(root, target)
    return found
# Regression check: LeetCode 113's example tree with target 22; sorted_list
# normalizes path ordering before the comparison.
test(
    path_sum,
    [
        (
            new_tree(5, 4, 8, 11, None, 13, 4, 7, 2, None, None, 5, 1),
            22,
            [[5, 4, 11, 2], [5, 8, 4, 5]],
        )
    ],
    map_func=sorted_list,
)
|
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas
import seaborn as sns
import matplotlib.pyplot as plt
# # content_polluters_tweets
# # Loading datasets
data = pd.read_excel (r'datasets/content_polluters_tweets.xlsx')
print(data)
# # Dataset analysis
# Rows containing any null value, then a count of such rows.
print(data[data.isnull().any(axis=1)].head())
import numpy as np
np.sum(data.isnull().any(axis=1))
data.isnull().any(axis=0)
print(data.info())
print(data.describe())
# NOTE(review): iterating a DataFrame yields COLUMN NAMES, not rows, so the
# word cloud below is built from the header labels rather than the tweets —
# confirm whether data['Tweet_text'] was intended here.
neg = data
neg_string = []
for t in neg:
    neg_string.append(t)
neg_string = pandas.Series(neg_string).str.cat(sep=' ')
wordcloud = WordCloud(width=1600, height=800,max_font_size=200).generate(neg_string)
plt.figure(figsize=(12,10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
# Per-tweet token and character counts (before cleaning).
data['word_count'] = data['Tweet_text'].apply(lambda x: len(str(x).split(" ")))
data[['Tweet_text','word_count']].head()
data['char_count'] = data['Tweet_text'].str.len() ## this also includes spaces
data[['Tweet_text','char_count']].head()
# # Cleaning dataset
# # Data cleaning script
import re
# pat_1: @mentions and http(s) URLs; pat_2: hashtags; part_3: stray 'br'
# text left over from <br> tags.
pat_1 = r"(?:\@|https?\://)\S+"
pat_2 = r'#\w+ ?'
part_3= r'br'
combined_pat = r'|'.join((pat_1, pat_2,part_3))
# NOTE(review): the dot in 'www.' is unescaped, so this also matches e.g.
# 'wwwX...' — presumably harmless for tweets, but confirm.
www_pat = r'www.[^ ]+'
html_tag = r'<[^>]+>'
# Expand contracted negations before punctuation is stripped, so the
# word 'not' survives cleaning.
negations_ = {"isn't":"is not", "can't":"can not","couldn't":"could not", "hasn't":"has not",
"hadn't":"had not","won't":"will not",
"wouldn't":"would not","aren't":"are not",
"haven't":"have not", "doesn't":"does not","didn't":"did not",
"don't":"do not","shouldn't":"should not","wasn't":"was not", "weren't":"were not",
"mightn't":"might not",
"mustn't":"must not"}
negation_pattern = re.compile(r'\b(' + '|'.join(negations_.keys()) + r')\b')
from nltk.tokenize import WordPunctTokenizer
tokenizer = WordPunctTokenizer()
def data_cleaner(text):
    """Normalize one tweet: strip mentions/URLs/hashtags/HTML, lowercase,
    expand negation contractions, keep letters only, and re-tokenize.

    Returns the cleaned string, or 'NC' ("not cleanable") when the input is
    not usable text (e.g. NaN from pandas raises TypeError in re.sub).
    """
    try:
        stripped = re.sub(combined_pat, '', text)
        stripped = re.sub(www_pat, '', stripped)
        cleantags = re.sub(html_tag, '', stripped)
        lower_case = cleantags.lower()
        neg_handled = negation_pattern.sub(lambda x: negations_[x.group()], lower_case)
        letters_only = re.sub("[^a-zA-Z]", " ", neg_handled)
        tokens = tokenizer.tokenize(letters_only)
        return (" ".join(tokens)).strip()
    except Exception:
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors.
        return 'NC'
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
def post_process(data, n=1048575):
    """Clean the first *n* rows' Tweet_text and return the frame, re-indexed.

    NOTE(review): also mutates the caller's DataFrame slice via the column
    assignment — confirm that is acceptable for all call sites.
    """
    data = data.head(n)
    data['Tweet_text'] = data['Tweet_text'].progress_map(data_cleaner)
    # Idiom: drop the old index in one step instead of resetting it into a
    # column named 'index' and then dropping that column.
    data.reset_index(drop=True, inplace=True)
    return data
data = post_process(data)
print(data['Tweet_text'])
# Strip leftover punctuation / mojibake characters one at a time.
for letter in '@—.¦!)(':
    data['Tweet_text']= data['Tweet_text'].str.replace(letter,'')
print(data.Tweet_text.head(20))
for letter in 'Â':
    data['Tweet_text']= data['Tweet_text'].str.replace(letter,'')
print(data.Tweet_text.head(20))
# # After Cleaning data
# Recompute token/character counts on the cleaned text.
data['word_count'] = data['Tweet_text'].apply(lambda x: len(str(x).split(" ")))
data[['Tweet_text','word_count']].head(15)
data['char_count'] = data['Tweet_text'].str.len() ## this also includes spaces
data[['Tweet_text','char_count']].head(30)
print(data.head(20))
import pandas
# Label the polluter tweets with class '1'.
content_polluters_tweets=data
content_polluters_tweets['Class']='1'
# content_polluters_tweets.to_csv("datasets/Clean_content_polluters_tweets.csv", index=False)
content_polluters_tweets.head()
# # legitimate_users_tweets
# # Loading dataset
data = pd.read_excel (r'datasets/legitimate_users_tweets.xlsx')
print(data.head())
# # Dataset analysis
print(data[data.isnull().any(axis=1)].head())
import numpy as np
np.sum(data.isnull().any(axis=1))
data.isnull().any(axis=0)
print(data.info())
print(data.describe())
# NOTE(review): as above, iterating the DataFrame yields column names, so
# this word cloud is built from headers — confirm intent.
neg = data
neg_string = []
for t in neg:
    neg_string.append(t)
neg_string = pandas.Series(neg_string).str.cat(sep=' ')
wordcloud = WordCloud(width=1600, height=800,max_font_size=200).generate(neg_string)
plt.figure(figsize=(12,10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
data['word_count'] = data['Tweet_text'].apply(lambda x: len(str(x).split(" ")))
data[['Tweet_text','word_count']].head()
data['char_count'] = data['Tweet_text'].str.len() ## this also includes spaces
data[['Tweet_text','char_count']].head()
# # Cleaning dataset
# # Data cleaning script
# NOTE(review): this whole cleaning section duplicates the one above —
# consider factoring it into shared helpers.
import re
pat_1 = r"(?:\@|https?\://)\S+"
pat_2 = r'#\w+ ?'
part_3= r'br'
combined_pat = r'|'.join((pat_1, pat_2,part_3))
www_pat = r'www.[^ ]+'
html_tag = r'<[^>]+>'
negations_ = {"isn't":"is not", "can't":"can not","couldn't":"could not", "hasn't":"has not",
"hadn't":"had not","won't":"will not",
"wouldn't":"would not","aren't":"are not",
"haven't":"have not", "doesn't":"does not","didn't":"did not",
"don't":"do not","shouldn't":"should not","wasn't":"was not", "weren't":"were not",
"mightn't":"might not",
"mustn't":"must not"}
negation_pattern = re.compile(r'\b(' + '|'.join(negations_.keys()) + r')\b')
from nltk.tokenize import WordPunctTokenizer
tokenizer = WordPunctTokenizer()
def data_cleaner(text):
    """Normalize one tweet (duplicate of the cleaner defined earlier in this
    script): strip mentions/URLs/hashtags/HTML, lowercase, expand negation
    contractions, keep letters only, and re-tokenize.

    Returns the cleaned string, or 'NC' for non-text input.
    """
    try:
        stripped = re.sub(combined_pat, '', text)
        stripped = re.sub(www_pat, '', stripped)
        cleantags = re.sub(html_tag, '', stripped)
        lower_case = cleantags.lower()
        neg_handled = negation_pattern.sub(lambda x: negations_[x.group()], lower_case)
        letters_only = re.sub("[^a-zA-Z]", " ", neg_handled)
        tokens = tokenizer.tokenize(letters_only)
        return (" ".join(tokens)).strip()
    except Exception:
        # Bug fix: narrowed the original bare `except:` so SystemExit /
        # KeyboardInterrupt are no longer swallowed.
        return 'NC'
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
def post_process(data, n=1048575):
    """Clean the first *n* rows' Tweet_text and return the frame, re-indexed
    (duplicate of the helper defined earlier in this script).
    """
    data = data.head(n)
    data['Tweet_text'] = data['Tweet_text'].progress_map(data_cleaner)
    # Idiom: reset_index(drop=True) replaces the reset + column-drop pair.
    data.reset_index(drop=True, inplace=True)
    return data
data = post_process(data)
print(data['Tweet_text'])
# Strip leftover punctuation / mojibake characters one at a time.
for letter in '@—.¦!)(':
    data['Tweet_text']= data['Tweet_text'].str.replace(letter,'')
print(data.Tweet_text.head())
for letter in 'Â':
    data['Tweet_text']= data['Tweet_text'].str.replace(letter,'')
print(data.Tweet_text.head())
# # After Cleaning data
# Recompute token/character counts on the cleaned text.
data['word_count'] = data['Tweet_text'].apply(lambda x: len(str(x).split(" ")))
data[['Tweet_text','word_count']].head()
data['char_count'] = data['Tweet_text'].str.len() ## this also includes spaces
data[['Tweet_text','char_count']].head()
print(data.head(20))
import pandas
# Label the legitimate-user tweets with class '0', then stack both classes
# into one frame for downstream modelling.
legitimate_users_tweets=data
legitimate_users_tweets['Class'] = '0'
# legitimate_users_tweets.to_csv("datasets/Clean_legitimate_users_tweets.csv", index=False)
legitimate_users_tweets.head()
all_data = pd.concat([content_polluters_tweets, legitimate_users_tweets])
# all_data.to_csv("datasets/all_data_Pollutors_legitimate.csv", index=False)
print(all_data)
|
class parentclass():
    """Parent class in a method-overriding demo (messages are Turkish)."""
    def send_message(self):
        # Prints (Turkish): "a message will be given in this area".
        print("bu alan içerisnde mesaj verilecektir")
class basedclass(parentclass):
    """Subclass that overrides send_message with its own Turkish message."""
    def send_message(self):
        # Prints (Turkish): "message coming via the base class".
        print("base class üzerinden glen mesaj")
# Exercise both implementations: the subclass override shadows the parent's.
for messenger in (parentclass(), basedclass()):
    messenger.send_message()
|
import random
import time
import decimal
def test_sort(lista):
for i in range(len(lista)-1):
if lista[i] > lista[i+1]:
return False
return True
def interclasre(lista1, lista2):
    """Merge two sorted lists into one sorted list (merge-sort merge step)."""
    merged = []
    i = j = 0
    while i < len(lista1) and j < len(lista2):
        if lista1[i] < lista2[j]:
            merged.append(lista1[i])
            i += 1
        else:
            merged.append(lista2[j])
            j += 1
    # At most one of the two tails is non-empty; append whatever remains.
    merged.extend(lista1[i:])
    merged.extend(lista2[j:])
    return merged
def Timsort(lista):
    """Simplified Timsort: insertion sort (in place) for runs of <= 64
    elements, otherwise merge sort via recursive halving.

    Returns the same list object for small inputs, a new list otherwise.
    """
    if len(lista) <= 64:
        for idx in range(1, len(lista)):
            pos = idx
            # Bubble the element left until it reaches its slot.
            while pos > 0 and lista[pos] < lista[pos - 1]:
                lista[pos], lista[pos - 1] = lista[pos - 1], lista[pos]
                pos -= 1
        return lista
    mid = len(lista) // 2
    return interclasre(Timsort(lista[:mid]), Timsort(lista[mid:]))
def merge_sort(lista):
    """Classic top-down merge sort; returns a new sorted list.

    Bug fix: the original base case was `len(lista) == 1`, so an empty list
    split into two empty halves forever (infinite recursion / stack
    overflow); the base case now covers len <= 1.
    """
    if len(lista) <= 1:
        return lista
    mid = len(lista) // 2
    return interclasre(merge_sort(lista[:mid]), merge_sort(lista[mid:]))
def count_sort(lista):
    """Counting sort for non-negative ints; returns a new sorted list, or
    False when max(lista) >= 100000000 (count array would be too large).

    NOTE(review): crashes on an empty list (max() of no elements) and on
    negative values — confirm callers only pass non-empty, non-negative data.
    """
    # Hoisted: the original recomputed max(lista) in the guard and in two
    # loop conditions, each an O(n) pass.
    biggest = max(lista)
    if biggest >= 100000000:
        return False
    count = [0] * (biggest + 1)
    for value in lista:
        count[value] += 1
    sol = []
    for value in range(biggest + 1):
        sol.extend([value] * count[value])
    return sol
def bubble_sort(lista):
    """In-place bubble sort; returns the list, or False when it has 100000+
    elements (guard against the quadratic runtime on large inputs).
    """
    # Removed the unused `inceput = time.time()` left over from profiling.
    if len(lista) >= 100000:
        return False
    while 1:
        nr = 0
        for i in range(len(lista)-1):
            if lista[i] > lista[i+1]:
                lista[i], lista[i+1] = lista[i+1], lista[i]
                nr = nr + 1
        # A full pass with no swaps means the list is sorted.
        if nr == 0:
            break
    return lista
# Benchmark driver: for each test, generate a random list and time every
# sorting algorithm on its own copy of the same data.
T = int(input("numarul de teste ="))
for i in range(T):
    N = int(input(f"numarul de elemente din testul {i+1} = "))
    M = int(input(f"valoare maxima posibila din testul {i+1} = "))
    L = [random.randrange(0, M+1) for _ in range(N)]  # lista cu elemente ce va fi sortata
    # Bug fix: `lista_copie = L` only aliased L, so the first in-place sort
    # also sorted L itself and every later algorithm was timed (and checked)
    # on already-sorted input, invalidating the whole benchmark. Each
    # algorithm now receives its own shallow copy.
    lista_copie = L.copy()
    start = time.time()
    lista_copie = bubble_sort(lista_copie)
    final = time.time()
    d = decimal.Decimal(final - start)
    # NOTE(review): these truthiness checks also treat a sorted EMPTY list
    # as a failure — acceptable only while N > 0.
    if lista_copie:
        if test_sort(lista_copie):
            print("lista a fost sortata de bubble sort")
            print(f"timpul de executare = {d}")
        else:
            print("lista nu a fost sortata de bubble sort")
            print(f"timpul de executare = {d}")
    else:
        print("lista nu poate fi sortata de bubble sort deoarece numarul elementelor este prea mare")
    lista_copie = L.copy()
    start = time.time()
    lista_copie = count_sort(lista_copie)
    final = time.time()
    d = decimal.Decimal(final - start)
    if lista_copie:
        if test_sort(lista_copie):
            print("lista a fost sortata de count sort")
            print(f"timpul de executare = {d}")
        else:
            print("lista nu a fost sortata de count sort")
            print(f"timpul de executare = {d}")
    else:
        print("lista nu a putut fi sortata de count sort din cauza faptului ca ocupa prea multa memorie")
    lista_copie = L.copy()
    start = time.time()
    lista_copie = merge_sort(lista_copie)
    final = time.time()
    d = decimal.Decimal(final - start)
    if test_sort(lista_copie):
        print("lista a fost sortata de merge sort")
        print(f"timpul de executare = {d}")
    else:
        print("lista nu a fost sortata de merge sort")
        print(f"timpul de executare = {d}")
    lista_copie = L.copy()
    start = time.time()
    lista_copie = Timsort(lista_copie)
    final = time.time()
    d = decimal.Decimal(final - start)
    if test_sort(lista_copie):
        print("lista a fost sortata de Timsort")
        print(f"timpul de executare = {d}")
    else:
        print("lista nu a fost sortata de Timsort")
        print(f"timpul de executare = {d}")
    lista_copie = L.copy()
    start = time.time()
    lista_copie.sort()
    final = time.time()
    d = decimal.Decimal(final - start)
    if test_sort(lista_copie):
        print("lista a fost sortata de sortarea naturala a limbajului de programare")
        print(f"timpul de executare = {d}")
    else:
        print("lista nu a fost sortata de sortarea naturala a limbajului de programare")
        print(f"timpul de executare = {d}")
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import re
import os
try:
from PySide.QtCore import *
from PySide.QtGui import *
except:
print ("Error: This program needs PySide module.", file=sys.stderr)
sys.exit(1)
class InfoWindows(QMessageBox):
    """ Simple informative modal message """
    def __init__(self, text):
        # Blocks (exec_) until the user dismisses the message.
        super().__init__()
        self.setText(text)
        self.setModal(True)
        self.exec_()
class NewNormDotExo(QDialog):
    """ Modal widget to create and edit Normal<->Dotted exercices """
    def __init__(self, parent, item="", diff=1):
        """Build the editor dialog and run it modally.

        :param parent: owner widget; its populate() runs when we finish
        :param item: name of an existing exercise to load ("" = new exercise)
        :param diff: difficulty used to locate the existing exercise file
        """
        super().__init__(parent)
        self.diff = diff
        # Path of a previously loaded file, removed on save when the
        # name/difficulty changed so the old file does not linger.
        self.prev_file = ""
        self.setResult(0)
        self.finished.connect(parent.populate)
        self.setGeometry(300, 300, 500, 400)
        name_label = QLabel("Nom du fichier")
        self.name_field = QLineEdit()
        #~ Should be auto or not ?
        difficulty_label = QLabel("Difficulté")
        self.difficulty_value = QSpinBox()
        self.difficulty_value.setMinimum(1)
        self.difficulty_value.setMaximum(10)
        self.difficulty_value.setValue(self.diff)
        list_add_btn = QPushButton("Ajouter")
        list_rm_btn = QPushButton("Supprimer")
        list_add_btn.clicked.connect(self.add)
        list_rm_btn.clicked.connect(self.delete)
        ok_btn = QPushButton("Sauvegarder et quitter")
        ok_btn.clicked.connect(self.save)
        abort_btn = QPushButton("Annuler")
        abort_btn.clicked.connect(self.close)
        self.list_widget = self.listExo()
        layout = QGridLayout()
        layout.addWidget(name_label, 0, 0)
        layout.addWidget(self.name_field, 0, 1)
        layout.addWidget(difficulty_label, 1, 0)
        layout.addWidget(self.difficulty_value, 1, 1)
        layout.addWidget(self.list_widget, 5, 0, 1, 2)
        layout.addWidget(list_add_btn, 6, 0)
        layout.addWidget(list_rm_btn, 6, 1)
        layout.addWidget(ok_btn, 7, 0)
        layout.addWidget(abort_btn, 7, 1)
        self.setLayout(layout)
        # Bug fix: `item is not ""` tested object identity and only worked
        # through CPython small-string interning; compare by value.
        if item != "":
            self.load(item)
        self.setModal(True)
        self.exec_()
    def listExo(self):
        """Build the two-column (dot checkbox, expression) exercise table."""
        list_wid = QTableWidget()
        list_wid.setColumnCount(2)
        list_wid.setHorizontalHeaderLabels(["Dot", "Expression"])
        list_wid.setColumnWidth(0, 40)
        list_wid.horizontalHeader().setStretchLastSection(True)
        list_wid.setSortingEnabled(False)
        list_wid.setSelectionMode(QAbstractItemView.SingleSelection)
        list_wid.setEditTriggers(QAbstractItemView.AllEditTriggers)
        list_wid.itemChanged.connect(self.verify)
        return list_wid
    def add(self, value="None", state=Qt.Unchecked):
        """ Create an entry with needed flags """
        qi = QTableWidgetItem(value)
        qi.setFlags(Qt.ItemIsEditable | Qt.ItemIsSelectable | Qt.ItemIsEnabled)
        qdot = QTableWidgetItem()
        qdot.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
        qdot.setCheckState(state)
        # Append one row; rowCount is 1-based relative to item indices.
        self.list_widget.setRowCount(self.list_widget.rowCount() + 1)
        self.list_widget.setItem(self.list_widget.rowCount() - 1, 0, qdot)
        self.list_widget.setItem(self.list_widget.rowCount() - 1, 1, qi)
    def delete(self):
        """ Delete the current item """
        self.list_widget.removeRow(self.list_widget.currentRow())
    def verify(self, item):
        """Keep the expression cell non-empty (placeholder 'None')."""
        # ~ Should check for valid lisp expr
        if (item.text() == "") and (item.column() == 1):
            item.setText("None")
    def iterAllItems(self):
        """Yield (checkbox item, expression item) pairs for every row."""
        for i in range(self.list_widget.rowCount()):
            yield self.list_widget.item(i, 0), self.list_widget.item(i, 1)
    def save(self):
        """Validate and write the exercise file, then close with result 1."""
        if self.name_field.text() != "":
            if self.list_widget.rowCount() > 0:
                location = 'save/NormDot_{0}_{1}'.format(self.difficulty_value.value(), self.name_field.text())
                # Context manager replaces the manual try/finally close.
                with open(location, 'w+') as out:
                    out.write("# Normal/Dotted serie\n")
                    for s, item in self.iterAllItems():
                        out.write("{0}\t{1}".format(s.checkState(), item.text()))
                        out.write("\n")
                # Bug fix: when a loaded exercise was saved under the SAME
                # name/difficulty, prev_file == location and the original
                # os.remove() deleted the file it had just written.
                if self.prev_file != "" and self.prev_file != location:
                    os.remove(self.prev_file)
                self.done(1)
            else:
                InfoWindows("Entrez au moins un exercice")
        else:
            InfoWindows("Entrez un nom de fichier")
    def load(self, exo):
        """Populate the dialog from an existing exercise file."""
        location = 'save/NormDot_{0}_{1}'.format(self.diff, exo)
        self.prev_file = location
        self.name_field.setText(exo)
        try:
            with open(location, 'r+') as src:
                src.readline()  # skip the "# Normal/Dotted serie" header
                for line in src:
                    # NOTE(review): the saved checkbox state (field 0) is
                    # not restored here — confirm whether that is intended.
                    self.add(line.rstrip('\n\r').split("\t")[1])
        except IOError as e:
            print(e)
            self.done(0)
class NewNormGraphExo(QDialog):
    """ Modal widget to create and edit Normal->Graph exercices """
    def __init__(self, parent, item="", diff=1):
        """Build the editor dialog and run it modally.

        :param parent: owner widget; its populate() runs when we finish
        :param item: name of an existing exercise to load ("" = new exercise)
        :param diff: difficulty used to locate the existing exercise file
        """
        super().__init__(parent)
        self.diff = diff
        # Path of a previously loaded file, removed on save when the
        # name/difficulty changed so the old file does not linger.
        self.prev_file = ""
        self.setResult(0)
        self.finished.connect(parent.populate)
        self.setGeometry(300, 300, 500, 400)
        name_label = QLabel("Nom du fichier")
        self.name_field = QLineEdit()
        #~ Should be auto or not ?
        difficulty_label = QLabel("Difficulté")
        self.difficulty_value = QSpinBox()
        self.difficulty_value.setMinimum(1)
        self.difficulty_value.setMaximum(10)
        self.difficulty_value.setValue(self.diff)
        list_add_btn = QPushButton("Ajouter")
        list_rm_btn = QPushButton("Supprimer")
        list_add_btn.clicked.connect(self.add)
        list_rm_btn.clicked.connect(self.delete)
        ok_btn = QPushButton("Sauvegarder et quitter")
        ok_btn.clicked.connect(self.save)
        abort_btn = QPushButton("Annuler")
        abort_btn.clicked.connect(self.close)
        self.list_widget = self.listExo()
        layout = QGridLayout()
        layout.addWidget(name_label, 0, 0)
        layout.addWidget(self.name_field, 0, 1)
        layout.addWidget(difficulty_label, 1, 0)
        layout.addWidget(self.difficulty_value, 1, 1)
        layout.addWidget(self.list_widget, 5, 0, 1, 2)
        layout.addWidget(list_add_btn, 6, 0)
        layout.addWidget(list_rm_btn, 6, 1)
        layout.addWidget(ok_btn, 7, 0)
        layout.addWidget(abort_btn, 7, 1)
        self.setLayout(layout)
        # Bug fix: `item is not ""` tested object identity; compare by value.
        if item != "":
            self.load(item)
        self.setModal(True)
        self.exec_()
    def listExo(self):
        """Build the single-column (expression) exercise table."""
        list_wid = QTableWidget()
        list_wid.setColumnCount(1)
        list_wid.setHorizontalHeaderLabels(["Expression"])
        list_wid.horizontalHeader().setStretchLastSection(True)
        list_wid.setSortingEnabled(False)
        list_wid.setSelectionMode(QAbstractItemView.SingleSelection)
        list_wid.setEditTriggers(QAbstractItemView.AllEditTriggers)
        list_wid.itemChanged.connect(self.verify)
        return list_wid
    def add(self, value="None"):
        """ Create an entry with needed flags """
        qi = QTableWidgetItem(value)
        qi.setFlags(Qt.ItemIsEditable | Qt.ItemIsSelectable | Qt.ItemIsEnabled)
        self.list_widget.setRowCount(self.list_widget.rowCount() + 1)
        self.list_widget.setItem(self.list_widget.rowCount() - 1, 0, qi)
    def delete(self):
        """Delete the currently selected row."""
        self.list_widget.removeRow(self.list_widget.currentRow())
    def verify(self, item):
        """Keep the expression cell non-empty (placeholder 'None')."""
        # ~ Should check for valid lisp expr
        if (item.text() == ""):
            item.setText("None")
    def iterAllItems(self):
        """Yield the expression item of every row."""
        for i in range(self.list_widget.rowCount()):
            yield self.list_widget.item(i, 0)
    def save(self):
        """Validate and write the exercise file, then close with result 1."""
        if self.name_field.text() != "":
            if self.list_widget.rowCount() > 0:
                location = 'save/NormGraph_{0}_{1}'.format(self.difficulty_value.value(), self.name_field.text())
                # Context manager replaces the manual try/finally close.
                with open(location, 'w+') as out:
                    out.write("# Normal/Graph serie\n")
                    for item in self.iterAllItems():
                        out.write("{0}".format(item.text()))
                        out.write("\n")
                # Bug fix: do not delete the file we just wrote when the
                # name/difficulty did not change (prev_file == location).
                if self.prev_file != "" and self.prev_file != location:
                    os.remove(self.prev_file)
                self.done(1)
            else:
                InfoWindows("Entrez au moins un exercice")
        else:
            InfoWindows("Entrez un nom de fichier")
    def load(self, exo):
        """Populate the dialog from an existing exercise file."""
        location = 'save/NormGraph_{0}_{1}'.format(self.diff, exo)
        self.prev_file = location
        self.name_field.setText(exo)
        try:
            with open(location, 'r+') as src:
                src.readline()  # skip the "# Normal/Graph serie" header
                for line in src:
                    self.add(line.rstrip('\n\r'))
        except IOError as e:
            print(e)
            self.done(0)
class NewGraphNormExo(QDialog):
    """Placeholder for the Graph->Normal exercise editor (not implemented)."""
    pass
|
from iranlowo import adr
|
import os # run os commands
import subprocess # capture os output
class MetaData:
    """Collects basic video metadata via macOS `mdls` plus OpenCV for FPS."""

    def __init__(self, file):
        self.file = file
        # NOTE(review): the shell command is built by string concatenation,
        # so paths containing spaces break and untrusted paths would be
        # shell-injection prone — confirm inputs are controlled.
        for attr, mdls_key in (
            ("codecs", "kMDItemCodecs"),
            ("height", "kMDItemPixelHeight"),
            ("width", "kMDItemPixelWidth"),
        ):
            setattr(self, attr, self.run_cmd("mdls -name " + mdls_key + " " + file))
        # OpenCV is imported lazily so the module loads without cv2 installed.
        import cv2
        capture = cv2.VideoCapture(file)
        self.fps = capture.get(cv2.CAP_PROP_FPS)

    def run_cmd(self, cmd_str):
        """Run a shell command; return the text after the first ' = ' in stdout."""
        completed = subprocess.run([cmd_str], shell=True, stdout=subprocess.PIPE)
        output = completed.stdout.decode('utf-8')
        return output.split(" = ")[1]

    def get_codecs(self):
        """Raw `mdls` codecs value (string, still mdls-formatted)."""
        return self.codecs

    def get_height(self):
        """Raw `mdls` pixel-height value (string)."""
        return self.height

    def get_width(self):
        """Raw `mdls` pixel-width value (string)."""
        return self.width

    def get_fps(self):
        """Frame rate reported by OpenCV (float)."""
        return self.fps
|
# Generated from Wordlify.g4 by ANTLR 4.9.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .WordlifyParser import WordlifyParser
else:
from WordlifyParser import WordlifyParser
# This class defines a complete listener for a parse tree produced by WordlifyParser.
class WordlifyListener(ParseTreeListener):
    """Empty base listener for parse trees produced by WordlifyParser.

    Generated by ANTLR from Wordlify.g4 — do not edit by hand.  Subclass it
    and override only the enter/exit callbacks you need.
    """

    # Enter a parse tree produced by WordlifyParser#program.
    def enterProgram(self, ctx:WordlifyParser.ProgramContext):
        pass

    # Exit a parse tree produced by WordlifyParser#program.
    def exitProgram(self, ctx:WordlifyParser.ProgramContext):
        pass

    # Enter a parse tree produced by WordlifyParser#fn_def.
    def enterFn_def(self, ctx:WordlifyParser.Fn_defContext):
        pass

    # Exit a parse tree produced by WordlifyParser#fn_def.
    def exitFn_def(self, ctx:WordlifyParser.Fn_defContext):
        pass

    # Enter a parse tree produced by WordlifyParser#block_instr.
    def enterBlock_instr(self, ctx:WordlifyParser.Block_instrContext):
        pass

    # Exit a parse tree produced by WordlifyParser#block_instr.
    def exitBlock_instr(self, ctx:WordlifyParser.Block_instrContext):
        pass

    # Enter a parse tree produced by WordlifyParser#foreach.
    def enterForeach(self, ctx:WordlifyParser.ForeachContext):
        pass

    # Exit a parse tree produced by WordlifyParser#foreach.
    def exitForeach(self, ctx:WordlifyParser.ForeachContext):
        pass

    # Enter a parse tree produced by WordlifyParser#while_instr.
    def enterWhile_instr(self, ctx:WordlifyParser.While_instrContext):
        pass

    # Exit a parse tree produced by WordlifyParser#while_instr.
    def exitWhile_instr(self, ctx:WordlifyParser.While_instrContext):
        pass

    # Enter a parse tree produced by WordlifyParser#if_instr.
    def enterIf_instr(self, ctx:WordlifyParser.If_instrContext):
        pass

    # Exit a parse tree produced by WordlifyParser#if_instr.
    def exitIf_instr(self, ctx:WordlifyParser.If_instrContext):
        pass

    # Enter a parse tree produced by WordlifyParser#if_cond.
    def enterIf_cond(self, ctx:WordlifyParser.If_condContext):
        pass

    # Exit a parse tree produced by WordlifyParser#if_cond.
    def exitIf_cond(self, ctx:WordlifyParser.If_condContext):
        pass

    # Enter a parse tree produced by WordlifyParser#then.
    def enterThen(self, ctx:WordlifyParser.ThenContext):
        pass

    # Exit a parse tree produced by WordlifyParser#then.
    def exitThen(self, ctx:WordlifyParser.ThenContext):
        pass

    # Enter a parse tree produced by WordlifyParser#else_if.
    def enterElse_if(self, ctx:WordlifyParser.Else_ifContext):
        pass

    # Exit a parse tree produced by WordlifyParser#else_if.
    def exitElse_if(self, ctx:WordlifyParser.Else_ifContext):
        pass

    # Enter a parse tree produced by WordlifyParser#else_block.
    def enterElse_block(self, ctx:WordlifyParser.Else_blockContext):
        pass

    # Exit a parse tree produced by WordlifyParser#else_block.
    def exitElse_block(self, ctx:WordlifyParser.Else_blockContext):
        pass

    # Enter a parse tree produced by WordlifyParser#cond.
    def enterCond(self, ctx:WordlifyParser.CondContext):
        pass

    # Exit a parse tree produced by WordlifyParser#cond.
    def exitCond(self, ctx:WordlifyParser.CondContext):
        pass

    # Enter a parse tree produced by WordlifyParser#cond1.
    def enterCond1(self, ctx:WordlifyParser.Cond1Context):
        pass

    # Exit a parse tree produced by WordlifyParser#cond1.
    def exitCond1(self, ctx:WordlifyParser.Cond1Context):
        pass

    # Enter a parse tree produced by WordlifyParser#single_cond.
    def enterSingle_cond(self, ctx:WordlifyParser.Single_condContext):
        pass

    # Exit a parse tree produced by WordlifyParser#single_cond.
    def exitSingle_cond(self, ctx:WordlifyParser.Single_condContext):
        pass

    # Enter a parse tree produced by WordlifyParser#comparison.
    def enterComparison(self, ctx:WordlifyParser.ComparisonContext):
        pass

    # Exit a parse tree produced by WordlifyParser#comparison.
    def exitComparison(self, ctx:WordlifyParser.ComparisonContext):
        pass

    # Enter a parse tree produced by WordlifyParser#expr.
    def enterExpr(self, ctx:WordlifyParser.ExprContext):
        pass

    # Exit a parse tree produced by WordlifyParser#expr.
    def exitExpr(self, ctx:WordlifyParser.ExprContext):
        pass

    # Enter a parse tree produced by WordlifyParser#arith_expr.
    def enterArith_expr(self, ctx:WordlifyParser.Arith_exprContext):
        pass

    # Exit a parse tree produced by WordlifyParser#arith_expr.
    def exitArith_expr(self, ctx:WordlifyParser.Arith_exprContext):
        pass

    # Enter a parse tree produced by WordlifyParser#arith_expr1.
    def enterArith_expr1(self, ctx:WordlifyParser.Arith_expr1Context):
        pass

    # Exit a parse tree produced by WordlifyParser#arith_expr1.
    def exitArith_expr1(self, ctx:WordlifyParser.Arith_expr1Context):
        pass

    # Enter a parse tree produced by WordlifyParser#arith_elem.
    def enterArith_elem(self, ctx:WordlifyParser.Arith_elemContext):
        pass

    # Exit a parse tree produced by WordlifyParser#arith_elem.
    def exitArith_elem(self, ctx:WordlifyParser.Arith_elemContext):
        pass

    # Enter a parse tree produced by WordlifyParser#concat.
    def enterConcat(self, ctx:WordlifyParser.ConcatContext):
        pass

    # Exit a parse tree produced by WordlifyParser#concat.
    def exitConcat(self, ctx:WordlifyParser.ConcatContext):
        pass

    # Enter a parse tree produced by WordlifyParser#concat_elem.
    def enterConcat_elem(self, ctx:WordlifyParser.Concat_elemContext):
        pass

    # Exit a parse tree produced by WordlifyParser#concat_elem.
    def exitConcat_elem(self, ctx:WordlifyParser.Concat_elemContext):
        pass

    # Enter a parse tree produced by WordlifyParser#fn_call.
    def enterFn_call(self, ctx:WordlifyParser.Fn_callContext):
        pass

    # Exit a parse tree produced by WordlifyParser#fn_call.
    def exitFn_call(self, ctx:WordlifyParser.Fn_callContext):
        pass

    # Enter a parse tree produced by WordlifyParser#atom_instr.
    def enterAtom_instr(self, ctx:WordlifyParser.Atom_instrContext):
        pass

    # Exit a parse tree produced by WordlifyParser#atom_instr.
    def exitAtom_instr(self, ctx:WordlifyParser.Atom_instrContext):
        pass

    # Enter a parse tree produced by WordlifyParser#assign.
    def enterAssign(self, ctx:WordlifyParser.AssignContext):
        pass

    # Exit a parse tree produced by WordlifyParser#assign.
    def exitAssign(self, ctx:WordlifyParser.AssignContext):
        pass

    # Enter a parse tree produced by WordlifyParser#array_append.
    def enterArray_append(self, ctx:WordlifyParser.Array_appendContext):
        pass

    # Exit a parse tree produced by WordlifyParser#array_append.
    def exitArray_append(self, ctx:WordlifyParser.Array_appendContext):
        pass

    # Enter a parse tree produced by WordlifyParser#array_elem.
    def enterArray_elem(self, ctx:WordlifyParser.Array_elemContext):
        pass

    # Exit a parse tree produced by WordlifyParser#array_elem.
    def exitArray_elem(self, ctx:WordlifyParser.Array_elemContext):
        pass

    # Enter a parse tree produced by WordlifyParser#import_call.
    def enterImport_call(self, ctx:WordlifyParser.Import_callContext):
        pass

    # Exit a parse tree produced by WordlifyParser#import_call.
    def exitImport_call(self, ctx:WordlifyParser.Import_callContext):
        pass

    # Enter a parse tree produced by WordlifyParser#own_fn_call.
    def enterOwn_fn_call(self, ctx:WordlifyParser.Own_fn_callContext):
        pass

    # Exit a parse tree produced by WordlifyParser#own_fn_call.
    def exitOwn_fn_call(self, ctx:WordlifyParser.Own_fn_callContext):
        pass

    # Enter a parse tree produced by WordlifyParser#exist.
    def enterExist(self, ctx:WordlifyParser.ExistContext):
        pass

    # Exit a parse tree produced by WordlifyParser#exist.
    def exitExist(self, ctx:WordlifyParser.ExistContext):
        pass

    # Enter a parse tree produced by WordlifyParser#is_file.
    def enterIs_file(self, ctx:WordlifyParser.Is_fileContext):
        pass

    # Exit a parse tree produced by WordlifyParser#is_file.
    def exitIs_file(self, ctx:WordlifyParser.Is_fileContext):
        pass

    # Enter a parse tree produced by WordlifyParser#is_dir.
    def enterIs_dir(self, ctx:WordlifyParser.Is_dirContext):
        pass

    # Exit a parse tree produced by WordlifyParser#is_dir.
    def exitIs_dir(self, ctx:WordlifyParser.Is_dirContext):
        pass

    # Enter a parse tree produced by WordlifyParser#print_instr.
    def enterPrint_instr(self, ctx:WordlifyParser.Print_instrContext):
        pass

    # Exit a parse tree produced by WordlifyParser#print_instr.
    def exitPrint_instr(self, ctx:WordlifyParser.Print_instrContext):
        pass

    # Enter a parse tree produced by WordlifyParser#rename.
    def enterRename(self, ctx:WordlifyParser.RenameContext):
        pass

    # Exit a parse tree produced by WordlifyParser#rename.
    def exitRename(self, ctx:WordlifyParser.RenameContext):
        pass

    # Enter a parse tree produced by WordlifyParser#remove.
    def enterRemove(self, ctx:WordlifyParser.RemoveContext):
        pass

    # Exit a parse tree produced by WordlifyParser#remove.
    def exitRemove(self, ctx:WordlifyParser.RemoveContext):
        pass

    # Enter a parse tree produced by WordlifyParser#move.
    def enterMove(self, ctx:WordlifyParser.MoveContext):
        pass

    # Exit a parse tree produced by WordlifyParser#move.
    def exitMove(self, ctx:WordlifyParser.MoveContext):
        pass

    # Enter a parse tree produced by WordlifyParser#copy.
    def enterCopy(self, ctx:WordlifyParser.CopyContext):
        pass

    # Exit a parse tree produced by WordlifyParser#copy.
    def exitCopy(self, ctx:WordlifyParser.CopyContext):
        pass

    # Enter a parse tree produced by WordlifyParser#download.
    def enterDownload(self, ctx:WordlifyParser.DownloadContext):
        pass

    # Exit a parse tree produced by WordlifyParser#download.
    def exitDownload(self, ctx:WordlifyParser.DownloadContext):
        pass

    # Enter a parse tree produced by WordlifyParser#write.
    def enterWrite(self, ctx:WordlifyParser.WriteContext):
        pass

    # Exit a parse tree produced by WordlifyParser#write.
    def exitWrite(self, ctx:WordlifyParser.WriteContext):
        pass

    # Enter a parse tree produced by WordlifyParser#read.
    def enterRead(self, ctx:WordlifyParser.ReadContext):
        pass

    # Exit a parse tree produced by WordlifyParser#read.
    def exitRead(self, ctx:WordlifyParser.ReadContext):
        pass

    # Enter a parse tree produced by WordlifyParser#wait_instr.
    def enterWait_instr(self, ctx:WordlifyParser.Wait_instrContext):
        pass

    # Exit a parse tree produced by WordlifyParser#wait_instr.
    def exitWait_instr(self, ctx:WordlifyParser.Wait_instrContext):
        pass

    # Enter a parse tree produced by WordlifyParser#execute.
    def enterExecute(self, ctx:WordlifyParser.ExecuteContext):
        pass

    # Exit a parse tree produced by WordlifyParser#execute.
    def exitExecute(self, ctx:WordlifyParser.ExecuteContext):
        pass

    # Enter a parse tree produced by WordlifyParser#get_files.
    def enterGet_files(self, ctx:WordlifyParser.Get_filesContext):
        pass

    # Exit a parse tree produced by WordlifyParser#get_files.
    def exitGet_files(self, ctx:WordlifyParser.Get_filesContext):
        pass

    # Enter a parse tree produced by WordlifyParser#date_modified.
    def enterDate_modified(self, ctx:WordlifyParser.Date_modifiedContext):
        pass

    # Exit a parse tree produced by WordlifyParser#date_modified.
    def exitDate_modified(self, ctx:WordlifyParser.Date_modifiedContext):
        pass

    # Enter a parse tree produced by WordlifyParser#size.
    def enterSize(self, ctx:WordlifyParser.SizeContext):
        pass

    # Exit a parse tree produced by WordlifyParser#size.
    def exitSize(self, ctx:WordlifyParser.SizeContext):
        pass

    # Enter a parse tree produced by WordlifyParser#exit.
    def enterExit(self, ctx:WordlifyParser.ExitContext):
        pass

    # Exit a parse tree produced by WordlifyParser#exit.
    def exitExit(self, ctx:WordlifyParser.ExitContext):
        pass

    # Enter a parse tree produced by WordlifyParser#create.
    def enterCreate(self, ctx:WordlifyParser.CreateContext):
        pass

    # Exit a parse tree produced by WordlifyParser#create.
    def exitCreate(self, ctx:WordlifyParser.CreateContext):
        pass

    # Enter a parse tree produced by WordlifyParser#length.
    def enterLength(self, ctx:WordlifyParser.LengthContext):
        pass

    # Exit a parse tree produced by WordlifyParser#length.
    def exitLength(self, ctx:WordlifyParser.LengthContext):
        pass

    # Enter a parse tree produced by WordlifyParser#basename.
    def enterBasename(self, ctx:WordlifyParser.BasenameContext):
        pass

    # Exit a parse tree produced by WordlifyParser#basename.
    def exitBasename(self, ctx:WordlifyParser.BasenameContext):
        pass

    # Enter a parse tree produced by WordlifyParser#args.
    def enterArgs(self, ctx:WordlifyParser.ArgsContext):
        pass

    # Exit a parse tree produced by WordlifyParser#args.
    def exitArgs(self, ctx:WordlifyParser.ArgsContext):
        pass

    # Enter a parse tree produced by WordlifyParser#array.
    def enterArray(self, ctx:WordlifyParser.ArrayContext):
        pass

    # Exit a parse tree produced by WordlifyParser#array.
    def exitArray(self, ctx:WordlifyParser.ArrayContext):
        pass

    # Enter a parse tree produced by WordlifyParser#value_or_id.
    def enterValue_or_id(self, ctx:WordlifyParser.Value_or_idContext):
        pass

    # Exit a parse tree produced by WordlifyParser#value_or_id.
    def exitValue_or_id(self, ctx:WordlifyParser.Value_or_idContext):
        pass


del WordlifyParser
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
genstep.py: Fit genstep xyz vs time, used for viewpoint tracking
=================================================================
See okc/TrackView.cc for the usage of this
Fit the genstep xyz vs time to obtain parametric eqn of genstep position
with time parameter.::
In [3]: tk = A.load_("cerenkov","1_track","dayabay")
In [4]: tk
Out[4]: A(cerenkov,1_track,dayabay)
In [5]: print tk
[[ -16390.518 -802295.938 -7059.101]
[ -162.573 251.993 0.172]]
::
In [1]: run genstep.py
[[[ 0.177 -1.583 4.94 1. ]
[-252.339 -45.677 -155.278 0. ]
[ 0. 82.83 0. 0. ]]]
INFO:opticks.ana.nload:saving derivative of A(cerenkov,1,juno) to /usr/local/env/opticks/juno/cerenkov/1_track.npy
::
simon:npy blyth$ /usr/local/opticks.ana/bin/NumpyEvtTest
[2016-Mar-25 12:08:16.965734]:info: NumpyEvt::loadGenstepDerivativeFromFile typ cerenkov tag 1_track det dayabay
[2016-Mar-25 12:08:16.966326]:info: NumpyEvt::loadGenstepDerivativeFromFile (3,4)
( 0) -16390.518 -802295.938 -7059.101 1.000
( 1) -162.573 251.993 0.172 0.000
( 2) 0.844 27.423 0.000 0.000
"""
import os, logging
import numpy as np
import matplotlib.pyplot as plt
from opticks.ana.base import opticks_main
from opticks.ana.nload import A, I, II, path_
log = logging.getLogger(__name__)
X,Y,Z,W,T = 0,1,2,3,3  # column indices into each genstep 4-vector; T aliases slot 3 (time shares the 4th component)

if __name__ == '__main__':
    import sys  # bugfix: sys.exit() below previously raised NameError — sys was never imported
    args = opticks_main(det="juno", src="cerenkov", tag="1")
    try:
        a = A.load_("gensteps",args.src,args.tag,args.det)
    except IOError as err:
        log.fatal(err)
        sys.exit(args.mrc)
    log.info("loaded gensteps %s %s %s " % (a.path, a.stamp, repr(a.shape)))

    #path = os.path.expandvars("$LOCAL_BASE/opticks/opticksdata/gensteps/dayabay/cerenkov/1.npy")
    #path = os.path.expandvars("$LOCAL_BASE/opticks/opticksdata/gensteps/juno/cerenkov/1.npy")
    #a = np.load(path)

    # second 4-vector of each genstep record: position xyz + time t
    xyzt = a[:,1]
    x,y,z,t = xyzt[:,X], xyzt[:,Y], xyzt[:,Z], xyzt[:,T]

    # data ranges, used to draw the fitted lines across the full extent
    tr = [t.min(), t.max()]
    xr = [x.min(), x.max()]
    yr = [y.min(), y.max()]
    zr = [z.min(), z.max()]

    plt.close()
    plt.ion()
    ny,nx = 1,3
    fig = plt.figure()

    # Linear least-squares fit of each coordinate against time:
    # scatter the samples, then overlay the fitted line in red.
    ax = fig.add_subplot(ny,nx,1)
    ax.scatter(t, x)
    xf = np.polyfit(t,x,1,full=True)
    xm, xc = xf[0] ## xm is "x**1" coefficient, xc is "x**0" (constant term)
    xl = [xm*tt + xc for tt in tr]
    ax.plot(tr, xl, '-r')

    ax = fig.add_subplot(ny,nx,2)
    ax.scatter(t, y)
    yf = np.polyfit(t,y,1,full=True)
    ym, yc = yf[0]
    yl = [ym*tt + yc for tt in tr]
    ax.plot(tr, yl, '-r')

    ax = fig.add_subplot(ny,nx,3)
    ax.scatter(t, z)
    zf = np.polyfit(t,z,1,full=True)
    zm, zc = zf[0]
    zl = [zm*tt + zc for tt in tr]
    ax.plot(tr, zl, '-r')

    ## origin, direction, time range — parametric line consumed by okc/TrackView
    track = np.array(
        [[
          [xc,yc,zc,1.0],
          [xm,ym,zm,0.0],
          [tr[0],tr[1],0.0,0.0]
        ]], dtype=np.float32)

    # parenthesized print works on both Python 2 and 3 (single expression)
    print(track)
    #a.derivative_save(track, "track")
|
"""
Created by Alex Wang on 2018-03-14
"""
import numpy as np
def test_numpy_array():
    """
    Demonstrate that slicing a numpy array yields a *view*, not a copy:
    mutating the slice mutates the parent array (use .copy() for a real copy).
    :return:
    """
    base = np.ones((5, 5), dtype=np.uint8)
    window = base[2:4, 2:4]
    print(base)
    window[0, 0] = 0  # writes through to `base` because `window` is a view
    print(base)
def test_broadcast():
    """
    Broadcasting: arrays of different shapes can be combined elementwise
    when every mismatched dimension is 1 in one of the operands.
    :return:
    """
    # NOTE(review): the broadcast result is (32, 50, 128, 1024) float64,
    # i.e. roughly 1.6 GB — run only on a machine with spare memory.
    a_np = np.random.random(size=(32, 50, 128, 1))
    b_np = np.random.random(size=(1, 1, 128, 1024))
    c_np = a_np + b_np
    print('test_tf_plus, shape of c_np:{}'.format(c_np.shape))  # (32, 50, 128, 1024)
def test_npz():
    """
    Save mixed objects into a single .npz archive and read them back.
    :return:
    """
    str_a = 'abc'
    dict_a = {'name': 'alexwang', 'age': 20}
    arr_a = np.array(range(12)).reshape((3, 4))
    np.savez('test.npz', str_a=str_a, dict_a=dict_a, arr_a=arr_a)
    # The dict is stored as a pickled object array; numpy >= 1.16.3 refuses
    # to load those by default, so allow_pickle=True is required here.
    # Only use it on archives you created yourself (pickle is unsafe on
    # untrusted input).
    data = np.load('test.npz', allow_pickle=True)
    print('arr_a:{}'.format(data['arr_a']))
    print('str_a:{}'.format(data['str_a']))
    print('dict_a:{}'.format(data['dict_a']))
def test_tostring_fromstring():
    """
    Round-trip an array through its raw byte representation.
    :return:
    """
    arr = np.reshape(np.array(range(20)), newshape=(4, 5))
    print(arr)
    # tobytes/frombuffer replace the deprecated tostring/fromstring pair,
    # and reading back with arr.dtype (instead of a hard-coded np.int32)
    # keeps the round-trip correct on platforms whose default int is 64-bit.
    arr_str = arr.tobytes()
    arr_rec = np.frombuffer(arr_str, dtype=arr.dtype)
    print(arr_rec)
if __name__ == '__main__':
    # Run the demos; test_npz is disabled (it writes test.npz into the cwd).
    test_numpy_array()
    test_broadcast()
    # test_npz()
    test_tostring_fromstring()
|
#!/usr/bin/env python
import boto3
import requests
from sys import exit
from os import path, remove
from subprocess import call
from datetime import datetime, timedelta
TIMESTAMP_FILE = 'shutdown.timestamp'
NOW = datetime.now()
def get_instance_id():
    """Return this EC2 instance's id from the instance metadata service."""
    # A timeout prevents the script from hanging forever when run off-EC2
    # or when the metadata endpoint is unreachable.
    return requests.get('http://instance-data/latest/meta-data/instance-id',
                        timeout=5).text
def shutdown_self():
    """Stop this EC2 instance once the timestamp file is >110 minutes old.

    On the first run (timestamp file missing) the file is created and the
    script exits without stopping anything.
    """
    ec2 = boto3.resource('ec2')
    try:
        server_start_time = datetime.fromtimestamp(path.getmtime(TIMESTAMP_FILE))
    except OSError:  # was a bare `except:`; only a missing/unreadable file is expected here
        call(['touch', TIMESTAMP_FILE])
        call(['chmod', '666', TIMESTAMP_FILE])
        exit()
    if ((NOW - timedelta(minutes=110)) > server_start_time):
        try:
            remove(TIMESTAMP_FILE)
            ec2.instances.filter(InstanceIds=[get_instance_id(),]).stop()
        except OSError:
            pass
if __name__ == "__main__":
shutdown_self()
|
#!/usr/bin/python3
import hidden_4
if __name__ == "__main__":
names = dir(hidden_4)
for i in range(0, len(names)):
if names[i].find("__") == -1:
print("{:s}".format(names[i]))
|
import cv2
image = cv2.imread("red_panda.jpg")
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imwrite("Gray_panda.jpg", gray_image)
|
import os
import sys
import subprocess
import shutil
sys.path.insert(0, 'scripts')
import experiments as exp
import rf_distance
def get_rf_pair(tree1, tree2):
    """Run `raxml --rf` on the two tree files and return
    [absolute_rf, relative_rf] parsed from its output."""
    command = [exp.raxml_nompi_exec, "--rf", ",".join((tree1, tree2))]
    raw_output = subprocess.check_output(command).decode("utf-8")
    output_lines = raw_output.split("\n")
    # Each of the first two lines ends with the value we want.
    absolute_rf = output_lines[0].split(" ")[-1]
    relative_rf = output_lines[1].split(" ")[-1]
    return [float(absolute_rf), float(relative_rf)]
if (__name__ == "__main__"):
if (len(sys.argv) != 3):
print("Syntax python fast_rf_distance.py tree1 tree2")
sys.exit(1)
tree1 = sys.argv[1]
tree2 = sys.argv[2]
|
# (slope, offset) pairs mapping each scale to Kelvin: K = slope * t + offset
TO_KELVIN = {
    'C': (1, 273.15),
    'F': (5.0 / 9, 459.67 * 5.0 / 9),
    'R': (5.0 / 9, 0),
    'De': (-2.0 / 3, 373.15),
    'N': (100.0 / 33, 273.15),
    'Re': (5.0 / 4, 273.15),
    'Ro': (40.0 / 21, -7.5 * 40 / 21 + 273.15),
}


def convert_temp(temp, from_scale, to_scale):
    """Convert `temp` between scales via Kelvin; result rounded to int.

    (Approach credit: 'jolaf' on CodeWars.)
    """
    if from_scale == to_scale:
        return temp
    kelvin = temp
    if from_scale != 'K':
        slope, offset = TO_KELVIN[from_scale]
        kelvin = slope * kelvin + offset
    if to_scale == 'K':
        return int(round(kelvin))
    slope, offset = TO_KELVIN[to_scale]
    return int(round((kelvin - offset) / slope))
|
import csv
from multiprocessing import Pool
import xmltodict
from api.fill import fill_match_info
from api.headers import headers
from api.send_request import futblot24
from scripts.utils import parse_leagues
from tables.countries import countries
import os
from api.utils import get_time
_FIELD_NAMES = headers
if not os.path.isdir('results'):
os.mkdir('results')
def save_matches_to_csv(date, csv_file_path, start_time=0, end_time=25):
    """
    Parses main page and saves matches in csv.
    :param end_time:
    :param start_time:
    :param date: string -- '20180929'
    :param csv_file_path: string -- path to string file
    :return:
    """
    # Write only the header here; worker processes re-open the file in
    # append mode.  `with` guarantees the handle is closed on error
    # (previously it leaked if anything below raised).
    with open(csv_file_path, "w") as csv_file:
        writer = csv.DictWriter(csv_file, _FIELD_NAMES)
        writer.writeheader()
    url = f'https://www.futbol24.com/matchDayXml/?Day={date}'
    response = futblot24.send_request(url)
    dict_response = xmltodict.parse(response.content.decode('utf-8'))
    json_response = dict(dict_response)
    desired_teams = dict(dict(json_response["F24"])["Mecze"])['M']
    leagues = parse_leagues(json_response)
    pool_arguments = [(each, leagues, start_time, end_time, csv_file_path) for each in desired_teams]
    # Context manager terminates the pool's workers (the pool was previously
    # never closed or joined).
    with Pool() as pool:
        pool.map(fill_multiple, pool_arguments)
def fill_multiple(_tuple):
    """Worker: fetch the detail page for one match and append a row to the csv.

    :param _tuple: (team_dict, leagues, start_time, end_time, csv_file_path) —
        packed because multiprocessing.Pool.map passes a single argument.
    :return: 0 when the match is skipped (``@S1`` flag set), otherwise None.
    """
    _BASE_URL = 'https://www.futbol24.com/teamCompare'
    team, leagues = _tuple[:2]
    start_time, end_time = _tuple[2:4]
    csv_file_path = _tuple[4]
    # `with` closes the handle even on early return/exception (previously the
    # append handle leaked whenever `@S1` was set or fill_match_info raised).
    with open(csv_file_path, "a") as csv_file:
        writer = csv.DictWriter(csv_file, _FIELD_NAMES)
        if team['@S1']:
            return 0
        country_id = int(team['@KId'])
        country = countries[country_id]['name'].replace(' ', '-')
        league_id = int(team['@LId'])
        league = leagues[league_id]['sname']
        home_team = team['@HN'].replace(' ', '-')
        guest_team = team['@GN'].replace(' ', '-')
        time_string = team['@C0']
        print(f'Getting details of match {home_team} vs {guest_team}')
        page_link = f'{_BASE_URL}/{country}/{home_team}/vs/{country}/{guest_team}/' \
                    f'?statTALR-Table=1&statTALR-Limit=2&statTBLR-Table=2&statTBLR-Limit=2'
        page_link = page_link.replace(')', '').replace('(', '')
        row_dict = {'home_team': home_team,
                    'time': get_time(time_string),
                    'guest_team': guest_team,
                    'country': country,
                    'league': league,
                    'page_link': page_link}
        row = fill_match_info(page_link, row_dict, start_time, end_time)
        writer.writerow(row)
        csv_file.flush()
|
from pocket import Pocket, PocketException
# SECURITY(review): API credentials are hard-coded in source (and now leaked
# wherever this file is shared).  Rotate these keys and load them from
# environment variables or a secrets store instead of committing them.
p = Pocket(
    consumer_key="92912-704c4cd2fa6f2871d28faebb",
    access_token="cb3b7b6d-f8c5-e0e9-02ae-21ff42"
)
|
def conference_picker(visited, offered):
    """Return the first city in `offered` not already in `visited`,
    or a fallback message when every offered city has been visited."""
    seen = set(visited)
    for city in offered:
        if city not in seen:
            return city
    return 'No worthwhile conferences this year!'
|
def fib(n, list):
    """Return (zeros, ones): how many times the base cases 0 and 1 appear
    when fib(n) is expanded, using `list` as the memo of earlier results.
    (Parameter name `list` kept for caller compatibility, though it shadows
    the builtin.)"""
    if n < 2:
        return (1, 0) if n == 0 else (0, 1)
    prev, prev2 = list[n - 1], list[n - 2]
    return prev[0] + prev2[0], prev[1] + prev2[1]
from sys import stdin

# Read the number of test cases, then one n per line.
n = int(stdin.readline())
value = []
for _ in range(n):
    tn = int(stdin.readline())
    value.append(tn)

# For each queried n, print how many times the base cases fib(0) and fib(1)
# are reached when expanding fib(n) (classic bottom-up counting problem).
for n in value:
    if n == 0:
        print('1 0')
    elif n == 1:
        print('0 1')
    else:
        count = []
        count.append([1, 0])  # fib(0) contributes one "0"
        count.append([0, 1])  # fib(1) contributes one "1"
        for i in range(2, n+1):
            count.append(list(fib(i, count)))
        print(count[-1][0], count[-1][1])
import argparse
import config
import os
import json
if __name__ == '__main__':
    # Re-export a QA corpus into per-question evidence files plus one JSON
    # index per split.  Layout: <corpus>/evidence/<sess>_<qidx>/<doc>.txt and
    # <corpus>/qa/<sess>.json
    parse = argparse.ArgumentParser()
    parse.add_argument("corpus", choices=["en", "fr", "de", "ru", "pt", "zh", "pl", "uk", "ta"])
    args = parse.parse_args()
    inputdir = config.CORPUS_NAME_TO_PATH[args.corpus]
    outdir = os.path.join(inputdir, 'evidence')
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    outdir2 = os.path.join(inputdir, 'qa')
    if not os.path.exists(outdir2):
        os.mkdir(outdir2)
    # Only the English corpus has a training split.
    if os.path.basename(inputdir) == 'en':
        all_sess = ['train', 'dev', 'test']
    else:
        all_sess = ['dev', 'test']
    for sess in all_sess:
        f1 = os.path.join(inputdir, sess + '_doc.json')  # retrieved docs, one JSON list per line
        f2 = os.path.join(inputdir, sess + '.txt')       # QA records, one JSON object per line
        fo = os.path.join(inputdir, 'qa', sess + '.json')
        outlines = []
        with open(f1, 'r') as fin1, open(f2, 'r') as fin2:
            # The two files are parallel: line i of each describes question i.
            for (qidx, (line1, line2)) in enumerate(zip(fin1, fin2)):
                if qidx % 100 == 0:
                    print(qidx)  # progress indicator
                qds = json.loads(line1.strip())
                qa = json.loads(line2.strip())
                out_item = {}
                out_item['answers'] = qa['answers']
                out_item['question_id'] = '%s_%d' % (sess, qidx)
                out_item['docs'] = []
                out_item['question'] = " ".join(qa['question'].split())  # collapse whitespace
                for (didx, qd) in enumerate(qds):
                    filename = "%s_%d_%d.txt" % (sess, qidx, didx)
                    subdirpath = '%s_%d' % (sess, qidx)
                    outd = os.path.join(outdir, subdirpath)
                    if not os.path.isdir(outd):
                        os.mkdir(outd)
                    outfile = os.path.join(outd, filename)
                    out_item['docs'].append((subdirpath, filename))
                    # NOTE(review): split + join on "\n\n" is an identity
                    # transform — presumably a paragraph filter was intended
                    # here; confirm.
                    document = "\n\n".join(qd['document'].split("\n\n"))
                    with open(outfile, 'w') as fout:
                        fout.write(document)
                outline = json.dumps(out_item, ensure_ascii=False)
                outlines.append(outline)
        with open(fo, 'w') as fout:
            for line in outlines:
                fout.write(line + "\n")
|
import json
from datetime import datetime
import os
from subprocess import Popen, PIPE
import sys
#import psutil
import re
import time
class main():
    """Android forensics helper driven by the `adb` command-line tool.

    Checks the host, verifies adb, lets the user pick a connected device and
    then extracts information from it.  User-facing strings are Portuguese
    and are left untouched; comments translated to English.
    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """

    def __init__(self):
        # Check the host operating system
        self.verify_system_host()
        # Make sure the adb tool is installed
        self.verify_installed_adb()
        # Kill the adb server process
        #self.adb_kill()
        # Start the adb server process
        #self.adb_start()
        # List the connected devices
        check_devices = self.verify_connect_devices()
        if not check_devices:
            print("Nenhum dispositivo encontrado.")
            sys.exit()
        else:
            self.choice_device(check_devices)
        # Report skeleton, filled in by the collection methods below
        self.rel = {
            "title": f"Relatório do Dispositivo {self.get_device()}",
        }
        #self.get_info_android()
        #self.logical_backup()
        self.extract_data()

    def verify_system_host(self):
        # Print basic information about the host (POSIX-only: os.uname)
        so, hostname, release, version, arquitect = os.uname()
        output = f"""
INFORMAÇÕES DO SISTEMA:
Arquitetura: {arquitect}
Descrição: {version}
Hostname: {hostname}
Sistema Operacional: {so}
Release: {release}
"""
        print(output)

    # Return the list of connected devices, or False when none are attached
    def verify_connect_devices(self):
        devices = self.execute_cmd(options=1)
        # First line of `adb devices` output is a header — drop it
        devices = devices.strip().split('\n')
        #print(devices)
        if len(devices) > 1:
            return devices[1:]
        else:
            print("entrou no false")
            return False

    # Ask the user to choose which attached device to work with
    def choice_device(self, devices):
        aux = 0
        print("Os dispositivos conectados são:")
        for device in devices:
            print(device)
        if len(devices) > 0:
            print("\nEscolha o seu dispositivo desejado:")
            for device in devices:
                aux += 1
                print(f"{aux} - {device}")
        aux = 0
        # Keep prompting until a valid 1-based index is entered
        while aux < 1 or aux > len(devices):
            aux2 = input("\nEscolha: ")
            if aux2.isdigit():
                aux = int(aux2)
        # `adb devices` lines are "<serial>\t<state>" — keep only the serial
        self._device = str(devices[aux-1]).split('\t')[0]
        print("\nDevice selecionado é: ",self._device)

    # Return the serial of the selected device
    def get_device(self):
        return self._device

    def set_device(self, device):
        # Not implemented — selection happens in choice_device()
        pass

    # Start the ADB server
    def adb_start(self):
        print("INICIANDO.........")
        self.execute_cmd(3)
        print("ADB SERVER INICIADO.")

    # Stop the ADB server
    def adb_kill(self):
        self.execute_cmd(4)
        print("ADB SERVER FECHADO.")

    # Exit unless the adb binary is available
    def verify_installed_adb(self):
        check = self.execute_cmd(2)
        if not check:
            print("Ferramenta ADB não instalada. Instale para continuar")
            sys.exit()

    # Collect the device's main properties (via getprop) for the report
    def get_info_android(self):
        aux = ""
        # Friendly label -> getprop key
        aux_fast = {
            "Marca": "ro.product.manufacturer",
            "Modelo": "ro.product.model",
            "Serial Number": "ro.serialno",
            "Versão do Android": "ro.build.version.release",
            "Versão do SDK": "ro.build.version.sdk",
            "Interface de rede": "wifi.interface",
            "Status Interface de rede": "wlan.driver.status",
        }
        info_fast, info_full = self.execute_cmd(5)
        # getprop prints "[key]: [value]" — strip brackets, split into lines
        info_fast = info_fast.replace("[","").replace("]","").strip(" ").split("\n")
        info_fast = [i for i in info_fast if i != '']
        fast = dict()  # NOTE(review): redundant — immediately rebound below
        fast = {k: j.split(":")[1].strip() for j in info_fast for k,v in aux_fast.items() if v in j}
        for key,value in fast.items():
            aux += f"{key} - {value.upper()}\n"
        self.rel["info_fast"] = aux
        print("\n\n")
        print(self.rel["title"])
        print(self.rel["info_fast"])
        #print("Valor de info: ",info_full)
        #self.rel["info_fast"]
        #self.rel["info_full"]

    def logical_backup(self):
        # Trigger a full `adb backup` of the device
        check = self.execute_cmd(6)
        if check:
            # NOTE(review): bare string expression — never printed/stored
            "BACKUP executado com sucesso."

    def restore_logical_backup(self):
        # Not implemented yet (would be execute_cmd option 7)
        #check = self.execute_cmd(7)
        pass

    def extract_data(self):
        # List files/folders on the device root (work in progress)
        check = self.execute_cmd(8)

    def create_relatory(self):
        pass

    def export_relatory(self):
        pass

    def execute_cmd(self,options):
        # Dispatch adb commands selected by an integer option code.
        def run(cmd):
            # Run `cmd`, busy-wait for completion, return (stdout, stderr)
            # decoded as UTF-8, or (False, False) when the command fails.
            try:
                out = Popen(cmd, stdout=PIPE)
                while out.poll() == None:
                    pass
                output, error = out.communicate(timeout=15)
                if error != None:
                    return output.decode('UTF-8'), error.decode('UTF-8')
                return output.decode('UTF-8'), error
            except Exception as inst:
                #print("insta args", inst.args)
                return False, False  # command failed
        if options == 1:
            output, error = run(["adb","devices"])
            return output
        elif options == 2:
            output, error = run(["adb"])
            return output
        elif options == 3:
            output, error = run(["adb","start-server"])
            return output
        elif options == 4:
            output, error = run(["adb","kill-server"])
            return output
        elif options == 5:
            fast = ""
            full = ""
            fast, error = run(["adb","-s",f"{self.get_device()}","shell","getprop"])
            # Friendly label -> shell command for the "full" report
            # NOTE(review): "Processador" appears twice — the duplicate key
            # silently overwrites the first entry.
            aux_full = {
                "Processos": "ps",
                "Soquets": "netstat",
                #"Estados do dispositivo": "dumpsys",  # disabled: was raising errors
                "Processador": "cat /proc/cpuinfo",
                "Memoria": "cat /proc/meminfo",
                "Pacotes instalados": "pm list packages",
                "Processador": "cat /proc/cpuinfo",
                "Informações da rede": "ip addr show wlan0",
                "Rotas": "route",
                "IMEI": "service call iphonesubinfo 1",
            }
            #for key,value in aux_full.items():
            #    full += run(["adb","-s",f"{self.get_device()}","shell",value])
            return fast,full
        elif options == 6:
            print("Iniciando BACKUP......")
            print("Pode demorar alguns minuto......")
            bkp, error = run(["adb", "-s", f"{self.get_device()}", "backup", "-f", f"bkp_{datetime.now().strftime('%d_%m_%Y')}_.ab" ,"-apk","-system","-all","-shared" ])
            print("BACKUP finalizado.")
            print(f"O BACKUP está localizado na seguinte pasta {os.getcwd()} com o nome: bkp_{datetime.now().strftime('%d_%m_%Y')}_.ab")
            return bkp
        elif options == 7:
            pass
        elif options == 8:
            print("Iniciando verificação das pastas e arquivos.")
            files_folders,error = run(["adb", "-s", f"{self.get_device()}", "shell", "ls", "-l","-a", "/" ])
            print(type(files_folders))
            files_folders = files_folders.split("\n")
            # Keep only lines starting with a permission string (drwxr-xr-x…)
            get_names = [name for name in files_folders if re.findall(r'[-dlrwxt]{10}',name)]
            #print("Lista: ", files_folders)
            print("Names: ", get_names)
            print("Erro: ", error)
            #pega = re.findall(r'[-dlrwx]{10}',texto2)
            #print(pega)
            sys.exit()


# Instantiate immediately: importing this module runs the whole tool.
droid = main()
|
# Convert colon-separated MAC addresses into Cisco dotted notation.
mac = ['aabb:cc80:7000', 'aabb:dd80:7340', 'aabb:ee80:7000', 'aabb:ff80:7000']
mac_cisco = ['.'.join(address.split(':')) for address in mac]
print(mac)
print(mac_cisco)
###Find cos-sim and rating estimations
import numpy
from scipy import spatial
import operator
import os.path
# Estimate venue rankings for a user from cosine similarity with "experts".
my_path = os.path.abspath(os.path.dirname(__file__))
prePath = os.path.join(my_path,"checkins/")
cityName = "London/"
expertListFileName = "experts"
fileSuffix = ".csv"
delimiter = ","
expertsFilePath = prePath + cityName + expertListFileName + fileSuffix
expertCheckinsPrefix = prePath + cityName
#Turkish Restaurant,Restaurant,Museum
#TODO Find category-checkin count vector of the user
userCategoryCheckinCounts = [26, 77, 40]
#Iterate through experts
cosineSimilarities = {}
# `with` closes the handle (previously the file was never closed)
with open(expertsFilePath, "r") as expertsFile:
    lines = expertsFile.readlines()
for line in lines:
    tokens = line.split(delimiter)
    expertCategoryCheckinCounts = tokens[1:]
    expertCategoryCheckinCounts = [int(numeric_string) for numeric_string in expertCategoryCheckinCounts]
    # scipy's `cosine` is a distance; 1 - distance gives similarity
    cosineSimilarity = 1 - spatial.distance.cosine(userCategoryCheckinCounts, expertCategoryCheckinCounts)
    cosineSimilarities[tokens[0]] = cosineSimilarity
estimatedRankings = {}
#Find estimated rankings
for expert, similarity in cosineSimilarities.items():
    expertCheckinsPath = expertCheckinsPrefix + expert + fileSuffix
    # one csv of (venueId, checkinCount) per expert; closed via `with`
    with open(expertCheckinsPath, "r") as checkinsFile:
        lines = checkinsFile.readlines()
    for line in lines:
        tokens = line.split(delimiter)
        venueId = tokens[0]
        venueCheckinCount = float(tokens[1])
        estimatedRanking = cosineSimilarities[expert] * venueCheckinCount
        #TODO Need decision?
        # Keep the strongest estimate seen for each venue
        if venueId in estimatedRankings:
            estimatedRankings[venueId] = max(estimatedRankings[venueId], estimatedRanking)
        else:
            estimatedRankings[venueId] = estimatedRanking
# Sort venues by estimated ranking, best first
sortedEstimatedRankings = sorted(estimatedRankings.items(), key=operator.itemgetter(1))
sortedEstimatedRankings.reverse()
print(sortedEstimatedRankings)
|
import logging
from decimal import Decimal
from django.db import transaction
from furskru_tools.numbers import round_int, round_down_int
from loyalty.bonuses.const import CEILING, DEFAULT_DISCOUNT_PERCENT
from loyalty.program.models import Discount, PointsRange, RatingGroup
logger = logging.getLogger('points_discount')
@transaction.atomic()
def import_rating_groups(groups):
    """Sync RatingGroup rows with `groups`, deleting rows absent from it."""
    kept_pks = []
    for payload in groups:
        group_obj, _created = RatingGroup.objects.update_or_create(
            min_points=payload['minPoints'],
            max_points=payload['maxPoints'],
            defaults={'name': payload['name']},
        )
        kept_pks.append(group_obj.pk)
    # Anything not re-imported above is stale — drop it.
    RatingGroup.objects.exclude(pk__in=kept_pks).delete()
@transaction.atomic()
def import_rating_rules(points_ranges):
    """Sync PointsRange rows and their nested Discounts, deleting stale rows."""
    kept_range_pks = []
    kept_discount_pks = []
    for points in points_ranges:
        range_obj, _created = PointsRange.objects.update_or_create(
            guid=points['guid'],
            defaults={
                'group_name': points['groupName'],
                'min_points': points['minPoints'],
                'max_points': points['maxPoints'],
                'accrual_percent': points['accrualPercent'],
                'repair_percent': points['repairPercent'],
                'cleaning_percent': points['cleaningPercent'],
                'free_repair': points['freeRepair'],
                'free_cleaning': points['freeCleaning'],
            },
        )
        kept_range_pks.append(range_obj.pk)
        for discount in points['discounts']:
            discount_obj, _created = Discount.objects.update_or_create(
                points_range=range_obj,
                category=discount['category'],
                discount=discount['discount'],
            )
            kept_discount_pks.append(discount_obj.pk)
    # Drop everything that was not present in this import.
    PointsRange.objects.exclude(pk__in=kept_range_pks).delete()
    Discount.objects.exclude(pk__in=kept_discount_pks).delete()
def get_client_discount(category_hierarchy, client_points):
    """Return the discount percent for a client.

    :param category_hierarchy: product category hierarchy (most generic first)
    :param client_points: the client's accumulated purchase total
    :type category_hierarchy: list
    :type client_points: int
    :return: discount percent; DEFAULT_DISCOUNT_PERCENT when no range or
        category matches
    """
    range_pk = PointsRange.objects.filter(
        max_points__gte=client_points).values_list('pk', flat=True).first()
    if not range_pk:
        logger.error(
            'Points range not found for {} client points'.format(client_points))
        return DEFAULT_DISCOUNT_PERCENT
    category_discounts = dict(
        Discount.objects.filter(
            points_range=range_pk,
            category__in=category_hierarchy).values_list('category', 'discount'))
    # Walk from the most specific category upwards; the first match wins.
    for category in reversed(category_hierarchy):
        if category in category_discounts:
            return category_discounts[category]
    return DEFAULT_DISCOUNT_PERCENT
def get_discount(price, balance, category_hierarchy, client_points):
    """Compute the bonus-card discount for a purchase.

    :param price: item price
    :param balance: client's bonus balance
    :param category_hierarchy: product category hierarchy as a list
    :param client_points: client's accumulated purchase total
    :type price: int
    :type balance: int
    :type category_hierarchy: list
    :type client_points: int
    :return: the final discount — the smaller of the percent-based cap and
        the rounded-down bonus balance
    """
    percent = get_client_discount(category_hierarchy, client_points)
    percent_cap = round_int(Decimal(price) / 100 * percent, CEILING)
    balance_cap = round_down_int(Decimal(balance), CEILING)
    return min(percent_cap, balance_cap)
def get_bonus(price, client_points):
    """Compute the bonus accrued for a purchase.

    :param price: item price
    :param client_points: client's accumulated purchase total
    :type price: int
    :type client_points: int
    :return: bonus - accrued bonus, 0 when no points range matches
    """
    bonus = 0
    accrual_percent = PointsRange.objects.filter(
        max_points__gte=client_points).values_list('accrual_percent',
                                                   flat=True).first()
    # `is None` (not falsy check) so a legitimate 0% accrual is not logged
    # as a missing range; also dropped the redundant `accrual = accrual_percent`.
    if accrual_percent is None:
        logger.error(
            'Points range not found for {} client points'.format(client_points))
    else:
        bonus = round_int(Decimal(price) / 100 * accrual_percent, CEILING)
    return bonus
def get_bonus_price(price, balance):
    """Return the price after applying the bonus-card discount.

    :param price: item price
    :param balance: client's bonus balance
    :return: product cost with the computed discount applied
    """
    discount = round_int((Decimal(price) / 100) * DEFAULT_DISCOUNT_PERCENT, CEILING)
    # NOTE(review): the hard-coded rounding step 50 differs from the CEILING
    # constant used for the same computation in get_discount() — confirm
    # this asymmetry is intentional.
    balance_discount = round_down_int(Decimal(balance), 50)
    final_discount = min(discount, balance_discount)
    return price - final_discount
|
class Solution:
    def maximumTop(self, nums: List[int], k: int) -> int:
        """Return the largest value that can end up on top of the pile
        after exactly k moves, or -1 if the pile must end up empty."""
        n = len(nums)
        # Empty pile, or a single element popped/pushed an odd number of
        # times, necessarily leaves the stack empty.
        if n == 0 or (n == 1 and k % 2 == 1):
            return -1
        if k == 0:
            # No moves allowed: the current top stays.
            return nums[0]
        if n > 1 and k == 1:
            # Exactly one move: the only option is popping the top.
            return nums[1]
        if k > n:
            # Spare moves: empty the pile, then push the best element back.
            return max(nums)
        if k == n:
            # Pop n-1 elements and push back the best of those removed.
            return max(nums[:k - 1])
        # k < n: either expose nums[k] with k pops, or pop k-1 and push
        # back the best among the removed prefix.
        return max(max(nums[:k - 1]), nums[k])
|
from django.db import models
def upload_partner_icon(instance, filename):
    """Build the storage path for a partner's icon.

    :param instance: the Partner model instance being saved (unused)
    :param filename: original name of the uploaded file
    :return: upload path under ``image/partner/``
    """
    # Fixed: the uploaded file's name was dropped from the path, making
    # every upload resolve to the same key.
    return f'image/partner/{filename}'
class Partner(models.Model):
    """A partner organization shown on the site."""
    # Display name of the partner.
    name = models.CharField(
        max_length=50
    )
    # Partner icon; stored under the path built by upload_partner_icon.
    image = models.FileField(
        upload_to=upload_partner_icon
    )
    # Link to the partner's website.
    url = models.URLField()
    def __str__(self):
        return f'{self.name}'
|
#start_dir = os.getcwd()
# PyMOL startup (rc) script; `cmd` is injected by the PyMOL runtime.
cmd.set("cartoon_fancy_helices", 1)
cmd.set("ignore_case", 1)
cmd.set("ignore_case_chain", 1)
#Dropbox scripts
#cd E:
#run \Users\Brahm\Documents\Dropbox\pymol\seq_diff.py
#run \Users\Brahm\Documents\Dropbox\pymol\goto.py
#run \Users\Brahm\Documents\Dropbox\pymol\modevectors.py
#run \Users\Brahm\Documents\Dropbox\pymol\figure_quality.py
#run \Users\Brahm\Documents\Dropbox\pymol\rcinit.py
#os.chdir(start_dir)
# Fetch and run helper scripts straight from GitHub at startup
# (requires network access when PyMOL launches).
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/seq_diff.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/goto.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/modevectors.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/figure_quality.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/rcinit.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/get_sequence.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/pdb2pose.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/color_by_restype.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/design_movie.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/loadBfacts.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/ray_tracer.py")
cmd.do("run https://raw.githubusercontent.com/BYachnin/PymolScripts/master/antibody_tools.py")
cmd.do("run https://raw.githubusercontent.com/Pymol-Scripts/Pymol-script-repo/master/color_by_conservation.py")
# If available, try to import the visterra visualization tools
try:
    from vis_pymol.display_utils import *
except ModuleNotFoundError:
    print("Visterra visualization tools not found.")
#Try to set up a pyrosetta link
import sys,os
main_to_pyrsrv = '/source/src/python/PyRosetta/src/' # How to get from main to the location of the PyMOL-RosettaServer files
#Figure out if we should use the python2 or python3 version
# NOTE(review): if the interpreter were neither Python 2 nor 3, pyr_scpt
# would be unbound below; in practice one of these branches always runs.
if sys.version_info[0] == 2:
    pyr_scpt = "PyMOL-RosettaServer.py"
elif sys.version_info[0] == 3:
    pyr_scpt = "PyMOL-RosettaServer.python3.py"
#Check for visterra_scripts server or environment variables for Rosetta
pyr_scpt_path = None
if os.path.exists(os.path.expanduser("~") + "/visterra_scripts/vis_pymol/" + pyr_scpt.replace('.python', '')):
    pyr_scpt = pyr_scpt.replace('.python', '')
    pyr_scpt_path = os.path.expanduser("~") + "/visterra_scripts/vis_pymol/"
    print("Found the visterra_script pymol server file.")
elif 'ROSETTA' in os.environ:
    pyr_scpt_path = os.environ['ROSETTA'] + '/main' + main_to_pyrsrv
elif 'ROSDB' in os.environ:
    pyr_scpt_path = os.environ['ROSDB'] + '/..' + main_to_pyrsrv
elif 'ROSETTA3_DB' in os.environ:
    pyr_scpt_path = os.environ['ROSETTA3_DB'] + '/..' + main_to_pyrsrv
#If we have a route to the PyMOL-RosettaServer files and we can find a file at that location
if pyr_scpt_path and os.path.exists(pyr_scpt_path + pyr_scpt):
    #Run the script in PyMOL
    cmd.do("run " + pyr_scpt_path + pyr_scpt)
#End of pyrosetta link stuff
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An Organization Resource.
See: https://cloud.google.com/resource-manager/reference/rest/v1/organizations
"""
from google.cloud.security.common.gcp_type import resource
class OrgLifecycleState(resource.LifecycleState):
    """Organization lifecycle state."""
    # NOTE(review): the attribute is named DELETED_REQUESTED while the API
    # value is 'DELETE_REQUESTED' — confirm callers expect this spelling
    # before renaming, since the attribute name is part of the interface.
    DELETED_REQUESTED = 'DELETE_REQUESTED'
class Organization(resource.Resource):
    """Organization resource."""
    # printf-style template for building the unique GCP resource name.
    RESOURCE_NAME_FMT = 'organizations/%s'
    def __init__(
            self,
            organization_id,
            name=None,
            display_name=None,
            lifecycle_state=OrgLifecycleState.UNSPECIFIED):
        """Initialize.
        Args:
            organization_id (int): The organization id.
            name (str): The organization's unique GCP name, with the
                format "organizations/{id}".
            display_name (str): The organization's display name.
            lifecycle_state (LifecycleState): The lifecycle state of the
                organization.
        """
        # Pure pass-through to the base Resource with the type pinned
        # to ORGANIZATION.
        super(Organization, self).__init__(
            resource_id=organization_id,
            resource_type=resource.ResourceType.ORGANIZATION,
            name=name,
            display_name=display_name,
            lifecycle_state=lifecycle_state)
|
# Generated by Django 2.0.9 on 2018-11-06 00:11
import cloudinary.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Perros and
    UsuariosAdoptantes tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Perros',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=50, verbose_name='Nombre')),
                ('razaPredominante', models.CharField(max_length=50, verbose_name='Raza Predominante')),
                ('descripcion', models.CharField(max_length=100, verbose_name='Descripcion')),
                ('estado', models.CharField(choices=[('1', 'Adoptado'), ('2', 'Disponible'), ('3', 'Rescatado')], max_length=50, verbose_name='Estado')),
                ('imagen', cloudinary.models.CloudinaryField(max_length=255, verbose_name='imagen')),
            ],
        ),
        migrations.CreateModel(
            name='UsuariosAdoptantes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=50, verbose_name='Nombre')),
                ('apellido', models.CharField(max_length=50, verbose_name='Apellido')),
                ('correo', models.CharField(max_length=50, verbose_name='Correo')),
                ('clave', models.CharField(max_length=50, verbose_name='Clave')),
            ],
        ),
    ]
|
import datetime
import os
import os.path
import numpy as np
import pandas as pd

# Directory holding the raw station data files.
datadir = "./data/"
# Column layout of the raw input files.
input_column_names = ["unit","date","rawtime","wind_speed_avg_mps","wind_direction_avg_deg",
                      "wind_direction_stddev_deg","wind_speed_peak_mps","temperature_C",
                      "relative_humidity","pressure_mbar","battery_V","zero"]
# Columns written to the merged output file.
output_column_names = ["temperature_C","pressure_mbar","relative_humidity",
                       "wind_speed_avg_mps","wind_speed_peak_mps",
                       "wind_direction_avg_deg","wind_direction_stddev_deg"]
# Read and merge all data files.
dfs = []  # one DataFrame per input file
for name in sorted(os.listdir(datadir)):  # read files in sorted fashion
    filename = os.path.join(datadir, name)  # e.g. ./data/filename.txt
    # Fixed: the file name was missing from this progress message.
    print(f"reading {filename}")
    df = pd.read_csv(filename, names=input_column_names, index_col=False, comment='#')
    dfs.append(df)
df = pd.concat(dfs)
df.drop_duplicates(inplace=True)  # remove duplicate rows across files
del dfs  # free the per-file frames once merged
# Create a timezone-aware datetime index from the date and time columns.
df["datetime"] = pd.to_datetime(df['date'] +' '+ df['rawtime'], format="%y/%m/%d %H:%M:%S", utc=True)
df.set_index("datetime", inplace=True)
print(df)
print("Checking unit colums")
print(type(df['unit']))
print(df['unit'])
# Sanity-check the dtypes of every column we are about to write.
for column in output_column_names:
    print(df[column].dtype)
    assert df[column].dtype in [np.int64, object , np.float64]  # check column type
# Write the merged output file.
filename = "ccat_site_weather_data_2006_to_2014.csv"
print(f"writing {filename}")
df.to_csv(filename, columns=output_column_names)
|
import unittest
import numpy as np
import numpy.testing as npt
from sigpy.mri import samp
class TestPoisson(unittest.TestCase):
    """Test poisson undersampling defined in `sigpy.mri.samp.poisson`."""

    def test_numpy_random_state(self):
        """Verify that random state is unchanged when seed is specified."""
        np.random.seed(0)
        expected_state = np.random.get_state()
        _ = samp.poisson((320, 320), accel=6, seed=80)
        state = np.random.get_state()
        assert (expected_state[1] == state[1]).all()

    def test_reproducibility(self):
        """Verify that poisson is reproducible."""
        np.random.seed(45)
        mask1 = samp.poisson((320, 320), accel=6, seed=80)
        # Changing internal numpy state should not affect mask.
        np.random.seed(20)
        mask2 = samp.poisson((320, 320), accel=6, seed=80)
        npt.assert_allclose(mask2, mask1)


# Fixed: this guard previously ran BEFORE the class definition, so
# unittest.main() discovered zero tests when the file was executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Filename: QuadSplitter.py
# Created by: Brian Lach (June 4, 2020)
# Purpose: Resizable quad-viewport widget for PyQt. Ported to Python.
from direct.directnotify.DirectNotifyGlobal import directNotify
from PyQt5 import QtWidgets, QtCore
import math
class AdvSplitter(QtWidgets.QWidget):
    """One draggable splitter bar (horizontal or vertical) of a QuadSplitter.

    The bar can be dragged on its own, or grabbed at the square where the
    two bars cross ("center" drag) to move both bars at once.
    """
    def __init__(self, parent, splitter, orientation):
        QtWidgets.QWidget.__init__(self, parent)
        # Bar position as a fraction (0..1) of the parent's usable span.
        self._percent = 0.5
        self.orientation = orientation
        # True while the left mouse button is held on this bar.
        self.mouse = False
        # True when the current drag started on the center crossing square.
        self.center = False
        # Owning QuadSplitter; queried for the perpendicular bar's position.
        self.splitter = splitter
        self.mousePos = QtCore.QPoint(0, 0)
        self.setMouseTracking(True)
        if orientation == QtCore.Qt.Horizontal:
            self.setCursor(QtCore.Qt.SplitHCursor)
        elif orientation == QtCore.Qt.Vertical:
            self.setCursor(QtCore.Qt.SplitVCursor)
    def cleanup(self):
        # Drop all references and schedule deletion on the Qt side.
        self._percent = None
        self.orientation = None
        self.mouse = None
        self.center = None
        self.splitter = None
        self.mousePos = None
        self.deleteLater()
    def percent(self):
        """Return the bar position as a fraction (0..1)."""
        return self._percent
    def setPercent(self, val):
        """Set the bar position as a fraction (0..1); caller re-arranges."""
        self._percent = val
    def paintEvent(self, event):
        # Draw the bar as two splitter segments, split where the
        # perpendicular bar crosses, so the crossing square looks seamless.
        painter = QtWidgets.QStylePainter(self)
        opt1 = QtWidgets.QStyleOption()
        opt2 = QtWidgets.QStyleOption()
        opt1.initFrom(self)
        opt2.initFrom(self)
        # opt1
        opt1.state = opt2.state = QtWidgets.QStyle.State_Raised
        if (self.orientation == QtCore.Qt.Horizontal):
            opt1.state |= QtWidgets.QStyle.State_Horizontal
            opt2.state |= QtWidgets.QStyle.State_Horizontal
            if self.splitter:
                # Split this (vertical-running) bar at the vertical bar's y.
                hiPart = math.floor(opt1.rect.height() * self.splitter.realPercent(QtCore.Qt.Vertical) + 0.5)
                loPart = opt1.rect.height() - hiPart
                opt1.rect.setBottom(opt1.rect.top() + hiPart)
                opt2.rect.setTop(opt2.rect.bottom() - loPart)
        elif self.splitter:
            # Vertical orientation: split at the horizontal bar's x.
            hiPart = math.floor(opt1.rect.width() * self.splitter.realPercent(QtCore.Qt.Horizontal) + 0.5)
            loPart = opt1.rect.width() - hiPart
            opt1.rect.setRight(opt1.rect.left() + hiPart)
            opt2.rect.setLeft(opt2.rect.right() - loPart)
        painter.drawControl(QtWidgets.QStyle.CE_Splitter, opt1)
        if self.splitter:
            painter.drawControl(QtWidgets.QStyle.CE_Splitter, opt2)
        painter.end()
    def mouseMoveEvent(self, event):
        # Hovering with no drag: choose the cursor depending on whether the
        # pointer is over the crossing square or the plain bar.
        if (not self.mouse) and self.splitter:
            if self.orientation == QtCore.Qt.Horizontal:
                hiPart = math.floor(self.height() * self.splitter.realPercent(QtCore.Qt.Vertical) + 0.5)
                if ((event.pos().y() > (hiPart - self.splitter.centerPartWidth() / 2)) and
                    (event.pos().y() < (hiPart + self.splitter.centerPartWidth() / 2))):
                    self.setCursor(QtCore.Qt.SizeAllCursor)
                else:
                    self.setCursor(QtCore.Qt.SplitHCursor)
            else:
                hiPart = math.floor(self.width() * self.splitter.realPercent(QtCore.Qt.Horizontal) + 0.5)
                if ((event.pos().x() > (hiPart - self.splitter.centerPartWidth() / 2)) and
                    (event.pos().x() < (hiPart + self.splitter.centerPartWidth() / 2))):
                    self.setCursor(QtCore.Qt.SizeAllCursor)
                else:
                    self.setCursor(QtCore.Qt.SplitVCursor)
        elif self.center:
            if self.cursor().shape() != QtCore.Qt.SizeAllCursor:
                self.setCursor(QtCore.Qt.SizeAllCursor)
        elif self.orientation == QtCore.Qt.Horizontal:
            if self.cursor().shape() != QtCore.Qt.SplitHCursor:
                self.setCursor(QtCore.Qt.SplitHCursor)
        elif self.cursor().shape() != QtCore.Qt.SplitVCursor:
            self.setCursor(QtCore.Qt.SplitVCursor)
        # Dragging: forward the delta to the parent QuadSplitter.
        if self.mouse and (event.buttons() & QtCore.Qt.LeftButton):
            pt = self.mapToParent(event.pos())
            if isinstance(self.parent(), QuadSplitter):
                self.parent().splitterMove(self, pt - self.mousePos, self.center)
    def mousePressEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            self.mouse = True
            if self.splitter:
                # Decide whether the press grabbed the center square
                # (moves both bars) or just this bar.
                if self.orientation == QtCore.Qt.Horizontal:
                    hiPart = math.floor(self.height() * self.splitter.realPercent(QtCore.Qt.Vertical) + 0.5)
                    if ((event.pos().y() > (hiPart - self.splitter.centerPartWidth() / 2)) and
                        (event.pos().y() < (hiPart + self.splitter.centerPartWidth() / 2))):
                        self.center = True
                        self.setCursor(QtCore.Qt.SizeAllCursor)
                    else:
                        self.setCursor(QtCore.Qt.SplitHCursor)
                else:
                    hiPart = math.floor(self.width() * self.splitter.realPercent(QtCore.Qt.Horizontal) + 0.5)
                    if ((event.pos().x() > (hiPart - self.splitter.centerPartWidth() / 2)) and
                        (event.pos().x() < (hiPart + self.splitter.centerPartWidth() / 2))):
                        self.center = True
                        self.setCursor(QtCore.Qt.SizeAllCursor)
                    else:
                        self.setCursor(QtCore.Qt.SplitVCursor)
            # Remember where the drag started, in parent coordinates.
            self.mousePos = self.mapToParent(event.pos())
            if isinstance(self.parent(), QuadSplitter):
                self.parent().splitterMoveStart(self, self.center)
    def mouseReleaseEvent(self, event):
        self.mouse = False
        self.center = False
class QuadSplitter(QtWidgets.QFrame):
    """Resizable four-pane (2x2) viewport with two crossing splitter bars."""
    notify = directNotify.newCategory("QuadSplitter")
    def __init__(self, parent = None):
        QtWidgets.QFrame.__init__(self, parent)
        # 2x2 grid of child widgets (row-major); None = empty cell.
        self.grid = [[None, None], [None, None]]
        # Minimum width/height enforced for each child widget, in px.
        self.minimumWidgetSize = 30
        # Side length of the center grab square where the bars cross, in px.
        self.centerPart = 30
        # Margin between the frame edge and the child widgets, in px.
        self.spacing = 5
        # Thickness of each splitter bar, in px.
        self.splittersSpacing = 5
        self.horizontalSplitter = AdvSplitter(self, self, QtCore.Qt.Horizontal)
        self.verticalSplitter = AdvSplitter(self, self, QtCore.Qt.Vertical)
        # Bar positions captured at the start of a drag (parent coords).
        self.splittersMovingPos = QtCore.QPoint(0, 0)
        self.arrange()
    def cleanup(self):
        # Drop all references and schedule deletion on the Qt side.
        self.grid = None
        self.minimumWidgetSize = None
        self.centerPart = None
        self.spacing = None
        self.splittersSpacing = None
        self.horizontalSplitter.cleanup()
        self.horizontalSplitter = None
        self.verticalSplitter.cleanup()
        self.verticalSplitter = None
        self.splittersMovingPos = None
        self.deleteLater()
    def resizeEvent(self, event):
        # Re-layout children and bars whenever the frame is resized.
        self.arrange()
    def centerPartWidth(self):
        """Return the side length of the center grab square, in px."""
        return self.centerPart
    def addWidget(self, widget, row, column):
        """Place `widget` into grid cell (row, column); both must be 0 or 1."""
        if row < 0 or column < 0 or row > 1 or column > 1:
            self.notify.warning("Cannot add %s/%s to %s/%s at row %i column %i" % (
                widget.metaObject().className(), widget.objectName(),
                self.metaObject().className(), self.objectName(), row, column
            ))
            return
        # Show the widget only if it was not explicitly hidden by the caller.
        needShow = self.isVisible() and \
                   not (widget.isHidden() and widget.testAttribute(QtCore.Qt.WA_WState_ExplicitShowHide))
        if widget.parentWidget() != self:
            widget.setParent(self)
        if needShow:
            widget.show()
        self.grid[row][column] = widget
        self.arrange()
    def realPercent(self, orientation):
        """Return the given bar's actual position as a fraction of the
        usable span, derived from its current geometry."""
        if orientation == QtCore.Qt.Horizontal:
            newX = self.horizontalSplitter.x() - self.realSpacing()
            return newX / self.realWidth()
        newY = self.verticalSplitter.y() - self.realSpacing()
        return newY / self.realHeight()
    def realSpacing(self):
        # Margin plus the QFrame border width.
        return self.spacing + self.frameWidth()
    def realWidth(self):
        # Width available to the two columns (margins and bar excluded).
        return self.width() - (self.realSpacing() * 2) - self.splittersSpacing
    def realHeight(self):
        # Height available to the two rows (margins and bar excluded).
        return self.height() - (self.realSpacing() * 2) - self.splittersSpacing
    def arrange(self):
        """Compute column/row sizes from the bar percents, clamp them to the
        children's min/max sizes, then set the geometry of bars and cells."""
        minMaxHorizontalSizes = [[0, QtWidgets.QWIDGETSIZE_MAX], [0, QtWidgets.QWIDGETSIZE_MAX]]
        minMaxVerticalSizes = [[0, QtWidgets.QWIDGETSIZE_MAX], [0, QtWidgets.QWIDGETSIZE_MAX]]
        # Gather per-column and per-row min/max constraints from children.
        for r in range(2):
            for c in range(2):
                if (self.grid[r][c] is not None) and self.grid[r][c].parent() == self:
                    minMaxHorizontalSizes[c] = [
                        max(self.minimumWidgetSize if self.grid[r][c].minimumWidth() < self.minimumWidgetSize else self.grid[r][c].minimumWidth(),
                            minMaxHorizontalSizes[c][0]),
                        min(self.grid[r][c].maximumWidth(), minMaxHorizontalSizes[c][1])
                    ]
                    minMaxVerticalSizes[r] = [
                        max(self.minimumWidgetSize if self.grid[r][c].minimumHeight() < self.minimumWidgetSize else self.grid[r][c].minimumHeight(),
                            minMaxVerticalSizes[r][0]),
                        min(self.grid[r][c].maximumHeight(), minMaxVerticalSizes[r][1])
                    ]
                else:
                    # Widget was reparented elsewhere; forget it.
                    self.grid[r][c] = None
        # columns
        leftColumnWidth = math.floor(self.realWidth() * self.horizontalSplitter.percent() + 0.5)
        if leftColumnWidth < minMaxHorizontalSizes[0][0]:
            leftColumnWidth = minMaxHorizontalSizes[0][0]
        if leftColumnWidth > minMaxHorizontalSizes[0][1]:
            leftColumnWidth = minMaxHorizontalSizes[0][1]
        columnWidth = leftColumnWidth
        rightColumnWidth = self.realWidth() - leftColumnWidth
        if rightColumnWidth < minMaxHorizontalSizes[1][0]:
            rightColumnWidth = minMaxHorizontalSizes[1][0]
            columnWidth = self.realWidth() - rightColumnWidth
        if rightColumnWidth > minMaxHorizontalSizes[1][1]:
            rightColumnWidth = minMaxHorizontalSizes[1][1]
            columnWidth = self.realWidth() - rightColumnWidth
        # rows
        topColumnHeight = math.floor(self.realHeight() * self.verticalSplitter.percent() + 0.5)
        if topColumnHeight < minMaxVerticalSizes[0][0]:
            topColumnHeight = minMaxVerticalSizes[0][0]
        if topColumnHeight > minMaxVerticalSizes[0][1]:
            topColumnHeight = minMaxVerticalSizes[0][1]
        columnHeight = topColumnHeight
        bottomColumnHeight = self.realHeight() - topColumnHeight
        if bottomColumnHeight < minMaxVerticalSizes[1][0]:
            bottomColumnHeight = minMaxVerticalSizes[1][0]
            columnHeight = self.realHeight() - bottomColumnHeight
        if bottomColumnHeight > minMaxVerticalSizes[1][1]:
            bottomColumnHeight = minMaxVerticalSizes[1][1]
            columnHeight = self.realHeight() - bottomColumnHeight
        # Position the two bars and keep them above the child widgets.
        self.horizontalSplitter.setGeometry(self.realSpacing() + columnWidth, self.realSpacing(),
                                            self.splittersSpacing, self.height() - self.realSpacing() * 2)
        self.horizontalSplitter.raise_()
        self.verticalSplitter.setGeometry(self.realSpacing(), self.realSpacing() + columnHeight,
                                          self.width() - self.realSpacing() * 2, self.splittersSpacing)
        self.verticalSplitter.raise_()
        # Lay out the four cells around the bars.
        if self.grid[0][0]:
            self.grid[0][0].setGeometry(self.realSpacing(), self.realSpacing(), columnWidth, columnHeight)
        if self.grid[0][1]:
            self.grid[0][1].setGeometry(self.realSpacing() + columnWidth + self.splittersSpacing, self.realSpacing(),
                                        self.realWidth() - columnWidth, columnHeight)
        if self.grid[1][0]:
            self.grid[1][0].setGeometry(self.realSpacing(), self.realSpacing() + columnHeight + self.splittersSpacing,
                                        columnWidth, self.realHeight() - columnHeight)
        if self.grid[1][1]:
            self.grid[1][1].setGeometry(self.realSpacing() + columnWidth + self.splittersSpacing,
                                        self.realSpacing() + columnHeight + self.splittersSpacing, self.realWidth() - columnWidth,
                                        self.realHeight() - columnHeight)
        #messenger.send('quadSplitterResized')
    def splitterMoveStart(self, splitter, center):
        # Record both bars' positions at the start of a drag.
        self.splittersMovingPos = QtCore.QPoint(self.horizontalSplitter.x(), self.verticalSplitter.y())
    def splitterMove(self, splitter, offset, center):
        # A center drag moves both bars; otherwise only the dragged one.
        if center:
            newX = self.splittersMovingPos.x() + offset.x() - self.realSpacing()
            newY = self.splittersMovingPos.y() + offset.y() - self.realSpacing()
            self.horizontalSplitter.setPercent(newX / self.realWidth())
            self.verticalSplitter.setPercent(newY / self.realHeight())
        elif splitter == self.horizontalSplitter:
            newX = self.splittersMovingPos.x() + offset.x() - self.realSpacing()
            splitter.setPercent(newX / self.realWidth())
        else:
            newY = self.splittersMovingPos.y() + offset.y() - self.realSpacing()
            splitter.setPercent(newY / self.realHeight())
        self.arrange()
|
#!/bin/python
#-*- coding: utf-8 -*-
import requests, os, sys, time, re
from bs4 import BeautifulSoup as bs
token = 'TOKENBOT'
api = f"https://api.telegram.org/bot{token}/"
def res(method, data):
    """Call a Telegram Bot API method.

    :param method: API method name, appended to the base bot URL
    :param data: query parameters for the request
    :return: True for an OK HTTP status, False otherwise
    """
    # Dropped the needless `global api` (reading a module global needs no
    # declaration) and made failure return an explicit False instead of None.
    response = requests.get(api + method, params=data)
    return response.ok
def send_Msg(id, text):
    """Send a plain text message to the given chat."""
    payload = {'chat_id': id, 'text': text}
    res('sendMessage', payload)
def send_File(id, document, capt=False):
    """Send a document to the given chat, optionally with a caption."""
    payload = {'chat_id': id, 'document': document}
    if capt:
        payload['caption'] = capt
    res('sendDocument', payload)
def send_Img(id, photo, capt=False):
    """Send a photo to the given chat, optionally with a caption."""
    payload = {'chat_id': id, 'photo': photo}
    if capt:
        payload['caption'] = capt
    res('sendPhoto', payload)
def search_manga(nime):
    """Search komiku.co.id for a manga title.

    :param nime: title to search for
    :return: dict with keys 'name', 'chapter', 'desc', 'img',
        or False when the search fails
    """
    try:
        c = {}
        data = {
            'post_type':'manga',
            's':nime.replace(' ','+')
        }
        r = requests.post('https://komiku.co.id',data=data).text
        name = re.findall('<a href="https://komiku.co.id/manga/(.*?)/">',r)[0]
        link = requests.get('https://komiku.co.id/manga/'+name).text
        chapter = re.findall('<span>Chapter Baru </span><span>Chapter (.*?)</span>',link)[0]
        b = bs(link, "html.parser")
        desc = b.findAll('p')[1].text.replace('\n','').replace('\t','')
        image = re.findall('<img src="(.*?)" data-src=".*?" class="lazy sd rd">',r)[0].split('?')[0]
        c['name'] = name
        c['chapter'] = chapter
        c['desc'] = desc
        c['img'] = image
        return c
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
    # and SystemExit): only network and scrape-shape failures map to the
    # best-effort False result. Also removed the dead commented-out image
    # cache and the unused `end` local it fed.
    except (requests.RequestException, IndexError, AttributeError):
        return False
def download(nime, chapter):
    """Resolve the direct PDF download link for one chapter of a manga.

    :param nime: manga slug as returned by search_manga
    :param chapter: chapter number to download
    :return: URL of the chapter PDF, or False when any step fails
    """
    try:
        r = requests.get('https://komiku.co.id/manga/'+nime).text
        rgx = re.findall(f'<a href="(.*?)" title=".*? Chapter {chapter}" class="popunder">',r)[0]
        r2 = requests.get(rgx).text
        r3 = requests.get('https://komiku.co.id'+re.findall('<a href="(.*?)" rel="nofollow" target="_blank">Download PDF</a>',r2)[0],allow_redirects=True,timeout=400).text
        r4 = requests.get(re.findall('<iframe src="(.*?)"></iframe>',r3)[0]).text
        pdf_komik = re.findall('<a href="(.*?)" download>',r4)[0]
        return pdf_komik
    # Narrowed from a bare `except:` so programming errors and interrupts
    # are no longer hidden behind the best-effort False result.
    except (requests.RequestException, IndexError):
        return False
|
#properties: hunger/thirst level, happiness, anger, energy, name, age, size
#methods(function)(method is a part of a class while function is not): run, bark,eat, sleep, play, bite
import random
class Leo:
    """A simple virtual pet with fullness, energy, happiness and homework
    levels (each nominally on a 0-100 scale)."""
    def __init__(self):
        # Starting levels for the pet.
        self.fullness = 5
        self.energy = 5
        self.happiness = 5
        self.homework = 0
    def play(self):
        """Play with the pet: +1 happiness, -1 fullness and energy.

        :return: a status message (the "dead" message when the pet has no
            energy or fullness left).
        """
        if self.energy > 0 and self.fullness > 0:
            self.happiness += 1
            self.fullness -= 1
            self.energy -= 1
            return "I played"
        # Removed the unreachable quit() that followed the return here.
        return "You played too much and you are dead :("
    def playsquash(self):
        """Play squash: random happiness change, large energy/fullness cost."""
        if self.energy > 0 and self.fullness > 0:
            self.happiness += random.randint(-5, 5)
            self.fullness -= 30
            self.energy -= 30
            return "Played sqaush"
        return "You died while playing squash because you are out of energy"
    def stats(self):
        """Return a multi-line summary of the pet's current levels."""
        # Fixed: `info` was never initialized (`info + ...` raised NameError)
        # and the homework line was mislabeled "happiness".
        info = "\nenergy:" + str(self.energy)
        info += "\nfullness:" + str(self.fullness)
        info += "\nhappiness:" + str(self.happiness)
        info += "\nhomework:" + str(self.homework)
        return info
# Create a pet instance. Fixed: the original assigned the class itself
# (`leo1 = Leo`), so every subsequent method call failed.
leo1 = Leo()
# Simple interactive loop: show stats, then act on the user's choice.
while True:
    print(leo1.stats())
    choice = input("what would you like to do with your dog")
    if choice == "play":
        print(leo1.play())
    else:
        print("You can't do that")
|
from tabela_espalhamento import tabela_espalhamento
class Conjunto:
    """A set backed by a hash (espalhamento) table."""
    def __init__(self, categorias=10):
        """:param categorias: number of buckets in the underlying table."""
        self.__elementos = tabela_espalhamento.Tabela_espelhamento(categorias)
    def inserir(self, elemento):
        """Insert an element into the set."""
        self.__elementos.inserir(elemento)
    def __str__(self):
        return str(self.__elementos)
    def contem(self, elemento):
        """Return True when the element is present."""
        return self.__elementos.contem(elemento)
    def esta_vazio(self, elemento=None):
        """Return True when the set is empty.

        :param elemento: unused; kept (now optional) for backward
            compatibility with callers that passed an argument.
        """
        # NOTE(review): assumes `tamanho` is an attribute/property of the
        # table — confirm against the tabela_espalhamento implementation.
        return self.__elementos.tamanho == 0
    def remover(self, elemento):
        """Remove an element from the set."""
        self.__elementos.remover(elemento)
|
import threading
import time
def fun1(tread_name,delay):
    """Worker for thread 1: announce start, sleep `delay` seconds, announce end."""
    print('线程{0}开始运行'.format(tread_name))
    time.sleep(delay)
    print('线程{0}结束运行'.format(tread_name))
def fun2(tread_name, delay):
    """Worker for thread 2: announce start, sleep `delay` seconds, announce end."""
    print('线程{0}开始运行'.format(tread_name))
    time.sleep(delay)
    print('线程{0}结束运行'.format(tread_name))
if __name__=='__main__':
    # Start both workers concurrently, then wait for each to finish.
    t1=threading.Thread(target=fun1,args=('tread_1',3))
    t2 = threading.Thread(target=fun2, args=('tread_2', 4))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
# String variables; example of case sensitivity (TEXTO and texto differ).
TEXTO = "teste 1"
texto = "teste 2"
# Integer variable.
numero_inteiro = 100
# Float variable.
numero_real = 2.5
# Python has no "empty" variable; initialize with a value instead.
soma = 0
# print() displays text on the screen.
print("soma contém", soma)
print("TEXTO contém", TEXTO)
print("texto contém", texto)
print("numero_inteiro contém", numero_inteiro)
print("numeroReal contém", numero_real)
from django.contrib import admin
from .models import Attempted
class AttemptedAdmin(admin.ModelAdmin):
    """Admin list view for quiz attempts."""
    list_display = ('student', 'quiz_name', 'got',)
    search_fields = ('user__username', 'quiz__name', 'got')
    def quiz_name(self, instance):
        # Computed column: name of the attempted quiz.
        return instance.quiz.name
    def student(self, instance):
        # Computed column: username of the student who made the attempt.
        return instance.user.username
admin.site.register(Attempted, AttemptedAdmin)
|
""" script to do pulse shape analysis of the fast neutron background:
Neutron hittimes from script hittime_distribution_fastneutron.py, that are saved in folder
/home/astro/blum/PhD/work/MeVDM_JUNO/fast_neutrons/hittimes/, are analyzed with this script.
As input for the different values defining the pulse shape analysis (tail start, tail end, tot value and efficiency
of positron hittimes) the results from script pulse_shape_analysis_v1.py (summarized in file PSD_results.ods) are
taken.
Procedure to get the efficiency of how many fast neutron background events can be cut away:
1. calculate the tail-to-total values for each neutron hittime distribution with function pulse_shape() also used
in script pulse_shape_analysis_v1.py for start and stop value of tail given by PSD_results.ods.
2. Take the tail-to-total value corresponding to the IBD efficiency from PSD_results.ods and calculate the
fast neutron efficiency due to this tail-to-total value.
To compare the results, also the positron hittimes and NC hittimes are analyzed in the same way like in
pulse_shape_analysis_v1.py
"""
import datetime
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from NC_background_functions import pulse_shape
def fast_n_efficiency(array_tot_fn, tot_value_pos):
    """
    Calculate the percentage of tail-to-total values in array_tot_fn that are
    greater than or equal to tot_value_pos (i.e. the fraction of fast neutron
    events that would be cut away by the PSD cut).

    :param array_tot_fn: list/array with the tail-to-total values of the fast
                         neutron hittime distributions (sorted in place, as
                         in the original implementation)
    :param tot_value_pos: tail-to-total cut value from the positron and NC PSD
    :return: fast neutron efficiency in percent (float)
    """
    # number of analyzed hittime distributions:
    number_events = len(array_tot_fn)
    # guard against empty input (the original loop left index9 undefined here):
    if number_events == 0:
        return 0.0
    # keep the in-place ascending sort for backward compatibility with callers
    # that rely on the side effect:
    array_tot_fn.sort()
    # BUGFIX: the original loop used the break index, which over-counted by one
    # event whenever NO value reached tot_value_pos. Counting directly is both
    # correct and simpler.
    number_cut_away = float(sum(1 for tot in array_tot_fn if tot >= tot_value_pos))
    # fast neutron efficiency (fraction of events cut away) in percent:
    fn_eff = number_cut_away / float(number_events) * 100.0
    return fn_eff
# get the date and time, when the script was run:
date = datetime.datetime.now()
now = date.strftime("%Y-%m-%d %H:%M")
# path, where output is saved:
output_path = "/home/astro/blum/PhD/work/MeVDM_JUNO/fast_neutrons/"
# Set flag, if plots should be saved:
DISPLAY_PLOTS = True
""" parameters for tail to total method from pulse shape analysis of IBD events and NC events (PSD_results.ods): """
# INFO-me: parameters should agree with the bin-width of the time window!
# start of the tail in ns:
start_tail = np.array([335, 350, 340])
# end of the tail in ns:
stop_tail = np.array([600, 540, 540])
# tail-to-total value corresponding to tail window:
tot_value_positron = np.array([0.00661, 0.00497, 0.00525])
# best positron (IBD) efficiencies in %:
best_positron_efficiencies = np.array([2.3, 5.77, 14.16])
# corresponding NC (IBD-like) efficiencies in %:
NC_efficiencies = np.array([95, 96, 97])
# check that all parameter arrays have the same length.
# BUGFIX: the original chained `!=` comparison (a != b != c != ...) only
# compares neighbouring pairs, so e.g. lengths (3, 3, 4) slipped through.
if not (len(start_tail) == len(stop_tail) == len(tot_value_positron)
        == len(best_positron_efficiencies) == len(NC_efficiencies)):
    sys.exit("ERROR: input parameters have not the same length!!!")
print("start_tail = {0}".format(start_tail))
print("stop_tail = {0}".format(stop_tail))
""" parameters that define the time window of prompt signal: """
# start of the time window in ns:
start_time = 0.0
# end of the time window in ns:
end_time = 2000.0
# loop over different start values of the tail:
# For each tail-window configuration: compute tail-to-total (tot) values for
# fast-neutron, positron and NC hittime distributions, derive the fast-neutron
# PSD efficiency, and optionally save comparison plots.
# NOTE(review): indentation was reconstructed from context — confirm nesting
# against the original file.
for index in range(len(start_tail)):
    """ analyze the hittime distribution of neutrons: """
    print("analyze neutrons...")
    # path, where hittime distributions neutrons are saved:
    input_path_neutron = "/home/astro/blum/PhD/work/MeVDM_JUNO/fast_neutrons/hittimes/"
    # number of events that are analyzed:
    num_events_analyzed_neutron = 0
    # preallocate array, where tail-to-total ratios are stored:
    array_tot_ratio_neutron = []
    # preallocate array, where a average hittime-distribution of neutrons are stored (number of pe per bin):
    # length of average hittime distribution (number of bins):
    length_average_hittime = 500
    hittime_average_neutron = np.zeros(length_average_hittime)
    # loop over all files in folder input_path_neutron, that start with 'file' and end with 'neutron.txt'
    # (files where hittime distribution is saved, each file is equal to one event):
    for file_neutron in os.listdir(input_path_neutron):
        if file_neutron.startswith("file") and file_neutron.endswith("neutron.txt"):
            # get the file name:
            file_name_neutron = input_path_neutron + file_neutron
            # read txt file:
            file_data_neutron = np.loadtxt(file_name_neutron)
            # 0th entry in file_data_neutron is minimum of time window in ns:
            min_time_neutron = file_data_neutron[0]
            # 1st entry in file_data_neutron is maximum of time window in ns:
            max_time_neutron = file_data_neutron[1]
            # 2nd entry in file_data_neutron is bin-width in ns:
            bin_width = file_data_neutron[2]
            # the rest of file_data_neutron is the hittime distribution histogram in nPE per bin:
            number_pe_per_bin_neutron = file_data_neutron[3:]
            # check if max_time_neutron is greater than end_time:
            if max_time_neutron > end_time:
                # prompt signal is longer than time window.
                print("max_time_neutron {0:.2f} ns > end_time {1:.1f} ns in file {2}".format(max_time_neutron,
                                                                                             end_time,
                                                                                             file_name_neutron))
            # time window:
            time_window_neutron = np.arange(min_time_neutron, end_time + bin_width, bin_width)
            # compare len(time_window_neutron) with len(number_pe_per_bin_neutron):
            missing_zeros = len(time_window_neutron) - len(number_pe_per_bin_neutron)
            # append the missing_zeros to number_pe_per_bin_neutron:
            number_pe_per_bin_neutron = np.pad(number_pe_per_bin_neutron, (0, missing_zeros), 'constant',
                                               constant_values=(0.0, 0.0))
            # analyze the hittime distribution of these event:
            tot_ratio_neutron, npe_norm_neutron = pulse_shape(time_window_neutron, number_pe_per_bin_neutron,
                                                              start_tail[index], stop_tail[index])
            # check if tot-value is not 0:
            if tot_ratio_neutron == 0:
                continue
            # increment number of analyzed events:
            num_events_analyzed_neutron += 1
            # append tail-to-total ratio to array:
            array_tot_ratio_neutron.append(tot_ratio_neutron)
            # append zeros to npe_norm_neutron to get a average length of the hittimes:
            npe_norm_neutron = np.pad(npe_norm_neutron, (0, length_average_hittime - len(npe_norm_neutron)),
                                      'constant', constant_values=(0.0, 0.0))
            # add the normalized hittime distribution (npe_norm_neutron) to the average hittime distribution
            # (hittime_average_neutron):
            hittime_average_neutron = hittime_average_neutron + npe_norm_neutron
        else:
            continue
    # array_tot_ratio_neutron contains the tot-values of each neutron hittime distribution!
    # to get the average hittime distribution with a maximum of 1, normalize hittime_average_neutron with
    # max(hittime_average_neutron):
    # NOTE(review): raises ZeroDivisionError-like behavior if no neutron file
    # was analyzed (max of all zeros) — assumes at least one matching file.
    hittime_average_neutron = hittime_average_neutron / max(hittime_average_neutron)
    """ calculate the fast neutron efficiency due to the tot-value from positron and NC pulse shape analysis: """
    fast_n_eff = fast_n_efficiency(array_tot_ratio_neutron, tot_value_positron[index])
    print("\ntail start = {0:.1f} ns, tail end = {1:.1f} ns, tot value = {2:.5f}"
          .format(start_tail[index], stop_tail[index], tot_value_positron[index]))
    print("positron (IBD) efficiency = {0:.3f} %".format(best_positron_efficiencies[index]))
    print("NC efficiency = {0:.3f} %".format(NC_efficiencies[index]))
    print("Fast Neutron efficiency = {0:.3f} %\n".format(fast_n_eff))
    """ analyze the hittime distribution of positrons with kinetic energy uniformly distributed from 10 MeV to
    100 MeV: """
    print("analyze positrons...")
    # path, where hittime distributions of 100 MeV positrons are saved:
    input_path_positron = "/home/astro/blum/juno/atmoNC/data_NC/output_PSD/positron_hittime/"
    # number of events that are analyzed:
    num_events_analyzed_positron = 0
    # preallocate array, where tail-to-total ratios are stored:
    array_tot_ratio_positron = []
    # preallocate array, where a average hittime-distribution of positrons are stored (number of pe per bin):
    # length of average hittime distribution (number of bins):
    hittime_average_positron = np.zeros(length_average_hittime)
    # loop over all files in folder input_path_positron, that start with 'file' and end with 'positron.txt'
    # (files where hittime distribution is saved, each file is equal to one event):
    for file_positron in os.listdir(input_path_positron):
        if file_positron.startswith("file") and file_positron.endswith("positron.txt"):
            # increment num_event_analyzed_positron:
            num_events_analyzed_positron += 1
            # get the file name:
            file_name_positron = input_path_positron + file_positron
            # read txt file:
            file_data_positron = np.loadtxt(file_name_positron)
            # 0th entry in file_data_positron is minimum of time window in ns:
            min_time_positron = file_data_positron[0]
            # 1st entry in file_data_positron is maximum of time window in ns:
            max_time_positron = file_data_positron[1]
            # 2nd entry in file_data_positron is bin-width in ns:
            bin_width = file_data_positron[2]
            # the rest of file_data_positron is the hittime distribution histogram in nPE per bin:
            number_pe_per_bin_positron = file_data_positron[3:]
            # check if max_time_positron is greater than end_time:
            if max_time_positron > end_time:
                # prompt signal is longer than time window.
                print("max_time_positron {0:.2f} ns > end_time {1:.1f} ns in file {2}".format(max_time_positron,
                                                                                              end_time,
                                                                                              file_name_positron))
            # time window:
            time_window_positron = np.arange(min_time_positron, end_time + bin_width, bin_width)
            # compare len(time_window_positron) with len(number_pe_per_bin_positron):
            missing_zeros = len(time_window_positron) - len(number_pe_per_bin_positron)
            # append the missing_zeros to number_pe_per_bin_positron:
            number_pe_per_bin_positron = np.pad(number_pe_per_bin_positron, (0, missing_zeros), 'constant',
                                                constant_values=(0.0, 0.0))
            # analyze the hittime distribution of these event:
            tot_ratio_positron, npe_norm_positron = pulse_shape(time_window_positron, number_pe_per_bin_positron,
                                                                start_tail[index], stop_tail[index])
            # append tail-to-total ratio to array:
            array_tot_ratio_positron.append(tot_ratio_positron)
            # append zeros to npe_norm_positron to get a average length of the hittimes:
            npe_norm_positron = np.pad(npe_norm_positron, (0, length_average_hittime - len(npe_norm_positron)),
                                       'constant', constant_values=(0.0, 0.0))
            # add the normalized hittime distribution (npe_norm_positron) to the average hittime distribution
            # (hittime_average_positron):
            hittime_average_positron = hittime_average_positron + npe_norm_positron
        else:
            continue
    # to get the average hittime distribution with a maximum of 1, normalize hittime_average_positron with
    # max(hittime_average_positron):
    hittime_average_positron = hittime_average_positron / max(hittime_average_positron)
    """ analyze the hittime distribution of NC events that can mimic IBD signal: """
    print("analyze NC events...")
    # path, where hittime distributions of preselected NC events are saved:
    input_path_NCevents = "/home/astro/blum/juno/atmoNC/data_NC/output_detsim/"
    # number of events that are analyzed:
    num_events_analyzed_NC = 0
    # preallocate array, where tail-to-total ratios are stored:
    array_tot_ratio_NC = []
    # preallocate array, where a average hittime-distribution of NC events are stored (number of pe per bin):
    hittime_average_NC = np.zeros(length_average_hittime)
    # loop over all files in folder input_path_NCevents, that start with "file" and end with ".txt"
    # (file where hittime distribution is saved, each file is equal to one event that mimics IBD signal):
    for file_NC in os.listdir(input_path_NCevents):
        if file_NC.startswith("file") and file_NC.endswith(".txt"):
            # increment num_event_analyzed_NC:
            num_events_analyzed_NC += 1
            # get the file name:
            file_name_NC = input_path_NCevents + file_NC
            # read txt file:
            file_data_NC = np.loadtxt(file_name_NC)
            # 0th entry in file_data_NC is minimum of time window in ns:
            min_time_NC = file_data_NC[0]
            # 1st entry in file_data_NC is maximum of time window in ns:
            max_time_NC = file_data_NC[1]
            # 2nd entry in file_data_NC is bin-width in ns:
            bin_width = file_data_NC[2]
            # the rest of file_data_NC is the hittime distribution histogram in nPE per bin:
            number_pe_per_bin_NC = file_data_NC[3:]
            # check if max_time_NC is greater than end_time:
            if max_time_NC > end_time:
                # prompt signal is longer than time window. -> Set max_time_NC = end_time:
                print("max_time_NC {0:.2f} ns > end_time {1:.1f} ns in file {2}".format(max_time_NC, end_time,
                                                                                        file_name_NC))
                max_time_NC = end_time
            # time window corresponding to number_pe_per_bin_NC:
            time_window_NC = np.arange(min_time_NC, end_time + bin_width, bin_width)
            # compare len(time_window_NC) with len(number_pe_per_bin_NC):
            missing_zeros = len(time_window_NC) - len(number_pe_per_bin_NC)
            # append the missing_zeros to number_pe_per_bin_positron:
            number_pe_per_bin_NC = np.pad(number_pe_per_bin_NC, (0, missing_zeros), 'constant',
                                          constant_values=(0.0, 0.0))
            # analyze the hittime distribution of these event:
            tot_ratio_NC, npe_norm_NC = pulse_shape(time_window_NC, number_pe_per_bin_NC, start_tail[index],
                                                    stop_tail[index])
            # append tail-to-total ratio to array:
            array_tot_ratio_NC.append(tot_ratio_NC)
            # append zeros to npe_norm_NC to get a average length of the hittimes:
            npe_norm_NC = np.pad(npe_norm_NC, (0, length_average_hittime - len(npe_norm_NC)), 'constant',
                                 constant_values=(0.0, 0.0))
            # add the normalized hittime distribution (npe_norm_NC) to the average hittime distribution
            # (hittime_average_NC):
            hittime_average_NC = hittime_average_NC + npe_norm_NC
        else:
            continue
    # to get the average hittime distribution with a maximum of 1, normalize hittime_average_NC with
    # max(hittime_average_NC):
    hittime_average_NC = hittime_average_NC / max(hittime_average_NC)
    if DISPLAY_PLOTS:
        # display tot-values for positrons, NC and fast neutron events for the given configuration:
        h1 = plt.figure(1, figsize=(15, 8))
        First_bin = 0.0
        Last_bin = 0.05
        Bin_width = (Last_bin-First_bin) / 200
        Bins = np.arange(First_bin, Last_bin+Bin_width, Bin_width)
        plt.hist(array_tot_ratio_positron, bins=Bins, histtype="step", align='mid', color="r",
                 label="positrons with kinetic energy between 10 MeV and 100 MeV (entries = {0:d})"
                 .format(num_events_analyzed_positron))
        plt.hist(array_tot_ratio_NC, bins=Bins, histtype="step", align='mid', color="b",
                 label="prompt signal of NC events that mimic IBD signal (entries = {0:d})"
                 .format(num_events_analyzed_NC))
        plt.hist(array_tot_ratio_neutron, bins=Bins, histtype="step", align='mid', color="g",
                 label="prompt signal of neutrons representing fast neutron events (entries = {0:d})"
                 .format(num_events_analyzed_neutron))
        plt.xlabel("tail-to-total ratio")
        plt.ylabel("events")
        plt.title("Tail-to-total value for prompt signals of positron, NC and fast neutron events" +
                  "\n(tail window {0:0.1f} ns to {1:0.1f} ns)".format(start_tail[index], stop_tail[index]))
        plt.legend()
        plt.grid()
        plt.savefig(output_path + "tot_ratio_tail_{0:.0f}_PosNCfastN.png".format(NC_efficiencies[index]))
        plt.close()
        # display tot-values for positrons, NC and fast neutron events for the given configuration with efficiencies:
        h2 = plt.figure(2, figsize=(15, 8))
        First_bin = 0.0
        Last_bin = 0.05
        Bin_width = (Last_bin-First_bin) / 200
        Bins = np.arange(First_bin, Last_bin+Bin_width, Bin_width)
        n_pos_1, bins_pos_1, patches_pos_1 = plt.hist(array_tot_ratio_positron, bins=Bins, histtype="step", align='mid',
                                                      color="r", linewidth=1.5,
                                                      label="positrons with kinetic energy between 10 MeV and 100 MeV "
                                                            "(entries = {0:d})"
                                                      .format(num_events_analyzed_positron))
        n_NC_1, bins_NC_1, patches_NC_1 = plt.hist(array_tot_ratio_NC, bins=Bins, histtype="step", align='mid',
                                                   color="b", linewidth=1.5,
                                                   label="prompt signal of NC events that mimic IBD signal "
                                                         "(entries = {0:d})"
                                                   .format(num_events_analyzed_NC))
        n_n_1, bins_n_1, patches_n_1 = plt.hist(array_tot_ratio_neutron, bins=Bins, histtype="step", align='mid',
                                                color="g", linewidth=1.5,
                                                label="prompt signal of neutrons representing fast neutron "
                                                      "events (entries = {0:d})"
                                                .format(num_events_analyzed_neutron))
        # mark the PSD cut value and annotate all three efficiencies:
        plt.vlines(tot_value_positron[index], 0, max(n_pos_1)+max(n_pos_1)/10, colors="k", linestyles="--",
                   label="$\\epsilon_{IBD}$ = "+"{0:0.2f} %\n".format(best_positron_efficiencies[index])+
                         "$\\epsilon_{NC}$ = "+"{0:0.2f} %\n".format(NC_efficiencies[index])+
                         "$\\epsilon_{fastN}$ = "+"{0:0.2f} %\n".format(fast_n_eff)+
                         "tot value = {0:.5f}".format(tot_value_positron[index]))
        plt.xlabel("tail-to-total ratio")
        plt.ylabel("events")
        plt.title("Tail-to-total value for prompt signals of positron, NC and fast neutron events" +
                  "\n(tail window {0:0.1f} ns to {1:0.1f} ns)".format(start_tail[index], stop_tail[index]))
        plt.legend()
        plt.grid()
        plt.savefig(output_path + "tot_ratio_tail_{0:.0f}_PosNCfastN_efficiencies.png".format(NC_efficiencies[index]))
        plt.close()
        """ Display the average hittime distributions of positrons and IBD-like NC events: """
        # NOTE(review): bin_width here is whatever the last NC file defined, and
        # this figure is overwritten on every loop iteration — confirm intended.
        h3 = plt.figure(3, figsize=(15, 8))
        bin_edges = np.arange(0.0, bin_width*length_average_hittime, bin_width)
        plt.semilogy(bin_edges, hittime_average_positron, linestyle="steps", color="r",
                     label="average positron hittime distribution")
        plt.semilogy(bin_edges, hittime_average_NC, linestyle="steps", color="b",
                     label="average hittime distribution of IBD-like NC events")
        plt.semilogy(bin_edges, hittime_average_neutron, linestyle="steps", color="g",
                     label="average hittime distribution of fast neutron events")
        plt.xlabel("hittime in ns")
        plt.ylabel("probability per bin (bin-width = {0:0.1f} ns)".format(bin_width))
        plt.xlim(xmin=0.0, xmax=end_time)
        plt.ylim(ymin=1e-4, ymax=2.0)
        plt.title("Average hittime distribution of prompt signals")
        plt.legend()
        plt.grid()
        plt.savefig(output_path + "average_hittimes_PosNCfastN.png")
        plt.close()
|
from random import randrange

# Print a random integer from 1 to 100 inclusive.
# BUGFIX: randrange(1, 100) excludes the upper bound, so 100 could never be
# drawn even though the message promises "1 to 100"; use 101 as the stop.
print("A random number from 1 to 100")
n = randrange(1, 101)
print(n)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
import ujson
from sanic import Blueprint
from sanic import response
from sanic.log import logger
from sanic.request import Request
from sanic_jwt import inject_user, scoped
from web_backend.nvlserver.helper.request_wrapper import populate_response_format
from web_backend.nvlserver.helper.process_request_args import proc_arg_to_int
# from web_backend.nvlserver.module.hw_module_command_state.service import create_hw_module_command_state_element
from .service import (
get_hw_module_list, get_hw_module_list_count, create_hw_module_element,
get_hw_module_element, update_hw_module_element, delete_hw_module_element,
get_hw_module_dropdown_list, get_hw_module_random_unique_str,
get_hw_module_random_unique_str_list, update_user_hw_module_element
)
api_hw_module_blueprint = Blueprint('api_hw_module', url_prefix='/api/hw_module')
@api_hw_module_blueprint.route('/', methods=['GET'])
@inject_user()
@scoped(['hw_module:read'], require_all=True, require_all_actions=True)
async def api_hw_module_list_get(
        request: Request,
        user):
    """
    Return a paginated list of hw_module elements.

    Query args: size, page, name, user_id. Admin accounts may filter by any
    user_id; other accounts are always restricted to their own user_id.

    :param request: incoming Sanic request
    :param user: injected JWT user payload
    :return: raw JSON response with the paginated module list
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    size = proc_arg_to_int(request.args.get('size', '1'), 1)
    page = proc_arg_to_int(request.args.get('page', '1'), 1)
    name = request.args.get('name', None)
    user_id = proc_arg_to_int(request.args.get('user_id', None), None)
    offset = (page - 1) * size

    if request.method == 'GET':
        try:
            if user:
                if user.get('user_id', None):
                    # non-admin accounts may only see their own modules
                    # (the original admin branch was a no-op `user_id = user_id`)
                    if user.get('account_type_name') not in ['admin']:
                        user_id = user.get('user_id')
                    hw_module_list = await get_hw_module_list(
                        request, user_id=user_id, name=name, limit=size, offset=offset)
                    hw_module_count = await get_hw_module_list_count(request, user_id=user_id, name=name)

                    if hw_module_list:
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        res_data_formatted = await populate_response_format(
                            hw_module_list, hw_module_count, size=size, page=page)
                        ret_val['data'] = res_data_formatted
                        status = 200
                    else:
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = {}
                        status = 200
                else:
                    status = 400
                    ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            # BUGFIX: error log previously named the wrong function ('api_hw_module_get')
            logger.error('Function api_hw_module_list_get -> GET erred with: {}'.format(al_err))

    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_hw_module_blueprint.route('/dropdown', methods=['GET'])
@inject_user()
@scoped(['hw_module:query_dropdown'], require_all=True, require_all_actions=True)
async def api_hw_module_list_dropdown_get(
        request: Request,
        user):
    """
    Return hw_module elements for a dropdown selector.

    Admin accounts see all modules (no user filter); other accounts are
    filtered by the 'user_id' (or legacy 'id') query argument.

    :param request: incoming Sanic request
    :param user: injected JWT user payload
    :return: raw JSON response with the dropdown list
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    name = request.args.get('name', None)
    # removed unused local `id = request.args.get('id', None)` that shadowed
    # the builtin; the value is re-read below where it is actually used

    if request.method == 'GET':
        try:
            if user:
                if user.get('user_id', None):
                    if user.get('account_type_name') in ['admin']:
                        # admins are not restricted to a single user
                        user_id = None
                        # user_id = request.args.get('user_id', None)
                    else:
                        user_id = request.args.get('user_id', None)
                        # legacy clients send the filter as 'id'
                        if request.args.get('id', None):
                            user_id = request.args.get('id', None)
                        user_id = proc_arg_to_int(user_id)
                    hw_module_list = await get_hw_module_dropdown_list(
                        request, user_id=user_id, name=name)

                    if hw_module_list:
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = hw_module_list
                        status = 200
                    else:
                        ret_val['success'] = True
                        # NOTE(review): echoing user_id back only on the empty
                        # result looks like debug leftovers — confirm
                        ret_val['user_id'] = user_id
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = []
                        status = 200
                else:
                    status = 400
                    ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            # BUGFIX: log message had a typo in the function name ('..._gets')
            logger.error('Function api_hw_module_list_dropdown_get -> GET erred with: {}'.format(al_err))

    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_hw_module_blueprint.route('/', methods=['POST'])
@inject_user()
@scoped(['hw_module:create'], require_all=True, require_all_actions=True)
async def api_hw_module_post(
        request: Request,
        user):
    """
    Create a new hw_module element (admin accounts only).

    Expects a JSON body with at least 'name'; 'module_id' falls back to a
    server-generated unique string when not supplied.

    :param request: incoming Sanic request
    :param user: injected JWT user payload
    :return: raw JSON response (201 on creation)
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}

    if request.method == 'POST':
        try:
            if user:
                if user.get('user_id'):
                    # only admin accounts may create modules
                    if user.get('account_type_name') in ['admin']:
                        # print(request.json)
                        name = request.json.get('name', None)
                        user_id = request.json.get('user_id', None)
                        traceable_object_id = request.json.get('traceable_object_id', None)
                        show_on_map = request.json.get('show_on_map', False)
                        active = request.json.get('active', False)
                        meta_information = {}
                        # fall back to a server-generated unique module id
                        module_id = request.json.get('module_id', await get_hw_module_random_unique_str(request))

                        if None not in [module_id, name]:
                            hw_module = await create_hw_module_element(
                                request, name, module_id, user_id, traceable_object_id,
                                meta_information, show_on_map, active)

                            if hw_module:
                                # await create_hw_module_command_state_element(
                                #     request, hw_module.get('id')
                                # )
                                ret_val['data'] = hw_module
                                ret_val['success'] = True
                                status = 201
                                ret_val['message'] = 'server.object_created'
                        # NOTE(review): indentation reconstructed — the 412 else
                        # is assumed to pair with the [module_id, name] check
                        # (as in the PUT handler); confirm against the original.
                        else:
                            status = 412
                            ret_val['message'] = 'server.query_condition_failed'
                    else:
                        status = 403
                        ret_val['message'] = 'server.forbidden'
                else:
                    status = 400
                    ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            logger.error('Function api_hw_module_post -> POST erred with: {}'.format(al_err))

    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_hw_module_blueprint.route('/<hw_module_id:int>', methods=['GET'])
@inject_user()
@scoped(['hw_module:read'], require_all=True, require_all_actions=True)
async def api_hw_module_element_get(
        request: Request,
        user,
        hw_module_id: int = 0):
    """
    Return a single hw_module element by its id.

    :param request: incoming Sanic request
    :param user: injected JWT user payload
    :param hw_module_id: primary key of the module to fetch
    :return: raw JSON response with the element (or empty data)
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}

    if request.method == 'GET':
        try:
            if user:
                if user.get('user_id', None) and hw_module_id:
                    hw_module_element = await get_hw_module_element(request, hw_module_id)

                    if hw_module_element:
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = hw_module_element
                        status = 200
                    else:
                        # missing element is still reported as a successful
                        # query with empty data
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        status = 200
                else:
                    status = 400
                    ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            logger.error('Function api_hw_module_element_get -> GET erred with: {}'.format(al_err))

    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_hw_module_blueprint.route('/<hw_module_id:int>', methods=['PUT'])
@inject_user()
@scoped(['hw_module:update'], require_all=True, require_all_actions=True)
async def api_hw_module_element_put(
        request: Request,
        user,
        hw_module_id: int = 0):
    """
    Update a hw_module element.

    Admins may update all fields (including name, module_id and owner);
    other accounts may only update their own assignment/visibility fields.

    :param request: incoming Sanic request
    :param user: injected JWT user payload
    :param hw_module_id: primary key of the module to update
    :return: raw JSON response (202 on success)
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}

    if request.method == 'PUT':
        try:
            if user:
                if user.get('user_id'):
                    # TODO: IMPLEMENT USER ACCESS if user.get('is_superuser'):
                    if True and hw_module_id:
                        name = request.json.get('name', None)
                        # admins may reassign the module owner; others stay owner
                        if user.get('account_type_name') in ['admin']:
                            user_id = request.json.get('user_id', None)
                        else:
                            user_id = user.get('user_id')
                        traceable_object_id = request.json.get('traceable_object_id', None)
                        show_on_map = request.json.get('show_on_map', False)
                        active = request.json.get('active', False)
                        module_id = request.json.get('module_id', None)
                        meta_information = {}

                        # NOTE(review): show_on_map/active default to False above,
                        # so this None-check can never fail — confirm intent
                        if None not in [show_on_map, active]:
                            if user.get('account_type_name') in ['admin']:
                                updated_module = await update_hw_module_element(
                                    request, hw_module_id=hw_module_id, name=name,
                                    module_id=module_id, user_id=user_id, traceable_object_id=traceable_object_id,
                                    meta_information=meta_information,
                                    show_on_map=show_on_map, active=active)
                            else:
                                updated_module = await update_user_hw_module_element(
                                    request, hw_module_id=hw_module_id, user_id=user_id,
                                    traceable_object_id=traceable_object_id,
                                    show_on_map=show_on_map, active=active)
                            ret_val['success'] = True
                            ret_val['message'] = 'server.query_success'
                            ret_val['data'] = updated_module
                            status = 202
                            # NOTE(review): overwrites 'server.query_success' set
                            # two lines above — confirm which message is intended
                            ret_val['message'] = 'server.accepted'
                        else:
                            status = 412
                            ret_val['message'] = 'server.query_condition_failed'
                    # NOTE(review): indentation reconstructed — the 400 else is
                    # assumed to pair with the hw_module_id check; confirm.
                    else:
                        status = 400
                        ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            logger.error('Function api_hw_module_element_put -> PUT erred with: {}'.format(al_err))

    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_hw_module_blueprint.route('/<hw_module_id:int>', methods=['DELETE'])
@inject_user()
@scoped(['hw_module:delete'], require_all=True, require_all_actions=True)
async def api_hw_module_element_delete(
        request: Request,
        user,
        hw_module_id: int = 0):
    """
    Delete a hw_module element by its id.

    :param request: incoming Sanic request
    :param user: injected JWT user payload
    :param hw_module_id: primary key of the module to delete
    :return: raw JSON response (202 on success, 412 if delete failed)
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}

    if request.method == 'DELETE':
        try:
            if user:
                if user.get('user_id'):
                    # TODO: IMPLEMENT USER ACCESS if user.get('is_superuser'):
                    if True and hw_module_id:
                        hw_module = await delete_hw_module_element(request, hw_module_id)

                        if hw_module:
                            ret_val['success'] = True
                            ret_val['message'] = 'server.query_success'
                            ret_val['data'] = None
                            status = 202
                            # NOTE(review): overwrites the message set above —
                            # confirm which one is intended
                            ret_val['message'] = 'server.accepted'
                        else:
                            status = 412
                            ret_val['message'] = 'server.query_condition_failed'
                    # NOTE(review): indentation reconstructed — the 400 else is
                    # assumed to pair with the hw_module_id check; confirm.
                    else:
                        status = 400
                        ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            logger.error('Function api_hw_module_element_delete -> DELETE erred with: {}'.format(al_err))

    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
@api_hw_module_blueprint.route('/unassigned_hw_modules/dropdown', methods=['GET'])
@inject_user()
@scoped(['hw_module:create'], require_all=True, require_all_actions=True)
async def api_hw_module_unassigned_id_get(
        request: Request,
        user):
    """
    Return candidate unique module-id strings for unassigned hw modules,
    optionally filtered by the 'module_id' query argument.

    :param request: incoming Sanic request
    :param user: injected JWT user payload
    :return: raw JSON response with the list of unique id strings
    """
    status = 500
    ret_val = {'success': False, 'message': 'server.query_failed', 'data': None}
    module_id = request.args.get('module_id', None)

    if request.method == 'GET':
        try:
            if user:
                if user.get('user_id', None):
                    hw_module_unique_id_list = await get_hw_module_random_unique_str_list(
                        request, module_id=module_id)

                    if hw_module_unique_id_list:
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = hw_module_unique_id_list
                        status = 200
                    else:
                        ret_val['success'] = True
                        ret_val['message'] = 'server.query_success'
                        ret_val['data'] = []
                        status = 200
                else:
                    status = 400
                    ret_val['message'] = 'server.bad_request'
            else:
                status = 401
                ret_val['message'] = 'server.unauthorized'
        except Exception as al_err:
            # BUGFIX: error log previously named the wrong function ('api_hw_module_get')
            logger.error('Function api_hw_module_unassigned_id_get -> GET erred with: {}'.format(al_err))

    return response.raw(
        ujson.dumps(ret_val).encode(),
        headers={'X-Served-By': 'sanic', 'Content-Type': 'application/json'},
        status=status
    )
|
import concurrent.futures
import config
import time
import pandas as pd
import requests
import os
def worker_process(d):
    """Resolve one (track, artist, album) row to Spotify IDs.

    Queries the Spotify search API, keeps candidates whose album name or
    artist list matches the row, and returns the row values extended with
    the most popular match's track id and artist id list ("NA", "NA" when
    nothing matches). Sleeps 2 s per call to stay under the rate limit.
    """
    track, artist, album = d[0], d[1], d[2]
    payload = requests.get("https://api.spotify.com/v1/search",
                           {"q": track + " " + artist, "type": "track", "limit": 50},
                           headers=config.spotify_headers).json()
    candidates = payload["tracks"]["items"]

    matches = []
    for candidate in candidates:
        # album match takes priority; otherwise fall back to an artist match
        if candidate["album"]["name"].lower() == album.lower():
            matches.append(candidate)
        elif artist.lower() in (a["name"].lower() for a in candidate["artists"]):
            matches.append(candidate)

    if matches:
        top = max(matches, key=lambda c: int(c["popularity"]))
        track_ID = top["id"]
        artist_IDs = [a["id"] for a in top["artists"]]
    else:
        track_ID = "NA"
        artist_IDs = "NA"

    time.sleep(2)
    return d.tolist() + [track_ID, artist_IDs]
def main():
    """Resolve Spotify IDs for the latest collection period's tracks and
    merge them with previously collected IDs into with_IDs.csv."""
    full_data = pd.read_csv(os.path.join(config.DATADIR, "last_month_tracks.csv"))
    # most recent collection period present in the data
    collection_period = full_data["collection_period"].max()
    data = full_data.sort_values("date", ascending=False)  # most recent songs are listed first
    # keeping the last ensures that duplicate tracks from earlier collection periods
    # are identified
    idx = ["track", "artist", "album"]
    # NOTE(review): the boolean masks below are computed on the UNSORTED
    # full_data, not the date-sorted `data`; .loc aligns on the index so the
    # same rows are selected, but the sort has no effect on which duplicate
    # survives — confirm whether data[idx].duplicated(...) was intended.
    data = data.loc[~full_data[idx].duplicated(keep="last")]
    # now we can drop tracks that are outside our collection period
    data = data.loc[full_data["collection_period"] == collection_period]
    # drop these columns temporarily
    data = data.drop(["date", "collection_period"], axis=1).values
    res = []
    # fan the Spotify lookups out across processes (each worker sleeps 2 s)
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [executor.submit(worker_process, d) for d in data]
        for future in concurrent.futures.as_completed(futures):
            res.append(future.result())
    # merge new IDs with track data
    res_df = pd.DataFrame(data=res, columns=idx + ["track_ID", "artist_IDs"])
    combined_data = pd.merge(full_data, res_df, how="left", on=idx)
    # merge old IDs
    # this serves 2 purposes
    # 1) tracks within this collection period which were collected previously
    # 2) tracks from all previous collection periods
    if collection_period > 0:  # then there is previous data
        missing = combined_data.loc[combined_data["track_ID"].isna()].drop(["track_ID", "artist_IDs"], axis=1)
        previous_IDs = pd.read_csv(os.path.join(config.DATADIR, "with_IDs.csv"))
        previous_IDs = previous_IDs.loc[~previous_IDs[idx].duplicated(keep="last"), ["track", "artist", "album", "track_ID", "artist_IDs"]]
        missing = pd.merge(missing, previous_IDs, how="left")
        not_missing = combined_data.loc[~combined_data["track_ID"].isna()]
        combined_data = pd.concat([missing, not_missing]).sort_values("date", ascending=False)
    combined_data.to_csv(os.path.join(config.DATADIR, "with_IDs.csv"), index=False)
# script entry point: only run the collection when executed directly
if __name__ == '__main__':
    main()
import sys
import itertools

# Choose the program file: prompt on stdin when no filename argument is
# given (or when the only argument is the '-v' verbosity flag).
if len(sys.argv) == 1 or sys.argv[1] == '-v':
    print('Input filename:')
    f = str(sys.stdin.readline()).strip()
else:
    f = sys.argv[1]
# verbose tracing inside the machine is enabled by a trailing '-v' argument
verbose = sys.argv[-1] == '-v'
# parse each line of the file as a comma-separated Intcode program;
# NOTE(review): only the last line's program (mreset) survives this loop
for l in open(f):
    mreset = [int(x) for x in l.strip().split(',')]
class Machine:
    """Intcode interpreter that can pause while waiting for input (opcode 3)
    and resume later, as needed for chained amplifier machines."""

    def __init__(self, m):
        self.memory = m                  # program/data memory (list of ints)
        self.pointer = 0                 # instruction pointer
        self.len = len(m)
        self.out = 0                     # last value emitted by opcode 4
        self.__halt = False              # set once opcode 99 executes
        self.__initialized = False       # set once the first input was consumed

    def input(self, v):
        # feed one input value and resume execution
        self.run(v)

    def output(self):
        # last value produced by opcode 4
        return self.out

    def halted(self):
        return self.__halt

    def initialized(self):
        # True once the machine has consumed at least one input (phase setting)
        return self.__initialized

    def run(self, input = None):
        # receiving an input marks the machine as initialized
        if input != None:
            self.__initialized = True
        while self.pointer < self.len:
            n = self.memory[self.pointer]
            # decode opcode (last two digits) and the three parameter modes
            op = n % 100
            m1 = n // 100 % 10
            m2 = n // 1000 % 10
            m3 = n // 10000 % 10
            # fetch up to three raw parameters; padding with zeros lets this
            # work near the end of memory without an IndexError
            p1, p2, p3, *_ = (self.memory + [0]*5)[self.pointer+1:]
            # resolve parameter values: position mode (0) dereferences memory,
            # immediate mode uses the raw value; modes are only applied for
            # opcodes that actually read that parameter
            v1 = self.memory[p1] if m1 == 0 and op != 99 else p1
            v2 = self.memory[p2] if m2 == 0 and op not in {99,3,4} else p2
            v3 = self.memory[p3] if m3 == 0 and op in {1,2,7,8} else p3
            # 99: end
            if op == 99:
                self.__halt = True
                if verbose: print('END')
                break
            # 1: sum
            elif op == 1:
                r = v1 + v2
                if m3 == 0: self.memory[p3] = r
                else: self.memory[self.pointer+3] = r
                self.pointer += 4
            # 2: multiply
            elif op == 2:
                r = v1 * v2
                if m3 == 0: self.memory[p3] = r
                else: self.memory[self.pointer+3] = r
                self.pointer += 4
            # 3: save to address
            elif op == 3:
                if input == None:
                    # no input available: pause here; a later run(v) resumes
                    # at this same instruction
                    if verbose: print('Waiting for input...')
                    break # Wait for input...
                self.memory[p1] = input
                input = None
                if verbose: print('READ:', self.memory[p1])
                self.pointer += 2
            # 4: output
            elif op == 4:
                if verbose: print('OUTPUT: ', v1)
                self.out = v1
                self.pointer += 2
            # 5: jump-if-true
            elif op == 5:
                if v1 != 0: self.pointer = v2
                else: self.pointer += 3
            # 6: jump-if-false
            elif op == 6:
                if v1 == 0: self.pointer = v2
                else: self.pointer += 3
            # 7: less than
            elif op == 7:
                r = 1 if v1 < v2 else 0
                if m3 == 0: self.memory[p3] = r
                else: self.memory[self.pointer+3] = r
                self.pointer += 4
            # 8: equal
            elif op == 8:
                r = 1 if v1 == v2 else 0
                if m3 == 0: self.memory[p3] = r
                else: self.memory[self.pointer+3] = r
                self.pointer += 4
            else:
                # unknown opcode: dump context and stop
                print('ERRRRR.....', self.pointer, op, self.memory[self.pointer:self.pointer+4])
                break
def run(data, settings):
    """Try every ordering of the five phase settings on the amplifier chain
    and return the strongest final thruster signal produced."""
    best = 0
    for phases in itertools.permutations(settings, 5):
        # Five fresh machines per permutation, each on its own program copy.
        amps = [Machine(data[:]) for _ in range(5)]
        prev, cur = 4, 0
        while not amps[cur].halted():
            if amps[cur].initialized():
                # Already primed with its phase: feed the previous amp's output.
                amps[cur].run(amps[prev].output())
            else:
                # First run: prime the amplifier with its phase setting.
                amps[cur].run(phases[cur])
            prev = cur
            cur = 0 if cur == 4 else cur + 1
        if verbose: print('SEQUENCE:', phases, amps[4].output())
        best = max(amps[4].output(), best)
    return best
'''
PART 1
'''
# Phase settings 0-4: a single pass through the amplifier chain.
settings_1 = list(range(5))
result_1 = run(mreset, settings_1)
'''
PART 2
'''
# Phase settings 5-9: feedback loop until every amplifier halts.
settings_2 = list(range(5, 10))
result_2 = run(mreset, settings_2)
print('------ Part One ------')
print('MAX THRUST:', result_1)
print('------ Part Two ------')
print('MAX THRUST:', result_2)
try:
import smtplib
import sys
import pandas as pd
import numpy as np
import sqlite3
import os
import random
from datetime import datetime
from pm4py.objects.log.importer.xes import factory as xes_import_factory
from pm4py.objects.log.exporter.csv import factory as csv_exporter
from scipy.stats import itemfreq
def generate_projection_view(projections_local, case_attribute_local, activity_local, event_attribute_local,
                             timestamp_local):
    """Select the quasi-identifier and event columns for a projection.

    Returns a (qi, events) tuple; exits the process on an unknown projection.
    """
    projection_table = {
        "1": ([], activity_local + timestamp_local),
        "2": (case_attribute_local, activity_local + event_attribute_local),
        "3": ([], activity_local + event_attribute_local),
        "4": (case_attribute_local, activity_local),
        "5": ([], activity_local),
    }
    if projections_local not in projection_table:
        sys.exit("The given projection '" + projections_local + "' is not a valid projection")
    qi, events = projection_table[projections_local]
    return qi, events
def prepare_data(events, data, attributes_local):
    """Put the data in the right format.

    For each event name, all columns whose name starts with it are combined
    into one list column (``<event>_combined``) whose trailing NaN entries
    are trimmed by ``helps``.  ``attributes_local`` is mutated in place to
    include the new column names; returns ``data`` restricted to it.
    """
    for event in events:
        filter_col = [col for col in data if col.startswith(event)]
        col_name = event + '_combined'
        attributes_local.append(col_name)
        # Non-string columns are stringified first; afterwards both cases
        # share identical combine-and-trim logic (the original duplicated
        # these two lines in each branch and used a fragile
        # type(...).__name__ comparison instead of isinstance).
        if not isinstance(data[filter_col[0]].iloc[0], str):
            data[filter_col] = data[filter_col].astype(str)
        data[col_name] = data[filter_col].apply(lambda row: row.tolist(), axis=1)
        data[col_name] = data[col_name].apply(helps)
    return data[attributes_local]
def calculate_unicity(data, qi, events, number_points):
    """Estimate unicity: the share of cases uniquely re-identifiable from
    randomly sampled points.

    ``events[0]`` is the activity column; the remaining entries are further
    event attributes / timestamps.  Random positions are drawn from each
    trace's activity list, the same positions are projected onto every other
    event column, and a case counts as unique when no other case contains
    all of its sampled points under equal quasi-identifiers ``qi``.
    """
    # number_points > 1 means an absolute point count, otherwise a relative
    # share of the trace length.
    if number_points > 1:
        data = generate_random_points_absolute(data, events[0], number_points)
    else:
        data = generate_random_points(data, events[0], number_points)
    # Project the sampled activity positions onto each remaining event column.
    for event in events[1:]:
        data[event + '_points'] = data.apply(
            make_otherpoints, args=[event + '_combined', events[0]], axis=1)
    unique_flags = data.apply(uniqueness, args=[qi, events, data], axis=1)
    return sum(unique_flags) / len(data)
def generate_random_points(data, activity_local, number_points_local):
    """Sample a relative fraction of each trace's activities as random
    (index, value) points, always drawing at least one."""
    combined_col = activity_local + '_combined'

    def sample_row(row):
        trace = list(enumerate(row[combined_col]))
        count = int(len(trace) * number_points_local)
        return random.sample(trace, count if count > 1 else 1)

    data['random_p'] = data.apply(sample_row, axis=1)
    data['random_points_number'] = data.apply(lambda row: len(row.random_p), axis=1)
    data[activity_local + '_points'] = data.apply(makepoints, axis=1)
    data[activity_local + 'random_index'] = data.apply(getindex, axis=1)
    return data
def generate_random_points_absolute(data, activity_local, number_points_local):
    """Sample a fixed number of random (index, value) points per trace.

    Traces shorter than ``number_points_local`` contribute all of their
    entries instead.
    """
    combined_col = activity_local + '_combined'

    def sample_row(row):
        # BUG FIX: the original read the *global* ``activity`` here instead
        # of the ``activity_local`` parameter when checking the trace length.
        trace = list(enumerate(row[combined_col]))
        if len(trace) > number_points_local:
            return random.sample(trace, number_points_local)
        return random.sample(trace, len(trace))

    data['random_p'] = data.apply(sample_row, axis=1)
    data['random_points_number'] = data.apply(lambda row: len(row.random_p), axis=1)
    data[activity_local + '_points'] = data.apply(makepoints, axis=1)
    data[activity_local + 'random_index'] = data.apply(getindex, axis=1)
    return data
def check_subset(data, subset):
    """Return True if ``subset`` is contained in ``data`` as a multiset.

    Every element of ``subset`` must occur in ``data`` at least as often as
    it occurs in ``subset``.  This replaces ``scipy.stats.itemfreq`` (removed
    in SciPy 1.3) with ``collections.Counter``; the frequency comparison is
    unchanged, minus the numpy fixed-width-string truncation quirks.
    """
    from collections import Counter
    # Fast membership pre-check mirroring the original guard.
    if not all(elem in data for elem in subset):
        return False
    data_freq = Counter(data)
    subset_freq = Counter(subset)
    return all(data_freq[elem] >= count for elem, count in subset_freq.items())
def makepoints(x):
    """Extract just the values from the (index, value) pairs in random_p."""
    return [value for _, value in x['random_p']]
def getindex(x):
    """Extract just the positions from the (index, value) pairs in random_p."""
    return [position for position, _ in x['random_p']]
def make_otherpoints(x, event, act):
    """Pick from ``x[event]`` the entries at the randomly chosen activity
    positions, silently skipping positions beyond the end of that list."""
    chosen_positions = x[act + 'random_index']
    values = x[event]
    return [values[i] for i in chosen_positions if i < len(values)]
def helps(x):
    """Trim the trailing run of NaN entries from list ``x`` in place.

    NOTE(review): ``n = len(x) - last_valid_index()`` removes one element
    more than the number of trailing NaNs (a fully valid list loses its last
    element) — looks like an off-by-one; confirm before relying on exact
    trace lengths.  Also raises if every entry is NaN, because
    ``last_valid_index`` returns None in that case.
    """
    n = len(x)-pd.Series(x).last_valid_index()
    del x[-n:]
    return x
def equality(x, qi, events_to_concat, row):
    """Return 1 when ``x`` matches ``row`` on every quasi-identifier and
    contains all of ``row``'s sampled points for every event; else 0."""
    # Quasi-identifiers must match exactly (an empty qi list matches all).
    for attribute in qi:
        if x[attribute] != row[attribute]:
            return 0
    # Every sampled point set must be a multiset-subset of x's trace.
    for event in events_to_concat:
        if not check_subset(x[event + '_combined'], row[event + '_points']):
            return 0
    return 1
def uniqueness(x, qi, events_to_concat, df_data):
    """Return 1 when ``x`` is the only matching case in ``df_data`` (every
    case always matches itself), else 0."""
    matches = df_data.apply(equality, args=[qi, events_to_concat, x], axis=1)
    return 1 if sum(matches) == 1 else 0
def command_print(list):
    """Dump the given values space-separated into cmd.txt (debug helper)."""
    with open("cmd.txt", 'w') as out_handle:
        for column in list:
            out_handle.write("%s " % column)
#set parameters: called as
#  script.py <dir> <projection> <case_attrs> <event_attrs> <db> <token>
filePath = sys.argv[1]
projection = sys.argv[2]
case_attribute_string = sys.argv[3]
event_attribute_string = sys.argv[4]
dbName = sys.argv[5]
secure_token = sys.argv[6]
# df.apply-driven recursion on larger frames can exceed the default limit.
sys.setrecursionlimit(3000)
#command_print(sys.argv)
# Attributes arrive comma-separated; '$empty_string$' marks an empty list.
case_attribute = list(case_attribute_string.split(","))
event_attribute = list(event_attribute_string.split(","))
if case_attribute[0] == '$empty_string$':
    print("empty case attributes")
    case_attribute = list()
if event_attribute[0] == '$empty_string$':
    # NOTE(review): message says "case" but this branch is event attributes.
    print("empty case attributes")
    event_attribute = list()
attributes_non_unique = case_attribute + event_attribute
#attributes_non_unique.append('Activity')
#attributes_non_unique.append('time:timestamp')
# Deduplicate, then always include the timestamp and activity columns.
attributes = list(set(attributes_non_unique))
unique_identifier = ['Case ID']
#unique_identifier = ['case:concept:name']
activity = ['Activity']
#activity = ['concept:name']
timestamp = ['time:timestamp']
attributes = attributes + timestamp + activity
print(attributes)
current_file_name = ""
#########################################
# Locate the prepared "*_renamed.csv" event log in filePath (last match wins).
for filename in os.listdir(filePath):
    if filename.endswith("_renamed.csv"):
        current_file_name = filename
        #print(current_file_name)
#df_data = pd.read_csv(filePath, delimiter=";",skipinitialspace=True, encoding="utf-8-sig")
buffer_path = os.path.join(filePath, "buffer.csv")
filePath = os.path.join(filePath, current_file_name)
# Read the log with an explicit NA whitelist (keep_default_na=False keeps
# any other literal strings intact).
df_data = pd.read_csv(filePath, delimiter=",",skipinitialspace=True, encoding="utf-8-sig", keep_default_na=False, na_values=['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A', 'N/A', 'n/a', '', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan', '-nan', ''])
#if not 'Activity' in df_data.columns:
    #print("Changing concept:name to Activity,case:concept:name to Case ID")
    #df_data.rename(columns={'concept:name': 'Activity', 'case:concept:name': 'Case ID'}, inplace=True)
    #xes_csv_file.rename(columns={'concept:name': 'Activity', 'case:concept:name': 'Case ID'}, inplace=True)
#if not 'Duration' in xes_csv_file.columns:
#    xes_csv_file.loc[:,"Duration"] = 0.0
##########################################
filePath = filePath.replace(" ","_")
#if filePath.endswith(".xes"):
#    log = xes_import_factory.apply(filePath)
#    filePath = filePath + ".csv"
#    csv_exporter.export(log, filePath)
#targetFilePath = filePath.replace(".csv","_results")
#run PRETSA
#if filePath.endswith(".xes.csv"):
#    xes_csv_file = pd.read_csv(filePath, delimiter=",",skipinitialspace=True, encoding="utf-8-sig")
#    xes_csv_file.rename(columns={'concept:name': 'Activity', 'case:concept:name': 'Case ID'}, inplace=True)
#    if not 'Duration' in xes_csv_file.columns:
#        xes_csv_file.loc[:,"Duration"] = 0.0
#    #xes_csv_file.to_csv(filePath,sep=";",encoding="utf-8-sig",index=False)
#eventLog = pd.read_csv(filePath, delimiter=";",skipinitialspace=True, encoding="utf-8-sig")
#######################
####csv2simple_auto####
#######################
df_data_column_list = list(df_data.columns.values.tolist())
#print("\n",df_data_column_list,"\n")
# drop all unnecessary columns
df_important_columns = df_data[unique_identifier + attributes]
# group data by unique identifier
df_grouped_by_identifier = df_important_columns.groupby(unique_identifier)
# enumerate all data in their respective column (one list of events per case)
df_enumerated_data = df_grouped_by_identifier.aggregate(lambda x: list(x))
# create list to store data frames of each attribute
list_of_data_frames = []
list_column_names = []
# insert constant values in the beginning, but respect given order
# use this variable to determine the insertion position
constant_value_count = 0
# use attributes in file name
list_file_name = []
# loop through all variable attributes
for attribute in attributes:
    # create data frame from list (from enumerated data)
    df_current_iteration = pd.DataFrame.from_records(list(df_enumerated_data[attribute]))
    # if attribute is constant only use it once and do not create multiple columns
    # determined by: count unique values for each row and drops 'None' values
    # if only the first column has a value or if all columns have the same value 'df.nunique' will
    # return '1'
    # if all 'df.nunique' returns for all rows '1' it will sum up to the number of rows
    # and therefore if those numbers are the same every row only contains one unique value
    if sum(df_current_iteration.nunique(dropna=True, axis=1)) == df_current_iteration.shape[0]:
        # get only first column. all other columns should either be empty or equal
        df_current_iteration = df_current_iteration.iloc[:, 0]
        # save it in a list of data frames
        list_of_data_frames.insert(constant_value_count, df_current_iteration)
        # create meaningful header, use the attribute name
        list_column_names.insert(constant_value_count, attribute.replace(" ", ""))
        # add attribute to filename
        list_file_name.insert(constant_value_count, attribute.replace(" ", ""))
        # increase insertion position by one
        constant_value_count += 1
    else:
        # save it in a list of data frames
        list_of_data_frames.append(df_current_iteration)
        # create meaningful header, use the attribute name and a number
        list_column_names.extend(np.core.defchararray.add(
            [attribute.replace(" ", "")] * list_of_data_frames[-1].shape[1],
            np.array(range(0, list_of_data_frames[-1].shape[1]), dtype=str)))
        list_file_name.append(attribute.replace(" ", ""))
if len(list_of_data_frames) > 0:
    # concatenate separate data frames to one data frame
    df_for_export = pd.concat(list_of_data_frames, axis=1)
    # rename columns
    df_for_export.columns = list_column_names
    # get index (unique identifier) from enumerated data
    df_for_export.index = df_enumerated_data.index
    # Round-trip through buffer.csv so the unicity step reads one flat format.
    df_for_export.to_csv(buffer_path,sep=";")
    ###########################################################################################################################################################
    pd.options.mode.chained_assignment = None
    # NOTE(review): nrows=1000 caps the unicity analysis at the first 1000 cases.
    df_two = pd.read_csv(buffer_path, delimiter=";",low_memory=False, nrows=1000,skipinitialspace=True, encoding="utf-8-sig", keep_default_na=False, na_values=['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A', 'N/A', 'n/a', '', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan', '-nan', ''])
else:
    # No exportable attributes were found.
    df_two = []
# Specify number (if > 1) or relative frequency (if <= 1) of points
number_points = 1
quasi_identifier, events_to_concat = generate_projection_view(projection, case_attribute, activity,
                                                              event_attribute, timestamp)
attributes_quasi = unique_identifier + quasi_identifier
#print("unique_identifier + quasi_identifier",unique_identifier,"\n",quasi_identifier)
# NOTE(review): if no attributes were exported above, df_two is [] and
# prepare_data will fail — confirm that case cannot occur in practice.
df_aggregated_data = prepare_data(events_to_concat, df_two, attributes_quasi)
print("Data preparation finished")
unicity = calculate_unicity(df_aggregated_data, quasi_identifier, events_to_concat, number_points)
print("unicity = ", unicity)
###########################################################################################################################################################
# Write a human-readable result report under the user's secure-token directory.
result_filename = current_file_name.replace(".csv","_results.txt")
puffer,targetFile = filePath.split("media"+os.path.sep)
result_path = puffer +"media" +os.path.sep + secure_token +os.path.sep + result_filename
targetFile = secure_token + os.path.sep + result_filename
with open(result_path, 'w') as filehandle:
    filehandle.write("Unicity = %s \n" % unicity )
    filehandle.write("Based on activities \n")
    if projection == "1":
        filehandle.write("Timestamps \n")
    if projection == "2" or projection == "4":
        filehandle.write("Case attributes: \n")
        filehandle.writelines("%s\n" % cases for cases in case_attribute)
        filehandle.write("\n")
    if projection == "2" or projection == "3":
        filehandle.write("Event attributes: \n")
        filehandle.writelines("%s\n" % event_attr for event_attr in event_attribute)
###########################################################################################################################################################
# Mark the upload as finished in the web app's database.
conn = sqlite3.connect(dbName)
c = conn.cursor()
c.execute("UPDATE eventlogUploader_document SET status = ?, docfile = ? WHERE token = ?", ("FINISHED", targetFile, secure_token))
conn.commit()
conn.close()
print("DB submit done")
except:
filePath = sys.argv[1]
projection = sys.argv[2]
case_attribute_string = sys.argv[3]
event_attribute_string = sys.argv[4]
dbName = sys.argv[5]
secure_token = sys.argv[6]
print()
print("ERROR_except")
print()
conn = sqlite3.connect(dbName)
c = conn.cursor()
c.execute("UPDATE eventlogUploader_document SET status = ? WHERE token = ?", ("ERROR", secure_token))
conn.commit()
conn.close()
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileRequired, FileAllowed
from wtforms import SubmitField, MultipleFileField
class UploadForm(FlaskForm):
    """
    Data form of the upload mask.

    Accepts one or more image files (jpg/png only) plus a submit button.
    """
    # At least one file is required; only jpg/png extensions are accepted.
    file = MultipleFileField('File',
                             validators=[FileRequired(),
                                         FileAllowed(['jpg', 'png'],
                                                     'Images Only!')])
    submit = SubmitField('Upload')
|
#!/usr/bin/env/python
"""
Goal: from the input csv files, calculate the following variables:
PlusCount, AvgScore, MedianFeedbackLength, EntriesRead, EntriesWritten, MedianSubjectLength, MedianTextLength
for each user in each kursXX/semesterXX pair
where
PlusCount = count(plus) per user in a course
AvgScore = sum(score) / count(score) per user in a course
MedianFeedbackLength = median([feedback1,...,feedbackN])
EntriesRead = count([Read1, ..., ReadN])
EntriesWritten = count([Written1, ..., WrittenN])
MedianSubjectLength = median([subj_length1, ..., subj_lengthN])
MedianTextLength = median([text_length1, ..., text_lengthN])
The source files are:
PlusCount = abgabe_assessment_pluses_courses.csv
AvgScore = abgabe_assesssment_results_courses.csv
MedianFeedbackLength = abgabe_feedback_courses.csv
EntriesRead = forum_readlist.csv
EntriesWritten = forum_entries_courses.csv
MedianSubjectLength = forum_entries_courses.csv
MedianTextLength = forum_entries_courses.csv
The headers are:
abgabe_assessment_pluses_courses.csv
service,course_id,kurs,semester,description,user_id,plus_date
abgabe_assessment_results_courses.csv
service,course_id,kurs,semester,description,user_id,result_id,result_value
abgabe_feedback_courses.csv
service,course_id,kurs,semester,description,user_id,task,subtask,author,comment_length
forum_readlist.csv
service,course_id,username,nid,id
forum_entries_courses.csv
service,course_id,kurs,semester,description,user,name,nid,id,parent_id,date,subject_length,text_length
"""
from common import load_file
from common import load_lines
from common import groupby
from common import is_valid_matrikel_nummer
def average(l):
    """Arithmetic mean of ``l`` as a float (empty input raises
    ZeroDivisionError)."""
    total = float(sum(l))
    count = float(len(l))
    return total / count
def median(l):
    """Return the (upper) median element of ``l``.

    Fixes two defects in the original: ``len(l)/2`` is a float index under
    Python 3 (TypeError), and the input was never sorted, so an arbitrary
    middle element — not the median — was returned.
    """
    ordered = sorted(l)
    return ordered[len(ordered) // 2]
def process_plus_count():
    """
    input file: "abgabe_assessment_pluses_courses.csv"
    header: service,course_id,kurs,semester,description,user_id,plus_date

    returns a dict with
        (kurs,semester) -> "username" -> plus count [integer]
    """
    KURS_COL = 2
    SEMESTER_COL = 3
    USER_COL = 5

    def count_pluses(grouped):
        # One plus per line; skip users without a valid matrikel number.
        return dict((user, len(grouped[user]))
                    for user in grouped
                    if is_valid_matrikel_nummer(user))

    lines = load_lines("abgabe_assessment_pluses_courses.csv")
    grouped_lines = groupby(lines, [KURS_COL, SEMESTER_COL])
    return groupby(grouped_lines, [USER_COL], count_pluses)
def process_avg_score():
    """
    input file: "abgabe_assessment_results_courses.csv"
    header: service,course_id,kurs,semester,description,user_id,result_id,result_value

    returns a dict with
        (kurs,semester) -> "username" -> (quality score [float],
                                          avg score [float],
                                          number of scores [int])

    where quality score:
        sum of scores / max score
        (NOTE(review): an earlier docstring said "avg score * number of
        scores", but the code below divides the score sum by the maximum
        score — confirm which definition is intended.)
    """
    KURS_COL = 2
    SEMESTER_COL = 3
    USER_COL = 5
    RESULT_ID_COL = 6
    RESULT_SCORE_COL = 7
    def calculate_avg_score(grouped):
        result = {}
        for username in grouped:
            if not is_valid_matrikel_nummer(username):
                continue
            value = grouped[username]
            # One score per result_id; grouping first deduplicates repeats
            # (only the first line of each result_id group is read).
            by_result_id = groupby(value, [RESULT_ID_COL])
            result_scores = [float(line[0][RESULT_SCORE_COL]) \
                             for line in by_result_id.values()]
            avg_score = average(result_scores)
            num_scores = len(by_result_id.keys())
            max_score = max(result_scores)
            # Avoid division by zero when every score is 0.
            if max_score == 0:
                quality_score = 0
            else:
                quality_score = float(sum(result_scores)) / float(max_score)
            result[username] = (quality_score, avg_score, num_scores)
        return result
    lines = load_lines("abgabe_assessment_results_courses.csv")
    by_kurs_semester = groupby(lines, [KURS_COL, SEMESTER_COL])
    result = groupby(by_kurs_semester, [USER_COL], calculate_avg_score)
    return result
def process_median_feedback_length():
    """
    input file: "abgabe_feedback_courses.csv"
    header: service,course_id,kurs,semester,description,user_id,task,subtask,author,comment_length

    returns a dict with
        (kurs,semester) -> "username" -> median feedback [float]
    """
    KURS_COL = 2
    SEMESTER_COL = 3
    USER_COL = 5
    COMMENT_LENGTH_COL = 9

    def calculate_feedback(grouped):
        # Median comment length per user with a valid matrikel number.
        return dict((username,
                     median([int(row[COMMENT_LENGTH_COL])
                             for row in grouped[username]]))
                    for username in grouped
                    if is_valid_matrikel_nummer(username))

    lines = load_lines("abgabe_feedback_courses.csv")
    grouped_lines = groupby(lines, [KURS_COL, SEMESTER_COL])
    return groupby(grouped_lines, [USER_COL], calculate_feedback)
def process_entries_read(kurs_mapping):
    """
    input:
        kurs_mapping - dict with: forum_course_id -> (kurs,semester)

    input file: "forum_readlist.csv"
    header: service,course_id,username,nid,id

    returns a dict with
        (kurs,semester) -> "username" -> read count [integer]
    """
    COURSE_ID_COL = 1
    USERNAME_COL = 2

    def calculate_read(grouped):
        # One read entry per line; keep only valid matrikel numbers.
        counts = {}
        for username in grouped:
            if is_valid_matrikel_nummer(username):
                counts[username] = len(grouped[username])
        return counts

    lines = load_lines("forum_readlist.csv")
    by_course_id = groupby(lines, [COURSE_ID_COL])
    by_user = groupby(by_course_id, [USERNAME_COL], calculate_read)
    # Re-key from raw course ids to (kurs, semester) tuples.
    return dict((kurs_mapping[course_id], by_user[course_id])
                for course_id in by_course_id)
def process_kurs_mapping():
    """
    input file: "forum_entries_courses.csv"
    header: service,course_id,kurs,semester,description,user,name,nid,id,parent_id,date,subject_length,text_length

    returns a dict with:
        (course_id) -> (kurs,semester)
    """
    KURS_COL = 2
    SEMESTER_COL = 3
    COURSE_ID_COL = 1
    lines = load_lines("forum_entries_courses.csv")
    by_kurs_semester = groupby(lines, [KURS_COL, SEMESTER_COL])
    mapping = {}
    for key in by_kurs_semester:
        # The course id is taken from the first line of each group.
        first_line = by_kurs_semester[key][0]
        mapping[first_line[COURSE_ID_COL]] = key
    return mapping
def process_forum_entries():
    """
    input file: "forum_entries_courses.csv"
    header: service,course_id,kurs,semester,description,user,name,nid,id,parent_id,date,subject_length,text_length

    returns a dict with:
        (kurs,semester) -> "username" -> (written count [int],
                                          avg subject length [float],
                                          median text length [int])
    """
    KURS_COL = 2
    SEMESTER_COL = 3
    USER_COL = 5
    SUBJ_COL = 11
    TEXT_COL = 12

    def handle_forum_entries(grouped):
        stats = {}
        for username in grouped:
            if not is_valid_matrikel_nummer(username):
                continue
            rows = grouped[username]
            subject_lengths = [int(row[SUBJ_COL]) for row in rows]
            text_lengths = [int(row[TEXT_COL]) for row in rows]
            stats[username] = (len(rows),
                               average(subject_lengths),
                               median(text_lengths))
        return stats

    lines = load_lines("forum_entries_courses.csv")
    grouped_courses = groupby(lines, [KURS_COL, SEMESTER_COL])
    return groupby(grouped_courses, [USER_COL], handle_forum_entries)
def process_all():
    """Join all per-course metrics and write one CSV per (kurs, semester).

    Only course/semester pairs present in *every* data source are emitted
    (intersection of the key sets).  Python 2 script: note the print
    statements below.
    """
    plus_counts = process_plus_count()
    avg_scores = process_avg_score()
    median_feedback = process_median_feedback_length()
    forum_entries = process_forum_entries()
    kurs_mapping = process_kurs_mapping()
    forum_entries_read = process_entries_read(kurs_mapping)
    # Keep only (kurs, semester) keys common to every metric source.
    all_keys = [set(plus_counts.keys()),
                set(avg_scores.keys()),
                set(median_feedback.keys()),
                set(forum_entries.keys()),
                set(forum_entries_read.keys())]
    common_keys = all_keys[0]
    for keys in all_keys:
        common_keys = common_keys.intersection(keys)
    for kurs,semester in common_keys:
        key = (kurs,semester)
        user_rows = {}
        filename = "abgabe_analyse_kurs%(kurs)s_semester%(semester)s.csv" % locals()
        print "Handling", filename
        # NOTE(review): f is only closed on the success path; an exception in
        # this loop would leak the handle (Python 2 code, no `with` used).
        f = open(filename, "w")
        f.write("username,plus_count, avg_quality_score, median_feedback_length, entries_read, entries_written, avg_subject_length, median_text_length\n")
        # plus counts
        for username in plus_counts[key]:
            value = plus_counts[key][username]
            user_rows.setdefault(username, {})
            user_rows[username]["plus_count"] = value
        # avg scores
        for username in avg_scores[key]:
            quality_score = avg_scores[key][username][0]
            user_rows.setdefault(username, {})
            user_rows[username]["quality_score"] = quality_score
        # median feedback
        for username in median_feedback[key]:
            feedback_length = median_feedback[key][username]
            user_rows.setdefault(username, {})
            user_rows[username]["median_feedback_length"] = feedback_length
        # forum entries
        for username in forum_entries[key]:
            written, subj, text = forum_entries[key][username]
            user_rows.setdefault(username, {})
            user_rows[username]["written_entries"] = written
            user_rows[username]["avg_subj_length"] = subj
            user_rows[username]["median_text_length"] = text
        # forum entries read
        for username in forum_entries_read[key]:
            read = forum_entries_read[key][username]
            user_rows.setdefault(username, {})
            user_rows[username]["read"] = read
        # Emit one row per user; missing metrics default to "0".
        for username in user_rows:
            user = user_rows[username]
            row = [username,
                   user.get("plus_count","0"),
                   user.get("quality_score","0"),
                   user.get("median_feedback_length","0"),
                   user.get("read", "0"),
                   user.get("written_entries", "0"),
                   user.get("avg_subj_length", "0"),
                   user.get("median_text_length", "0")]
            row = [str(x) for x in row]
            f.write(",".join(row))
            f.write("\n")
        f.close()
# Script entry point.
if __name__ == "__main__":
    process_all()
|
from typing import Optional, Tuple
import requests
import microstrategy_api
from microstrategy_api.task_proc.exceptions import MstrDocumentException
from microstrategy_api.task_proc.executable_base import ExecutableBase
from microstrategy_api.task_proc.object_type import ObjectType
class Document(ExecutableBase):
"""
Encapsulates a document in MicroStrategy
The most common use case will be to execute a document.
Args:
task_api_client:
client to be used to make requests
guid:
document guid
name:
Optional. Name of the doc/report
"""
def __init__(self, task_api_client, guid, name=None):
super().__init__(task_api_client, guid, name)
self.object_type = ObjectType.DocumentDefinition
self.obect_id_param = 'objectID'
self.message_type = 55
self.exec_task = 'RWExecute'
self.message_id_param = 'messageID'
self.refresh_cache_argument = 'freshExec'
self.refresh_cache_value = 'True'
self.prompt_args = {} # Haven't found any that prevent document execution if no prompts.
def execute(self,
arguments: Optional[dict] = None,
value_prompt_answers: Optional[list] = None,
element_prompt_answers: Optional[dict] = None,
refresh_cache: Optional[bool] = False,
task_api_client: 'microstrategy_api.task_proc.task_prod.TaskProc' = None,
):
"""
Execute a report.
Executes a report with the specified parameters. Default values
are chosen so that most likely all rows and columns will be
retrieved in one call. However, a client could use pagination
by cycling through calls of execute and changing the min and max
rows. Pagination is useful when there is a risk of the amount of
data causing the MicroStrategy API to run out of memory. The report
supports any combination of optional/required value prompt answers
and element prompt answers.
Arguments
---------
value_prompt_answers:
list of (Prompts, strings) in order. If a value is to be left blank, the second argument in the tuple
should be the empty string
element_prompt_answers:
element prompt answers represented as a dictionary of Prompt objects (with attr field specified)
mapping to a list of attribute values to pass
refresh_cache:
Do a new run against the data source?
arguments:
Other arbitrary arguments to pass to TaskProc.
task_api_client:
Alternative task_api_client to use when executing
Raises
------
MstrReportException: if there was an error executing the report.
"""
if arguments is None:
arguments = dict()
# The style to use to transform the ReportBean. If omitted, a simple MessageResult is generated.
# RWDocumentViewStyle
if 'styleName' not in arguments:
arguments['styleName'] = 'RWDataVisualizationXMLStyle'
# prevent columns from merging
arguments['gridsResultFlags'] = '393216'
response = self.execute_object(
arguments=arguments,
value_prompt_answers=value_prompt_answers,
element_prompt_answers=element_prompt_answers,
refresh_cache=refresh_cache,
task_api_client=task_api_client,
)
return response
@staticmethod
def get_redirect_url(response):
found_title = False
errors = []
for line in response.iter_lines():
if not found_title:
if b'<title' in line:
found_title = True
if b'WELCOME. MicroStrategy' in line:
errors.append('Got welcome page!')
elif b'Login. MicroStrategy' in line:
errors.append('Got login page!')
elif b'Executing' not in line:
return None
else:
if b'mstrAlert' in line:
errors.append(line.decode('ascii'))
else:
# HTML to scan for
pos1 = line.find(b'submitLinkAsForm({href:')
if pos1 != -1:
pos2 = line.find(b"'", pos1 + 1)
if pos2 != -1:
pos3 = line.find(b"'", pos2 + 1)
if pos3 != -1:
return line[pos2 + 1:pos3 + 1].decode('ascii', errors='replace')
if errors:
raise MstrDocumentException('\n'.join(errors))
return 'ERROR'
def get_url_api_parts(
self,
arguments: Optional[dict] = None,
value_prompt_answers: Optional[list] = None,
element_prompt_answers: Optional[dict] = None,
refresh_cache: Optional[bool] = False,
is_dossier: Optional[bool] = False,
) -> Tuple[str, dict]:
"""
See https://lw.microstrategy.com/msdz/MSDL/GARelease_Current/docs/ReferenceFiles/eventHandlerRef/web.app.beans.ServletWebComponent.html#2048001
Parameters
-----------
arguments:
value_prompt_answers:
element_prompt_answers:
refresh_cache:
task_api_client:
is_dossier:
Returns
-------
The resulting html document
"""
if not arguments:
arguments = dict()
if is_dossier:
arguments['evt'] = '3140'
arguments['src'] = 'Main.aspx.3140'
else:
arguments['evt'] = '2048001'
arguments['src'] = 'Main.aspx.2048001'
arguments['currentViewMedia'] = '1'
arguments['visMode'] = '0'
arguments['usrSmgr'] = self._task_api_client.session
# arguments['uid'] = self._task_api_client.username
# arguments['pwd'] = self._task_api_client.password
arguments['documentID'] = self.guid
arguments['server'] = self._task_api_client.server
arguments['project'] = self._task_api_client.project_name
arguments['Port'] = '0'
arguments['connmode'] = '1'
arguments['ru'] = '1'
arguments['share'] = '1'
arguments['promptAnswerMode'] = '1' # 1 = default for un-answered. 2= empty for un-answered
if value_prompt_answers and element_prompt_answers:
arguments.update(
ExecutableBase._format_xml_prompts(
value_prompt_answers,
element_prompt_answers)
)
elif value_prompt_answers:
arguments.update(
ExecutableBase._format_value_prompts(value_prompt_answers)
)
elif element_prompt_answers:
arguments.update(
ExecutableBase._format_element_prompts(element_prompt_answers)
)
if refresh_cache:
arguments[self.refresh_cache_argument] = self.refresh_cache_value
main_url = self._task_api_client.base_url.replace('TaskProc', 'Main')
return main_url, arguments
    def execute_url_api(self,
                        arguments: Optional[dict] = None,
                        value_prompt_answers: Optional[list] = None,
                        element_prompt_answers: Optional[dict] = None,
                        refresh_cache: Optional[bool] = False,
                        task_api_client: 'microstrategy_api.task_proc.task_proc.TaskProc' = None,
                        is_dossier: Optional[bool] = False,
                        ) -> bytes:
        """
        Execute this document via the URL API and return the raw response body.

        See https://lw.microstrategy.com/msdz/MSDL/GARelease_Current/docs/ReferenceFiles/eventHandlerRef/web.app.beans.ServletWebComponent.html#2048001

        Parameters
        -----------
        arguments:
            Extra query arguments, passed through to get_url_api_parts.
        value_prompt_answers:
            Answers for value prompts.
        element_prompt_answers:
            Answers for element prompts.
        refresh_cache:
            If True, request a cache refresh.
        task_api_client:
            Optional client to use for this call; replaces the stored client.
        is_dossier:
            If True, execute as a dossier rather than a document.

        Returns
        -------
        The resulting html document (the *initial* response body; timed-redirect
        pages are followed for their side effects but their content is not returned).
        """
        if task_api_client:
            self._task_api_client = task_api_client
        main_url, arguments = self.get_url_api_parts(
            arguments=arguments,
            value_prompt_answers=value_prompt_answers,
            element_prompt_answers=element_prompt_answers,
            refresh_cache=refresh_cache,
            is_dossier=is_dossier,
        )
        # Some MSTR versions behave differently for unknown user agents, so
        # present a normal browser UA (tagged with "Locust" for traceability).
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; Locust) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36"
        }
        response = requests.get(main_url,
                                params=arguments,
                                headers=headers,
                                cookies=self._task_api_client.cookies
                                )
        response.raise_for_status()
        # The server may answer with a "timedRedirect" page; keep following the
        # redirect targets until none is returned ('ERROR' means a timedRedirect
        # page without a parseable url).
        sub_url = Document.get_redirect_url(response)
        if sub_url is not None:
            base_url = self._task_api_client.base_url.replace('TaskProc.aspx', '')
            sub_params = {'usrSmgr': self._task_api_client.session}
            done = False
            while not done:
                if sub_url == 'ERROR':
                    raise MstrDocumentException("timedRedirect with no url found")
                else:
                    print("timedRedirect call")
                    # NOTE(review): if base_url already ends with '/MicroStrategy/asp/'
                    # this produces a doubled path segment - confirm the expected
                    # shape of base_url.
                    sub_url = base_url + '/MicroStrategy/asp/' + sub_url
                    sub_response = requests.get(url=sub_url,
                                                params=sub_params,
                                                headers=headers,
                                                cookies=self._task_api_client.cookies
                                                )
                    sub_url = Document.get_redirect_url(sub_response)
                    if not sub_url:
                        done = True
        return response.content
|
__author__ = "Narwhale"
class Single_instance(object):
    """Singleton: every instantiation returns the same shared instance."""
    # Class-level cache of the one and only instance (name-mangled).
    __instance = None
    def __init__(self):
        pass
    def __new__(cls, *args, **kwargs):
        # Bug fix: the original assigned to `cls._instance` (a different,
        # non-mangled name) while checking `cls.__instance`, so the cache was
        # never filled and __new__ always returned None. Also, object.__new__
        # must not receive the extra constructor arguments in Python 3.
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
        return cls.__instance
# Demo: both variables should reference the same object, so the two printed
# ids are expected to be identical.
s = Single_instance()
a = Single_instance()
print(id(s),id(a))
from django.contrib import admin
# Register your models here.
from LaF.models import Lost,Find
# Register your models here.
# Make the Lost and Find models manageable through the Django admin site.
for model in (Lost, Find):
    admin.site.register(model)
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.
# Definition for singly-linked list.
class ListNode(object):
    """Node of a singly linked list."""
    def __init__(self, x):
        self.val = x  # node payload
        self.next = None  # successor node, or None at the tail
# Definition for a binary tree node.
class TreeNode(object):
    """Node of a binary tree."""
    def __init__(self, x):
        self.val = x  # node payload
        self.left = None  # left child, or None
        self.right = None  # right child, or None
# First converted into Array List, then convert it as the same as 'Convert_Sorted_Array_to_Binary_Search_Tree'.
# 32 / 32 test cases passed.
# Status: Accepted
# Runtime: 255 ms
# Your runtime beats 64.85 % of python submissions.
class Solution(object):
    def sortedListToBST(self, head):
        """Convert a sorted singly linked list into a height-balanced BST.

        Copies the node values into a list first, then builds the tree
        recursively over index ranges (no sub-list copies are made).

        :type head: ListNode
        :rtype: TreeNode
        """
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next

        def build(lo, hi):
            # Build a balanced subtree from values[lo:hi); empty range -> None.
            if lo >= hi:
                return None
            mid = lo + (hi - lo) // 2
            root = TreeNode(values[mid])
            root.left = build(lo, mid)
            root.right = build(mid + 1, hi)
            return root

        return build(0, len(values))
# Use the slow and fast nodes to cut the linked list half, the slow node is the root of subtree.
# And delete the pre-slow node to avoid handle the same node in next iterate.
# 32 / 32 test cases passed.
# Status: Accepted
# Runtime: 249 ms
# Your runtime beats 74.20 % of python submissions.
class Solution(object):
    def sortedListToBST(self, head):
        """Convert a sorted singly linked list into a height-balanced BST.

        Uses slow/fast pointers to find the middle node (the subtree root),
        detaches the left half in place, and recurses on both halves.

        :type head: ListNode
        :rtype: TreeNode
        """
        def build(first):
            if first is None:
                return None
            if first.next is None:
                return TreeNode(first.val)
            # Two-pointer middle search; before_mid trails the middle node so
            # the left half can be detached from the list afterwards.
            before_mid = None
            mid = runner = first
            while runner and runner.next:
                before_mid = mid
                mid = mid.next
                runner = runner.next.next
            # Detach the left half so the recursion never revisits mid.
            before_mid.next = None
            node = TreeNode(mid.val)
            node.left = build(first)
            node.right = build(mid.next)
            return node
        return build(head)
if __name__ == '__main__':
    # Build the sorted list 1 -> 2 -> 3 -> 4 -> 5 and print the resulting tree root.
    values = [1, 2, 3, 4, 5]
    head = ListNode(values[0])
    tail = head
    for v in values[1:]:
        tail.next = ListNode(v)
        tail = tail.next
    print(Solution().sortedListToBST(head))
|
import curses
import time
from brewtroller import *
from mash import *
class CursesTun(Tun):
    """Curses view of a Tun (kettle): draws a bordered window showing the
    current temperature, setpoint and heating power, refreshing the display
    only when one of those values changes."""
    # window size
    height=13
    width=11
    # temperature display position in the window
    tempx=3
    tempy=3
    def __init__(self,bt,myid,title,x,y):
        Tun.__init__(self,bt,myid,title)
        self.win = curses.newwin(self.height, self.width, x, y)
        self.win.border("|","|","-","-","/","\\","\\","/")
        # Center the title. Bug fix: use integer division - curses positions
        # must be ints, and '/' yields a float under Python 3, which would
        # make addstr raise a TypeError.
        self.win.addstr(0,(self.width-len(title))//2,title)
        self.win.addstr(2,3,"Temp:",curses.A_BOLD)
        self.win.addstr(5,3,"Set:",curses.A_BOLD)
        self.win.addstr(8,3,"Power:",curses.A_BOLD)
    def update(self):
        """Poll new values (via Tun.update) and redraw any field that changed."""
        redraw=0
        Tun.update(self)
        if self.temperature != self.newtemperature:
            self.temperature = self.newtemperature
            redraw = 1
            if self.temperature > 150: # easy, ugly check if there is no sensor
                self.win.addstr(3,3,"ERROR",curses.A_BLINK)
            else:
                self.win.addstr(3,3," ") # erase first
                self.win.addstr(3,3,str(self.temperature))
        # get setpoint
        if self.setpoint != self.newsetpoint:
            self.setpoint=self.newsetpoint
            redraw = 1
            self.win.addstr(6,3," ") # erase first
            self.win.addstr(6,3,str(self.setpoint))
        # get heatingpower
        if self.power != self.newpower:
            redraw=1
            self.power=self.newpower
            self.win.addstr(9,3," ") # erase first
            self.win.addstr(9,3,str(self.newpower)+"%")
        if redraw == 1:
            self.win.refresh()
class CursesHLTRecirc(HLTRecirc):
    """Curses view of the HLT recirculation path: draws arrow characters whose
    shape depends on the recirculation state."""
    def __init__(self,bt,x,y,width,height):
        HLTRecirc.__init__(self,bt)
        self.win = curses.newwin(height, width, y, x)
        #self.win.hline(10,0,"=",width)
        #self.win.border()
        self.width=width
        self.height=height
    def update(self):
        """Poll the controller state (via HLTRecirc.update) and redraw on change."""
        HLTRecirc.update(self)
        # self.state / self.newstate presumably come from HLTRecirc - TODO confirm.
        if self.state != self.newstate:
            self.state = self.newstate
            self.win.erase()
            if self.state == 0:
                # State 0: short arrows plus a vertical riser.
                self.win.hline(10,0,">",2)
                self.win.hline(2,0,"<",2)
                self.win.vline(2,2,"^",8)
            else:
                # Any other state: a full-width arrow line.
                self.win.hline(10,0,">",self.width)
            self.win.refresh()
class CursesBrewStep(BrewStep):
    """Curses view showing the name of the current brewing step."""
    def __init__ (self, bt, x,y,width,height):
        BrewStep.__init__(self,bt)
        self.win = curses.newwin(height, width, y, x)
        #self.win.hline(10,0,"=",width)
        #self.win.border()
        self.width=width
        self.height=height
    def update(self):
        """Poll the current step (via BrewStep.update) and redraw on change."""
        BrewStep.update(self)
        if self.step != self.newstep:
            self.step = self.newstep
            self.win.erase()
            # self.stepnames is presumably provided by BrewStep - TODO confirm.
            self.win.addstr(0,0,self.stepnames[self.step])
            self.win.refresh()
def main(stdscr):
    """Curses entry point: build the brewery status views and poll them forever.

    Runs until interrupted (e.g. Ctrl-C); curses.wrapper restores the terminal.
    """
    # begin_x = 20; begin_y = 7
    # height = 5; width = 40
    # win = curses.newwin(height, width, begin_y, begin_x)
    # win.addstr(0,0,"apa")
    # win.refresh()
    # time.sleep(10)
    # create a connection to the btnic daemon
    bt = BrewTroller("http://10.168.0.10/cgi-bin/btnic.cgi")
    # we create two windows - one for the HLT and one for the MLT
    hlt = CursesTun(bt,0, "HLT", 10,10)
    mlt = CursesTun(bt,1, "MLT", 10,35)
    pump = CursesHLTRecirc(bt,21,10,14,13)
    step = CursesBrewStep(bt,21,25,15,1)
    # Poll every view once per second, forever.
    while 1:
        hlt.update()
        mlt.update()
        pump.update()
        step.update()
        time.sleep(1)
# Hand the terminal over to curses; it is restored on exit or on an exception.
curses.wrapper(main)
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
from collections import defaultdict
import six
from pants.backend.jvm.zinc.zinc_analysis_element_types import (APIs, Compilations, CompileSetup,
Relations, SourceInfos, Stamps)
class ZincAnalysis(object):
"""Parsed representation of a zinc analysis.
Note also that all files in keys/values are full-path, just as they appear in the analysis file.
If you want paths relative to the build root or the classes dir or whatever, you must compute
those yourself.
"""
FORMAT_VERSION_LINE = b'format version: 5\n'
  @classmethod
  def merge(cls, analyses):
    """Merge multiple ZincAnalysis instances into a single combined analysis.

    External dependencies whose target sources become part of the merged
    analysis are "internalized" (rewritten as internal source dependencies).

    :param analyses: the ZincAnalysis instances to merge.
    :returns: a new ZincAnalysis covering all the inputs.
    """
    # Note: correctly handles "internalizing" external deps that must be internal post-merge.
    # "Merge" compile setup. We assume that all merged analyses have the same setup, so we just take the
    # setup of the first analysis. TODO: Validate that all analyses have the same setup.
    compile_setup = analyses[0].compile_setup if len(analyses) > 0 else CompileSetup((defaultdict(list), ))
    # Merge relations.
    src_prod = ZincAnalysis._merge_disjoint_dicts([a.relations.src_prod for a in analyses])
    binary_dep = ZincAnalysis._merge_disjoint_dicts([a.relations.binary_dep for a in analyses])
    classes = ZincAnalysis._merge_disjoint_dicts([a.relations.classes for a in analyses])
    used = ZincAnalysis._merge_disjoint_dicts([a.relations.used for a in analyses])
    # Invert classes (src -> classes) so we can map a class back to the source
    # that produced it when internalizing external deps below.
    class_to_source = dict((v, k) for k, vs in classes.items() for v in vs)
    def merge_dependencies(internals, externals):
      # Merge one paired (internal, external) family of dependency dicts,
      # internalizing external deps whose source is now in the merged analysis.
      internal = defaultdict(list)
      external = defaultdict(list)
      naive_internal = ZincAnalysis._merge_disjoint_dicts(internals)
      naive_external = ZincAnalysis._merge_disjoint_dicts(externals)
      # Note that we take care not to create empty values in internal.
      for k, vs in six.iteritems(naive_internal):
        if vs:
          internal[k].extend(vs)  # Ensure a new list.
      for k, vs in six.iteritems(naive_external):
        # class->source is many->one, so make sure we only internalize a source once.
        internal_k = set(internal.get(k, []))
        for v in vs:
          vfile = class_to_source.get(v)
          if vfile and vfile in src_prod:
            internal_k.add(vfile)  # Internalized.
          else:
            external[k].append(v)  # Remains external.
        if internal_k:
          internal[k] = list(internal_k)
      return internal, external
    internal, external = merge_dependencies(
      [a.relations.internal_src_dep for a in analyses],
      [a.relations.external_dep for a in analyses])
    internal_pi, external_pi = merge_dependencies(
      [a.relations.internal_src_dep_pi for a in analyses],
      [a.relations.external_dep_pi for a in analyses])
    member_ref_internal, member_ref_external = merge_dependencies(
      [a.relations.member_ref_internal_dep for a in analyses],
      [a.relations.member_ref_external_dep for a in analyses])
    inheritance_internal, inheritance_external = merge_dependencies(
      [a.relations.inheritance_internal_dep for a in analyses],
      [a.relations.inheritance_external_dep for a in analyses])
    relations = Relations((src_prod, binary_dep,
                           internal, external,
                           internal_pi, external_pi,
                           member_ref_internal, member_ref_external,
                           inheritance_internal, inheritance_external,
                           classes, used))
    # Merge stamps. Binary stamps may overlap across analyses; take the largest
    # (i.e. most recent lastModified) value for those.
    products = ZincAnalysis._merge_disjoint_dicts([a.stamps.products for a in analyses])
    sources = ZincAnalysis._merge_disjoint_dicts([a.stamps.sources for a in analyses])
    binaries = ZincAnalysis._merge_overlapping_dicts([a.stamps.binaries for a in analyses])
    classnames = ZincAnalysis._merge_disjoint_dicts([a.stamps.classnames for a in analyses])
    stamps = Stamps((products, sources, binaries, classnames))
    # Merge APIs, internalizing the APIs of classes whose sources are now internal.
    internal_apis = ZincAnalysis._merge_disjoint_dicts([a.apis.internal for a in analyses])
    naive_external_apis = ZincAnalysis._merge_disjoint_dicts([a.apis.external for a in analyses])
    external_apis = defaultdict(list)
    for k, vs in six.iteritems(naive_external_apis):
      kfile = class_to_source.get(k)
      if kfile and kfile in src_prod:
        internal_apis[kfile] = vs  # Internalized.
      else:
        external_apis[k] = vs  # Remains external.
    apis = APIs((internal_apis, external_apis))
    # Merge source infos.
    source_infos = SourceInfos((ZincAnalysis._merge_disjoint_dicts([a.source_infos.source_infos for a in analyses]), ))
    # Merge compilations: dedupe, sort, and renumber with zero-padded keys.
    # NOTE(review): b'{:03}'.format only works on Python 2 (where bytes is str).
    compilation_vals = sorted(set([x[0] for a in analyses for x in six.itervalues(a.compilations.compilations)]))
    compilations_dict = defaultdict(list)
    for i, v in enumerate(compilation_vals):
      compilations_dict[b'{:03}'.format(int(i))] = [v]
    compilations = Compilations((compilations_dict, ))
    return ZincAnalysis(compile_setup, relations, stamps, apis, source_infos, compilations)
@staticmethod
def _merge_disjoint_dicts(dicts):
"""Merges multiple dicts with disjoint key sets into one.
May also be used when we don't care which value is picked for a key that appears more than once.
"""
ret = defaultdict(list)
for d in dicts:
ret.update(d)
return ret
@staticmethod
def _merge_overlapping_dicts(dicts):
"""Merges multiple, possibly overlapping, dicts into one.
If a key exists in more than one dict, takes the largest value in dictionary order.
This is useful when the values are singleton stamp lists of the form ['lastModified(XXXXXXXX)'],
as it will lead to taking the most recent modification time.
"""
ret = defaultdict(list)
for d in dicts:
for k, v in six.iteritems(d):
if k not in ret or ret[k] < v:
ret[k] = v
return ret
def __init__(self, compile_setup, relations, stamps, apis, source_infos, compilations):
(self.compile_setup, self.relations, self.stamps, self.apis, self.source_infos, self.compilations) = \
(compile_setup, relations, stamps, apis, source_infos, compilations)
def diff(self, other):
"""Returns a list of element diffs, one per element where self and other differ."""
element_diffs = []
for self_elem, other_elem in zip(
(self.compile_setup, self.relations, self.stamps, self.apis,
self.source_infos, self.compilations),
(other.compile_setup, other.relations, other.stamps, other.apis,
other.source_infos, other.compilations)):
element_diff = self_elem.diff(other_elem)
if element_diff.is_different():
element_diffs.append(element_diff)
return element_diffs
  def sources(self):
    """Return the source files known to this analysis (keys of the source stamps)."""
    return self.stamps.sources.keys()
def is_equal_to(self, other):
for self_element, other_element in zip(
(self.compile_setup, self.relations, self.stamps, self.apis,
self.source_infos, self.compilations),
(other.compile_setup, other.relations, other.stamps, other.apis,
other.source_infos, other.compilations)):
if not self_element.is_equal_to(other_element):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
  def __hash__(self):
    """Hash over the six underlying elements (they must themselves be hashable)."""
    return hash((self.compile_setup, self.relations, self.stamps, self.apis,
                 self.source_infos, self.compilations))
  def split(self, splits, catchall=False):
    """Split this analysis into one analysis per group of source files.

    :param splits: a list of iterables of source files; each becomes one output analysis.
    :param catchall: if True, append one extra analysis holding every source
                     not covered by any split.
    :returns: a list of ZincAnalysis instances, parallel to splits.
    """
    # Note: correctly handles "externalizing" internal deps that must be external post-split.
    splits = [set(x) for x in splits]  # Ensure sets, for performance.
    if catchall:
      # Even empty sources with no products have stamps.
      remainder_sources = set(self.sources()).difference(*splits)
      splits.append(remainder_sources)  # The catch-all
    # The inner functions are primarily for ease of performance profiling.
    # For historical reasons, external deps are specified as src->class while internal deps are
    # specified as src->src. So when splitting we need to pick a representative. We must pick
    # consistently, so we take the first class name in alphanumeric order.
    def make_representatives():
      representatives = {k: min(vs) for k, vs in six.iteritems(self.relations.classes)}
      return representatives
    representatives = make_representatives()
    # Split the source, binary and classes keys in our relations structs.
    # Subsequent operations need this data.
    def split_relation_keys():
      src_prod_splits = self._split_dict(self.relations.src_prod, splits)
      binary_dep_splits = self._split_dict(self.relations.binary_dep, splits)
      classes_splits = self._split_dict(self.relations.classes, splits)
      return src_prod_splits, binary_dep_splits, classes_splits
    src_prod_splits, binary_dep_splits, classes_splits = split_relation_keys()
    # Split relations.
    def split_relations():
      # Split a single pair of (internal, external) dependencies.
      def _split_dependencies(all_internal, all_external):
        internals = []
        externals = []
        naive_internals = self._split_dict(all_internal, splits)
        naive_externals = self._split_dict(all_external, splits)
        for naive_internal, naive_external, split in zip(naive_internals, naive_externals, splits):
          internal = defaultdict(list)
          external = defaultdict(list)
          # Note that we take care not to create empty values in external.
          for k, vs in six.iteritems(naive_external):
            if vs:
              external[k].extend(vs)  # Ensure a new list.
          for k, vs in six.iteritems(naive_internal):
            for v in vs:
              if v in split:
                internal[k].append(v)  # Remains internal.
              else:
                external[k].append(representatives[v])  # Externalized.
          internals.append(internal)
          externals.append(external)
        return internals, externals
      internal_splits, external_splits = \
        _split_dependencies(self.relations.internal_src_dep, self.relations.external_dep)
      internal_pi_splits, external_pi_splits = \
        _split_dependencies(self.relations.internal_src_dep_pi, self.relations.external_dep_pi)
      member_ref_internal_splits, member_ref_external_splits = \
        _split_dependencies(self.relations.member_ref_internal_dep, self.relations.member_ref_external_dep)
      inheritance_internal_splits, inheritance_external_splits = \
        _split_dependencies(self.relations.inheritance_internal_dep, self.relations.inheritance_external_dep)
      used_splits = self._split_dict(self.relations.used, splits)
      relations_splits = []
      for args in zip(src_prod_splits, binary_dep_splits,
                      internal_splits, external_splits,
                      internal_pi_splits, external_pi_splits,
                      member_ref_internal_splits, member_ref_external_splits,
                      inheritance_internal_splits, inheritance_external_splits,
                      classes_splits, used_splits):
        relations_splits.append(Relations(args))
      return relations_splits
    relations_splits = split_relations()
    # Split stamps: each split keeps only the stamps for its own products/binaries.
    def split_stamps():
      stamps_splits = []
      sources_splits = self._split_dict(self.stamps.sources, splits)
      for src_prod, binary_dep, sources in zip(src_prod_splits, binary_dep_splits, sources_splits):
        products_set = set(itertools.chain(*six.itervalues(src_prod)))
        binaries_set = set(itertools.chain(*six.itervalues(binary_dep)))
        products, _ = self._restrict_dicts(products_set, self.stamps.products)
        binaries, classnames = self._restrict_dicts(binaries_set, self.stamps.binaries,
                                                    self.stamps.classnames)
        stamps_splits.append(Stamps((products, sources, binaries, classnames)))
      return stamps_splits
    stamps_splits = split_stamps()
    # Split apis.
    def split_apis():
      # Externalized deps must copy the target's formerly internal API.
      representative_to_internal_api = {}
      for src, rep in six.iteritems(representatives):
        representative_to_internal_api[rep] = self.apis.internal.get(src)
      internal_api_splits = self._split_dict(self.apis.internal, splits)
      external_api_splits = []
      for rel in relations_splits:
        external_api = {}
        for vs in six.itervalues(rel.external_dep):
          for v in vs:
            if v in representative_to_internal_api:  # This is an externalized dep.
              external_api[v] = representative_to_internal_api[v]
            else:  # This is a dep that was already external.
              external_api[v] = self.apis.external[v]
        external_api_splits.append(external_api)
      apis_splits = []
      for args in zip(internal_api_splits, external_api_splits):
        apis_splits.append(APIs(args))
      return apis_splits
    apis_splits = split_apis()
    # Split source infos.
    def split_source_infos():
      source_info_splits = \
        [SourceInfos((x, )) for x in self._split_dict(self.source_infos.source_infos, splits)]
      return source_info_splits
    source_info_splits = split_source_infos()
    # Create the final ZincAnalysis instances from all these split pieces.
    # compile_setup and compilations are shared unchanged by every split.
    def create_analyses():
      analyses = []
      for relations, stamps, apis, source_infos in zip(relations_splits, stamps_splits, apis_splits, source_info_splits):
        analyses.append(ZincAnalysis(self.compile_setup, relations, stamps, apis, source_infos, self.compilations))
      return analyses
    analyses = create_analyses()
    return analyses
def write_to_path(self, outfile_path):
with open(outfile_path, 'wb') as outfile:
self.write(outfile)
def write(self, outfile):
outfile.write(ZincAnalysis.FORMAT_VERSION_LINE)
self.compile_setup.write(outfile)
self.relations.write(outfile)
self.stamps.write(outfile)
self.apis.write(outfile)
self.source_infos.write(outfile)
self.compilations.write(outfile)
# Translate the contents of this analysis. Useful for creating anonymized test data.
# Note that the resulting file is not a valid analysis, as the base64-encoded serialized objects
# will be replaced with random base64 strings. So these are useful for testing analysis parsing,
# splitting and merging, but not for actually reading into Zinc.
def translate(self, token_translator):
for element in [self.compile_setup, self.relations, self.stamps, self.apis,
self.source_infos, self.compilations]:
element.translate(token_translator)
def _split_dict(self, d, splits):
"""Split a dict by its keys.
splits: A list of lists of keys.
Returns one dict per split.
"""
ret = []
for split in splits:
dict_split = defaultdict(list)
for f in split:
if f in d:
dict_split[f] = d[f]
ret.append(dict_split)
return ret
def _restrict_dicts(self, keys, dict1, dict2=None):
"""Returns a subdict of each input dict with its keys restricted to the given set.
Assumes that iterating over keys is much faster than iterating over the dicts. So use this
when keys is small compared to the total number of items in the dicts.
Note: the interface is a bit odd, and would be more general if we allowed an arbitrary
number of dicts. However in practice we only need this for 1 or 2 dicts, and this code
runs faster than if we had to iterate over a list of dicts in an inner loop.
"""
ret1 = {}
ret2 = None if dict2 is None else {}
for k in keys:
if k in dict1:
ret1[k] = dict1[k]
if dict2 is not None and k in dict2:
ret2[k] = dict2[k]
return ret1, ret2
|
# -*- coding: utf-8 -*-
' a test module '
from com.drcuiyutao.py.find import find_file_by_key
__author__ = 'Declan'
import sys
import os
import pickle
import json
import time
from io import BytesIO
def test():
    """Print a greeting that depends on the number of command-line arguments."""
    argv = sys.argv
    if len(argv) == 1:
        print('Hello World!')
    elif len(argv) == 2:
        print('Hello, %s !' % argv[1])
    else:
        print('too many arguments')
class Student(object):
    """A student record with a class-wide instance counter."""
    # Total number of Student instances ever created (shared across instances).
    count = 0
    def __init__(self, name, score):
        self.name = name
        self.score = score
        # Increment on the class itself so all instances see the new total.
        Student.count += 1
    def print_score(self):
        """Print the score using integer formatting."""
        print('score = %i' % self.score)
class Human(object):
    """Minimal example class demonstrating a custom __str__."""
    def __init__(self, name):
        # Leading underscore: non-public attribute by convention.
        self._name = name
    def __str__(self):
        # Used by print()/str(); repr() still shows the default object repr.
        return 'Human object (name = %s)' % self._name
class Fib(object):
    """Fibonacci sequence (1, 1, 2, 3, 5, ...) supporting index and slice access.

    fib[n] returns the n-th value; fib[a:b] returns the values at indices
    [a, b) as a list. Other index types return None; slice steps are ignored.
    """
    def __getitem__(self, item):
        if isinstance(item, int):
            a, b = 1, 1
            for _ in range(item):
                a, b = b, a + b
            return a
        if isinstance(item, slice):
            start = 0 if item.start is None else item.start
            result = []
            a, b = 1, 1
            for index in range(item.stop):
                if index >= start:
                    result.append(a)
                a, b = b, a + b
            return result
if __name__ == '__main__':
    # a = int(time.mktime(time.strptime('YYYY-mm-dd HH:MM:SS', '%Y-%m-%d %H:%M:%S')))
    # Show the local time for a fixed epoch timestamp.
    print(time.localtime(1520524800))
    human = Human('Mike')
    # Serialize via __dict__ since Human is not JSON-serializable by itself.
    print(json.dumps(human, default=lambda human: human.__dict__))
    # NOTE(review): expects a pre-existing pickle file 'human.txt' in the
    # current directory; raises FileNotFoundError otherwise.
    with open('human.txt', 'rb') as f:
        d = pickle.load(f)
    print(d)
    # print(human)
    # with open('abc', 'r') as f:
    #     print('aaa %s ' % f.read())
    #     print(f.tell())
    #     f.seek(0)
    #     print('aaa %s ' % f.read())
    # In-memory bytes buffer demo.
    f = BytesIO()
    f.write('aaa'.encode('utf-8'))
    print(f.getvalue())
    print(os.name)
    print(os.environ)
    find_file_by_key("find")
|
# sharepoint.py
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
import time
# Launch Firefox and open the SharePoint portal.
# NOTE(review): credentials are embedded in the URL in clear text - move them
# to a secure configuration/secret store. Also note this is Python 2 syntax
# (print statement below).
browser = webdriver.Firefox()
browser.get('http://cmcallister:Zephyr2014@portal.catalystitservices.com/')
# Grab the page's alert dialog (raises NoAlertPresentException if none is shown).
alert = browser.switch_to.alert
print 'wut?'
#-*- coding: utf-8 -*-
from django import forms
from django.core.mail import send_mail
from captcha.fields import CaptchaField
class FaleConoscoForm(forms.Form):
    """Contact-us ("fale conosco") form: subject, name, e-mail, message and a captcha."""
    assunto = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'placeholder': 'O assunto'}))  # subject
    nome = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'placeholder': 'Seu nome'}))  # sender name
    email = forms.EmailField(label='E-mail', widget=forms.TextInput(attrs={'placeholder': 'Seu e-mail'}))  # sender e-mail
    mensagem = forms.Field(widget=forms.Textarea(attrs={'placeholder': 'Sua mensagem'}))  # message body
    captcha = CaptchaField(label='Repita os caracteres a seguir')  # anti-spam captcha
    def enviar(self):
        """Send the validated form contents by e-mail; call only after is_valid()."""
        mensagem_ = 'Nome: %s\n\nE-mail: %s\n\nMensagem:\n\n%s' % (
            self.cleaned_data['nome'],
            self.cleaned_data['email'],
            self.cleaned_data['mensagem']
        )
        # The site owner's address is used as both sender and recipient.
        destinatario = 'thiago.amm.agr@gmail.com'
        send_mail(
            subject='%s' % self.cleaned_data['assunto'],
            message=mensagem_,
            from_email=destinatario,
            recipient_list=[destinatario],
        )
|
import requests
API_URL = 'https://api.scryfall.com/cards/arena/'
def get_card_info(mtga_id: int, timeout=None):
    """
    Fetch full card info from Scryfall by MTG Arena id.

    Parameters
    mtga_id: Must be a valid MTG Arena card id.
    timeout: Optional per-request timeout in seconds. None (the default)
        preserves the original wait-indefinitely behaviour; pass a number to
        avoid hanging on an unresponsive server.

    Returns
    A dictionary object containing full info of the card that has the specified MTGA id.
    Example output can be found here: https://api.scryfall.com/cards/arena/75519
    """
    # NOTE(review): for unknown ids Scryfall responds with an HTTP error status
    # plus a JSON error object; callers receive that dict rather than an exception.
    card_response = requests.get(API_URL + str(mtga_id), timeout=timeout)
    return card_response.json()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.