blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fd8aa3ea52ca26a7bff1f7c7d6f9d22f8f4d59b7 | c038d06c31de0919d70c04f517f7490146ff80df | /train_nn.py | b8a8c4cc52d03dcfcdb4e339de6e584971a4eca1 | [] | no_license | KWAN-YWAN/gtd-analytics | 235df79f9b95b1734928cd2a9b4d54c5bf3f88e8 | 10fd7fa2965bb0efcc2396d86d3998afbc0fe7c8 | refs/heads/master | 2020-03-28T18:13:39.976015 | 2018-07-11T21:56:29 | 2018-07-11T21:56:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,579 | py | #!/usr/bin/env python
import numpy as np
import pandas as pd
# Plotter library
import seaborn as sns
import matplotlib.pyplot as plt
# Own libraries
import data_preparation as prep
from nn import Nn
from sklearn import preprocessing
def filter_data(df):
    """Keep only kidnapping-related incidents with usable outcome data.

    An incident is kept when any of its attack-type columns is a kidnapping
    category (5 or 6) or when ``ishostkid`` equals 1.  Rows with a missing
    outcome class, a missing or unknown (-99) release count, or more
    released hostages than were taken are dropped.

    :param df: raw GTD DataFrame
    :return: filtered DataFrame without the attack-type columns
    """
    kidnap_cats = [5, 6]
    # Parenthesize the equality test: ``|`` binds tighter than ``==`` in
    # Python, so the original expression OR-ed the masks with the raw
    # ``ishostkid`` column and compared that result against 1 instead of
    # testing ``ishostkid == 1``.
    is_kidnap = (
        df.attacktype1.isin(kidnap_cats)
        | df.attacktype2.isin(kidnap_cats)
        | df.attacktype3.isin(kidnap_cats)
        | (df.ishostkid == 1)
    )
    df = df[is_kidnap]
    # Drop attacktype columns. They aren't needed anymore.
    df = df.drop(['attacktype1', 'attacktype2', 'attacktype3', 'ishostkid'], axis=1)
    # Filter also broken data from our classes.
    df = df[df.hostkidoutcome.notnull()]
    # Filter data for NaN nreleased or the -99 "unknown" sentinel.
    df = df[df.nreleased.notnull()]
    df = df[df.nreleased != -99]
    # More released hostages than taken is inconsistent data.
    df = df[df.nhostkid >= df.nreleased]
    return df
def augmentate_data(df):
    """Add derived numeric columns and drop the non-numeric ones.

    Adds:
      * ``gname_id``    - integer id per group name (via ``prep.str_to_index_arr``)
      * ``nreleased_p`` - fraction of the hostages that were released
      * ``ndied``       - number of hostages that were not released

    :param df: filtered GTD DataFrame
    :return: numeric-only DataFrame with the derived columns
    """
    # Add an ID group for gname to the DataFrame.
    df['gname_id'], _ = prep.str_to_index_arr(df['gname'])
    # Fraction of the hostage victims that survived/were released.
    df['nreleased_p'] = np.divide(df.nreleased, df.nhostkid)
    # Number of hostages that died (not released).
    df['ndied'] = np.subtract(df.nhostkid, df.nreleased)
    # Keep only numeric columns; use the public selector instead of the
    # private pandas helper ``_get_numeric_data`` (internal API).
    df = df.select_dtypes(include=[np.number])
    return df
def handle_NaN_in_data(df):
    """Return a copy of *df* with NaNs replaced by each column's mean.

    Equivalent to the original ``sklearn.preprocessing.Imputer`` round trip
    (strategy='mean', axis=0), but implemented with pandas directly: this
    preserves the index and column labels without re-attaching them, and
    avoids ``Imputer``, which was removed in scikit-learn 0.22.

    :param df: DataFrame possibly containing NaN cells
    :return: DataFrame with NaNs mean-imputed per column
    """
    return df.fillna(df.mean())
def set_NaN_to_value(df, value):
    """Return a copy of *df* in which every NaN cell is replaced by *value*."""
    replaced = df.replace(np.nan, value)
    return replaced
def set_unknown_to_NaN(df, unknown_values):
    """Return a copy of *df* with every sentinel in *unknown_values* set to NaN.

    Bug fix: the original looped and called ``df.replace(...)`` without
    assigning the result - ``DataFrame.replace`` is not in-place, so the
    frame was returned completely unchanged.  A single ``replace`` with the
    list of sentinels is equivalent to the intended loop.

    :param df: input DataFrame
    :param unknown_values: iterable of sentinel values (e.g. [-9, -99])
    """
    return df.replace(list(unknown_values), np.nan)
def visualize_data(df, path='', suffix=''):
    """Render and save the exploratory plots for the kidnapping data.

    Writes three figures (PNG and PDF each) under *path*:
      * a regression joint plot of year vs. number of kidnapped victims
      * a violin plot of outcome class vs. fraction released, split by ransom
      * a masked correlation heatmap over all numeric columns

    :param df: prepared numeric DataFrame
    :param path: output directory prefix (must end with '/' if non-empty)
    :param suffix: optional filename suffix
    """
    # First: a plot about number of kidnapped persons.
    sns.set(style="darkgrid", color_codes=True)
    g1 = sns.jointplot(
        'iyear',
        'nhostkid',
        data=df,
        kind="reg",
        color='r',
        size=7,
        xlim=[1970, 2016]
    )
    g1.set_axis_labels('Years', 'Number of kidnapped victims')
    g1.savefig(path + 'interaction-iyear_nhostkid' + suffix + '.png')
    g1.savefig(path + 'interaction-iyear_nhostkid' + suffix + '.pdf')
    # Outcomes vs percentage of released victims.
    g2 = sns.violinplot(
        x='hostkidoutcome',
        y='nreleased_p',
        data=df,
        hue='ransom'
    )
    g2.figure.savefig(path + 'interaction-hostkidoutcome_nreleased_p' + suffix + '.png')
    g2.figure.savefig(path + 'interaction-hostkidoutcome_nreleased_p' + suffix + '.pdf')
    ### Correlation
    corr = df.corr()
    # Mask the upper triangle so each pair is drawn only once.  Use the
    # builtin ``bool``: the ``np.bool`` alias was deprecated in NumPy 1.20
    # and removed in NumPy 1.24, where it raises AttributeError.
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    # Set up the matplotlib figure.
    f, ax = plt.subplots(figsize=(11, 9))
    # Generate a custom diverging colormap.
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # Draw the heatmap with the mask and correct aspect ratio.
    g3 = sns.heatmap(
        corr,
        mask=mask,
        cmap=cmap,
        vmax=.3,
        center=0,
        square=True,
        linewidths=.5,
        cbar_kws={"shrink": .5}
    )
    g3.figure.savefig(path + 'correlation_full' + suffix + '.png')
    g3.figure.savefig(path + 'correlation_full' + suffix + '.pdf')
def train(X, Y):
    """Fit a decision-tree classifier on (X, Y) and return the fitted model.

    Bug fix: the original body called ``model.fit(X_train, Y_train)`` -
    undefined names inside this function (they only exist in the
    ``__main__`` block) - instead of using its own parameters, and it never
    returned the fitted model.

    :param X: feature matrix
    :param Y: target vector/matrix
    :return: the fitted ``DecisionTreeClassifier``
    """
    from sklearn.tree import DecisionTreeClassifier
    model = DecisionTreeClassifier()
    model.fit(X, Y)
    return model
def train_svm(X, y, C=1.0):
    """Fit a linear one-vs-rest SVM classifier.

    :param X: feature matrix
    :param y: label vector (single column)
    :param C: SVM regularisation parameter
    :return: the fitted ``sklearn.svm.SVC`` instance
    """
    from sklearn.svm import SVC
    classifier = SVC(kernel='linear', C=C, decision_function_shape='ovr')
    classifier.fit(X, y)
    return classifier
def predict_svm(model, X, y):
    """Return *model*'s predictions for *X*.

    ``y`` is unused; the parameter is kept for API compatibility with
    existing callers.
    """
    return model.predict(X)
if __name__ == "__main__":
    #####
    # The purpose of our classifier is to predict the hostkidoutcome category and a percentage of released persons.
    # Y: hostkidoutcome, npreleased
    # X: extended, iyear, gname_id, nhostkid, ndays, ransom, ransompaid, ishostkid
    #####
    ### Data filtering
    # Read data and exclude cols
    # @Snippet: To exclude: lambda x: x not in ["eventid","imonth","iday", "attacktype2","claims2","claimmode2","claimmode3","gname2"]
    df = prep.read_data('globalterrorismdb_0617dist.csv',
                        usecols=['nreleased', 'attacktype1', 'attacktype2', 'attacktype3', 'extended', 'iyear', 'gname',
                                 'nhostkid', 'nhours', 'ndays', 'ransom', 'ransompaid', 'ransompaidus', 'ishostkid',
                                 'hostkidoutcome'])
    df = filter_data(df)
    df = augmentate_data(df)
    # We also have sometimes -99 or -9 as values when things were unknown. We have to replace them as well with NaNs
    df = set_unknown_to_NaN(df, [-9, -99])
    # We have a whole number of columns which contains NaNs for missing data. To overcome those, we simply use the sklearn Imputer to fill the NaNs with the mean values
    # NOTE(review): despite the comment above, the call below replaces NaNs
    # with the constant -1 (handle_NaN_in_data would impute means) - confirm
    # which behaviour is intended.
    df = set_NaN_to_value(df, -1)
    head = df.head()
    print(df.head())
    # Plot data
    visualize_data(df, path="plots/")
    print('Resulting columns for training: \n{}\n'.format(df.columns))
    # Normalize to 0-1 (column-wise division by each column's maximum).
    x = df.values
    x_normed = x / x.max(axis=0)
    # NOTE(review): ``columns=[head]`` wraps a DataFrame in a list; presumably
    # ``columns=df.columns`` was intended - verify the resulting column index.
    df = pd.DataFrame(columns=[head], data=x_normed)
    print(df)
    ### Separate set into train, validation, test by assigning each to the preferred class randomly.
    # NOTE(review): the three samples are drawn independently with
    # replace=True, so train/validation/test can overlap and contain
    # duplicate rows.  Also note ``train`` shadows the train() function above.
    train = df.sample(frac=0.6, replace=True)
    validation = df.sample(frac=0.2, replace=True)
    test = df.sample(frac=0.2, replace=True)
    labels = ['hostkidoutcome', 'nreleased_p']
    X_train, Y_train, Y_train_columns = prep.separate_labels(train, labels)
    X_validation, Y_validation, Y_validation_columns = prep.separate_labels(validation, labels)
    X_test, Y_test, Y_test_columns = prep.separate_labels(test, labels)
    # Build the neural network and load previously persisted weights.
    nn = Nn()
    nn.create_model()
    nn.load_model_from_json()
    # nn.train(x=X_train.values,
    #          y=Y_train.values,
    #          validation_data=(X_validation.values, Y_validation.values))
    # nn.persist_model()
    score = nn.evaluate(x=X_test, y=Y_test)
    print("Achieved Score:", score)
| [
"vinh-ngu@hotmail.com"
] | vinh-ngu@hotmail.com |
4a91a67b682f1783ab47dec7b9f606dc53773550 | 0d1fa87b14e5b73f7acad0275845cf290a72b36a | /rlcycle/common/abstract/loss.py | d0aa9136116f8927434fbf189052732269e691e5 | [
"MIT"
] | permissive | RL-code-lib/RLcycle | 101ec129ea8b2b1b8e0f56e3715c65d5bebee35f | 5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569 | refs/heads/master | 2023-08-25T04:53:13.730944 | 2021-09-23T19:46:51 | 2021-09-23T19:46:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | from abc import ABC, abstractmethod
from typing import Tuple
import numpy as np
from omegaconf import DictConfig
import torch
import torch.nn as nn
class Loss(ABC):
    """Abstract base class for callable loss functions.

    Attributes:
        hyper_params (DictConfig): algorithm hyperparameters
        use_cuda (bool): true if using gpu
    """

    def __init__(self, hyper_params: DictConfig, use_cuda: bool):
        self.hyper_params = hyper_params
        self.use_cuda = use_cuda

    @abstractmethod
    def __call__(
        self, networks: Tuple[nn.Module, ...], data: Tuple[np.ndarray, ...],
    ) -> Tuple[torch.Tensor, ...]:
        """Compute loss tensor(s) from the given networks and batch data."""
        pass
| [
"chrisyoon1729@gmail.com"
] | chrisyoon1729@gmail.com |
bc7058a78e6e9bd0a48ef99d6ea623b099bcb7de | 81e88bb831b33cdc84dc2aeeb1b97f709f0c0802 | /main.py | 6920b5473b2e8b873b208bf71db52281d8d0cd83 | [
"MIT"
] | permissive | aroch/protobuf-dataframe | 2fed247c0200ab5a007c0e645fda532aebf96713 | bdd6a13a9ca6f77072c55a05aab79baaef4b712f | refs/heads/master | 2022-11-14T07:18:42.752410 | 2022-11-13T08:30:35 | 2022-11-13T08:30:35 | 203,235,530 | 11 | 3 | MIT | 2022-11-13T08:30:37 | 2019-08-19T19:20:53 | Python | UTF-8 | Python | false | false | 1,315 | py | from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from protodf import schema_for, message_to_row
def specific_message_bytes_to_row(pb_bytes):
    """Deserialize raw protobuf bytes into a Spark ``Row``.

    NOTE(review): relies on the module-level ``message_type`` assigned in
    ``__main__`` before this UDF is used - confirm it is set to a concrete
    protobuf message class.
    """
    # import your protobuf here
    msg = message_type.FromString(pb_bytes)
    row = message_to_row(message_type().DESCRIPTOR, msg)
    return row
if __name__ == "__main__":
    spark = SparkSession \
        .builder \
        .appName("StructuredProtobuf") \
        .getOrCreate()
    # The protobuf message class the UDF will deserialize with.
    message_type = {}  # TODO: replace with a specific Protobuf message
    # Spark schema derived from the protobuf descriptor.
    schema = schema_for(message_type().DESCRIPTOR)
    specific_message_bytes_to_row_udf = udf(specific_message_bytes_to_row, schema)
    # Stream protobuf-encoded records from Kafka.
    df = spark \
        .readStream \
        .format("kafka") \
        .option("kafka.bootstrap.servers", "localhost:9092") \
        .option("subscribe", "topic") \
        .load()
    # Decode the raw Kafka value bytes into a structured column.
    df = df.withColumn("event", specific_message_bytes_to_row_udf(col("value")))
    df.printSchema()
    # Now you can query your protobuf with regular SQL! Nested messages, repeated, etc are all supported!
    df = df.select("event.field_name", "event.nested_message.field")
    query = df.writeStream \
        .outputMode("update") \
        .format("console") \
        .option("truncate", "false") \
        .start()
    query.awaitTermination()
| [
"rafi.aroch@gmail.com"
] | rafi.aroch@gmail.com |
a82fc06568fcb535206a9cbd973e444dcb6fd998 | a97b2f48fc6cbfe3eaa05628b7d5fde0219bd4dd | /saba67.py | b05be9c26301266a39a11edec761e0ffb0b547fc | [] | no_license | sabapathy1234/P.saba | 715cdd4f88432f5e45f79d0d4e43d24213a3e5c6 | 25f55bda92938c7d418801715f44f89473733874 | refs/heads/master | 2020-06-11T17:27:28.997123 | 2019-08-14T10:22:12 | 2019-08-14T10:22:12 | 194,036,280 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | hj23=int(input())
# Sum the integers 1..hj23 (upper bound read above) and print the total.
total = sum(range(1, hj23 + 1))
print(total)
| [
"noreply@github.com"
] | noreply@github.com |
1d97335e9b892423815671859110140fd3a0e9e0 | 015806e237bfcd85a3e4eb48bac4461413f26a62 | /website/views.py | 5c0c67478b6c12e19f3532f3baa3ac9a0d9d870d | [] | no_license | alexsteeve/escoladasnacoes | 7fc63ea4da8ab6927184bc62ce25f78534227a7b | c6c9011c146e245a9f3693361a93cd81db822b7a | refs/heads/master | 2020-03-28T04:32:37.159757 | 2018-09-06T19:21:44 | 2018-09-06T19:21:44 | 147,721,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,643 | py | from django.urls import reverse_lazy
from django.views.generic import TemplateView, ListView, UpdateView, CreateView, DeleteView, DetailView
from helloworld.models import Estudante, Curso, Materia, CursoPeriodo, CursoPeriodoEstudante
from website.forms import InsereEstudanteForm
from datetime import datetime
# MAIN PAGE
# ----------------------------------------------
class IndexTemplateView(TemplateView):
    # Static landing page.
    template_name = "website/index.html"


# STUDENT LIST
# ----------------------------------------------
class EstudanteListView(ListView):
    # Read-only listing of every Estudante record.
    template_name = "website/lista.html"
    model = Estudante
    context_object_name = "estudantes"


# STUDENT REGISTRATION
# ----------------------------------------------
class EstudanteCreateView(CreateView):
    # Creates an Estudante via the custom registration form.
    template_name = "website/cria.html"
    model = Estudante
    form_class = InsereEstudanteForm
    success_url = reverse_lazy("website:lista_estudantes")


# STUDENT UPDATE
# ----------------------------------------------
class EstudanteUpdateView(UpdateView):
    # Edits every field of an existing Estudante.
    template_name = "website/atualiza.html"
    model = Estudante
    fields = '__all__'
    context_object_name = 'estudante'
    success_url = reverse_lazy("website:lista_estudantes")


# STUDENT DELETION
# ----------------------------------------------
class EstudanteDeleteView(DeleteView):
    # Confirmation page + deletion of an Estudante.
    template_name = "website/exclui.html"
    model = Estudante
    context_object_name = 'estudante'
    success_url = reverse_lazy("website:lista_estudantes")
# BIRTHDAY LIST
# ----------------------------------------------
class AniversariantesListView(ListView):
    """Lists the students whose birthday falls in the current month."""
    template_name = "website/aniversariantes.html"
    model = Estudante
    context_object_name = "estudantes"
    ordering = ['dataNascimento']

    def get_queryset(self):
        # Compute the month per request.  The original class-level
        # ``queryset = ...filter(dataNascimento__month=this_month)`` froze
        # the month at import time, so a long-running server process would
        # keep showing a stale month forever; the unused class attribute
        # ``next_month = datetime.now().month + 1`` could even be 13 in
        # December.
        this_month = datetime.now().month
        return (Estudante.objetos
                .filter(dataNascimento__month=this_month)
                .order_by(*self.ordering))
# STUDENT PROFILE
# ----------------------------------------------
class EstudantePerfilView(DetailView):
    template_name = "website/estudante.html"
    model = Estudante
    context_object_name = "estudante"

    def get_context_data(self, **kwargs):
        # Also expose the student's course/period enrolments to the template.
        context = super(EstudantePerfilView, self).get_context_data(**kwargs)
        context['cursoperiodoestudante'] = CursoPeriodoEstudante.objetos.filter(estudante_id=self.kwargs['pk'])
        return context


# COURSE LIST
# ----------------------------------------------
class CursoListView(ListView):
    template_name = "website/listacursos.html"
    model = Curso
    context_object_name = "cursos"


# COURSE PROFILE
# ----------------------------------------------
class CursoPerfilView(DetailView):
    template_name = "website/cursos.html"
    model = Curso
    context_object_name = "curso"

    def get_context_data(self, **kwargs):
        # Add the course's subjects and scheduled periods to the context.
        context = super(CursoPerfilView, self).get_context_data(**kwargs)
        context['materia'] = Materia.objetos.filter(curso_id=self.kwargs['pk'])
        context['cursoperiodo'] = CursoPeriodo.objetos.filter(curso_id=self.kwargs['pk'])
        return context


# COURSE DETAIL WITH STUDENT LIST
# ----------------------------------------------
class CursoPeriodoView(DetailView):
    template_name = "website/cursoperiodo.html"
    model = CursoPeriodo
    context_object_name = "cursoperiodo"

    def get_context_data(self, **kwargs):
        # Add the students enrolled in this course period.
        context = super(CursoPeriodoView, self).get_context_data(**kwargs)
        context['cursoperiodoestudante'] = CursoPeriodoEstudante.objetos.filter(cursoPeriodo_id=self.kwargs['pk'])
        return context
| [
"alexsteeve@gmail.com"
] | alexsteeve@gmail.com |
4b0df2a739e371cc097d3d87f76e0530b774c8e8 | c5238f0de5074b13a0d3f93e474a9cfac8ad0277 | /playback/studio/recordings_lookup.py | 067b5fdeaee186413e45a4b084937f0ae20b409b | [
"BSD-3-Clause"
] | permissive | mayacahana/playback | eb16ec865535d7e7b5148fb505946e4cb198ef9a | fc6083910a66130845d9f0d69853c886df504692 | refs/heads/main | 2023-04-10T12:14:37.673042 | 2021-03-25T09:12:45 | 2021-03-25T09:12:45 | 357,560,067 | 0 | 0 | BSD-3-Clause | 2021-04-13T13:17:25 | 2021-04-13T13:17:24 | null | UTF-8 | Python | false | false | 1,909 | py | from random import shuffle
class RecordingLookupProperties(object):
def __init__(self, start_date, end_date=None, metadata=None, limit=None, random_sample=False):
"""
:param start_date: Earliest date of recording
:type start_date: datetime.datetime
:param end_date: Latest date of recording
:type end_date: datetime.datetime
:param metadata: Optional metadata to filter recording by
:type metadata: dict
:param limit: Limit the number of recordings to fetch
:type limit: int
:param random_sample: True/False collect using random.shuffle (use random.seed to change selection)
:type random_sample: boolean
"""
self.start_date = start_date
self.end_date = end_date
self.metadata = metadata
self.limit = limit
self.random_sample = random_sample
def find_matching_recording_ids(tape_recorder, category, lookup_properties):
    """Iterate recording ids matching the given lookup properties.

    :param tape_recorder: tape recorder whose cassette holds the recordings
    :type tape_recorder: playback.tape_recorder.TapeRecorder
    :param category: recording category
    :type category: basestring
    :param lookup_properties: recording lookup properties
    :type lookup_properties: RecordingLookupProperties
    :return: iterator of recording ids
    :rtype: collections.Iterator[str]
    """
    props = lookup_properties
    # When sampling randomly we must fetch everything and trim only after
    # the shuffle, so the backend limit applies in the sequential case only.
    fetch_limit = None if props.random_sample else props.limit
    ids = tape_recorder.tape_cassette.iter_recording_ids(
        category,
        start_date=props.start_date,
        end_date=props.end_date,
        metadata=props.metadata,
        limit=fetch_limit,
    )
    if not props.random_sample:
        return ids
    pool = list(ids)
    shuffle(pool)
    return iter(pool[:props.limit])
| [
"eitan@optibus.com"
] | eitan@optibus.com |
19e8a2275e77b19545138e9ac5fef6f97bb8fcff | c1bd4f24c0962c3dde93421ae8a1d468e7ea8b3d | /Practica5/E2P5.py | 6aa82c95e5e939e1344f7f25c83ee6c53b9fd4c1 | [] | no_license | AdriAriasAlonso/Python | e9a8994dd08a7b5a962a5b22dca06ad96634394f | 16c122089dc9e7ec5d06edc12d5ff2d2c3cd5d4b | refs/heads/master | 2020-08-15T11:35:22.618884 | 2019-11-05T17:15:36 | 2019-11-05T17:15:36 | 215,334,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | #Ejercicio 2 Practica 5: Adrian Arias
n1=int(input("Porfavor, introduzca un número\n"))
n2=int(input("Porfavor, introduzca otro numero\n"))
for i in range(n1,(n2+1)):
if i%2==0:
print ("El numero", i,"Es par")
else:
print ("El numero", i,"Es impar")
if n1>n2:
for i in range(n2,(n1+1)):
if i%2==0:
print ("El numero", i,"Es par")
else:
print ("El numero", i,"Es impar")
| [
"aarias@cifpfbmoll.eu"
] | aarias@cifpfbmoll.eu |
3d9dac643af211b1c1a371f8ce54825e4b229f39 | ac7f3b7c5c9501d880671766439018a67b4a8abc | /Mclass.py | 4446c949803a2b04ff5e8de064913b02c9951103 | [] | no_license | Pratikshresth/Advance-Student-Management-System-using-Mysql-Database | ce63de404d277622bec5fed726172545aaeaea79 | d609410a7eeb246482ebefa08f96c2ec6664ee91 | refs/heads/master | 2023-06-18T04:24:35.825911 | 2021-07-16T09:09:46 | 2021-07-16T09:09:46 | 386,276,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | class Student:
def __init__(self,fname, lname, city, zipcode, state, gender, email, contact, dob, program,section,clgId):
self.fname=fname
self.lname=lname
self.city=city
self.zipcode=zipcode
self.state=state
self.gender=gender
self.email=email
self.contact=contact
self.dob=dob
self.program=program
self.section=section
self.clgID=clgId
def get_fname(self):
return self.fname
def set_fname(self,fname):
self.fname=fname
return self.fname
def get_lname(self):
return self.lname
def set_lname(self, lname):
self.fname = lname
return self.lname
def get_city(self):
return self.city
def set_city(self, city):
self.city = city
return self.city
def get_zipcode(self):
return self.zipcode
def set_zipcode(self, zipcode):
self.zipcode = zipcode
return self.zipcode
def get_state(self):
return self.state
def set_state(self, state):
self.state = state
return self.state
def get_gender(self):
return self.gender
def set_gender(self, gender):
self.gender = gender
return self.gender
def get_email(self):
return self.email
def set_email(self, email):
self.email = email
return self.email
def get_contact(self):
return self.contact
def set_contact(self, contact):
self.contact = contact
return self.contact
def get_dob(self):
return self.dob
def set_dob(self, dob):
self.dob = dob
return self.dob
def get_programs(self):
return self.program
def set_program(self, program):
self.program = program
return self.program
def get_section(self):
return self.section
def set_section(self, section):
self.section = section
return self.section
def get_clgID(self):
return self.clgID
def set_clgID(self, clgID):
self.clgID = clgID
return self.clgID
| [
"pratik.shrr@gmail.com"
] | pratik.shrr@gmail.com |
96a31bd87d182e38af66c9502dda52cbddd18184 | 9405aa570ede31a9b11ce07c0da69a2c73ab0570 | /aliyun-python-sdk-ons/aliyunsdkons/request/v20190214/OnsMessagePageQueryByTopicRequest.py | 18afe3920e90c8d8b1f8495a693430691275ac77 | [
"Apache-2.0"
] | permissive | liumihust/aliyun-openapi-python-sdk | 7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b | c7b5dd4befae4b9c59181654289f9272531207ef | refs/heads/master | 2020-09-25T12:10:14.245354 | 2019-12-04T14:43:27 | 2019-12-04T14:43:27 | 226,002,339 | 1 | 0 | NOASSERTION | 2019-12-05T02:50:35 | 2019-12-05T02:50:34 | null | UTF-8 | Python | false | false | 2,300 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class OnsMessagePageQueryByTopicRequest(RpcRequest):
    """RPC request wrapper for the ONS ``OnsMessagePageQueryByTopic`` API.

    Generated-SDK style: each getter/setter pair below mirrors one query
    parameter of the request (product 'Ons', API version 2019-02-14).
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Ons', '2019-02-14', 'OnsMessagePageQueryByTopic','ons')

    def get_PreventCache(self):
        return self.get_query_params().get('PreventCache')

    def set_PreventCache(self,PreventCache):
        self.add_query_param('PreventCache',PreventCache)

    def get_InstanceId(self):
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self,InstanceId):
        self.add_query_param('InstanceId',InstanceId)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self,PageSize):
        self.add_query_param('PageSize',PageSize)

    def get_Topic(self):
        return self.get_query_params().get('Topic')

    def set_Topic(self,Topic):
        self.add_query_param('Topic',Topic)

    def get_EndTime(self):
        return self.get_query_params().get('EndTime')

    def set_EndTime(self,EndTime):
        self.add_query_param('EndTime',EndTime)

    def get_BeginTime(self):
        return self.get_query_params().get('BeginTime')

    def set_BeginTime(self,BeginTime):
        self.add_query_param('BeginTime',BeginTime)

    def get_CurrentPage(self):
        return self.get_query_params().get('CurrentPage')

    def set_CurrentPage(self,CurrentPage):
        self.add_query_param('CurrentPage',CurrentPage)

    def get_TaskId(self):
        return self.get_query_params().get('TaskId')
def set_TaskId(self,TaskId):
self.add_query_param('TaskId',TaskId) | [
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
6e6bef69c641d624737033f588ec5773cedcedfa | eda5f94d7d1b8dffe7bd04b78a2002868aa245fa | /json_process.py | 89b33a4538ff6242f2adbb1c60e22b11833a33ef | [] | no_license | oychao1988/myScrapy | 566d353cbadf575a94e1d56ccc1ab80ff0484777 | 10e9ab3ad2878f9cc297d2519b11f77bf233c6f1 | refs/heads/master | 2020-04-24T22:35:38.645097 | 2019-02-24T09:16:03 | 2019-02-24T09:16:03 | 172,317,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import json
from jsonpath import jsonpath
with open('zitems.json', 'r') as f:
# json_str = f.read()
json_dict = json.load(f)
# print(json_dict)
result = jsonpath(json_dict, '$.data.results')[0]
print(type(result))
[print(each) for each in result] | [
"2296458423@qq.com"
] | 2296458423@qq.com |
425ae4bad5ec2bf6ae6e55096f9b329ab59d9a73 | 022b22d343e2c3d89a865c2b5d684e82c692771e | /frontend_docker/project/main/views.py | 753ec13fe01e0c17192f09af50c9bdade4d1cc2f | [
"MIT"
] | permissive | jessequinn/hbsis | f4050f5f0850001bc3284ce2c94266ccb00a4c70 | 149b8c41c75732dcbcc23e667831fdb42cab786e | refs/heads/master | 2022-12-18T01:13:27.354613 | 2019-02-08T10:27:35 | 2019-02-08T10:27:35 | 169,249,120 | 0 | 0 | MIT | 2022-12-08T01:35:31 | 2019-02-05T13:54:21 | JavaScript | UTF-8 | Python | false | false | 5,545 | py | import datetime
import json
import pytz
import urllib.request
from flask import render_template, request, flash, Blueprint, redirect, url_for
from flask_login import login_required, current_user
from project import app, db
from project.models import WeatherRegistration
from .forms import WeatherRegistrationForm
main_blueprint = Blueprint(
'main', __name__,
template_folder='templates'
)
def datetimefilter(value, format="%A"):
    '''
    Datetime filter for Jinja. Formats a Unix timestamp in US/Eastern time.
    :param value: seconds since the epoch (Unix timestamps are UTC-based)
    :param format: strftime format of the returned string; defaults to the
        day of week
    :return: formatted date string
    '''
    # Interpret the timestamp as UTC directly.  The previous implementation
    # used naive ``datetime.fromtimestamp`` (which yields *server-local*
    # time) and then re-labelled that value as UTC, shifting the result
    # whenever the server clock was not set to UTC.
    utc_dt = datetime.datetime.fromtimestamp(value, tz=pytz.utc)
    eastern = pytz.timezone('US/Eastern')
    local_dt = utc_dt.astimezone(eastern)
    return local_dt.strftime(format)
app.jinja_env.filters['datetimefilter'] = datetimefilter
@main_blueprint.route('/', methods=['GET', 'POST'])
@login_required
def home():
    '''
    Main page after login. Contains a search form for city weather forecast.

    GET renders the user's registered cities; POST validates the submitted
    city against the lookup service and registers it for the user.
    :return: rendered template
    '''
    # All weather registrations belonging to the logged-in user.
    weatherRegistrations = db.session.query(WeatherRegistration).filter_by(user_id=current_user.id).all()
    # NOTE(review): the lookup-service address is hard-coded; consider
    # moving it to configuration.
    with urllib.request.urlopen('http://localhost:5050/countries') as url:
        data = json.loads(url.read().decode())
    error = None
    form = WeatherRegistrationForm(request.form)
    form.country.choices = [(c, c) for c in data['data']]  # dynamically produce countries
    if request.method == 'POST':
        if form.validate_on_submit():
            if form.city.data != '':
                # Resolve the city name to a city id via the lookup service.
                with urllib.request.urlopen(
                        'http://localhost:5050/' + form.country.data.upper() + '/' + form.city.data.capitalize()) as url:
                    ids = json.loads(url.read().decode())
                if not ids['data']:
                    # Unknown city for that country.
                    error = 'No data exists for ' + form.city.data.capitalize() + '!'
                    return render_template('index.html', form=form, error=error, user=current_user, weatherRegistrations=weatherRegistrations)
                else:
                    if any(ids['data'][0]['id'] == wr.city_id for wr in weatherRegistrations):
                        # Duplicate registration for this user.
                        error = form.city.data.capitalize() + ' has already been registered.'
                        return render_template('index.html', form=form, error=error, user=current_user,
                                               weatherRegistrations=weatherRegistrations)
                    else:
                        new_weatherregistration = WeatherRegistration(form.city.data, ids['data'][0]['id'],
                                                                      form.country.data, current_user.id)
                        db.session.add(new_weatherregistration)
                        failed = False
                        try:
                            db.session.commit()
                        except Exception as e:
                            # Roll back so the session stays usable after a failed commit.
                            db.session.rollback()
                            db.session.flush()
                            failed = True
                            print(e)
                        if failed:
                            error = 'Error with registration.'
                            return render_template('index.html', form=form, error=error, user=current_user,
                                                   weatherRegistrations=weatherRegistrations)
                        else:
                            flash(form.city.data.capitalize() + ' was registered successfully.')
                            return redirect(url_for('main.home'))
            else:
                error = 'Enter a city name!'
    # GET requests and invalid POSTs fall through to the default render.
    return render_template('index.html', form=form, error=error, user=current_user,
                           weatherRegistrations=weatherRegistrations)
@main_blueprint.route('/forecast<id>')
@login_required
def forecast(id):
    '''
    5 day forecast page.
    :param id: city id
    :return: rendered template
    '''
    # NOTE(review): the OpenWeatherMap API key is hard-coded in the URL;
    # consider moving it to configuration / an environment variable.
    with urllib.request.urlopen(
            'http://api.openweathermap.org/data/2.5/forecast/daily?id=' + id + '&cnt=5&APPID=eb8b1a9405e659b2ffc78f0a520b1a46&units=metric') as url:
        data = json.loads(url.read().decode())
    return render_template('forecast.html', data=data)
@main_blueprint.route('/remove<id>')
@login_required
def remove(id):
    '''
    Function simply removes city from list of cities.
    :param id: weather-registration id (primary key), not the city id
    :return: rendered template
    '''
    # The form is rebuilt here only so it can be handed back to the
    # template if the delete fails.
    with urllib.request.urlopen('http://localhost:5050/countries') as url:
        data = json.loads(url.read().decode())
    form = WeatherRegistrationForm(request.form)
    form.country.choices = [(c, c) for c in data['data']]  # dynamically produce countries
    db.session.query(WeatherRegistration).filter_by(id=id).delete()
    failed = False
    try:
        db.session.commit()
    except Exception as e:
        # Keep the session usable after a failed commit.
        db.session.rollback()
        db.session.flush()
        failed = True
        print(e)
    if failed:
        error = 'Could not remove registration.'
        weatherRegistrations = db.session.query(WeatherRegistration).filter_by(user_id=current_user.id).all()
        return render_template('index.html', form=form, error=error, user=current_user,
                               weatherRegistrations=weatherRegistrations)
    else:
        flash('Registration was removed successfully.')
        return redirect(url_for('main.home'))
| [
"me@jessequinn.info"
] | me@jessequinn.info |
b6e20b9bd79614adeceeaad5b46e60d0cc2692ef | c9d50f032e2ba9144933ec913663d9d6ad264151 | /ShopKeeper/Listener/apps.py | 9e0e89a1f7c1965cbc11959bbe804918c9a70d31 | [] | no_license | RockMeroll/ShopKeeper | 3acf47bfc110f1c26f45c5d91df60ac047794b7a | 471befc0ca967783724afd5b52eb47f061aaedf0 | refs/heads/master | 2021-01-20T08:19:47.336508 | 2017-08-27T13:45:58 | 2017-08-27T13:45:58 | 101,554,938 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class ListenerConfig(AppConfig):
    # Django application configuration for the "Listener" app.
    name = 'Listener'
| [
"rockmeroll@hotmail.com"
] | rockmeroll@hotmail.com |
587b7372864dc0539ed6c1cfc0f20d8d6a086cdd | 575c65f490117c12d033a235fa68e3cb4438c602 | /main.py | 0d75c45e71b74f025352fd2536ded28618121711 | [] | no_license | Crucizer/Audio-Player | a115bed97fc37660545f1b51792bfa6e47c59159 | d2617af2002ded1f7b8b88b2aacb5ccf5ae94802 | refs/heads/master | 2022-11-15T00:45:29.742484 | 2020-07-11T14:47:11 | 2020-07-11T14:47:11 | 274,173,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,467 | py | from PyQt5 import QtWidgets, QtGui
# , QLineEdit, QVBoxLayout, QHBoxLayout
from PyQt5.QtWidgets import QApplication, QMainWindow, QSlider
from PyQt5.QtGui import QColor # , QMovie
from PyQt5.QtCore import Qt
import sys
import pygame as pg
from mutagen.mp3 import MP3
import os
import threading
pg.init()
class window(QMainWindow):
    def __init__(self):
        """Build the main window, menu bar and playback controls.

        NOTE(review): the slots wired below (``open_mp3``, ``close_app``,
        ``pause``, ``play``, ``stop``, ``volume_down``) are defined further
        down in this class, outside the current view - confirm they exist.
        """
        super(window, self).__init__()
        self.setGeometry(425, 65, 400, 190)
        self.setWindowIcon(QtGui.QIcon("icon"))
        self.setWindowTitle("MultiMedia Player")
        # MenuBar
        file = QtWidgets.QAction("&Open Mp3", self)
        file.setShortcut("Ctrl + O")
        file.triggered.connect(self.open_mp3)
        # Quit
        quit = QtWidgets.QAction("&Quit", self)
        quit.setShortcut("Ctrl + Q")
        quit.triggered.connect(self.close_app)
        # Add Items
        items = QtWidgets.QAction("&Add Items", self)
        items.setShortcut("Ctrl + A")
        # items.triggered.connect(self.items)
        mainmenu = self.menuBar()
        filemenu = mainmenu.addMenu("&Open")
        filemenu.addAction(file)
        add_items = mainmenu.addMenu("&Add Items")
        add_items.addAction(items)
        filemenu.addAction(quit)
        # flag == 0 -> the background position thread keeps updating the
        # seek slider (see cur_time).
        self.flag = 0
        self.home()

    def home(self):
        """Create the playback buttons and the seek/volume sliders."""
        # colors
        # NOTE(review): these colour tuples (and ``color`` below) are
        # defined but never applied to any widget - confirm whether
        # theming was intended here.
        black = (13, 13, 13)
        light_black = (36, 36, 36)
        # Pause Button
        self.pause_btn = QtWidgets.QPushButton(self)
        self.pause_btn.setText("Pause")
        self.pause_btn.setShortcut("p")
        self.pause_btn.move(0, 120)
        self.pause_btn.clicked.connect(self.pause)
        # Play Button
        self.play_btn = QtWidgets.QPushButton(self)
        self.play_btn.setText("Play")
        self.play_btn.setShortcut("Space")
        self.play_btn.move(150, 120)
        self.play_btn.clicked.connect(self.play)
        # Stop Button
        self.stop_btn = QtWidgets.QPushButton(self)
        self.stop_btn.setText("Stop")
        self.stop_btn.setShortcut("s")
        self.stop_btn.move(300, 120)
        self.stop_btn.clicked.connect(self.stop)
        # color for the window
        color = QColor(70, 70, 70)
        # Volume_Up Button
        self.vup_btn = QtWidgets.QPushButton(self)
        self.vup_btn.setText("V(+)")
        self.vup_btn.setShortcut("+")
        self.vup_btn.move(300, 160)
        self.vup_btn.clicked.connect(self.volume_up)
        # Volume_Down Button
        self.vdown_btn = QtWidgets.QPushButton(self)
        self.vdown_btn.setText("V(-)")
        self.vdown_btn.setShortcut("-")
        self.vdown_btn.move(0, 160)
        self.vdown_btn.clicked.connect(self.volume_down)
        # Seek Slider
        self.slider = QSlider(Qt.Horizontal, self)
        self.slider.setGeometry(20, 75, 350, 20)
        # Volume Slider (0-100, default 70)
        self.v_slider = QSlider(Qt.Horizontal, self)
        self.v_slider.setGeometry(120, 165, 160, 20)
        self.v_slider.setMinimum(0)
        self.v_slider.setMaximum(100)
        self.v_slider.setValue(70)
        self.volume_value = self.v_slider.value()
def msg(self, title, message):
msg1 = QtWidgets.QMessageBox() # self maybe
msg1.setWindowIcon(QtGui.QIcon("icon"))
msg1.setWindowTitle(title)
msg1.setText(message)
msg1.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg1.exec_()
def open_mp3(self):
name = QtWidgets.QFileDialog.getOpenFileName(self)
format = os.path.splitext(name[0])
if format[1] == ".mp3":
self.audio = MP3(name[0])
self.duration = self.audio.info.length // 1
self.min = int(self.duration // 60)
self.sec = int(self.duration % 60)
self.total_time = str(self.min) + ":" + str(self.sec)
self.slider.setMaximum(self.duration)
self.slider.setMinimum(0)
self.label = QtWidgets.QLabel(self)
self.label.setText(self.total_time)
self.label.setFont(QtGui.QFont("Arial", 9))
self.label.adjustSize()
self.label.move(373, 77)
self.label.show()
song = name[0]
pg.mixer.music.load(song)
pg.mixer.music.play(1)
pg.mixer.music.set_volume(self.v_slider.value() / 100)
self.label = QtWidgets.QLabel(self)
self.label.setText(song.split("/")[-1])
self.label.setFont(QtGui.QFont("Arial", 15))
self.label.adjustSize()
self.label.move(0, 36)
self.label.show()
threading_1 = threading.Thread(target=self.cur_time).start()
else:
self.msg("Invalid Format", "Choose A .Mp3 File Only!")
volume_level = pg.mixer.music.get_volume()
# print(volume_level)
def cur_time(self):
# NEEDS EDITING-----NEEDS EDITING-----NEEDS EDITING-----NEEDS EDITING-----NEEDS EDITING-----NEEDS EDITING
true = 1
while true == 1:
if self.flag == 0:
self.m_time = pg.mixer.music.get_pos()
self.mm_time = self.m_time * 0.001
self.s_time = self.mm_time // 1
self.slider.setValue(self.s_time)
if self.s_time == -1:
true = 2
def slider_value_changed(self):
self.volume_value = self.v_slider.value()
pg.mixer.music.set_volume(self.v_slider.value() / 100)
def volume_up(self):
#self.v_slider.value() - 10
self.volume_value = self.volume_value + 10
# print(self.volume_value)
self.v_slider.setValue(self.volume_value)
if self.volume_value >= 100:
self.volume_value = 100
# pg.mixer.music.set_volume(self.sound)
pg.mixer.music.set_volume(self.v_slider.value() / 100)
# print(self.v_slider.value() / 100)
def volume_down(self):
self.volume_value = self.volume_value - 10
self.v_slider.setValue(self.volume_value)
if self.volume_value <= 0:
self.volume_value = 0
pg.mixer.music.set_volume(self.v_slider.value() / 100)
# print(self.v_slider.value() / 100)
    def pause(self):
        """Pause playback; flag=1 suspends slider updates in cur_time()."""
        pg.mixer.music.pause()
        self.flag = 1
    def stop(self):
        """Stop playback; flag=-1 so cur_time() stops updating the slider."""
        pg.mixer.music.stop()
        self.flag = -1
    def play(self):
        """Resume playback; flag=0 re-enables slider updates in cur_time()."""
        pg.mixer.music.unpause()
        self.flag = 0
def close_app(self):
choice = QtWidgets.QMessageBox.question(
self, "QUIT", "You Sure You Wanna Quit?", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
if choice == QtWidgets.QMessageBox.Yes:
sys.exit()
else:
pass
    def items(self):
        """(Work in progress) Build a drag-and-drop playlist widget.

        NOTE(review): the layout and list widget created here are never
        attached to a shown container, so calling this currently has no
        visible effect, and `song_name` is unused -- confirm intent.
        """
        # add item name to a list and then use this to add
        layout = QtWidgets.QVBoxLayout(self)
        song_name = QtWidgets.QFileDialog.getOpenFileName(self)
        widget = QtWidgets.QListWidget()
        widget.setAlternatingRowColors(True)
        # Allow reordering entries by dragging them within the list.
        widget.setDragDropMode(
            QtWidgets.QAbstractItemView.InternalMove)
        # Placeholder entries "1".."5" until real track names are wired in.
        widget.addItems([str(i) for i in range(1, 6)])
        layout.addWidget(widget)
# Application entry point: create the Qt application, show the main window,
# and hand control to the event loop until the window is closed.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = window()
    win.show()
    sys.exit(app.exec_())
| [
"noreply@github.com"
] | noreply@github.com |
e0e0ac6d1685609a798fb83ff12f8a72911f6e01 | e0e255c7f9adb78044e2fb67b5a9cb85e17b7558 | /actions/actions.py | 37a3b1de3088942a544eb3efdfe1ca8964772d87 | [] | no_license | igupta967937/appointment_bot | f4cc3db7aa96d899fa20ee7fa9fe3cd88a98a0ca | 834ab7d38ebdd3437955fccdea34e6506e5af422 | refs/heads/main | 2023-03-07T00:34:27.591527 | 2021-02-12T12:34:34 | 2021-02-12T12:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | # This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these actions:
# https://rasa.com/docs/rasa/custom-actions
# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
class ActionLive(Action):
    """Custom Rasa action that greets the user when triggered."""

    def name(self) -> Text:
        """Return the action name referenced from the domain file."""
        return "action_live"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        greeting = "Hi, how can I help you?"
        dispatcher.utter_message(text=greeting)
        return []
class ActionResourcesLink(Action):
    """Custom Rasa action that replies with the EAP India resources URL."""

    def name(self) -> Text:
        """Return the action name referenced from the domain file."""
        return "action_resources_link"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        resources_url = "http://www.eap-india.com/resources/"
        dispatcher.utter_message(text=resources_url)
        return []
class ActionBook(Action):
    """Custom Rasa action that hands out the in-person appointment address."""

    def name(self) -> Text:
        """Return the action name referenced from the domain file."""
        return "action_book"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        address = "SP-TBI, Andheri West, Mumbai"
        reply = "Here is the address for your appointment: {}".format(address)
        dispatcher.utter_message(text=reply)
        return []
class ActionBookTele(Action):
    """Custom Rasa action confirming a telephonic session booking."""

    def name(self) -> Text:
        """Return the action name referenced from the domain file."""
        return "action_book_tele"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        confirmation = "Your telephonic session has been booked"
        dispatcher.utter_message(text=confirmation)
        return []
| [
"noreply@github.com"
] | noreply@github.com |
3055dd516e30c7c07aa5907cd892549bbcf990a3 | 1fccf52e0a694ec03aac55e42795487a69ef1bd4 | /src/euler_python_package/euler_python/medium/p194.py | 8d5449446e6261f855f92632dbc27e892a2191d5 | [
"MIT"
] | permissive | wilsonify/euler | 3b7e742b520ee3980e54e523a018cd77f7246123 | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | refs/heads/master | 2020-05-27T12:15:50.417469 | 2019-09-14T22:42:35 | 2019-09-14T22:42:35 | 188,614,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | def problem194():
pass
| [
"tom.andrew.wilson@gmail.com"
] | tom.andrew.wilson@gmail.com |
5c9d0b80d96f14ba60beab833bd3810b25a334f9 | 139318c3c285aca8e1600dbf10192fd91e62a356 | /app/msg_api/msg_routes.py | d8b580b3ddf131dc3839b7e3da720dc6a3d78673 | [] | no_license | Ym4n/messaging-system-python-flask-api | c6543eb2b2a496a6b79ea20281f589ec4a882c42 | 47a9a6272a19167f15ce3b0bca3847929019beee | refs/heads/main | 2023-08-11T21:34:35.644860 | 2021-09-25T08:43:08 | 2021-09-25T08:43:08 | 372,399,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,257 | py | from flask import request, jsonify
from flask_login import login_required, current_user
from app.msg_api import msgapi_bp
from app.db_users import User, Group
from app.db_msg import Messages, Msg_Recipient
from app import db
def msg_is_valid(msg):
    """Return True when the payload carries every required message field."""
    required_fields = ("recipient", "message", "subject")
    return all(field in msg for field in required_fields)
@msgapi_bp.route('/send_msg', methods=['POST'])
@login_required
def send_msg():
    """Send a message to a single user or to every member of a group.

    The JSON body must contain 'recipient', 'subject' and 'message'.
    'recipient' is resolved first as a username, then as a group name.
    """
    new_msg = request.get_json() or {}
    if not msg_is_valid(new_msg):
        # NOTE(review): the text reads "valid input" on the invalid-payload
        # branch -- probably meant "invalid input"; confirm before changing
        # the API-visible string.
        return jsonify(error="valid input , missing data"), 404
    # Try the recipient as an individual user first.
    recipient = User.query.filter_by(username=new_msg['recipient']).first()
    if recipient:
        msg = Messages(subject=new_msg['subject'], message=new_msg['message'], sent=current_user,
                       recipient_name=new_msg['recipient'])
        # Link row tying the message to its (single) recipient.
        msg_r = Msg_Recipient(user_recipient=recipient, msg_id=msg)
        db.session.add(msg)
        db.session.add(msg_r)
        db.session.commit()
        return "the message has been sent", 200
    # Otherwise try the recipient as a group name.
    group = Group.query.filter_by(name=new_msg['recipient']).first()
    if group:
        # add Messages: one shared message row, flagged as a group message
        msg = Messages(subject=new_msg['subject'], message=new_msg['message'], sent=current_user,
                       msg_for_group=True, recipient_name=new_msg['recipient'])
        db.session.add(msg)
        # add recipients: one link row per group member
        for user in group.users:
            db.session.add(Msg_Recipient(user_recipient=user, group_recipient=group, msg_id=msg))
        db.session.commit()
        return "the message has been sent to all recipient in the group", 200
    # Neither a user nor a group matched the requested recipient.
    return jsonify(error="invalid recipient"), 404
@msgapi_bp.route('/sent', methods=['GET'])
@login_required
def sent():
    """Return the current user's sent messages that they have not deleted."""
    outbox = db.session.query(Messages) \
        .filter(Messages.creator_id == current_user.id) \
        .filter(Messages.creation_del == False)
    summaries = [message.to_dict(shortened=True) for message in outbox]
    return jsonify(sent_Messages=summaries), 200
@msgapi_bp.route('/inbox', methods=['GET'])
@login_required
def inbox():
    """Return every message addressed to the current user, minus deleted ones."""
    received = db.session.query(Messages).join(Msg_Recipient) \
        .filter(Msg_Recipient.recipient_id == current_user.id) \
        .filter(Msg_Recipient.recipient_del == False).all()
    payload = [message.to_dict(shortened=True) for message in received]
    return jsonify(Inbox_messages=payload), 200
@msgapi_bp.route('/unread', methods=['GET'])
@login_required
def unread_inbox():
    """Return the subset of the current user's inbox that is still unread."""
    pending = db.session.query(Messages).join(Msg_Recipient) \
        .filter(Msg_Recipient.recipient_id == current_user.id) \
        .filter(Msg_Recipient.recipient_del == False) \
        .filter(Msg_Recipient.is_read == False).all()
    payload = [message.to_dict(shortened=True) for message in pending]
    return jsonify(Inbox_unread_messages=payload), 200
@msgapi_bp.route('/inbox/read/<int:msg_id>', methods=['GET'])
@login_required
def inbox_read_msg(msg_id):
    """Return one inbox message's full body and mark it as read."""
    msg = Messages.query.filter_by(id=msg_id).first()
    # Unknown message id.
    if not msg:
        return jsonify("Invalid input , check msg id"), 404
    msg_recipient = db.session.query(Msg_Recipient) \
        .filter(Msg_Recipient.recipient_id == current_user.id) \
        .filter(Msg_Recipient.message_id == msg.id).first()
    # Message exists but is not addressed to the current user.
    # Fix: this branch previously answered 200 while every other
    # invalid-id branch in this module answers 404; use 404 consistently.
    if not msg_recipient:
        return jsonify("Invalid input , check msg id"), 404
    # Message was soft-deleted by the recipient.
    if msg_recipient.recipient_del:
        return jsonify("Invalid input , check msg id"), 404
    # First read flips the unread flag.
    if not msg_recipient.is_read:
        msg_recipient.is_read = True
        db.session.commit()
    return jsonify(message=msg.to_dict()), 200
@msgapi_bp.route('/sent/read/<int:msg_id>', methods=['GET'])
@login_required
def sent_read_msg(msg_id):
    """Return the full body of one message the current user sent."""
    msg = Messages.query.filter_by(id=msg_id).first()
    # Unknown ids, soft-deleted messages and other users' messages all get
    # the same opaque 404 so message ids cannot be probed.
    if msg is None or msg.creation_del or msg.creator_id != current_user.id:
        return jsonify("Invalid input , check msg id"), 404
    return jsonify(message=msg.to_dict()), 200
@msgapi_bp.route('/delet_sent_msg/<int:msg_id>', methods=['DELETE'])
@login_required
def delet_sent_msg(msg_id):
    """Soft-delete a sent message for its author (the row itself is kept)."""
    msg = Messages.query.filter_by(id=msg_id).first()
    # Unknown id, already deleted, or not authored by the current user.
    if msg is None or msg.creation_del or msg.creator_id != current_user.id:
        return jsonify("Invalid input , check msg id"), 404
    msg.creation_del = True
    db.session.commit()
    return jsonify("Message deleted"), 202
@msgapi_bp.route('/delet_inbox_msg/<int:msg_id>', methods=['DELETE'])
@login_required
def delet_inbox_msg(msg_id):
    """Soft-delete a received message for the current user only.

    Only the recipient_del flag on the recipient link row is raised;
    the message row (and the sender's copy) is untouched.
    """
    msg = Messages.query.filter_by(id=msg_id).first()
    # wrong id , id not found.
    if not msg:
        return jsonify("Invalid input , check msg id"), 404
    msg_recipient = db.session.query(Msg_Recipient)\
        .filter(Msg_Recipient.recipient_id == current_user.id)\
        .filter(Msg_Recipient.message_id == msg.id).first()
    # msg not addressed to the current user
    if not msg_recipient:
        return jsonify("Invalid input , check msg id"), 404
    # msg already deleted by this recipient
    if msg_recipient.recipient_del:
        return jsonify("Invalid input , check msg id"), 404
    msg_recipient.recipient_del = True
    db.session.commit()
    return jsonify("Message deleted"), 202
| [
"noreply@github.com"
] | noreply@github.com |
967b041df488aabe60dd198a880e7b38e873786f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2524/60618/287300.py | 48f5b7e392369d39af807d2dddd3b1123248d6db | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | print("1 3 2 4") | [
"1069583789@qq.com"
] | 1069583789@qq.com |
03bb1c24d6f897fedea3dc2569117cf6c7f50df2 | a5ed71c4f897132a3505107acbf27635f7c13f9c | /Программирование/labs/okno.py | 0b1efa39f56e50993a7ab27430cbe52c1426c510 | [] | no_license | Ksupall/BMSTU_1sem | ecee121ccd8be43165040fd4567f34a935283367 | 4fa47faaf42eea7728202d9a1ed0ab86e900bf2e | refs/heads/master | 2022-12-14T04:29:34.317952 | 2020-09-09T11:07:27 | 2020-09-09T11:07:27 | 294,084,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | from tkinter import *
from math import sin, cos
# Build the window and a dark canvas, draw a grid with axes, then plot one
# period-scaled sine and cosine curve on top.
root = Tk()
root.title('Первое окно в Питоне')
root.geometry('1020x620')
canvas = Canvas(root, width=1020, height=620, bg='#002')
# Vertical grid lines every 50 px.
for col in range(21):
    offset = 50 * col
    canvas.create_line(10 + offset, 610, 10 + offset, 10, width=1, fill='#007')
# Horizontal grid lines every 50 px.
for row in range(13):
    offset = 50 * row
    canvas.create_line(10, 10 + offset, 1010, 10 + offset, width=1, fill='#007')
# Y axis (arrow up) and X axis (arrow right).
canvas.create_line(10, 10, 10, 610, width=1, arrow=FIRST, fill='white')
canvas.create_line(1010, 310, 10, 310, width=1, arrow=FIRST, fill='white')
# Curve parameters: angular step, x offset, amplitude, y offset.
w = 0.02
phi = 10
A = 100
dy = 310
# Flat [x0, y0, x1, y1, ...] coordinate lists for create_line().
sin_points = []
for x in range(1000):
    sin_points.extend((x + phi, sin(x * w) * A + dy))
sin_line = canvas.create_line(sin_points, fill='pink')
cos_points = []
for x in range(1000):
    cos_points.extend((x + phi, cos(x * w) * A + dy))
cos_line = canvas.create_line(cos_points, fill='blue')
canvas.pack()
root.mainloop()
| [
"KsenyaPolyakovaaa@mail.ru"
] | KsenyaPolyakovaaa@mail.ru |
ab8a538e9a834d5700cf3ac3cd559f7204e23eb8 | 5a7812908f03a96d224dd6afd5e902eaa4a1bc72 | /manage.py | 3aaaca92c24e1833de0e32565da906078c62a89d | [] | no_license | abztrakt/django-badger | 6be5da8eb14b4a1c3b5bd494294ad0e26cd4439a | 4e5a927fdcacfd7aa7f78c713c7d30d3f8ecdd3a | refs/heads/master | 2020-07-10T13:39:35.212601 | 2015-06-17T18:31:33 | 2015-06-17T18:31:33 | 15,320,494 | 0 | 0 | null | 2015-11-13T17:10:25 | 2013-12-19T19:20:39 | Python | UTF-8 | Python | false | false | 309 | py | #!/usr/bin/env python
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_settings")
def nose_collector():
import nose
return nose.collector()
if __name__ == "__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"me@lmorchard.com"
] | me@lmorchard.com |
8ac96c23be4a2472b6a1c35d2727a8277d640cef | e3cc4d1cd686dae93b0519244c883816cf79b794 | /tools/ISMmainplotfunction/ISMtools/hgashalf_out.py | 9d2d854132331f4ec85c283ea6c3da0cff8a3b72 | [] | no_license | tkc004/samsonprogram | 620a095ab04719e5339f77968ce444c4c037841b | db24d26fb2100e6b71e93dc82863c3b95fe95126 | refs/heads/master | 2021-01-20T01:46:14.873434 | 2019-09-17T01:05:24 | 2019-09-17T01:05:24 | 89,327,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,624 | py | from stdmodandoption import *
import collections
def hgashalf_out(ssdict):
nested_dict = lambda: collections.defaultdict(nested_dict)
plotdict = nested_dict()
runtodo=ssdict['runtodo']
wanted=ssdict['wanted']
print 'wanted', wanted
startno=ssdict['startno']
Nsnap=ssdict['Nsnap']
snapsep=ssdict['snapsep']
the_prefix=ssdict['the_prefix']
the_suffix=ssdict['the_suffix']
fmeat=ssdict['fmeat']
maxlength=ssdict['maxlength'] #thickness
withinr=ssdict['withinr']
withoutr = ssdict['withoutr']
title = ssdict['title']
titleneed=title
nogrid=10
xaxis_snapno=0
if not (wanted=='hgas' or wanted=='mgasz' or wanted=='hcr' or wanted=='hth'):
print 'wrong wanted'
return None
resoneed=0
rotface=1
loccen=1
newlabelneed=1
print 'runtodo', runtodo
if wanted=='hgas':
nogrid = 15
dr = withinr/nogrid
rlist = np.linspace(withoutr,withinr,num=nogrid)
zlist = np.linspace(0.01,maxlength,num=50)
hglist=rlist*0.
if wanted=='mgasz':
nogrid = 15
dr = withinr/nogrid
rlist = np.linspace(withoutr,withinr,num=nogrid)
mgzlist=rlist*0.
if wanted=='hcr':
nogrid = 15
dr = withinr/nogrid
rlist = np.linspace(withoutr,withinr,num=nogrid)
zlist = np.linspace(0.01,maxlength,num=50)
hcrlist=rlist*0.
if wanted=='hth':
nogrid = 15
dr = withinr/nogrid
rlist = np.linspace(withoutr,withinr,num=nogrid)
zlist = np.linspace(0.01,maxlength,num=50)
hthlist=rlist*0.
numoftimes=0
for i in range(startno,Nsnap,snapsep):
snaplist=[]
info=SSF.outdirname(runtodo, i)
rundir=info['rundir']
runtitle=info['runtitle']
slabel=info['slabel']
snlabel=info['snlabel']
dclabel=info['dclabel']
resolabel=info['resolabel']
the_snapdir=info['the_snapdir']
Nsnapstring=info['Nsnapstring']
havecr=info['havecr']
Fcal=info['Fcal']
iavesfr=info['iavesfr']
timestep=info['timestep']
cosmo=info['cosmo']
maindir=info['maindir']
color=info['color']
haveB=info['haveB']
M1speed=info['M1speed']
newlabel=info['newlabel']
snumadd=info['snumadd']
usepep=info['usepep']
halostr=info['halostr']
ptitle=title
#if runtitle=='SMC':
# ptitle='Dwarf'
#elif runtitle=='SBC':
# ptitle='Starburst'
#elif runtitle=='MW':
# ptitle=r'$L\star$ Galaxy'
labelneed=dclabel
if newlabelneed==1:
labelneed="\n".join(wrap(newlabel,17))
if cosmo==1:
h0=1
else:
h0=0
Gextra = SSF.readsnapwcen(the_snapdir, Nsnapstring, 0, snapshot_name=the_prefix, extension=the_suffix,\
havecr=havecr,h0=h0,cosmo=cosmo, usepep=usepep, maindir=maindir,snumadd=snumadd,rotface=rotface,\
loccen=loccen,runtodo=runtodo,rundir=rundir,halostr=halostr)
Gx = Gextra['p'][:,0]; Gy = Gextra['p'][:,1]; Gz = Gextra['p'][:,2];
Grxy = np.sqrt(Gx*Gx+Gy*Gy)
Gvx = Gextra['v'][:,0]; Gvy = Gextra['v'][:,1]; Gvz = Gextra['v'][:,2];
Grho = Gextra['rho']; Gu = Gextra['u']; Gm = Gextra['m'];
if wanted=='hgas':
for ir in range(len(rlist)-1):
cutxy = (Grxy > rlist[ir]) & (Grxy < rlist[ir+1])
Gmcutxy = Gm[cutxy]*1e10
Gzcutxy = Gz[cutxy]
gmzlist = []
for iz in range(len(zlist)-1):
cutz = (np.absolute(Gzcutxy) < zlist[iz+1])
Gmcutz = Gmcutxy[cutz]
gmzlist = np.append(gmzlist,np.sum(Gmcutz))
print 'zlist', zlist
print 'gmzlist', gmzlist
hgas = np.interp(gmzlist[-1]/2.0,gmzlist,zlist[1:])
hglist[ir] += hgas
if wanted=='mgasz':
for ir in range(len(rlist)-1):
cutxy = (Grxy > rlist[ir]) & (Grxy < rlist[ir+1])
Gmcutxy = Gm[cutxy]*1e10 #in Msun
Gzcutxy = Gz[cutxy]
cutz = (np.absolute(Gzcutxy) < maxlength/2.0) & (np.absolute(Gzcutxy) > 1.0)
Gmcutz = Gmcutxy[cutz]
mgzlist[ir] += np.sum(Gmcutz)
if wanted=='hcr':
cregy = Gextra['cregy']
for ir in range(len(rlist)-1):
cutxy = (Grxy > rlist[ir]) & (Grxy < rlist[ir+1])
cregycutxy = cregy[cutxy]
Gzcutxy = Gz[cutxy]
cregyzlist = []
for iz in range(len(zlist)-1):
cutz = (np.absolute(Gzcutxy) < zlist[iz+1])
cregycutz = cregycutxy[cutz]
cregyzlist = np.append(cregyzlist,np.sum(cregycutz))
hcr = np.interp(cregyzlist[-1]/2.0,cregyzlist,zlist[1:])
hcrlist[ir] += hcr
if wanted=='hth':
for ir in range(len(rlist)-1):
cutxy = (Grxy > rlist[ir]) & (Grxy < rlist[ir+1])
Ethcutxy = Gu[cutxy]*Gm[cutxy]
Gzcutxy = Gz[cutxy]
thzlist = []
for iz in range(len(zlist)-1):
cutz = (np.absolute(Gzcutxy) < zlist[iz+1])
Ethcutz = Ethcutxy[cutz]
thzlist = np.append(thzlist,np.sum(Ethcutz))
hth = np.interp(thzlist[-1]/2.0,thzlist,zlist[1:])
hthlist[ir] += hth
numoftimes+=1
rmlist = (rlist[:-1]+rlist[1:])/2.
plotdict[wanted]['xlab'] = r'${\rm r\;[kpc]}$'
if wanted=='hgas':
plotdict[wanted]['ylab'] = r'$h_{\rm gas,1/2}\;[{\rm kpc}]$'
plotdict[wanted]['ynl'] = hglist[:-1]/numoftimes
plotdict[wanted]['xnl'] = rmlist
if wanted=='mgasz':
plotdict[wanted]['ylab'] = r'$M_{\rm gas}\;[{\rm M_\odot}]$'
plotdict[wanted]['ynl'] = mgzlist[:-1]/numoftimes
plotdict[wanted]['xnl'] = rmlist
if wanted=='hcr':
plotdict[wanted]['ylab'] = r'$h_{\rm cr,1/2}\;[{\rm kpc}]$'
plotdict[wanted]['ynl'] = hcrlist[:-1]/numoftimes
plotdict[wanted]['xnl'] = rmlist
if wanted=='hth':
plotdict[wanted]['ylab'] = r'$h_{\rm th,1/2}\;[{\rm kpc}]$'
plotdict[wanted]['ynl'] = hthlist[:-1]/numoftimes
plotdict[wanted]['xnl'] = rmlist
plotdict[wanted]['runtodo'] = runtodo
plotdict[wanted]['labelneed'] = labelneed
plotdict[wanted]['lw'] = 2
plotdict[wanted]['lsn'] = 'solid'
plotdict[wanted]['marker'] = 'None'
plotdict[wanted]['color'] = color
plotdict[wanted]['runtitle'] = runtitle
plotdict[wanted]['ptitle'] = ptitle
if wanted=='hgas':
filename=plotloc+'CRplot/hgas/hgas_'+fmeat+'_sn'+str(startno)+'_'+str(Nsnap)+'.pdf'
plotdict[wanted]['filename'] = filename
if wanted=='mgasz':
filename=plotloc+'CRplot/mgasz/mgasz_'+fmeat+'_sn'+str(startno)+'_'+str(Nsnap)+'.pdf'
plotdict[wanted]['filename'] = filename
if wanted=='hcr':
filename=plotloc+'CRplot/hcr/hcr_'+fmeat+'_sn'+str(startno)+'_'+str(Nsnap)+'.pdf'
plotdict[wanted]['filename'] = filename
if wanted=='hth':
filename=plotloc+'CRplot/hth/hth_'+fmeat+'_sn'+str(startno)+'_'+str(Nsnap)+'.pdf'
plotdict[wanted]['filename'] = filename
return plotdict
| [
""
] | |
71066e4fbe1454df9a36b16d98c5833f9c368392 | 4d88b5b2a40f140f9235f46a083b51655cfc0c93 | /orastorage/model.py | e94881927b361671cb80ce230dcfc80f30e8a089 | [
"Apache-2.0"
] | permissive | taewanme/orastorage-py | 1c5aac05497f0e91de9bcbea34d9fc5ab4b871a2 | 33b383ef9858fe16503451de781fcc6553592655 | refs/heads/master | 2021-01-21T06:46:50.577726 | 2017-03-02T06:50:42 | 2017-03-02T06:50:42 | 83,281,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
class Identity:
    """
    Oracle Storage Cloud credentials for one user.

    Holds the identity domain, user id and (optionally) the password, and
    derives the REST endpoint, storage-user string and account name that
    are used to authenticate against the service.
    """
    def __init__(self, identity_domain, user_id, password=None):
        self._identity_domain = identity_domain
        self._user_id = user_id
        self._password = password
        # Derived connection strings, computed once up front.
        self._rest_endpoint = 'https://%s.storage.oraclecloud.com' % identity_domain
        self._storage_user = 'Storage-%s:%s' % (identity_domain, user_id)
        self._account = 'Storage-%s' % identity_domain

    def __str__(self):
        # One "label: value" pair per line; the password itself is never
        # printed, only whether one was supplied.
        lines = [
            'Identity domain: %s' % self._identity_domain,
            'user id: %s' % self._user_id,
            'Is password saved?: %s' % (self._password is not None),
            'REST endpoint: %s' % self._rest_endpoint,
            'Storage user: %s' % self._storage_user,
            'Account: %s' % self._account,
        ]
        return '\n'.join(lines)

    def get_rest_endpoint(self):
        return self._rest_endpoint

    def get_identity_domain(self):
        return self._identity_domain

    def get_password(self):
        return self._password

    def get_storage_user(self):
        return self._storage_user

    def get_user_id(self):
        return self._user_id

    def get_account(self):
        return self._account
| [
"taewanme@gmail.com"
] | taewanme@gmail.com |
11081b54eefdf7a0dc6ee6fe71a46da11fa021ff | 7fe128bad2edcc42bfe2f416f57822faadba7aa6 | /home/urls.py | f04627ace266f0508a1c7c867143b656ed422cc8 | [] | no_license | AlexiaDelorme/site-sephora | e3ad6b7ee6f6bff4ebd655d472e55a5e261426d2 | 4503f93bcedb856ac57585ffa021f9ec9759b0d2 | refs/heads/main | 2023-01-23T12:31:20.599344 | 2020-12-04T22:16:09 | 2020-12-04T22:16:09 | 311,388,760 | 0 | 0 | null | 2020-11-29T14:31:49 | 2020-11-09T15:52:49 | CSS | UTF-8 | Python | false | false | 388 | py | from django.urls import path
from .views import specialty_detail_view, sub_specialty_detail_view, honoraires_view
urlpatterns = [
path('compétences/<int:pk>/', specialty_detail_view, name='specialty_detail'),
path('compétences/détails/<int:pk>/',
sub_specialty_detail_view, name='sub_specialty_detail'),
path('honoraires/', honoraires_view, name='honoraires'),
]
| [
"46656955+AlexiaDelorme@users.noreply.github.com"
] | 46656955+AlexiaDelorme@users.noreply.github.com |
41865d0a8d31bd029bcf81770e2e5cc7e4de96f3 | bcc4eb1b96ff9117208a07bf91f479695776b684 | /Fase2/arbol_b/nodo_b.py | 001d63088a5c7cd31ca5dccd324c6325335c701d | [] | no_license | JASAdrian1/EDD_SmartClass_201901704 | 5db668697966cc317415e2466c73e79bcf7b2838 | bfc0a362e24b565b5abcd4cc328134a809b12cc6 | refs/heads/master | 2023-09-02T19:54:20.091149 | 2021-11-12T00:01:07 | 2021-11-12T00:01:07 | 394,545,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py |
class nodo_b:
    """Node holding one course record, with tree and sibling links."""

    def __init__(self, curso):
        # Payload stored in the node.
        self.curso = curso
        # Child links (left/right) and the doubly linked sibling chain all
        # start detached; the tree code wires them up later.
        self.izquierda = self.derecha = None
        self.siguiente = self.anterior = None
| [
"3022643370101@ingenieria.usac.edu.gt"
] | 3022643370101@ingenieria.usac.edu.gt |
4c0cbbd6b354a7c9beb0926c78c9d3d68e787764 | eece94bc273edadee19a40cee51f787037737422 | /corpus.py | f443471c22224decc1f3c7890717ccb9d2066b95 | [] | no_license | mi-chellenguyen/cs-121-project-3 | 0579b9c3a2e0b2d0fe523fecd9fe76fb7d0df4c7 | 4b8be2857210563ffbf52325672fedc4a25fe846 | refs/heads/master | 2022-01-18T14:51:55.851351 | 2019-06-01T05:12:12 | 2019-06-01T05:12:12 | 187,193,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | """
Corpus class is from the same corpus.py given in project 2
"""
import json
import os
from urllib.parse import urlparse
class Corpus:
    """
    Handles corpus-related functionality, chiefly mapping a URL to the
    local file (inside WEBPAGES_RAW) that holds its downloaded content.
    """
    # The corpus directory name
    WEBPAGES_RAW_NAME = "WEBPAGES_RAW"
    # The corpus JSON mapping file ("dir/file" address -> url)
    JSON_FILE_NAME = os.path.join(".", WEBPAGES_RAW_NAME, "bookkeeping.json")

    def __init__(self):
        # Fix: the original called json.load(open(...), encoding="utf-8"),
        # but json.load() no longer accepts an ``encoding`` argument
        # (removed in Python 3.9) and the file handle was never closed.
        # Declare the encoding on open() and use a context manager.
        with open(self.JSON_FILE_NAME, encoding="utf-8") as mapping_file:
            self.file_url_map = json.load(mapping_file)
        # Inverted index: url (without scheme) -> "dir/file" address.
        self.url_file_map = dict()
        for key in self.file_url_map:
            self.url_file_map[self.file_url_map[key]] = key

    def get_file_name(self, url):
        """
        Given a url, look up a local file in the corpus and, if it exists,
        return the file address. Otherwise return None.
        """
        url = url.strip()
        # The bookkeeping map stores urls without the scheme, so strip
        # "<scheme>://" before the lookup.
        parsed_url = urlparse(url)
        url = url[len(parsed_url.scheme) + 3:]
        if url in self.url_file_map:
            # "dir/file" relative address; renamed locals so the builtins
            # ``dir`` and ``file`` are no longer shadowed.
            parts = self.url_file_map[url].split("/")
            folder = parts[0]
            filename = parts[1]
            return os.path.join(".", self.WEBPAGES_RAW_NAME, folder, filename)
        return None
| [
"michn10@uci.edu"
] | michn10@uci.edu |
e7723d9c8eda67ab73bdce61fabc02b82c3baa62 | 0d8ab859bc1848fe0a61e1c926df3b1a4b298141 | /modulo_2/modulo_2/wsgi.py | 5a1eb851fda7ebf1c005e3f62d2c42dfa8aed635 | [] | no_license | alan-damian/django-aplicaciones | 18558ee52e632a09c77c28f90b6505c2a28b4d89 | 59d2f5834f0a6add978ae9f5c1d4cd108e9c8789 | refs/heads/main | 2023-08-22T17:23:04.535882 | 2023-08-08T20:08:25 | 2023-08-08T20:08:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for modulo_2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'modulo_2.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
e244467c43c15f133fb60bcf85e4eb8bde84cf17 | ec450b48ea1f074dc61fd6d9fdd8dd91cb40fb56 | /backend/wallet/wallet.py | 4d84aa3a18ba3302a85c02288cb28f8b3d2ea73b | [] | no_license | TwylaWeiyuTang/Blockchain | 4dc4a72ca5538e58aad545f6c7c5c4b72f956610 | f174abd57504c719cff7926f4049a51c4bbefd61 | refs/heads/main | 2023-05-20T16:44:06.053833 | 2021-06-10T04:36:28 | 2021-06-10T04:36:28 | 375,567,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,615 | py | import uuid
import json
from backend.config import STARTING_BALANCE
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_dss_signature,
decode_dss_signature
)
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.exceptions import InvalidSignature
class Wallet:
    """
    An individual wallet for a miner.

    Keeps track of the miner's balance and owns the SECP256K1 key pair
    used to authorize (sign) transactions and to let others verify them.
    """
    def __init__(self):
        # Short public identifier: the first 8 characters of a random UUID.
        # (uuid4 is random, not timestamp based.)
        self.address = str(uuid.uuid4())[0:8]
        self.balance = STARTING_BALANCE
        # Elliptic-curve key pair; the public key is derived from the
        # private key and then serialized to a PEM string for transport.
        self.private_key = ec.generate_private_key(ec.SECP256K1(), default_backend())
        self.public_key = self.private_key.public_key()
        self.serialize_public_key()
    def sign(self, data):
        """
        Generate a signature for ``data`` using the local private key.

        The data is JSON-serialized and signed with ECDSA/SHA-256; the DER
        signature is decoded into an ``(r, s)`` tuple so it can be
        transported and later re-encoded by ``verify``.  Other entities can
        thereby confirm the transaction was created by this wallet.
        """
        return decode_dss_signature(self.private_key.sign(
            json.dumps(data).encode('utf-8'), # sign() only accepts bytes
            ec.ECDSA(hashes.SHA256())
        ))
    def serialize_public_key(self):
        """
        Replace ``self.public_key`` with its PEM-encoded string form.
        """
        self.public_key = self.public_key.public_bytes(
            encoding = serialization.Encoding.PEM,
            format = serialization.PublicFormat.SubjectPublicKeyInfo
        ).decode('utf-8')
    @staticmethod
    def verify(public_key, data, signature):
        """
        Return True when ``signature`` over ``data`` matches ``public_key``.

        ``public_key`` is the PEM string produced by ``serialize_public_key``
        and ``signature`` is the ``(r, s)`` tuple produced by ``sign``.
        """
        # The PEM string must be turned back into a key object before the
        # cryptography API can use it; otherwise verification would break.
        deserialized_public_key = serialization.load_pem_public_key(
            public_key.encode('utf-8'),
            default_backend()
        )
        (r, s) = signature # sign() returns a decoded (r, s) tuple
        try: # verify() raises on mismatch instead of returning False
            deserialized_public_key.verify(
                encode_dss_signature(r, s),
                json.dumps(data).encode('utf-8'),
                ec.ECDSA(hashes.SHA256())
            )
            return True
        except InvalidSignature: # narrower and safer than a blanket Exception
            return False
def main():
    """Ad-hoc smoke test: sign data, then verify with right and wrong keys."""
    wallet = Wallet()
    # Inspect the generated attributes (address, balance, key pair).
    print(f'wallet.__dict__: {wallet.__dict__}')
    data = {'foo': 'bar'}
    signature = wallet.sign(data)
    print(f'signature: {signature}')
    # Verifying with the matching public key must succeed.
    should_be_valid = Wallet.verify(wallet.public_key, data, signature)
    print(f'should_be_valid: {should_be_valid}')
    # A fresh wallet's public key simulates a tampering third party, so
    # verification must fail.
    should_be_invalid = Wallet.verify(Wallet().public_key, data, signature)
    print(f'should_be_invalid: {should_be_invalid}')
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
5a68c04f899f47c89d7192b8ebdfe56a6a2f17e4 | 057d662a83ed85897e9906d72ea90fe5903dccc5 | /.PyCharmCE2019.2/system/python_stubs/cache/2b64a136264952b5fc77d4c27a781542cf8f659109a9e46ce1b22875cea43541/_cython_0_29_2.py | f3272e9cfb67ae70598f0930f6534bae4d67a51e | [] | no_license | Karishma00/AnsiblePractice | 19a4980b1f6cca7b251f2cbea3acf9803db6e016 | 932558d48869560a42ba5ba3fb72688696e1868a | refs/heads/master | 2020-08-05T00:05:31.679220 | 2019-10-04T13:07:29 | 2019-10-04T13:07:29 | 212,324,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,255 | py | # encoding: utf-8
# module _cython_0_29_2
# from /usr/lib/python3/dist-packages/brlapi.cpython-37m-x86_64-linux-gnu.so
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# classes
class cython_function_or_method(object):
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __get__(self, *args, **kwargs): # real signature unknown
""" Return an attribute of instance, which is of type owner. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
func_closure = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_code = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_defaults = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_dict = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_doc = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_globals = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__annotations__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__closure__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__code__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__defaults__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__globals__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__kwdefaults__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__self__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is "mappingproxy({'__repr__': <slot wrapper '__repr__' of 'cython_function_or_method' objects>, '__call__': <slot wrapper '__call__' of 'cython_function_or_method' objects>, '__get__': <slot wrapper '__get__' of 'cython_function_or_method' objects>, '__reduce__': <method '__reduce__' of 'cython_function_or_method' objects>, '__module__': <member '__module__' of 'cython_function_or_method' objects>, 'func_doc': <attribute 'func_doc' of 'cython_function_or_method' objects>, '__doc__': <attribute '__doc__' of 'cython_function_or_method' objects>, 'func_name': <attribute 'func_name' of 'cython_function_or_method' objects>, '__name__': <attribute '__name__' of 'cython_function_or_method' objects>, '__qualname__': <attribute '__qualname__' of 'cython_function_or_method' objects>, '__self__': <attribute '__self__' of 'cython_function_or_method' objects>, 'func_dict': <attribute 'func_dict' of 'cython_function_or_method' objects>, '__dict__': <attribute '__dict__' of 'cython_function_or_method' objects>, 'func_globals': <attribute 'func_globals' of 'cython_function_or_method' objects>, '__globals__': <attribute '__globals__' of 'cython_function_or_method' objects>, 'func_closure': <attribute 'func_closure' of 'cython_function_or_method' objects>, '__closure__': <attribute '__closure__' of 'cython_function_or_method' objects>, 'func_code': <attribute 'func_code' of 'cython_function_or_method' objects>, '__code__': <attribute '__code__' of 'cython_function_or_method' objects>, 'func_defaults': <attribute 'func_defaults' of 'cython_function_or_method' objects>, '__defaults__': <attribute '__defaults__' of 'cython_function_or_method' objects>, '__kwdefaults__': <attribute '__kwdefaults__' of 'cython_function_or_method' objects>, '__annotations__': <attribute '__annotations__' of 'cython_function_or_method' objects>})"
__name__ = 'cython_function_or_method'
__qualname__ = 'cython_function_or_method'
| [
"karishma11198@gmail.com"
] | karishma11198@gmail.com |
997e8209c2b3d2a126f06dcf2817e43779ec535d | 9e9faf222fe6969b5b0c1652768c1b1db88e8b5d | /old_stuff/tf_old_stuff/train_adaptive_lr.py | a2481a5ee8ec6bc61fe6b9911175864f5dcde110 | [] | no_license | Ravoxsg/UofT_Adaptive_LR_optimizer | b800a068f222bd361e1ac6567c7cdbc74bd24d3c | 18c3f719c89e42e4e84af5840e64b4c89ab7e153 | refs/heads/master | 2021-08-31T00:52:31.674332 | 2017-12-20T02:23:18 | 2017-12-20T02:23:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,574 | py | #Same as train.py, but with an adaptive learning rate schedule.
import numpy as np
import tensorflow as tf
from data import cifar10, utilities
import vgg
# Config:
BATCH_SIZE = 64
NUM_EPOCHS = 0.2
STEP = 20
DATASET_SIZE = 50000
INITIAL_LR = 0.0001
EPSILON = 0.0000001
network = 'vgg'
mode = 'adaptive_2nd_order'
logdir = 'cnn_{}_{}/train_logs/'.format(network,mode)
# Set up training data:
NUM_BATCHES = int(NUM_EPOCHS * DATASET_SIZE / BATCH_SIZE)
data_generator = utilities.infinite_generator(cifar10.get_train(), BATCH_SIZE)
# Define the placeholders:
n_input = tf.placeholder(tf.float32, shape=cifar10.get_shape_input(), name="input")
n_label = tf.placeholder(tf.int64, shape=cifar10.get_shape_label(), name="label")
# Build the model
n_output = vgg.build(n_input)
# Define the loss function
loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=n_output, labels=n_label, name="softmax"))
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(n_output, axis=1), n_label), tf.float32))
# Add summaries to track the state of training:
tf.summary.scalar('summary/loss', loss)
tf.summary.scalar('summary/accuracy', accuracy)
summaries = tf.summary.merge_all()
# Define training operations:
global_step = tf.Variable(0, trainable=False, name='global_step')
inc_global_step = tf.assign(global_step, global_step+1)
# Adaptive learning rate variables
lr = tf.Variable(INITIAL_LR, name='lr')
lr1 = tf.Variable(INITIAL_LR - EPSILON, name = 'lr1')
lr2 = tf.Variable(INITIAL_LR + EPSILON, name = 'lr2')
lr3 = tf.Variable(INITIAL_LR - 2*EPSILON, name = 'lr3')
lr4 = tf.Variable(INITIAL_LR + 2*EPSILON, name = 'lr4')
loss1 = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=n_output, labels=n_label, name="softmax"))
loss2 = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=n_output, labels=n_label, name="softmax"))
loss3 = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=n_output, labels=n_label, name="softmax"))
loss4 = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=n_output, labels=n_label, name="softmax"))
train_op_0 = tf.train.GradientDescentOptimizer(learning_rate = lr, use_locking=True).minimize(loss)
train_op = tf.train.GradientDescentOptimizer(learning_rate = lr).minimize(loss)
train_op_1_0 = tf.train.GradientDescentOptimizer(learning_rate = lr1, use_locking=True).minimize(loss1)
train_op_1 = tf.train.GradientDescentOptimizer(learning_rate = lr1).minimize(loss1)
train_op_2_0 = tf.train.GradientDescentOptimizer(learning_rate = lr2, use_locking=True).minimize(loss2)
train_op_2 = tf.train.GradientDescentOptimizer(learning_rate = lr2).minimize(loss2)
train_op_3_0 = tf.train.GradientDescentOptimizer(learning_rate = lr3, use_locking=True).minimize(loss3)
train_op_3 = tf.train.GradientDescentOptimizer(learning_rate = lr3).minimize(loss3)
train_op_4_0 = tf.train.GradientDescentOptimizer(learning_rate = lr4, use_locking=True).minimize(loss4)
train_op_4 = tf.train.GradientDescentOptimizer(learning_rate = lr4).minimize(loss4)
inc_lr = tf.assign(lr, lr - 2*EPSILON*((loss2 - loss1 + EPSILON)/(loss3 + loss4 - 2*loss + EPSILON)))
inc_lr1 = tf.assign(lr1, lr - EPSILON)
inc_lr2 = tf.assign(lr2, lr + EPSILON)
inc_lr3 = tf.assign(lr3, lr - 2*EPSILON)
inc_lr4 = tf.assign(lr4, lr + 2*EPSILON)
# Keeping track of the loss and the learning rate
batches = []
losses = []
lrs = []
print("Loading training supervisor...")
sv = tf.train.Supervisor(logdir=logdir, global_step=global_step, summary_op=None, save_model_secs=30)
print("Done!")
with sv.managed_session() as sess:
# Get the current global_step
batch = sess.run(global_step)
# Set up tensorboard logging:
logwriter = tf.summary.FileWriter(logdir, sess.graph)
logwriter.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step=batch)
print("Starting training from batch {} to {}. Saving model every {}s.".format(batch, NUM_BATCHES, 30))
while not sv.should_stop():
if batch >= NUM_BATCHES:
print("Saving...")
sv.saver.save(sess, logdir+'model.ckpt', global_step=batch)
sv.stop()
break
inp, lbl = next(data_generator)
current_loss, _ = sess.run((loss, (train_op_0)), feed_dict={n_input: inp,n_label: lbl})
sess.run((inc_lr1, inc_lr2, inc_lr3, inc_lr4))
loss_1, _ = sess.run((loss1, (train_op_1_0)), feed_dict={n_input: inp,n_label: lbl})
loss_2, _ = sess.run((loss2, (train_op_2_0)), feed_dict={n_input: inp,n_label: lbl})
loss_3, _ = sess.run((loss3, (train_op_3_0)), feed_dict={n_input: inp,n_label: lbl})
loss_4, _ = sess.run((loss4, (train_op_4_0)), feed_dict={n_input: inp,n_label: lbl})
summ, loss_0, _ = sess.run((summaries, loss, (train_op, inc_global_step)), feed_dict={n_input: inp,n_label: lbl})
sess.run(inc_lr, feed_dict={n_input: inp, n_label: lbl})
if batch > 0 and batch % 20 == 0:
print('Step {} of {}.'.format(batch, NUM_BATCHES))
print('Current loss: {}'.format(current_loss))
print('Same as above: {}'.format(loss_0))
print('loss1, loss2, loss3, loss4: {}, {}, {}, {}'.format(loss_1, loss_2, loss_3, loss_4))
print('Learning rate: {}'.format(lr))
logwriter.add_summary(summ, global_step=batch)
batch += 1
print("Halting.")
plt.plot(np.array(batches),np.array(losses))
plt.tile('Loss over time')
plt.show()
plt.plot(np.array(batches),np.array(lrs))
plt.title('Learning rate over time')
plt.show() | [
"mathieu.ravaut@student.ecp.fr"
] | mathieu.ravaut@student.ecp.fr |
650e33ce0f21e78b11f013b0560453ea2a85a6be | 94bfb8b44125c6d7b58409e1979dc95941acd753 | /setup.py | 1e7fbea569df3ec736160c40e28066a550226a3e | [
"MIT"
] | permissive | rkhullar/pearsonr-pure-python | 1ef8eb8ada777fa3e2cf97b5c97122fb6d9ca1f0 | 955fbca6af0a234cf5132d5f83d36a2c411fec7a | refs/heads/master | 2023-03-21T12:56:43.945020 | 2021-03-13T19:54:05 | 2021-03-13T19:54:05 | 347,448,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | from pathlib import Path
from setuptools import find_packages, setup
from typing import List, Union
import pipfile
import re
import subprocess
def read_file(path: Union[str, Path]) -> str:
with Path(path).open('r') as f:
return f.read().strip()
def infer_version() -> str:
process = subprocess.run(['git', 'describe'], stdout=subprocess.PIPE)
output = process.stdout.decode('utf-8').strip()
version = re.sub('^v', '', output)
return version
def load_requirements() -> List[str]:
# return read_file('requirements.txt').splitlines()
return [f'{package}{version}' for package, version in pipfile.load().data['default'].items()]
def read_python_version() -> str:
return pipfile.load().data['_meta']['requires']['python_version']
setup(name='pearsonr',
version=infer_version(),
url='https://github.com/rkhullar/pearsonr-pure-python',
author='Rajan Khullar',
author_email='rkhullar03@gmail.com',
long_description=read_file('readme.md'),
long_description_content_type='text/markdown',
keywords='pearsonr',
license='MIT',
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
python_requires='~='+read_python_version(),
install_requires=load_requirements(),
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
tests_require=['nose', 'parameterized'],
entry_points={}
)
| [
"noreply@github.com"
] | noreply@github.com |
ff42edd1001eb920f52266537eb69f4818f84ba8 | 50ed49edf390ddb3b4439caf20a475d887c23e30 | /Code/Process_dataset.py | d08f89618b4f4e304f0bb8bfe20f06ca4f2b1611 | [
"Apache-2.0"
] | permissive | Parth27/ActionRecognitionVideos | 2bfd728773a0e9eff211c3a70b6bd10dd69c706b | 8410843a0f78d5e0ea5aaf2f319fcffb157dcad3 | refs/heads/master | 2022-12-06T16:22:38.194304 | 2020-08-31T16:06:58 | 2020-08-31T16:06:58 | 291,759,332 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | import os
import glob
src_dir = "./Dataset/UCF-101/" # Dir containing the videos
des_dir = './Dataset/UCF-101/' # Output dir to save the videos
for file in os.listdir(src_dir):
vid_files = glob.glob1(src_dir+file, '*.avi')
des_dir = "./Dataset/UCF-101/frames/"
for vid in vid_files:
des_dir_path = os.path.join(des_dir+file, vid[:-4])
if not os.path.exists(des_dir_path):
os.makedirs(des_dir_path)
os.system('ffmpeg -i ' + os.path.join(src_dir+file, vid) + ' -qscale:v 2 ' + des_dir_path + '/frames%05d.jpg') | [
"diwanji.parth@gmail.com"
] | diwanji.parth@gmail.com |
5a1ab60820fd81cdf5a2c9716a6bd8522f875d10 | 0e046754f5151c9e4916115bb8d547cd1210d144 | /ex40b.py | 77394ff783c97a7957ae61e66e40f08a6fff638f | [] | no_license | teoharianto/python1 | f056730813f4559fd01493bf8d455f13dc5871e9 | aa99ba7df0f1acc349de51fdb0f7c8c370a4482e | refs/heads/master | 2020-09-01T06:53:45.198635 | 2019-11-01T03:17:50 | 2019-11-01T03:17:50 | 218,902,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | class MyClass(object):
def pertanyaan(self):
prompt = "> "
print("Masukkan angka pertama : ")
MyClass.angka1 = input(prompt)
print("Masukkan angka kedua : ")
MyClass.angka2 = input(prompt)
print("-"*10)
| [
"teofilushendry5@gmail.com"
] | teofilushendry5@gmail.com |
2e12d79fa9ad4afdc9d45903736aa325321b8bdf | 780b01976dad99c7c2ed948b8473aa4e2d0404ba | /scripts/alphas_archive/zw_contfut/alpha_ichimokucloud_long_bullish_feb02_.py | ac7c5e278698c535bbbd3bc0ba40df1577ab83ca | [] | no_license | trendmanagement/tmqrexo_alexveden | a8ad699c2c3df4ce283346d287aff4364059a351 | 4d92e2ee2bc97ea2fcf075382d4a5f80ce3d72e4 | refs/heads/master | 2021-03-16T08:38:00.518593 | 2019-01-23T08:30:18 | 2019-01-23T08:30:18 | 56,336,692 | 1 | 1 | null | 2019-01-22T14:21:03 | 2016-04-15T17:05:53 | Python | UTF-8 | Python | false | false | 1,430 | py | #
#
# Automatically generated file
# Created at: 2017-02-09 10:10:05.686710
#
from strategies.strategy_ichimokucloud import StrategyIchimokuCloud
from backtester.strategy import OptParam
from backtester.swarms.rebalancing import SwarmRebalance
from backtester.costs import CostsManagerEXOFixed
from backtester.swarms.rankingclasses import RankerBestWithCorrel
from backtester.strategy import OptParamArray
STRATEGY_NAME = StrategyIchimokuCloud.name
STRATEGY_SUFFIX = "_Bullish_Feb02_"
STRATEGY_CONTEXT = {
'swarm': {
'members_count': 1,
'ranking_class': RankerBestWithCorrel(window_size=-1, correl_threshold=-0.5),
'rebalance_time_function': SwarmRebalance.every_friday,
},
'strategy': {
'exo_name': 'ZW_ContFut',
'class': StrategyIchimokuCloud,
'opt_params': [
OptParamArray('Direction', [1]),
OptParam('conversion_line_period', 9, 2, 22, 5),
OptParam('base_line_period', 26, 13, 13, 2),
OptParam('leading_spans_lookahead_period', 26, 26, 26, 13),
OptParam('leading_span_b_period', 52, 2, 106, 30),
OptParamArray('RulesIndex', [14, 6, 13]),
OptParam('MedianPeriod', 5, 14, 26, 12),
],
},
'costs': {
'context': {
'costs_options': 3.0,
'costs_futures': 3.0,
},
'manager': CostsManagerEXOFixed,
},
}
| [
"i@alexveden.com"
] | i@alexveden.com |
8fbc50489eff50ed424d41fd9e73da22a933f129 | 4b17b361017740a4113ba358460293e55c9bee49 | /LAB02/04-CloudAlbum-XRAY/cloudalbum/model/models.py | f21c3b69306956a5492bd0f320264da3289f4e9d | [
"MIT"
] | permissive | liks79/aws-chalice-migration-workshop | aa01fa5a585a548c8408ba7448d731deefbbbd18 | 5115117504a3e2b897dc8444be58de0e4e12586a | refs/heads/master | 2022-12-25T09:50:44.821495 | 2018-11-20T03:05:52 | 2018-11-20T03:05:52 | 139,544,736 | 6 | 8 | MIT | 2022-12-08T02:17:36 | 2018-07-03T07:26:11 | JavaScript | UTF-8 | Python | false | false | 3,505 | py | """
model.models.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CloudAlbum is a sample application for TechSummit 2018 workshop.
:copyright: © 2018 by Sungshik Jou.
:license: BSD, see LICENSE for more details.
"""
from sqlalchemy import Float, DateTime, ForeignKey, Integer, String
from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
from cloudalbum import login
db = SQLAlchemy()
class User(UserMixin, db.Model):
"""
Database Model class for User table
"""
__tablename__ = 'User'
id = db.Column(Integer, primary_key=True)
username = db.Column(String(50), unique=False)
email = db.Column(String(50), unique=True)
password = db.Column(String(100), unique=False)
photos = db.relationship('Photo',
backref='user',
cascade='all, delete, delete-orphan')
def __init__(self, name, email, password):
self.username = name
self.email = email
self.password = password
def __repr__(self):
return '<%r %r %r>' % (self.__tablename__, self.username, self.email)
class Photo(db.Model):
"""
Database Model class for Photo table
"""
__tablename__ = 'Photo'
id = db.Column(Integer, primary_key=True)
user_id = db.Column(Integer, ForeignKey(User.id))
tags = db.Column(String(400), unique=False)
desc = db.Column(String(400), unique=False)
filename_orig = db.Column(String(400), unique=False)
filename = db.Column(String(400), unique=False)
filesize = db.Column(Integer, unique=False)
geotag_lat = db.Column(Float, unique=False)
geotag_lng = db.Column(Float, unique=False)
upload_date = db.Column(DateTime, unique=False)
taken_date = db.Column(DateTime, unique=False)
make = db.Column(String(400), unique=False)
model = db.Column(String(400), unique=False)
width = db.Column(String(400), unique=False)
height = db.Column(String(400), unique=False)
city = db.Column(String(400), unique=False)
nation = db.Column(String(400), unique=False)
address = db.Column(String(400), unique=False)
def __init__(self, user_id, tags, desc, filename_orig, filename, filesize, geotag_lat, geotag_lng, upload_date,
taken_date, make, model, width, height, city, nation, address):
"""Initialize"""
self.user_id = user_id
self.tags = tags
self.desc = desc
self.filename_orig = filename_orig
self.filename = filename
self.filesize = filesize
self.geotag_lat = geotag_lat
self.geotag_lng = geotag_lng
self.upload_date = upload_date
self.taken_date = taken_date
self.make = make
self.model = model
self.width = width
self.height = height
self.city = city
self.nation = nation
self.address = address
def __repr__(self):
"""print information"""
return '<%r %r %r>' % (self.__tablename__, self.user_id, self.upload_date)
@login.user_loader
def load_user(id):
"""
User information loader for authenticated user
:param id: user id
:return: user record from User table
"""
# user = User.query.get(int(id))
#
# minutes = conf['SESSION_TIMEOUT']
#
# if user.last_seen < (datetime.utcnow() - datetime.timedelta(minutes=minutes)):
# # Session has timed out
# return None
#
# return User.query.get(user)
return User.query.get(int(id)) | [
"liks79@gmail.com"
] | liks79@gmail.com |
dab7e968ff950d4145b0f4b0021112f80ccf01df | a1966ec4fd6e45d5e2ebd04cbac8803ed7df83e3 | /account/views.py | e39f2b49eddfab0f6b4ee7c81e440ca005d036f8 | [] | no_license | panman11111/Django_bootstrap_login | 2b7acdbfabed80d44dfa2bd17422e57b11470795 | e80c80deef8656fb4da1eb86d99d5fd7f01e1f6b | refs/heads/main | 2023-06-21T15:18:23.178322 | 2021-07-23T08:01:16 | 2021-07-23T08:01:16 | 388,710,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from django.shortcuts import redirect, render
from django.contrib.auth.forms import UserCreationForm
def signup(request):
if request.method == "POST":
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('login')
else:
form = UserCreationForm()
return render(request, 'account/signup.html', {'form':form}) | [
"sakuma.takuto@shanon.co.jp"
] | sakuma.takuto@shanon.co.jp |
c268e42e33cab54993c7be1e85eaccdb2e98aec1 | 5655a243a3d631c70826910b9bd257bf21f52bcd | /LAB SW 3/MyMQTT2.py | e3854b6fed7183b0be3bb1cbe0fb9cd3890a7f21 | [] | no_license | sebalaterra/Relazioni-IoT | dff379cdba559d324d944ee28aeeb60cf3cccdf7 | 40e4402b4583307f8594ff60d95a413647121c87 | refs/heads/master | 2022-11-08T00:08:57.375245 | 2020-06-25T13:05:54 | 2020-06-25T13:05:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,840 | py | import paho.mqtt.client as PahoMQTT
import requests
import json
class MyMQTT:
def __init__(self, clientID, broker, topic, port):
self.broker = broker
self.port = port
#self.notifier = self
self.clientID = clientID
self._topic = topic
self._isSubscriber = False
# create an instance of paho.mqtt.client
self._paho_mqtt = PahoMQTT.Client(clientID, False)
# register the callback
self._paho_mqtt.on_connect = self.myOnConnect
self._paho_mqtt.on_message = self.myOnMessageReceived
def myOnConnect (self, paho_mqtt, userdata, flags, rc):
print ("Connected to %s with result code: %d" % (self.broker, rc))
def myOnMessageReceived (self, paho_mqtt , userdata, msg):
# A new message is received
print(msg.topic + msg.payload)
def myPublish (self, topic, msg):
# if needed, you can do some computation or error-check before publishing
#uncomment the line below for debugging
#print ("publishing '%s' with topic '%s'" % (msg, topic))
# publish a message with a certain topic
self._paho_mqtt.publish(topic, msg, 2)
def mySubscribe (self, topic):
# if needed, you can do some computation or error-check before subscribing
#assert hasattr(self.notifier,'notify')
print ("subscribing to %s" % (topic))
# subscribe for a topic
self._paho_mqtt.subscribe(topic, 2)
# just to remember that it works also as a subscriber
self._isSubscriber = True
self._topic = topic
def start(self):
#manage connection to broker
self._paho_mqtt.connect(self.broker , self.port)
self._paho_mqtt.loop_start()
def unsubscribe(self):
if (self._isSubscriber):
# remember to unsuscribe if it is working also as subscriber
self._paho_mqtt.unsubscribe(self._topic)
def stop (self):
if (self._isSubscriber):
# remember to unsuscribe if it is working also as subscriber
self._paho_mqtt.unsubscribe(self._topic)
self._paho_mqtt.loop_stop()
self._paho_mqtt.disconnect()
if __name__=="__main__":
payload = {
"serviceID": 789,
"description": "MyMQTT"
}
requests.put("http://localhost:8080/addService", json.dumps(payload))
response = requests.get("http://localhost:8080/deviceID/123")
our_device = response.json()
endpoint = our_device["end-points"]
#endpoint = "tiot/6/device/sensors"
print(endpoint)
#dato che è un subscriber, non può pubblicare -> per ricavare il broker facciamo una
#richiesta GET
broker = requests.get("http://localhost:8080/broker")
our_broker = broker.json()
our_broker2 = our_broker["broker"]
our_broker3 = our_broker["port"]
print(our_broker2)
test=MyMQTT("MySubscriber2", our_broker2, endpoint,1883)
#test.mySubscribe(endpoint)
test.start()
test.mySubscribe(endpoint)
while True:
user_input = input("""Cosa vuoi fare?
lcd: Stampa qualcosa nell'lcd
temp: Per azionare il motore
led: Settare il led in base alla temperatura\n""")
senML = {
"bn": "Yun",
"e": [{
"n": str(user_input),
"u": "",
"v": ""
}]
}
if (user_input == "lcd"):
senML["e"][0]["v"] = input("Cosa vuoi stampare?")
if (user_input == "temp"):
senML["e"][0]["v"] = input("Valore di temperatura: ")
if (user_input == "led"):
senML["e"][0]["v"] = input("Valore di temperatura: ")
test.myPublish(endpoint, json.dumps(senML))
| [
"noreply@github.com"
] | noreply@github.com |
febfee8b916c13278ae85b84a29cbfe7b0fa8325 | 44e60feaefb8afed8b17216c379e43a4e4a4c47b | /venv/lib/python3.6/functools.py | 552df8cf27882cc693945575cca18e19aa27269a | [] | no_license | 19059019/DGHMMR | f7dc0d875019348bfbf727853520bc226ce75df2 | 6487f3a609bcb82102ee0558f2f60da2ceea182b | refs/heads/master | 2022-11-06T06:02:09.853490 | 2018-05-09T12:39:07 | 2018-05-09T12:39:07 | 129,313,764 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | /home/ryan/anaconda3/lib/python3.6/functools.py | [
"myrw344email@gmail.com"
] | myrw344email@gmail.com |
f843bedf9cbff64a0edb8722634e6949dba0b446 | 72b55ff8a72209979f6da96f461c41737270396d | /forms.py | c9ecfa8e05ad83a87870e3b887daf066942a64a6 | [] | no_license | KeithCCC/FlaskSamples | 2426a51325abcb1cf8527ead97f36a0e1ecd9736 | 15b348f3ad798084a2048b272b3b7ce6669f7024 | refs/heads/master | 2021-01-23T18:22:41.765447 | 2017-09-07T22:22:37 | 2017-09-07T22:22:37 | 102,790,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | from flask_wtf import Form
from wtforms import TextField, IntegerField, TextAreaField, SubmitField, RadioField,SelectField
from wtforms import validators, ValidationError
class ContactForm(Form):
name = TextField("Name Of Student",[validators.Required("Please enter your name.")])
Gender = RadioField('Gender', choices = [('M','Male'),('F','Female')])
Address = TextAreaField("Address")
email = TextField("Email",[validators.Required("Please enter your email address."),
validators.Email("Please enter your email address.")])
Age = IntegerField("age")
language = SelectField('Languages', choices = [('cpp', 'C++'), ('py', 'Python')])
submit = SubmitField("Send") | [
"keithc2chen@gmail.com"
] | keithc2chen@gmail.com |
0a5527590c9d0a2c5741e99bc9a397e7d56bc8d6 | 276c86a451c4110ba0885dbe8509d46f23c21715 | /esp32-micropython/_projects/robot_random_roam/robot.py | 0d6ef0ac34c14ae716840ea486d2f36ac8c0081e | [] | no_license | vtt-info/octopuslab | 055c9bfdc1466a6e5acf90a3cd0db2826e72dee7 | 3d20933c05bae3eec4d0c033f228bde369e46e07 | refs/heads/master | 2022-07-27T14:19:16.386241 | 2020-05-16T13:50:21 | 2020-05-16T13:50:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,587 | py | from machine import Pin, Timer, PWM, SPI
from util.pinout import set_pinout
from time import sleep_ms
from random import randrange
from hcsr04 import HCSR04
pinout = set_pinout()
MAX_SPEED = 800
CRUISING_SPEED = 450
APPROACH_SPEED = 350
TURN_SPEED = 450
RANDOM_TURN_MIN = 600 # ms
RANDOM_TURN_MAX = 1000 # ms
COLLISION_THRESHOLD = 30 # cm
APPROACH_THRESHOLD = 100 #cm
MAX_SPEED_THRESHOLD = 250 #cm
LR_COMPENSATION = +10 # %, -10 slows L motor comared to R
ULTRASONIC_SAMPLING = 1000 # ms, how often detect obstacle
moto_L1 = Pin(pinout.MOTOR_1A, Pin.OUT)
moto_L2 = Pin(pinout.MOTOR_2A, Pin.OUT)
moto_L = PWM(Pin(pinout.MOTOR_12EN, Pin.OUT), freq=500, duty = 0)
moto_R3 = Pin(pinout.MOTOR_3A, Pin.OUT)
moto_R4 = Pin(pinout.MOTOR_4A, Pin.OUT)
moto_R = PWM(Pin(pinout.MOTOR_34EN, Pin.OUT), freq=500, duty = 0)
echo = HCSR04(trigger_pin=pinout.PWM2_PIN, echo_pin=pinout.PWM1_PIN)
def compensate_speed_left(speed):
return speed + int(speed/100 * LR_COMPENSATION/2)
def compensate_speed_right(speed):
return speed - int(speed/100 * LR_COMPENSATION/2)
def forward(speed):
moto_L1.value(0)
moto_L2.value(1)
moto_R3.value(0)
moto_R4.value(1)
moto_L.duty(compensate_speed_left(speed))
moto_R.duty(compensate_speed_right(speed))
def stop():
moto_L.duty(0)
moto_R.duty(0)
def turn_left(ms):
moto_L1.value(1)
moto_L2.value(0)
moto_R3.value(0)
moto_R4.value(1)
moto_L.duty(compensate_speed_left(TURN_SPEED))
moto_R.duty(compensate_speed_right(TURN_SPEED))
sleep_ms(ms)
stop()
def turn_right(ms):
moto_L1.value(0)
moto_L2.value(1)
moto_R3.value(1)
moto_R4.value(0)
moto_L.duty(compensate_speed_left(TURN_SPEED))
moto_R.duty(compensate_speed_right(TURN_SPEED))
sleep_ms(ms)
stop()
def random_turn():
if randrange(2):
print('TURN LEFT')
turn_left(randrange(RANDOM_TURN_MIN, RANDOM_TURN_MAX))
else:
print('TURN RIGHT')
turn_right(randrange(RANDOM_TURN_MIN, RANDOM_TURN_MAX))
def start():
while True:
stop()
distance = echo.distance_cm()
if distance < 0 or distance > MAX_SPEED_THRESHOLD:
print('MAX_SPEED')
forward(MAX_SPEED)
elif distance > APPROACH_THRESHOLD:
print('CRUISING')
forward(CRUISING_SPEED)
elif distance > COLLISION_THRESHOLD:
print('APPROACH')
forward(APPROACH_SPEED)
else: # distance < COLLISION_THRESHOLD
stop()
random_turn()
sleep_ms(ULTRASONIC_SAMPLING)
| [
"vasek.chalupnicek@gmail.com"
] | vasek.chalupnicek@gmail.com |
67642b39839af0eb49cdb9903aadc2180913e8b8 | a229bfe7e79df9b1d005771194618568880fcb77 | /learning_complete/hello.py | e042dc336902136f4b271eb064164ffba54c6b44 | [] | no_license | chjdm/PythonCrashCourse | 96c2173097e1cf3b4952dae11e5f087c4a0de6bc | 234beafd69fdd3feb6aa91384888b850f6be04d9 | refs/heads/master | 2020-03-27T23:39:54.933633 | 2018-09-17T14:45:14 | 2018-09-17T14:45:14 | 147,336,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | print('Hello, world!')
print('rest') | [
"liuwee@sina.com"
] | liuwee@sina.com |
18d32d3d4aeafb8a8b5dd7f7e1ae9d34159a2175 | 819776af8bc3160727b7ac8f228fb985fe7594fe | /selfcheck/migrations/0002_auto_20200810_1413.py | c7c6bb32d5865800640bdba707991f51b11a7993 | [] | no_license | davkim1030/mirae_qr_server | ea5850ea222551cae940d918cc9e67e06f81154f | 3ccf1bceb9163a6a51eb29f8d205887ca7e9bce8 | refs/heads/master | 2022-11-30T04:41:21.286352 | 2020-08-18T12:57:08 | 2020-08-18T12:57:08 | 283,763,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # Generated by Django 3.0.3 on 2020-08-10 14:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('selfcheck', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='url_str',
field=models.CharField(max_length=100),
),
]
| [
"davkim1030@gmail.com"
] | davkim1030@gmail.com |
31f6289142f123b7aa1f3408d8c68b0a4c08744b | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_private_endpoint_connections_operations.py | 4dae2b4662729d3e2714e093eb7ff038afd17b81 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 34,172 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_database_account_request(
resource_group_name: str, account_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"privateEndpointConnectionName": _SERIALIZER.url(
"private_endpoint_connection_name", private_endpoint_connection_name, "str"
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"privateEndpointConnectionName": _SERIALIZER.url(
"private_endpoint_connection_name", private_endpoint_connection_name, "str"
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-15-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"privateEndpointConnectionName": _SERIALIZER.url(
"private_endpoint_connection_name", private_endpoint_connection_name, "str"
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.cosmosdb.CosmosDBManagementClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_database_account(
self, resource_group_name: str, account_name: str, **kwargs: Any
) -> Iterable["_models.PrivateEndpointConnection"]:
"""List all private endpoint connections on a Cosmos DB account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnection or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_database_account_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_database_account.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_database_account.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections"
}
@distributed_trace
def get(
self, resource_group_name: str, account_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Gets a private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
def _create_or_update_initial(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
parameters: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> Optional[_models.PrivateEndpointConnection]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "PrivateEndpointConnection")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@overload
def begin_create_or_update(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
parameters: _models.PrivateEndpointConnection,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.PrivateEndpointConnection]:
"""Approve or reject a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param parameters: Required.
:type parameters: ~azure.mgmt.cosmosdb.models.PrivateEndpointConnection
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.PrivateEndpointConnection]:
"""Approve or reject a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param parameters: Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
account_name: str,
private_endpoint_connection_name: str,
parameters: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> LROPoller[_models.PrivateEndpointConnection]:
"""Approve or reject a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param parameters: Is either a PrivateEndpointConnection type or a IO type. Required.
:type parameters: ~azure.mgmt.cosmosdb.models.PrivateEndpointConnection or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, account_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@distributed_trace
def begin_delete(
self, resource_group_name: str, account_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Deletes a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
| [
"noreply@github.com"
] | noreply@github.com |
fe3182900da8d8bb4dbc2094bba70c61c293ed2a | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /fyyJRDHcTe9REs4Ni_2.py | fa8d5a9f92273ccb98b5f5ce47ca0d2a51943ab1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | """
Create a function that takes three arguments (first dictionary, second
dictionary, key) in order to:
1. Return the boolean `True` if both dictionaries have the same values for the same keys.
2. If the dictionaries don't match, return the string `"Not the same"`, or the string `"One's empty"` if only one of the dictionaries contains the given key.
### Examples
dict_first = { "sky": "temple", "horde": "orcs", "people": 12, "story": "fine", "sun": "bright" }
dict_second = { "people": 12, "sun": "star", "book": "bad" }
check(dict_first, dict_second, "horde") ➞ "One's empty"
check(dict_first, dict_second, "people") ➞ True
check(dict_first, dict_second, "sun") ➞ "Not the same"
### Notes
* Dictionaries are an unordered data type.
* Double quotes may be helpful.
* `KeyError` can occur when trying to access a dictionary key that doesn't exist.
"""
def check(d1, d2, k):
try: return ["Not the same", True][d1[k] == d2[k]]
except KeyError: return "One's empty"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a77b2ff17ace222a1bea8e8a72ff18df355a31a5 | 364e81cb0c01136ac179ff42e33b2449c491b7e5 | /spell/branches/2.0/src/spell/spell/lib/adapter/verifier.py | 0ffd4e9ec0d45331f71f5a42f892936cb51928a1 | [] | no_license | unnch/spell-sat | 2b06d9ed62b002e02d219bd0784f0a6477e365b4 | fb11a6800316b93e22ee8c777fe4733032004a4a | refs/heads/master | 2021-01-23T11:49:25.452995 | 2014-10-14T13:04:18 | 2014-10-14T13:04:18 | 42,499,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,938 | py | ###################################################################################
## MODULE : spell.lib.adapter.verifier
## DATE : Mar 18, 2011
## PROJECT : SPELL
## DESCRIPTION: Telemetry verifier thread
## --------------------------------------------------------------------------------
##
## Copyright (C) 2008, 2011 SES ENGINEERING, Luxembourg S.A.R.L.
##
## This file is part of SPELL.
##
## This component is free software: you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation, either
## version 3 of the License, or (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License and GNU General Public License (to which the GNU Lesser
## General Public License refers) along with this library.
## If not, see <http://www.gnu.org/licenses/>.
##
###################################################################################
#*******************************************************************************
# SPELL imports
#*******************************************************************************
from spell.utils.log import *
from spell.lib.adapter.tm_item import *
from spell.lib.exception import *
from spell.lang.constants import *
from spell.lang.modifiers import *
from spell.lib.adapter.constants.core import COMP_SYMBOLS
#*******************************************************************************
# System imports
#*******************************************************************************
import threading,datetime,time,sys
################################################################################
class TmVerifierClass(threading.Thread):
__tmClass = None
__tmItem = None
__definition = []
step = None
name = None
value = None
status = None
failed = False
reason = " "
updtime = " "
error = None
__paramName = None
__comparison = None
__fromValue = None
__toValue = None
__stepConfig = {}
#===========================================================================
def __init__(self, tmClass, stepNum, parameters, globalConfig = {}):
threading.Thread.__init__(self)
# Initialize handles
self.setName("VRF STEP " + str(stepNum))
self.step = stepNum
self.__tmClass = tmClass
self.__definition = [ stepNum, parameters, globalConfig ]
# Status information accessed from tm interface
self.name = None
self.value = None
self.status = None
self.failed = False
self.error = None
self.updtime = " "
if len(parameters)<3:
raise SyntaxException("Malformed verification step")
# Comparison operator
self.__comparison = parameters[1]
# Check it
if type(self.__comparison)!=str:
raise SyntaxException("Bad parameter, expected comparison operator")
# Check wether first element is a param name or a tm item
if isinstance(parameters[0],TmItemClass):
self.__tmItem = parameters[0]
self.__paramName = parameters[0].name()
elif type(parameters[0]) == str:
self.__paramName = parameters[0]
self.__tmItem = self.__tmClass[self.__paramName]
else:
raise SyntaxException("Bad arguments")
# Guess the structure of the verification step
hasConfig = (type(parameters[-1]) == dict)
if hasConfig:
twoValues = (len(parameters)==5)
else:
twoValues = (len(parameters)==4)
self.__fromValue = parameters[2]
if twoValues:
self.__toValue = parameters[3]
else:
self.__toValue = None
# Build the configuration dictionary
self.__stepConfig = {}
self.__stepConfig.update(globalConfig)
self.__stepConfig["STEP_ID"] = self.step
# Get step specific configuration
if hasConfig:
self.__stepConfig.update(parameters[-1])
self.__updateInfo("UNINIT", False, "", False)
#===========================================================================
def __updateInfo(self, status, failed, reason = " ", notify = True):
# Configuration to be used in this step
useConfig = self.__stepConfig.copy()
# We do not want to wait for notifications
useConfig[Wait] = False
useConfig[Notify] = False
# The name
self.name = repr(self.step) + "@" + self.__paramName
self.updtime = str(datetime.datetime.now())[:-3]
# Build the value
if isinstance(self.__fromValue,TmItemClass):
if useConfig.get(ValueFormat) == ENG:
currentValue = self.__fromValue._getEng()
else:
currentValue = self.__fromValue._getRaw()
fromValue = str(currentValue)
else:
fromValue = str(self.__fromValue)
if self.__toValue is not None:
if isinstance(self.__toValue,TmItemClass):
if useConfig.get(ValueFormat) == ENG:
currentToValue = self.__toValue._getEng()
else:
currentToValue = self.__toValue._getRaw()
self.value = COMP_SYMBOLS[self.__comparison] +\
"[" + fromValue + ", " + str(currentToValue) + "]"
else:
self.value = COMP_SYMBOLS[self.__comparison] +\
"[" + fromValue + "," + str(self.__toValue) + "]"
else:
self.value = COMP_SYMBOLS[self.__comparison] + fromValue
self.status = status
self.failed = failed
self.reason = reason
if notify:
self.__tmClass.updateVerificationStatus( self )
#===========================================================================
def run(self):
# Update verification status info
self.__updateInfo("IN PROGRESS", False)
result = False
reason = " "
self.error = None
LOG("[V] Starting verification with config: " + repr(self.__stepConfig))
try:
if self.__comparison == eq:
result = self.__tmClass.eq( self.__tmItem, self.__fromValue, self.__stepConfig )
elif self.__comparison == neq:
result = self.__tmClass.neq( self.__tmItem, self.__fromValue, self.__stepConfig )
elif self.__comparison == lt:
result = self.__tmClass.lt( self.__tmItem, self.__fromValue, self.__stepConfig )
elif self.__comparison == gt:
result = self.__tmClass.gt( self.__tmItem, self.__fromValue, self.__stepConfig )
elif self.__comparison == le:
result = self.__tmClass.le( self.__tmItem, self.__fromValue, self.__stepConfig )
elif self.__comparison == ge:
result = self.__tmClass.ge( self.__tmItem, self.__fromValue, self.__stepConfig )
elif self.__comparison == bw:
result = self.__tmClass.between( self.__tmItem, self.__fromValue, self.__toValue, self.__stepConfig )
elif self.__comparison == nbw:
result = self.__tmClass.not_between( self.__tmItem, self.__fromValue, self.__toValue, self.__stepConfig )
actualValue = self.getActualValue()
LOG("[V] Comparison result: " + repr(result))
if not result:
reason = "Actual value: " + str(actualValue)
else:
reason = "Value is " + str(actualValue)
except DriverException,ex:
LOG("[V] Verification process failed: " + str(ex), LOG_ERROR)
self.error = ex
reason = ex.message
if (ex.reason != "unknown"):
reason += ". " + ex.reason
result = False
finally:
LOG("[V] Declaring verification process success: " + repr(result))
if result:
self.__updateInfo("SUCCESS", False, reason)
else:
# If PromptUser is false, do not mark it as failed
if self.__stepConfig[PromptUser] == True:
self.__updateInfo("FAILED", True, reason)
# If PromptUser is false but the failed check is caused by system failure, report it
# unless PromptFailure is False
else:
if (self.error is None) or (self.__stepConfig[PromptFailure] == False):
self.__updateInfo("SUPERSEDED", True, reason)
else:
self.__updateInfo("FAILED", True, reason)
#===========================================================================
    def getDefinition(self):
        # Return the verification step definition exactly as supplied at
        # construction time (presumably set in __init__ -- not visible here).
        return self.__definition
#===========================================================================
    def getParamName(self):
        """Return the TM item's name, with its description appended as
        '<name>: <description>' when a non-empty description exists."""
        desc = self.__tmItem.description()
        if desc != "": desc = ": " + desc
        return self.__tmItem.name() + desc
#===========================================================================
    def getActualValue(self):
        """Fetch the item's current value without blocking or notifying.

        Works on a copy of the step configuration so the caller's config
        dict is never mutated.
        """
        useConfig = self.__stepConfig.copy()
        # Wait/Notify are config keys defined elsewhere in this module --
        # presumably they disable blocking reads and change notifications.
        useConfig[Wait] = False
        useConfig[Notify] = False
        actualValue = self.__tmItem.value( useConfig )
        return actualValue
| [
"rafael.chinchilla@b4576358-0e6a-c6b8-6e87-62523fae65e4"
] | rafael.chinchilla@b4576358-0e6a-c6b8-6e87-62523fae65e4 |
fd4261e382f0c2af24db00d077a75a556802c706 | 5c579f811f4f400f8605a13ca85f044411fc8fcb | /Phase3/Code/LSH.py | 7acabdf812bcbcf2d0c227aecf08d13f65884b23 | [] | no_license | Satyak22/Multimedia-Retrieval-and-Analysis | 4c230098f101719cf458cf21de4ec66bca04d902 | c6cdabb02505cd4ccecb0e4f02d676b3a86e1545 | refs/heads/master | 2022-11-11T11:41:35.953323 | 2020-06-24T04:04:42 | 2020-06-24T04:04:42 | 274,545,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,117 | py | import json
import math
import os
import random
import numpy
import pandas as pd
from scipy.spatial import distance
from project_utils import get_data_matrix
from dimensionality_reduction import reduce_dimensions_lda, reduce_dimensions_svd
class ImageLSH():
    """Locality Sensitive Hashing (LSH) index over reduced image features.

    Builds ``num_layers`` hash layers of ``num_hashs`` hash functions each.
    Every hash function projects a feature vector onto a random vector drawn
    from the per-dimension range of the data, and quantises the projection
    magnitude into buckets of width ``w_length``.
    """
    def __init__(self, num_layers, num_hashs):
        print("Initializing LSH index with {0} Layers and {1} Hashes".format(num_layers, num_hashs))
        self.num_layers = num_layers
        self.num_hashs = num_hashs
        # Per-dimension (min, max) range of the reduced feature space.
        self.latent_range_dict = {}
        # Random projection vector for each of num_layers * num_hashs hashes.
        self.lsh_points_dict = {}
        # Euclidean norm of each projection vector (distance from the origin).
        self.lsh_range_dict = {}
        self.image_bucket_df = pd.DataFrame()
        self.image_latent_df = pd.DataFrame()
        # Bucket quantisation width; set in group_data().
        self.w_length = 0.0

    def load_data(self):
        # NOTE(review): "HOG_reduced" appears to name a pre-reduced HOG
        # feature set provided by project_utils -- confirm against
        # get_data_matrix()'s contract.
        self.data_matrix, self.image_ids = get_data_matrix("HOG_reduced")
        self.reduced_data = self.data_matrix
        return self.reduced_data, self.image_ids

    def assign_group(self, value):
        """
        Quantise a projection magnitude into a bucket index.
        :param value: signed projection magnitude
        :return: bucket index (negative values floor, others ceil)
        """
        if value < 0:
            return math.floor(value/self.w_length)
        else:
            return math.ceil(value / self.w_length)

    def init_lsh_vectors(self, U_dataframe):
        """
        Initialize the random LSH projection vectors from the data range.
        :param U_dataframe: reduced feature matrix as a DataFrame
        """
        # First finds range for each column in the df
        # Them, finds uniform distributions of the range for each column
        # That is assigned to each lsh point
        # The lsh distance from 0 vector to the lsh points list is found
        print("Initializing the LSH vectors")
        # NOTE(review): assumes the reduced feature space has 256 dimensions.
        origin = list(numpy.zeros(shape=(1, 256)))
        for column in U_dataframe:
            self.latent_range_dict[column] = (U_dataframe[column].min(), U_dataframe[column].max())
        for i in range(0, self.num_layers * self.num_hashs):
            cur_vector_list = []
            for column in U_dataframe:
                cur_vector_list.append(random.uniform(self.latent_range_dict[column][0], self.latent_range_dict[column][1]))
            self.lsh_points_dict[i] = cur_vector_list
            self.lsh_range_dict[i] = distance.euclidean(origin, cur_vector_list)

    def project_on_hash_function(self, image_vector, lsh_vector):
        """
        Magnitude of the projection of image_vector onto lsh_vector.
        :param image_vector:
        :param lsh_vector:
        :return: projection magnitude (0 when the vectors are orthogonal)
        """
        image_lsh_dot_product = numpy.dot(image_vector, lsh_vector)
        if image_lsh_dot_product == 0.0:
            return 0
        lsh_vector_dot_product = numpy.dot(lsh_vector, lsh_vector)
        # Standard vector-projection formula: (a.b / b.b) * b
        projection = image_lsh_dot_product/lsh_vector_dot_product*lsh_vector
        projection_magnitude = numpy.linalg.norm(projection)
        return projection_magnitude

    def LSH(self, vector):
        """
        Compute the bucket index under every hash function.
        :param vector:
        :return: list of bucket indices, one per hash function
        """
        bucket_list = []
        for lsh_vector in range(0, len(self.lsh_points_dict)):
            bucket_list.append(self.assign_group(self.project_on_hash_function(numpy.array(vector), numpy.array(self.lsh_points_dict[lsh_vector]))))
        return bucket_list

    def group_data(self):
        """
        Hash every image into its buckets.
        :return: DataFrame of bucket indices with an 'image_id' column
        """
        print("Grouping data into buckets")
        reduced_df = pd.DataFrame(self.reduced_data)
        self.init_lsh_vectors(reduced_df)
        # Bucket width derived from the shortest projection vector.
        self.w_length = min(self.lsh_range_dict.values()) / float(100)
        bucket_matrix = numpy.zeros(shape=(len(self.reduced_data), len(self.lsh_points_dict)))
        # the shape is number of samples in U * (L * k)
        for image in range(0, len(self.reduced_data)):
            bucket_matrix[image] = self.LSH(self.reduced_data[image])
        image_id_df = pd.DataFrame(self.image_ids, columns=['image_id'])
        self.image_latent_df = reduced_df.join(image_id_df, how="left")
        return pd.DataFrame(bucket_matrix).join(image_id_df, how="left")

    def index_data(self):
        """
        Build the hash-key -> set(image_id) index from the bucket table.
        :return: dict mapping 'b1-b2-...-bk' keys to sets of image ids
        """
        print("Indexing the structure..")
        index_structure_dict = {}
        counterval = 0
        for index, row in self.image_bucket_df.iterrows():
            image_id = row["image_id"]
            column = 0
            for i in range(0, self.num_layers):
                bucket = ""
                for j in range(0, self.num_hashs):
                    interval = row[column]
                    bucket = bucket + str(int(interval)) + "-"
                    column += 1
                # strip("-") removes the trailing separator added above.
                if bucket.strip("-") in index_structure_dict:
                    index_structure_dict[bucket.strip("-")].add(image_id)
                else:
                    image_set = set()
                    image_set.add(image_id)
                    index_structure_dict[bucket.strip("-")] = image_set
        return index_structure_dict

    def fetch_hash_keys(self, bucket_list):
        """
        Convert a flat bucket list into one hash key per layer.
        :param bucket_list: num_layers * num_hashs bucket indices
        :return: list of 'b1-b2-...-bk' keys, one per layer
        """
        column = 0
        hash_key_list = []
        for i in range(0, self.num_layers):
            bucket = ""
            for j in range(0, self.num_hashs):
                interval = bucket_list[column]
                if(j != self.num_hashs - 1):
                    bucket = bucket + str(int(interval)) + "-"
                else:
                    bucket = bucket + str(int(interval))
                column += 1
            hash_key_list.append(bucket)
        return hash_key_list

    def create_index_structure(self):
        """
        Build the full LSH index (buckets + inverted index) for the data.
        """
        # this contains the bucketed data and the image id on the right most column
        self.image_bucket_df = self.group_data()
        self.index_structure = self.index_data()
        return self.index_structure, self.lsh_points_dict

    def load_index_structure(self, idx, points_dict, w_length):
        # Restore a previously built index (e.g. deserialized from disk).
        self.index_structure = idx
        self.lsh_points_dict = points_dict
        self.w_length = w_length

    def find_similar_images(self, query_vector, no_of_images, mongo_client):
        """
        Approximate nearest neighbours of the query vector.
        :param query_vector: reduced feature vector of the query image
        :param no_of_images: number of neighbours requested
        :param mongo_client: MongoDB client for fetching stored features
        :return: (top-k (image, distance) pairs, #unique candidates, #total bucket hits)
        """
        query_bucket_list = self.LSH(query_vector)
        query_hash_key_list = self.fetch_hash_keys(query_bucket_list)
        query_hash_key_list = list(set(query_hash_key_list))
        print("Hash Key List {0}".format(query_hash_key_list))
        selected_image_set = set()
        nearest_neighbour_list = set()
        total_images_considered = []
        # Progressively coarsen the key by dropping trailing hash components
        # (rsplit("-", j)) to widen the candidate search.
        for j in range(0, self.num_hashs):
            for bucket in query_hash_key_list:
                print("Getting bucket - {0}".format(bucket.rsplit("-", j)[0]))
                images_in_current_bucket = self.index_structure.get(bucket.rsplit("-", j)[0], [''])
                images_in_current_bucket = set(images_in_current_bucket)
                images_in_current_bucket.discard('')
                selected_image_set.update(images_in_current_bucket)
                total_images_considered.extend(list(images_in_current_bucket))
        feature_vectors = []
        for img in selected_image_set:
            feature_vectors.append(list(mongo_client.mwdb_project.image_features.find({'imageName': img}))[0]["HOG_reduced"])
        for img, fv in zip(selected_image_set, feature_vectors):
            eucledian_distance = distance.euclidean(fv, query_vector)
            # Distance 0 is presumed to be the query image itself -- skipped.
            if (eucledian_distance != 0):
                nearest_neighbour_list.add((img, eucledian_distance))
            # if len(nearest_neighbour_list) >= no_of_images:
            #     break
        nearest_neighbour_list = sorted(nearest_neighbour_list, key=lambda x: x[1])
        return nearest_neighbour_list[:no_of_images], len(nearest_neighbour_list),len(total_images_considered)
| [
"satyakpatel22@gmail.com"
] | satyakpatel22@gmail.com |
df84280a1f89716d7f5c89b3b84b9f4e5b006cfe | b9d2e6fe3f694b3a7ffc226e308066bcbf781f07 | /supporting_components/graph_io.py | f802256842d8720e449e94fc403103dfb9f16acc | [] | no_license | VeraPrinsen/isomorphisms | 82ccbc6038f48bb8d43aeb2314c14c973331d0df | e131fe6d7f982545255a7405dde44ae49ea027cd | refs/heads/master | 2020-04-25T16:06:49.500823 | 2019-04-08T13:45:48 | 2019-04-08T13:45:48 | 172,899,245 | 0 | 1 | null | 2019-04-08T13:45:49 | 2019-02-27T11:02:24 | Python | UTF-8 | Python | false | false | 8,089 | py | """
Includes functions for reading and writing graphs, in a very simple readable format.
"""
# Version: 30-01-2015, Paul Bonsma
# Version: 29-01-2017, Pieter Bos
# updated 30-01-2015: writeDOT also writes color information for edges.
# updated 2-2-2015: writeDOT can also write directed graphs.
# updated 5-2-2015: no black fill color used, when more than numcolors**2 vertices.
# updated 29-1-2017: pep8 reformat, general improvements
import sys
from typing import IO, Tuple, List, Union
from supporting_components.graph import Graph, Vertex, Edge
DEFAULT_COLOR_SCHEME = "paired12"
NUM_COLORS = 12
def read_line(f: IO[str]) -> str:
    """Return the next line of *f* that is not a '#' comment.

    Returns the empty string at end of file.
    """
    line = f.readline()
    # ''.startswith('#') is False, so EOF falls through naturally.
    while line.startswith('#'):
        line = f.readline()
    return line
def read_graph(graphclass, f: IO[str]) -> Tuple[Graph, List[str], bool]:
    """
    Read a single graph from a file.
    :param graphclass: The class of the graph
    :param f: The file
    :return: (graph, option lines seen before the vertex count,
              True when a '---' separator follows, i.e. more graphs remain)
    """
    options = []
    # Consume lines until one parses as the vertex count; every non-integer
    # line before it is recorded as an "option".
    # NOTE(review): if EOF is hit before any integer line, read_line keeps
    # returning '' and this loop never terminates -- confirm inputs are
    # always well formed, or consider raising on empty input.
    while True:
        try:
            line = read_line(f)
            n = int(line)
            graph = graphclass(directed=False, n=n)
            break
        except ValueError:
            if len(line) > 0 and line[-1] == '\n':
                options.append(line[:-1])
            else:
                options.append(line)
    line = read_line(f)
    edges = []
    try:
        # Parse 'tail,head' or 'tail,head:weight' lines; the first line that
        # fails to parse (or EOF) deliberately ends edge parsing via the
        # broad except below.
        while True:
            comma = line.find(',')
            if ':' in line:
                colon = line.find(':')
                edges.append((int(line[:comma]), int(line[comma + 1:colon]), int(line[colon + 1:])))
            else:
                edges.append((int(line[:comma]), int(line[comma + 1:]), None))
            line = read_line(f)
    except Exception:
        pass
    indexed_nodes = list(graph.vertices)
    for edge in edges:
        graph += Edge(indexed_nodes[edge[0]], indexed_nodes[edge[1]], edge[2])
    # A line starting with '-' is the '--- Next graph:' separator.
    if line != '' and line[0] == '-':
        return graph, options, True
    else:
        return graph, options, False
def read_graph_list(graph_class, f: IO[str]) -> Tuple[List[Graph], List[str]]:
    """Read consecutive graphs from *f* until no separator follows.

    :param graph_class: class used to instantiate each graph
    :param f: the file
    :return: (list of graphs, accumulated option lines)
    """
    graphs = []
    options = []
    more_graphs = True
    while more_graphs:
        graph, graph_options, more_graphs = read_graph(graph_class, f)
        options.extend(graph_options)
        graphs.append(graph)
    return graphs, options
def load_graph(f: IO[str], graph_class=Graph, read_list: bool = False) -> Union[Tuple[List[Graph], List[str]], Graph]:
    """Load a graph (or a list of graphs) from a file.

    :param f: the file
    :param graph_class: graph class to instantiate (subclass to customize)
    :param read_list: when True, read every graph in the file and return
        ``(graphs, options)``; otherwise return just the first graph.
    """
    if not read_list:
        graph, _options, _more = read_graph(graph_class, f)
        return graph
    return read_graph_list(graph_class, f)
def input_graph(graph_class=Graph, read_list: bool = False) -> Union[Tuple[List[Graph], List[str]], Graph]:
    """
    Load a graph from sys.stdin; thin wrapper around :func:`load_graph`.
    :param graph_class: The class of the graph. You may subclass the default graph class and add your own here.
    :param read_list: Specifies whether to read a list of graphs from the file, or just a single graph.
    :return: The graph, or a list of graphs.
    """
    return load_graph(f=sys.stdin, graph_class=graph_class, read_list=read_list)
def write_line(f: IO[str], line: str):
    """Write *line* to *f*, terminated with a newline.

    :param f: the file
    :param line: the text to write (without trailing newline)
    """
    f.write('%s\n' % line)
def write_graph_list(graph_list: List[Graph], f: IO[str], options=[]):
    """
    Write a graph list to a file in the simple text format read back by
    :func:`read_graph_list`.
    :param graph_list: The list of graphs
    :param f: the file
    :param options: the (optional) options to write to the file.

    NOTE(review): ``options=[]`` is a mutable default argument; harmless
    here because it is only iterated, never mutated.
    """
    # we may only write options that cannot be seen as an integer:
    for S in options:
        try:
            int(S)
        except ValueError:
            write_line(f, str(S))
    for i, g in enumerate(graph_list):
        n = len(g)
        write_line(f, '# Number of vertices:')
        write_line(f, str(n))
        # Give the vertices (temporary) labels from 0 to n-1:
        label = {}
        for vertex_index, vertex in enumerate(g):
            label[vertex] = vertex_index
        write_line(f, '# Edge list:')
        for e in g.edges:
            # Weighted edges are written as 'tail,head:weight'.
            if e.weight:
                write_line(f, str(label[e.tail]) + ',' + str(label[e.head]) + ':' + str(e.weight))
            else:
                write_line(f, str(label[e.tail]) + ',' + str(label[e.head]))
        # Separator between consecutive graphs (omitted after the last one).
        if i + 1 < len(graph_list):
            write_line(f, '--- Next graph:')
def save_graph(graph_list: Union[Graph, List[Graph]], f: IO[str], options=[]):
    """Write one graph, or a list of graphs, to a file.

    :param graph_list: a single graph or a list of graphs
    :param f: the file
    :param options: optional option lines to include
    """
    # Normalize a single graph into a one-element list before writing.
    graphs = graph_list if type(graph_list) is list else [graph_list]
    write_graph_list(graphs, f, options)
def print_graph(graph_list: Union[Graph, List[Graph]], options=[]):
    """Print one graph, or a list of graphs, to sys.stdout.

    :param graph_list: a single graph or a list of graphs
    :param options: optional option lines to include
    """
    graphs = graph_list if type(graph_list) is list else [graph_list]
    write_graph_list(graphs, sys.stdout, options)
def write_dot(graph: Graph, f: IO[str], directed=False):
    """
    Writes a given graph to a file in .dot format.
    :param graph: The graph. If its vertices contain attributes `label`, `colortext` or `colornum`, these are also
    included in the file. If its edges contain an attribute `weight`, these are also included in the file.
    :param f: The file.
    :param directed: Whether the graph should be drawn as a directed graph.
    """
    if directed:
        f.write('digraph G {\n')
    else:
        f.write('graph G {\n')
    # Assign sequential integer names to vertices for the dot output.
    name = {}
    next_name = 0
    for v in graph:
        name[v] = next_name
        next_name += 1
        options = 'penwidth=3,'
        if hasattr(v, 'label'):
            options += 'label="' + str(v.label) + '",'
        # Explicit color text wins over a numeric color-scheme index.
        if hasattr(v, 'colortext'):
            options += 'color="' + v.colortext + '",'
        elif hasattr(v, 'colornum'):
            # Color schemes are 1-based, hence the +1 after the modulo.
            options += 'color=' + str(v.colornum % NUM_COLORS + 1) + ', colorscheme=' + DEFAULT_COLOR_SCHEME + ','
            if v.colornum >= NUM_COLORS:
                # Beyond NUM_COLORS, also vary the fill color for distinction.
                options += 'style=filled,fillcolor=' + str((v.colornum // NUM_COLORS) % NUM_COLORS + 1) + ','
        if len(options) > 0:
            # options[:-1] drops the trailing comma accumulated above.
            f.write(' ' + str(name[v]) + ' [' + options[:-1] + ']\n')
        else:
            f.write(' ' + str(name[v]) + '\n')
    f.write('\n')
    for e in graph.edges:
        options = 'penwidth=2,'
        if hasattr(e, 'weight'):
            options += 'label="' + str(e.weight) + '",'
        if hasattr(e, 'colortext'):
            options += 'color="' + e.colortext + '",'
        elif hasattr(e, 'colornum'):
            options += 'color=' + str(e.colornum % NUM_COLORS + 1) + ', colorscheme=' + DEFAULT_COLOR_SCHEME + ','
        if len(options) > 0:
            options = ' [' + options[:-1] + ']'
        if directed:
            f.write(' ' + str(name[e.tail]) + ' -> ' + str(name[e.head]) + options + '\n')
        else:
            f.write(' ' + str(name[e.tail]) + '--' + str(name[e.head]) + options + '\n')
    f.write('}')
if __name__ == "__main__":
    # Smoke test: needs a local 'mygraphs' module and an 'examplegraph.gr'
    # file next to this script.
    from mygraphs import MyGraph
    with open('examplegraph.gr') as f:
        G = load_graph(f, MyGraph)
    print(G)
    # Delete an arbitrary vertex and print the result again.
    G.del_vert(next(iter(G.vertices)))
    print(G)
| [
"vera.prinsen@nedap.com"
] | vera.prinsen@nedap.com |
20da8201bce240d8f32dd27916f1c50b8834eb34 | d3f8dddd55c921f17e68d21db97045d030fdcab8 | /duloi/django_app/duriann/apps.py | e6a476cb1bd9e64b8dad7f9f8f7b79d909e77e36 | [] | no_license | SoyarBeanery/Django-WebApp--Durian | 268bd6eb70ac047451b4ea62bfa3a800d8ac41f6 | 71dda4d991b737aa9029c6bdb2221684e47c1a22 | refs/heads/master | 2022-12-14T12:09:53.273520 | 2020-08-13T16:39:22 | 2020-08-13T16:39:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class DuriannConfig(AppConfig):
    """Django application configuration for the 'duriann' app."""
    name = 'duriann'
| [
"yeoyeewen@gmail.com"
] | yeoyeewen@gmail.com |
bfe5cbc0d0982f816c0f439ccfe312343bd3a6b6 | 5178f5aa20a857f8744fb959e8b246079c800c65 | /02_oop/tr/src/21_str/str_tr1.py | 28f1d95b840592e11cb603addacf54d232aacff2 | [] | no_license | murayama333/python2020 | 4c3f35a0d78426c96f0fbaed335f9a63227205da | 8afe367b8b42fcf9489fff1da1866e88f3af3b33 | refs/heads/master | 2021-05-19T04:03:46.295906 | 2021-03-09T22:23:58 | 2021-03-09T22:23:58 | 251,520,131 | 0 | 3 | null | 2020-10-26T01:20:09 | 2020-03-31T06:35:18 | Python | UTF-8 | Python | false | false | 113 | py | language = "python"
print(language.upper())
print(language.capitalize())
print(language.capitalize().swapcase())
| [
"murayama333@gmail.com"
] | murayama333@gmail.com |
58e41bee66689beee1a09e78b3d3e34bc752c268 | 5b6661638cfb31e40c1175e994dd1405701b2154 | /train/dataset.py | 3176f0a2477eae4674cbb2c9fd314d191a99e12a | [] | no_license | xiscosc/MasterThesis-anp-2016-dist | 5cc580e15358ac725450fe061c4d481fff4c1c27 | 8cb5e45773cf3e45154620747dab139bfd8a0229 | refs/heads/master | 2021-03-24T09:31:43.311499 | 2017-03-02T15:25:19 | 2017-03-02T15:25:19 | 78,958,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,333 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Small library that points to a data set.
Methods of Data class:
data_files: Returns a python list of all (sharded) data set files.
num_examples_per_epoch: Returns the number of examples in the data set.
num_classes: Returns the number of classes in the data set.
reader: Return a reader for a single entry from the data set.
"""
from abc import ABCMeta
from abc import abstractmethod
import os
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_string('data_dir', '/tmp/mydata',
"""Path to the processed data, i.e. """
"""TFRecord of Example protos.""")
class Dataset(object):
    """A simple class for handling data sets."""
    # NOTE(review): __metaclass__ is the Python 2 idiom; under Python 3 this
    # assignment has no effect and @abstractmethod is not enforced.
    __metaclass__ = ABCMeta

    def __init__(self, name, subset):
        """Initialize dataset using a subset and the path to the data."""
        assert subset in self.available_subsets(), self.available_subsets()
        self.name = name
        self.subset = subset

    @abstractmethod
    def num_classes(self):
        """Returns the number of classes in the data set."""
        pass
        # return 10

    @abstractmethod
    def num_examples_per_epoch(self):
        """Returns the number of examples in the data subset."""
        pass
        # if self.subset == 'train':
        #     return 10000
        # if self.subset == 'validation':
        #     return 1000

    @abstractmethod
    def download_message(self):
        """Prints a download message for the Dataset."""
        pass

    def available_subsets(self):
        """Returns the list of available subsets."""
        return ['train', 'val']

    def data_files(self):
        """Returns a python list of all (sharded) data subset files.
        Returns:
            python list of all (sharded) data set files.
        Raises:
            ValueError: if there are not data_files matching the subset.
        """
        # NOTE(review): despite the docstring, the code exits the process
        # instead of raising ValueError when no files match.
        tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)
        data_files = tf.gfile.Glob(tf_record_pattern)
        if not data_files:
            print('No files found for dataset %s/%s at %s' % (self.name,
                                                              self.subset,
                                                              FLAGS.data_dir))
            self.download_message()
            exit(-1)
        return data_files

    def reader(self):
        """Return a reader for a single entry from the data set.
        See io_ops.py for details of Reader class.
        Returns:
            Reader object that reads the data set.
        """
        return tf.TFRecordReader()
| [
"xiscosastre@gmail.com"
] | xiscosastre@gmail.com |
ac5ab9f56988ecb664f89a097f82c4affa58d76b | 7b40412c35f5c68ea0f6b4c3b90a454d36e7e328 | /2/2.10.py | f6d7c71ae622346d24fe2924307187a51acdd56d | [] | no_license | VladKitTort/Michael_Dawson | f5085369b99c8da9cf6dbe1b652a4112a55ea9c8 | b8116ccd917379ac343ddd9e7a181cca0d6e52db | refs/heads/master | 2022-04-21T02:44:27.934620 | 2020-04-23T16:40:53 | 2020-04-23T16:40:53 | 257,879,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | # Бесполезные факты
#
# Узнает у пользователя его/ ее ичные данные и выдает несколько фактов
# о нем/ ней. Эти факты истинны, но совершенно бесполезны.
name = input("Привет. Как тебя зовут? ")
age = input("Сколько тебе лет? ")
age = int(age)
weight = int(input("Хорошо. И последний вопрос. Сколько в тебе килограммов? "))
print("\nЕсли бы поэт Каммингс адресовал тебе письмо, он бы обратился к тебе так: ", name.lower())
print("А если бы это был рехнувшийся Каммингс, то так: ", name.upper())
called = name * 5
print("\nЕсли бы маленький ребенок решил привлечь твое внимание")
print("он произнес бы твое имя так: ")
print(called)
seconds = age * 365 * 24 * 60 * 60
print("\nТвой нынешний возраст - свыше", seconds, "секунд.")
moon_weight = weight / 6
print("\nЗнаете ли вы что на Луне вы весили бы всего", moon_weight, "кг?")
sun_weight = weight * 27.1
print("А вот находясь на Солнце, вы бы весили", sun_weight, "кг. (Но увы, это продолжалось бы не долго...)")
input("\n\nНажмите Enter, чтобы выйти.")
| [
"vovatortik@mail.ru"
] | vovatortik@mail.ru |
a46f2d6cf64fc5e807fb2b7418fddb89d2b4ca53 | 469e7ea421d40b16ea46aebdd82842eab91eb3fd | /galleryTddProject/gallery/admin.py | fcab762967a1e21c9a17e0aea1a6f1f2ffca1614 | [] | no_license | nathalia1096/KataRest | 5bec2dba7bac439a3c477de75bcb3a4b60029d3e | e34cd42d9bff3ef1de1e0dd543fcf5dbf916a74c | refs/heads/master | 2020-05-01T07:47:07.443928 | 2019-03-26T04:57:33 | 2019-03-26T04:57:33 | 177,359,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from django.contrib import admin
from .models import Image, Portfolio, UserProfile
# Register your models here.
# Expose the gallery models in the Django admin interface.
admin.site.register(Image)
admin.site.register(Portfolio)
admin.site.register(UserProfile)
"nathalia.alvarez@globant.com"
] | nathalia.alvarez@globant.com |
80b408044a18397d5a1c2dca8ad7f9c7b4411008 | 0fb391112de0ceb363e9b15a5cf16848235fce2f | /semantic_adversarial_examples/main.py | 40d737a68f1cf20695d2385f4399cacc007292c4 | [] | no_license | sarathknv/adversarial-examples-pytorch | d0178b31902627b17c6bdbf537f62e435ac965c9 | 2979544963689d93c9af960e92975e93593c6ddd | refs/heads/master | 2023-05-10T04:38:50.857141 | 2023-04-24T20:15:46 | 2023-04-24T20:15:46 | 119,166,118 | 332 | 67 | null | null | null | null | UTF-8 | Python | false | false | 4,411 | py | import torch
import torch.backends
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from tqdm import tqdm
import attacks
from models import ResNet50, VGG16
def validate(model, val_loader, device):
    """Compute top-1 accuracy of *model* over *val_loader*.

    Runs in eval mode with gradients disabled and shows a tqdm progress
    bar whose postfix tracks the running accuracy.

    :param model: classifier producing per-class logits
    :param val_loader: DataLoader yielding (inputs, targets) batches
    :param device: torch.device to run inference on
    :return: final accuracy as a float (None for an empty loader)
    """
    model.eval()
    seen = 0
    correct = 0
    accuracy = None
    with torch.no_grad():
        with tqdm(val_loader, desc='Val') as pbar:
            for batch_idx, (inputs, targets) in enumerate(pbar):
                inputs = inputs.float().to(device)
                targets = targets.long().to(device)
                logits = model(inputs)
                predictions = logits.data.argmax(dim=1)
                correct += (predictions == targets).sum().item()
                seen += inputs.size(0)
                accuracy = correct / seen
                pbar.set_postfix(acc=accuracy)
    return accuracy
def main(args):
    """Evaluate a pretrained CIFAR-10 classifier, then measure its accuracy
    under a semantic (hue-shift) adversarial attack.

    :param args: configuration object (see ``Args`` under ``__main__``).
    """
    # The training split is loaded with augmentation, but only the
    # validation loader is actually used below.
    train_data = datasets.CIFAR10('./data', train=True, download=True,
                                  transform=transforms.Compose([
                                      transforms.RandomHorizontalFlip(),
                                      transforms.RandomCrop(32, padding=4),
                                      transforms.RandomRotation(20),
                                      transforms.ToTensor(),
                                  ]))
    val_data = datasets.CIFAR10('./data', train=False, download=True,
                                transform=transforms.Compose([
                                    transforms.ToTensor(),
                                ]))
    train_loader = DataLoader(train_data,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers)
    val_loader = DataLoader(val_data,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.num_workers)
    checkpoint = torch.load(args.checkpoint, map_location=args.device)
    # model = VGG16()
    model = ResNet50()
    model.load_state_dict(checkpoint['state_dict'])
    model.to(args.device)
    model.eval()
    criterion = nn.CrossEntropyLoss()
    val_acc = validate(model, val_loader, args.device)
    print('Baseline accuracy: {}'.format(val_acc))
    # Semantic adversarial examples.
    model.eval()
    total_samples = 0
    correct_pred = 0
    correct_pred_adv = 0
    accuracy = None
    with tqdm(val_loader, desc='adv') as pbar:
        for i, (x, y) in enumerate(pbar):
            x = x.float().to(args.device)
            y = y.long().to(args.device)
            outputs = model(x)
            _, y_pred = torch.max(outputs.data, 1)
            # Gradient-guided hue shift within [alpha, beta]; y_adv are the
            # model's predictions on the perturbed images.
            x_adv, y_adv, factor = attacks.hue_gradient(
                x,
                y,
                model,
                criterion,
                step_size=args.step_size,
                alpha=args.alpha,
                beta=args.beta,
                max_iter=args.max_iter,
                device=args.device,
                verbose=False
            )
            # x_adv, y_adv = attacks.hue_random(
            #     x,
            #     y,
            #     model,
            #     alpha=alpha,
            #     beta=beta,
            #     max_iter=max_iter,
            #     device=args.device,
            #     verbose=False
            # )
            correct_pred += (y_pred == y).sum().item()
            correct_pred_adv += (y_adv == y).sum().item()
            total_samples += x.size(0)
            accuracy = correct_pred / total_samples
            accuracy_adv = correct_pred_adv / total_samples
            pbar.set_postfix(acc=accuracy, acc_adv=accuracy_adv)
    print('Original accuracy: {}\nAttack accuracy: {}'.format(accuracy, accuracy_adv))
if __name__ == '__main__':
    class Args:
        """Attack hyper-parameters and runtime configuration."""
        # Hue-shift search range [alpha, beta] in radians.
        alpha = -torch.pi
        beta = torch.pi
        max_iter = 10  # T
        # Step size scaled to the search range and iteration budget.
        step_size = 2.5 * (beta - alpha) / (2 * max_iter)
        checkpoint = 'resnet50.pth.tar'
        num_workers = 0
        batch_size = 100
        # Prefer CUDA, then Apple MPS, else fall back to CPU.
        if torch.cuda.is_available():
            device = torch.device('cuda')
        elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
            device = torch.device('mps')
        else:
            device = torch.device('cpu')
        # device = torch.device('cpu')

    args = Args()
    main(args)
| [
"sarathknv@gmail.com"
] | sarathknv@gmail.com |
c6179f3cfacaf581c9ab187719af097876c7fa09 | b235e9b9b43c37aff321aee2319742d3ddeb9f15 | /trees/trees.py | a12ef82ae95ad67ae9568ac9a7c0464175c672fe | [] | no_license | hch001/ml | 9cb714a391834dab0f7e5ed54d79e41c1a6e65e5 | 5cced8ed9f3b01f0f2144a533b60e5c46a40ba9e | refs/heads/master | 2022-04-05T10:48:34.698215 | 2020-02-18T18:31:20 | 2020-02-18T18:31:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,199 | py | # 决策树
import numpy as np
import matplotlib.pyplot as plt
import random
from math import log
def createDataSet():
    """Return the toy data set and its feature names.

    Each row is [no-surfacing, flippers, class-label]; the last column is
    the class used by the decision-tree builder.
    """
    samples = [
        ['yes', 'yes', 'yes'],
        ['yes', 'yes', 'yes'],
        ['yes', 'no', 'no'],
        ['no', 'yes', 'no'],
        ['no', 'yes', 'no'],
    ]
    feature_names = ['No surfacing', 'Flippers']
    return samples, feature_names
def calShannonEnt(dataset):
    """Shannon entropy of the class labels (last column) of *dataset*.

    H = -sum_i p_i * log2(p_i), where p_i is the relative frequency of
    class i among the rows.
    """
    total = len(dataset)
    class_counts = {}
    for row in dataset:
        class_counts[row[-1]] = class_counts.get(row[-1], 0) + 1
    entropy = 0.0
    for count in class_counts.values():
        p = float(count / total)
        entropy -= p * (log(p, 2))
    return entropy
def splitDataset(dataset, axis, value):
    """Return the rows whose feature at *axis* equals *value*, with that
    feature column removed (it has served as the split criterion)."""
    return [row[:axis] + row[axis + 1:] for row in dataset if row[axis] == value]
def chooseBestFeatureToSplit(dataset):
    """Return the index of the feature with the highest information gain.

    Returns -1 when no split yields a strictly positive gain.
    """
    base_entropy = calShannonEnt(dataset)  # entropy before any split
    n_features = len(dataset[0]) - 1       # last column is the class label
    best_feature = -1
    best_gain = 0.0
    for axis in range(n_features):
        # Weighted entropy after splitting on every distinct value of axis.
        split_entropy = 0.0
        for value in set(row[axis] for row in dataset):
            subset = splitDataset(dataset, axis, value)
            weight = len(subset) / float(len(dataset))
            split_entropy += weight * calShannonEnt(subset)
        gain = base_entropy - split_entropy
        if gain > best_gain:
            best_gain = gain
            best_feature = axis
    return best_feature
def majorityCnt(classList):
    """Return the most frequent label in *classList*.

    Ties are broken in favor of the label seen first, matching the
    stable-sort behavior of the original implementation.
    """
    tally = {}
    for label in classList:
        tally[label] = tally.get(label, 0) + 1
    return max(tally.items(), key=lambda kv: kv[1])[0]
def createTree(dataSet, labels):  # labels holds the feature names
    """Recursively build an ID3 decision tree as nested dicts.

    NOTE: *labels* is mutated in place (``del``) when a feature is
    consumed; sibling branches receive a fresh copy via ``labels[:]``.
    """
    classList = [i[-1] for i in dataSet]
    # All rows share one class: leaf node.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # No features left to split on: majority-vote leaf.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeature = chooseBestFeatureToSplit(dataSet)
    bestFeatureLabel = labels[bestFeature]
    myTree = {bestFeatureLabel: {}}
    del (labels[bestFeature])
    featureValues = [example[bestFeature] for example in dataSet]
    featureValues = set(featureValues)
    for value in featureValues:
        # Copy so each branch starts from the same remaining label list.
        subLabels = labels[:]
        myTree[bestFeatureLabel][value] = createTree(splitDataset(dataSet, bestFeature, value), subLabels)
    return myTree
if __name__ == '__main__':
    # Demo: build and print the ID3 tree for the toy data set.
    dataset, labels = createDataSet()
    print(createTree(dataset, labels))
| [
"13959582448@163.com"
] | 13959582448@163.com |
76a74e65ebc0beed5be5ef08265dae29b1d940ae | 320e99e96bbb4c34983a327ae433a36b138c14b6 | /khufu_siteview/templatedir.py | e6186adb3344d5d1260bbb828a271de7a7797db5 | [] | no_license | khufuproject/khufu_siteview | 958d737a32ce60cdd9d638509c5ed5e18c1745a8 | c990b637f25318291a10a1f626f3829025078f26 | refs/heads/master | 2021-01-19T14:33:40.168793 | 2011-04-26T11:33:09 | 2011-04-26T11:33:09 | 1,630,968 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,457 | py | import os
import sys
from paste.fileapp import FileApp
from pyramid.asset import abspath_from_asset_spec
from pyramid.renderers import render_to_response, RendererHelper
from pyramid import path
from pyramid.httpexceptions import HTTPNotFound
import re
import logging
logger = logging.getLogger('khufu_siteview')
class DirLister(object):
    """Directory lister that hides dot-files and editor backup files."""

    root_listdir = staticmethod(os.listdir)

    # Entries matching any of these patterns are excluded from listings.
    excludes = [
        re.compile('^[.].*$'),
        re.compile('~$'),
    ]

    def is_valid_file(self, v):
        """Return True when *v* matches none of the exclude patterns."""
        matched = next((pattern for pattern in self.excludes
                        if pattern.search(v)), None)
        if matched is None:
            return True
        logger.debug('Skipping "%s"' % v)
        return False

    def listdir(self, path):
        """List *path*, keeping only entries accepted by is_valid_file."""
        return filter(self.is_valid_file, self.root_listdir(path))
listdir = DirLister().listdir
class Curry(object):
    """Pre-bind positional and keyword arguments to a callable.

    Calling the instance appends its positional arguments after the bound
    ones and lets its keyword arguments override the bound keywords
    (similar to functools.partial).
    """
    def __init__(self, callback, *cb_args, **cb_kwargs):
        self.callback = callback
        self.cb_args = cb_args
        self.cb_kwargs = cb_kwargs

    def __call__(self, *args, **kwargs):
        combined_args = list(self.cb_args)
        combined_args.extend(args)
        combined_kwargs = dict(self.cb_kwargs)
        combined_kwargs.update(kwargs)
        return self.callback(*combined_args, **combined_kwargs)
class TemplateDirView(object):
dir_exists = staticmethod(os.path.isdir)
lister = DirLister()
def __init__(self, assetspec, package=None):
pname = package
if not isinstance(package, basestring) \
and hasattr(package, '__name__'):
pname = pname.__name__
if ':' not in assetspec and package:
assetspec = pname + ':' + assetspec
if not assetspec.endswith('/'):
assetspec += '/'
self.assetspec = assetspec
self.basepath = os.path.abspath(abspath_from_asset_spec(assetspec))
if not self.dir_exists(self.basepath):
raise ValueError('%s <-> %s does not exist as a directory'
% (assetspec, self.basepath))
def _diritem_iter(self, path):
for x in self.lister.listdir(path):
yield {'label': x, 'link': x}
def render_listing(self, request, path):
relative = request.url[len(request.application_url):]
items = [x for x in self._diritem_iter(path)]
return render_to_response('khufu_siteview:templates/listing.jinja2',
{'path': relative, 'items': items},
request)
def find_index(self, path):
for x in self.lister.listdir(path):
if x.startswith('index.'):
return os.path.join(path, x)
return None
def get_handler(self, asset, request):
if not hasattr(self, '_cache'):
self._cache = {}
cache = self._cache
res = cache.get(asset)
if res is None:
res = cache[asset] = self._build_handler(asset, request)
else:
logger.info('Cache hit for: %s' % asset)
return res
def _build_handler(self, asset, request):
    """Build a zero-argument handler callable for *asset*.

    Dispatch order: refuse parent-directory escapes, serve a directory
    (index file or generated listing), render a file through a Pyramid
    renderer if one matches, otherwise serve it statically; 404 is the
    final fallback.
    """
    assetpath = os.path.abspath(abspath_from_asset_spec(asset))
    if not assetpath.startswith(self.basepath):
        # make sure url scheme wasn't tricked into going into parent dirs
        return Curry(HTTPNotFound,
                     comment=request.url[len(request.application_url):])
    if os.path.isdir(assetpath):
        index = self.find_index(assetpath)
        if index:
            logger.debug('serving default index file: ' + index)
            return Curry(render_to_response, renderer_name=index, value={})
        return Curry(self.render_listing, path=assetpath)
    if os.path.isfile(assetpath):
        # Try a registered Pyramid renderer first (e.g. a template);
        # ValueError means no renderer matches this file's extension.
        helper = RendererHelper(name=asset, registry=request.registry)
        try:
            if helper.renderer is not None:
                return Curry(helper.render_to_response, value={},
                             system_values=None)
        except ValueError:
            pass

        # Fall back to serving the file statically via WSGI FileApp.
        def serve_file(request, application):
            return request.get_response(application)
        fileapp = FileApp(filename=assetpath)
        return Curry(serve_file, application=fileapp)
    return Curry(HTTPNotFound,
                 comment=request.url[len(request.application_url):])
def __call__(self, request):
    """Pyramid view entry point: dispatch *request.subpath* to a handler."""
    assetpath = self.assetspec + '/'.join(request.subpath)
    handler = self.get_handler(assetpath, request)
    return handler(request=request)
def caller_package():
    """Walk up the call stack and return the first package that does not
    belong to pdb/pyramid/khufu_siteview; best-effort, or None."""
    exclude = ('pdb', 'pyramid', 'khufu_siteview')
    package = None
    # will only check max of 20 stack levels
    for x in range(20):
        m = path.caller_package(x)
        mname = m.__name__
        # Skip frames belonging to an excluded package or its subpackages.
        bad = False
        for ex in exclude:
            if mname == ex or mname.startswith(ex + '.'):
                bad = True
                break
        if not bad:
            package = m
            break
    if package is not None:
        # Running as a script: resolve '__main__' to its containing package.
        if package.__name__ == '__main__' and hasattr(package, '__package__'):
            package = sys.modules[package.__package__]
    return package
def add_templateview_route(config, assetspec, path):
    """Register a 'templview' route that serves *assetspec* under *path*."""
    route_pattern = path if path.endswith('/') else path + '/'
    route_pattern += '*subpath'
    view = TemplateDirView(assetspec, package=caller_package())
    config.add_route('templview', route_pattern, view=view)
| [
"rocky@serverzen.com"
] | rocky@serverzen.com |
98f608ebac936a9529c1d937758f6d9638c51234 | 5651c5421273957cfdc35902e07531c098efb33e | /consult.py | 4f32faa74add8c43e863a2b99b4203e29f0045c6 | [] | no_license | dianakooo/python | 37ede05ca3163c86aed5b5dcb777c405e4e99402 | ace41acded063caa65ad289a8627222b9b33aba7 | refs/heads/master | 2021-09-16T10:01:31.561531 | 2018-06-19T09:42:34 | 2018-06-19T09:42:34 | 105,384,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | lines2016 = []
# Collect the 2016 rows (column 3 holds the year) from the CSV export.
# Stream the file line by line instead of materialising it with readlines().
with open("happiness-cantril-ladder.csv", encoding='utf-8') as f:
    for line in f:
        # Naive split: breaks on quoted commas — fine for this dataset.
        cells = line.split(',')
        if cells[2] == '2016':
            lines2016.append(cells)

user_country = input("Your country: ")
for row in lines2016:
    if row[0] == user_country:
        # The last cell still carries the raw line's trailing newline.
        print(row[-1])
        break
else:
    # for/else replaces the old `a` counter flag: runs only when no
    # matching country was found.
    print("Country not found.")
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAA
#AAAAAAAAAAAAAAAAAAAAA
#AAAAAAAAAAAAA
#AAAAA
# how do I sort the lines??
sorted(..., reverse=True)
| [
"noreply@github.com"
] | noreply@github.com |
818f57f3b284ac9d0364a114f16772bdae2fa0e8 | 83e21dcd88961e01d7b6d76c1e7d3e0c405bb7a2 | /homeassistant/components/climate/velbus.py | ab8542541c8f7c14a6b70d86effd047f61f898f3 | [
"Apache-2.0"
] | permissive | skalavala/home-assistant | 0a61886a8e399d6c46bf791927a69557edfdebb3 | 66d6db7934db1af0c560ccffd92cf4a114ef5841 | refs/heads/dev | 2020-04-04T11:35:24.377362 | 2018-11-02T17:40:05 | 2018-11-02T17:40:05 | 155,896,654 | 3 | 1 | Apache-2.0 | 2018-11-02T17:00:10 | 2018-11-02T17:00:09 | null | UTF-8 | Python | false | false | 2,463 | py | """
Support for Velbus thermostat.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/climate.velbus/
"""
import logging
from homeassistant.components.climate import (
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, ClimateDevice)
from homeassistant.components.velbus import (
DOMAIN as VELBUS_DOMAIN, VelbusEntity)
from homeassistant.const import ATTR_TEMPERATURE
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['velbus']
OPERATION_LIST = ['comfort', 'day', 'night', 'safe']
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the Velbus thermostat platform."""
    if discovery_info is None:
        return
    controller = hass.data[VELBUS_DOMAIN]
    entities = [VelbusClimate(controller.get_module(item[0]), item[1])
                for item in discovery_info]
    async_add_entities(entities)
class VelbusClimate(VelbusEntity, ClimateDevice):
    """Representation of a Velbus thermostat.

    All state is delegated to the underlying ``self._module``/``self._channel``
    (populated by VelbusEntity); this class only adapts them to the
    Home Assistant climate interface.
    """

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    @property
    def temperature_unit(self):
        """Return the unit this state is expressed in."""
        return self._module.get_unit(self._channel)

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._module.get_state(self._channel)

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        return self._module.get_climate_mode()

    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        return OPERATION_LIST

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._module.get_climate_target()

    def set_operation_mode(self, operation_mode):
        """Set new target operation mode."""
        self._module.set_mode(operation_mode)
        self.schedule_update_ha_state()

    def set_temperature(self, **kwargs):
        """Set new target temperatures."""
        # Ignore service calls that do not carry a temperature.
        if kwargs.get(ATTR_TEMPERATURE) is not None:
            self._module.set_temp(kwargs.get(ATTR_TEMPERATURE))
        self.schedule_update_ha_state()
| [
"mail@fabian-affolter.ch"
] | mail@fabian-affolter.ch |
7e5524657678ec0f5d9f1d4e42a104c60bda7e18 | 0d3b16041f49156b45a820c111af33f6b7f5ae90 | /StrangeMarket/settings.py | 4559e334ff04a6bf58779f8a8f3895afe5ec2da8 | [] | no_license | chuanzhangcyh/StrangeMarket | 0b504c032023a15955d8e383f136b1daa0d64bb6 | bd8e0bc9199b3420d6fa8973e28e6401bf1a38f8 | refs/heads/master | 2023-02-24T21:28:59.205924 | 2021-02-02T13:39:01 | 2021-02-02T13:39:01 | 335,293,333 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | """
Django settings for StrangeMarket project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7+k-9vio6qxm=wh4zew$3madjprpjxnokg3jwjo$&50a2d_%-j'
# NOTE(review): this SECRET_KEY is committed to version control — rotate it
# and load it from the environment before any real deployment.

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

ALLOWED_HOSTS = ['*']
# NOTE(review): ['*'] disables Host-header validation; list the concrete
# hostnames in production.
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'goods',
'user',
'goodsave'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'StrangeMarket.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'StrangeMarket.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
#}
# MySQL connection used by the app.
# NOTE(review): root credentials are hard-coded in version control — move
# them to environment variables or a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'wrtdata',
        'USER': 'root',
        'PASSWORD': '19931225',
        'HOST': 'localhost',
        'PORT': '3306'
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_dist')
MEDIA_URL = 'wrt/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | [
"1015223012@qq.com"
] | 1015223012@qq.com |
b50b800f11fcc41d535b71aa89115d03db004d5b | a2398c7582b0688f3598f25b0d2dae41dd3e8ee5 | /application.py | 40b82fcf11d946ea02e7ff4f8fa536b9c46ef1ca | [] | no_license | AhmedSYD/Geoweb-app | 4ebf6c92f831537dae01db35ddd2f493c076f80f | 3b215c6c57704550d1eebd37beb54e4c02a1b73f | refs/heads/main | 2023-03-11T12:59:58.281927 | 2021-02-27T19:29:03 | 2021-02-27T19:29:03 | 341,691,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | from flask import Flask, render_template, request
import requests
import datetime
app=Flask(__name__)
@app.route("/")
def search():
return render_template("building_permit_search.html", geocode="", date_value="01/01/2020 - 01/15/2020", initial_page=True)
@app.route("/search", methods=["POST","GET"])
def after_searching():
js_data=""
dateRange="01/01/2020 - 01/15/2020"
if request.method=="POST":
dateRange=request.form.get("daterange")
dates=dateRange.split("-")
start_date,end_date=dates[0].strip(),dates[1].strip()
start_date_format="'"+str(datetime.datetime.strptime(start_date, '%m/%d/%Y').date())+"'"
end_date_format="'"+str(datetime.datetime.strptime(end_date, '%m/%d/%Y').date())+"'"
print("start date:",start_date_format)
print("end date:",end_date_format)
res = requests.get("https://data.calgary.ca/resource/c2es-76ed.geojson",\
params={"$where": "issueddate > "+start_date_format+" and issueddate < "+end_date_format})
# "$select":"issueddate, workclassgroup, contractorname, communityname, originaladdress,latitude,longitude"})
# print("status code=", res.status_code)
# print("data=", res.json())
if res.status_code==200:
js_data=res.json()
else:
js_data=""
return render_template("building_permit_search.html",geocode=js_data,date_value=dateRange, initial_page=False)
if __name__== "__main__" :
app.run(debug=True) | [
"ahmed@domain.com"
] | ahmed@domain.com |
be7503afcbfae63f32619f134faf846ec33a132d | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/meetup/17857a6adde04647acc54269ab6ef4be.py | 84c22eae0a99b1a54a20f6aa508a6a593c16744c | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 750 | py | from collections import defaultdict
from datetime import date, timedelta
def meetup_day(year, month, day_name, date_type):
weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
operators = {
'1st': lambda x: x[0],
'2nd': lambda x: x[1],
'3rd': lambda x: x[2],
'4th': lambda x: x[3],
'last': lambda x: x[-1],
'teenth': lambda x: [d for d in x if 13 <= d <= 19][0],
}
data = defaultdict(list)
day = date(year=year, month=month, day=1)
while day.month == month:
data[weekdays[day.weekday()]].append(day.day)
day += timedelta(1)
return date(year=year, month=month, day=operators[date_type](data[day_name]))
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
9a2f58091a7f166b6979a7a70d9974314571ef65 | 2fbe7e67b0c58a4b15ca41fb077b96279da8575a | /djreact/backend/src/articles/models.py | e8979b89f93053df5d5404e7a8c0367ba2c3da96 | [] | no_license | Rahul255/Django | dd0fd663ddd424c68fe88f193219653010d4721b | 8d4e15a7ede6499ebbcb98dc4cf5f45c5e89748f | refs/heads/master | 2020-07-03T23:07:46.958097 | 2019-10-17T05:26:47 | 2019-10-17T05:26:47 | 202,081,444 | 0 | 0 | null | 2020-06-06T01:14:07 | 2019-08-13T06:42:36 | Python | UTF-8 | Python | false | false | 280 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Article(models.Model):
    """A simple article with a title and free-form body text."""

    # Short headline, capped at 120 characters.
    title = models.CharField(max_length=120)
    # Unbounded article body.
    content = models.TextField()

    def __str__(self):
        """Use the title as the human-readable representation."""
        return self.title
"rahulmohan255@gmail.com"
] | rahulmohan255@gmail.com |
56e5ae923d642aadde49b7cf52f67bdf6d7e7f96 | 0572c88bc1e93a0b9bc8c7aa1f78888304f6b838 | /model_VC2.py | 238f739a076f522557031a31ce7125da43379da2 | [] | no_license | Chow549/Pytorch-CycleGAN-VC2 | c8044e7cb4b23493bd700a9969bddfb7b10db4c2 | 54320f322b122415d7792ca90d5aaf36e8070114 | refs/heads/master | 2020-07-04T17:01:35.290966 | 2019-08-14T03:49:33 | 2019-08-14T03:49:33 | 202,348,491 | 1 | 0 | null | 2019-08-14T12:42:19 | 2019-08-14T12:42:18 | null | UTF-8 | Python | false | false | 19,672 | py | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
import pdb
class GLU(nn.Module):
    """Sigmoid-gated activation: ``x * sigmoid(x)``.

    Custom implementation because the CycleGAN-VC paper assumes the GLU does
    not halve the channel dimension the way ``torch.nn.GLU`` does.
    (The formula used here is the SiLU/swish form.)
    """

    def __init__(self):
        super(GLU, self).__init__()

    def forward(self, input):
        gate = torch.sigmoid(input)
        return input * gate
class up_2Dsample(nn.Module):
    """Nearest-neighbour 2-D upsampling by an integer factor."""

    def __init__(self, upscale_factor=2):
        super(up_2Dsample, self).__init__()
        self.scale_factor = upscale_factor

    def forward(self, input):
        # F.upsample is deprecated; F.interpolate is the drop-in
        # replacement with the same default 'nearest' mode.
        h = input.shape[2]
        w = input.shape[3]
        new_size = [h * self.scale_factor, w * self.scale_factor]
        return F.interpolate(input, new_size)
class PixelShuffle(nn.Module):
    """1-D 'pixel shuffle': folds channels into width on a 3-D tensor.

    Custom implementation because ``torch.nn.PixelShuffle`` requires 4-D
    input; here we have (N, C, W). Implemented as a plain view:
    (N, C, W) -> (N, C // r, W * r).
    """

    def __init__(self, upscale_factor=2):
        super(PixelShuffle, self).__init__()
        self.upscale_factor = upscale_factor

    def forward(self, input):
        batch, channels, width = input.shape
        return input.view(batch, channels // self.upscale_factor,
                          width * self.upscale_factor)
class ResidualLayer(nn.Module):
    """Gated 1-D residual block: ``x + Conv(GLU(Conv(x), Conv_gate(x)))``.

    (A large commented-out duplicate implementation was removed.)
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        # NOTE: `stride` is accepted for signature compatibility but, as in
        # the original implementation, all convolutions run with stride 1.
        super(ResidualLayer, self).__init__()
        # Feature branch.
        self.conv1d_layer = nn.Sequential(
            nn.Conv1d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      stride=1,
                      padding=padding),
            nn.InstanceNorm1d(num_features=out_channels, affine=True))
        # Gate branch (same shape as the feature branch).
        self.conv_layer_gates = nn.Sequential(
            nn.Conv1d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      stride=1,
                      padding=padding),
            nn.InstanceNorm1d(num_features=out_channels, affine=True))
        # Projection back to the residual width.
        self.conv1d_out_layer = nn.Sequential(
            nn.Conv1d(in_channels=out_channels,
                      out_channels=in_channels,
                      kernel_size=kernel_size,
                      stride=1,
                      padding=padding),
            nn.InstanceNorm1d(num_features=in_channels, affine=True))

    def forward(self, input):
        h1_norm = self.conv1d_layer(input)
        h1_gates_norm = self.conv_layer_gates(input)
        # GLU-style element-wise gating.
        h1_glu = h1_norm * torch.sigmoid(h1_gates_norm)
        h2_norm = self.conv1d_out_layer(h1_glu)
        return input + h2_norm
class downSample_Generator(nn.Module):
    """Strided 2-D conv downsampling block with GLU-style gating."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(downSample_Generator, self).__init__()
        self.convLayer = nn.Sequential(
            nn.Conv2d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=padding),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))
        self.convLayer_gates = nn.Sequential(
            nn.Conv2d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=padding),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))

    def forward(self, input):
        # BUGFIX: the original ran each branch TWICE (assigning the first
        # results to unused locals and recomputing them in the return),
        # doubling the block's forward cost. Compute each branch once.
        features = self.convLayer(input)
        gates = self.convLayer_gates(input)
        return features * torch.sigmoid(gates)
class upSample_Generator(nn.Module):
    """Conv + 2x nearest upsampling block with GLU-style gating."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(upSample_Generator, self).__init__()
        self.convLayer = nn.Sequential(
            nn.Conv2d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=padding),
            up_2Dsample(upscale_factor=2),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))
        self.convLayer_gates = nn.Sequential(
            nn.Conv2d(in_channels=in_channels,
                      out_channels=out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=padding),
            up_2Dsample(upscale_factor=2),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))

    def forward(self, input):
        features = self.convLayer(input)
        gates = self.convLayer_gates(input)
        return features * torch.sigmoid(gates)
class Generator(nn.Module):
    """CycleGAN-VC2 generator: gated 2-D downsampling, a 1-D residual
    bottleneck, then 2-D upsampling.

    Input/output are (batch, 24, time) — presumably 24 MCEP features per
    frame (TODO confirm against the data pipeline).
    """

    def __init__(self):
        super(Generator, self).__init__()
        # Input layer: gated 2-D conv over the (feature, time) plane.
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=128,
                               kernel_size=[5, 15],
                               stride=1,
                               padding=[2, 7])
        self.conv1_gates = nn.Conv2d(in_channels=1,
                                     out_channels=128,
                                     kernel_size=[5, 15],
                                     stride=1,
                                     padding=[2, 7])

        # Downsample Layer
        self.downSample1 = downSample_Generator(in_channels=128,
                                                out_channels=256,
                                                kernel_size=5,
                                                stride=2,
                                                padding=2)

        self.downSample2 = downSample_Generator(in_channels=256,
                                                out_channels=512,
                                                kernel_size=5,
                                                stride=2,
                                                padding=2)

        # reshape: 1x1 conv applied after flattening the 2-D map to 1-D.
        self.conv2 = nn.Conv1d(in_channels=512,
                               out_channels=512,
                               kernel_size=1,
                               stride=1)

        # Residual Blocks (1-D bottleneck).
        self.residualLayer1 = ResidualLayer(in_channels=512,
                                            out_channels=1024,
                                            kernel_size=3,
                                            stride=1,
                                            padding=1)
        self.residualLayer2 = ResidualLayer(in_channels=512,
                                            out_channels=1024,
                                            kernel_size=3,
                                            stride=1,
                                            padding=1)
        self.residualLayer3 = ResidualLayer(in_channels=512,
                                            out_channels=1024,
                                            kernel_size=3,
                                            stride=1,
                                            padding=1)
        self.residualLayer4 = ResidualLayer(in_channels=512,
                                            out_channels=1024,
                                            kernel_size=3,
                                            stride=1,
                                            padding=1)
        self.residualLayer5 = ResidualLayer(in_channels=512,
                                            out_channels=1024,
                                            kernel_size=3,
                                            stride=1,
                                            padding=1)
        self.residualLayer6 = ResidualLayer(in_channels=512,
                                            out_channels=1024,
                                            kernel_size=3,
                                            stride=1,
                                            padding=1)

        # reshape: 1x1 conv before restoring the 2-D layout.
        self.conv3 = nn.Conv1d(in_channels=512,
                               out_channels=512,
                               kernel_size=1,
                               stride=1)

        # UpSample Layer
        self.upSample1 = upSample_Generator(in_channels=512,
                                            out_channels=1024,
                                            kernel_size=5,
                                            stride=1,
                                            padding=2)

        self.upSample2 = upSample_Generator(in_channels=1024,
                                            out_channels=512,
                                            kernel_size=5,
                                            stride=1,
                                            padding=2)

        self.lastConvLayer = nn.Conv2d(in_channels=512,
                                       out_channels=24,
                                       kernel_size=[5, 15],
                                       stride=1,
                                       padding=[2, 7])

    def forward(self, input):
        # (batch, 24, time) -> (batch, 1, 24, time): add a channel axis.
        input = input.unsqueeze(1)
        # GLU
        conv1 = self.conv1(input) * torch.sigmoid(self.conv1_gates(input))

        downsample1 = self.downSample1(conv1)

        downsample2 = self.downSample2(downsample1)

        # Flatten (C, H, W) -> (C, H*W) for the 1-D bottleneck.
        downsample3 = downsample2.view([downsample2.shape[0], downsample2.shape[1], -1])
        downsample3 = self.conv2(downsample3)

        residual_layer_1 = self.residualLayer1(downsample3)

        residual_layer_2 = self.residualLayer2(residual_layer_1)

        residual_layer_3 = self.residualLayer3(residual_layer_2)

        residual_layer_4 = self.residualLayer4(residual_layer_3)

        residual_layer_5 = self.residualLayer5(residual_layer_4)

        residual_layer_6 = self.residualLayer6(residual_layer_5)

        residual_layer_6 = self.conv3(residual_layer_6)
        # Restore the 2-D layout recorded before the bottleneck.
        residual_layer_6 = residual_layer_6.view([downsample2.shape[0], downsample2.shape[1], downsample2.shape[2], downsample2.shape[3]])

        upSample_layer_1 = self.upSample1(residual_layer_6)

        upSample_layer_2 = self.upSample2(upSample_layer_1)

        output = self.lastConvLayer(upSample_layer_2)
        # Collapse the channel axis back to (batch, features, time).
        output = output.view([output.shape[0], -1, output.shape[3]])
        return output
class DownSample_Discriminator(nn.Module):
    """Discriminator downsampling block: gated strided 2-D convolution."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(DownSample_Discriminator, self).__init__()
        conv_spec = dict(in_channels=in_channels,
                         out_channels=out_channels,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding=padding)
        self.convLayer = nn.Sequential(
            nn.Conv2d(**conv_spec),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))
        self.convLayerGates = nn.Sequential(
            nn.Conv2d(**conv_spec),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))

    def forward(self, input):
        # GLU-style gating.
        return self.convLayer(input) * torch.sigmoid(self.convLayerGates(input))
class Discriminator(nn.Module):
    """PatchGAN-style discriminator over (batch, num_features, time) inputs."""

    def __init__(self):
        super(Discriminator, self).__init__()

        # Input layer: gated 3x3 conv (explicit ZeroPad2d applied in forward).
        self.convLayer1 = nn.Conv2d(in_channels=1,
                                    out_channels=128,
                                    kernel_size=[3, 3],
                                    stride=[1, 1])

        self.convLayer1_gates = nn.Conv2d(in_channels=1,
                                          out_channels=128,
                                          kernel_size=[3, 3],
                                          stride=[1, 1])

        # Note: Kernel Size have been modified in the PyTorch implementation
        # compared to the actual paper, as to retain dimensionality. Unlike,
        # TensorFlow, PyTorch doesn't have padding='same', hence, kernel sizes
        # were altered to retain the dimensionality after each layer

        # DownSample Layer
        self.downSample1 = DownSample_Discriminator(in_channels=128,
                                                    out_channels=256,
                                                    kernel_size=[3, 3],
                                                    stride=[2, 2],
                                                    padding=0)

        self.downSample2 = DownSample_Discriminator(in_channels=256,
                                                    out_channels=512,
                                                    kernel_size=[3, 3],
                                                    stride=[2, 2],
                                                    padding=0)

        self.downSample3 = DownSample_Discriminator(in_channels=512,
                                                    out_channels=1024,
                                                    kernel_size=[3, 3],
                                                    stride=[2, 2],
                                                    padding=0)

        self.downSample4 = DownSample_Discriminator(in_channels=1024,
                                                    out_channels=1024,
                                                    kernel_size=[1, 5],
                                                    stride=[1, 1],
                                                    padding=[0, 2])

        # Fully Connected Layer
        '''self.fc = nn.Linear(in_features=1024,
                            out_features=1)'''

        # output Layer: 1-channel patch map of real/fake scores.
        self.output_layer = nn.Conv2d(in_channels=1024,
                                      out_channels=1,
                                      kernel_size=[1, 3],
                                      stride=[1, 1],
                                      padding=[0, 1])

    # def downSample(self, in_channels, out_channels, kernel_size, stride, padding):
    #     convLayer = nn.Sequential(nn.Conv2d(in_channels=in_channels,
    #                                         out_channels=out_channels,
    #                                         kernel_size=kernel_size,
    #                                         stride=stride,
    #                                         padding=padding),
    #                               nn.InstanceNorm2d(num_features=out_channels,
    #                                                 affine=True),
    #                               GLU())
    #     return convLayer

    def forward(self, input):
        # input has shape [batch_size, num_features, time]
        # discriminator requires shape [batchSize, 1, num_features, time]
        input = input.unsqueeze(1)
        # GLU (explicit asymmetric padding emulates TF padding='same').
        pad_input = nn.ZeroPad2d((1, 1, 1, 1))
        layer1 = self.convLayer1(
            pad_input(input)) * torch.sigmoid(self.convLayer1_gates(pad_input(input)))

        pad_input = nn.ZeroPad2d((1, 0, 1, 0))
        downSample1 = self.downSample1(pad_input(layer1))

        pad_input = nn.ZeroPad2d((1, 0, 1, 0))
        downSample2 = self.downSample2(pad_input(downSample1))

        pad_input = nn.ZeroPad2d((1, 0, 1, 0))
        downSample3 = self.downSample3(pad_input(downSample2))

        downSample4 = self.downSample4(downSample3)
        downSample4 = self.output_layer(downSample4)

        # Move the score channel last: (N, H, W, 1).
        downSample4 = downSample4.contiguous().permute(0, 2, 3, 1).contiguous()
        # fc = torch.sigmoid(self.fc(downSample3))
        # Taking off sigmoid layer to avoid vanishing gradient problem
        #fc = self.fc(downSample4)
        # NOTE(review): the comment above says the sigmoid was removed, but a
        # sigmoid IS still applied here — confirm which behaviour is intended.
        fc = torch.sigmoid(downSample4)
        return fc
if __name__ == '__main__':
    # Smoke test: push random data through both networks and print shapes.
    # Generator Dimensionality Testing
    input = torch.randn(10, 24, 1100)  # (N, C_in, Width) For Conv1d
    np.random.seed(0)
    print(np.random.randn(10))
    # NOTE: the tensor above is immediately overwritten by this fixed-seed
    # NumPy sample.
    input = np.random.randn(15, 24, 128)
    input = torch.from_numpy(input).float()
    # print(input)
    generator = Generator()
    output = generator(input)
    print("Output shape Generator", output.shape)

    # Discriminator Dimensionality Testing

    # input = torch.randn(32, 1, 24, 128)  # (N, C_in, height, width) For Conv2d
    discriminator = Discriminator()
    output = discriminator(output)
    print("Output shape Discriminator", output.shape)
| [
"kevintai0304@gmail.com"
] | kevintai0304@gmail.com |
431d18182a5c2d9b0783fd03d183f557c1ba1743 | df20244e5822a428ce0fbb3eaf3cd32651f1c560 | /kgdb/jemalloc/jemalloc_utils.py | 9fa247d355f34c603ccfc643aa5181bb68279715 | [] | no_license | kongjiadongyuan/kgdb | f5e2541934a0dfcab610c85cf8971886cb9f6b14 | ff9bb5c9cf808346a792e8153dcbfe4cecadbf82 | refs/heads/master | 2020-05-22T16:06:13.067575 | 2019-09-12T06:32:32 | 2019-09-12T06:32:32 | 186,422,640 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | import struct
sztable = [0x8,
0x10,
0x20,
0x30,
0x40,
0x50,
0x60,
0x70,
0x80,
0xa0,
0xc0,
0xe0,
0x100,
0x140,
0x180,
0x1c0,
0x200,
0x280,
0x300,
0x380,
0x400,
0x500,
0x600,
0x700,
0x800,
0xa00,
0xc00,
0xe00,
0x1000,
0x2000,
0x3000,
0x4000,
0x5000,
0x6000,
0x7000,
0x8000]
BIN_COUNT = 28
TBIN_COUNT = 36
def p64(num):
    """Pack *num* as an unsigned 64-bit little-endian byte string."""
    return struct.pack("<Q", num)
def u64(content):
    """Unpack an 8-byte little-endian buffer into an unsigned integer.

    Raises ValueError on a wrong-length buffer (more specific than the old
    bare ``Exception``, and still caught by ``except Exception`` handlers).
    """
    if len(content) != 8:
        raise ValueError("u64 should accept buffer with length 8.")
    return struct.unpack("<Q", content)[0]
def p32(num):
    """Pack *num* as an unsigned 32-bit little-endian byte string."""
    return struct.pack("<I", num)
def u32(content):
    """Unpack a 4-byte little-endian buffer into an unsigned integer.

    Raises ValueError on a wrong-length buffer (more specific than the old
    bare ``Exception``, and still caught by ``except Exception`` handlers).
    """
    if len(content) != 4:
        raise ValueError("u32 should accept buffer with length 4.")
    return struct.unpack("<I", content)[0]
def size2bin(sz):
    """Return the smallest bin index whose size class fits *sz*, or -1."""
    for idx, bound in enumerate(sztable):
        if sz <= bound:
            return idx
    return -1
def bin2size(binidx):
    """Return the byte size class for bin index *binidx*."""
    return sztable[binidx]
def map2run(addr):
    """Translate the address of a chunk map entry to its run's base address.

    Layout assumptions: 4 MiB chunks, 0x1000-byte pages, 0x18-byte map
    entries starting at chunk offset 0x30, run pages starting at page 6.
    """
    offsetmask = (0x400 * 0x400 * 4 - 1)         # low bits: offset in chunk
    chunkmask = 0xffffffffffffffff ^ offsetmask  # high bits: chunk base
    chunkaddr = addr & chunkmask
    mapoffset = addr & offsetmask
    # Floor division instead of int(float-division): exact for any valid
    # offset (entries always have mapoffset >= 0x30), no float precision.
    mapidx = (mapoffset - 0x30) // 0x18
    runaddr = chunkaddr + 0x1000 * (mapidx + 6)
    return runaddr
if __name__ == '__main__':
pass | [
"zhaggbl@foxmail.com"
] | zhaggbl@foxmail.com |
5759a3994045c73ec308fd6d0a0209db7f485d10 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/328/74024/submittedfiles/testes.py | d9f0fad052a8e893b10ab6e61accf13576dfb507 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
a=5
print('%.2f'%a)
a=30
b=5
c=10
if a<b<c:
print('comando 1')
else:
if a<c<b:
print('comando 2')
else:
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
47d8e02e074f1b33228838b15e10ea23d3c6ee86 | 4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422 | /_0681_Next_Closest_Time.py | 6c11bbad0971087cef6f5ea28aee8b0e18f175eb | [] | no_license | mingweihe/leetcode | a2cfee0e004627b817a3c0321bb9c74128f8c1a7 | edff905f63ab95cdd40447b27a9c449c9cefec37 | refs/heads/master | 2021-06-19T07:46:46.897952 | 2021-05-02T05:13:17 | 2021-05-02T05:13:17 | 205,740,338 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | class Solution(object):
def nextClosestTime(self, time):
"""
:type time: str
:rtype: str
"""
def helper(start, cur, pool):
if start == 4:
left, right = cur[:2], cur[2:]
hour, minute = int(left), int(right)
if hour > 23 or minute > 59: return
cur_digit = int(left + right)
if cur_digit <= self.original_digit: return
cur_diff = cur_digit - self.original_digit
if cur_diff < self.diff:
self.diff = cur_diff
self.res = left + ':' + right
return
for c in pool: helper(start + 1, cur + c, pool)
self.res = min(time) * 2 + ':' + min(time) * 2
self.original_digit = int(time.replace(':', ''))
self.diff = float('inf')
helper(0, '', set(time) - {':'})
return self.res
| [
"hemingwei2017@gmail.com"
] | hemingwei2017@gmail.com |
10e9fdf6a6b34922bef66358b5ff457a52a28977 | 1d0e36f710ed936d9bec3d88b69edd8a26b62823 | /examples/ble_uart_echo_client.py | ce36ac80691af1a7338da4730e0fbc2c9a45008c | [
"MIT"
] | permissive | dglaude/Adafruit_CircuitPython_BLE | c0336787bbf739ddad9d078eab4edc2a80530bd4 | e8f72b053af8cfcde0c07040a5f2feecd4ca585b | refs/heads/master | 2020-12-12T17:54:52.632742 | 2020-01-09T03:09:22 | 2020-01-14T23:18:46 | 234,190,805 | 0 | 0 | MIT | 2020-01-15T23:05:42 | 2020-01-15T23:05:41 | null | UTF-8 | Python | false | false | 1,086 | py | """
Used with ble_uart_echo_test.py. Transmits "echo" to the UARTService and receives it back.
"""
import time
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.nordic import UARTService
ble = BLERadio()
while True:
    # Service phase: while at least one connection offers the UART service,
    # send "echo" to each such connection roughly once a second.
    while ble.connected and any(UARTService in connection for connection in ble.connections):
        for connection in ble.connections:
            if UARTService not in connection:
                continue
            print("echo")
            uart = connection[UARTService]
            uart.write(b"echo")
            # Returns b'' if nothing was read.
            one_byte = uart.read(4)
            if one_byte:
                print(one_byte)
            print()
            time.sleep(1)
    # Discovery phase: scan for advertisements providing the UART service
    # and connect to the first match, then loop back to the service phase.
    print("disconnected, scanning")
    for advertisement in ble.start_scan(ProvideServicesAdvertisement, timeout=1):
        if UARTService not in advertisement.services:
            continue
        ble.connect(advertisement)
        print("connected")
        break
    ble.stop_scan()
| [
"scott@tannewt.org"
] | scott@tannewt.org |
f7a3e04c75162a4f4da5a2f86f1e75c3d3bd0723 | 217c2e091dfbd35226507722b0c250188aa060f9 | /core/.ipynb_checkpoints/config-checkpoint.py | 27721f9c6e86eb8e8f08fd93c409f6524a8e882d | [] | no_license | raceychan/mydp | 65ef991073fec152bdc6c508dfe88540b247611e | 85db2741e3e4f2707a6c22f63e4b931e11af95b3 | refs/heads/master | 2023-06-26T05:03:36.500742 | 2021-07-30T09:55:47 | 2021-07-30T09:55:47 | 373,941,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,145 | py | import pika
import ssl
from pika.connection import ConnectionParameters
from typing import Any, Dict, List, Optional, Union, Set
from pydantic import BaseSettings, AnyHttpUrl, EmailStr, HttpUrl, AnyUrl, PostgresDsn, validator
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from core.utils import build_url
class Settings(BaseSettings):
    """Application settings loaded from core/.env via pydantic BaseSettings."""

    class Config:
        env_file = 'core\\.env'
        env_file_encoding = 'utf-8'

    class DefaultValues:
        # Fallback values used when a field is missing from the data source.
        null = 0

    class DataBaseSettings:
        allowed_db = {'mysql', 'postgresql'}
        # NOTE(review): attribute name has a typo ('allowd'); kept for
        # compatibility with any existing references.
        allowd_driver = {'pymysql', 'aiomysql'}
        default_async_engine = False

    EVENTS: Set[str] = {'instantiate', 'sql_update', 'data_update'}

    MYSQL_DB: str = 'mysql'
    MYSQL_DRIVER: str = 'pymysql'
    MYSQL_HOST: str
    MYSQL_PORT: str = '3306'
    MYSQL_USER: str
    MYSQL_PASSWORD: str
    # Parameter names expected by build_url; each maps to a MYSQL_* field.
    DB_PARAMS_SCHEMA: Set[str] = {
        'db', 'driver', 'user', 'password', 'host', 'port'}

    SQLALCHEMY_DATABASE_URI: Optional[str] = ''

    @validator("SQLALCHEMY_DATABASE_URI", pre=True)
    def assemble_db_url(cls, v: Optional[str], values: Dict[str, Any], config, field) -> str:
        """Build the sync DB URL from the MYSQL_* fields unless given explicitly."""
        if v and isinstance(v, str):
            return v
        db_params_schema: Dict[str, Any] = values.get('DB_PARAMS_SCHEMA', '')
        db_params = {param: values.get(f'MYSQL_{param.upper()}', '')
                     for param in db_params_schema}
        return build_url(db_params=db_params, db_params_schema=db_params_schema)

    SQLALCHEMY_ASYNC_DB_URL: Optional[str] = ''

    @validator("SQLALCHEMY_ASYNC_DB_URL", pre=True)
    def assemble_aysnc_url(cls, v: Optional[str], values: Dict[str, Any], config, field) -> Any:
        """Build the async DB URL: same params as the sync URL, aiomysql driver."""
        if v and isinstance(v, str):
            return v
        db_params_schema: Dict[str, Any] = values.get('DB_PARAMS_SCHEMA', '')
        db_params = {param: values.get(f'MYSQL_{param.upper()}')
                     for param in db_params_schema}
        # BUGFIX: the schema key is 'driver'; the old code inserted a new
        # 'MYSQL_DRIVER' key instead, so the async URL silently kept the
        # sync (pymysql) driver.
        db_params['driver'] = 'aiomysql'
        return build_url(db_params=db_params, db_params_schema=db_params_schema)

    RABBITMQ_DRIVER: str
    RABBITMQ_HOST: str
    RABBITMQ_PORT: str
    RABBITMQ_USER: str
    RABBITMQ_PASSWORD: str
    RMQ_CON_PARAM: Optional[Any]

    @validator('RMQ_CON_PARAM', pre=True)
    def assemble_mb_con(cls, v: Optional[str], values: Dict[str, Any], config, field) -> ConnectionParameters:
        """Assemble pika connection parameters (TLS 1.2) from the RABBITMQ_* fields."""
        if v and isinstance(v, str):
            return v
        RABBITMQ_USER = values.get('RABBITMQ_USER')
        RABBITMQ_PASSWORD = values.get('RABBITMQ_PASSWORD')
        RABBITMQ_HOST = values.get('RABBITMQ_HOST')
        RABBITMQ_PORT = values.get('RABBITMQ_PORT')
        credentials = pika.PlainCredentials(RABBITMQ_USER, RABBITMQ_PASSWORD)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        ssl_options = pika.SSLOptions(context)
        connection_params = pika.ConnectionParameters(
            host=RABBITMQ_HOST,
            port=RABBITMQ_PORT,
            credentials=credentials,
            ssl_options=ssl_options)
        return connection_params
settings = Settings()
defaults = Settings.DefaultValues()
| [
"raceychan@gmail.com"
] | raceychan@gmail.com |
5984781a7bf3e925d29c995794955234adfb0a95 | 1ee3dc4fa096d12e409af3a298ba01f5558c62b5 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/simrouter.py | 02b3d8c591855440623aaa468cf1e02a2f4d36be | [
"MIT"
] | permissive | parthpower/ixnetwork_restpy | 321e64a87be0a4d990276d26f43aca9cf4d43cc9 | 73fa29796a5178c707ee4e21d90ff4dad31cc1ed | refs/heads/master | 2020-07-04T13:34:42.162458 | 2019-08-13T20:33:17 | 2019-08-13T20:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,972 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SimRouter(Base):
"""The SimRouter class encapsulates a system managed simRouter node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the SimRouter property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
"""
_SDM_NAME = 'simRouter'
def __init__(self, parent):
super(SimRouter, self).__init__(parent)
@property
def Connector(self):
"""An instance of the Connector class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector.Connector)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector import Connector
return Connector(self)
@property
def IsisL3PseudoRouter(self):
"""An instance of the IsisL3PseudoRouter class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.isisl3pseudorouter.IsisL3PseudoRouter)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.isisl3pseudorouter import IsisL3PseudoRouter
return IsisL3PseudoRouter(self)
@property
def LdpPseudoRouter(self):
"""An instance of the LdpPseudoRouter class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldppseudorouter.LdpPseudoRouter)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldppseudorouter import LdpPseudoRouter
return LdpPseudoRouter(self)
@property
def OspfPseudoRouter(self):
"""An instance of the OspfPseudoRouter class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfpseudorouter.OspfPseudoRouter)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfpseudorouter import OspfPseudoRouter
return OspfPseudoRouter(self)
@property
def Ospfv3PseudoRouter(self):
"""An instance of the Ospfv3PseudoRouter class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfv3pseudorouter.Ospfv3PseudoRouter)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ospfv3pseudorouter import Ospfv3PseudoRouter
return Ospfv3PseudoRouter(self)
@property
def Tag(self):
"""An instance of the Tag class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag.Tag)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag import Tag
return Tag(self)
@property
def Count(self):
"""Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
Returns:
number
"""
return self._get_attribute('count')
@property
def DescriptiveName(self):
"""Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def Name(self):
"""Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def RouterId(self):
"""4 Byte Router Id in dotted decimal format.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('routerId')
@property
def SystemId(self):
"""6 Byte System Id in hex format.
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('systemId')
def update(self, Name=None):
"""Updates a child instance of simRouter on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has the associated documentation that details the possible values for those named parameters.
Args:
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def find(self, Count=None, DescriptiveName=None, Name=None):
"""Finds and retrieves simRouter data from the server.
All named parameters support regex and can be used to selectively retrieve simRouter data from the server.
By default the find method takes no parameters and will retrieve all simRouter data from the server.
Args:
Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offers more context
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns:
self: This instance with matching simRouter data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of simRouter data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the simRouter data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, RouterId=None, SystemId=None):
"""Base class infrastructure that gets a list of simRouter device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args:
PortNames (str): optional regex of port names
RouterId (str): optional regex of routerId
SystemId (str): optional regex of systemId
Returns:
list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Start(self):
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('start', payload=payload, response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('stop', payload=payload, response_object=None)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
c5b50c84a27561cd42e497c41900c80a6f77b56c | 30de452d89eacf48f61ceddfaff86aa62d505507 | /traits/observation/_i_notifier.py | 2b28ed8a676667350285c54a1f6916280271f97f | [
"BSD-3-Clause",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | odidev/traits | 92224376b5444a7a5f805b474f0aa53ac1ca7cd2 | 52f4d00248cec5dbf0826de4e846b4ad83cf072e | refs/heads/master | 2023-06-18T16:53:43.850534 | 2021-07-14T05:48:46 | 2021-07-14T05:48:46 | 388,075,888 | 0 | 0 | NOASSERTION | 2021-07-21T10:31:06 | 2021-07-21T10:11:23 | null | UTF-8 | Python | false | false | 1,620 | py | # (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import abc
class INotifier(abc.ABC):
""" Interface for all notifiers.
An instance of notifier must be a callable, i.e. ``__call__`` must be
implemented and cannot be None. The signature of that callable should be
compatible with the observables the notifier will be given to. This
interface does not define what that signature should be.
"""
def __call__(self, *args, **kwargs):
""" Called by an observable.
The signature is not restricted by the interface.
"""
raise NotImplementedError("__call__ must be implemented.")
def add_to(self, observable):
""" Add this notifier to the observable.
Parameters
----------
observable : IObservable
"""
raise NotImplementedError("add_to must be implemented.")
def remove_from(self, observable):
""" Remove this notifier or a notifier equivalent to this one
from the observable.
Parameters
----------
observable : IObservable
Raises
------
NotifierNotFound
If the notifier cannot be found.
"""
raise NotImplementedError("remove_from must be implemented.")
| [
"noreply@github.com"
] | noreply@github.com |
aac3f6ca9c6c86ef2c98b8a092056fb26f469bbb | a1c0685a80e9a581fec9a88ff7655c639af6ffd6 | /15.py | d6787b62d04a64b22b65d3440e44b55d7e9add59 | [] | no_license | fguan/proj-euler | 734c8278b37949f979cdb197b80f95adc529a754 | 2486d6e5611326aa3f5385989a6e29bc05d71265 | refs/heads/master | 2020-06-06T10:16:31.361733 | 2014-09-03T05:11:55 | 2014-09-03T05:11:55 | 1,130,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | #!/usr/bin/python
"""
Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner. How many such routes are there through a 20×20 grid?
https://projecteuler.net/problem=15
"""
gridSize = 20
a = [[0 for i in range(0,gridSize+1)] for j in range(0,gridSize+1)]
# initialize bottom and right most boundary to 1.
for i in range(0,gridSize):
a[i][gridSize] = a[gridSize][i] = 1
print(a[20][1])
# number of routes available for each point is equal to the routes(right-point) + routes(down-point)
for i in range(gridSize-1, -1, -1):
for j in range(gridSize-1, -1, -1):
a[i][j] = a[i+1][j] + a[i][j+1]
print(a[0][0])
| [
"frankgimpy@maozer.com"
] | frankgimpy@maozer.com |
51feeb2b10797b3f8b8c6b3924db8cf6fb64b368 | 1ab2c6ddf07180297a233cc695a139b6219c1fa9 | /src/smartsave.py | 4cd20d82fcadcadd922bb083e328855ac29f9a54 | [] | no_license | Braden-H12345/sfa_scripts | 0510236c45f37958c8748e21178faebfbd26bfd1 | 7a1447c5034c64ea9424424388c08ea99f68ecd2 | refs/heads/main | 2023-04-10T05:33:54.954702 | 2021-05-03T05:19:54 | 2021-05-03T05:19:54 | 345,435,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,753 | py | import logging
from PySide2 import QtWidgets, QtCore
from shiboken2 import wrapInstance
import maya.OpenMayaUI as omui
import maya.cmds as cmds
import pymel.core as pmc
from pymel.core.system import Path
log = logging.getLogger(__name__)
def maya_main_window():
"""Return the maya main window widget"""
main_window = omui.MQtUtil.mainWindow()
return wrapInstance(long(main_window), QtWidgets.QWidget)
class SmartSaveUI(QtWidgets.QDialog):
"""Smart save ui class"""
def __init__(self):
super(SmartSaveUI, self).__init__(parent=maya_main_window())
self.setWindowTitle("Smart Save")
self.setMinimumWidth(500)
self.setMaximumHeight(200)
self.setWindowFlags(self.windowFlags() ^
QtCore.Qt.WindowContextHelpButtonHint)
self.scenefile = SceneFile()
self.create_ui()
self.create_connections()
def create_ui(self):
self.title_lbl = QtWidgets.QLabel("Smart Save")
self.title_lbl.setStyleSheet("font: bold 20px")
self.folder_lay = self._create_folder_ui()
self.filename_lay = self._create_filename_ui()
self.button_lay = self._create_button_ui()
self.main_lay = QtWidgets.QVBoxLayout()
self.main_lay.addWidget(self.title_lbl)
self.main_lay.addLayout(self.folder_lay)
self.main_lay.addLayout(self.filename_lay)
self.main_lay.addStretch()
self.main_lay.addLayout(self.button_lay)
self.setLayout(self.main_lay)
def create_connections(self):
"""Connects signals and slots"""
self.folder_browse_btn.clicked.connect(self._browse_folder)
self.save_btn.clicked.connect(self._save)
self.save_increment_btn.clicked.connect(self._save_increment)
@QtCore.Slot()
def _save_increment(self):
"""Save an increment of the scene"""
self._set_scenefile_properties_from_ui()
self.scenefile.increment_save()
self.ver_sbx.setValue(self.scenefile.ver)
@QtCore.Slot()
def _save(self):
"""Save the scene"""
self._set_scenefile_properties_from_ui()
self.scenefile.save()
def _set_scenefile_properties_from_ui(self):
self.scenefile.folder_path = self.folder_le.text()
self.scenefile.descriptor = self.descriptor_le.text()
self.scenefile.task = self.task_le.text()
self.scenefile.ver = self.ver_sbx.value()
self.scenefile.ext = self.ext_lbl.text()
@QtCore.Slot()
def _browse_folder(self):
"""Opens a dialogue box to browse the folder"""
folder = QtWidgets.QFileDialog.getExistingDirectory(
parent=self, caption="Select Folder", dir=self.folder_le.text(),
options=QtWidgets.QFileDialog.ShowDirsOnly |
QtWidgets.QFileDialog.DontResolveSymlinks)
self.folder_le.setText(folder)
def _create_button_ui(self):
self.save_btn = QtWidgets.QPushButton("Save")
self.save_increment_btn = QtWidgets.QPushButton("Save Increment")
layout = QtWidgets.QHBoxLayout()
layout.addWidget(self.save_btn)
layout.addWidget(self.save_increment_btn)
return layout
def _create_filename_ui(self):
layout = self._create_filename_headers()
self.descriptor_le = QtWidgets.QLineEdit(self.scenefile.descriptor)
self.descriptor_le.setMinimumWidth(100)
self.task_le = QtWidgets.QLineEdit(self.scenefile.task)
self.task_le.setFixedWidth(50)
self.ver_sbx = QtWidgets.QSpinBox()
self.ver_sbx.setButtonSymbols(QtWidgets.QAbstractSpinBox.PlusMinus)
self.ver_sbx.setFixedWidth(50)
self.ver_sbx.setValue(self.scenefile.ver)
self.ext_lbl = QtWidgets.QLabel(".ma")
layout.addWidget(self.descriptor_le, 1, 0)
layout.addWidget(QtWidgets.QLabel("_"), 1, 1)
layout.addWidget(self.task_le, 1, 2)
layout.addWidget(QtWidgets.QLabel("_v"), 1, 3)
layout.addWidget(self.ver_sbx, 1, 4)
layout.addWidget(self.ext_lbl, 1, 5)
return layout
def _create_filename_headers(self):
self.descriptor_header_lbl = QtWidgets.QLabel("Descriptor")
self.descriptor_header_lbl.setStyleSheet("font: bold")
self.task_header_lbl = QtWidgets.QLabel("Task")
self.task_header_lbl.setStyleSheet("font: bold")
self.ver_header_lbl = QtWidgets.QLabel("Version")
self.ver_header_lbl.setStyleSheet("font: bold")
layout = QtWidgets.QGridLayout()
layout.addWidget(self.descriptor_header_lbl, 0, 0)
layout.addWidget(self.task_header_lbl, 0, 2)
layout.addWidget(self.ver_header_lbl, 0, 4)
return layout
def _create_folder_ui(self):
default_folder = Path(cmds.workspace(rootDirectory=True, query=True))
default_folder = default_folder / "scenes"
self.folder_le = QtWidgets.QLineEdit(default_folder)
self.folder_browse_btn = QtWidgets.QPushButton("...")
layout = QtWidgets.QHBoxLayout()
layout.addWidget(self.folder_le)
layout.addWidget(self.folder_browse_btn)
return layout
class SceneFile(object):
def __init__(self, path=None):
self._folder_path = Path(cmds.workspace(query=True,
rootDirectory=True)) / "scenes"
self.descriptor = 'main'
self.task = 'model'
self.ver = 1
self.ext = '.ma'
scene = pmc.system.sceneName()
if not path and scene:
path = scene
if not path and not scene:
log.info("Initialize with default properties")
return
self._init_from_path(path)
@property
def folder_path(self):
return self._folder_path
@folder_path.setter
def folder_path(self, val):
self._folder_path = Path(val)
@property
def filename(self):
pattern = "{descriptor}_{task}_v{ver:03d}{ext}"
return pattern.format(descriptor=self.descriptor,
task=self.task,
ver=self.ver,
ext=self.ext)
@property
def path(self):
return self.folder_path / self.filename
def _init_from_path(self, path):
path = Path(path)
self.folder_path = path.parent
self.ext = path.ext
self.descriptor, self.task, ver = path.name.stripext().split("_")
self.ver = int(ver.split("v")[-1])
def save(self):
try:
return pmc.system.saveAs(self.path)
except RuntimeError as err:
log.warning("Missing directories in path. Creating directories...")
self.folder_path.makedirs_p()
return pmc.system.saveAs(self.path)
def next_avail_ver(self):
pattern = "{descriptor}_{task}_v*{ext}".format(
descriptor=self.descriptor,
task=self.task,
ext=self.ext)
matching_scene_files = []
for file_ in self.folder_path.files():
if file_.name.fnmatch(pattern):
matching_scene_files.append(file_)
if not matching_scene_files:
return 1;
matching_scene_files.sort(reverse=True)
latest_scene_file = matching_scene_files[0]
latest_scene_file = latest_scene_file.name.stripext()
latest_ver_num = int(latest_scene_file.split("_v")[-1])
return latest_ver_num + 1
def increment_save(self):
self.ver = self.next_avail_ver()
self.save() | [
"noreply@github.com"
] | noreply@github.com |
67c2f2197e27024fb004512324c888cf06f072a6 | 25a24ff7302040254a0baec244968591488567af | /archive-bioinformatics-2/bio2-wk2/bio2-wk2-3_read-pairs.py | 345f0ab456b7c59610011857fbb47c56addea6ff | [] | no_license | loibucket/biopy | 719dfb3c4623d17b2b07473bb7d0b517013c3d74 | f2490e0f90fcea44ce7daa3acbbc5b46215a7be2 | refs/heads/main | 2023-07-16T05:29:02.264495 | 2021-09-03T03:44:17 | 2021-09-03T03:44:17 | 370,835,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py |
def read_pairs(seq, k, d):
lst = []
for i in range(len(seq)-k-d-2):
b = i + k + d
lst.append("("+seq[i:(i+k)]+"|"+seq[b:(b+k)]+")")
lst.sort()
return("".join(lst))
if __name__ == "__main__":
seq = "TAATGCCATGGGATGTT"
k = 3
d = 2
print(read_pairs(seq, k, d))
| [
"loi@loico.us"
] | loi@loico.us |
b634bfab4fdddbb7c17c5b058c3f608ba3c9fdd9 | 6489f80c1bc2c51f41c186c260a3370f899acd20 | /multi-objective/single_optimize.py | db38c7b383e1fdf0d775a84475fc0963ed41ddf8 | [] | no_license | Este1le/Auto-tuning | 16bc316359d97c3dfff0e41c5aad9a18122b7b25 | 856b6176af770b200897f56b7a46f6699402ef28 | refs/heads/master | 2020-04-13T21:27:57.516254 | 2019-09-16T17:19:07 | 2019-09-16T17:19:07 | 163,456,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | import argparse
import pickle
import numpy as np
from multiprocessing import Pool
import os.path
import sys
sys.path.insert(1, '/export/a08/xzhan138/Auto-tuning/multi-objective/regressor')
from gp import GP
from krr import KRR
from gbssl import GBSSL
from preprocess import extract_data
def get_args():
parser = argparse.ArgumentParser(description="Multi-objective Hyperparameter Optimization.")
parser.add_argument("--dataset", choices=["ted-zh-en", "ted-ru-en", "robust19-en-ja", "robust19-ja-en"])
parser.add_argument("--architecture", choices=["rnn", "trans"])
parser.add_argument("--rnn-cell-type", choices=["lstm", "gru"])
parser.add_argument("--model", choices=["krr", "gp", "gbssl"], help="Optimization algorithm.")
parser.add_argument("--acquisition", default="max", choices=["max", "erm"], help="Acquisition function.")
parser.add_argument("--threshold", type=int, default=5, help="Remove samples with bad performance(BLEU).")
parser.add_argument("--output", help="Output directory.")
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
modeldir = "/export/a10/kduh/p/mt/gridsearch/" + args.dataset + "/models/"
x, y, _ = extract_data(modeldir=modeldir, threshold=args.threshold,
architecture=args.architecture, rnn_cell_type=args.rnn_cell_type)
result = np.zeros((len(y)-3, len(y)))
for i in range(len(y)-3):
print("step {0}/{1}".format(i+1, len(y)-3))
label_ids = np.array([i,i+1,i+2])
while len(label_ids) != len(y):
if args.model == "gbssl":
opt_model = GBSSL(x, y[label_ids], label_ids)
elif args.model == "gp":
opt_model = GP(x, y[label_ids], label_ids)
elif args.model == "krr":
opt_model = KRR(x, y[label_ids], label_ids)
y_preds, y_vars = opt_model.fit_predict()
del opt_model
unlabel_ids = np.array([u for u in range(len(y)) if u not in label_ids])
def get_risk(candidate_id):
opt_model = GBSSL(x, np.append(y[label_ids], y_preds[candidate_id]), np.append(label_ids, candidate_id))
new_y_preds, new_y_vars = opt_model.fit_predict()
del opt_model
return np.linalg.norm(np.array(new_y_preds)[label_ids] - y[label_ids])
if args.acquisition == "max":
next_label_id = unlabel_ids[np.argmax(y_preds[unlabel_ids])]
elif (args.model == "gbssl") and (args.acquisition == "erm"):
next_unlabel_ids = np.argsort(y_preds[unlabel_ids])[::-1][:10]
candidate_ids = unlabel_ids[next_unlabel_ids]
p = Pool(10)
risks = p.map(get_risk, candidate_ids)
next_label_id = candidate_ids[np.argmin(risks)]
p.close()
p.join()
label_ids = np.append(label_ids, next_label_id)
print(label_ids)
result[i] = label_ids
model_name = args.model
if args.architecture == "rnn":
model_name += "_{0}".format(args.rnn_cell_type)
output_file = args.output + "/" + args.architecture + "/" + args.dataset + "/" + \
model_name + "_" + args.acquisition + ".pkl"
with open(output_file,'wb') as fobj:
pickle.dump(result, fobj)
| [
"xuanzhang@jhu.edu"
] | xuanzhang@jhu.edu |
9bf5f51bb15906ebe54e8158ffa8d0e1abcdfd05 | 2f6817fc8f6ddb48f5f88c913d8e40b672fc3dbf | /MLP/quiz11-3.py | 331fe654ca3d83ef9ffd1dcf2ebdbcc9879d2b59 | [] | no_license | cutz-j/TodayILearned | 320b5774de68a0f4f68fda28a6a8b980097d6ada | 429b24e063283a0d752ccdfbff455abd30ba3859 | refs/heads/master | 2020-03-23T17:34:51.389065 | 2018-11-24T08:49:41 | 2018-11-24T08:49:41 | 141,865,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,157 | py | ## Q13: K-means ##
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist, pdist
## 파일 전처리 ##
def file_open(file_name):
## file_open --> np.array ##
file_open = open(file_name, 'r')
all_data = []
for i in file_open.readlines():
all_data.append(i.strip('\n').split(','))
all_data = np.array(all_data) # shape(9835, None)
return all_data
all_data = file_open("d:/data/prac/groceries.csv")
def numbering(all_data):
## product를 dict에 넣으면서 numbering ##
global all_item_num
k = 0
all_dict = {}
for buy in all_data:
for product in buy:
if product in all_dict:
continue
else:
all_dict[product] = k
k += 1
all_item_num = k
for i in all_data:
for k in range(len(i)):
i[k] = all_dict[i[k]]
return all_data, all_dict
all_transaction = len(all_data) # 전체 거래수 9835건
all_item_num = 0 # 169개
all_data, all_dict = numbering(all_data) # 전체 아이템 개수 169개
## one-hot ##
def one_hot(data):
## 구매자마다 벡터화 시키기 위해 one-hot-encoding ## --> X: shape(9835, 169)
one_hot = np.zeros([all_transaction, all_item_num], dtype=np.int32)
for i in range(len(all_data)):
for j in all_data[i]:
one_hot[i,j] = 1
return one_hot
x_one_hot = one_hot(all_data) # one-hot
## split ##
x_train, x_test = x_one_hot[:9800, :], x_one_hot[9800:, :]
## Kmeans ##
# n_cluster = 10, max_iter=3000 #
k_means = KMeans(n_clusters=10, max_iter=3000, random_state=77)
k_means.fit(x_train)
k_cluster = k_means.predict(x_test)
ss = silhouette_score(x_train, k_means.labels_, metric='euclidean')
print("테스트 데이터 35명의 클러스터: \n", k_cluster)
print("\nsilhouette_score: ", ss)
| [
"cutz309@gmail.com"
] | cutz309@gmail.com |
71270ecbc961e81071f0dae593762e82a27d8428 | 3d77b5892c11c89031d8ef01fb36b0f0a00dfa69 | /src/OpticalElements.py | 1fdb76ece045ecb665dd1ce010f09045ef00b846 | [
"MIT"
] | permissive | sbanik1/sheetTrap | 710748e56b25ef801a70b134012184c777fe760e | 287746bf33b41e7f1066e80ee12bd08f75b155bc | refs/heads/main | 2023-04-25T19:35:40.713942 | 2021-06-09T23:11:18 | 2021-06-09T23:11:18 | 323,198,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,408 | py | # -*- coding: utf-8 -*-
"""
This module contains all functions for response of optical elements.
Created on Wed May 22 12:15:23 2019
@author: Swarnav Banik
sbanik1@umd.edu
"""
import numpy as np
import numpy.fft as fourier
import scipy as scp
from PIL import Image
# %% Common Functions #########################################################
# The following functions take inputs
# Wave Vector k in units um
# Minimum Waist w0 in units um
# Position r,z in units um
# Lens Action ###################################################################
def SphLensAction(E,X,Y,k,f,**kwargs):
# Evaluates the response of a spherical lens at its front focal plane
# Inputs: E - 2D Field pattern
# X,Y - 2D grid representing co-ordinates
# k - Wave vector [um^-1]
# f - focal length [mm]
# FocussedAxis - Along what axis is the beam focused at the back
# focal plane
if (E.shape != X.shape or X.shape != Y.shape):
raise Exception('OpticalElements::SphLensAction::E,X and Y should have same dimensions.')
for key, value in kwargs.items():
if key == 'FocussedAxis': FocAxis = value
f = f*10**3
Transform = fourier.fft2(E)
if FocAxis == 'X':
Transform = fourier.fftshift(Transform, axes = 0)
elif FocAxis == 'Y':
Transform = fourier.fftshift(Transform, axes = 1)
elif FocAxis == 'NONE':
Transform = fourier.fftshift(Transform)
dx = X[0,1]-X[0,0]
Xfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(X.shape[1], d=dx))
dy = Y[1,0]-Y[0,0]
Yfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(Y.shape[0], d=dy))
[X, Y] = np.meshgrid(Xfrq,Yfrq)
return [Transform, X, Y]
def CylLensAction(E,X,Y,k,f,**kwargs):
# Evaluates the response of a cylindrical lens at its front focal plane
# Inputs: E - 2D Field pattern
# X,Y - 2D grid representing co-ordinates
# k - Wave vector [um^-1]
# f - focal length [mm]
# FocussedAxis - Along what axis is the beam focused at the back
# focal plane
# FocusingAxis - Along what axis does the lens focus
if (E.shape != X.shape or X.shape != Y.shape):
raise Exception('OpticalElements::CylLensAction::E,X and Y should have same dimensions.')
for key, value in kwargs.items():
if key == 'FocusingAxis': FocAxis = value
if key == 'FocusedAxis': FocusedAxis = value
f = f*10**3
if FocAxis == 'X':
Transform = fourier.fft(E, axis = 1)
if FocusedAxis != 'X':
Transform = fourier.fftshift(Transform, axes = 1)
dx = X[0,1]-X[0,0]
Xfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(X.shape[1], d=dx))
Yfrq = Y[:,0]
elif FocAxis == 'Y':
Transform = fourier.fft(E, axis = 0)
if FocusedAxis != 'Y':
Transform = fourier.fftshift(Transform, axes = 0)
dy = Y[1,0]-Y[0,0]
Yfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(Y.shape[0], d=dy))
Xfrq = X[0,:]
else: raise Exception('OpticalElements::CylLensAction::Focussing xxis needs to be specified.')
[X, Y] = np.meshgrid(Xfrq,Yfrq)
return [Transform, X, Y]
def PiPlateAction(E,X,Y,y_offset,tilt):
# Evaluates the response of an imaging system via the PSF
# Inputs:
# X,Y - 2D grid representing co-ordinates at the plane of pi plate
# E: The light field at the plane of pi plate
# y_offset, titlt: Offset and tilt of the pi plate
# Outputs:
# The light field after passing through the pi plate
if (E.shape != X.shape or X.shape != Y.shape):
raise Exception('OpticalElements::PiPlateAction::E, X and Y should have same dimensions.')
Phase = np.angle(E)
for ii in range(Y.shape[0]):
for jj in range(Y.shape[1]):
if Y[ii,jj]>(np.tan(tilt)*X[ii,jj]+y_offset):
Phase[ii,jj] = Phase[ii,jj]+np.pi
return np.abs(E)*np.exp(1j*Phase)
def MatrixFreeProp(q_in,d):
A = 1
B = d
C = 0
D = 1
q_out = (A*q_in+B)/(C*q_in+D)
return q_out
def MatrixLens(q_in,f):
A = 1
B = 0
C = -1/f
D = 1
q_out = (A*q_in+B)/(C*q_in+D)
return q_out
# Imaging #####################################################################
def ImageViaPSF(X_o, Y_o, E_o, ASF, **kwargs):
# Evaluates the response of an imaging system via the PSF
# Inputs:
# X_o,Y_o - 2D grid representing co-ordinates in object plane
# E_o: The light field at the object plane
# ASF: Amplitude Spread Function = sqrt(PSF)
# norm (optional): Normalize the ASF by some factor
# Outputs:
# I_i: The light field at the image plane
for key, value in kwargs.items():
if key == 'norm':
ASF = ASF*value
E_ft = fourier.fftshift(fourier.fft2(E_o))
ASF_ft = fourier.fftshift(fourier.fft2(ASF))
E_i = fourier.ifftshift(fourier.ifft2(E_ft*ASF_ft))
I_i = np.abs(E_i)**2
return I_i
def ASF(X_o,Y_o,R_airy,**kwargs):
# Evaluates the Amplitude Spread Function of an imaging system
# Inputs:
# X_o,Y_o - 2D grid representing co-ordinates in object plane
# R_airy: Radial extent of the PSF/ ASF
# kind (optional): Kind of ASF, default is airy
# Outputs:
# ASF: The ASF = sqrt(PSF)
kind = 'airy'
for key, value in kwargs.items():
if key == 'kind':
kind = value
R = np.sqrt(X_o**2+Y_o**2)
if kind == 'airy':
ASF = scp.special.jv(1,3.8317*R/R_airy)/(3.8317*R/R_airy)
ASF[R==0] = 0.5
if kind == 'gaussian':
R_airy = R_airy*2.672/3.8317;
ASF = np.exp(-(X_o**2+Y_o**2)/R_airy**2)
ASF = ASF/np.sum(np.abs(ASF)**2)
return ASF
def PixelizeImage(I_org,X_org,Y_org,PixSize_cam):
    # Pixelize the image onto a camera with the requested pixel pitch.
    # Inputs:
    #   X_org,Y_org - 2D grid representing co-ordinates in object plane
    #   I_org: The image
    #   PixSize_cam: The requested (positive) pixel size of the camera
    # Outputs:
    #   X_cam,Y_cam - 2D grids of co-ordinates on the camera
    #   I_cam: The pixelated image
    #   PixSize_cam: [dx, dy] pixel size actually realised on the camera
    if (I_org.shape != X_org.shape or X_org.shape != Y_org.shape):
        raise Exception('OpticalElements::PixelizeImage::I_org,X_org and Y_org should have same dimensions.')
    # NOTE(review): exact float comparison of the grid spacings is fragile;
    # consider np.isclose if legitimate grids ever trip this check.
    if (X_org[0,0]-X_org[0,1] != Y_org[0,0]-Y_org[1,0]):
        raise Exception('OpticalElements::PixelizeImage::Pixel size in X and Y are not same')
    # BUGFIX: the original computed X[-1] - (X[0]/PixSize_cam) because '/'
    # binds tighter than '-'; the whole grid extent must be divided by the
    # pixel size to obtain the pixel count.
    nptsx = int(round((X_org[0,-1]-X_org[0,0])/PixSize_cam))
    nptsy = int(round((Y_org[-1,0]-Y_org[0,0])/PixSize_cam))
    # Realised pixel pitch = extent / pixel count.  Sign fixed (last - first)
    # so ascending grids yield positive sizes, matching the input convention.
    PixSize_cam = [(X_org[0,-1]-X_org[0,0])/nptsx, (Y_org[-1,0]-Y_org[0,0])/nptsy]
    x = np.linspace(X_org[0,0],X_org[0,-1],nptsx)
    y = np.linspace(Y_org[0,0],Y_org[-1,0],nptsy)
    [X_cam,Y_cam] = np.meshgrid(x,y)
    # Bilinear resample onto the coarser camera grid via PIL.
    # NOTE(review): PIL's resize takes (width, height); passing (nptsy, nptsx)
    # yields an array of shape (nptsx, nptsy), transposed relative to
    # X_cam/Y_cam whenever nptsx != nptsy -- confirm the intended axis order
    # with callers before changing it.
    I_org_img = Image.fromarray(I_org)
    I_cam_img = I_org_img.resize((nptsy,nptsx),resample=Image.BILINEAR)
    I_cam = np.asarray(I_cam_img)
    return [X_cam,Y_cam,I_cam, PixSize_cam]
| [
"sbanik1@umd.edu"
] | sbanik1@umd.edu |
a6d0e40b80ef3766737c53fd361866048da25977 | 2e006071dbf60d6340fa26875a60629fa955b993 | /Ryo_Hosokawa/Ryo_Hosokawa/settings.py | b24442d83e31bc12d3640697fecbf87e7fa27aa4 | [] | no_license | 2018-miraikeitai-org/Return_JSON | 79757ac035905803a7bd12b5c91ca11d851334ca | a117b64f369c2ad98a389bcdc09d94753946f0ee | refs/heads/master | 2020-03-19T13:09:38.152887 | 2018-07-10T05:50:21 | 2018-07-10T05:50:21 | 136,564,415 | 0 | 1 | null | 2018-07-10T05:50:22 | 2018-06-08T04:01:02 | Python | UTF-8 | Python | false | false | 3,150 | py | """
Django settings for Ryo_Hosokawa project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+*)r*x4+le#*a5e$8$596u*di&d3-==u7o14%&36s+i&uk($!='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Ryo_Hosokawa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Ryo_Hosokawa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"hosokawaryou@enPiT2016MBP-03noMacBook-Pro.local"
] | hosokawaryou@enPiT2016MBP-03noMacBook-Pro.local |
1e1eaa7a694586422bdc9da3b230971d98ace025 | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/host_wwn_conflict_event.py | faa99ccf0a169abcf46f9c22e5db93ed38d7722e | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostWwnConflictEvent(vim, *args, **kwargs):
    '''This event records a conflict of host WWNs (World Wide Name).'''
    # Build the raw vSphere data object through the connection's SOAP factory.
    obj = vim.client.factory.create('{urn:vim25}HostWwnConflictEvent')
    # do some validation checking...
    # NOTE(review): the count check (< 5) and the message ("at least 6")
    # disagree, and the reported count ignores kwargs.  This file is marked
    # "Automatically generated, do not edit" -- flagging only, not fixing.
    if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 6 arguments got: %d' % len(args))
    # Property names accepted by this event type, in positional order:
    # the five required ones first, then the optional ones.
    required = [ 'wwn', 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'conflictedHosts', 'conflictedVms', 'changeTag', 'computeResource',
        'datacenter', 'ds', 'dvs', 'fullFormattedMessage', 'host', 'net', 'vm',
        'dynamicProperty', 'dynamicType' ]
    # Positional arguments fill properties in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)
    # Keyword arguments may set any known property; unknown names are rejected.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
    return obj
| [
"jmb@pexip.com"
] | jmb@pexip.com |
6800ca74ab37fb17c3024e97b08a6a603cd06505 | 339b90853d4e9f349d53caf21b7029dbbf686bf2 | /env/bin/googlesamples-assistant-devicetool | c7dc98c0cba530cb5e39ccb4d17c3319a16a6e97 | [] | no_license | hogwart120/lbminh-bot-test | 60965ee34a48e084ab227c0c99ced93c7d34fbeb | 2611c8b8d7a8082809939e92f11b4042416993b9 | refs/heads/master | 2020-07-16T20:10:56.335412 | 2019-07-11T08:10:01 | 2019-07-11T08:10:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | #!/home/pi/lbminh-bot/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from googlesamples.assistant.grpc.devicetool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"thangnd85@gmail.com"
] | thangnd85@gmail.com | |
f75d2596d24b3082ecc461a046ffea67c562c4b9 | 23f3ec824e4d429041f9362084f4f3be0e667732 | /src/org/bccvl/site/content/remotedataset.py | 4562b7f4883d96dac19500676febc96c7ebcb3b9 | [] | no_license | BCCVL/org.bccvl.site | 0c9aff6ccb53a42373ea5e1f3c713eae120a0db1 | d4b3a97571f5b03391ae4e33507cf84c7cef1864 | refs/heads/develop | 2021-01-24T08:21:18.402966 | 2019-11-10T23:25:53 | 2019-11-10T23:25:53 | 10,327,299 | 1 | 1 | null | 2019-01-14T22:52:31 | 2013-05-28T03:38:20 | Python | UTF-8 | Python | false | false | 268 | py | from plone.dexterity.content import Item
from zope.interface import implementer
from plone.app.contenttypes.interfaces import ILink
from org.bccvl.site.content.interfaces import IRemoteDataset
@implementer(IRemoteDataset, ILink)
class RemoteDataset(Item):
    # Dexterity content item for a dataset whose payload lives at a remote
    # location.  Declares ILink as well -- presumably so Plone's link-type
    # machinery handles the stored remote URL; verify against the FTI/schema.
    pass
| [
"g.weis@griffith.edu.au"
] | g.weis@griffith.edu.au |
90e081344e37878f7f20b3dfb85f48791ce8604c | 1fe4f9eb9b1d756ad17e1ff6585e8ee7af23903c | /saleor/store/migrations/0003_specialpage.py | 5f0354beda8b80442f5c4eb27b7a679dbb897729 | [
"BSD-3-Clause"
] | permissive | Chaoslecion123/Diver | ab762e7e6c8d235fdb89f6c958488cd9b7667fdf | 8c5c493701422eada49cbf95b0b0add08f1ea561 | refs/heads/master | 2022-02-23T10:43:03.946299 | 2019-10-19T23:39:47 | 2019-10-19T23:39:47 | 216,283,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | # Generated by Django 2.2 on 2019-04-17 02:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('site', '0022_auto_20190413_2016'),
('page', '0007_auto_20190225_0252'),
('store', '0002_socialnetwork'),
]
operations = [
migrations.CreateModel(
name='SpecialPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('about', 'About'), ('faq', 'FAQ'), ('legal', 'Terms and Conditions'), ('privacy', 'Privacy and Cookies'), ('accessibility', 'Accessibility')], max_length=32)),
('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='site_settings', related_query_name='site_setting', to='page.Page')),
('site_settings', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='special_pages', related_query_name='special_page', to='site.SiteSettings')),
],
options={
'unique_together': {('site_settings', 'type')},
},
),
]
| [
"chaoslecion71@gmail.com"
] | chaoslecion71@gmail.com |
df139b3c52f395b49ec0f3d292707431c92ddc85 | 7c89a5137839646e7a458f530fdd5315eb371480 | /netmeds/settings.py | f2af58926cfae922d1b9fcf526c2e3ee49d4d617 | [] | no_license | himanshupc11/netmedical | c0588c1113880627c4013e2f3f61dd7be5d1c2d0 | 71dea310f0aebebbb1210f8d684426c894e6c28f | refs/heads/main | 2023-01-05T22:24:53.299113 | 2020-10-31T15:58:28 | 2020-10-31T15:58:28 | 308,917,688 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,584 | py | """
Django settings for netmeds project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import django_heroku
import cloudinary
import cloudinary.uploader
import cloudinary.api
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tcqd*y^$#g)vqsj+ule-##4e^00%g*wj$(5axm=49)1kpu-z1-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'netmedsapp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cloudinary',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'netmeds.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'netmeds.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Cloudinary config
cloudinary.config(
cloud_name = "blankc",
api_key = "466946882844141",
api_secret = "B1CfGvlIMd8WS-wnwag_pkJpJfE"
)
# Activate Django-Heroku.
django_heroku.settings(locals())
| [
"himanshuchhatpar@gmail.com"
] | himanshuchhatpar@gmail.com |
29e85819a66211421e49b7d4ad844b7bfd1b732e | 8edc1db29ddde7ece88cb00c7467db4f5d9d4fe0 | /epinephrine.py | 9e9f6b94e7031558ef2b694fd869a7ad6a379e08 | [] | no_license | trevenen/nicotine | 59886c7e3e596ff673fdb84abcdccd1cca8e0646 | 40960bd3fba0f50eb364adda7fc59adeccd3e95e | refs/heads/master | 2020-06-23T02:37:33.346819 | 2019-07-09T14:51:17 | 2019-07-09T14:51:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | import vapor
from doctor import Doctor
class Epinephrine:
    """
    Epinephrine automates
    rollback in the event
    of nicotine patch
    allergic reaction.
    Running nicotine in
    deathwish mode
    disables Epinephrine.
    """
    # NOTE(review): __init__ references names that are defined nowhere in
    # this module (client, image_id, associate_public_ip_address,
    # instance_type, max_count, min_count, iam_instance_profile, dry_run,
    # tag_specification) and discards `response`.  This reads as an
    # unfinished draft of an EC2 launch call and will raise NameError if
    # instantiated -- confirm the intended parameter sources before use.
    def __init__(self):
        response = client.run_instances(ImageId=image_id,
        NetworkInterfaces=[{
            'AssociatePublicIpAddress': associate_public_ip_address,
            'DeviceIndex': 0,
            'Groups': [''],
            'SubnetId': ''
        }],
        InstanceType=instance_type,
        MaxCount=max_count,
        MinCount=min_count,
        IamInstanceProfile=iam_instance_profile,
        DryRun=dry_run,
        TagSpecifications=tag_specification)
| [
"jonassteinberg1@gmail.com"
] | jonassteinberg1@gmail.com |
4a47fffa44259b959487191994bc1233b3491c11 | 4f75cc33b4d65d5e4b054fc35b831a388a46c896 | /.history/app_20210903124401.py | 2579fc4075f93959be51f7bd4b7d23610331e820 | [] | no_license | Lr-2002/newpage | c3fe2acc451e24f6408996ea1271c61c321de702 | c589ad974e7100aa9b1c2ccc095a959ff68069b6 | refs/heads/main | 2023-09-03T06:13:53.428236 | 2021-11-23T10:41:21 | 2021-11-23T10:41:21 | 402,606,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | from flask import Flask ,render_template,url_for
from flask_sqlalchemy import
app = Flask(__name__)
name = 'Grey Li'
movies = [
{'title': 'My Neighbor Totoro', 'year': '1988'},
{'title': 'Dead Poets Society', 'year': '1989'},
{'title': 'A Perfect World', 'year': '1993'},
{'title': 'Leon', 'year': '1994'},
{'title': 'Mahjong', 'year': '1996'},
{'title': 'Swallowtail Butterfly', 'year': '1996'},
{'title': 'King of Comedy', 'year': '1999'},
{'title': 'Devils on the Doorstep', 'year': '1999'},
{'title': 'WALL-E', 'year': '2008'},
{'title': 'The Pork of Music', 'year': '2012'},
]
# @app.route('/static/<name>')
# def static(name):
# # url_for('static')
# return name
@app.route('/')
def hello():
return render_template('index.html',name=name,movies = movies)
# if __name__ == '__main__':
# app.run()
| [
"2629651228@qq.com"
] | 2629651228@qq.com |
e23e69e261aa9dd07bd77ac5f0e689626045a644 | 9742b6b4297927a47bcaba3d119730065723fd69 | /django/rest/drf的分页,缓存,过滤的实现/api/migrations/0001_initial.py | 473ebfae83812b1b502841e3f4a769f690986e6b | [
"Apache-2.0"
] | permissive | mrg1995/my_note | bd2ce553737e0324fd1d6276f3024219a7937563 | df3c80b048718320b389b4ca8bd8a6b97b838b5a | refs/heads/master | 2020-04-15T11:58:59.764034 | 2019-03-01T14:30:39 | 2019-03-01T14:30:39 | 164,654,506 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-21 14:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='User_info',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age', models.IntegerField(blank=True, null=True)),
('gender', models.BooleanField(default=False)),
('phone', models.CharField(blank=True, max_length=16, null=True)),
('isDelete', models.BooleanField(default=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'user_info',
'ordering': ['id'],
},
),
]
| [
"38936538+mrg1995@users.noreply.github.com"
] | 38936538+mrg1995@users.noreply.github.com |
d1d3fffdb132de5d0a3663618f087eeb3caf28f7 | 6437a3a4a31ab9ad233d6b2d985beb50ed50de23 | /PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/simpy/test/test_timeout.py | 91e83c4c66e014e0b5c6321a306dd2e9a7bc7ae8 | [] | no_license | sreyemnayr/jss-lost-mode-app | 03ddc472decde3c17a11294d8ee48b02f83b71e7 | 3ff4ba6fb13f4f3a4a98bfc824eace137f6aabaa | refs/heads/master | 2021-05-02T08:50:10.580091 | 2018-02-08T20:32:29 | 2018-02-08T20:32:29 | 120,813,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,821 | py | #\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
"""
Tests for ``simpy.events.Timeout``.
"""
# Pytest gets the parameters "env" and "log" from the *conftest.py* file
import pytest
def test_discrete_time_steps(env, log):
    """Simple simulation with discrete time steps."""
    # pem = "process execution method": records the clock once per step.
    # (Original docstring read "envple envulation" -- a search/replace
    # artifact; restored here.)
    def pem(env, log):
        while True:
            log.append(env.now)
            yield env.timeout(delay=1)
    env.process(pem(env, log))
    env.run(until=3)
    assert log == [0, 1, 2]
def test_negative_timeout(env):
    """Don't allow negative timeout times."""
    # The ValueError is expected to surface from env.run(), once the
    # process body executes and the negative delay is scheduled.
    def pem(env):
        yield env.timeout(-1)
    env.process(pem(env))
    pytest.raises(ValueError, env.run)
def test_timeout_value(env):
    """You can pass an additional *value* to *timeout* which will be
    directly yielded back into the PEM (process execution method). This is
    useful to implement some kinds of resources or other additions.

    See :class:`simpy.resources.Store` for an example.  (The original
    docstring's "envpy" was a search/replace artifact for "simpy".)
    """
    def pem(env):
        val = yield env.timeout(1, 'ohai')
        assert val == 'ohai'
    env.process(pem(env))
    env.run()
def test_shared_timeout(env, log):
    """One Timeout event may be yielded by several processes; all three
    children are resumed at the same simulation time (t=1)."""
    def child(env, timeout, id, log):
        yield timeout
        log.append((id, env.now))
    timeout = env.timeout(1)
    for i in range(3):
        env.process(child(env, timeout, i, log))
    env.run()
    assert log == [(0, 1), (1, 1), (2, 1)]
def test_triggered_timeout(env):
    """Yielding an event that has already been triggered succeeds
    immediately and still delivers the event's value."""
    def process(env):
        def child(env, event):
            value = yield event
            env.exit(value)
        event = env.timeout(1, 'i was already done')
        # Start the child after the timeout has already happened.
        yield env.timeout(2)
        value = yield env.process(child(env, event))
        assert value == 'i was already done'
    env.run(env.process(process(env)))
| [
"ryanmeyersweb@gmail.com"
] | ryanmeyersweb@gmail.com |
98f30963f232673487c80b4cda9c2952e3a798d9 | 3eea5c57c8dd8660d8d3adfca7260e9c1ff1316a | /test-backup.py | d7d96136dea7d599c6fa6f4ace9f42872e4ec9df | [] | no_license | fergieis/Networks | 4cc8dac4afdf16184857bf586c1ca0693dc43e57 | 32eb99ee6946d604ca7d483c000f25922bcafc7d | refs/heads/master | 2021-01-21T14:01:44.881478 | 2016-05-23T13:41:01 | 2016-05-23T13:41:01 | 55,792,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | from __future__ import division
import pandas as pd
import numpy as np
from gurobipy import *
#from pulp import *
from datetime import date, datetime
def date2t(curr_date):
    """Convert a POSIX timestamp to a half-day period index.

    Periods are counted from midnight UTC on 2016-02-01 (the Iowa Caucus),
    two per day (00:00 and 12:00), so e.g. 13:00 on day zero maps to 1.

    BUGFIX: the original subtracted a datetime.date from a datetime.datetime
    (a TypeError at runtime) and carried a trailing comma that wrapped the
    result in a 1-tuple; both are fixed so a plain int is returned.
    """
    start = datetime(2016, 2, 1)  # Iowa Caucus, midnight UTC
    elapsed = datetime.utcfromtimestamp(curr_date) - start
    # Two 12-hour periods per whole day, plus one if past noon (43200 s = 12 h).
    return 2 * elapsed.days + elapsed.seconds // 43200
data = pd.read_csv('Cities.csv')
cities = {}
for key, s in data.groupby("State")['City']:
cities[key] = list(s)
#city_list = data["City"]
#cities=dict(zip(data["State"]
city_pop = data["City Population"]
state_pop = data["State Population"]
states_list = data["State"]
states = dict(zip(cities, data["State"]))
#### NEED CANDIDATES DATA IN DATA.CSV########
#-------------------------------------------#
cand = np.random.randint(3,90,len(states))
#Currently using start and end dates of elections, will likely
# want to "back off" first election by a bit.
#Maybe also need an indicator for party (R/D)?
start_date = date(2016, 2, 1) #Iowa Caucus
end_date = date(2016, 6, 8) #thru Jun 7 (+1), Jun 14 for DC for Democrats
visits = pd.date_range(start_date, end_date, freq="12H")
visit_index = range(len(visits))
total_visits = [None] * len(visits)
visits_dict = dict(zip(xrange(1,len(visits)+1), visits))
T = len(visits)
city_per = city_pop / state_pop
city_per = dict(zip(cities, city_per))
state_visits = dict(zip(states, np.zeros(len(states))))
candidates = dict(zip(states, cand))
m = Model("OPER617Project")
#prob = pulp.LpProblem("OPER617Project", pulp.LpMaximize)
v ={}
I ={}
mu={}
for state in states:
for city in cities[state]:
I[state] = m.addVar(vtype=GRB.BINARY, name="Win-"+ str(state))
for t in visits_dict:
#NEEDS END DATES ADDED
try:
mu[state,t] = max((50/(T-t)), 0)
except ZeroDivisionError:
mu[state,t] = 0
#What do we want ON election day?
v[city,t] = m.addVar(vtype=GRB.BINARY, name=str(city)+", "+ str(state) + ":"+ str(t))
| [
"fergieis@yahoo.com"
] | fergieis@yahoo.com |
6c0d1cac4f7d4207631446b5ea39072ab40066dd | 18319a52cce2b3f3a3607a18f45cbd5933ad8e31 | /venv/Lib/site-packages/bottle_sqlite.py | f568b58a6389cfdd11c1b2072cadb07f55fc79a2 | [] | no_license | AmithRajMP/Web-Tech-Assignment--2- | 8d9e56ef6bd302661654e32182964b9fe5644801 | ec7f410868f8936199bec19f01fce4ad6e081e79 | refs/heads/master | 2020-03-18T01:54:46.662732 | 2018-09-16T11:06:04 | 2018-09-16T11:06:04 | 134,165,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,833 | py | '''
Bottle-sqlite is a plugin that integrates SQLite3 with your Bottle
application. It automatically connects to a database at the beginning of a
request, passes the database handle to the route callback and closes the
connection afterwards.
To automatically detect routes that need a database connection, the plugin
searches for route callbacks that require a `db` keyword argument
(configurable) and skips routes that do not. This removes any overhead for
routes that don't need a database connection.
Usage Example::
import bottle
from bottle.ext import sqlite
app = bottle.Bottle()
plugin = sqlite.Plugin(dbfile='/tmp/test.db')
app.install(plugin)
@app.route('/show/:item')
def show(item, db):
row = db.execute('SELECT * from items where name=?', item).fetchone()
if row:
return template('showitem', page=row)
return HTTPError(404, "Page not found")
'''
__author__ = "Marcel Hellkamp"
__version__ = '0.1.3'
__license__ = 'MIT'
### CUT HERE (see setup.py)
import sqlite3
import inspect
import bottle
# PluginError is defined to bottle >= 0.10
if not hasattr(bottle, 'PluginError'):
class PluginError(bottle.BottleException):
pass
bottle.PluginError = PluginError
class SQLitePlugin(object):
    ''' This plugin passes an sqlite3 database handle to route callbacks
    that accept a `db` keyword argument. If a callback does not expect
    such a parameter, no connection is made. You can override the database
    settings on a per-route basis. '''

    # Bottle plugin protocol: unique plugin name and plugin API version 2.
    name = 'sqlite'
    api = 2

    def __init__(self, dbfile=':memory:', autocommit=True, dictrows=True,
                 keyword='db', text_factory=str):
        # Defaults applied to every route unless overridden via route config.
        self.dbfile = dbfile              # database path (':memory:' = RAM db)
        self.autocommit = autocommit      # commit after the callback returns
        self.dictrows = dictrows          # expose rows as sqlite3.Row
        self.keyword = keyword            # callback kwarg receiving the handle
        self.text_factory = text_factory  # how TEXT columns are decoded

    def setup(self, app):
        ''' Make sure that other installed plugins don't affect the same
            keyword argument.'''
        for other in app.plugins:
            if not isinstance(other, SQLitePlugin):
                continue
            if other.keyword == self.keyword:
                raise PluginError("Found another sqlite plugin with "
                                  "conflicting settings (non-unique keyword).")
            elif other.name == self.name:
                # Same keyword is an error, but a mere name clash is resolved
                # by making this plugin's name unique.
                self.name += '_%s' % self.keyword

    def apply(self, callback, route):
        # hack to support bottle v0.9.x, where routes are plain dicts
        if bottle.__version__.startswith('0.9'):
            config = route['config']
            _callback = route['callback']
        else:
            config = route.config
            _callback = route.callback

        # Override global configuration with route-specific values.
        if "sqlite" in config:
            # support for configuration before `ConfigDict` namespaces
            g = lambda key, default: config.get('sqlite', {}).get(key, default)
        else:
            g = lambda key, default: config.get('sqlite.' + key, default)

        dbfile = g('dbfile', self.dbfile)
        autocommit = g('autocommit', self.autocommit)
        dictrows = g('dictrows', self.dictrows)
        keyword = g('keyword', self.keyword)
        # BUGFIX: the per-route text factory was looked up under the
        # 'keyword' config key (copy-paste error); use 'text_factory'.
        text_factory = g('text_factory', self.text_factory)

        # Test if the original callback accepts the configured keyword.
        # Ignore it if it does not need a database handle.
        # getfullargspec replaces getargspec, which was removed in Python 3.11;
        # fall back for interpreters that predate it.
        if hasattr(inspect, 'getfullargspec'):
            argspec = inspect.getfullargspec(_callback)
        else:
            argspec = inspect.getargspec(_callback)
        if keyword not in argspec.args:
            return callback

        def wrapper(*args, **kwargs):
            # Connect to the database
            db = sqlite3.connect(dbfile)
            # set text factory
            db.text_factory = text_factory
            # This enables column access by name: row['column_name']
            if dictrows:
                db.row_factory = sqlite3.Row
            # Add the connection handle as a keyword argument.
            kwargs[keyword] = db
            try:
                rv = callback(*args, **kwargs)
                if autocommit:
                    db.commit()
            except sqlite3.IntegrityError as e:
                db.rollback()
                raise bottle.HTTPError(500, "Database Error", e)
            except bottle.HTTPError as e:
                raise
            except bottle.HTTPResponse as e:
                # bottle uses HTTPResponse for normal control flow (e.g.
                # redirects), so a pending transaction is still committed.
                if autocommit:
                    db.commit()
                raise
            finally:
                db.close()
            return rv

        # Replace the route callback with the wrapped one.
        return wrapper
Plugin = SQLitePlugin
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
896b091738f2acb4cb61516a1011ed82b9907efb | a9b1debfd6de31175b84f6047ad7a294b888f843 | /accounts/DB_Transactions.py | 15451f2f401aec4529f1d37ed20375a2eb2b47c2 | [] | no_license | kajalsaphui/lenseploter | 79cf252498cdaf4d28c5b7f10493a5d5aa2eb823 | b172a5cfb98a0560b88a0bfc836f44da1948c50b | refs/heads/main | 2023-09-05T10:15:49.513539 | 2021-11-18T09:07:44 | 2021-11-18T09:07:44 | 429,358,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,089 | py | from . import DBUtils
from . import Defaults
def get_all_nodes():
    """Fetch all active rows from vrx_SFTS_node; return None on DB failure."""
    Defaults.logger("Entering -> | get_all_nodes() |", level="info")
    try:
        sql_query = "SELECT * FROM vrx_SFTS_node WHERE status = 1"
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting -> | get_all_nodes() |", level="info")
        return rows
    except:
        Defaults.logger("DATABASE OPERATION COULDN'T BE PERFORMED", "", level="warning")
        Defaults.logger("Exiting -> | get_all_nodes() |", level="info")
        return None
def get_all_orders():
    """Fetch every webshop order row; return None on DB failure."""
    Defaults.logger("Entering -> | get_all_orders() |", level="info")
    try:
        sql_query = "SELECT POHD_CUST_CD,POHD_APPLICATION_NAME,POHD_SOURCE_REF,OCI_NO,POHD_PATIENT_FIRST_NAME,convert(varchar(10),Entry_date,105) as Entry_date FROM RXNetOrder_Webshop"
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting -> | get_all_orders() |", level="info")
        return rows
    except:
        Defaults.logger("DATABASE OPERATION COULDN'T BE PERFORMED", "", level="warning")
        Defaults.logger("Exiting -> | get_all_orders() |", level="info")
        return None
def get_all_orders_by_castomer(customer_code):
    """Fetch webshop orders for one customer code; return None on DB failure."""
    Defaults.logger("Entering -> | get_all_orders_by_castomer() |", level="info")
    try:
        # NOTE(review): customer_code is interpolated straight into the SQL
        # text -- SQL-injection risk; parameterize once DBUtils supports it.
        sql_query = "SELECT POHD_CUST_CD,POHD_APPLICATION_NAME,POHD_SOURCE_REF,OCI_NO,POHD_PATIENT_FIRST_NAME,convert(varchar(10),Entry_date,105) as Entry_date FROM RXNetOrder_Webshop WHERE POHD_CUST_CD ='%s'" % (customer_code)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_all_orders_by_castomer() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_all_orders_by_castomer() |", level="error")
        return None
# anirudha=========================
def get_all_orders_by_oci_castomer(oci_no, customer_code):
    """Fetch webshop orders matching an OCI number and customer code."""
    Defaults.logger("Entering -> | get_all_orders_by_oci_castomer() |", level="info")
    try:
        # NOTE(review): values are interpolated into the SQL string -- injection risk.
        sql_query = "SELECT POHD_CUST_CD,POHD_APPLICATION_NAME,POHD_SOURCE_REF,OCI_NO,POHD_PATIENT_FIRST_NAME,convert(varchar(10),Entry_date,105) as Entry_date FROM RXNetOrder_Webshop WHERE OCI_NO='%s' AND POHD_CUST_CD ='%s'" % (oci_no, customer_code)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_all_orders_by_oci_castomer |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_all_orders_by_oci_castomer() |", level="error")
        return None
def get_all_orders_by_oci_entrydate(day, month, year, customer_code):
    """Fetch a customer's orders entered on an exact day/month/year."""
    Defaults.logger("Entering -> | get_all_orders_by_oci_entrydate() |", level="info")
    try:
        # NOTE(review): string-built SQL -- injection risk; parameterize when possible.
        sql_query = "SELECT POHD_CUST_CD,POHD_APPLICATION_NAME,POHD_SOURCE_REF,OCI_NO,POHD_PATIENT_FIRST_NAME,convert(varchar(10),Entry_date,105) as Entry_date FROM RXNetOrder_Webshop WHERE POHD_CUST_CD='%s' AND DAY(ENTRY_DATE)='%d' AND MONTH(ENTRY_DATE)= '%d' AND YEAR(ENTRY_DATE)='%s'" % (customer_code, int(day), int(month), year)
        print(sql_query)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        Defaults.logger("Exiting <- | get_all_orders_by_oci_entrydate() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_all_orders_by_oci_entrydate() |", level="error")
        return None
def get_all_orders_filter_by_entrydate(entry_year, entry_month, customer_code):
    """Fetch a customer's orders entered in a given month and year."""
    Defaults.logger("Entering -> | get_all_orders_filter_by_entrydate() |", level="info")
    try:
        # NOTE(review): string-built SQL -- injection risk; parameterize when possible.
        sql_query = "SELECT POHD_CUST_CD,POHD_APPLICATION_NAME,POHD_SOURCE_REF,OCI_NO,POHD_PATIENT_FIRST_NAME,convert(varchar(10),Entry_date,105) as Entry_date FROM RXNetOrder_Webshop WHERE POHD_CUST_CD='%s' AND MONTH(ENTRY_DATE)= '%d' AND YEAR(ENTRY_DATE)='%s'" % (customer_code, int(entry_month), entry_year)
        print(sql_query)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_all_orders_filter_by_entrydate() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_all_orders_filter_by_entrydate() |", level="error")
        return None
def get_all_orders_filter_by_ref_no_val(ref_no, customer_code):
    """Fetch a customer's orders matching a source reference number."""
    Defaults.logger("Entering -> | get_all_orders_filter_by_ref_no_val() |", level="info")
    try:
        # NOTE(review): string-built SQL -- injection risk; parameterize when possible.
        sql_query = "SELECT POHD_CUST_CD,POHD_APPLICATION_NAME,POHD_SOURCE_REF,OCI_NO,POHD_PATIENT_FIRST_NAME,convert(varchar(10),Entry_date,105) as Entry_date FROM RXNetOrder_Webshop WHERE POHD_SOURCE_REF ='%s' AND POHD_CUST_CD='%s'" % (ref_no, customer_code)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_all_orders_filter_by_ref_no_val() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_all_orders_filter_by_ref_no_val() |", level="error")
        return None
# anirudha=============================
def get_tracevalue(oci_no):
    """Fetch the NON_TRACER flag of the first order matching an OCI number."""
    Defaults.logger("Entering -> | get_tracevalue() |", level="info")
    try:
        sql_query = "SELECT TOP 1 NON_TRACER FROM RXNetOrder_Webshop WHERE OCI_NO ='%s'" % (oci_no)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_tracevalue() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_tracevalue() |", level="error")
        return None
# ===============anirudha 20 july===============
def get_all_year_history(customer_code):
    """List the distinct order-entry years recorded for a customer."""
    Defaults.logger("Entering -> | get_all_year_history() |", level="info")
    try:
        sql_query = "select distinct YEAR(entry_date) AS [YEAR] from RXNetOrder_Webshop WHERE POHD_CUST_CD='%s' ORDER BY YEAR(entry_date)" % (customer_code)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_all_year_history() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_all_year_history() |", level="error")
        return None
def get_month_by_year(year, customer_code):
    """List the distinct entry months (name + number) for a customer/year."""
    Defaults.logger("Entering -> | get_month_by_year() |", level="info")
    try:
        sql_query = "select distinct LEFT(DATENAME(MONTH,entry_date),10) AS [MONTHNAME], MONTH(entry_date)[MONTH] from RXNetOrder_Webshop WHERE POHD_CUST_CD='%s' AND YEAR(entry_date)='%s' ORDER BY MONTH(entry_date)" % (customer_code, year)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_month_by_year() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_month_by_year() |", level="error")
        return None
#========================vrxlab lab view===========================
def Cridential_mapping_vrxlab(customer_code):
    """Look up the per-customer table name from accounts_master."""
    Defaults.logger("Entering -> | Cridential_mapping_vrxlab() |", level="info")
    try:
        sql_query = "select Table_name from accounts_master WHERE Cust_code='%s'" % (customer_code)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | Cridential_mapping_vrxlab() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | Cridential_mapping_vrxlab() |", level="error")
        return None
# def Cridential_mapping_vrxlab_ref_cast_code(ref_customer_code):
# print(ref_customer_code)
# Defaults.logger("Entering -> | Cridential_mapping_vrxlab_ref_cast_code() |", level = "info")
# # try:
# sql_query = "select top 1 Table_name FROM accounts_master WHERE Ref_cust_code ='%s'"%(ref_customer_code)
# res = DBUtils.DB_Execute_MS(sql_query, "fetch")
# print(res)
# Defaults.logger("Exiting <- | Cridential_mapping_vrxlab_ref_cast_code() |", level = "info")
# return res
# # except:
# # Defaults.logger("Exiting <- | Cridential_mapping_vrxlab_ref_cast_code() |", level = "error")
# # return None
def get_all_orders_by_oci_vrx_labworker(table_name, oci_no):
    """Fetch orders by OCI number from a lab-specific order table."""
    print(table_name)
    print(oci_no)
    # Log messages below keep the original (mismatched) function name.
    Defaults.logger("Entering -> | get_all_orders_by_oci_castomer_vrx_labworker() |", level="info")
    try:
        # NOTE(review): table_name/oci_no are interpolated into SQL -- injection risk.
        sql_query = "SELECT POHD_CUST_CD,POHD_APPLICATION_NAME,POHD_SOURCE_REF,OCI_NO,POHD_PATIENT_FIRST_NAME,convert(varchar(10),Entry_date,105) as Entry_date FROM %s WHERE OCI_NO='%s'" % (table_name, oci_no)
        print(sql_query)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_all_orders_by_oci_castomer_vrx_labworker |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_all_orders_by_oci_castomer_vrx_labworker() |", level="error")
        return None
def get_all_orders_by_ref_vrx_labworker(table_name, ref_no):
    """Fetch orders by source reference number from a lab-specific order table.

    Returns the fetched rows, or None when the query fails.
    """
    Defaults.logger("Entering -> | get_all_orders_by_oci_castomer_vrx_labworker() |", level="info")
    try:
        # BUG FIX: the table name was previously quoted as a string literal
        # (FROM '%s'), which is invalid T-SQL and made every call fail.
        # The sibling get_all_orders_by_oci_vrx_labworker() uses FROM %s.
        # NOTE(review): values are still interpolated into the SQL text --
        # injection risk; parameterize once DBUtils supports bind variables.
        sql_query = "SELECT POHD_CUST_CD,POHD_APPLICATION_NAME,POHD_SOURCE_REF,OCI_NO,POHD_PATIENT_FIRST_NAME,convert(varchar(10),Entry_date,105) as Entry_date FROM %s WHERE POHD_SOURCE_REF='%s'" % (table_name, ref_no)
        res = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(res)
        Defaults.logger("Exiting <- | get_all_orders_by_oci_castomer_vrx_labworker |", level="info")
        return res
    except:
        Defaults.logger("Exiting <- | get_all_orders_by_oci_castomer_vrx_labworker() |", level="error")
        return None
def get_OMA_DATA(oci_no):
    """Fetch the OMA_DATA blob of the first order matching an OCI number."""
    Defaults.logger("Entering -> | get_OMA_DATA() |", level="info")
    try:
        sql_query = "SELECT TOP 1 OMA_DATA FROM RXNetOrder_Webshop WHERE OCI_NO ='%s'" % (oci_no)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_OMA_DATA() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_OMA_DATA() |", level="error")
        return None
def get_NON_TRACE_DATA(oci_no):
    """Fetch the NON_TRACER value of the first order matching an OCI number."""
    Defaults.logger("Entering -> | get_NON_TRACE_DATA() |", level="info")
    try:
        sql_query = "SELECT TOP 1 NON_TRACER FROM RXNetOrder_Webshop WHERE OCI_NO ='%s'" % (oci_no)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        print(rows)
        Defaults.logger("Exiting <- | get_NON_TRACE_DATA() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_NON_TRACE_DATA() |", level="error")
        return None
#======================================================================================
def get_all_orders_report_entrydate(day, month, year, day2, month2, year2, customer_code):
    """Fetch the full order-report columns for a customer within a date range.

    The range bounds are assembled as year+month+day string concatenation,
    matching the original query exactly.
    """
    Defaults.logger("Entering -> | get_all_orders_by_oci_entrydate() |", level="info")
    try:
        # NOTE(review): string-built SQL -- injection risk; parameterize when possible.
        sql_query = "SELECT convert(varchar(10),Entry_date,105) [ORDERDATE],OCI_NO [OCINUMBER],POHD_ACCOUNT_CD [CUSTCODE],POHD_SOURCE_REF [REFNUMBER],RIGHT_POPR_PROD_CD [RECODE], RIGHT_POLN_COMM_DIAM [RECOMMDIA],RIGHT_POPR_REQ_DIAMETER [REREQDIA],RIGHT_POPR_SPHERE [RESPHERE],RIGHT_POPR_CYLINDER [RECYLINDER],RIGHT_POPR_AXIS [REAXIS],RIGHT_POPR_ADDITION [READDITION],RIGHT_POLN_QUANTITY [REQTY],RIGHT_POPR_REQ_BASE [REBASE],RIGHT_POPR_REQ_CT [RECT], RIGHT_POPR_REQ_ET [REET],RIGHT_POPR_PRISM_1 [REPRISM1],RIGHT_POPR_PRISM_DIR_1 [REPRISM1DIA],RIGHT_POPR_PRISM_2 [REPRISM2],RIGHT_POPR_PRISM_DIR_2 [REPRISM2DIA],RIGHT_POPR_PD [REPD], RIGHT_POPR_NPD [RENPD],RIGHT_POPR_HEIGHT [REHEIGHT],LEFT_POPR_PROD_CD [LECODE], LEFT_POLN_COMM_DIAM [LECOMMDIA],LEFT_POPR_REQ_DIAMETER [LEREQDIA],LEFT_POPR_SPHERE [LESPHERE],LEFT_POPR_CYLINDER [LECYLINDER],LEFT_POPR_AXIS [LEAXIS],LEFT_POPR_ADDITION [LEADDITION],LEFT_POLN_QUANTITY [LEQTY],LEFT_POPR_REQ_BASE [LEBASE],LEFT_POPR_REQ_CT [LECT], LEFT_POPR_REQ_ET [LEET],LEFT_POPR_PRISM_1 [LEPRISM1],LEFT_POPR_PRISM_DIR_1 [LEPRISM1DIA],LEFT_POPR_PRISM_2 [LEPRISM2],LEFT_POPR_PRISM_DIR_2 [LEPRISM2DIA],LEFT_POPR_PD [LEPD], LEFT_POPR_NPD [LENPD],LEFT_POPR_HEIGHT [LEHEIGHT],SERVICE1_POLN_PROD_CD [COATINGCOADE],SERVICE1_SOURCE_PROD_DESC [COATINGDESC], SERVICE2_POLN_PROD_CD [TINTCOADE],SERVICE2_SOURCE_PROD_DESC [TINTDESC],POTR_A [BOXA],POTR_B [BOXB], POTR_DBL [DBL],POTR_FRAME [FRAMETYPE],VERTEX_DIST [VERTEXDIST], WRAPANGLE [WRAPANGLE], PANTOANGLE [PANTOANGLE],SPLINSTRUCTION [SPLINSTRUCTION], EDGINGTYPE [EDGINGTYPE] FROM RXNetOrder_Webshop WHERE POHD_CUST_CD='%s'AND Entry_date Between '%s' + '%s' + '%s' And '%s' + '%s' + '%s'" % (customer_code, year, month, day, year2, month2, day2)
        print(sql_query)
        rows = DBUtils.DB_Execute_MS(sql_query, "fetch")
        Defaults.logger("Exiting <- | get_all_orders_by_oci_entrydate() |", level="info")
        return rows
    except:
        Defaults.logger("Exiting <- | get_all_orders_by_oci_entrydate() |", level="error")
        return None
def get_Extra_DATA_Axcepta(oci_no):
    """Fetch lens description columns for an OCI number from the Axapta DB."""
    try:
        # NOTE(review): oci_no is interpolated into the SQL text -- injection risk.
        sql_query = "SELECT LELENSNAME,LEREFINDEX,LEFOCALITY,LELENSMAT,CUSTLELENSNAME as LE_PRODUCT_DESCRIPTION,RELENSNAME,OCINUMBER,REREFINDEX,REFOCALITY,RELENSMAT,CUSTRELENSNAME as RE_PRODUCT_DESCRIPTION FROM OCI_EXPORTFINAL WHERE OCINUMBER = '%s' AND PARTITION='5637144576' AND DATAAREAID='dub'" % (oci_no)
        rows = DBUtils.DB_Execute_MS_Axapta(sql_query, "fetch")
        return rows
    except:
        Defaults.logger("Exiting <- | get_Extra_DATA_Axcepta() |", level="error")
        return None
"kajalsaphui@gkbrxlens.com"
] | kajalsaphui@gkbrxlens.com |
dee8a6fa242eaa17b94d0e6b419c56fdcdd8f742 | 0fa00ecf2dd671515dc001d4b14049ec6a0c1f1c | /custom_components/powercalc/power_profile/power_profile.py | 666c4af626a6097f47d1c1015b83eeac17df2eda | [
"Unlicense"
] | permissive | bacco007/HomeAssistantConfig | d91a5368344f50abbea881bd1e6dfc57a0e456ca | 8548d9999ddd54f13d6a307e013abcb8c897a74e | refs/heads/master | 2023-08-30T07:07:33.571959 | 2023-08-29T20:00:00 | 2023-08-29T20:00:00 | 230,585,631 | 98 | 16 | Unlicense | 2023-09-09T08:28:39 | 2019-12-28T09:05:02 | Python | UTF-8 | Python | false | false | 13,870 | py | from __future__ import annotations
import json
import logging
import os
import re
from typing import NamedTuple, Protocol
from awesomeversion.awesomeversion import AwesomeVersion
from homeassistant.const import __version__ as HA_VERSION # noqa
if AwesomeVersion(HA_VERSION) >= AwesomeVersion("2023.8.0"):
from enum import StrEnum
else:
from homeassistant.backports.enum import StrEnum # pragma: no cover
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.media_player import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.core import HomeAssistant, State
from homeassistant.helpers.typing import ConfigType
from custom_components.powercalc.common import SourceEntity
from custom_components.powercalc.const import CONF_POWER, CalculationStrategy
from custom_components.powercalc.errors import (
ModelNotSupportedError,
PowercalcSetupError,
UnsupportedStrategyError,
)
_LOGGER = logging.getLogger(__name__)
class DeviceType(StrEnum):
CAMERA = "camera"
LIGHT = "light"
SMART_SWITCH = "smart_switch"
SMART_SPEAKER = "smart_speaker"
NETWORK = "network"
class SubProfileMatcherType(StrEnum):
ATTRIBUTE = "attribute"
ENTITY_ID = "entity_id"
ENTITY_STATE = "entity_state"
INTEGRATION = "integration"
DEVICE_DOMAINS = {
DeviceType.CAMERA: CAMERA_DOMAIN,
DeviceType.LIGHT: LIGHT_DOMAIN,
DeviceType.SMART_SWITCH: SWITCH_DOMAIN,
DeviceType.SMART_SPEAKER: MEDIA_PLAYER_DOMAIN,
DeviceType.NETWORK: BINARY_SENSOR_DOMAIN,
}
class PowerProfile:
    """In-memory representation of one manufacturer/model power profile.

    Wraps the model.json data of a profile directory and exposes the
    strategy configuration, sub-profile handling and supported-domain checks.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        manufacturer: str,
        model: str,
        directory: str,
        json_data: ConfigType,
    ) -> None:
        self._hass = hass
        self._manufacturer = manufacturer
        self._model = model.replace("#slash#", "/")
        self._directory = directory
        self._json_data = json_data
        self.sub_profile: str | None = None
        self._sub_profile_dir: str | None = None

    def get_model_directory(self, root_only: bool = False) -> str:
        """Get the model directory containing the data files."""
        if self.linked_lut:
            return os.path.join(os.path.dirname(__file__), "../data", self.linked_lut)
        if root_only:
            return self._directory
        return self._sub_profile_dir or self._directory

    def supports(self, model: str) -> bool:
        """Whether this profile covers the given model ID (aliases included)."""
        model = model.lower().replace("#slash#", "/")
        if model == self._model.lower():
            return True
        # @todo implement Regex/Json path
        if any(alias.lower() == model for alias in self.aliases):
            return True
        # Also try to match a model ID between trailing parentheses.
        match = re.search(r"\(([^\(\)]+)\)$", model)
        if match:
            return self.supports(match.group(1))
        return False

    @property
    def manufacturer(self) -> str:
        return self._manufacturer

    @property
    def model(self) -> str:
        return self._model

    @property
    def name(self) -> str:
        return self._json_data.get("name") or ""

    @property
    def standby_power(self) -> float:
        return self._json_data.get("standby_power") or 0

    @property
    def standby_power_on(self) -> float:
        return self._json_data.get("standby_power_on") or 0

    @property
    def calculation_strategy(self) -> CalculationStrategy:
        """Calculation strategy this profile provides (LUT when unspecified)."""
        if "calculation_strategy" in self._json_data:
            return CalculationStrategy(str(self._json_data.get("calculation_strategy")))
        return CalculationStrategy.LUT

    @property
    def linked_lut(self) -> str | None:
        return self._json_data.get("linked_lut")

    @property
    def calculation_enabled_condition(self) -> str | None:
        return self._json_data.get("calculation_enabled_condition")

    @property
    def aliases(self) -> list[str]:
        return self._json_data.get("aliases") or []

    @property
    def linear_mode_config(self) -> ConfigType | None:
        """Configuration to set up the linear strategy."""
        if not self.is_strategy_supported(CalculationStrategy.LINEAR):
            raise UnsupportedStrategyError(
                f"Strategy linear is not supported by model: {self._model}",
            )
        return self._json_data.get("linear_config")

    @property
    def fixed_mode_config(self) -> ConfigType | None:
        """Configuration to set up the fixed strategy."""
        if not self.is_strategy_supported(CalculationStrategy.FIXED):
            raise UnsupportedStrategyError(
                f"Strategy fixed is not supported by model: {self._model}",
            )
        fixed_config = self._json_data.get("fixed_config")
        # Profiles that only declare standby_power_on imply a 0 W fixed config.
        if fixed_config is None and self.standby_power_on:
            fixed_config = {CONF_POWER: 0}
        return fixed_config

    @property
    def sensor_config(self) -> ConfigType:
        """Additional sensor configuration."""
        return self._json_data.get("sensor_config") or {}

    def is_strategy_supported(self, mode: CalculationStrategy) -> bool:
        """Whether a certain calculation strategy is supported by this profile."""
        return mode == self.calculation_strategy

    @property
    def needs_fixed_config(self) -> bool:
        """True for profiles (e.g. smart switches) that only ship standby values,
        so the user must supply the power values in the config flow."""
        return self.is_strategy_supported(
            CalculationStrategy.FIXED,
        ) and not self._json_data.get("fixed_config")

    @property
    def device_type(self) -> DeviceType:
        device_type = self._json_data.get("device_type")
        if not device_type:
            return DeviceType.LIGHT
        return DeviceType(device_type)

    @property
    def config_flow_discovery_remarks(self) -> str | None:
        return self._json_data.get("config_flow_discovery_remarks")

    def get_sub_profiles(self) -> list[str]:
        """List the available sub profile directory names, sorted."""
        return sorted(next(os.walk(self.get_model_directory(True)))[1])

    @property
    def has_sub_profiles(self) -> bool:
        return len(self.get_sub_profiles()) > 0

    @property
    def sub_profile_select(self) -> SubProfileSelectConfig | None:
        """Configuration for automatic sub profile switching, if declared."""
        select_dict = self._json_data.get("sub_profile_select")
        if not select_dict:
            return None
        return SubProfileSelectConfig(**select_dict)

    def select_sub_profile(self, sub_profile: str) -> None:
        """Select a sub profile. Only applicable when the profile has sub profiles."""
        if not self.has_sub_profiles:
            return
        # Sub profile already selected, no need to load it again.
        if self.sub_profile == sub_profile:
            return

        self._sub_profile_dir = os.path.join(self._directory, sub_profile)
        _LOGGER.debug(f"Loading sub profile directory {sub_profile}")
        if not os.path.exists(self._sub_profile_dir):
            raise ModelNotSupportedError(
                f"Sub profile not found (manufacturer: {self._manufacturer}, model: {self._model}, "
                f"sub_profile: {sub_profile})",
            )

        # When the sub LUT directory also has a model.json (not required),
        # merge this json into the main model.json data.
        file_path = os.path.join(self._sub_profile_dir, "model.json")
        if os.path.exists(file_path):
            with open(file_path) as json_file:
                self._json_data = {**self._json_data, **json.load(json_file)}

        self.sub_profile = sub_profile

    def is_entity_domain_supported(self, source_entity: SourceEntity) -> bool:
        """Check whether this power profile supports a given entity domain."""
        entity_entry = source_entity.entity_entry
        # Hue smart plugs expose themselves as lights;
        # see https://github.com/bramstroker/homeassistant-powercalc/issues/1491
        if (
            self.device_type == DeviceType.SMART_SWITCH
            and entity_entry
            and entity_entry.platform in ["hue"]
            and source_entity.domain == LIGHT_DOMAIN
        ):
            return True
        return DEVICE_DOMAINS[self.device_type] == source_entity.domain
class SubProfileSelector:
    """Chooses the active sub profile of a power profile at runtime,
    based on the configured matchers and the entity state."""

    def __init__(
        self,
        hass: HomeAssistant,
        config: SubProfileSelectConfig,
        source_entity: SourceEntity,
    ) -> None:
        self._hass = hass
        self._config = config
        self._source_entity = source_entity
        self._matchers: list[SubProfileMatcher] = self._build_matchers()

    def _build_matchers(self) -> list[SubProfileMatcher]:
        """Instantiate one matcher per configured matcher entry."""
        return [self._create_matcher(conf) for conf in self._config.matchers]

    def select_sub_profile(self, entity_state: State) -> str:
        """Return the first matching sub profile, or the configured default."""
        for matcher in self._matchers:
            selected = matcher.match(entity_state, self._source_entity)
            if selected:
                return selected
        return self._config.default

    def get_tracking_entities(self) -> list[str]:
        """Additional entities whose state changes should be tracked."""
        return [
            entity_id
            for matcher in self._matchers
            for entity_id in matcher.get_tracking_entities()
        ]

    def _create_matcher(self, matcher_config: dict) -> SubProfileMatcher:
        """Create a matcher from json config. Can be extended for more matchers in the future."""
        matcher_type: SubProfileMatcherType = matcher_config["type"]
        if matcher_type == SubProfileMatcherType.ATTRIBUTE:
            return AttributeMatcher(matcher_config["attribute"], matcher_config["map"])
        if matcher_type == SubProfileMatcherType.ENTITY_STATE:
            return EntityStateMatcher(
                self._hass,
                self._source_entity,
                matcher_config["entity_id"],
                matcher_config["map"],
            )
        if matcher_type == SubProfileMatcherType.ENTITY_ID:
            return EntityIdMatcher(matcher_config["pattern"], matcher_config["profile"])
        if matcher_type == SubProfileMatcherType.INTEGRATION:
            return IntegrationMatcher(
                matcher_config["integration"],
                matcher_config["profile"],
            )
        raise PowercalcSetupError(f"Unknown sub profile matcher type: {matcher_type}")
class SubProfileSelectConfig(NamedTuple):
    """Parsed 'sub_profile_select' section of a model.json."""

    # Sub profile used when no matcher fires.
    default: str
    # Raw matcher configuration dicts.
    matchers: list[dict]
class SubProfileMatcher(Protocol):
    """Structural interface implemented by all sub profile matchers."""

    def match(self, entity_state: State, source_entity: SourceEntity) -> str | None:
        """Return the matched sub profile name, or None when not applicable."""

    def get_tracking_entities(self) -> list[str]:
        """Return extra entity ids to track for state changes."""
class EntityStateMatcher(SubProfileMatcher):
    """Selects a sub profile based on the state of another entity."""

    def __init__(
        self,
        hass: HomeAssistant,
        source_entity: SourceEntity | None,
        entity_id: str,
        mapping: dict[str, str],
    ) -> None:
        self._hass = hass
        # The entity_id template may reference the source entity's object id.
        if source_entity:
            entity_id = entity_id.replace(
                "{{source_object_id}}",
                source_entity.object_id,
            )
        self._entity_id = entity_id
        self._mapping = mapping

    def match(self, entity_state: State, source_entity: SourceEntity) -> str | None:
        tracked_state = self._hass.states.get(self._entity_id)
        if tracked_state is None:
            return None
        return self._mapping.get(tracked_state.state)

    def get_tracking_entities(self) -> list[str]:
        return [self._entity_id]
class AttributeMatcher(SubProfileMatcher):
    """Selects a sub profile based on an attribute of the entity state."""

    def __init__(self, attribute: str, mapping: dict[str, str]) -> None:
        self._attribute = attribute
        self._mapping = mapping

    def match(self, entity_state: State, source_entity: SourceEntity) -> str | None:
        attr_value = entity_state.attributes.get(self._attribute)
        return None if attr_value is None else self._mapping.get(attr_value)

    def get_tracking_entities(self) -> list[str]:
        return []
class EntityIdMatcher(SubProfileMatcher):
    """Selects a fixed sub profile when the entity id matches a regex."""

    def __init__(self, pattern: str, profile: str) -> None:
        self._pattern = pattern
        self._profile = profile

    def match(self, entity_state: State, source_entity: SourceEntity) -> str | None:
        if re.search(self._pattern, entity_state.entity_id):
            return self._profile
        return None

    def get_tracking_entities(self) -> list[str]:
        return []
class IntegrationMatcher(SubProfileMatcher):
    """Selects a fixed sub profile when the source entity belongs to a
    specific integration (entity registry platform)."""

    def __init__(self, integration: str, profile: str) -> None:
        self._integration = integration
        self._profile = profile

    def match(self, entity_state: State, source_entity: SourceEntity) -> str | None:
        registry_entry = source_entity.entity_entry
        if not registry_entry:
            return None
        return self._profile if registry_entry.platform == self._integration else None

    def get_tracking_entities(self) -> list[str]:
        return []
| [
"thomas@thomasbaxter.info"
] | thomas@thomasbaxter.info |
65f82aa6bff2df68e83b88de5c9a6c2f55297b8c | 5f23d8e91ca941749a6127d35a2d0194512d2987 | /daemon.py | fa7a20ed12ff3d3ce9d4b632dbdc5b55c2374ed0 | [] | no_license | cmal/sc2mafia-server | 7ea52d1293929a6044bb97eea2866c2feb00e81d | ad48674576d0e675dc0aacf59a393a5d0f9c6a7d | refs/heads/master | 2016-09-05T15:49:24.425225 | 2015-07-30T02:24:35 | 2015-07-30T02:24:35 | 39,053,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# twistd application file for the sc2mafia game server.
# (The "Fast Poetry Server" comment in the original is a leftover from the
# Twisted tutorial this file was derived from.)
from twisted.application import internet, service

from server.server import GameServerProtocol, GameServerFactory, GameServerService

import sys
reload(sys)
sys.setdefaultencoding('utf8')  # Python 2 only: force utf-8 default encoding

# configuration parameters
port = 10000
iface = 'localhost'

# this will hold the services that combine to form the server
top_service = service.MultiService()

# game logic service
game_service = GameServerService()
game_service.setServiceParent(top_service)

# the tcp service connects the factory to a listening socket; the socket is
# created when the service is started
factory = GameServerFactory(game_service)
tcp_service = internet.TCPServer(port, factory, interface=iface)
tcp_service.setServiceParent(top_service)

# twistd looks for a module-level variable literally named 'application'
application = service.Application("sc2mafia")
top_service.setServiceParent(application)
# when started by twistd, the child services are started as well, bringing
# the whole server up
| [
"zyzy5730@163.com"
] | zyzy5730@163.com |
509baa5595f18af9b6609d75c3eb70cd7f7b8834 | 1593d6393efb987b24e9e008c275725ff489bc95 | /dxm/lib/DxRuleset/DxRulesetList.py | 7281428ff08bc501d5ed1a98cab152eccb258957 | [
"Apache-2.0"
] | permissive | SLEEP1NG/dxm-toolkit | fedab63ac21305652f047ab51af7de090fdd8035 | 1c2aae49701da47c5932e3b7e67844bf8c3d0ccf | refs/heads/master | 2020-12-07T14:41:59.080025 | 2019-11-04T12:10:36 | 2019-11-04T12:10:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,579 | py | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : Marcin Przepiorowski
# Date : April 2018
import logging
from dxm.lib.DxRuleset.DxDatabaseRuleset import DxDatabaseRuleset
from dxm.lib.DxRuleset.DxFileRuleset import DxFileRuleset
from dxm.lib.DxTools.DxTools import get_objref_by_val_and_attribute
from dxm.lib.DxTools.DxTools import paginator
from dxm.lib.DxEngine.DxMaskingEngine import DxMaskingEngine
from dxm.lib.DxEnvironment.DxEnvironmentList import DxEnvironmentList
from masking_apis.apis.database_ruleset_api import DatabaseRulesetApi
from masking_apis.apis.file_ruleset_api import FileRulesetApi
from masking_apis.rest import ApiException
from dxm.lib.DxLogging import print_error
from dxm.lib.DxConnector.DxConnectorsList import DxConnectorsList
class DxRulesetList(object):
    """Registry of masking rulesets (database and file based).

    All state lives in class-level attributes and every method is a
    classmethod, so the class acts as a process-wide singleton:
    "instantiating" it simply (re)loads the ruleset list from the engine.
    """

    # maps ruleset reference id -> DxDatabaseRuleset / DxFileRuleset
    __rulesetList = {}
    # masking engine class the rulesets are loaded from
    __engine = None
    __logger = None

    @classmethod
    def __init__(self, environment_name=None):
        """
        Constructor
        :param environment_name: optional environment to restrict loading to
        """
        self.__engine = DxMaskingEngine
        self.__logger = logging.getLogger()
        self.__logger.debug("creating DxRulesetList object")
        self.LoadRulesets(environment_name)

    @classmethod
    def LoadRulesets(self, environment_name):
        """
        Load list of rule sets for an environment name
        Return None if OK
        """
        return self.LoadRulesets_worker(environment_name, None)

    @classmethod
    def LoadRulesetsbyId(self, env_id):
        """
        Load list of rule sets for env_id
        Return None if OK
        """
        return self.LoadRulesets_worker(None, env_id)

    @classmethod
    def LoadRulesets_worker(self, environment_name, env_id):
        """
        Load list of rule sets (database first, then file rulesets).
        :param environment_name: environment name, or None
        :param env_id: environment id, or None (ignored if name is given)
        Return None if OK, 1 on error
        """
        # Connectors must be loaded first; rulesets reference them.
        DxConnectorsList(environment_name)
        self.__rulesetList = {}
        try:
            api_instance = DatabaseRulesetApi(self.__engine.api_client)
            if environment_name:
                environment_id = DxEnvironmentList.get_environmentId_by_name(
                    environment_name)
                if environment_id:
                    database_rulesets = paginator(
                        api_instance,
                        "get_all_database_rulesets",
                        environment_id=environment_id,
                        _request_timeout=self.__engine.get_timeout())
                else:
                    return 1
            else:
                if env_id:
                    environment_id = env_id
                    database_rulesets = paginator(
                        api_instance,
                        "get_all_database_rulesets",
                        environment_id=environment_id,
                        _request_timeout=self.__engine.get_timeout())
                else:
                    environment_id = None
                    database_rulesets = paginator(
                        api_instance,
                        "get_all_database_rulesets")

            if database_rulesets.response_list:
                for c in database_rulesets.response_list:
                    ruleset = DxDatabaseRuleset(self.__engine)
                    ruleset.from_ruleset(c)
                    self.__rulesetList[c.database_ruleset_id] = ruleset
            else:
                if environment_id:
                    self.__logger.error("No database ruleset found for "
                                        "environment name %s"
                                        % environment_name)
                else:
                    self.__logger.error("No database ruleset found")

            api_instance = FileRulesetApi(self.__engine.api_client)
            if environment_id:
                file_rulesets = paginator(
                    api_instance,
                    "get_all_file_rulesets",
                    environment_id=environment_id)
            else:
                file_rulesets = paginator(
                    api_instance,
                    "get_all_file_rulesets")

            if file_rulesets.response_list:
                for c in file_rulesets.response_list:
                    ruleset = DxFileRuleset(self.__engine)
                    ruleset.from_ruleset(c)
                    self.__rulesetList[c.file_ruleset_id] = ruleset
            else:
                if environment_id:
                    self.__logger.error("No file ruleset found for "
                                        "environment name %s"
                                        % environment_name)
                else:
                    self.__logger.error("No file ruleset found")

        except ApiException as e:
            print_error("Can't load ruleset %s" % e.body)
            return 1

    @classmethod
    def get_by_ref(self, reference):
        """
        return a Ruleset object by reference
        return None if not found
        """
        try:
            self.__logger.debug("reference %s" % reference)
            return self.__rulesetList[reference]

        except KeyError as e:
            self.__logger.debug("can't find Ruleset object"
                                " for reference %s" % reference)
            self.__logger.debug(e)
            return None

    @classmethod
    def get_allref(self):
        """
        return a list of all references
        """
        return self.__rulesetList.keys()

    @classmethod
    def get_rulesetId_by_name(self, name):
        """
        Return ruleset id by name.
        :param1 name: name of ruleset
        return ref if OK
        return None if ruleset not found or not unique
        """
        reflist = self.get_rulesetId_by_name_worker(name)
        # convert list to single value
        # as there will be only one element in list
        if reflist:
            return reflist[0]
        else:
            return None

    @classmethod
    def get_all_rulesetId_by_name(self, name):
        """
        Return ruleset id by name.
        :param1 name: name of ruleset
        return list of references if OK
        return None if ruleset not found
        """
        return self.get_rulesetId_by_name_worker(name, None)

    @classmethod
    def get_rulesetId_by_name_worker(self, name, check_uniqueness=1):
        """
        :param1 name: name of ruleset
        :param2 check_uniqueness: check uniqueness put None if skip this check
        return list of rulesets
        """
        reflist = get_objref_by_val_and_attribute(name, self, 'ruleset_name')
        if len(reflist) == 0:
            self.__logger.error('Ruleset %s not found' % name)
            print_error('Ruleset %s not found' % name)
            return None

        if check_uniqueness:
            if len(reflist) > 1:
                self.__logger.error('Ruleset name %s is not unique' % name)
                print_error('Ruleset name %s is not unique' % name)
                return None

        return reflist

    @classmethod
    def get_all_database_rulesetIds(self):
        """
        Return list of database ruleset ids.
        return list of references if OK
        return None if ruleset not found
        """
        return get_objref_by_val_and_attribute('Database', self, 'type')

    @classmethod
    def get_all_file_rulesetIds(self):
        """
        Return list of file ruleset ids.
        return list of references if OK
        return None if ruleset not found
        """
        return get_objref_by_val_and_attribute('File', self, 'type')

    @classmethod
    def add(self, ruleset):
        """
        Add an Ruleset to a list and Engine
        :param ruleset: Ruleset object to add to Engine and list
        return None if OK, 1 on error
        """
        if (ruleset.add() is None):
            self.__logger.debug("Adding ruleset %s to list" % ruleset)
            self.__rulesetList[ruleset.ruleset_id] = ruleset
            return None
        else:
            return 1

    @classmethod
    def delete(self, RulesetId):
        """
        Delete a ruleset from a list and Engine
        :param RulesetId: Ruleset id to delete from Engine and list
        return None if OK, 1 on error
        """
        ruleset = self.get_by_ref(RulesetId)
        if ruleset is not None:
            if ruleset.delete() is None:
                return None
            else:
                return 1
        else:
            # Fixed: was a Python 2 'print' statement, inconsistent with the
            # print_error() helper used everywhere else in this class.
            print_error("Ruleset with id %s not found" % RulesetId)
            return 1

    @classmethod
    def copy(self, ruleset_id, newname):
        """
        Copy an existing ruleset under a new name
        :param ruleset_id: Ruleset id of the existing ruleset
        :param newname: Name of the new ruleset
        return new ruleset_id if OK, None if failure
        """
        ruleset = self.get_by_ref(ruleset_id)
        if ruleset.type == 'Database':
            newruleset = DxDatabaseRuleset(self.__engine)
            newruleset.from_ruleset(ruleset)
            newruleset.ruleset_name = newname
        elif ruleset.type == 'File':
            newruleset = DxFileRuleset(self.__engine)
            newruleset.from_ruleset(ruleset)
            newruleset.ruleset_name = newname
        else:
            # Fixed: an unknown ruleset type used to fall through and crash
            # with a NameError on 'newruleset'; fail cleanly instead.
            print_error("Ruleset type %s is not supported" % ruleset.type)
            return None

        if (newruleset.add() is None):
            self.__logger.debug("Adding ruleset %s to list" % newruleset)
            self.__rulesetList[newruleset.ruleset_id] = newruleset
            return newruleset.ruleset_id
        else:
            return None

    @classmethod
    def refresh(self, RulesetId):
        """
        Refresh a ruleset on the Engine
        :param RulesetId: Ruleset id to refresh
        return None if OK, 1 on error
        """
        ruleset = self.get_by_ref(RulesetId)
        if ruleset is not None:
            if ruleset.refresh() is None:
                return None
            else:
                return 1
        else:
            # Fixed: was a Python 2 'print' statement, inconsistent with the
            # print_error() helper used everywhere else in this class.
            print_error("Ruleset with id %s not found" % RulesetId)
            return 1
"marcin@delphix.com"
] | marcin@delphix.com |
b9e9272f46c85f1ee025f3b870ae8ea0907eaeb0 | 70b9761fdcda427ea7a31c5ab60be202037936b7 | /correction/correction_model.py | ba695183cc0cbdbc318e4740f8cf4d08ac72b73f | [
"MIT"
] | permissive | CharlesRenyh/Erroneous-Old-German-Text-Correction | 9e4a82ed3daf3414b3a50ea8061ed8d3fa139ef5 | a9064a9ffe10a1a932369196df5bf60da3d9eeeb | refs/heads/master | 2020-08-27T12:28:40.310469 | 2020-05-10T15:56:56 | 2020-05-10T15:56:56 | 217,368,215 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,424 | py | import torch.nn as nn
from torch import Tensor
import torch
import torch.nn.functional as F
import torch.optim as optim
import random
class Encoder(nn.Module):
    """Bidirectional GRU encoder.

    Returns the per-token encoder outputs together with an initial decoder
    hidden state built from the fused forward/backward final states.
    """

    def __init__(self,
                 input_dim: int,
                 emb_dim: int,
                 enc_hid_dim: int,
                 dec_hid_dim: int,
                 dropout: float):
        super().__init__()
        # Keep the configuration around for later introspection.
        self.input_dim = input_dim
        self.emb_dim = emb_dim
        self.enc_hid_dim = enc_hid_dim
        self.dec_hid_dim = dec_hid_dim
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional=True)
        # Projects the concatenated forward/backward final states down to
        # the decoder's hidden size.
        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src: Tensor):
        """Encode *src* (integer token ids; presumably (src_len, batch))."""
        token_vectors = self.dropout(self.embedding(src))
        outputs, final_states = self.rnn(token_vectors)
        # Fuse the last forward-direction state with the last backward one.
        forward_state = final_states[-2, :, :]
        backward_state = final_states[-1, :, :]
        hidden = torch.tanh(self.fc(torch.cat((forward_state, backward_state), dim=1)))
        return outputs, hidden
class Attention(nn.Module):
    """Additive attention scoring a decoder state against encoder outputs."""

    def __init__(self,
                 enc_hid_dim: int,
                 dec_hid_dim: int,
                 attn_dim: int):
        super().__init__()
        self.enc_hid_dim = enc_hid_dim
        self.dec_hid_dim = dec_hid_dim
        # Width of [decoder state ; bidirectional encoder output] once
        # concatenated; also read by Decoder when sizing its output layer.
        self.attn_in = (enc_hid_dim * 2) + dec_hid_dim
        self.attn = nn.Linear(self.attn_in, attn_dim)

    def forward(self,
                decoder_hidden: Tensor,
                encoder_outputs: Tensor) -> Tensor:
        """Return softmax attention weights over the source positions."""
        source_len = encoder_outputs.shape[0]
        # Broadcast the decoder state along the source axis so it can be
        # paired with every encoder output.
        query = decoder_hidden.unsqueeze(1).repeat(1, source_len, 1)
        keys = encoder_outputs.permute(1, 0, 2)
        energy = torch.tanh(self.attn(torch.cat((query, keys), dim=2)))
        scores = energy.sum(dim=2)
        return F.softmax(scores, dim=1)
class Decoder(nn.Module):
    """Single-step GRU decoder with additive attention over encoder outputs."""

    def __init__(self,
                 output_dim: int,
                 emb_dim: int,
                 enc_hid_dim: int,
                 dec_hid_dim: int,
                 dropout: float,
                 attention: nn.Module):
        super().__init__()

        self.emb_dim = emb_dim
        self.enc_hid_dim = enc_hid_dim
        self.dec_hid_dim = dec_hid_dim
        self.output_dim = output_dim
        self.dropout = dropout
        self.attention = attention

        self.embedding = nn.Embedding(output_dim, emb_dim)

        # GRU consumes [embedded token ; attention-weighted encoder context].
        self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)

        # Final projection sees rnn output + weighted context + embedding.
        self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim)

        # NOTE: overwrites the float stored above; the attribute ends up
        # being the Dropout module.
        self.dropout = nn.Dropout(dropout)

    def _weighted_encoder_rep(self,
                              decoder_hidden: Tensor,
                              encoder_outputs: Tensor) -> Tensor:
        """Attention-weighted sum of encoder outputs for one decode step."""

        a = self.attention(decoder_hidden, encoder_outputs)

        a = a.unsqueeze(1)

        encoder_outputs = encoder_outputs.permute(1, 0, 2)

        # Batched matmul collapses the source axis into a single context
        # vector per batch element.
        weighted_encoder_rep = torch.bmm(a, encoder_outputs)

        # Back to sequence-first layout: (1, batch, enc_hid_dim * 2).
        weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2)

        return weighted_encoder_rep

    def forward(self,
                input: Tensor,
                decoder_hidden: Tensor,
                encoder_outputs: Tensor):
        """Decode one step; returns (logits, next decoder hidden state).

        `input` is presumably a (batch,) tensor of previous token ids -
        TODO confirm against Seq2Seq, which feeds trg rows here.
        """

        # Add a length-1 sequence axis expected by the embedding/GRU.
        input = input.unsqueeze(0)

        embedded = self.dropout(self.embedding(input))

        weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden,
                                                          encoder_outputs)

        rnn_input = torch.cat((embedded, weighted_encoder_rep), dim = 2)

        output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0))

        # Drop the length-1 sequence axis before the output projection.
        embedded = embedded.squeeze(0)
        output = output.squeeze(0)
        weighted_encoder_rep = weighted_encoder_rep.squeeze(0)

        output = self.out(torch.cat((output,
                                     weighted_encoder_rep,
                                     embedded), dim = 1))

        return output, decoder_hidden.squeeze(0)
class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper with per-step scheduled teacher forcing."""

    def __init__(self,
                 encoder: nn.Module,
                 decoder: nn.Module,
                 device: torch.device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device

    def forward(self,
                src: Tensor,
                trg: Tensor,
                teacher_forcing_ratio: float = 0.5) -> Tensor:
        """Run one full decoding pass.

        With probability *teacher_forcing_ratio* each step feeds the gold
        target token to the decoder, otherwise its own argmax prediction.
        Row 0 of the result stays zero (it corresponds to the <sos> slot).
        """
        batch_size = src.shape[1]
        target_len = trg.shape[0]
        vocab_size = self.decoder.output_dim

        predictions = torch.zeros(target_len, batch_size, vocab_size).to(self.device)

        encoder_outputs, hidden = self.encoder(src)

        # first input to the decoder is the <sos> token
        decoder_input = trg[0, :]
        for step in range(1, target_len):
            step_logits, hidden = self.decoder(decoder_input, hidden, encoder_outputs)
            predictions[step] = step_logits
            use_teacher = random.random() < teacher_forcing_ratio
            best_guess = step_logits.max(1)[1]
            decoder_input = trg[step] if use_teacher else best_guess

        return predictions
# Original (larger) hyper-parameters kept for reference:
# INPUT_DIM = len(SRC.vocab)
# OUTPUT_DIM = len(TRG.vocab)
# ENC_EMB_DIM = 256
# DEC_EMB_DIM = 256
# ENC_HID_DIM = 512
# DEC_HID_DIM = 512
# ATTN_DIM = 64
# ENC_DROPOUT = 0.5
# DEC_DROPOUT = 0.5

# Reduced hyper-parameters actually used; vocab size is hard-coded to 5000
# below instead of being derived from the data.
ENC_EMB_DIM = 32
DEC_EMB_DIM = 32
ENC_HID_DIM = 64
DEC_HID_DIM = 64
ATTN_DIM = 8
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

enc = Encoder(5000, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)

attn = Attention(ENC_HID_DIM, DEC_HID_DIM, ATTN_DIM)

dec = Decoder(5000, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)

model = Seq2Seq(enc, dec, device).to(device)
# print(model)

def init_weights(m: nn.Module):
    """Initialize weights ~ N(0, 0.01) and all other parameters to 0.

    NOTE(review): model.apply() visits every submodule and named_parameters()
    recurses, so parameters are re-initialized once per enclosing module -
    harmless (last write wins) but redundant.
    """
    for name, param in m.named_parameters():
        if 'weight' in name:
            nn.init.normal_(param.data, mean=0, std=0.01)
        else:
            nn.init.constant_(param.data, 0)

model.apply(init_weights)

optimizer = optim.Adam(model.parameters())

# Ad-hoc smoke tests kept from development:
# a = torch.LongTensor([[1,2,3,4,5]])
# b = torch.LongTensor([[2,3,4,5,6,7]])
# print(model(a,b).size())

# def count_parameters(model: nn.Module):
#     return sum(p.numel() for p in model.parameters() if p.requires_grad)
#
#
# print(f'The model has {count_parameters(model):,} trainable parameters')
"charlesren1996@gmail.com"
] | charlesren1996@gmail.com |
99f018bf90917d21408c07b6e353157c8c6f52a9 | 94aee27fff524c450148dafa2972a727df232498 | /CurrencyAr.py | 08222bc4c977874ed6fd715e92dc932dd3453137 | [] | no_license | aisonet/Solution | 016c69ad130cf1250c788dce6afcd47e4ffd2ae9 | c21bcc9e000ea5520b11494ec50c9b7951737ef6 | refs/heads/master | 2021-01-22T09:48:24.837704 | 2018-02-22T17:15:32 | 2018-02-22T17:15:32 | 81,965,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py |
def arbitrage(quotes):
    """Evaluate currency-conversion cycles for arbitrage profit.

    Each entry in *quotes* is a string of space-separated exchange rates.
    Starting from a notional capital of 100000 units, the amount is divided
    by each rate in turn; if the final amount exceeds the starting capital
    the integer profit is recorded, otherwise 0.

    :param quotes: list of strings such as '1.1837 1.3829 0.6102'
    :return: list with one integer profit figure per quote string
    """
    profits = []
    for quote in quotes:
        amount = 100000
        for rate in quote.split(' '):
            amount = amount / float(rate)
        if amount > 100000:
            profits.append(int(amount) - 100000)
        else:
            # Bug fix: the original appended the string '0' here, producing
            # a mixed str/int result list; use the integer 0 instead.
            profits.append(0)
    return profits
# Example usage: evaluate two sample rate cycles at module import time.
a= ['1.1837 1.3829 0.6102', '1.1234 1.2134 1.2311']
listA= arbitrage(a)
| [
"aisonet@yahoo.com"
] | aisonet@yahoo.com |
021445b9bdf634afaf800add0b93f940291238f7 | 25420a17e4202a13cc7a896513bde1c042e1f545 | /repository/declaratie_firma_repository.py | 2c153c05cc6cb45f724399d5e485a3926db15246 | [] | no_license | lzradrian/practicaServer | 6ae74f3909f84769dad253aec2bdb0cda1e470f9 | 650e921af6267ce1b7da20f6f694864a483be0c9 | refs/heads/master | 2023-03-05T22:02:38.627131 | 2021-02-13T08:03:13 | 2021-02-13T08:03:13 | 321,683,575 | 0 | 0 | null | 2021-02-13T08:03:14 | 2020-12-15T13:56:18 | Python | UTF-8 | Python | false | false | 1,226 | py | class DeclaratieFirmaRepository:
def getAll(self):
from domain.declaratie_firma import DeclaratieFirma
declaratii = DeclaratieFirma.query.all()
return declaratii
def getOne(self, id):
from domain.declaratie_firma import DeclaratieFirma
declaratie = DeclaratieFirma.query.get(id)
return declaratie
def get_with_student_id(self, student_id):
from domain.declaratie_firma import DeclaratieFirma
declaratie = DeclaratieFirma.query.filter_by(student_id=student_id).first()
return declaratie
def add(self, declaratie):
from controller import db
db.session.add(declaratie)
db.session.commit()
return declaratie
def remove(self, declaratie):
from controller import db
db.session.delete(declaratie)
db.session.commit()
def update(self, declaratie):
from controller import db
declaratie_found = self.get_with_student_id(declaratie.student_id)
declaratie_found.submitted = declaratie.submitted
declaratie_found.content = declaratie.content
declaratie_found.checked = declaratie.checked
db.session.commit()
return declaratie
| [
"dombinorbert1@gmail.com"
] | dombinorbert1@gmail.com |
d4a34ee20df7bcfc81d4f08d997084a701fe6793 | 6ea84a1ee3f08cc0e2c50b452ccda0469dda0b6c | /projectLimat/manage.py | 5ce2506a4a3d51b3e6927ce569073b28003bf100 | [] | no_license | frestea09/django_note | b818d9d95f2f1e43ba47f8f2168bc5980d5da1f7 | b8d1e41a450f5c452afd36319779740bed874caa | refs/heads/master | 2020-11-24T03:54:00.000949 | 2020-01-01T06:50:12 | 2020-01-01T06:50:12 | 227,950,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's administrative command-line utility for this project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projectLimat.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (Django not installed,
        # virtualenv not activated) while keeping the original as the cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
| [
"ilmanfrasetya@gmail.com"
] | ilmanfrasetya@gmail.com |
f4e6f2a11be9b1c9b26e680848c56ec23e147339 | e78154abbb8bacf5afccda9da371684cbeabad36 | /popego/popserver/build/lib/popserver/tests/agents/test_lastfm.py | 96e4e5360546f9480b42ef1450462b3d3a5daae1 | [
"BSD-3-Clause"
] | permissive | enterstudio/popego | 1a196fabc374c0f45764e5c74bd7752236424040 | 2d09e793d9d2f297139edb325b8a70ddda9b2705 | refs/heads/master | 2021-04-09T16:39:40.781634 | 2016-10-14T16:53:47 | 2016-10-14T16:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,723 | py | # -*- coding: utf-8 -*-
__docformat__='restructuredtext'
from popserver.tests.nodb_model import *
from popserver.tests import *
from fixture import DataTestCase
from popserver.tests import popfixtures
from popserver.agents.lastfm_agent import LastFmAgent
from popserver.agents.lastfm_client import LastFMClient
import popserver.agents
import types
import unittest
class TestLastFmClient(unittest.TestCase):
    """Unit tests for LastFMClient, served entirely from canned XML fixtures.

    setUp swaps LastFMClient._getFeed for mock_lastfm_getFeed so no network
    access happens; tearDown restores the original method.

    NOTE(review): the three-argument types.MethodType(func, None, cls) form
    creates an unbound method and exists only in Python 2 - this module
    predates Python 3.
    """

    def setUp(self):
        # Remember the real feed fetcher so tearDown can restore it.
        self._restoreMethod = popserver.agents.lastfm_client.LastFMClient._getFeed
        LastFMClient._getFeed = types.MethodType(mock_lastfm_getFeed, None, LastFMClient)
        self.client = LastFMClient()

    def tearDown(self):
        LastFMClient._getFeed = types.MethodType(self._restoreMethod, None, LastFMClient)

    def testRecentTracks(self):
        """recenttracks.xml fixture yields one dict per recently played track."""
        t = self.client.getRecentTracksForUser('maristaran')
        assert type(t) == type([])
        assert len(t) == 1
        assert type(t[0]) == type({})
        assert t[0]['artist'] == 'Willie Bobo'

    def testTopTracks(self):
        """toptracks.xml fixture yields artist and track name per entry."""
        t = self.client.getTopTracksForUser('maristaran')
        assert type(t) == type([])
        assert len(t) == 1
        assert type(t[0]) == type({})
        assert t[0]['artist'] == 'Brian Wilson'
        assert t[0]['name'] == 'Our Prayer Gee'

    def testTopArtists(self):
        """topartists.xml fixture yields one dict with the artist name."""
        t = self.client.getTopArtistsForUser('maristaran')
        assert type(t) == type([])
        assert len(t) == 1
        assert type(t[0]) == type({})
        assert t[0]['name'] == 'The Beatles'

    def testUserTagsForTrack(self):
        """Per-user track tags come back as a plain list of tag strings."""
        t = self.client.getUserTagsForTrack('maristaran', 'Brian Wilson', 'Our Prayer Gee')
        assert type(t) == type([])
        assert len(t) == 1
        assert t == ['bombastic']

    def testTopArtistsForUser(self):
        # Duplicates testTopArtists with a weaker last assertion.
        t = self.client.getTopArtistsForUser('maristaran')
        assert type(t) == type([])
        assert len(t) == 1
        assert t[0]['name'] == 'The Beatles'

    def testTopTagsForTrack(self):
        """funky-sneakers-toptags.xml fixture contains no tags."""
        t = self.client.getTopTagsForTrack('Willie Bobo', 'Funky Sneakers')
        assert type(t) == type([])
        assert len(t) == 0

    def testGetArtistData(self):
        """Artist data is a dict keyed by field name."""
        t = self.client.getArtistData('Brian Wilson')
        assert type(t) == type({})
        assert t['name'] == 'Brian Wilson'
# TODO: tests para el agente
# class TestLastFmAgent(TestModel, DataTestCase):
# fixture = dbfixture
# datasets = [popfixtures.UserData, popfixtures.ServiceTypeData, popfixtures.ServiceData, popfixtures.AccountData]
# def setUp(self):
# TestModel.setUp(self)
# DataTestCase.setUp(self)
# self._restoreMethod = popserver.agents.lastfm_client.LastFMClient._getFeed
# LastFMClient._getFeed = types.MethodType(mock_lastfm_getFeed, None, LastFMClient)
# self.agent = LastFmAgent()
# self.user = self.data.UserData.dartagnan
# self.lastfm_svc = self.data.ServiceData.lastfm
# self.account = Account.get_by(user_id=self.user.id, service_id=self.lastfm_svc.id)
# def tearDown(self):
# dbsession.clear()
# DataTestCase.tearDown(self)
# TestModel.tearDown(self)
# LastFMClient._getFeed = types.MethodType(self._restoreMethod, None, LastFMClient)
# def test_getUserGraph(self):
# r = self.agent.getUserGraph(self.account)
# assert len(r) == 3 # grupos: top artists, top tracks y recently_listened
# assert map(type, r) == [popserver.model.ItemGroup, popserver.model.ItemGroup, popserver.model.ItemGroup]
# assert map(lambda g: type(g.items[0]), r) == [popserver.model.UserItem, popserver.model.UserItem,popserver.model.UserItem]
# assert map(lambda g: len(g.items), r) == [1, 1, 1]
# top_artists = r[0]
# assert type(top_artists.items[0].item) == popserver.model.Artist
# assert top_artists.items[0].item.title == 'The Beatles'
# top_tracks = r[1]
# assert type(top_tracks.items[0].item) == popserver.model.Song
# assert top_tracks.items[0].item.title == 'Our Prayer Gee'
# assert top_tracks.items[0].item.artist.title == 'Brian Wilson'
# recently_listened = r[2]
# assert type(recently_listened.items[0].item) == popserver.model.Song
# assert recently_listened.items[0].item.title == 'Funky Sneakers'
# assert recently_listened.items[0].item.artist.title == 'Willie Bobo'
# assert True
def mock_lastfm_getFeed(self, url):
    """Stand-in for LastFMClient._getFeed that serves canned fixture files.

    Maps each Last.fm web-service URL onto a sample XML document stored
    under popserver/tests/samples/lastfm, so the tests never touch the
    network.  A table value of 404 simulates a missing resource.
    """
    samples = {
        'http://ws.audioscrobbler.com/1.0/user/maristaran/recenttracks.xml' : 'recenttracks.xml',
        'http://ws.audioscrobbler.com/1.0/artist/Willie%2BBobo/similar.xml' : 'willie-bobo-similar.xml',
        'http://ws.audioscrobbler.com/1.0/track/Willie%2BBobo/Funky%2BSneakers/toptags.xml' : 'funky-sneakers-toptags.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/tracktags.xml?artist=Willie+Bobo&track=Funky+Sneakers' : 'funky-sneakers-tracktags.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/toptracks.xml' : 'toptracks.xml',
        'http://ws.audioscrobbler.com/1.0/artist/Brian%2BWilson/similar.xml' : 'brian-wilson-similar.xml',
        'http://ws.audioscrobbler.com/1.0/track/Brian%2BWilson/Our%2BPrayer%2BGee/toptags.xml' : 'our-prayer-gee-toptags.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/tracktags.xml?artist=Brian+Wilson&track=Our+Prayer+Gee' : 'maristaran-our-prayer-gee-toptags.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/topartists.xml' : 'topartists.xml',
        'http://ws.audioscrobbler.com/1.0/artist/The%2BBeatles/similar.xml' : 'beatles-similar.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/artisttags.xml?artist=The+Beatles' : 'maristaran-beatles-tags.xml'
    }

    import xml.dom.minidom
    if samples[url] == 404:
        import urllib2
        # Bug fix: HTTPError must be instantiated with (url, code, msg,
        # hdrs, fp); raising the bare class used to fail with a TypeError
        # instead of simulating an HTTP failure.
        raise urllib2.HTTPError(url, 404, 'Not Found', None, None)
    else:
        return xml.dom.minidom.parse(popserver.tests.__path__[0] + '/samples/lastfm/' + samples[url])
# class TestLastfmAgent(DataTestCase, TestModel):
# fixture = dbfixture
# datasets = [popfixtures.UserData, popfixtures.ServiceTypeData, popfixtures.ServiceData, popfixtures.AccountData]
# def setUp(self):
# TestModel.setUp(self)
# DataTestCase.setUp(self)
# self.user = User.get_by(username='darty')
# self.lastfm_svc = Service.get_by(name='Last.FM')
# self.account = Account.get_by(user=self.user, service=self.lastfm_svc)
# self.agent = self.lastfm_svc.getAgent()
# def tearDown(self):
# DataTestCase.tearDown(self)
# TestModel.tearDown(self)
# LastFmAgent._getFeed = orig_getFeed
| [
"santisiri@gmail.com"
] | santisiri@gmail.com |
acf902bf1f5deb602370a46b4322225c30df8dd2 | d8f436e02ad3b0d6a99006ee0cb85b68f0deedcb | /api/urls.py | ae3531f374928c7296e854621a0d6ab66739d6a1 | [] | no_license | m0hit1712/PincodeApi | 7cabb29c3b4e954c4806f9bf161b594baff20ac7 | b9dd3368f01437dca58c31fced906eb6816d8c07 | refs/heads/main | 2023-02-15T08:24:16.819802 | 2021-01-11T12:51:15 | 2021-01-11T12:51:15 | 325,280,340 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from django.urls import path, include
from . import views
urlpatterns = [
    # Pincode lookup for a fully qualified location (state/district/taluk).
    path('getpincode/<str:state>/<str:district>/<str:taluk>', views.GetPinCode.as_view(), name="pincode_url"),
    # NOTE(review): the view classes on the next two routes look swapped -
    # /getstates is served by views.GetDistricts and /getdistricts by
    # views.GetStates.  Confirm against views.py before changing anything.
    path('getstates/<str:country>', views.GetDistricts.as_view(), name="state_url"),
    path('getdistricts/<str:state>', views.GetStates.as_view(), name="district_url"),
    # All taluks of a district.
    path('gettaluks/<str:district>', views.GetTaluk.as_view(), name="taluk_url"),
]
| [
"mohit.patidar1503@gmail.com"
] | mohit.patidar1503@gmail.com |
35ba48508ec9ca4b040084ea5c9dba817fda7374 | 6163d256743eca49df4a17cbd8ecbde59f098929 | /base/number.py | 30dc3b168f4259bef334bb9052b7f2c910f34015 | [] | no_license | splaiser/newbie | 7220709668038c452eac080a26f6a9b73b3f17d0 | 8b4d3125b8b599dbefbd822cd4b838ed9d0b3906 | refs/heads/master | 2023-06-18T05:14:48.704480 | 2021-07-16T07:20:04 | 2021-07-16T07:20:04 | 375,288,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | numbers = [1,2,3,4,5,6,7,8,9]
def _ordinal(n):
    """Return *n* followed by its English ordinal suffix (1st, 2nd, 3rd, 4th...).

    Generalizes the original hard-coded 1/2/3 special cases and also handles
    the 11th/12th/13th exceptions; output is unchanged for the 1-9 list above.
    """
    if 10 <= n % 100 <= 13:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f"{n}{suffix}"


for n in numbers:
    print(_ordinal(n))
"76589075+splaiser@users.noreply.github.com"
] | 76589075+splaiser@users.noreply.github.com |
37dd9e9f28c87fe2ffc64cf8d982011f7a3854ad | 7ca63f2778faa22d0d3043b87d1eaedac6cdfe62 | /generateMusic.py | edccc4368bb846095f5c1401e9f6a8f2432ebc7d | [] | no_license | justineDelo/generate-clips | cb333136f652f4fef3c6a2512a43610b74b6e0a2 | ee127210dd03d3882acec5cf4faaff05dd398a59 | refs/heads/master | 2021-06-27T02:56:53.499440 | 2017-09-13T15:34:55 | 2017-09-13T15:34:55 | 103,270,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,549 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 8 09:57:38 2017
@author: justine
"""
import matplotlib.pyplot as plt
import inception
import os
import generation
import extractionImages
import moviepy.editor as mp
import shutil
import numpy as np
import features
def featuresImages(path, imagesPaths):
    """Compute an Inception transfer-value vector for every image.

    Loads each file in *imagesPaths*, feeds it through the pretrained
    Inception model and caches the transfer values as one pickle per image
    under *path*.

    :param path: directory used for the per-image cache files
    :param imagesPaths: list of image file paths
    :return: list with one transfer-value array per input image, same order
    """
    images = [plt.imread(image_path) for image_path in imagesPaths]
    inception.maybe_download()
    model = inception.Inception()
    from inception import transfer_values_cache
    features = []
    for index, pixels in enumerate(images, start=1):
        cache_file = os.path.join(path, "inception_uneImage_train" + str(index) + ".pkl")
        print("Processing Inception transfer-values for training-images ...")
        # Reuse previously computed transfer values when the cache file
        # exists, otherwise compute them and store them.
        features.append(transfer_values_cache(cache_path=cache_file,
                                              images=[pixels],
                                              model=model))
    return features
import pickle
def mainPart1(imagesNames, path, ideaNb=1, extraction=0) :
    """First pipeline stage: match query images against reference sounds.

    Extracts features from every reference .mp4 in *path* (optionally
    extracting image/sound clips first), computes Inception features for the
    caller's images, optionally averages them in groups of up to three
    (ideaNb == 2), then pickles the closest reference match per feature
    vector into the file 'closests' (protocol 2, readable by the Python 2
    half of the pipeline).

    :param imagesNames: list of query image paths
    :param path: working directory holding the reference videos
    :param ideaNb: 1 = one match per image, 2 = one match per image triple
    :param extraction: 1 = extract clips from the reference videos first
    """
    # Extract and compute features from the reference videos.
    featsRef = []
    listVideosRef = [path + n for n in os.listdir(path) if n[-4:] == '.mp4']
    if extraction == 1 :
        for video in listVideosRef :
            extractionImages.extractionImagesAndSounds(video, "png", "wav", 5, 10)
        rename(listVideosRef)
    for name in listVideosRef :
        featsRef.append(features.featuresImages(name, path))
    np.save(path + "featsRefImages" + ".npy", featsRef)
    # Work with the images given in parameters.
    imagFeats = featuresImages(path, imagesNames)
    refFeats = np.load(path + "featsRefImages" + ".npy")
    sounds = []
    featsMeans = []
    if ideaNb == 2 :
        print("2")
        # Average the query features in groups of (up to) three images.
        while len(imagFeats) > 0 :
            if len(imagFeats) >= 3 :
                featsMeans.append((np.array(imagFeats[0]) + np.array(imagFeats[1]) + np.array(imagFeats[2])) / 3)
                imagFeats = imagFeats[3:]
            elif len(imagFeats) == 2 :
                # Bug fix: a pair must be averaged over 2 elements, not 3.
                featsMeans.append((np.array(imagFeats[0]) + np.array(imagFeats[1])) / 2)
                imagFeats = imagFeats[2:]
            elif len(imagFeats) == 1 :
                featsMeans.append(np.array(imagFeats[0]))
                imagFeats = imagFeats[1:]
    elif ideaNb == 1 :
        featsMeans = imagFeats
    # For each (averaged) feature vector, keep the closest reference sound.
    for extract in featsMeans :
        sounds.append(generation.closest(extract, refFeats)[0])
    # 'with' guarantees the handle is closed; protocol 2 keeps the pickle
    # readable from Python 2.
    with open("closests", "wb") as handle :
        pickle.dump(sounds, handle, protocol=2)
    return
import subprocess
def rename(listVideosRef) :
    """Re-encode the extracted .wav clips of every video into .mp3 files.

    Recreates datasets/YourMusicLibrary/ from scratch, then converts each
    sound file found in <video>son/ with ffmpeg, naming the outputs
    v<video-number><original-stem>.mp3.

    :param listVideosRef: list of reference video paths ending in '.mp4'
    """
    directory = "datasets/YourMusicLibrary/"
    # Start from an empty library directory.
    if not os.path.isdir(directory) :
        os.makedirs(directory, mode=0o777)
    else :
        shutil.rmtree(directory)
        os.makedirs(directory, mode=0o777)
    compteur = 0
    for video in listVideosRef :
        compteur += 1
        for soundName in os.listdir(video[:-4] + "son") :
            src = video[:-4] + "son/" + soundName
            dst = directory + "v" + str(compteur) + soundName[:-4] + ".mp3"
            print(src, dst)
            # Bug/security fix: pass the arguments as a list instead of a
            # shell-interpolated string so that paths containing spaces or
            # shell metacharacters cannot break (or inject into) the command.
            subprocess.call(["ffmpeg", "-i", src, "-codec:a", "libmp3lame",
                             "-qscale:a", "2", dst])
    return
def mainPart2(path, imagesNames) :
    """
    Second pipeline stage: retrieve the sounds created by the Python 2.7
    half of the pipeline and assemble the final video clip.

    Consumes *imagesNames* in groups of up to three, pairs group n with
    path + 'sound<n>.wav', renders one short clip per group via
    generation.generateVideo, then concatenates all clips into
    <path>output/videoFinaleall.mp4 with moviepy.
    """
    n=0
    liste=[]
    while len(imagesNames)>0 :
        n+=1
        # Peel off up to three images for this clip.
        if len(imagesNames)>=3 :
            imagesNb=imagesNames[:3]
            imagesNames=imagesNames[3:]
        elif len(imagesNames) == 2:
            imagesNb=imagesNames[:2]
            imagesNames=imagesNames[2:]
        else :
            imagesNb=imagesNames
            imagesNames=[]
        # Sound n is assumed to exist already - TODO confirm it is produced
        # by the Python 2 stage before this function runs.
        soundName = path + "sound"+str(n)+".wav"
        generation.generateVideo(soundName, imagesNb, [], path, 2.5, "yes", str(n) ) # now we have created many videos that lasts 15seconds with 3images and one music
        liste.append(path+"output/videoFinale"+str(n)+".mp4")
    # Concatenate all intermediate clips into the final video.
    listeBis = []
    for name in liste :
        listeBis.append(mp.VideoFileClip(name))
    video = mp.concatenate_videoclips(listeBis)
    video.write_videofile(path+"output/videoFinale"+"all"+".mp4")
    return
return
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("part", default=1)
parser.add_argument('-i','--list', nargs='+', help='<Required> Set flag', required=True)
parser.add_argument("-p","--path")
parser.add_argument("-e", "--extraction", nargs='?', default=1)
parser.add_argument("-nb", "--ideaNb", nargs='?', default = 1)
args = parser.parse_args()
if int(args.part)==1 :
mainPart1(args.list, args.path, int(args.ideaNb), int(args.extraction))
elif int(args.part)==2 :
mainPart2(args.path, args.list)
| [
"ju.nyc@hotmail.fr"
] | ju.nyc@hotmail.fr |
15c3aae3a58a05ed1db45db23627c6c8807dace2 | 62b6de5739dcb83d2504e5b73dc91240266aef78 | /bin/dnsstamp.py | 98dc0ceedb74aee161bf0ff822dbd4053a7b9b82 | [
"MIT"
] | permissive | power-dns/python-dnsstamps | e5c0f568b46a1e0be6e4da7ff408396b4c4b0718 | 79ba64ed2c3ee6c72700fdbc8a578e734bbd2a20 | refs/heads/master | 2023-02-15T20:34:46.791220 | 2021-01-15T12:53:53 | 2021-01-15T12:53:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,476 | py | #!/usr/bin/env python
import argparse
import sys
import dnsstamps
from dnsstamps import Option
class DnsStampCli(object):
    @staticmethod
    def append_common_arguments(parser):
        """Add the flags shared by every stamp sub-command to *parser*.

        Covers the three protocol options (DNSSEC, no-logs, no-filter) and
        the optional server IP address.
        """
        parser.add_argument('-s', '--dnssec',
                            dest='dnssec',
                            action='store_true',
                            help="use if DNSSEC is supported (default: not supported)")
        parser.set_defaults(dnssec=False)
        parser.add_argument('-l', '--no-logs',
                            dest='logs',
                            action='store_true',
                            help="use if queries are not logged (default: are logged)")
        parser.set_defaults(logs=False)
        parser.add_argument('-f', '--no-filter',
                            dest='filter',
                            action='store_true',
                            help="use if domains are not filtered (default: are filtered)")
        parser.set_defaults(filter=False)
        parser.add_argument('-a', '--address',
                            type=str,
                            help="the ip address of the DNS server")
    def __init__(self):
        """Parse the sub-command name and dispatch to the matching method.

        Only sys.argv[1:2] is parsed here; each sub-command re-parses the
        remaining arguments (sys.argv[2:]) itself.
        """
        parser = argparse.ArgumentParser(usage='%(prog)s <command> [<args>]')
        parser.add_argument('command',
                            choices=['parse', 'plain', 'dnscrypt', 'doh', 'dot', 'doq', 'doh_target', 'dnscrypt_relay',
                                     'doh_relay'],
                            help='The command to execute.')
        args = parser.parse_args(sys.argv[1:2])
        # The command name doubles as the method name on this class.
        getattr(self, args.command)()
def parse(self):
parser = argparse.ArgumentParser(description='Parse DNS stamp.')
parser.add_argument('stamp', type=str, help='The stamp to parse.')
args = parser.parse_args(sys.argv[2:])
try:
parameter = dnsstamps.parse(args.stamp)
dnsstamps.format(parameter)
except:
print("Unable to parse DNS stamp <%s>" % args.stamp)
def plain(self):
parser = argparse.ArgumentParser(description='Create plain stamp')
self.append_common_arguments(parser)
args = parser.parse_args(sys.argv[2:])
options = []
if args.dnssec:
options.append(Option.DNSSEC)
if args.logs:
options.append(Option.NO_LOGS)
if args.filter:
options.append(Option.NO_FILTERS)
parameter = dnsstamps.prepare_plain("" if args.address is None else args.address, options)
dnsstamps.format(parameter)
def dnscrypt(self):
parser = argparse.ArgumentParser(description='Create DNSCrypt stamp')
self.append_common_arguments(parser)
parser.add_argument('-k', '--public_key',
required=True,
type=str,
help="the DNSCrypt public key (e.g.: CB6A:DC5C:29F9:5510:0B65:BF12:94FE:5684:579A:B349:9CC9:798F:00D0:1BB5:C1A9:A2C7)")
parser.add_argument('-n', '--provider_name',
required=True,
type=str,
help="the DNSCrypt provider name (e.g.: 2.dnscrypt-cert.example.com)")
args = parser.parse_args(sys.argv[2:])
options = []
if args.dnssec:
options.append(Option.DNSSEC)
if args.logs:
options.append(Option.NO_LOGS)
if args.filter:
options.append(Option.NO_FILTERS)
parameter = dnsstamps.prepare_dnscrypt("" if args.address is None else args.address, args.public_key,
args.provider_name, options)
dnsstamps.format(parameter)
def doh(self):
parser = argparse.ArgumentParser(description='Create DNS-over-HTTPS stamp')
self.append_common_arguments(parser)
parser.add_argument('-t', '--hashes',
type=str,
help="a comma-separated list of tbs certificate hashes (e.g.: 3e1a1a0f6c53f3e97a492d57084b5b9807059ee057ab1505876fd83fda3db838)")
parser.add_argument('-n', '--hostname',
required=True,
type=str,
help="the server hostname which will also be used as a SNI name (e.g.: doh.example.com)")
parser.add_argument('-p', '--path',
required=True,
type=str,
help="the absolute URI path (e.g.: /dns-query)")
parser.add_argument('-b', '--bootstrap_ips',
type=str,
help="a comma-separated list of bootstrap ips (e.g.: 1.1.1.1,1.0.0.1)")
args = parser.parse_args(sys.argv[2:])
options = []
if args.dnssec:
options.append(Option.DNSSEC)
if args.logs:
options.append(Option.NO_LOGS)
if args.filter:
options.append(Option.NO_FILTERS)
parameter = dnsstamps.prepare_doh("" if args.address is None else args.address,
[] if args.hashes is None else args.hashes.split(','), args.hostname,
args.path, options,
[] if args.bootstrap_ips is None else args.bootstrap_ips.split(','))
dnsstamps.format(parameter)
def dot(self):
parser = argparse.ArgumentParser(description='Create DNS-over-TLS stamp')
self.append_common_arguments(parser)
parser.add_argument('-t', '--hashes',
type=str,
help="a comma-separated list of tbs certificate hashes (e.g.: 3e1a1a0f6c53f3e97a492d57084b5b9807059ee057ab1505876fd83fda3db838)")
parser.add_argument('-n', '--hostname',
required=True,
type=str,
help="the server hostname which will also be used as a SNI name (e.g.: dot.example.com)")
parser.add_argument('-b', '--bootstrap_ips',
type=str,
help="a comma-separated list of bootstrap ips (e.g.: 1.1.1.1,1.0.0.1)")
args = parser.parse_args(sys.argv[2:])
options = []
if args.dnssec:
options.append(Option.DNSSEC)
if args.logs:
options.append(Option.NO_LOGS)
if args.filter:
options.append(Option.NO_FILTERS)
parameter = dnsstamps.prepare_dot("" if args.address is None else args.address,
[] if args.hashes is None else args.hashes.split(','), args.hostname, options,
[] if args.bootstrap_ips is None else args.bootstrap_ips.split(','))
dnsstamps.format(parameter)
def doq(self):
parser = argparse.ArgumentParser(description='Create DNS-over-QUIC stamp')
self.append_common_arguments(parser)
parser.add_argument('-t', '--hashes',
type=str,
help="a comma-separated list of tbs certificate hashes (e.g.: 3e1a1a0f6c53f3e97a492d57084b5b9807059ee057ab1505876fd83fda3db838)")
parser.add_argument('-n', '--hostname',
required=True,
type=str,
help="the server hostname which will also be used as a SNI name (e.g.: doq.example.com)")
parser.add_argument('-b', '--bootstrap_ips',
type=str,
help="a comma-separated list of bootstrap ips (e.g.: 1.1.1.1,1.0.0.1)")
args = parser.parse_args(sys.argv[2:])
options = []
if args.dnssec:
options.append(Option.DNSSEC)
if args.logs:
options.append(Option.NO_LOGS)
if args.filter:
options.append(Option.NO_FILTERS)
parameter = dnsstamps.prepare_doq("" if args.address is None else args.address,
[] if args.hashes is None else args.hashes.split(','), args.hostname, options,
[] if args.bootstrap_ips is None else args.bootstrap_ips.split(','))
dnsstamps.format(parameter)
def doh_target(self):
parser = argparse.ArgumentParser(description='Create DoH target stamp')
self.append_common_arguments(parser)
parser.add_argument('-n', '--hostname',
required=True,
type=str,
help="the server hostname which will also be used as a SNI name (e.g.: doh-target.example.com)")
parser.add_argument('-p', '--path',
required=True,
type=str,
help="the absolute URI path (e.g.: /dns-query)")
args = parser.parse_args(sys.argv[2:])
options = []
if args.dnssec:
options.append(Option.DNSSEC)
if args.logs:
options.append(Option.NO_LOGS)
if args.filter:
options.append(Option.NO_FILTERS)
parameter = dnsstamps.prepare_doh_target(args.hostname, args.path, options)
dnsstamps.format(parameter)
def dnscrypt_relay(self):
parser = argparse.ArgumentParser(description='Create DNSCrypt relay stamp')
self.append_common_arguments(parser)
args = parser.parse_args(sys.argv[2:])
parameter = dnsstamps.prepare_dnscrypt_relay("" if args.address is None else args.address)
dnsstamps.format(parameter)
def doh_relay(self):
parser = argparse.ArgumentParser(description='Create DoH relay stamp')
self.append_common_arguments(parser)
parser.add_argument('-t', '--hashes',
type=str,
help="a comma-separated list of tbs certificate hashes (e.g.: 3e1a1a0f6c53f3e97a492d57084b5b9807059ee057ab1505876fd83fda3db838)")
parser.add_argument('-n', '--hostname',
required=True,
type=str,
help="the server hostname which will also be used as a SNI name (e.g.: doh-relay.example.com)")
parser.add_argument('-p', '--path',
required=True,
type=str,
help="the absolute URI path (e.g.: /dns-query)")
parser.add_argument('-b', '--bootstrap_ips',
type=str,
help="a comma-separated list of bootstrap ips (e.g.: 1.1.1.1,1.0.0.1)")
args = parser.parse_args(sys.argv[2:])
options = []
if args.dnssec:
options.append(Option.DNSSEC)
if args.logs:
options.append(Option.NO_LOGS)
if args.filter:
options.append(Option.NO_FILTERS)
parameter = dnsstamps.prepare_doh_relay("" if args.address is None else args.address,
[] if args.hashes is None else args.hashes.split(','), args.hostname,
args.path, options,
[] if args.bootstrap_ips is None else args.bootstrap_ips.split(','))
dnsstamps.format(parameter)
if __name__ == '__main__':
    # Instantiating the CLI immediately parses sys.argv and dispatches to the
    # selected sub-command (see DnsStampCli.__init__).
    DnsStampCli()
| [
"chrisss404@gmail.com"
] | chrisss404@gmail.com |
8b5d4352186d89264215e37706a1f8423f823c3a | 12bbad1cb13fe7bc244dd100f4c724feab818fe1 | /backups/edge.py | 66b39145dd873384c446614f4c04247048bbc164 | [] | no_license | gkahl/nimble_hub | 4f0625fcd06f208cc33d0520b06b30dd7ecc9589 | 5d4c2fab3b30f385727641eb92a72771cb45d4e8 | refs/heads/master | 2020-05-04T18:33:05.760430 | 2020-04-21T23:08:21 | 2020-04-21T23:08:21 | 179,357,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,149 | py | import pyrealsense2 as rs
import sys
import numpy as np
import cv2
np.set_printoptions(threshold=sys.maxsize)
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
x=0
try:
while x<16:
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
continue
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
# Apply colormap on depth image (image must be converted to 8-bit per pixel first)
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
# Stack both images horizontally
images = np.hstack((color_image, depth_colormap))
x=x+1
finally:
row = 500
col = 480
depth_array = np.zeros((row,480))
for y in range(480):
for x in range(row):
depth_array[x,y] = round(depth_frame.get_distance(x+140,y),3)
print(depth_array)
# Stop streaming
cv2.imwrite('/home/pi/nimble_hub/pictures/foo.jpg',color_image)
cv2.imwrite('/home/pi/nimble_hub/pictures/depth_array_no_edge.jpg',depth_array*255)
cv2.imwrite('/home/pi/nimble_hub/pictures/depth.jpg',depth_image)
pipeline.stop()
err = 0.05
#row_edges[row]
for x in range(row):
valid = 0
edge1 = -1
edge2 = -1
valid_e1 = 1
valid_e2 = 1
skip = 10
for y in range(col-20):
if(skip>0):
skip = skip - 1
continue
pixel = [None] * 20
for i in range(20):
pixel[i] = depth_array[x,y+i]
if(abs(pixel[0] - pixel[1]) >= err):
if(valid == 0):
for i in range(19):
if(abs(pixel[0] - pixel[1+i])< err):
valid_e1 = 0
if(valid_e1 == 1):
edge1 = y
valid = 1
if(valid_e1 == 0):
valid_e1=1
skip = 20
if(valid):
for i in range(19):
if(abs(pixel[0] - pixel[1+i])< err):
valid_e2 = 0
if(valid_e2 == 1):
edge2 = y
valid = 1
if(valid_e2 == 0):
valid_e2=1
skip = 20
print("Row:" + str(x) + " edge1:"+str(edge1)+" edge2:"+str(edge2))
depth_array[x,edge1]=255
depth_array[x,edge2]=255
#row_edges[x] = (edge1, edge2)
cv2.imwrite('/home/pi/nimble_hub/pictures/depth_array.jpg',depth_array*255)
| [
"gkahl@gwu.edu"
] | gkahl@gwu.edu |
2d35c9f7b2714f22110dcc242119d77931b4acad | 2d574bbff00fa4cb5204eeeb212b2afb48416254 | /Services/emailService.py | a55c924791bed6d71569beedd770b9d548f22980 | [
"MIT"
] | permissive | prodProject/WorkkerAndConsumerServer | 966a32502297d808e9f217d05170aaacac6731ca | 95496f026109279c9891e08af46040c7b9487c81 | refs/heads/master | 2020-06-25T09:34:47.517054 | 2020-01-31T10:00:42 | 2020-01-31T10:00:42 | 199,272,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from SendGridEmail.sendEmailFromSendgrid import SendMail
class EmailService:
    """Thin service wrapper around the SendGrid mail sender."""

    # Shared sender instance, created once at class-definition time and
    # reused by every EmailService instance.
    m_service = SendMail()

    def send(self, emailbuilder):
        """Send the email described by *emailbuilder*.

        Raises ValueError when no builder is given (previously an assert,
        which is silently stripped when Python runs with -O).
        Returns whatever the underlying sender reports from done().
        """
        if emailbuilder is None:
            raise ValueError("Email Cannot be empty")
        self.m_service.start(emailBuilder=emailbuilder)
        return self.m_service.done()
| [
"prod.projectmanager@gmail.com"
] | prod.projectmanager@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.