| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
e8bde86277f01bc58c8ad8188141d71a9cda07b1
|
6ab3d9e0b923bd8b0859a17c0355a7a1a9cc433e
|
/lab6/prims.py
|
e981790988d4cb1702de9bf50c4e200be04eea4e
|
[] |
no_license
|
vishal-nirne/DAA-LAB-WORK
|
1328237b2b9249f250411d3cd955beb550cbf6b6
|
7b3157b56446feeb433c8cc79ff9b210b1be350b
|
refs/heads/master
| 2020-08-17T03:53:00.280864
| 2019-10-16T17:17:34
| 2019-10-16T17:17:34
| 215,603,473
| 0
| 0
| null | 2019-10-16T17:17:35
| 2019-10-16T17:16:36
| null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
import heapq

# Edge record: endpoints u, v and weight w.
class ver:
    def __init__(self, u, v, w):
        self.u = u
        self.v = v
        self.w = w

class Graph:
    def __init__(self, n):
        self.V = [[] for i in range(n)]                  # adjacency lists
        self.V1 = [float("infinity") for i in range(n)]  # cheapest edge weight into each vertex
        self.pre = [None for i in range(n)]              # predecessor in the MST
        self.T = []                                      # visit order

    def addEdge(self, u, v, w):
        # Undirected graph: store the edge in both adjacency lists.
        self.V[u].append(ver(u, v, w))
        self.V[v].append(ver(v, u, w))

    def prims(self, s):
        self.V1[s] = 0
        key = [False for i in range(len(self.V))]
        h = []
        for i in range(len(self.V)):
            heapq.heappush(h, (self.V1[i], i))
        while h:
            u = heapq.heappop(h)[1]
            print(u)
            key[u] = True
            self.T.append(u)
            for j in self.V[u]:
                if j.w < self.V1[j.v] and key[j.v] == False:
                    self.V1[j.v] = j.w
                    self.pre[j.v] = u
                    # Decrease-key: update the entry in place, then restore the heap invariant.
                    for i in range(len(h)):
                        if h[i][1] == j.v:
                            h[i] = (j.w, j.v)
                            break
                    heapq.heapify(h)

    def prin(self):
        for i in range(len(self.V)):
            if self.pre[i] != None:
                print(i, "-", self.pre[i], ":", self.V1[i])

n = int(input("Enter no of nodes:"))
e = int(input("Enter no of edges:"))
g = Graph(n)
for i in range(e):
    arr = list(map(int, input().rstrip().split()))
    g.addEdge(arr[0], arr[1], arr[2])
s = int(input("Enter start node:"))
g.prims(s)
g.prin()
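# Example session (assumed edge format "u v w" per line, 0-indexed vertices):
#   Enter no of nodes:3
#   Enter no of edges:3
#   0 1 4
#   1 2 2
#   0 2 5
#   Enter start node:0
# prims() prints the visit order 0 1 2, then prin() lists each MST edge as
# "vertex - predecessor : weight", here "1 - 0 : 4" and "2 - 1 : 2".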
|
[
"noreply@github.com"
] |
vishal-nirne.noreply@github.com
|
212f22679ed0529f1852746b99dd1ab93923569e
|
956132aef29feb0dc8d7d86cba70eb2f4e3578dd
|
/app.py
|
ff8f5b32ec2fa09fa66f0466179f7c8a1ebebf16
|
[] |
no_license
|
jminango20/ExploradorDatasetStreamlit
|
8122df13b82ce55688e9ad743638a0f07eeba540
|
5560ea6a2f5ebbed680326cc6059bb7f6ee3da42
|
refs/heads/master
| 2022-11-18T08:49:49.704343
| 2020-07-23T14:14:57
| 2020-07-23T14:14:57
| 281,966,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,655
|
py
|
# Created By Juan Minango
import os
import streamlit as st
# EDA pkgs
import pandas as pd
# Viz pkgs
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import seaborn as sns

def main():
    # Common ML dataset explorer
    st.title("App para Explorar una Base de Datos o Dataset")
    st.subheader("Simple Explorador Base de Datos")
    html_temp = """
    <div style="background-color:tomato;"><p style="color:white;font-size:50px">JD-Techn</p></div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)

    def file_selector(folder_path='./datasets'):  # current location
        filenames = os.listdir(folder_path)
        selected_filename = st.selectbox("Selecciona el archivo", filenames)
        return os.path.join(folder_path, selected_filename)

    filename = file_selector()
    st.info("Has seleccionado {}".format(filename))

    # Read the data
    df = pd.read_csv(filename)

    # Show data
    if st.checkbox("Mostrar Datos"):
        number = st.number_input("Numero de Filas", 1)
        st.dataframe(df.head(number))

    # Show columns
    st.text("Pulsas para Conocer el Nombre de las Columnas: ")
    if st.button("Nombre de las Columnas"):
        st.write(df.columns)

    # Show dimensions
    if st.checkbox("Dimensiones Base de Datos"):
        data_dim = st.radio("Mostrar Dimensiones de la Base de Datos por: ", ("Filas", "Columnas"))
        if data_dim == "Filas":
            st.text("Numero de Filas: ")
            st.write(df.shape[0])
        elif data_dim == "Columnas":
            st.text("Numero de Columnas: ")
            st.write(df.shape[1])
        else:
            st.write(df.shape)

    # Select columns to display
    if st.checkbox("Seleccionar Columna para Mostrar"):
        all_columns = df.columns.tolist()
        select_columns = st.multiselect("Seleccionar", all_columns)
        new_df = df[select_columns]
        st.dataframe(new_df)

    # Show target/class value counts
    if st.button("Conteo de Valores"):
        st.text("Conteo de Valores por Target/Clase")
        st.write(df.iloc[:, -1].value_counts())

    # Show data types
    if st.button("Tipo de Datos"):
        st.write(df.dtypes)

    # Show summary
    if st.checkbox("Resumen"):
        st.write(df.describe().T)

    # Plots and visualization
    st.subheader("Visualizacion Datos")
    # Correlation (Seaborn)
    if st.checkbox("Grafico Correlacion[Seaborn]"):
        st.write(sns.heatmap(df.corr(), annot=True))
        st.pyplot()

    # Pie chart
    if st.checkbox("Grafico Pizza"):
        all_columns_name = df.columns.tolist()
        if st.button("Generamos Grafico Pizza"):
            st.success("Generando un Grafico Pizza")
            st.write(df.iloc[:, -1].value_counts().plot.pie(autopct="%1.1f%%"))
            st.pyplot()

    # Count plot
    if st.checkbox("Grafico de Conteo de Valores"):
        st.text("Conteo de Valores por Target/Clase")
        all_columns_names = df.columns.tolist()
        primary_col = st.selectbox("Columna Primaria Agrupada por", all_columns_names)
        selected_columns_names = st.multiselect("Columnas Seleccionadas", all_columns_names)
        if st.button("Plot"):
            st.text("Generar Plot")
            if selected_columns_names:
                vc_plot = df.groupby(primary_col)[selected_columns_names].count()
            else:
                vc_plot = df.iloc[:, -1].value_counts()
            st.write(vc_plot.plot(kind="bar"))
            st.pyplot()

    # Custom plot
    st.subheader("Grafico Personalizado")
    all_columns_name = df.columns.tolist()
    type_of_plot = st.selectbox("Selecciona Tipo de Grafico", ["area", "bar", "linea", "hist", "box", "kde"])
    select_columns_names = st.multiselect("Columnas Seleccionadas para Graficar", all_columns_name)
    if st.button("Generar Grafico"):
        st.success("Generar Grafico Personalizado de {} para {}".format(type_of_plot, select_columns_names))
        # Plotting
        if type_of_plot == "area":
            cust_data = df[select_columns_names]
            st.area_chart(cust_data)
        elif type_of_plot == "bar":
            cust_data = df[select_columns_names]
            st.bar_chart(cust_data)
        elif type_of_plot == "linea":
            cust_data = df[select_columns_names]
            st.line_chart(cust_data)
        # Remaining plot kinds via pandas/matplotlib
        elif type_of_plot:
            cust_plot = df[select_columns_names].plot(kind=type_of_plot)
            st.write(cust_plot)
            st.pyplot()

    if st.button("Gracias"):
        st.balloons()

if __name__ == "__main__":
    main()
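# Assumed launch: `streamlit run app.py`, with at least one CSV file in ./datasets.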
|
[
"42250853+jminango20@users.noreply.github.com"
] |
42250853+jminango20@users.noreply.github.com
|
cfdd1871b3a9eaeeee623b209ee13b76e09ca38f
|
00bed06ab42cc42e8b7c0854a3f824279a3b3012
|
/JetBrains Academy/Coffee Machine/Problems/Tax brackets/main.py
|
9cc914f2ac7f55c19998ecdf8652e0ad96a26f3f
|
[] |
no_license
|
GioByte10/Python
|
b0cfd400cde3c3bd92d95f1136d32938d70624a1
|
a9bbfb6b61c37f1f0305252f8603256131b436ef
|
refs/heads/master
| 2023-04-08T18:51:58.721577
| 2023-04-01T06:38:50
| 2023-04-01T06:38:50
| 136,672,731
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
income = int(input())
tax = 0
# Bracket thresholds; the single matching flat rate is applied to the whole income.
if income > 132406:
    tax = 28
elif income > 42707:
    tax = 25
elif income > 15527:
    tax = 15
else:
    tax = 0
taxed = round(income * tax / 100)
print(f"The tax for {income} is {tax}%. That is {taxed} dollars!")
|
[
"gvanni.bernal10@gmail.com"
] |
gvanni.bernal10@gmail.com
|
891ebc1c1426431c417dd2be87ac1f5264ccc288
|
adc84231a69b2415eb6262e390a43170b6f98c62
|
/featurization/create_dataset_parts.py
|
a24e163f196efac19a3a6d9abdcc2d841cebeff6
|
[] |
no_license
|
KavrakiLab/pHLA-RFclassifier-from-structure
|
b467a15b39dc5e7ad027ace58318d2edcd9e7a5b
|
044384973414e2df9d91a999790fe7772db71730
|
refs/heads/master
| 2022-09-30T09:24:41.986801
| 2020-06-07T15:46:57
| 2020-06-07T15:46:57
| 258,570,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,170
|
py
|
import sys
import create_contact_features
import numpy as np
import glob
import pickle
from threading import Thread
from subprocess import call
import os
import time

label = sys.argv[1]
file_with_confs = "confs" + str(label) + ".txt"
mode = sys.argv[2]

# Map "<allele>-<peptide>" to its binder (1) / non-binder (0) label.
pHLA_to_label = {}
f = open("all_nonbinders.txt", 'r')
for line in f:
    allele, peptide = line.split()
    allele_name = allele[4] + allele[6:8] + allele[-2:]
    pHLA_to_label[allele_name + "-" + peptide] = 0
f.close()
f = open("all_binders.txt", 'r')
for line in f:
    allele, peptide = line.split()
    allele_name = allele[4] + allele[6:8] + allele[-2:]
    pHLA_to_label[allele_name + "-" + peptide] = 1
f.close()

#os.chdir("test")
all_files = []
f = open(file_with_confs, 'r')
for line in f:
    fname = line.rstrip()
    all_files.append(fname)
f.close()
#all_files = glob.glob("*.pdb")
print(len(all_files))

start = time.time()
X = []
y = []
peptides = []
alleles = []
for i in range(len(all_files)):
    print(i)
    #allele, peptide = all_files[i].split()
    #peptide = peptide.rstrip()
    #a_name = allele[4] + allele[6:8] + allele[-2:]
    #fi_name = a_name + "-" + peptide
    #confs = glob.glob("min_confs/" + fi_name + ".pdb")
    fullpdbname = all_files[i]
    phla, pdbname = fullpdbname.split("/")
    fi_name = phla
    allele, peptide = phla.split("-")
    a_name = allele
    conf_name = "ensemble/" + fullpdbname
    confs = glob.glob(conf_name)
    #print(conf_name, confs, fi_name)
    for c in confs:
        try:
            #peptide = f[:-4]
            if mode == "reg":
                feature_vec = create_contact_features.featurize(c)
            elif mode == "r2":
                feature_vec = create_contact_features.featurize_r2(c)
            elif mode == "sig":
                feature_vec = create_contact_features.featurize_sig(c)
            peptides.append(peptide)
            alleles.append(a_name)
        except:
            continue
        X.append(feature_vec)
        y.append(pHLA_to_label[fi_name])
end = time.time()
print(end - start)

X = np.array(X)
y = np.array(y)
data = {'X': X, 'y': y, 'peptides': peptides, 'alleles': alleles}
f = open("data" + mode + label + ".pkl", 'wb')
pickle.dump(data, f)
f.close()
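# Assumed invocation: python create_dataset_parts.py <label> <mode>
# where <label> selects confs<label>.txt and <mode> is one of reg / r2 / sig.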
|
[
"j.abella@utexas.edu"
] |
j.abella@utexas.edu
|
68c13c60c0dae64974ee3a6536579285bb4e9c0f
|
11705adf999ccf62e35585c8f172bbff0a96b0f9
|
/swagger_server/controllers/link_controller.py
|
4980f1ab0a40541d7f5b60192410abcd4cfec039
|
[
"MIT"
] |
permissive
|
congwang09/sdx-test
|
22ab9fb4742e85926c022f44523c9e4f93cccf50
|
b6ea0c0496e305af5c248993e4f06fe0a02f7401
|
refs/heads/main
| 2023-06-29T22:07:56.075652
| 2021-07-14T01:41:11
| 2021-07-14T01:41:11
| 393,104,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
import connexion
import six

from swagger_server import util


def get_link():  # noqa: E501
    """get an existing link

    ID of the link # noqa: E501

    :rtype: None
    """
    return 'do some magic!'
|
[
"yxin@renci.org"
] |
yxin@renci.org
|
a7c2d7d29bd9c8d2027d79539bd06e49d5e4d075
|
2c699e03232318f0ddcbcc882ada3a25bf4120ed
|
/PROBLEM SET 3/flask_app.py
|
efe819c2c0ee495a6e9d7f35a112914033f0b743
|
[] |
no_license
|
ohmybobobobo/COM5940
|
9303034156d09d898605316645b84bb6148b27df
|
d4c05821008703f6781d791c03ba1efd9bc84323
|
refs/heads/master
| 2020-04-21T23:27:52.764476
| 2019-10-14T07:53:27
| 2019-10-14T07:53:27
| 169,947,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,455
|
py
|
# A very simple Flask Hello World app for you to get started with...
from flask import Flask, render_template, json, request, redirect, session
from flask import Markup
import requests

app = Flask(__name__)
app.config["DEBUG"] = False

@app.route("/")
def main():
    user = {"name": "Bowie"}
    return render_template("index.html", user=user, title="Home Page")

@app.route('/about')
def about():
    r = requests.get('https://api.airtable.com/v0/appSshOqyvPyfmYFh/mydata1?api_key=keyzw1peVZWig6wbC&sortField=_createdTime&sortDirection=desc')
    payload = r.json()  # renamed from `dict` to avoid shadowing the builtin
    dataset = []
    for i in payload['records']:
        fields = i['fields']
        del fields["url"]
        dataset.append(fields)
    return render_template('about.html', entries=dataset)

@app.route("/blog")
def blog():
    r = requests.get('https://api.airtable.com/v0/appEL20vPQnJvnXSv/action?api_key=keyzw1peVZWig6wbC&sortField=_createdTime&sortDirection=desc')
    dict3 = r.json()
    dataset4 = []
    for i in dict3['records']:
        fields = i['fields']
        del fields["level"]
        del fields["membership_level"]
        del fields["user_name"]
        del fields["levels2"]
        del fields["ref2"]
        dataset4.append(fields)
    return render_template('blog.html', entries=dataset4)

@app.route("/chart")
def chart():
    r = requests.get('https://api.airtable.com/v0/appEL20vPQnJvnXSv/points?api_key=keyzw1peVZWig6wbC&sortField=_createdTime&sortDirection=desc')
    dict1 = r.json()
    dataset2 = []
    name_list = []
    points = []
    for i in dict1['records']:
        dataset2.append(i['fields'])
    for item in dataset2:
        name_list.append(item.get('user'))
        points.append(item.get('totalpoints'))
    return render_template('blog-single.html', entries=zip(name_list, points))

@app.route("/chart2")
def chart2():
    r = requests.get('https://api.airtable.com/v0/appEL20vPQnJvnXSv/actions?api_key=keyzw1peVZWig6wbC&sortField=_createdTime&sortDirection=desc')
    dict5 = r.json()
    dataset3 = []
    action = []
    num = []
    for i in dict5['records']:
        dataset3.append(i['fields'])
    for item in dataset3:
        action.append(item.get('action'))
        num.append(item.get('number'))
    return render_template('blog-single2.html', entries=zip(action, num))

if __name__ == '__main__':
    app.run(debug=True)
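# Assumed local launch: python flask_app.py (serves with Flask's built-in development server).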
|
[
"1155112753@gmai.com"
] |
1155112753@gmai.com
|
79ed0967d587118767b614b43000140abe053db7
|
905fcdc6e5febde43a9d6963792e4f526186e299
|
/cride/circles/managers/invitations.py
|
35c209fe9af061428c26901df716c8d373295b4a
|
[
"MIT"
] |
permissive
|
lhernerremon/demo-rider
|
462e926d9f1c845171c67f7cb20749bd1b2d0a5c
|
30783cf58513698d23730f5fa477dfeddda8ee6b
|
refs/heads/master
| 2022-12-30T14:15:35.624092
| 2020-10-05T16:18:39
| 2020-10-05T16:18:39
| 301,468,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
# Django
from django.db import models

import random
from string import ascii_letters, digits


class InvitationManager(models.Manager):
    """Manager that guarantees every invitation gets a unique random code."""

    CODE_LENGTH = 10

    def create(self, **kwargs):
        pool = ascii_letters + digits + ".-"
        # Honour a caller-supplied code, otherwise draw a random one.
        code = kwargs.get("code", "".join(random.choices(pool, k=self.CODE_LENGTH)))
        # Re-draw until the code is not already taken.
        while self.filter(code=code).exists():
            code = "".join(random.choices(pool, k=self.CODE_LENGTH))
        kwargs["code"] = code
        return super(InvitationManager, self).create(**kwargs)
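# Hypothetical usage sketch (assumes an Invitation model that uses this
# manager as `objects` and defines a `code` field):
#   invitation = Invitation.objects.create(issued_by=some_user)
#   invitation.code  # e.g. "aB3.x-9QzR": 10 chars drawn from letters, digits, "." and "-"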
|
[
"lherner.remon.27@unsch.edu.pe"
] |
lherner.remon.27@unsch.edu.pe
|
4993b111fc068469f0c22937337072e11f91fe65
|
812045c3ec6587827aeb18bde666237dfffc21ae
|
/tf_quant_finance/experimental/pricing_platform/framework/__init__.py
|
b8fcbcfb73a8cb71bc0bed15b4150533c7925160
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
google/tf-quant-finance
|
2062082c85e8679b71e69bbeb579fe338c1b0288
|
0d3a2193c0f2d320b65e602cf01d7a617da484df
|
refs/heads/master
| 2023-08-31T01:58:15.415811
| 2023-08-15T07:37:46
| 2023-08-15T07:38:22
| 198,669,252
| 4,165
| 557
|
Apache-2.0
| 2023-08-04T19:25:55
| 2019-07-24T16:09:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Framework module."""
from tf_quant_finance.experimental.pricing_platform.framework import core
from tf_quant_finance.experimental.pricing_platform.framework import equity_instruments
from tf_quant_finance.experimental.pricing_platform.framework import market_data
from tf_quant_finance.experimental.pricing_platform.framework import rate_instruments
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
"core",
"equity_instruments",
"market_data",
"rate_instruments",
]
remove_undocumented(__name__, _allowed_symbols)
|
[
"tf-quant-finance-robot@google.com"
] |
tf-quant-finance-robot@google.com
|
c33bbc90f3b92146aa718a36362b3e8147840ef8
|
e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d
|
/a10sdk/core/cm/cm_ut_post_modify_parent.py
|
7518cb270dd800886ec93cfb8a6187c9c97a2057
|
[
"Apache-2.0"
] |
permissive
|
amwelch/a10sdk-python
|
4179565afdc76cdec3601c2715a79479b3225aef
|
3e6d88c65bd1a2bf63917d14be58d782e06814e6
|
refs/heads/master
| 2021-01-20T23:17:07.270210
| 2015-08-13T17:53:23
| 2015-08-13T17:53:23
| 40,673,499
| 0
| 0
| null | 2015-08-13T17:51:35
| 2015-08-13T17:51:34
| null |
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
from a10sdk.common.A10BaseClass import A10BaseClass


class PostModifyParent(A10BaseClass):
    """Class Description::
    Unit test of post modify ineligible.

    Class post-modify-parent supports CRUD Operations and inherits from `common/A10BaseClass`.

    This class is the `"PARENT"` class for this module.`

    :param k1: {"minLength": 1, "maxLength": 32, "type": "string", "optional": false, "format": "string"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/cm-ut/post-modify-parent/{k1}`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = ["k1"]
        self.b_key = "post-modify-parent"
        self.a10_url = "/axapi/v3/cm-ut/post-modify-parent/{k1}"
        self.DeviceProxy = ""
        self.post_modify_child = {}
        self.k1 = ""

        for keys, value in kwargs.items():
            setattr(self, keys, value)
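# Hypothetical usage sketch (DeviceProxy construction lives outside this module):
#   obj = PostModifyParent(k1="unit-test", DeviceProxy=device_proxy)
#   # obj.a10_url -> "/axapi/v3/cm-ut/post-modify-parent/{k1}"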
|
[
"doug@parksidesoftware.com"
] |
doug@parksidesoftware.com
|
b511950c22b5568da7f4893798de2e90eaa81866
|
093f33fc8016d6274d165fed9faf7fc715c47db1
|
/已完成/Highest and Lowest.py
|
39a4483bd88dcf57468cb9cf5b1d0266803fdc7a
|
[
"MIT"
] |
permissive
|
karchi/codewars_kata
|
678b1b7b72548ed308ba462f5c7231e85753f92a
|
18b389134747df34eee637b352bf9dd420a601fe
|
refs/heads/master
| 2020-05-21T18:07:04.970635
| 2017-03-08T05:35:55
| 2017-03-08T05:35:55
| 62,529,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,657
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
# Kata URL:
https://www.codewars.com/kata/554b4ac871d6813a03000035/train/python
'''
import time
import unittest


class TestCases(unittest.TestCase):
    def test1(self): self.assertEqual(high_and_low("4 5 29 54 4 0 -214 542 -64 1 -3 6 -6"), "542 -214")
    def test2(self): self.assertEqual(high_and_low("1 -1"), "1 -1")
    def test3(self): self.assertEqual(high_and_low("1 1"), "1 1")
    def test4(self): self.assertEqual(high_and_low("-1 -1"), "-1 -1")
    def test5(self): self.assertEqual(high_and_low("1 -1 0"), "1 -1")
    def test6(self): self.assertEqual(high_and_low("1 1 0"), "1 0")
    def test7(self): self.assertEqual(high_and_low("-1 -1 0"), "0 -1")
    def test8(self): self.assertEqual(high_and_low("42"), "42 42")
    def test9(self): self.assertEqual(high_and_low("24 3 18"), "24 3")


def high_and_low(strings):
    numbers = strings.split(" ")
    for i in range(len(numbers)):
        numbers[i] = int(numbers[i])
    result = " ".join([str(max(numbers)), str(min(numbers))])
    return result


if __name__ == '__main__':
    unittest.main()

# Timing test:
# start = time.clock()
# for i in range(100000):
#     a = sum_pairs([20, -13, 40], -7)
#     b = sum_pairs([20, -13, 40, 23, 122, 492, 324, -245, 58, -132, -49, 942], -7)
# end = time.clock()
# print(end - start)

'''
Reference solution:
def high_and_low(numbers):
    n = map(int, numbers.split(' '))
    return str(max(n)) + ' ' + str(min(n))

Solution 2:
def high_and_low(numbers): #z.
    nn = [int(s) for s in numbers.split(" ")]
    return "%i %i" % (max(nn), min(nn))
'''
|
[
"karchi@users.noreply.github.com"
] |
karchi@users.noreply.github.com
|
ae7a680710ee18f59df41e4904682ea5e82f7320
|
2627881c5c58fb5f58c2a13ceb2600206bc7cf14
|
/ExchangeMechanisms/OldVersions/V1/DatabaseScripts/MTC's/DBSearchMTCBook.py
|
44bc047da48504c6e9a74d441be34fdcf3ac7572
|
[] |
no_license
|
cnxtech/ProjectExchange
|
f430b613cc84144ae69f5a8c3e7b753bdab068ca
|
e2591bcdd834c708a0f585ab7ef48c4ac67c68a9
|
refs/heads/master
| 2023-08-03T16:31:13.343854
| 2016-12-21T21:36:48
| 2016-12-21T21:36:48
| 185,272,433
| 0
| 0
| null | 2023-08-28T17:18:10
| 2019-05-06T21:10:09
|
CSS
|
UTF-8
|
Python
| false
| false
| 20,532
|
py
|
#-------------------------------------------------------------------------------
# Name:        DBSearchMTCBook
# Version:     1.0
# Purpose:
#
# Author:      Matthew
#
# Created:     05/26/2014
# Copyright:   (c) Matthew 2014
# Licence:     <your licence>
# Modified:    05/30/2014
#-------------------------------------------------------------------------------
import MySQLdb

db = MySQLdb.connect("localhost", "root", "***", "exchangedatabase")
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
print "Database version : %s " % data

MTCHeaderPrinted = False
MTCFound = False

# Multi-word attribute spellings accepted in addition to the raw column
# names returned by DESCRIBE MTCBook.
MultiWordParameters = ("MTC NUMBER", "DATE ENTERED", "INTEREST COMPOUND RATE",
                       "INTEREST RATE", "STOP LOSS PRICE", "FULFILLMENT PRICE",
                       "END POINT", "DIVIDEND TYPE", "MINIMUM BORROWER CONSTRAINTS",
                       "USER INTERVENTION CONSTRAINTS", "USER REQUESTS")

def ParameterIsValid(Parameter, TableDescription):
    if Parameter in MultiWordParameters:
        return True
    return any(Row[0].upper() == Parameter for Row in TableDescription)

def PrintHeaderOnce(Header="Contracts that meet search criteria: "):
    global MTCHeaderPrinted
    if MTCHeaderPrinted != True:
        print ""
        print Header
        MTCHeaderPrinted = True

def PrintMTC(MTC):
    '''Prints one MTC record (shared by every search branch).'''
    global MTCFound
    print "MTC Number: " + str(MTC[0])
    print "Username: " + MTC[1]
    print "Type: MTC"
    print "Action: " + MTC[4]
    print "Price: " + str(MTC[2])
    print "Volume: " + str(MTC[3])
    print "Date Entered: " + str(MTC[15])
    MTCFound = True

SearchParameter = raw_input("Search by: ").upper()
try:
    cursor.execute("DESCRIBE MTCBook")
    TableDescription = cursor.fetchall()
    while not ParameterIsValid(SearchParameter, TableDescription):
        print "Cannot search by that attribute. Please enter again:"
        print "Choices: " + str([Row[0] for Row in TableDescription])
        SearchParameter = raw_input("Search by: ").upper()
except:
    print "ERROR: Database execution unsuccessful"

'''Standardizing Parameter Names'''
ParameterAliases = {"MTCNUMBER": "MTC NUMBER",
                    "INTERESTCOMPOUNDRATE": "INTEREST COMPOUND RATE",
                    "INTERESTRATE": "INTEREST RATE",
                    "STOPLOSSPRICE": "STOP LOSS PRICE",
                    "FULFILLMENTPRICE": "FULFILLMENT PRICE",
                    "ENDPOINT": "END POINT",
                    "DIVIDENDTYPE": "DIVIDEND TYPE",
                    "MINIMUMBORROWERCONSTRAINTS": "MINIMUM BORROWER CONSTRAINTS",
                    "USERINTERVENTIONCONSTRAINTS": "USER INTERVENTION CONSTRAINTS",
                    "DATEENTERED": "DATE ENTERED"}
SearchParameter = ParameterAliases.get(SearchParameter, SearchParameter)
print "Searching by: " + SearchParameter.title()

'''Prompting For Extra Inputs'''
if SearchParameter == "INTEREST COMPOUND RATE" or SearchParameter == "DURATION":
    SearchValueInterval = raw_input("Search for interval: ")
    SearchValueValue = raw_input("Search for value: ")
elif SearchParameter != "DATE ENTERED":
    SearchValue = raw_input("Search for value: ")
else:
    def ReadDateComponent(Prompt):
        # Returns (value, omit); a blank entry means "match any value".
        Entry = raw_input(Prompt)
        try:
            return int(Entry), False
        except:
            return Entry, Entry == ""
    DateSearchYear, DateSearchYearOmit = ReadDateComponent("Search Year: ")
    DateSearchMonth, DateSearchMonthOmit = ReadDateComponent("Search Month: ")
    DateSearchDay, DateSearchDayOmit = ReadDateComponent("Search Day: ")
    DateSearchHour, DateSearchHourOmit = ReadDateComponent("Search Hour: ")
    DateSearchMinute, DateSearchMinuteOmit = ReadDateComponent("Search Minute: ")
    DateSearchSecond, DateSearchSecondOmit = ReadDateComponent("Search Second: ")

'''Checking Parameters'''
if SearchParameter == "MTC NUMBER":
    SearchValue = int(SearchValue)
    sql = "SELECT * FROM MTCBook WHERE MTCNumber = %d" % (SearchValue)
    try:
        cursor.execute(sql)
        MTC = cursor.fetchall()
        print ""
        if MTC != ():
            PrintMTC(MTC[0])
    except:
        print "ERROR: Database fetch exception"

# Column name, value converter, and whether the SQL literal must be quoted,
# for every single-column equality search.
SimpleSearches = {
    "USERNAME": ("Username", lambda v: str(v.capitalize()), True),
    "PRICE": ("Price", float, False),
    "VOLUME": ("Volume", float, False),
    "ACTION": ("Action", lambda v: str(v.capitalize()), True),
    "INTEREST RATE": ("InterestRate", float, False),
    "STOP LOSS PRICE": ("StopLossPrice", float, False),
    "FULFILLMENT PRICE": ("FulfillmentPrice", float, False),
    "DIVIDEND TYPE": ("DividendType", lambda v: str(v.capitalize()), True),
    "MINIMUM BORROWER CONSTRAINTS": ("MinimumBorrowerConstraints", int, False),
    "USER INTERVENTION CONSTRAINTS": ("UserInterventionConstraints", int, False),
    "INTEREST COMPOUND RATE": ("InterestCompoundRate", str, True),
    "DURATION": ("Duration", str, True),
}

if SearchParameter in SimpleSearches:
    Column, Convert, Quoted = SimpleSearches[SearchParameter]
    if SearchParameter in ("INTEREST COMPOUND RATE", "DURATION"):
        # Compose "<value> <INTERVAL>" from the two prompts above.
        SearchValue = str(float(SearchValueValue)) + " " + SearchValueInterval.upper()
    SearchValue = Convert(SearchValue)
    if Quoted:
        sql = """SELECT * FROM MTCBook WHERE %s = "%s" """ % (Column, SearchValue)
    else:
        sql = "SELECT * FROM MTCBook WHERE " + Column + " = " + str(SearchValue)
    try:
        cursor.execute(sql)
        MTCList = cursor.fetchall()
        for MTC in MTCList:
            PrintHeaderOnce()
            print ""
            PrintMTC(MTC)
    except:
        print "ERROR: Database fetch exception"

if SearchParameter == "DATE ENTERED":
    try:
        cursor.execute("SELECT * FROM MTCBook")
        MTCList = cursor.fetchall()
        for MTC in MTCList:
            Timestamp = str(MTC[15])
            # Omitted components are copied from the record, so they always match.
            if DateSearchYearOmit == True:
                DateSearchYear = Timestamp[:4]
            if DateSearchMonthOmit == True:
                DateSearchMonth = Timestamp[5:7]
            if DateSearchDayOmit == True:
                DateSearchDay = Timestamp[8:10]
            if DateSearchHourOmit == True:
                DateSearchHour = Timestamp[11:13]
            if DateSearchMinuteOmit == True:
                DateSearchMinute = Timestamp[14:16]
            if DateSearchSecondOmit == True:
                DateSearchSecond = Timestamp[17:19]
            # Zero-pad single-digit numeric components (string components
            # compare greater than ints in Python 2, so they are skipped).
            if DateSearchMonth < 10:
                DateSearchMonth = "0" + str(DateSearchMonth)
            if DateSearchDay < 10:
                DateSearchDay = "0" + str(DateSearchDay)
            if DateSearchHour < 10:
                DateSearchHour = "0" + str(DateSearchHour)
            if DateSearchMinute < 10:
                DateSearchMinute = "0" + str(DateSearchMinute)
            if DateSearchSecond < 10:
                DateSearchSecond = "0" + str(DateSearchSecond)
            if (str(DateSearchYear) == Timestamp[:4] and str(DateSearchMonth) == Timestamp[5:7] and
                    str(DateSearchDay) == Timestamp[8:10] and str(DateSearchHour) == Timestamp[11:13] and
                    str(DateSearchMinute) == Timestamp[14:16] and str(DateSearchSecond) == Timestamp[17:19]):
                PrintHeaderOnce("Orders that meet search parameters:")
                print ""
                PrintMTC(MTC)
    except:
        print "ERROR: Database fetch exception"

if MTCFound != True:
    print ""
    print "No orders meet search criteria"
db.close()
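# Note: like the original, the queries above splice user input straight into
# the SQL text. MySQLdb also supports parameterized queries, e.g.:
#   cursor.execute("SELECT * FROM MTCBook WHERE Username = %s", (SearchValue,))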
|
[
"matthewrastovac@yahoo.com"
] |
matthewrastovac@yahoo.com
|
cc05740753efb5538a022e429587b3ddd0ed2f59
|
6c547e3312e2d1bd3dab123b831053ed7aef7b6d
|
/pages/BICL/login/LoginPage.py
|
2596902d6d4e41bb5105b84915ea1e972a78b50c
|
[] |
no_license
|
kenito2050/BICL
|
8c4239f1e897e4dfc04aa35e827816242b41d5dd
|
82891aba56cc49c9cf96ce82472847c4cb10828f
|
refs/heads/master
| 2020-12-31T22:10:44.784193
| 2020-02-10T23:00:10
| 2020-02-10T23:00:10
| 239,039,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from config_globals import *


class LoginPage():
    def __init__(self, driver):
        self.driver = driver

    def Page_Elements(self):
        # User Name Field
        self.user_field = self.driver.find_element(By.ID, "UserName")
        # Password Field
        self.password_field = self.driver.find_element(By.ID, "Password")
        # Submit Button
        self.submit_button = self.driver.find_element(By.XPATH, "/html/body/div/div/div[1]/form/button")
        return self

    def login(self, username, password):
        LoginPage.Page_Elements(self).user_field.clear()
        LoginPage.Page_Elements(self).user_field.send_keys(username)
        LoginPage.Page_Elements(self).password_field.click()
        LoginPage.Page_Elements(self).password_field.clear()
        LoginPage.Page_Elements(self).password_field.send_keys(password)
        self.driver.implicitly_wait(3)

    def click_login_button(self):
        LoginPage.Page_Elements(self).submit_button.click()

    # IE login method
    # problem is that IE hangs on the login button click
    def IE_login(self, username, password):
        LoginPage.Page_Elements(self).user_field.clear()
        LoginPage.Page_Elements(self).user_field.send_keys(username)
        LoginPage.Page_Elements(self).password_field.click()
        LoginPage.Page_Elements(self).password_field.clear()
        LoginPage.Page_Elements(self).password_field.send_keys(password)
        LoginPage.Page_Elements(self).password_field.send_keys(Keys.TAB)
        LoginPage.Page_Elements(self).password_field.send_keys(Keys.ENTER)

    def verify_username_field_displays(self, test_case_ID, browser, env, time_stamp):
        # Verify the page loads (username_field should be clickable); if not, take a screenshot and re-raise.
        try:
            LoginPage.Page_Elements(self).user_field.click()
        except NoSuchElementException:
            screenshot_name = "FAIL" + "_" + test_case_ID + "_" + browser + "_" + env + "_" + time_stamp + ".png"
            saved_screenshot_location = str(screenshot_directory / screenshot_name)
            self.driver.get_screenshot_as_file(saved_screenshot_location)
            raise
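# Hypothetical usage sketch (WebDriver construction lives in the test harness):
#   page = LoginPage(driver)
#   page.login("some_user", "some_password")
#   page.click_login_button()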
|
[
"ken.villarruel@gmail.com"
] |
ken.villarruel@gmail.com
|
f51061be65fbe76e22ba88ad8e114082a67173b2
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/onnxruntime/test/providers/cpu/tensor/gen_mvn_test_data.py
|
a7c1be435d7125434f43e787233637a8d1f3e1fb
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse

import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument("--shape", type=int, nargs="+", required=True)
parser.add_argument("--axes", type=int, nargs="+", required=True)
args = parser.parse_args()

shape = tuple(args.shape)
axes = tuple(args.axes)

random_seed = 0
rng = np.random.default_rng(random_seed)

X = rng.random(size=shape, dtype=float)

# Calculate expected output data
X_mean = np.mean(X, axis=axes, keepdims=True)
X_std = np.std(X, axis=axes, keepdims=True)
Y = (X - X_mean) / X_std


def to_c_float_literals(arr):
    literals_per_line = 8
    literals = [f"{literal:.7f}f" for literal in arr.flatten().tolist()]
    result = ""
    for i, literal in enumerate(literals):
        result += "{},{}".format(literal, "\n" if (i + 1) % literals_per_line == 0 else " ")
    return result


print(f"input:\n{to_c_float_literals(X)}")
print(f"expected output:\n{to_c_float_literals(Y)}")
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
17e0b04f1a43263e764801314f361eeab2c08818
|
7e6b07e49f41d99022583b4653ea2178d68f9a1e
|
/src/python_code/examples/examples.py
|
c51c550c50a46cbde65329b45cc1b50ea22dbfbf
|
[] |
no_license
|
TDougy52/backup-project
|
532793174662c5637104d935869dc1f6988e8cdd
|
86089baa5524be70281ae4158e8cdd4e6c4cf36e
|
refs/heads/master
| 2021-01-11T14:25:38.663929
| 2016-12-06T17:30:23
| 2016-12-06T17:30:23
| 81,406,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,413
|
py
|
# Code below kept only for example
# (assumed imports for these bottle snippets; the original file omitted them)
import sqlite3
from bottle import route, request, template, static_file, error


@route('/todo')
def todo_list():
    conn = sqlite3.connect('backup_info.db')
    c = conn.cursor()
    c.execute("SELECT id, task FROM todo WHERE status LIKE '1'")
    result = c.fetchall()
    conn.close()
    output = template('make_table', rows=result)
    return output


@route('/new', method='GET')
def new_item():
    if request.GET.save:
        new = request.GET.task.strip()
        conn = sqlite3.connect('backup_info.db')
        c = conn.cursor()
        c.execute("INSERT INTO todo (task,status) VALUES (?,?)", (new, 1))
        new_id = c.lastrowid
        conn.commit()
        conn.close()
        return '<p>The new task was inserted into the database, the ID is %s</p>' % new_id
    else:
        return template('new_task.tpl')


@route('/edit/<no:int>', method='GET')
def edit_item(no):
    if request.GET.save:
        edit = request.GET.task.strip()
        status = request.GET.status.strip()
        if status == 'open':
            status = 1
        else:
            status = 0
        conn = sqlite3.connect('backup_info.db')
        c = conn.cursor()
        c.execute("UPDATE todo SET task = ?, status = ? WHERE id LIKE ?", (edit, status, no))
        conn.commit()
        return '<p>The item number %s was successfully updated</p>' % no
    else:
        conn = sqlite3.connect('backup_info.db')
        c = conn.cursor()
        c.execute("SELECT task FROM todo WHERE id LIKE ?", (str(no),))  # was (str(no)): a bare string, not a 1-tuple
        cur_data = c.fetchone()
        return template('edit_task', old=cur_data, no=no)


@route('/item<item:re:[0-9]+>')
def show_item(item):
    conn = sqlite3.connect('backup_info.db')
    c = conn.cursor()
    c.execute("SELECT task FROM todo WHERE id LIKE ?", (item,))
    result = c.fetchall()
    conn.close()
    if not result:
        return 'This item number does not exist!'
    else:
        return 'Task: %s' % result[0]


@route('/help')
def help():
    return static_file('help.html', root='.')  # bottle expects the response to be returned


@route('/json<json:re:[0-9]+>')
def show_json(json):
    conn = sqlite3.connect('backup_info.db')
    c = conn.cursor()
    c.execute("SELECT task FROM todo WHERE id LIKE ?", (json,))
    result = c.fetchall()
    conn.close()
    if not result:
        return {'task': 'This item number does not exist!'}
    else:
        return {'task': result[0]}


@error(403)
def mistake403(code):
    return 'There is a mistake in your url!'
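# Assumed schema behind these examples: a table
#   todo(id INTEGER PRIMARY KEY, task TEXT, status INTEGER)
# inside backup_info.db.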
|
[
"toddcookevt@gmail.com"
] |
toddcookevt@gmail.com
|
00203a466d9d39ac178150437328df3a2e5589bb
|
f15fb8f399e8fae1d2b5c1f5351c91d2cd73610b
|
/app.py
|
7f9bee4950e39a85d8247108346a809094d4fcfd
|
[] |
no_license
|
ItaloRFeitosa/stems_separator
|
66250482a11b4a1c765b3882898ef31cf84ebb25
|
728094272146e0b993bd8c9ae680f532c37871ac
|
refs/heads/master
| 2022-12-17T11:19:57.456212
| 2020-09-05T15:21:17
| 2020-09-05T15:21:17
| 293,101,918
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,782
|
py
|
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'interface.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!

import os
from worker import Worker
from separator import initSeparator
from PyQt5 import QtCore, QtGui, QtWidgets
import time


class Ui_MainWindow(QtWidgets.QWidget):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.setEnabled(True)
        MainWindow.resize(500, 280)
        MainWindow.setMinimumSize(QtCore.QSize(500, 280))
        MainWindow.setMaximumSize(QtCore.QSize(500, 280))
        font = QtGui.QFont()
        font.setFamily("Comic Sans MS")
        MainWindow.setFont(font)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(20, 20, 466, 241))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setHorizontalSpacing(8)
        self.gridLayout.setVerticalSpacing(16)
        self.gridLayout.setObjectName("gridLayout")
        self.output_label = QtWidgets.QLabel(self.gridLayoutWidget)
        self.output_label.setObjectName("output_label")
        self.gridLayout.addWidget(self.output_label, 4, 0, 1, 1)
        self.foldername = QtWidgets.QLabel(self.gridLayoutWidget)
        self.foldername.setObjectName("foldername")
        self.gridLayout.addWidget(self.foldername, 2, 1, 1, 1)
        self.filename_label = QtWidgets.QLabel(self.gridLayoutWidget)
        self.filename_label.setObjectName("filename_label")
        self.gridLayout.addWidget(self.filename_label, 1, 0, 1, 1)
        self.cancelButton = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.cancelButton.setObjectName("pushButton")
        self.gridLayout.addWidget(self.cancelButton, 5, 0, 1, 1)
        self.finishLabel = QtWidgets.QLabel(self.gridLayoutWidget)
        self.finishLabel.setObjectName("finishLabel")
        self.gridLayout.addWidget(self.finishLabel, 5, 1, 1, 1)
        self.startButton = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.startButton.setObjectName("start")
        self.gridLayout.addWidget(self.startButton, 5, 2, 1, 1)
        self.filename = QtWidgets.QLabel(self.gridLayoutWidget)
        self.filename.setObjectName("filename")
        self.gridLayout.addWidget(self.filename, 1, 1, 1, 1)
        self.dir_label = QtWidgets.QLabel(self.gridLayoutWidget)
        self.dir_label.setObjectName("dir_label")
        self.gridLayout.addWidget(self.dir_label, 2, 0, 1, 1)
        self.choose_file = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.choose_file.setObjectName("choose_file")
        self.gridLayout.addWidget(self.choose_file, 1, 2, 1, 1)
        self.label_6 = QtWidgets.QLabel(self.gridLayoutWidget)
        font = QtGui.QFont()
        font.setFamily("Comic Sans MS")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 0, 1, 1, 1)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.stems_2 = QtWidgets.QRadioButton(self.gridLayoutWidget)
        self.stems_2.setChecked(True)
        self.stems_2.setObjectName("stems_2")
        self.ntracksgroup = QtWidgets.QButtonGroup(MainWindow)
        self.ntracksgroup.setObjectName("ntracksgroup")
        self.ntracksgroup.addButton(self.stems_2)
        self.horizontalLayout_3.addWidget(self.stems_2)
        self.stems_4 = QtWidgets.QRadioButton(self.gridLayoutWidget)
        self.stems_4.setObjectName("stems_4")
        self.ntracksgroup.addButton(self.stems_4)
        self.horizontalLayout_3.addWidget(self.stems_4)
        self.gridLayout.addLayout(self.horizontalLayout_3, 3, 1, 1, 1)
        self.choose_folder = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.choose_folder.setObjectName("choose_folder")
        self.gridLayout.addWidget(self.choose_folder, 2, 2, 1, 1)
        self.label = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 3, 0, 1, 1)
        self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_6.setObjectName("horizontalLayout_6")
        self.mp3_out = QtWidgets.QRadioButton(self.gridLayoutWidget)
        self.mp3_out.setChecked(True)
        self.mp3_out.setObjectName("mp3_out")
        self.saidagroup = QtWidgets.QButtonGroup(MainWindow)
        self.saidagroup.setObjectName("saidagroup")
        self.saidagroup.addButton(self.mp3_out)
        self.horizontalLayout_6.addWidget(self.mp3_out)
        self.wav_out = QtWidgets.QRadioButton(self.gridLayoutWidget)
        self.wav_out.setObjectName("radioButton")
        self.saidagroup.addButton(self.wav_out)
        self.horizontalLayout_6.addWidget(self.wav_out)
        self.gridLayout.addLayout(self.horizontalLayout_6, 4, 1, 1, 1)
        self.gridLayout.setColumnStretch(0, 1)
        self.gridLayout.setColumnStretch(1, 3)
        self.gridLayout.setColumnStretch(2, 1)
        MainWindow.setCentralWidget(self.centralwidget)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

        self.separator_params = dict()
        self.threadpool = QtCore.QThreadPool()
        self.timer = QtCore.QTimer()

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Stems Separator"))
        self.output_label.setText(_translate("MainWindow", "Saída:"))
        self.foldername.setText(_translate("MainWindow", ""))
        self.filename_label.setText(_translate("MainWindow", "Arquivo:"))
        self.finishLabel.setText(_translate("MainWindow", ""))
        self.cancelButton.setText(_translate("MainWindow", "Cancelar"))
        self.startButton.setText(_translate("MainWindow", "Iniciar"))
        self.filename.setText(_translate("MainWindow", ""))
        self.dir_label.setText(_translate("MainWindow", "Diretório:"))
        self.choose_file.setText(_translate("MainWindow", "Escolher"))
        self.label_6.setText(_translate("MainWindow", "Stems Separatator - ALPHA"))
        self.stems_2.setText(_translate("MainWindow", "2 stems"))
        self.stems_4.setText(_translate("MainWindow", "4 stems"))
        self.choose_folder.setText(_translate("MainWindow", "Escolher"))
        self.label.setText(_translate("MainWindow", "N° de Tracks:"))
        self.mp3_out.setText(_translate("MainWindow", ".mp3"))
        self.wav_out.setText(_translate("MainWindow", ".wav"))
        self.choose_file.clicked.connect(self.choose_file_handler)
        self.choose_folder.clicked.connect(self.choose_folder_handler)
        self.startButton.clicked.connect(self.startHandler)
        self.cancelButton.clicked.connect(app.quit)

    def finishedHandler(self):
        self.finishLabel.setText("Processamento Finalizado, a Aplicação será fechada em 5s.")
        self.timer.timeout.connect(app.quit)
        self.timer.start(5000)

    def startHandler(self):
        if self.stems_2.isChecked():
            self.separator_params['stems'] = 'spleeter:2stems'
        else:
            self.separator_params['stems'] = 'spleeter:4stems'
        if self.mp3_out.isChecked():
            self.separator_params['codec'] = 'mp3'
        else:
            self.separator_params['codec'] = 'wav'
        self.startButton.setText('Carregando...')
        self.startButton.setEnabled(False)
        worker = Worker(initSeparator, self.separator_params)
        worker.signals.finished.connect(self.finishedHandler)
        self.threadpool.start(worker)

    def choose_file_handler(self):
        filename = QtWidgets.QFileDialog.getOpenFileName(self, "Select File", os.getcwd(), "Audio Files (*.mp3 *.wav)")
        self.filename.setText(filename[0])
        self.separator_params['filename'] = filename[0]

    def choose_folder_handler(self):
        foldername = QtWidgets.QFileDialog.getExistingDirectory()
        self.foldername.setText(foldername)
        self.separator_params['foldername'] = foldername


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
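# Assumed launch: python app.py, with worker.py providing Worker and
# separator.py providing initSeparator (both imported above).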
|
[
"italo85199@gmail.com"
] |
italo85199@gmail.com
|
7a0961c2b76f7efda10d25369297ceffaf9d029f
|
5fe709d0643394168dd919bbc721adabebe60a97
|
/optimizer/inference_optimizer_graph.py
|
4027661f7a5b090871b18fd5198cf7fc7a7c06f5
|
[
"MIT"
] |
permissive
|
vibhatha/pipedream
|
8232b67366a0dd84e41fd496c9b2e8b86dbfdd89
|
af6b811f5d01a68e9eb91065e5242fc1a075f279
|
refs/heads/master
| 2020-12-20T18:21:35.337352
| 2020-07-06T04:54:23
| 2020-07-06T04:54:23
| 236,167,878
| 0
| 0
|
MIT
| 2020-01-25T12:34:04
| 2020-01-25T12:34:03
| null |
UTF-8
|
Python
| false
| false
| 12,670
|
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import argparse
import csv
import math
import os
import sys

sys.path.append("..")
import graph
import utils


def main(num_machines, profile_filename, time_between_inputs, network_bandwidth, memory_size,
         straight_pipeline, use_memory_constraint, use_fewer_machines,
         activation_compression, output_directory, num_machines_in_first_level=None,
         print_configuration=True, verbose=False):
    if (num_machines_in_first_level is not None and
            num_machines_in_first_level > num_machines):
        raise Exception("num_machines_in_first_level has to be less than num_machines!")

    gr = graph.Graph.from_str(open(profile_filename, 'r').read())
    antichain_gr = gr.antichain_dag()
    states = antichain_gr.topological_sort()
    if verbose:
        print("Total number of states: %d" % len(states))
    states_indices = {}
    for i in range(len(states)):
        states_indices[states[i]] = i
    for i in range(len(states)):
        for antichain_node in states[i].antichain:
            states[i].output_activation_size += gr.nodes[antichain_node].activation_size

    # A[i][j] holds, for the first i+1 antichain states on j+1 machines:
    # (pipeline time, pipeline latency, optimal split point, machines in the last stage).
    A = []
    for i in range(len(states)):
        row_A = []
        for j in range(num_machines):
            row_A.append((None, None, None, None))
        A.append(row_A)

    # Cumulative compute time, activation size, and parameter size per state.
    for i in range(len(states)):
        antichain = states[i].antichain
        all_predecessors = gr.all_predecessors(antichain)
        states[i].compute_time = 0.0
        states[i].activation_size = 0.0
        states[i].parameter_size = 0.0
        for predecessor in all_predecessors:
            states[i].compute_time += (predecessor.forward_compute_time / 1000.0)
            states[i].activation_size += predecessor.activation_size
            states[i].parameter_size += predecessor.parameter_size
    gr.reset()

    # Base case: a single stage covering states[0..i], replicated over j+1 machines.
    for i in range(len(states)):
        cum_compute_time = states[i].compute_time
        cum_activation_size = states[i].activation_size
        cum_parameter_size = states[i].parameter_size
        max_j = 1 if straight_pipeline else num_machines
        for j in range(max_j):
            stashed_data_size = cum_activation_size + cum_parameter_size
            if use_memory_constraint and stashed_data_size > memory_size:
                A[i][j] = (None, None, None, None)
                continue
            if num_machines_in_first_level is not None and j != (num_machines_in_first_level - 1):
                A[i][j] = (None, None, None, None)
            else:
                if (cum_compute_time / (j+1)) < time_between_inputs:
                    A[i][j] = (cum_compute_time / (j+1), cum_compute_time, None, (j+1))

    min_machines = 1 if num_machines_in_first_level is None else num_machines_in_first_level
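    # DP step: to cover states[0..i] with m+1 machines, try every predecessor
    # antichain j as the final split point and every replication factor m_prime
    # for the last stage. The candidate pipeline time is the max of the
    # sub-pipeline time A[j][m-m_prime][0], the last stage's replicated compute
    # time and, without activation compression, the inter-stage transfer times.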
for m in range(min_machines, num_machines):
for i in range(1, len(states)):
(min_pipeline_time, min_pipeline_latency, optimal_split, optimal_num_machines) = A[i][m]
if use_fewer_machines and m > 0 and (min_pipeline_time is None or A[i][m-1][0] < min_pipeline_time):
(min_pipeline_time, min_pipeline_latency, optimal_split, optimal_num_machines) = A[i][m-1]
predecessors = antichain_gr.predecessors(states[i].node_id)
predecessor_ids = [states_indices[predecessor] for predecessor in predecessors]
for j in predecessor_ids:
max_m_prime = 2 if straight_pipeline else (m+1)
for m_prime in range(1, max_m_prime):
input_transfer_time = states[j].output_activation_size / (network_bandwidth * m_prime)
output_transfer_time = None
if i < len(states) -1:
output_transfer_time = states[i].output_activation_size / (network_bandwidth * m_prime)
last_stage_time = states[i].compute_time - states[j].compute_time
last_stage_parameter_size = states[i].parameter_size - states[j].parameter_size
stashed_data_size = (states[i].activation_size - states[j].activation_size) + last_stage_parameter_size
if use_memory_constraint and stashed_data_size > memory_size:
continue
last_stage_time /= m_prime
if A[j][m-m_prime][0] is None:
continue
pipeline_latency = sum([A[j][m-m_prime][1], last_stage_time * m_prime])
pipeline_time = max(A[j][m-m_prime][0], last_stage_time)
if not activation_compression:
pipeline_time = max(pipeline_time, input_transfer_time)
pipeline_latency = sum([pipeline_latency, input_transfer_time * m_prime])
if output_transfer_time is not None:
pipeline_time = max(pipeline_time, output_transfer_time)
pipeline_latency = sum([pipeline_latency, output_transfer_time * m_prime])
if pipeline_time > time_between_inputs:
continue
if min_pipeline_latency is None or min_pipeline_latency > pipeline_latency:
optimal_split = (j, m-m_prime)
optimal_num_machines = m_prime
min_pipeline_time = pipeline_time
min_pipeline_latency = pipeline_latency
A[i][m] = (min_pipeline_time, min_pipeline_latency, optimal_split, optimal_num_machines)
metadata = A[len(states)-1][num_machines-1]
next_split = metadata[2]
remaining_machines_left = num_machines
splits = []
replication_factors = []
prev_split = len(states)
while next_split is not None:
num_machines_used = metadata[3]
if verbose:
print("-------------------------------------")
print("Number of machines used: %d..." % num_machines_used)
print("Split between layers %d and %d..." % (next_split[0], next_split[0] + 1))
print("Split before antichain %s..." % (states[next_split[0]+1].antichain))
splits.append(next_split[0]+1)
compute_time = states[prev_split-1].compute_time - states[next_split[0]].compute_time
parameter_size = states[prev_split-1].parameter_size - states[next_split[0]].parameter_size
pp_communication_time_input = states[next_split[0]].output_activation_size / network_bandwidth
pp_communication_time_output = states[prev_split-1].output_activation_size / network_bandwidth
if activation_compression:
pp_communication_time_input = 0.0
pp_communication_time_output = 0.0
compute_time /= num_machines_used
if verbose:
print("Compute time = %f, Pipeline-parallel communication time = %f..." % (
compute_time, max(pp_communication_time_input, pp_communication_time_output)))
prev_split = splits[-1]
metadata = A[next_split[0]][next_split[1]]
next_split = metadata[2]
replication_factors.append(num_machines_used)
remaining_machines_left -= num_machines_used
if verbose:
print("-------------------------------------")
print("Number of machines used: %d..." % metadata[3])
num_machines_used = metadata[3]
remaining_machines_left -= num_machines_used
compute_time = states[prev_split-1].compute_time
parameter_size = states[prev_split-1].parameter_size
compute_time /= num_machines_used
if verbose:
print("Compute time = %f..." % compute_time)
print("-------------------------------------")
if print_configuration:
print("Number of machines in budget not used: %d..." % remaining_machines_left)
if print_configuration:
print("(Split start, split end) / compute time taken per stage / replication factor per stage:")
prev_split = 0
splits.reverse()
splits.append(len(states))
replication_factors.append(num_machines_used)
replication_factors.reverse()
for i in range(len(splits)):
time = 0.0
if prev_split > 0:
time = states[splits[i]-1].compute_time - states[prev_split-1].compute_time
else:
time = states[splits[i]-1].compute_time
if print_configuration:
print(prev_split, splits[i], time, replication_factors[i])
if splits[i] < len(states):
predecessors = gr.all_predecessors(states[splits[i]-1].antichain)
for predecessor in predecessors:
if predecessor.stage_id is None:
predecessor.set_stage_id(i)
prev_split = splits[i]
for node in gr.nodes.values():
if node.stage_id is None:
node.set_stage_id(len(splits)-1)
if output_directory is not None:
gr.to_dot(os.path.join(output_directory, "gpus=%d" % num_machines))
gr_str = str(gr)
with open(os.path.join(output_directory, "gpus=%d.txt" % num_machines), 'w') as f:
f.write(gr_str)
total_time = states[-1].compute_time
total_parameter_size = states[-1].parameter_size
pipeline_parallel_total_time = A[len(states)-1][num_machines-1][0]
pipeline_parallel_latency = A[len(states)-1][num_machines-1][1]
if verbose:
print()
print("Time taken by single-stage pipeline:", total_time)
print("Total latency:", pipeline_parallel_latency)
print("Time per stage in pipeline:", pipeline_parallel_total_time)
print("Throughput increase (compared to single machine):",
total_time / pipeline_parallel_total_time)
print("[Note that single-machine and %d-machine DP might not fit given memory constraints]")
return pipeline_parallel_total_time
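# Example invocation (a sketch -- the script name, profile path, and values
# are illustrative, not taken from this repository):
#   python optimizer.py -n 8 -f profiles/vgg16.txt -t 0.1 -b 1e9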
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=("Run PipeDream's optimizer for replicated settings")
)
parser.add_argument('-n', "--num_machines", required=True, type=int,
help="Number of machines available")
parser.add_argument('-f', "--profile_filename", required=True,
help="Profile filename")
parser.add_argument('-b', "--network_bandwidth", type=float, default=1000000000,
help="Available network bandwidth in bytes/sec")
parser.add_argument('-m', "--num_machines_in_first_level", type=int, default=None,
help="Number of machines in first level")
parser.add_argument('-s', "--memory_size", type=float, default=16000000000,
help="Amount of memory available on each machine")
parser.add_argument("--straight_pipeline", action='store_true',
help="No replication across stages")
parser.add_argument('-o', "--output_directory", default=None, type=str,
help="Output directory to dump processed graph")
parser.add_argument("--use_memory_constraint", action='store_true',
help="Enforce memory constraint per machine")
parser.add_argument("--use_fewer_machines", action='store_true',
help="Use fewer machines, if possible")
parser.add_argument("--activation_compression", action='store_true',
help="Compress activations")
parser.add_argument('-t', "--time_between_inputs", required=True, type=float,
help="Time between inputs")
args = parser.parse_args()
args = vars(args)
num_machines = args["num_machines"]
profile_filename = args["profile_filename"]
network_bandwidth = args["network_bandwidth"]
memory_size = args["memory_size"]
num_machines_in_first_level = args["num_machines_in_first_level"]
straight_pipeline = args["straight_pipeline"]
output_directory = args["output_directory"]
use_memory_constraint = args["use_memory_constraint"]
use_fewer_machines = args["use_fewer_machines"]
activation_compression = args["activation_compression"]
time_between_inputs = args["time_between_inputs"]
main(num_machines, profile_filename, time_between_inputs, network_bandwidth, memory_size,
straight_pipeline, use_memory_constraint, use_fewer_machines,
activation_compression, output_directory, num_machines_in_first_level=num_machines_in_first_level,
verbose=True)
|
[
"vibhatha@gmail.com"
] |
vibhatha@gmail.com
|
d4cfddd6735e751ff39658a9aa42e87da72d91cc
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/2547.py
|
16ddce5b215d59bfc9c72b24f494d31fb7fbd169
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
import math
import sys
def isPalindrome(x):
    xstr = str(x)
    # a string is a palindrome iff it equals its own reverse
    return xstr == xstr[::-1]
def solve(a,b):
count = 0
x = math.sqrt(a)
xs = int(x * x)
if (x==int(x)) and isPalindrome(int(x)) and isPalindrome(xs):
count = 1
# print 'pal: ', x, xs
x = int(x) + 1
xs = x*x
while (xs <= b):
if isPalindrome(xs) and isPalindrome(x):
count += 1
# print 'pal: ', x, xs
x = x+1
xs = x*x
return count
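# Hand-checked example: solve(1, 100) returns 3, since the palindromes
# 1, 2 and 3 square to the palindromes 1, 4 and 9.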
filename = sys.argv[1]
fin = open(filename, 'r')
#fout = open('p3res.txt', 'w')
cases = int(fin.readline())
for case in range(cases):
[a, b] = fin.readline().split()
# print a, b
print "Case #{}: {}".format(case + 1, solve(int(a), int(b)))
#fout.write("Case #{}: {}".format(case + 1, solve(int(a), int(b))))
fin.close()
#fout.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
9c61fdde2bf632e456310592db5e8388abce9d14
|
98232ec22da94b2cb02551f4bbf868123fe56482
|
/test/mean_shift.py
|
55b76a195635f23b4280424d0d91b0803a1abd29
|
[] |
no_license
|
SWMaestro8th/WatchCoach_ML
|
ab2e005ffdf22ff15b9a73205d2e509d30e45b08
|
bcef6ea32ac9246fd112a7561a3fd08445587ddc
|
refs/heads/master
| 2021-05-06T10:04:01.717500
| 2017-12-13T07:45:23
| 2017-12-13T07:45:23
| 109,802,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
import numpy as np
import cv2
cap = cv2.VideoCapture('/Users/itaegyeong/Desktop/camshifttest.mov')
ret, frame = cap.read()
r, h, c, w = 80, 5, 159, 5 # simply hardcoded the values
track_window = (c, r, w, h)
# set up the ROI for tracking
roi = frame[r:r + h, c:c + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
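# Tracking loop: calcBackProject scores each pixel of the new frame against
# the hue histogram of the initial ROI, and meanShift then shifts
# track_window toward the densest region of that probability image until
# term_crit is satisfied.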
while (1):
ret, frame = cap.read()
if ret == True:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
ret, track_window = cv2.meanShift(dst, track_window, term_crit)
x, y, w, h = track_window
img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
cv2.imshow('img2', img2)
k = cv2.waitKey(60) & 0xff
if k == 27:
break
        elif k != 255:  # 255 means no key was pressed; avoid writing a file every frame
            cv2.imwrite(chr(k) + ".jpg", img2)
else:
break
cv2.destroyAllWindows()
|
[
"taegyeong7202@gmail.com"
] |
taegyeong7202@gmail.com
|
eddd244d5be28ac7fdcecb64007f923d79223ba1
|
0faef5908abdbef2a950b2c857e68e42246cb4f6
|
/gsuite_reports_rules/gsuite_brute_force_login.py
|
1eedb9291d8ad57b878b7476ffe8d000bfa0b337
|
[
"Apache-2.0"
] |
permissive
|
kennycumppanther/panther-analysis
|
254f6b21fab093de195d832f04fd427374f00b58
|
78d09332a3851c6b2b47c2d411b8ab4188bf54a1
|
refs/heads/master
| 2022-12-23T10:59:07.213436
| 2020-09-30T15:45:35
| 2020-09-30T15:45:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
from panther_oss_helpers import evaluate_threshold # pylint: disable=import-error
# TODO change to native thresholding once support is added
# tentatively slated for 1.7
THRESH = 10
THRESH_TTL = 600 # 10 minutes
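# evaluate_threshold keeps a rolling counter per actor email (see rule()
# below): the rule only fires once an actor accumulates THRESH
# login_failure events within THRESH_TTL seconds.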
def rule(event):
# Filter events
if event['id'].get('applicationName') != 'login':
return False
# Pattern match this event to the recon actions
for detail in event.get('events', [{}]):
if detail.get('type') == 'login' and detail.get(
'name') == 'login_failure':
return evaluate_threshold(
'{}-GSuiteLoginFailedCounter'.format(
event.get('actor', {}).get('email')),
THRESH,
THRESH_TTL,
)
return False
def title(event):
return 'User [{}] exceeded the failed logins threshold'.format(
event.get('actor', {}).get('email'))
|
[
"noreply@github.com"
] |
kennycumppanther.noreply@github.com
|
dea526edd69b48eaa64cbe09fe86f64a4a19d3b4
|
451e06f79d47bc224a543a098d946ad4b80af490
|
/app_registration/backends/simple/views.py
|
07097999e85590ba644b2a9c75aed80baf088a11
|
[] |
no_license
|
emsia/TECS
|
dd9de33a9838409035e4a8d527dc5893df7660c2
|
1db1f0c1ab802644ff507c544d6f92285988143a
|
refs/heads/master
| 2020-06-04T08:02:35.593980
| 2016-01-04T06:33:14
| 2016-01-04T06:33:14
| 13,290,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth.models import User
from registration import signals
from registration.views import RegistrationView as BaseRegistrationView
class RegistrationView(BaseRegistrationView):
"""
A registration backend which implements the simplest possible
workflow: a user supplies a username, email address and password
(the bare minimum for a useful account), and is immediately signed
    up and logged in.
"""
def register(self, request, **cleaned_data):
username, email, password = cleaned_data['username'], cleaned_data['email'], cleaned_data['password1']
User.objects.create_user(username, email, password)
new_user = authenticate(username=username, password=password)
login(request, new_user)
signals.user_registered.send(sender=self.__class__,
user=new_user,
request=request)
return new_user
def registration_allowed(self, request):
"""
Indicate whether account registration is currently permitted,
based on the value of the setting ``REGISTRATION_OPEN``. This
is determined as follows:
* If ``REGISTRATION_OPEN`` is not specified in settings, or is
set to ``True``, registration is permitted.
* If ``REGISTRATION_OPEN`` is both specified and set to
``False``, registration is not permitted.
"""
return getattr(settings, 'REGISTRATION_OPEN', True)
def get_success_url(self, request, user):
return (user.get_absolute_url(), (), {})
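# Example wiring (a sketch; the URL pattern and name are illustrative):
#   from django.conf.urls import url
#   from app_registration.backends.simple.views import RegistrationView
#   urlpatterns = [
#       url(r'^register/$', RegistrationView.as_view(),
#           name='registration_register'),
#   ]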
|
[
"esia.rizal@gmail.com"
] |
esia.rizal@gmail.com
|
999474fd03e44566516cc597a511df5c01ae83f1
|
79189e18b1f73cb1adbd6da7afd9646e06189b01
|
/gitprivacy/timestamp.py
|
c655e52936c822ee962456d21529c5e2d39d37c7
|
[
"BSD-2-Clause"
] |
permissive
|
Grotax/pyGitPrivacy
|
dc7a9788f9a2aa4bd2884131a108a77332eaf96a
|
72ac006ea578a8b59606a08440a138bd2c05fe1b
|
refs/heads/master
| 2020-04-01T13:28:17.898553
| 2019-01-18T11:02:49
| 2019-01-18T11:02:49
| 153,253,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,399
|
py
|
"""defines git timestamps"""
import time
import datetime
import re
import itertools
import random
import calendar
class TimeStamp:
""" Class for dealing with git timestamps"""
def __init__(self, pattern="s", limit=False, mode="simple"):
super(TimeStamp, self).__init__()
try:
foo_bar = re.search('([0-9]+)-([0-9]+)', str(limit))
self.limit = [int(foo_bar.group(1)), int(foo_bar.group(2))]
except AttributeError:
self.limit = False
self.mode = mode
self.pattern = pattern
@staticmethod
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
first, second = itertools.tee(iterable)
next(second, None)
return zip(first, second)
@staticmethod
def utc_now():
""" time in utc + offset"""
utc_offset_sec = time.altzone if time.localtime().tm_isdst else time.timezone
utc_offset = datetime.timedelta(seconds=-utc_offset_sec)
return datetime.datetime.utcnow().replace(tzinfo=datetime.timezone(offset=utc_offset)).strftime("%a %b %d %H:%M:%S %Y %z")
@staticmethod
def now():
"""local time + offset"""
utc_offset_sec = time.altzone if time.localtime().tm_isdst else time.timezone
utc_offset = datetime.timedelta(seconds=-utc_offset_sec)
return datetime.datetime.now().replace(tzinfo=datetime.timezone(offset=utc_offset)).strftime("%a %b %d %H:%M:%S %Y %z")
@staticmethod
def get_timezone(timestamp):
"""returns list of timestamp and corresponding timezone"""
timezone = datetime.datetime.strptime(timestamp, "%a %b %d %H:%M:%S %Y %z").strftime("%z")
return [timestamp, timezone]
@staticmethod
def simple(timestamp):
"""parses timestamp for anonymizing Repo"""
try:
date = datetime.datetime.strptime(timestamp, "%d.%m.%Y %H:%M:%S %z")
        except ValueError:
date = datetime.datetime.strptime(timestamp, "%a %b %d %H:%M:%S %Y %z")
return date.strftime("%d.%m.%Y %H:%M:%S %z")
@staticmethod
def to_string(timestamp, git_like=False):
"""converts timestamp to string"""
if git_like:
return timestamp.strftime("%a %b %d %H:%M:%S %Y %z")
return timestamp.strftime("%d.%m.%Y %H:%M:%S %z")
def datelist(self, start_date, end_date, amount):
""" returns datelist """
start = datetime.datetime.strptime(start_date, "%d.%m.%Y %H:%M:%S %z")
end = datetime.datetime.strptime(end_date, "%d.%m.%Y %H:%M:%S %z")
diff = (end - start) / (amount - 1)
datelist = []
current_date = start
datelist.append(self.to_string(current_date))
for i in range(amount - 2):
current_date += diff
datelist.append(self.to_string(current_date))
datelist.append(self.to_string(end))
return datelist
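    # Example (illustrative values): datelist("01.01.2020 00:00:00 +0000",
    # "01.01.2020 06:00:00 +0000", 4) yields four stamps spaced two hours
    # apart, from midnight through 06:00.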
def reduce(self, input_timestamp):
"""replaces the values specifed by the pattern
y = Year
M = Month
d = day
h = hour
m = minute
s = second"""
try:
timestamp = datetime.datetime.strptime(input_timestamp, "%a %b %d %H:%M:%S %Y %z")
except TypeError:
timestamp = input_timestamp
if "y" in self.pattern:
# MIN-year: 1970 and MAX-year: 2099
timestamp = timestamp.replace(year=random.randrange(1970, 2099, 1))
if "M" in self.pattern:
timestamp = timestamp.replace(month=random.randrange(1, 12, 1))
if "d" in self.pattern:
max_day = calendar.monthrange(timestamp.year, timestamp.month)[1]
timestamp = timestamp.replace(day=random.randrange(1, max_day, 1))
if "h" in self.pattern:
if self.limit is False:
timestamp = timestamp.replace(hour=random.randrange(1, 24, 1))
else:
timestamp = timestamp.replace(hour=random.randrange(self.limit[0], self.limit[1], 1))
if "m" in self.pattern:
timestamp = timestamp.replace(minute=random.randrange(1, 60, 1))
if "s" in self.pattern:
timestamp = timestamp.replace(second=random.randrange(1, 60, 1))
return timestamp
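    # Example (illustrative values): with pattern="hms" and limit="9-17",
    # reduce() keeps the calendar date but redraws the hour from 9..16 and
    # the minute and second from 1..59.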
@staticmethod
def custom(year, month, day, hour, minute, second, timezone): # pylint: disable=too-many-arguments
"""Some custom time"""
utc_offset = datetime.timedelta(hours=timezone)
time_stamp = datetime.datetime(year,
month,
day,
hour,
minute,
second).replace(
tzinfo=datetime.timezone(offset=utc_offset)).strftime("%a %b %d %H:%M:%S %Y %z")
return time_stamp
def plus_hour(self, timestamp, hours):
"""adds hour to timestamp and returns"""
timestamp = datetime.datetime.strptime(timestamp, "%a %b %d %H:%M:%S %Y %z")
timestamp += datetime.timedelta(hours=hours)
return timestamp.strftime("%a %b %d %H:%M:%S %Y %z")
@staticmethod
def average(stamp_list):
"""adds hour to timestamp and returns"""
list_of_dates = []
for first, second in stamp_list:
stamp_first = datetime.datetime.strptime(first, "%a %b %d %H:%M:%S %Y %z")
stamp_second = datetime.datetime.strptime(second, "%a %b %d %H:%M:%S %Y %z")
list_of_dates.append(stamp_first)
list_of_dates.append(stamp_second)
timedeltas = [list_of_dates[i-1]-list_of_dates[i] for i in range(1, len(list_of_dates))]
average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(timedeltas)
return average_timedelta
@staticmethod
def seconds_to_gitstamp(seconds, time_zone):
""" time in utc + offset"""
return datetime.datetime.fromtimestamp(seconds, datetime.timezone(datetime.timedelta(seconds=-time_zone))).strftime("%a %b %d %H:%M:%S %Y %z")
def get_next_timestamp(self, repo):
""" returns the next timestamp"""
if self.mode == "reduce":
stamp = self.reduce(self.now())
return stamp
if self.mode == "simple":
commit_id = repo.git.rev_list(repo.active_branch.name).splitlines()[1]
commit = repo.commit(commit_id)
last_timestamp = self.seconds_to_gitstamp(commit.authored_date, commit.author_tz_offset)
return self.plus_hour(last_timestamp, 1)
if self.mode == "average":
commits = repo.git.rev_list(repo.active_branch.name).splitlines()
list_of_stamps = []
for a, b in self.pairwise(commits):
list_of_stamps.append([self.seconds_to_gitstamp(repo.commit(a).authored_date, repo.commit(a).author_tz_offset),
self.seconds_to_gitstamp(repo.commit(b).authored_date, repo.commit(b).author_tz_offset)])
            # seconds_to_gitstamp returns a string; parse it back into a
            # datetime so the averaged timedelta can be added to it
            last_commit = repo.commit(commits[1])
            last_timestamp = datetime.datetime.strptime(
                self.seconds_to_gitstamp(last_commit.authored_date,
                                         last_commit.author_tz_offset),
                "%a %b %d %H:%M:%S %Y %z")
            next_stamp = last_timestamp + self.average(list_of_stamps)
            return next_stamp.strftime("%a %b %d %H:%M:%S %Y %z")
return None
|
[
"info@b-brahmer.de"
] |
info@b-brahmer.de
|
b9d391af6cb9d1c2b13c6ea95b0a1aec6ff83535
|
047fb1bf7954a2de4bd91c5f2403d8d6b203108c
|
/photo/productupload/migrations/0005_auto_20200812_0243.py
|
fc5144b5c952dc8992c1904467293041b1891976
|
[] |
no_license
|
vidhyaen/django
|
333aa2528fefe7fffda65693e0b4c19c9d2a8fca
|
3bddbb781a443e2f07db2a4e8ddf86cfe9bcb604
|
refs/heads/master
| 2023-04-05T06:07:43.588199
| 2021-04-07T12:39:02
| 2021-04-07T12:39:02
| 355,538,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# Generated by Django 3.0.7 on 2020-08-11 21:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('productupload', '0004_auto_20200812_0233'),
]
operations = [
migrations.AlterField(
model_name='productimage',
name='url',
field=models.ImageField(blank=True, null=True, upload_to='media/photos/'),
),
]
|
[
"envidhya99@gmail.com"
] |
envidhya99@gmail.com
|
c5b45087c548c99af18b87637c0194c599f18048
|
158acfe1fb6e12e1f57f11e2690c74b0a9000644
|
/juju/client/_client11.py
|
baa709ad4b8f6a883b9db16bd707f880292c219d
|
[
"Apache-2.0"
] |
permissive
|
ycliuhw/python-libjuju
|
9c8329a2da3ca3d5a28ecce1c9c63f8b26103eba
|
271e0b65c246e88ae15ac65a8da8e71e72b4c53c
|
refs/heads/master
| 2020-04-16T21:51:16.657165
| 2020-01-10T16:16:45
| 2020-01-10T16:16:45
| 165,942,748
| 0
| 0
|
Apache-2.0
| 2019-01-16T00:01:53
| 2019-01-16T00:01:53
| null |
UTF-8
|
Python
| false
| false
| 91,751
|
py
|
# DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py.
# Changes will be overwritten/lost when the file is regenerated.
from juju.client.facade import Type, ReturnMapping
from juju.client._definitions import *
class ApplicationFacade(Type):
name = 'Application'
version = 11
schema = {'definitions': {'AddApplicationUnits': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'attach-storage': {'items': {'type': 'string'},
'type': 'array'},
'num-units': {'type': 'integer'},
'placement': {'items': {'$ref': '#/definitions/Placement'},
'type': 'array'},
'policy': {'type': 'string'}},
'required': ['application',
'num-units',
'placement'],
'type': 'object'},
'AddApplicationUnitsResults': {'additionalProperties': False,
'properties': {'units': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['units'],
'type': 'object'},
'AddRelation': {'additionalProperties': False,
'properties': {'endpoints': {'items': {'type': 'string'},
'type': 'array'},
'via-cidrs': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['endpoints'],
'type': 'object'},
'AddRelationResults': {'additionalProperties': False,
'properties': {'endpoints': {'patternProperties': {'.*': {'$ref': '#/definitions/CharmRelation'}},
'type': 'object'}},
'required': ['endpoints'],
'type': 'object'},
'ApplicationCharmRelations': {'additionalProperties': False,
'properties': {'application': {'type': 'string'}},
'required': ['application'],
'type': 'object'},
'ApplicationCharmRelationsResults': {'additionalProperties': False,
'properties': {'charm-relations': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['charm-relations'],
'type': 'object'},
'ApplicationConfigSet': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'config': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'generation': {'type': 'string'}},
'required': ['application',
'generation',
'config'],
'type': 'object'},
'ApplicationConfigSetArgs': {'additionalProperties': False,
'properties': {'Args': {'items': {'$ref': '#/definitions/ApplicationConfigSet'},
'type': 'array'}},
'required': ['Args'],
'type': 'object'},
'ApplicationConfigUnsetArgs': {'additionalProperties': False,
'properties': {'Args': {'items': {'$ref': '#/definitions/ApplicationUnset'},
'type': 'array'}},
'required': ['Args'],
'type': 'object'},
'ApplicationConstraint': {'additionalProperties': False,
'properties': {'constraints': {'$ref': '#/definitions/Value'},
'error': {'$ref': '#/definitions/Error'}},
'required': ['constraints'],
'type': 'object'},
'ApplicationDeploy': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'attach-storage': {'items': {'type': 'string'},
'type': 'array'},
'channel': {'type': 'string'},
'charm-url': {'type': 'string'},
'config': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'config-yaml': {'type': 'string'},
'constraints': {'$ref': '#/definitions/Value'},
'devices': {'patternProperties': {'.*': {'$ref': '#/definitions/Constraints'}},
'type': 'object'},
'endpoint-bindings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'num-units': {'type': 'integer'},
'placement': {'items': {'$ref': '#/definitions/Placement'},
'type': 'array'},
'policy': {'type': 'string'},
'resources': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'series': {'type': 'string'},
'storage': {'patternProperties': {'.*': {'$ref': '#/definitions/Constraints'}},
'type': 'object'}},
'required': ['application',
'series',
'charm-url',
'channel',
'num-units',
'config-yaml',
'constraints'],
'type': 'object'},
'ApplicationDestroy': {'additionalProperties': False,
'properties': {'application': {'type': 'string'}},
'required': ['application'],
'type': 'object'},
'ApplicationExpose': {'additionalProperties': False,
'properties': {'application': {'type': 'string'}},
'required': ['application'],
'type': 'object'},
'ApplicationGet': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'branch': {'type': 'string'}},
'required': ['application', 'branch'],
'type': 'object'},
'ApplicationGetArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/ApplicationGet'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'ApplicationGetConfigResults': {'additionalProperties': False,
'properties': {'Results': {'items': {'$ref': '#/definitions/ConfigResult'},
'type': 'array'}},
'required': ['Results'],
'type': 'object'},
'ApplicationGetConstraintsResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ApplicationConstraint'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'ApplicationGetResults': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'application-config': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'channel': {'type': 'string'},
'charm': {'type': 'string'},
'config': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'constraints': {'$ref': '#/definitions/Value'},
'endpoint-bindings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'series': {'type': 'string'}},
'required': ['application',
'charm',
'config',
'constraints',
'series',
'channel'],
'type': 'object'},
'ApplicationInfo': {'additionalProperties': False,
'properties': {'channel': {'type': 'string'},
'charm': {'type': 'string'},
'constraints': {'$ref': '#/definitions/Value'},
'endpoint-bindings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'exposed': {'type': 'boolean'},
'principal': {'type': 'boolean'},
'remote': {'type': 'boolean'},
'series': {'type': 'string'},
'tag': {'type': 'string'}},
'required': ['tag',
'principal',
'exposed',
'remote'],
'type': 'object'},
'ApplicationInfoResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'$ref': '#/definitions/ApplicationInfo'}},
'type': 'object'},
'ApplicationInfoResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ApplicationInfoResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'ApplicationMergeBindings': {'additionalProperties': False,
'properties': {'application-tag': {'type': 'string'},
'bindings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'force': {'type': 'boolean'}},
'required': ['application-tag',
'bindings',
'force'],
'type': 'object'},
'ApplicationMergeBindingsArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/ApplicationMergeBindings'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'ApplicationMetricCredential': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'metrics-credentials': {'items': {'type': 'integer'},
'type': 'array'}},
'required': ['application',
'metrics-credentials'],
'type': 'object'},
'ApplicationMetricCredentials': {'additionalProperties': False,
'properties': {'creds': {'items': {'$ref': '#/definitions/ApplicationMetricCredential'},
'type': 'array'}},
'required': ['creds'],
'type': 'object'},
'ApplicationOfferDetails': {'additionalProperties': False,
'properties': {'application-description': {'type': 'string'},
'bindings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'endpoints': {'items': {'$ref': '#/definitions/RemoteEndpoint'},
'type': 'array'},
'offer-name': {'type': 'string'},
'offer-url': {'type': 'string'},
'offer-uuid': {'type': 'string'},
'source-model-tag': {'type': 'string'},
'spaces': {'items': {'$ref': '#/definitions/RemoteSpace'},
'type': 'array'},
'users': {'items': {'$ref': '#/definitions/OfferUserDetails'},
'type': 'array'}},
'required': ['source-model-tag',
'offer-uuid',
'offer-url',
'offer-name',
'application-description'],
'type': 'object'},
'ApplicationSet': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'branch': {'type': 'string'},
'options': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'}},
'required': ['application',
'branch',
'options'],
'type': 'object'},
'ApplicationSetCharm': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'channel': {'type': 'string'},
'charm-url': {'type': 'string'},
'config-settings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'config-settings-yaml': {'type': 'string'},
'endpoint-bindings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'force': {'type': 'boolean'},
'force-series': {'type': 'boolean'},
'force-units': {'type': 'boolean'},
'generation': {'type': 'string'},
'resource-ids': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'storage-constraints': {'patternProperties': {'.*': {'$ref': '#/definitions/StorageConstraints'}},
'type': 'object'}},
'required': ['application',
'generation',
'charm-url',
'channel',
'force',
'force-units',
'force-series'],
'type': 'object'},
'ApplicationUnexpose': {'additionalProperties': False,
'properties': {'application': {'type': 'string'}},
'required': ['application'],
'type': 'object'},
'ApplicationUnset': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'branch': {'type': 'string'},
'options': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['application',
'branch',
'options'],
'type': 'object'},
'ApplicationUpdate': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'charm-url': {'type': 'string'},
'constraints': {'$ref': '#/definitions/Value'},
'force': {'type': 'boolean'},
'force-charm-url': {'type': 'boolean'},
'force-series': {'type': 'boolean'},
'generation': {'type': 'string'},
'min-units': {'type': 'integer'},
'settings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'settings-yaml': {'type': 'string'}},
'required': ['application',
'charm-url',
'force-charm-url',
'force-series',
'force',
'settings-yaml',
'generation'],
'type': 'object'},
'ApplicationsDeploy': {'additionalProperties': False,
'properties': {'applications': {'items': {'$ref': '#/definitions/ApplicationDeploy'},
'type': 'array'}},
'required': ['applications'],
'type': 'object'},
'CharmRelation': {'additionalProperties': False,
'properties': {'interface': {'type': 'string'},
'limit': {'type': 'integer'},
'name': {'type': 'string'},
'optional': {'type': 'boolean'},
'role': {'type': 'string'},
'scope': {'type': 'string'}},
'required': ['name',
'role',
'interface',
'optional',
'limit',
'scope'],
'type': 'object'},
'ConfigResult': {'additionalProperties': False,
'properties': {'config': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'error': {'$ref': '#/definitions/Error'}},
'required': ['config'],
'type': 'object'},
'Constraints': {'additionalProperties': False,
'properties': {'Count': {'type': 'integer'},
'Pool': {'type': 'string'},
'Size': {'type': 'integer'}},
'required': ['Pool', 'Size', 'Count'],
'type': 'object'},
'ConsumeApplicationArg': {'additionalProperties': False,
'properties': {'ApplicationOfferDetails': {'$ref': '#/definitions/ApplicationOfferDetails'},
'application-alias': {'type': 'string'},
'application-description': {'type': 'string'},
'bindings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'endpoints': {'items': {'$ref': '#/definitions/RemoteEndpoint'},
'type': 'array'},
'external-controller': {'$ref': '#/definitions/ExternalControllerInfo'},
'macaroon': {'$ref': '#/definitions/Macaroon'},
'offer-name': {'type': 'string'},
'offer-url': {'type': 'string'},
'offer-uuid': {'type': 'string'},
'source-model-tag': {'type': 'string'},
'spaces': {'items': {'$ref': '#/definitions/RemoteSpace'},
'type': 'array'},
'users': {'items': {'$ref': '#/definitions/OfferUserDetails'},
'type': 'array'}},
'required': ['source-model-tag',
'offer-uuid',
'offer-url',
'offer-name',
'application-description',
'ApplicationOfferDetails'],
'type': 'object'},
'ConsumeApplicationArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/ConsumeApplicationArg'},
'type': 'array'}},
'type': 'object'},
'DestroyApplicationInfo': {'additionalProperties': False,
'properties': {'destroyed-storage': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'},
'destroyed-units': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'},
'detached-storage': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'}},
'type': 'object'},
'DestroyApplicationParams': {'additionalProperties': False,
'properties': {'application-tag': {'type': 'string'},
'destroy-storage': {'type': 'boolean'},
'force': {'type': 'boolean'},
'max-wait': {'type': 'integer'}},
'required': ['application-tag',
'force'],
'type': 'object'},
'DestroyApplicationResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'info': {'$ref': '#/definitions/DestroyApplicationInfo'}},
'type': 'object'},
'DestroyApplicationResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/DestroyApplicationResult'},
'type': 'array'}},
'type': 'object'},
'DestroyApplicationUnits': {'additionalProperties': False,
'properties': {'unit-names': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['unit-names'],
'type': 'object'},
'DestroyApplicationsParams': {'additionalProperties': False,
'properties': {'applications': {'items': {'$ref': '#/definitions/DestroyApplicationParams'},
'type': 'array'}},
'required': ['applications'],
'type': 'object'},
'DestroyConsumedApplicationParams': {'additionalProperties': False,
'properties': {'application-tag': {'type': 'string'},
'force': {'type': 'boolean'},
'max-wait': {'type': 'integer'}},
'required': ['application-tag'],
'type': 'object'},
'DestroyConsumedApplicationsParams': {'additionalProperties': False,
'properties': {'applications': {'items': {'$ref': '#/definitions/DestroyConsumedApplicationParams'},
'type': 'array'}},
'required': ['applications'],
'type': 'object'},
'DestroyRelation': {'additionalProperties': False,
'properties': {'endpoints': {'items': {'type': 'string'},
'type': 'array'},
'force': {'type': 'boolean'},
'max-wait': {'type': 'integer'},
'relation-id': {'type': 'integer'}},
'required': ['relation-id'],
'type': 'object'},
'DestroyUnitInfo': {'additionalProperties': False,
'properties': {'destroyed-storage': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'},
'detached-storage': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'}},
'type': 'object'},
'DestroyUnitParams': {'additionalProperties': False,
'properties': {'destroy-storage': {'type': 'boolean'},
'force': {'type': 'boolean'},
'max-wait': {'type': 'integer'},
'unit-tag': {'type': 'string'}},
'required': ['unit-tag', 'force'],
'type': 'object'},
'DestroyUnitResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'info': {'$ref': '#/definitions/DestroyUnitInfo'}},
'type': 'object'},
'DestroyUnitResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/DestroyUnitResult'},
'type': 'array'}},
'type': 'object'},
'DestroyUnitsParams': {'additionalProperties': False,
'properties': {'units': {'items': {'$ref': '#/definitions/DestroyUnitParams'},
'type': 'array'}},
'required': ['units'],
'type': 'object'},
'Entities': {'additionalProperties': False,
'properties': {'entities': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'}},
'required': ['entities'],
'type': 'object'},
'Entity': {'additionalProperties': False,
'properties': {'tag': {'type': 'string'}},
'required': ['tag'],
'type': 'object'},
'Error': {'additionalProperties': False,
'properties': {'code': {'type': 'string'},
'info': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'message': {'type': 'string'}},
'required': ['message', 'code'],
'type': 'object'},
'ErrorResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'}},
'type': 'object'},
'ErrorResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ErrorResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'ExternalControllerInfo': {'additionalProperties': False,
'properties': {'addrs': {'items': {'type': 'string'},
'type': 'array'},
'ca-cert': {'type': 'string'},
'controller-alias': {'type': 'string'},
'controller-tag': {'type': 'string'}},
'required': ['controller-tag',
'controller-alias',
'addrs',
'ca-cert'],
'type': 'object'},
'Macaroon': {'additionalProperties': False, 'type': 'object'},
'OfferUserDetails': {'additionalProperties': False,
'properties': {'access': {'type': 'string'},
'display-name': {'type': 'string'},
'user': {'type': 'string'}},
'required': ['user',
'display-name',
'access'],
'type': 'object'},
'Placement': {'additionalProperties': False,
'properties': {'directive': {'type': 'string'},
'scope': {'type': 'string'}},
'required': ['scope', 'directive'],
'type': 'object'},
'RelationSuspendedArg': {'additionalProperties': False,
'properties': {'message': {'type': 'string'},
'relation-id': {'type': 'integer'},
'suspended': {'type': 'boolean'}},
'required': ['relation-id',
'message',
'suspended'],
'type': 'object'},
'RelationSuspendedArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/RelationSuspendedArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'RemoteEndpoint': {'additionalProperties': False,
'properties': {'interface': {'type': 'string'},
'limit': {'type': 'integer'},
'name': {'type': 'string'},
'role': {'type': 'string'}},
'required': ['name',
'role',
'interface',
'limit'],
'type': 'object'},
'RemoteSpace': {'additionalProperties': False,
'properties': {'cloud-type': {'type': 'string'},
'name': {'type': 'string'},
'provider-attributes': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'provider-id': {'type': 'string'},
'subnets': {'items': {'$ref': '#/definitions/Subnet'},
'type': 'array'}},
'required': ['cloud-type',
'name',
'provider-id',
'provider-attributes',
'subnets'],
'type': 'object'},
'ScaleApplicationInfo': {'additionalProperties': False,
'properties': {'num-units': {'type': 'integer'}},
'required': ['num-units'],
'type': 'object'},
'ScaleApplicationParams': {'additionalProperties': False,
'properties': {'application-tag': {'type': 'string'},
'force': {'type': 'boolean'},
'scale': {'type': 'integer'},
'scale-change': {'type': 'integer'}},
'required': ['application-tag',
'scale',
'force'],
'type': 'object'},
'ScaleApplicationResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'info': {'$ref': '#/definitions/ScaleApplicationInfo'}},
'type': 'object'},
'ScaleApplicationResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ScaleApplicationResult'},
'type': 'array'}},
'type': 'object'},
'ScaleApplicationsParams': {'additionalProperties': False,
'properties': {'applications': {'items': {'$ref': '#/definitions/ScaleApplicationParams'},
'type': 'array'}},
'required': ['applications'],
'type': 'object'},
'SetConstraints': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'constraints': {'$ref': '#/definitions/Value'}},
'required': ['application', 'constraints'],
'type': 'object'},
'StorageConstraints': {'additionalProperties': False,
'properties': {'count': {'type': 'integer'},
'pool': {'type': 'string'},
'size': {'type': 'integer'}},
'type': 'object'},
'StringResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'type': 'string'}},
'required': ['result'],
'type': 'object'},
'Subnet': {'additionalProperties': False,
'properties': {'cidr': {'type': 'string'},
'life': {'type': 'string'},
'provider-id': {'type': 'string'},
'provider-network-id': {'type': 'string'},
'provider-space-id': {'type': 'string'},
'space-tag': {'type': 'string'},
'status': {'type': 'string'},
'vlan-tag': {'type': 'integer'},
'zones': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['cidr',
'vlan-tag',
'life',
'space-tag',
'zones'],
'type': 'object'},
'UnitsResolved': {'additionalProperties': False,
'properties': {'all': {'type': 'boolean'},
'retry': {'type': 'boolean'},
'tags': {'$ref': '#/definitions/Entities'}},
'type': 'object'},
'UpdateSeriesArg': {'additionalProperties': False,
'properties': {'force': {'type': 'boolean'},
'series': {'type': 'string'},
'tag': {'$ref': '#/definitions/Entity'}},
'required': ['tag', 'force', 'series'],
'type': 'object'},
'UpdateSeriesArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/UpdateSeriesArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'Value': {'additionalProperties': False,
'properties': {'arch': {'type': 'string'},
'container': {'type': 'string'},
'cores': {'type': 'integer'},
'cpu-power': {'type': 'integer'},
'instance-type': {'type': 'string'},
'mem': {'type': 'integer'},
'root-disk': {'type': 'integer'},
'root-disk-source': {'type': 'string'},
'spaces': {'items': {'type': 'string'},
'type': 'array'},
'tags': {'items': {'type': 'string'},
'type': 'array'},
'virt-type': {'type': 'string'},
'zones': {'items': {'type': 'string'},
'type': 'array'}},
'type': 'object'}},
'properties': {'AddRelation': {'properties': {'Params': {'$ref': '#/definitions/AddRelation'},
'Result': {'$ref': '#/definitions/AddRelationResults'}},
'type': 'object'},
'AddUnits': {'properties': {'Params': {'$ref': '#/definitions/AddApplicationUnits'},
'Result': {'$ref': '#/definitions/AddApplicationUnitsResults'}},
'type': 'object'},
'ApplicationsInfo': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationInfoResults'}},
'type': 'object'},
'CharmConfig': {'properties': {'Params': {'$ref': '#/definitions/ApplicationGetArgs'},
'Result': {'$ref': '#/definitions/ApplicationGetConfigResults'}},
'type': 'object'},
'CharmRelations': {'properties': {'Params': {'$ref': '#/definitions/ApplicationCharmRelations'},
'Result': {'$ref': '#/definitions/ApplicationCharmRelationsResults'}},
'type': 'object'},
'Consume': {'properties': {'Params': {'$ref': '#/definitions/ConsumeApplicationArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Deploy': {'properties': {'Params': {'$ref': '#/definitions/ApplicationsDeploy'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Destroy': {'properties': {'Params': {'$ref': '#/definitions/ApplicationDestroy'}},
'type': 'object'},
'DestroyApplication': {'properties': {'Params': {'$ref': '#/definitions/DestroyApplicationsParams'},
'Result': {'$ref': '#/definitions/DestroyApplicationResults'}},
'type': 'object'},
'DestroyConsumedApplications': {'properties': {'Params': {'$ref': '#/definitions/DestroyConsumedApplicationsParams'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'DestroyRelation': {'properties': {'Params': {'$ref': '#/definitions/DestroyRelation'}},
'type': 'object'},
'DestroyUnit': {'properties': {'Params': {'$ref': '#/definitions/DestroyUnitsParams'},
'Result': {'$ref': '#/definitions/DestroyUnitResults'}},
'type': 'object'},
'DestroyUnits': {'properties': {'Params': {'$ref': '#/definitions/DestroyApplicationUnits'}},
'type': 'object'},
'Expose': {'properties': {'Params': {'$ref': '#/definitions/ApplicationExpose'}},
'type': 'object'},
'Get': {'properties': {'Params': {'$ref': '#/definitions/ApplicationGet'},
'Result': {'$ref': '#/definitions/ApplicationGetResults'}},
'type': 'object'},
'GetCharmURL': {'properties': {'Params': {'$ref': '#/definitions/ApplicationGet'},
'Result': {'$ref': '#/definitions/StringResult'}},
'type': 'object'},
'GetConfig': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationGetConfigResults'}},
'type': 'object'},
'GetConstraints': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationGetConstraintsResults'}},
'type': 'object'},
'MergeBindings': {'properties': {'Params': {'$ref': '#/definitions/ApplicationMergeBindingsArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'ResolveUnitErrors': {'properties': {'Params': {'$ref': '#/definitions/UnitsResolved'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'ScaleApplications': {'properties': {'Params': {'$ref': '#/definitions/ScaleApplicationsParams'},
'Result': {'$ref': '#/definitions/ScaleApplicationResults'}},
'type': 'object'},
'Set': {'properties': {'Params': {'$ref': '#/definitions/ApplicationSet'}},
'type': 'object'},
'SetApplicationsConfig': {'properties': {'Params': {'$ref': '#/definitions/ApplicationConfigSetArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetCharm': {'properties': {'Params': {'$ref': '#/definitions/ApplicationSetCharm'}},
'type': 'object'},
'SetConstraints': {'properties': {'Params': {'$ref': '#/definitions/SetConstraints'}},
'type': 'object'},
'SetMetricCredentials': {'properties': {'Params': {'$ref': '#/definitions/ApplicationMetricCredentials'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetRelationsSuspended': {'properties': {'Params': {'$ref': '#/definitions/RelationSuspendedArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Unexpose': {'properties': {'Params': {'$ref': '#/definitions/ApplicationUnexpose'}},
'type': 'object'},
'Unset': {'properties': {'Params': {'$ref': '#/definitions/ApplicationUnset'}},
'type': 'object'},
'UnsetApplicationsConfig': {'properties': {'Params': {'$ref': '#/definitions/ApplicationConfigUnsetArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Update': {'properties': {'Params': {'$ref': '#/definitions/ApplicationUpdate'}},
'type': 'object'},
'UpdateApplicationSeries': {'properties': {'Params': {'$ref': '#/definitions/UpdateSeriesArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'}},
'type': 'object'}
@ReturnMapping(AddRelationResults)
async def AddRelation(self, endpoints=None, via_cidrs=None):
'''
endpoints : typing.Sequence[str]
via_cidrs : typing.Sequence[str]
Returns -> typing.Mapping[str, ~CharmRelation]
'''
if endpoints is not None and not isinstance(endpoints, (bytes, str, list)):
raise Exception("Expected endpoints to be a Sequence, received: {}".format(type(endpoints)))
if via_cidrs is not None and not isinstance(via_cidrs, (bytes, str, list)):
raise Exception("Expected via_cidrs to be a Sequence, received: {}".format(type(via_cidrs)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='AddRelation',
version=11,
params=_params)
_params['endpoints'] = endpoints
_params['via-cidrs'] = via_cidrs
reply = await self.rpc(msg)
return reply
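    # Example usage (a sketch; assumes an established connection `conn` and
    # the usual facade plumbing from juju.client.facade):
    #   app_facade = ApplicationFacade.from_connection(conn)
    #   await app_facade.AddRelation(endpoints=['mysql:db', 'wordpress:db'])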
@ReturnMapping(AddApplicationUnitsResults)
async def AddUnits(self, application=None, attach_storage=None, num_units=None, placement=None, policy=None):
'''
application : str
attach_storage : typing.Sequence[str]
num_units : int
placement : typing.Sequence[~Placement]
policy : str
Returns -> typing.Sequence[str]
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
if attach_storage is not None and not isinstance(attach_storage, (bytes, str, list)):
raise Exception("Expected attach_storage to be a Sequence, received: {}".format(type(attach_storage)))
if num_units is not None and not isinstance(num_units, int):
raise Exception("Expected num_units to be a int, received: {}".format(type(num_units)))
if placement is not None and not isinstance(placement, (bytes, str, list)):
raise Exception("Expected placement to be a Sequence, received: {}".format(type(placement)))
if policy is not None and not isinstance(policy, (bytes, str)):
raise Exception("Expected policy to be a str, received: {}".format(type(policy)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='AddUnits',
version=11,
params=_params)
_params['application'] = application
_params['attach-storage'] = attach_storage
_params['num-units'] = num_units
_params['placement'] = placement
_params['policy'] = policy
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationInfoResults)
async def ApplicationsInfo(self, entities=None):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~ApplicationInfoResult]
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='ApplicationsInfo',
version=11,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetConfigResults)
async def CharmConfig(self, args=None):
'''
args : typing.Sequence[~ApplicationGet]
Returns -> typing.Sequence[~ConfigResult]
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='CharmConfig',
version=11,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationCharmRelationsResults)
async def CharmRelations(self, application=None):
'''
application : str
Returns -> typing.Sequence[str]
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='CharmRelations',
version=11,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def Consume(self, args=None):
'''
args : typing.Sequence[~ConsumeApplicationArg]
Returns -> typing.Sequence[~ErrorResult]
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Consume',
version=11,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def Deploy(self, applications=None):
'''
applications : typing.Sequence[~ApplicationDeploy]
Returns -> typing.Sequence[~ErrorResult]
'''
if applications is not None and not isinstance(applications, (bytes, str, list)):
raise Exception("Expected applications to be a Sequence, received: {}".format(type(applications)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Deploy',
version=11,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Destroy(self, application=None):
'''
application : str
Returns -> None
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Destroy',
version=11,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(DestroyApplicationResults)
async def DestroyApplication(self, applications=None):
'''
applications : typing.Sequence[~DestroyApplicationParams]
Returns -> typing.Sequence[~DestroyApplicationResult]
'''
if applications is not None and not isinstance(applications, (bytes, str, list)):
raise Exception("Expected applications to be a Sequence, received: {}".format(type(applications)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyApplication',
version=11,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def DestroyConsumedApplications(self, applications=None):
'''
applications : typing.Sequence[~DestroyConsumedApplicationParams]
Returns -> typing.Sequence[~ErrorResult]
'''
if applications is not None and not isinstance(applications, (bytes, str, list)):
raise Exception("Expected applications to be a Sequence, received: {}".format(type(applications)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyConsumedApplications',
version=11,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def DestroyRelation(self, endpoints=None, force=None, max_wait=None, relation_id=None):
'''
endpoints : typing.Sequence[str]
force : bool
max_wait : int
relation_id : int
Returns -> None
'''
if endpoints is not None and not isinstance(endpoints, (bytes, str, list)):
raise Exception("Expected endpoints to be a Sequence, received: {}".format(type(endpoints)))
if force is not None and not isinstance(force, bool):
raise Exception("Expected force to be a bool, received: {}".format(type(force)))
if max_wait is not None and not isinstance(max_wait, int):
raise Exception("Expected max_wait to be a int, received: {}".format(type(max_wait)))
if relation_id is not None and not isinstance(relation_id, int):
raise Exception("Expected relation_id to be a int, received: {}".format(type(relation_id)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyRelation',
version=11,
params=_params)
_params['endpoints'] = endpoints
_params['force'] = force
_params['max-wait'] = max_wait
_params['relation-id'] = relation_id
reply = await self.rpc(msg)
return reply
@ReturnMapping(DestroyUnitResults)
async def DestroyUnit(self, units=None):
'''
units : typing.Sequence[~DestroyUnitParams]
Returns -> typing.Sequence[~DestroyUnitResult]
'''
if units is not None and not isinstance(units, (bytes, str, list)):
raise Exception("Expected units to be a Sequence, received: {}".format(type(units)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyUnit',
version=11,
params=_params)
_params['units'] = units
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def DestroyUnits(self, unit_names=None):
'''
unit_names : typing.Sequence[str]
Returns -> None
'''
if unit_names is not None and not isinstance(unit_names, (bytes, str, list)):
raise Exception("Expected unit_names to be a Sequence, received: {}".format(type(unit_names)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='DestroyUnits',
version=11,
params=_params)
_params['unit-names'] = unit_names
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Expose(self, application=None):
'''
application : str
Returns -> None
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Expose',
version=11,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetResults)
async def Get(self, application=None, branch=None):
'''
application : str
branch : str
Returns -> typing.Union[str, typing.Mapping[str, typing.Any], _ForwardRef('Value'), typing.Mapping[str, str]]
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
if branch is not None and not isinstance(branch, (bytes, str)):
raise Exception("Expected branch to be a str, received: {}".format(type(branch)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Get',
version=11,
params=_params)
_params['application'] = application
_params['branch'] = branch
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResult)
async def GetCharmURL(self, application=None, branch=None):
'''
application : str
branch : str
Returns -> typing.Union[_ForwardRef('Error'), str]
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
if branch is not None and not isinstance(branch, (bytes, str)):
raise Exception("Expected branch to be a str, received: {}".format(type(branch)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetCharmURL',
version=11,
params=_params)
_params['application'] = application
_params['branch'] = branch
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetConfigResults)
async def GetConfig(self, entities=None):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~ConfigResult]
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetConfig',
version=11,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationGetConstraintsResults)
async def GetConstraints(self, entities=None):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~ApplicationConstraint]
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='GetConstraints',
version=11,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def MergeBindings(self, args=None):
'''
args : typing.Sequence[~ApplicationMergeBindings]
Returns -> typing.Sequence[~ErrorResult]
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='MergeBindings',
version=11,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def ResolveUnitErrors(self, all_=None, retry=None, tags=None):
'''
all_ : bool
retry : bool
tags : Entities
Returns -> typing.Sequence[~ErrorResult]
'''
if all_ is not None and not isinstance(all_, bool):
raise Exception("Expected all_ to be a bool, received: {}".format(type(all_)))
if retry is not None and not isinstance(retry, bool):
raise Exception("Expected retry to be a bool, received: {}".format(type(retry)))
if tags is not None and not isinstance(tags, (dict, Entities)):
raise Exception("Expected tags to be a Entities, received: {}".format(type(tags)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='ResolveUnitErrors',
version=11,
params=_params)
_params['all'] = all_
_params['retry'] = retry
_params['tags'] = tags
reply = await self.rpc(msg)
return reply
@ReturnMapping(ScaleApplicationResults)
async def ScaleApplications(self, applications=None):
'''
applications : typing.Sequence[~ScaleApplicationParams]
Returns -> typing.Sequence[~ScaleApplicationResult]
'''
if applications is not None and not isinstance(applications, (bytes, str, list)):
raise Exception("Expected applications to be a Sequence, received: {}".format(type(applications)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='ScaleApplications',
version=11,
params=_params)
_params['applications'] = applications
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Set(self, application=None, branch=None, options=None):
'''
application : str
branch : str
options : typing.Mapping[str, str]
Returns -> None
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
if branch is not None and not isinstance(branch, (bytes, str)):
raise Exception("Expected branch to be a str, received: {}".format(type(branch)))
if options is not None and not isinstance(options, dict):
raise Exception("Expected options to be a Mapping, received: {}".format(type(options)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Set',
version=11,
params=_params)
_params['application'] = application
_params['branch'] = branch
_params['options'] = options
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetApplicationsConfig(self, args=None):
'''
args : typing.Sequence[~ApplicationConfigSet]
Returns -> typing.Sequence[~ErrorResult]
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetApplicationsConfig',
version=11,
params=_params)
_params['Args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def SetCharm(self, application=None, channel=None, charm_url=None, config_settings=None, config_settings_yaml=None, endpoint_bindings=None, force=None, force_series=None, force_units=None, generation=None, resource_ids=None, storage_constraints=None):
'''
application : str
channel : str
charm_url : str
config_settings : typing.Mapping[str, str]
config_settings_yaml : str
endpoint_bindings : typing.Mapping[str, str]
force : bool
force_series : bool
force_units : bool
generation : str
resource_ids : typing.Mapping[str, str]
storage_constraints : typing.Mapping[str, ~StorageConstraints]
Returns -> None
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
if channel is not None and not isinstance(channel, (bytes, str)):
raise Exception("Expected channel to be a str, received: {}".format(type(channel)))
if charm_url is not None and not isinstance(charm_url, (bytes, str)):
raise Exception("Expected charm_url to be a str, received: {}".format(type(charm_url)))
if config_settings is not None and not isinstance(config_settings, dict):
raise Exception("Expected config_settings to be a Mapping, received: {}".format(type(config_settings)))
if config_settings_yaml is not None and not isinstance(config_settings_yaml, (bytes, str)):
raise Exception("Expected config_settings_yaml to be a str, received: {}".format(type(config_settings_yaml)))
if endpoint_bindings is not None and not isinstance(endpoint_bindings, dict):
raise Exception("Expected endpoint_bindings to be a Mapping, received: {}".format(type(endpoint_bindings)))
if force is not None and not isinstance(force, bool):
raise Exception("Expected force to be a bool, received: {}".format(type(force)))
if force_series is not None and not isinstance(force_series, bool):
raise Exception("Expected force_series to be a bool, received: {}".format(type(force_series)))
if force_units is not None and not isinstance(force_units, bool):
raise Exception("Expected force_units to be a bool, received: {}".format(type(force_units)))
if generation is not None and not isinstance(generation, (bytes, str)):
raise Exception("Expected generation to be a str, received: {}".format(type(generation)))
if resource_ids is not None and not isinstance(resource_ids, dict):
raise Exception("Expected resource_ids to be a Mapping, received: {}".format(type(resource_ids)))
if storage_constraints is not None and not isinstance(storage_constraints, dict):
raise Exception("Expected storage_constraints to be a Mapping, received: {}".format(type(storage_constraints)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetCharm',
version=11,
params=_params)
_params['application'] = application
_params['channel'] = channel
_params['charm-url'] = charm_url
_params['config-settings'] = config_settings
_params['config-settings-yaml'] = config_settings_yaml
_params['endpoint-bindings'] = endpoint_bindings
_params['force'] = force
_params['force-series'] = force_series
_params['force-units'] = force_units
_params['generation'] = generation
_params['resource-ids'] = resource_ids
_params['storage-constraints'] = storage_constraints
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def SetConstraints(self, application=None, constraints=None):
'''
application : str
constraints : Value
Returns -> None
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
if constraints is not None and not isinstance(constraints, (dict, Value)):
raise Exception("Expected constraints to be a Value, received: {}".format(type(constraints)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetConstraints',
version=11,
params=_params)
_params['application'] = application
_params['constraints'] = constraints
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetMetricCredentials(self, creds=None):
'''
creds : typing.Sequence[~ApplicationMetricCredential]
Returns -> typing.Sequence[~ErrorResult]
'''
if creds is not None and not isinstance(creds, (bytes, str, list)):
raise Exception("Expected creds to be a Sequence, received: {}".format(type(creds)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetMetricCredentials',
version=11,
params=_params)
_params['creds'] = creds
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetRelationsSuspended(self, args=None):
'''
args : typing.Sequence[~RelationSuspendedArg]
Returns -> typing.Sequence[~ErrorResult]
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='SetRelationsSuspended',
version=11,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Unexpose(self, application=None):
'''
application : str
Returns -> None
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Unexpose',
version=11,
params=_params)
_params['application'] = application
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Unset(self, application=None, branch=None, options=None):
'''
application : str
branch : str
options : typing.Sequence[str]
Returns -> None
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
if branch is not None and not isinstance(branch, (bytes, str)):
raise Exception("Expected branch to be a str, received: {}".format(type(branch)))
if options is not None and not isinstance(options, (bytes, str, list)):
raise Exception("Expected options to be a Sequence, received: {}".format(type(options)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Unset',
version=11,
params=_params)
_params['application'] = application
_params['branch'] = branch
_params['options'] = options
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def UnsetApplicationsConfig(self, args=None):
'''
args : typing.Sequence[~ApplicationUnset]
Returns -> typing.Sequence[~ErrorResult]
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='UnsetApplicationsConfig',
version=11,
params=_params)
_params['Args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Update(self, application=None, charm_url=None, constraints=None, force=None, force_charm_url=None, force_series=None, generation=None, min_units=None, settings=None, settings_yaml=None):
'''
application : str
charm_url : str
constraints : Value
force : bool
force_charm_url : bool
force_series : bool
generation : str
min_units : int
settings : typing.Mapping[str, str]
settings_yaml : str
Returns -> None
'''
if application is not None and not isinstance(application, (bytes, str)):
raise Exception("Expected application to be a str, received: {}".format(type(application)))
if charm_url is not None and not isinstance(charm_url, (bytes, str)):
raise Exception("Expected charm_url to be a str, received: {}".format(type(charm_url)))
if constraints is not None and not isinstance(constraints, (dict, Value)):
raise Exception("Expected constraints to be a Value, received: {}".format(type(constraints)))
if force is not None and not isinstance(force, bool):
raise Exception("Expected force to be a bool, received: {}".format(type(force)))
if force_charm_url is not None and not isinstance(force_charm_url, bool):
raise Exception("Expected force_charm_url to be a bool, received: {}".format(type(force_charm_url)))
if force_series is not None and not isinstance(force_series, bool):
raise Exception("Expected force_series to be a bool, received: {}".format(type(force_series)))
if generation is not None and not isinstance(generation, (bytes, str)):
raise Exception("Expected generation to be a str, received: {}".format(type(generation)))
if min_units is not None and not isinstance(min_units, int):
raise Exception("Expected min_units to be a int, received: {}".format(type(min_units)))
if settings is not None and not isinstance(settings, dict):
raise Exception("Expected settings to be a Mapping, received: {}".format(type(settings)))
if settings_yaml is not None and not isinstance(settings_yaml, (bytes, str)):
raise Exception("Expected settings_yaml to be a str, received: {}".format(type(settings_yaml)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='Update',
version=11,
params=_params)
_params['application'] = application
_params['charm-url'] = charm_url
_params['constraints'] = constraints
_params['force'] = force
_params['force-charm-url'] = force_charm_url
_params['force-series'] = force_series
_params['generation'] = generation
_params['min-units'] = min_units
_params['settings'] = settings
_params['settings-yaml'] = settings_yaml
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def UpdateApplicationSeries(self, args=None):
'''
args : typing.Sequence[~UpdateSeriesArg]
Returns -> typing.Sequence[~ErrorResult]
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='UpdateApplicationSeries',
version=11,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
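    # --- Usage sketch (editorial addition, not part of the generated client) ---
    # Every method above follows the same pattern: build a params dict, wrap it
    # in an RPC message (type='Application', version=11), and await self.rpc(msg).
    # A minimal, hypothetical call sequence, assuming an established `connection`
    # object compatible with this facade layer:
    #
    #     facade = ApplicationFacade.from_connection(connection)  # names assumed
    #     await facade.Expose(application='mysql')
    #     result = await facade.Get(application='mysql', branch='')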
|
[
"stickupkid@gmail.com"
] |
stickupkid@gmail.com
|
43f2e4cd48afe1f7c0e752b61f5f5fbec6114870
|
56ad91351e3a089240ff9279c82b819c44308acc
|
/code/utils/regression.py
|
01fc7ac5b48b95f8896881375afc98775f45ac27
|
[] |
no_license
|
ariaaay/project-beta-1
|
a77a840de247f732d784a2b2b5e7c0c6594c1c4c
|
3ef27fb9eeb368bbb3dde3711cac4964cfb31900
|
refs/heads/master
| 2021-01-18T01:03:36.730811
| 2015-12-03T02:53:26
| 2015-12-03T02:53:26
| 46,064,284
| 0
| 0
| null | 2015-11-12T16:06:42
| 2015-11-12T16:06:42
| null |
UTF-8
|
Python
| false
| false
| 39,431
|
py
|
"""
Module to handle all regression functions.
Largely taken from Alex Huth's regression functions
Notes:
Split off predictions from fitting?
Advantages: cleaner code
Disadvantages: less efficient - need to run through chunking code 2x
"""
### --- Imports --- ###
import warnings
import random
import itertools as itools
import time
from functools import reduce  # not a builtin in Python 3; used throughout
import numpy as np
import scipy.stats as _stats
from . import io
from . import utils as _utils
from .Stats import utils as _sutils
### --- Parameters --- ###
DEFAULT_ALPHAS = np.array([0]+[2**x for x in range(10,21)])
# DEFAULT_ALPHAS = np.logspace(0,4,10)
# (no `del x` needed: the comprehension variable does not leak in Python 3)
def compute_vif(X,**kwargs):
"""Compute Variance Inflation Factor for a design matrix
Parameters
----------
X : array-like
design matrix of variables; time x channels
kwargs : dict
named inputs to vmt.regression.ols
Returns
-------
VIF : array-like, 1D
vector of Variance Inflation Factors (one for each channel [column] in X)
"""
n_tps,n_chans = X.shape
if n_chans>n_tps:
raise ValueError("Number of channels cannot be greater than number of time points\n for Variance Inflation Factor computation!")
VIF = np.zeros((n_chans,))
for ic,c in enumerate(X.T):
if len(X.T)>200:
if ic%200==0:
print('Computing VIF for channel %d'%ic)
ci = np.arange(n_chans)!=ic
out = ols(X[:,ci],X[:,ic][:,None])
y_hat = (X[:,ci].dot(out['weights'])).flatten()
R2 = 1-(np.var(X[:,ic]-y_hat)/np.var(X[:,ic]))
VIF[ic] = 1/(1-R2)
return VIF
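# A minimal usage sketch for compute_vif (editorial addition): two nearly
# collinear regressors should show inflated VIFs, an independent one VIF ~ 1.
# Assumes only numpy; `ols` below is used internally.
#
#     rng = np.random.RandomState(0)
#     a = rng.randn(500)
#     X = np.column_stack([a, a + 0.01*rng.randn(500), rng.randn(500)])
#     compute_vif(X)   # -> first two entries >> 1, third ~ 1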
def ols(trn_fs,trn_data,val_fs=None,val_data=None,chunk_sz=5000,dtype=np.single,is_verbose=False,input_weights=None):
"""
Parameters
----------
trn_fs : array-like
feature space array, time x channels; representation of a stimulus
val_fs : array-like
feature space array, time x channels; representation of a stimulus
trn_data : array-like
data array, time x voxels
val_data : array-like
data array, time x voxels
chunk_sz : scalar
maximum number of voxels to analyze at once (constrains matrix multiplication size for large data sets)
Other Parameters
    ----------------
is_verbose : bool
verbose / not
dtype : np.dtype
data type for weights / predictions
input_weights : array-like, 1D
        Observation weights for weighted least squares (useful when some time
        points are known to be noisier than others).
        Default (None) creates identity matrix (i.e. has no effect) (See code)
"""
# Check on first column
if np.sum(trn_fs[:,0])!=trn_fs.shape[0]:
warnings.warn('First column of trn_fs is NOT all ones! Consider including a DC term!')
# Size of matrices
n_tps,n_voxels = trn_data.shape
_,n_channels = trn_fs.shape
# For weighted least squares, if desired
if input_weights is None:
W = np.eye(n_tps)
else:
W = np.diag(1/input_weights**2)
# Compute pseudo-inverse of (weighted) squared design matrix
XtXinv = np.linalg.pinv(trn_fs.T.dot(W.dot(trn_fs)))
if (not val_fs is None) and (not val_data is None):
# Validation data / model supplied implies we want predictions
Xv = val_fs
do_pred = True
if np.sum(Xv[:,0]) != Xv.shape[0]:
warnings.warn('First column of val_fs is NOT all ones! Consider including a DC term!')
# Pre-allocate for predictions, with or without separate validation sequences to predict
is_rpts = np.ndim(val_data)==3
if is_rpts:
n_rpts,n_tps_val,n_voxels_val = val_data.shape
cc = np.zeros((n_rpts,n_voxels),dtype);
else:
n_rpts,(n_tps_val,n_voxels_val) = 0,val_data.shape
cc = np.zeros((n_voxels),dtype);
pred = np.zeros((n_tps_val,n_voxels_val),dtype)
else:
# No Validation data / model supplied
do_pred = False;
# Pre-allocate variables
weights = np.zeros((n_channels,n_voxels),dtype=dtype)
# Divide data into chunks if necessary for memory saving:
n_chunks = np.ceil(n_voxels/float(chunk_sz)).astype(np.uint32)
for iChunk in range(n_chunks):
if is_verbose and (n_chunks>1):
print('Running chunk %d of %d...'%(iChunk+1,n_chunks))
ChIdx = np.arange(chunk_sz) + chunk_sz*iChunk
ChIdx = ChIdx[ChIdx<n_voxels] # clip extra voxels in last run.
Ychunk = trn_data[:,ChIdx]
# 'reduce' takes the dot products of the matrices in order from left to right
weights[:,ChIdx] = reduce(np.dot,[XtXinv,trn_fs.T,W,Ychunk])
if do_pred:
if is_verbose:
print('Obtaining model predictions...')
# Compute correlations btw validation data and model prediction
pred[:,ChIdx] = Xv.dot(weights[:,ChIdx]).astype(dtype)
if is_rpts:
# The transpose here is related to deep mysteries in python. See
cc[:,ChIdx] = np.vstack([_sutils.column_corr(pred[:,ChIdx],val_data[rpt,:,ChIdx].T) for rpt in range(n_rpts)])
else:
cc[ChIdx] = _sutils.column_corr(pred[:,ChIdx],val_data[:,ChIdx])
out = dict(weights=weights)
if do_pred:
out.update(dict(pred=pred,cc=cc))
return out
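# Usage sketch for `ols` (editorial addition): recover known weights from
# synthetic data, with the DC column the function expects prepended.
#
#     rng = np.random.RandomState(0)
#     n_t, n_ch, n_vox = 200, 4, 3
#     X = np.column_stack([np.ones(n_t), rng.randn(n_t, n_ch-1)])
#     B = rng.randn(n_ch, n_vox)
#     Y = X.dot(B) + 0.1*rng.randn(n_t, n_vox)
#     out = ols(X, Y)
#     np.allclose(out['weights'], B, atol=0.1)   # -> True (approximately)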
def ridge(trn_fs, trn_data, val_fs=None, val_data=None, alpha=0,
chunk_sz=5000, dtype=np.single,square_alpha=False,is_verbose=False):
"""Vanilla ridge regression.
Regularization parameter (alpha) must be supplied (for computation of regularization parameter,
see ridge_cv or ridge_boot)
Validation predictions and correlations are returned if val_fs and val_data are provided.
    Parameters
    ----------
    trn_fs, trn_data : array-like
        training feature space (time x channels) and training data (time x voxels)
    val_fs, val_data : array-like, optional
        validation feature space / data; if both are given, predictions and
        prediction correlations are returned as well
    alpha : scalar
        ridge regularization parameter
    Returns
    -------
    out : dict
        'weights' and 'alpha'; plus 'pred' and 'cc' if validation inputs were given
    """
## --- Housekeeping --- ###
n_resp, n_voxels = trn_data.shape
_,n_channels = trn_fs.shape
    n_chunks = np.ceil(n_voxels/float(chunk_sz)).astype(np.int32)
### --- Set up SVD-based weight computation --- ###
U,S,Vt = np.linalg.svd(trn_fs, full_matrices=False)
### --- Set up predictions --- ###
if (not val_fs is None) and (not val_data is None):
do_pred = True
if np.sum(val_fs[:,0]) != val_fs.shape[0]:
warnings.warn('First column of val_fs is NOT all ones! Consider including a DC term!')
# Pre-allocate for predictions, with or without separate validation sequences to predict
is_rpts = np.ndim(val_data)==3
if is_rpts:
n_rpts,n_tps_val,n_voxels_val = val_data.shape
cc = np.zeros((n_rpts,n_voxels),dtype);
else:
n_rpts,(n_tps_val,n_voxels_val) = 0,val_data.shape
cc = np.zeros((n_voxels),dtype);
pred = np.zeros((n_tps_val,n_voxels_val),dtype)
else:
# No Validation data / model supplied
do_pred = False;
### --- Loop over groups of voxels to compute weights & predictions --- ###
wt = np.zeros((n_channels,n_voxels),dtype=dtype)
if is_verbose:
predstr = ' and model predictions...' if do_pred else "..."
print('Computing weights'+predstr)
for iChunk in range(n_chunks):
if is_verbose and (n_chunks>1):
print('Running chunk %d of %d...\n'%(iChunk+1,n_chunks))
ChIdx = np.arange(chunk_sz) + chunk_sz*iChunk
ChIdx = ChIdx[ChIdx<n_voxels] # clip extra voxels in last run.
Ychunk = trn_data[:,ChIdx]
UtYchunk = np.dot(U.T, np.nan_to_num(Ychunk))
if square_alpha:
wt[:,ChIdx] = reduce(np.dot, [Vt.T, np.diag(S/(S**2+alpha**2)), UtYchunk])
else:
wt[:,ChIdx] = reduce(np.dot, [Vt.T, np.diag(S/(S**2+alpha)), UtYchunk])
### --- Find test correlations if validation data is present --- ###
if do_pred:
# Compute correlations btw validation data and model prediction
pred[:,ChIdx] = val_fs.dot(wt[:,ChIdx]).astype(dtype)
nnpred = np.nan_to_num(pred[:,ChIdx])
if is_rpts:
# The transpose here is related to deep mysteries in python. See
cc[:,ChIdx] = np.vstack([_sutils.column_corr(nnpred,val_data[rpt,:,ChIdx].T) for rpt in range(n_rpts)])
else:
cc[ChIdx] = _sutils.column_corr(nnpred,val_data[:,ChIdx])
# Output
    out = dict(
        weights=wt,
        alpha=alpha,
        )
    if do_pred:
        out.update(dict(pred=pred, cc=cc))
    return out
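# Why the SVD form above works (editorial addition): with X = U S Vt, the ridge
# solution (X'X + a*I)^-1 X'y equals Vt' diag(S/(S**2+a)) U'y, which is exactly
# what the chunk loop computes. A quick self-contained check:
#
#     rng = np.random.RandomState(0)
#     X, y, a = rng.randn(50, 8), rng.randn(50, 2), 10.0
#     U, S, Vt = np.linalg.svd(X, full_matrices=False)
#     w_svd = Vt.T.dot(np.diag(S/(S**2 + a))).dot(U.T).dot(y)
#     w_direct = np.linalg.solve(X.T.dot(X) + a*np.eye(8), X.T.dot(y))
#     np.allclose(w_svd, w_direct)   # -> True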
def _fit_ridge_alpha(trn_fs,trn_data,val_fs,val_data,alphas=DEFAULT_ALPHAS,
chunk_sz=5000,is_efficient=True,dtype=np.single, is_verbose=False, pthr=0.005,
square_alpha=False,return_resids=False):
"""Get prediction correlations for a set of alphas on val_data, without ever computing weights on trn_fs
Uses ridge regression to find a linear transformation of `trn_fs` that approximates `trn_data`.
Then tests by comparing the transformation of `val_fs` to `val_data`. This procedure is repeated
for each regularization parameter (alpha) in `alphas`. The correlation between each prediction and
each response for each alpha is returned. Note that the regression weights are NOT returned.
This is more efficient than full ridge regression (with weight computation); it is meant to be
used inside other ridge functions (after data has been split into bootstrap / cross-validation
splits) to find optimal alpha values.
Parameters
----------
trn_fs : array_like, shape (TR, N)
Training stimuli with TR time points and N features. Each feature should be Z-scored across time.
trn_data : array_like, shape (TR, M)
Training responses with TR time points and M responses (voxels, neurons, what-have-you).
Each response should be Z-scored across time.
val_fs : array_like, shape (TP, N)
Test stimuli with TP time points and N features. Each feature should be Z-scored across time.
val_data : array_like, shape (TP, M)
Test responses with TP time points and M responses.
alphas : list or array_like, shape (A,)
Ridge parameters to be tested. Should probably be log-spaced. np.logspace(0, 3, 20) works well.
dtype : np.dtype
All data will be cast as this dtype for computation. np.single is used by default for memory
efficiency.
singcutoff : float [WIP: not implemented yet]
The first step in ridge regression is computing the singular value decomposition (SVD) of the
stimulus trn_fs. If trn_fs is not full rank, some singular values will be approximately equal
to zero and the corresponding singular vectors will be noise. These singular values/vectors
should be removed both for speed (the fewer multiplications the better!) and accuracy. Any
singular values less than singcutoff will be removed.
    Returns
    -------
    cc : array_like, shape (M, A)
        The correlation between each predicted response and each column of val_data for each alpha.
        If return_resids is True, residuals of shape (TP, M, A) are returned as well.
    """
n_tps,n_voxels = trn_data.shape
    n_chunks = np.ceil(n_voxels/float(chunk_sz)).astype(np.int32)
cc = np.zeros((n_voxels,len(alphas)),dtype=dtype)
if return_resids:
resids = np.zeros((n_tps,n_voxels,len(alphas)),dtype=dtype)
pred_A = []
if is_efficient:
# Efficient Ridge regression from A. Huth, Part (1):
# Full multiplication for validation (here, random split of
# training data) prediction is:
# pred = (Xval*Vx) * Dx * (pinv(Ux)*Ychunk) # NOTE: pinv(Ux) = Ux'
# We will pre-compute the first and third terms in parentheses:
# pred = XvalVx * Dx * UxYchunk
if is_verbose:
print('->Doing SVD of stimulus design matrix')
t0 = time.time()
#time.sleep(.01); # To ensure printing?
m,n = trn_fs.shape
if m>n:
Ux,Sx,Vx = _utils._svd(trn_fs,full_matrices=False)
else:
Vx,Sx,Ux = _utils._svd(trn_fs.T,full_matrices=False)
# Switcheroo of Vx and Ux due to transpose of input matrix
Ux = Ux.T
Vx = Vx.T
if is_verbose:
t1 = time.time()
            print('->Done with SVD in %0.2f sec'%(t1-t0))
# For more efficient computation:
#k = len(Sx)
## OR:
## singcutoff = (XX);
## k = sum(sx > singcutoff);
## sx = sx(1:k);
XvalVx = val_fs.dot(Vx.T) # NOTE: IN MATLAB, No Vx', because Matlab leaves V in transposed form!
else:
raise NotImplementedError("Sorry, not done yet!")
    for iChunk in range(n_chunks):
        if is_verbose and (n_chunks>1):
            print('Running chunk %d of %d...\n'%(iChunk+1,n_chunks))
ChIdx = np.arange(chunk_sz) + chunk_sz*iChunk
ChIdx = ChIdx[ChIdx<n_voxels] # clip extra voxels in last run.
Ychunk = trn_data[:,ChIdx]
# Fit model with all lambdas (for subset of voxels)
if not is_efficient:
raise Exception('LAME! no slow reliable ridge implemented.')
#[Wt L] = ridgemulti(X,Ychunk,params.lambdas);
else:
# Efficient Ridge regression from A. Huth, part (2)
# NOTE: weights are never explicitly computed!
UxYchunk = Ux.T.dot(Ychunk)
if is_verbose:
print('Checking model predictions...')
for iA,A in enumerate(alphas):
            if not is_efficient:
                # unreachable: the non-efficient path raises NotImplementedError above
                pred = Xval.dot(Wt[:,:,iA]).astype(np.single)
else:
# Efficient Ridge regression from A. Huth, part (3)
# Normalize lambda by Frobenius norm for stim matrix
aX = A # * norm(X,'fro'); # ... or not
# Need to decide for final whether aX**2 or not
if square_alpha:
Dx = Sx/(Sx**2 + aX**2)
else:
Dx = Sx/(Sx**2 + aX)
                # Compute predictions (XvalVx and UxYchunk computed above)
# (mult diag is slightly faster than matrix multiplication in timing tests)
pred = _utils.mult_diag(Dx, XvalVx, left=False).dot(UxYchunk)
# Compute prediction accuracy (correlations)
cc[ChIdx,iA]=_sutils.column_corr(pred,val_data[:,ChIdx])
if return_resids:
resids[:,ChIdx,iA] = val_data[:,ChIdx]-pred
if return_resids:
return cc,resids
else:
return cc
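# Intended use of _fit_ridge_alpha (editorial addition): score a log-spaced
# alpha grid on a held-out split without ever forming weight matrices. The
# array names below are hypothetical (time x features / time x voxels).
#
#     alphas = np.logspace(0, 4, 10)
#     cc = _fit_ridge_alpha(X_trn, Y_trn, X_val, Y_val, alphas=alphas)
#     # cc has shape (n_voxels, len(alphas)); e.g. pick
#     # alphas[np.argmax(np.nanmean(cc, axis=0))]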
def ridge_cv(trn_fs, trn_data, val_fs=None, val_data=None, alphas=DEFAULT_ALPHAS,
n_resamps=10, n_splits=10, chunk_sz=5000, dtype=np.single,pthr=0.005,
square_alpha=False,is_verbose=False):
"""Compute ridge regression solution for beta weights and predictions of validation data.
Regularization parameter (alpha) is computed by cross-validation within the
training data (trn_fs and trn_data).
Validation predictions and correlations are returned if val_fs and val_data are provided.
Parameters
----------
pthr : float in [0..0.05]
Used to select the alpha parameter. For each alpha tested, pthr is used to define a minimum
significant correlation (r_sig). The function then computes the number voxels with training-set
correlations greater than r_sig minus the number of responses with correlation less than -r_sig
This is a vague metric of non-centered skewness, and works better (?) than the mean correlation
across voxels to select an optimal alpha parameter.
    Uses n_resamps of n_splits contiguous cross-validation splits within the training
    set to pick a single optimal alpha for all voxels, then fits weights on the full
    training set with that alpha.
    """
#def ridge_cv(model,data,n_splits=10,n_resamps=10,alpha=DEFAULT_ALPHAS,efficient=np.nan,):
# (trn_fs, trn_data, val_fs, val_data, alphas, nboots, chunklen, n_chunks, dtype=np.single, corrmin=0.2):
n_resp, n_voxels = trn_data.shape
_,n_channels = trn_fs.shape
    n_chunks = np.ceil(n_voxels/float(chunk_sz)).astype(np.int32)
bestalphas = np.zeros((n_resamps, n_voxels)) ## Will hold the best alphas for each voxel
trn_idx,val_idx = _utils.contig_partition(trn_fs.shape[0],n_splits)
Rcmats = np.zeros((n_voxels,len(alphas),n_resamps))
for iRpt,cvi in enumerate(np.random.permutation(n_splits)[:n_resamps]):
if is_verbose:
print('Running split %d/%d'%(iRpt+1,n_resamps))
ti,vi = trn_idx[cvi],val_idx[cvi]
trn_fs_split = trn_fs[ti,:]
val_fs_split = trn_fs[vi,:]
trn_data_split = trn_data[ti,:]
val_data_split = trn_data[vi,:]
# Run ridge regression to estimate predictions (within training set) for different alphas
Rcmats[:,:,iRpt] = _fit_ridge_alpha(trn_fs_split, trn_data_split, val_fs_split, val_data_split, alphas,
dtype=dtype, chunk_sz=chunk_sz,pthr=pthr,square_alpha=square_alpha)
if is_verbose:
print("Finding best alpha...")
## Find best alpha for each voxel
#trncc_byvox = np.nansum(Rcmats,axis=2)/np.sum(np.logical_not(np.isnan(Rcmats)),axis=2)
trncc_byvox = np.nanmean(Rcmats,axis=2)
    # Taking the mean correlation across voxels is unreliable: too many voxels
    # are poorly predicted (floor effect).
    #mean_cv_corr = np.nanmean(mean_cv_corr_byvox,axis=0)
    #bestalphaind = np.argmax(mean_cv_corr)
    # Thus just count voxels over a (lenient) significance threshold
    #print(len(vi))
sig_thresh = _sutils.pval2r(pthr,len(vi),is_two_sided=False)
    n_sig_vox_byalpha = np.sum(trncc_byvox>sig_thresh,axis=0)-np.sum(trncc_byvox<-sig_thresh,axis=0)
bestalphaind = np.argmax(n_sig_vox_byalpha)
alpha = alphas[bestalphaind]
if is_verbose:
print("Best alpha = %0.3f"%alpha)
## Find weights for each voxel
U,S,Vt = np.linalg.svd(trn_fs, full_matrices=False)
# Loop over groups of voxels
wt = np.zeros((n_channels,n_voxels),dtype=dtype)
###
if (not val_fs is None) and (not val_data is None):
# Validation data / model supplied implies we want predictions
do_pred = True
if np.sum(val_fs[:,0]) != val_fs.shape[0]:
warnings.warn('First column of val_fs is NOT all ones! Consider including a DC term!')
# Pre-allocate for predictions, with or without separate validation sequences to predict
is_rpts = np.ndim(val_data)==3
if is_rpts:
n_rpts,n_tps_val,n_voxels_val = val_data.shape
cc = np.zeros((n_rpts,n_voxels),dtype);
else:
n_rpts,(n_tps_val,n_voxels_val) = 0,val_data.shape
cc = np.zeros((n_voxels),dtype);
pred = np.zeros((n_tps_val,n_voxels_val),dtype)
else:
# No Validation data / model supplied
do_pred = False;
if is_verbose:
predstr = ' and model predictions...' if do_pred else "..."
print('Computing weights'+predstr)
for iChunk in range(n_chunks):
if is_verbose:
print('Running chunk %d of %d...\n'%(iChunk+1,n_chunks))
ChIdx = np.arange(chunk_sz) + chunk_sz*iChunk
ChIdx = ChIdx[ChIdx<n_voxels] # clip extra voxels in last run.
Ychunk = trn_data[:,ChIdx]
UtYchunk = np.dot(U.T, np.nan_to_num(Ychunk))
if square_alpha:
wt[:,ChIdx] = reduce(np.dot, [Vt.T, np.diag(S/(S**2+alpha**2)), UtYchunk])
else:
wt[:,ChIdx] = reduce(np.dot, [Vt.T, np.diag(S/(S**2+alpha)), UtYchunk])
## Find test correlations if validation data is present
if do_pred:
# Compute correlations btw validation data and model prediction
pred[:,ChIdx] = val_fs.dot(wt[:,ChIdx]).astype(dtype)
nnpred = np.nan_to_num(pred[:,ChIdx])
if is_rpts:
# The transpose here is related to deep mysteries in python. See
cc[:,ChIdx] = np.vstack([_sutils.column_corr(nnpred,val_data[rpt,:,ChIdx].T) for rpt in range(n_rpts)])
else:
cc[ChIdx] = _sutils.column_corr(nnpred,val_data[:,ChIdx])
# Output
out = dict(
weights=wt,
alpha=alpha,
n_sig_vox_byalpha=n_sig_vox_byalpha,
        #trncc_byvox=trncc_byvox,
#trncc_byvox_byalpha=Rcmats
)
if not val_data is None:
out['cc'] = cc
return out
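# End-to-end sketch for ridge_cv (editorial addition); array names hypothetical.
#
#     out = ridge_cv(X_trn, Y_trn, val_fs=X_val, val_data=Y_val,
#                    alphas=DEFAULT_ALPHAS, n_resamps=5, n_splits=10)
#     out['weights']   # (n_channels, n_voxels)
#     out['alpha']     # scalar chosen by the significant-voxel-count criterion
#     out['cc']        # validation correlation per voxel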
# def fit_joint_model_fMRI(dbi, trn_fs, trn_data, val_fs=None, val_data=None, reg='ridge_cv',
# add_dc=False, noise_preds=None, lags=[2,3,4], chunk_sz=5000, save_weights=False,
# dtype=np.single, run_local=False, is_overwrite=False, sdir='/auto/k8/mark/fMRIDB/',
# pred_metrics=('cc','ccFull','valPred')):
# # Needs to deal w/ different alphas, potentially with different models being fit.
# # Pass out parallelized model runs if the models haven't been run individually.
# pass
def ridge_joint(trn_fs,trn_data,alphas,val_fs=None,val_data=None,square_alphas=False,chunk_sz=5000,is_verbose=True):
"""
Alphas are required. Should be one per model, we're not fitting them here.
trn_fs should be a list of the matrices of the feature spaces you wish to concatenate.
Stim should already have any lags applied to them
"""
n_resp, n_voxels = trn_data.shape
n_channels = np.sum([tfs.shape[1] for tfs in trn_fs])
    n_chunks = np.ceil(n_voxels/float(chunk_sz)).astype(np.int32)
if square_alphas:
alphas = [a**2 for a in alphas]
    num_train_points = float(trn_data.shape[0])
    num_val_points = float(val_data.shape[0])
n_abc = [np.minimum(*t_fs.shape) for t_fs in trn_fs]
######################################################################################
### --- First up: compute modified covariance matrix & scaled/rotated stimulus --- ###
######################################################################################
# Perform SVD on training sets for all three models
if is_verbose:
print("computing SVD")
U_trn,W_trn,Vt_trn = [],[],[]
for t_fs in trn_fs:
        uu,ww,vv = np.linalg.svd(t_fs, full_matrices=False) # was `trn_stim_A` (undefined name)
U_trn.append(uu)
W_trn.append(ww)
Vt_trn.append(vv)
# The square of Ws (the singular values from the SVD) are the eigenvalues of the covariance matrix but have not been divided by n-1.
L = [ww**2/float(num_train_points-1) for ww in W_trn]
### --- IDK WTF. Ask Wendino. --- ###
## to change: make sure that Ws are in the right units (divided by n-1) when bootstrapping, so that alphas are already in correct units
## at that point you can change the lines below and not divide alpha by (n-1)
# TO DO: make this more than one line for clarity.
w_alpha_trn = [np.diag(np.sqrt(1./(LL + aa)/(num_train_points-1))) for LL,aa in zip(L,alphas)]
#w1_alpha_trn = sqrt(1./(L1+ alphas_A2[0]/(num_train_points-1)))
#w1_alpha_trn = diag(w1_alpha_trn) #%turn it from an array to a matrix
    # Create & combine rotated & scaled stimulus spaces (each is components x time)
    X_prime_trn_t = [wa.dot(vv).dot(t_fs.T) for wa,vv,t_fs in zip(w_alpha_trn,Vt_trn,trn_fs)] # scale by w_alpha_trn and transpose, per the commented example below
    #S1_prime_trn_t = np.dot(np.dot(w1_alpha_trn, Vt1_trn), trn_stim_A.T) #w1_alpha_trn = 1200x1200, Vt1_trn = 1200x1200, trn_stim_A.T = 1200x3737
    Xcomb_prime_trn_t = np.vstack(X_prime_trn_t)
    # Create & modify covariance matrix
    stim_cov_mat_r = Xcomb_prime_trn_t.dot(Xcomb_prime_trn_t.T) / (num_train_points-1) # use the stacked matrix, not the list
cov_diag = np.sqrt(np.diag(stim_cov_mat_r))
full_mat_cov_diag = np.tile(cov_diag, [cov_diag.shape[0], 1])
# re-do w/ simpler syntax?
all_divisor = np.multiply(full_mat_cov_diag.T, full_mat_cov_diag)
corr_mat_r = np.divide(stim_cov_mat_r, all_divisor)
### --- Clean up the correlation matrix to have zeros where we know they exist and use that data to set a threshold --- ###
idx_ct = np.cumsum([0]+n_abc)
idxs = [(a,b) for a,b in zip(idx_ct[:-1],idx_ct[1:])]
# Block diagonal components of covariance matrix
    for n,(ii,jj) in zip(n_abc,idxs):
        corr_mat_r[ii:jj, ii:jj] = np.eye(n) # index both axes of each diagonal block
# Off-diagonal elements: ignore for now?
#for i1,i2 in zip(idxs[:-1],idxs[1:]):
# (ii,jj),(kk,ll) = i1,i2
# ##### --- WORKING HERE - SEE IPYTHON NOTEBOOK --- #########
# upper_right_corr = np.ravel(corr_mat_r[0:nA, nA:])
# middle_right_corr = np.ravel(corr_mat_r[nA:(nA+nB),(nA+nB):])
# right_corr = np.hstack([upper_right_corr, middle_right_corr])
# s_right_corr = argsort(right_corr)
# # WTF is this?
# #corr_cutoff = 954 # WH magic number; something to do with the fact that it's needless to have
# # ALL the block-diagonal diagonals, since we have limited data
# #goodcorrs_idx = np.hstack([s_right_corr[0:corr_cutoff], s_right_corr[-1:-(corr_cutoff+1):-1]])
# new_right_corrs = np.squeeze(np.zeros([s_right_corr.shape[0],1]))
# #new_right_corrs[goodcorrs_idx] = right_corr[goodcorrs_idx]
# new_upper_right_corrs = np.reshape(new_right_corrs[0:(nB+nC)*nA],[nA,nB+nC])
# new_lower_left_corrs = new_upper_right_corrs.T
# new_middle_right_corrs = np.reshape(new_right_corrs[(nB+nC)*nA:],[nB,nC])
# new_middle_left_corrs = new_middle_right_corrs.T
# ##NEED TO CHANGE THIS: REMOVE HARDCODED MATRIX SIZES
# new_corr_mat_r = copy.copy(corr_mat_r)
# new_corr_mat_r[0:nA, nA:]= new_upper_right_corrs
# new_corr_mat_r[nA:(nA+nB), (nA+nB):] = new_middle_right_corrs
# new_corr_mat_r[(nA+nB):, nA:(nA+nB)]= new_middle_left_corrs # More like bottom middle
# new_corr_mat_r[nA:,0:nA] = new_lower_left_corrs #
# new_corr_mat_r[0:nA,0:nA]= np.identity(nA)
# new_corr_mat_r[nA:(nA+nB), nA:(nA+nB)] = np.identity(nB)
# new_corr_mat_r[(nA+nB):,(nA+nB):] = np.identity(nC)
#perform eigenvalue decomposition (WHAT FOR? delete this?)
#w, v = np.linalg.eigh(new_corr_mat_r)
# Invert modified covariance matrix
#corr_r_inv = np.linalg.inv(new_corr_mat_r)
corr_r_inv = np.linalg.inv(corr_mat_r)
#for
##create filter
    dot1 = np.dot(Xcomb_prime_trn_t, trn_data) #precompute for speed (stacked matrix, not the list)
dot2 = np.dot(corr_r_inv, dot1) #precompute for speed
# Weights
h_123_prime = np.divide(dot2, (float(num_train_points-1)))
##create estimated responses from training data
#r_hat = np.dot(X_prime_trn_t.T, h_123_prime) # not usually done...
#if do_pred:
#validation set results
val_stim_A_prime = np.dot(np.dot(w1_alpha_r, Vt1_r), val_stim_A.T)
val_stim_B_prime = np.dot(np.dot(w2_alpha_r, Vh2_r), val_stim_B.T)
val_stim_C_prime = np.dot(np.dot(w3_alpha_r, Vh3_r), val_stim_C.T)
#S1_prime = S1_prime[0:200,:]
#S2_prime = S2_prime[0:200,:]
S123_val_prime_t = np.vstack([val_stim_A_prime, val_stim_B_prime, val_stim_C_prime])
#create validation set correlations
r_hat_val = np.dot(S123_val_prime_t.T, h_123_prime)
#look at performance
valcorr = _sutils.column_corr(r_hat_val, val_data)
out = dict(
#weights=wt,
#alphas=alphas,
#n_sig_vox_byalpha=n_sig_vox_byalpha,
cc=valcorr
)
return out
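# Calling convention for ridge_joint (editorial addition): trn_fs / val_fs are
# *lists* of time x channels matrices (lags already applied), with one alpha
# per feature space. Names below are hypothetical.
#
#     out = ridge_joint([X1_trn, X2_trn], Y_trn, alphas=[a1, a2],
#                       val_fs=[X1_val, X2_val], val_data=Y_val)
#     out['cc']   # validation correlation per voxel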
### --- Alex functions, keep / get rid of... --- ###
def ridge_AH(trn_fs, val_fs, trn_data, val_data, alphas, rval_data=None, rval_fs=None, saveallwts=True,
stop_early=False, dtype=np.single, corrmin=0.2, singcutoff=1e-10):
"""Ridge regresses [trn_fs] onto [trn_data] for each ridge parameter in [alpha]. Returns the fit
linear weights for each alpha, as well as the distributions of correlations on a held-out test
set ([val_fs] and [val_data]). Note that these should NOT be the "real" held-out test set, only a
small test set used to find the optimal ridge parameter.
If an [rval_data] and [rval_fs], or 'real' val_data and val_fs, are given, correlations on that dataset
will be computed and displayed for each alpha.
    If [saveallwts] is True, all weights will be returned. Otherwise only the best weights will be
returned.
If [stop_early] is True, the weights and correlations will be returned as soon as the mean
correlation begins to drop. Does NOT imply early-stopping in the regularized regression sense.
The given [dtype] will be applied to the regression weights as they are computed.
Singular values less than [singcutoff] will be truncated.
"""
## Precalculate SVD to do ridge regression
print "Doing SVD..."
U,S,Vt = np.linalg.svd(trn_fs, full_matrices=False)
ngoodS = np.sum(S>singcutoff)
U = U[:ngoodS]
S = S[:ngoodS]
Vt = Vt[:ngoodS]
print "Dropped %d tiny singular values.. (U is now %s)"%(np.sum(S<singcutoff), str(U.shape))
val_datanorms = np.apply_along_axis(np.linalg.norm, 0, val_data) ## Precompute test response norms
trn_corrs = [] ## Holds training correlations for each alpha
Pcorrs = [] ## Holds test correlations for each alpha
wts = [] ## Holds weights for each alpha
bestcorr = -1.0 ## Keeps track of the best correlation across all alphas
UR = np.dot(U.T, trn_data) ## Precompute this matrix product for speed
for a in alphas:
D = np.diag(S/(S**2+a**2)) ## Reweight singular vectors by the ridge parameter
#wt = reduce(np.dot, [Vt.T, D, U.T, trn_data]).astype(dtype)
wt = reduce(np.dot, [Vt.T, D, UR]).astype(dtype)
pred = np.dot(val_fs, wt) ## Predict test responses
prednorms = np.apply_along_axis(np.linalg.norm, 0, pred) ## Compute predicted test response norms
#trn_corr = np.array([np.corrcoef(val_data[:,ii], pred[:,ii].ravel())[0,1] for ii in range(val_data.shape[1])]) ## Slowly compute correlations
trn_corr = np.array(np.sum(np.multiply(val_data, pred), 0)).squeeze()/(prednorms*val_datanorms) ## Efficiently compute correlations
trn_corr[np.isnan(trn_corr)] = 0
trn_corrs.append(trn_corr)
if saveallwts:
wts.append(wt)
elif trn_corr.mean()>bestcorr:
bestcorr = trn_corr.mean()
wts = wt
print "Training: alpha=%0.3f, mean corr=%0.3f, max corr=%0.3f, over-under(%0.2f)=%d" % (a,
np.mean(trn_corr),
np.max(trn_corr),
corrmin,
(trn_corr>corrmin).sum()-(-trn_corr>corrmin).sum())
## Test alpha on real test set if given
if rval_data is not None and rval_fs is not None:
rpred = np.dot(rval_fs, wt)
Pcorr = np.array([np.corrcoef(rval_data[:,ii], rpred[:,ii].ravel())[0,1] for ii in range(rval_data.shape[1])])
Pcorr[np.isnan(Pcorr)] = 0.0
print "Testing: alpha=%0.3f, mean corr=%0.3f, max corr=%0.3f" % (a, np.mean(Pcorr), np.max(Pcorr))
Pcorrs.append(Pcorr)
if sum(np.isnan(Pcorr)):
raise Exception("nan correlations")
## Quit if mean correlation decreases
if stop_early and trn_corr.mean()<bestcorr:
break
if rval_data is not None and rval_fs is not None:
return wts, trn_corrs, Pcorrs
else:
return wts, trn_corrs
def ridge_corr(trn_fs, val_fs, trn_data, val_data, alphas, normalpha=False, dtype=np.single, corrmin=0.2, singcutoff=1e-10):
"""
Fits only alpha parameter (through n_splits cross-validation splits of data)
AH Notes:
Uses ridge regression to find a linear transformation of [trn_fs] that approximates [trn_data].
Then tests by comparing the transformation of [val_fs] to [val_data]. This procedure is repeated
for each regularization parameter alpha in [alphas]. The correlation between each prediction and
each response for each alpha is returned. Note that the regression weights are NOT returned.
Parameters
----------
trn_fs : array_like, shape (TR, N)
Training stimuli with TR time points and N features. Each feature should be Z-scored across time.
trn_data : array_like, shape (TR, M)
Training responses with TR time points and M responses (voxels, neurons, what-have-you).
Each response should be Z-scored across time.
val_fs : array_like, shape (TP, N)
Test stimuli with TP time points and N features. Each feature should be Z-scored across time.
val_data : array_like, shape (TP, M)
Test responses with TP time points and M responses.
alphas : list or array_like, shape (A,)
Ridge parameters to be tested. Should probably be log-spaced. np.logspace(0, 3, 20) works well.
normalpha : boolean
Whether ridge parameters should be normalized by the Frobenius norm of trn_fs. Good for rigorously
comparing models with different numbers of parameters.
dtype : np.dtype
All data will be cast as this dtype for computation. np.single is used by default for memory
efficiency.
corrmin : float in [0..1]
Purely for display purposes. After each alpha is tested, the number of responses with correlation
greater than corrmin minus the number of responses with correlation less than negative corrmin
will be printed. For long-running regressions this vague metric of non-centered skewness can
give you a rough sense of how well the model is working before it's done.
singcutoff : float
The first step in ridge regression is computing the singular value decomposition (SVD) of the
stimulus trn_fs. If trn_fs is not full rank, some singular values will be approximately equal
to zero and the corresponding singular vectors will be noise. These singular values/vectors
should be removed both for speed (the fewer multiplications the better!) and accuracy. Any
singular values less than singcutoff will be removed.
Returns
-------
trn_corrs : array_like, shape (A, M)
The correlation between each predicted response and each column of val_data for each alpha.
"""
    ## Calculate SVD of stimulus matrix
    print("Doing SVD...")
    try:
        U,S,Vt = np.linalg.svd(trn_fs, full_matrices=False)
    except np.linalg.LinAlgError:
        print("NORMAL SVD FAILED, trying more robust dgesvd..")
        from .svd_dgesvd import svd_dgesvd
        U,S,Vt = svd_dgesvd(trn_fs, full_matrices=False)
    ## Truncate tiny singular values for speed
    origsize = S.shape[0]
    ngoodS = np.sum(S>singcutoff)
    nbad = origsize-ngoodS
    U = U[:,:ngoodS]
    S = S[:ngoodS]
    Vt = Vt[:ngoodS]
    print("Dropped %d tiny singular values.. (U is now %s)"%(nbad, str(U.shape)))
## Normalize alpha by the Frobenius norm
frob = np.sqrt((S**2).sum()) ## Frobenius!
#frob = S.sum()
print "Training stimulus has Frobenius norm: %0.03f"%frob
if normalpha:
nalphas = alphas * frob
else:
nalphas = alphas
## Precompute some products for speed
UR = np.dot(U.T, trn_data) ## Precompute this matrix product for speed
PVh = np.dot(val_fs, Vt.T) ## Precompute this matrix product for speed
val_datanorms = np.apply_along_axis(np.linalg.norm, 0, val_data) ## Precompute test response norms
trn_corrs = [] ## Holds training correlations for each alpha
for na, a in zip(nalphas, alphas):
#D = np.diag(S/(S**2+a**2)) ## Reweight singular vectors by the ridge parameter
D = S/(S**2+na**2) ## Reweight singular vectors by the (normalized?) ridge parameter
pred = np.dot(_utils.mult_diag(D, PVh, left=False), UR) ## Best? (1.75 seconds to prediction in test)
prednorms = np.apply_along_axis(np.linalg.norm, 0, pred) ## Compute predicted test response norms
#trn_corr = np.array([np.corrcoef(val_data[:,ii], pred[:,ii].ravel())[0,1] for ii in range(val_data.shape[1])]) ## Slowly compute correlations
trn_corr = np.array(np.sum(np.multiply(val_data, pred), 0)).squeeze()/(prednorms*val_datanorms) ## Efficiently compute correlations
trn_corr[np.isnan(trn_corr)] = 0
trn_corrs.append(trn_corr)
print "Training: alpha=%0.3f, mean corr=%0.3f, max corr=%0.3f, over-under(%0.2f)=%d" % (a,
np.mean(trn_corr),
np.max(trn_corr),
corrmin,
(trn_corr>corrmin).sum()-(-trn_corr>corrmin).sum())
return trn_corrs
def ridge_boot(trn_fs, trn_data, val_fs, val_data, alphas, nboots, chunklen, n_chunks, dtype=np.single, corrmin=0.2):
"""Uses ridge regression with a bootstrapped held-out set to get a single optimal alpha values for all voxels.
[n_chunks] random chunks of length [chunklen] will be taken from [trn_fs] and [trn_data] for each regression
run. [nboots] total regression runs will be performed.
"""
n_resp, n_voxels = trn_data.shape
bestalphas = np.zeros((nboots, n_voxels)) ## Will hold the best alphas for each voxel
Rcmats = []
for bi in range(nboots):
print "Selecting held-out test set.."
allinds = range(n_resp)
indchunks = zip(*[iter(allinds)]*chunklen)
random.shuffle(indchunks)
heldinds = list(itools.chain(*indchunks[:n_chunks]))
notheldinds = list(set(allinds)-set(heldinds))
trn_fs_split = trn_fs[notheldinds,:]
val_fs_split = trn_fs[heldinds,:]
trn_data_split = trn_data[notheldinds,:]
val_data_split = trn_data[heldinds,:]
## Run ridge regression using this test set
Rwts, trn_corrs = ridge_AH(trn_fs_split, val_fs_split, trn_data_split, val_data_split, alphas,
saveallwts=False, dtype=dtype, corrmin=corrmin)
Rcmat = np.vstack(trn_corrs)
Rcmats.append(Rcmat)
#bestainds = np.array(map(np.argmax, Rcmat.T))
#bestalphas[bi,:] = alphas[bestainds]
print "Finding best alpha.."
## Find best alpha for each voxel
cc = np.dstack(Rcmats)
meanbootcorr = cc.mean(2).mean(1)
bestalphaind = np.argmax(meanbootcorr)
alpha = alphas[bestalphaind]
print "Best alpha = %0.3f"%alpha
## Find weights for each voxel
U,S,Vt = np.linalg.svd(trn_fs, full_matrices=False)
UR = np.dot(U.T, np.nan_to_num(trn_data))
pred = np.zeros(val_data.shape)
wt = reduce(np.dot, [Vt.T, np.diag(S/(S**2+alpha**2)), UR])
pred = np.dot(val_fs, wt)
## Find test correlations
nnpred = np.nan_to_num(pred)
cc = np.nan_to_num(np.array([np.corrcoef(val_data[:,ii], nnpred[:,ii].ravel())[0,1] for ii in range(val_data.shape[1])]))
return wt, cc
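# Usage sketch for ridge_boot (editorial addition): chunklen*n_chunks time
# points are held out per bootstrap to select one alpha for all voxels,
# followed by a single fit/test pass. Array names hypothetical.
#
#     wt, cc = ridge_boot(X_trn, Y_trn, X_val, Y_val,
#                         alphas=np.logspace(0, 3, 20),
#                         nboots=15, chunklen=40, n_chunks=10)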
|
[
"wangy@berkeley.edu"
] |
wangy@berkeley.edu
|
8934e68b32bdf088bd36dc74b98b372e299a8d82
|
6ba5c3d3755d7d295bc57d43c26235b8cb8af553
|
/src/gated_transformers_nlp/utils/gated_transformers/training_utils.py
|
8c6cc9e87042a9c1841baf56dd2dc54fedc1c5ee
|
[
"MIT"
] |
permissive
|
mnguyen0226/gated_transformers_nlp
|
b23e016d9e7f05ffd40c588adefc86c95a80bb52
|
a6c870d4ed0788f15cfdf58c85ed5201dff60ee9
|
refs/heads/main
| 2023-08-18T02:49:17.220571
| 2021-09-26T17:59:12
| 2021-09-26T17:59:12
| 410,442,575
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,443
|
py
|
# Script training the gated transformers model
import torch
import torch.nn as nn
from typing import Tuple
import math
import time
from utils.gated_transformers.seq2seq import Seq2Seq
from utils.gated_transformers.preprocess import (
SRC,
TRG,
device,
train_iterator,
valid_iterator,
)
from utils.gated_transformers.encoder import Encoder
from utils.gated_transformers.decoder import Decoder
# Define encoder and decoder
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
HID_DIM = 256
GATED_ENC_LAYERS = 3
GATED_DEC_LAYERS = 3
GATED_ENC_HEADS = 8
GATED_DEC_HEADS = 8
ENC_PF_DIM = 512
DEC_PF_DIM = 512
ENC_DROPOUT = 0.1
DEC_DROPOUT = 0.1
enc = Encoder(
INPUT_DIM,
HID_DIM,
GATED_ENC_LAYERS,
GATED_ENC_HEADS,
ENC_PF_DIM,
ENC_DROPOUT,
device,
)
dec = Decoder(
OUTPUT_DIM,
HID_DIM,
GATED_DEC_LAYERS,
GATED_DEC_HEADS,
DEC_PF_DIM,
DEC_DROPOUT,
device,
)
# Define whole Seq2Seq encapsulating model
SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
model = Seq2Seq(enc, dec, SRC_PAD_IDX, TRG_PAD_IDX, device).to(device)
def count_parameters(model: nn.Module) -> int:
    """Count the number of trainable parameters
    Parameters
    ----------
    model: nn.Module
        input seq2seq model
    Return
    ----------
    Total number of trainable parameters
    """
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"The model has {count_parameters(model):,} trainable parameters")
def initialize_weights(m: nn.Module):
    """Xavier uniform initialization
    Parameters
    ----------
    m: nn.Module
        input module (applied recursively via model.apply)
    """
if hasattr(m, "weight") and m.weight.dim() > 1:
nn.init.xavier_uniform_(m.weight.data)
model.apply(initialize_weights)
LEARNING_RATE = 0.0005
# Adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# Cross Entropy Loss Function
criterion = nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)
def train(
    model: nn.Module,
    iterator,
    optimizer: torch.optim.Optimizer,
    criterion: nn.Module,
    clip: int,
) -> float:
    """Train by calculating losses and updating parameters
    Parameters
    ----------
    model: nn.Module
        input seq2seq model
    iterator:
        training data iterator yielding batches with .src and .trg
    optimizer: torch.optim.Optimizer
        Adam optimizer
    criterion: nn.Module
        Cross Entropy Loss function
    clip: int
        gradient-norm clipping threshold
    Return
    ----------
    epoch_loss / len(iterator): float
        average loss per batch during training
    """
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
optimizer.zero_grad()
output, _ = model(src, trg[:, :-1])
# output = [batch size, trg len - 1, output dim]
# trg = [batch size, trg len]
output_dim = output.shape[-1]
output = output.contiguous().view(-1, output_dim)
trg = trg[:, 1:].contiguous().view(-1)
# output = [batch size * trg len - 1, output dim]
# trg = [batch size * trg len - 1]
loss = criterion(output, trg)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def evaluate(model: nn.Module, iterator, criterion: nn.Module) -> float:
    """Evaluate: same as training but with no gradient calculation or parameter updates
    Parameters
    ----------
    model: nn.Module
        input seq2seq model
    iterator:
        validation data iterator yielding batches with .src and .trg
    criterion: nn.Module
        Cross Entropy Loss function
    Return
    ----------
    epoch_loss / len(iterator): float
        average loss per batch during validation
    """
model.eval()
epoch_loss = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
output, _ = model(src, trg[:, :-1])
# output = [batch size, trg len - 1, output dim]
# trg = [batch size, trg len]
output_dim = output.shape[-1]
output = output.contiguous().view(-1, output_dim)
trg = trg[:, 1:].contiguous().view(-1)
# output = [batch size * trg len - 1, output dim]
# trg = [batch size * trg len - 1]
loss = criterion(output, trg)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def epoch_time(start_time: float, end_time: float) -> Tuple[int, int]:
    """Tells how long an epoch takes
    Parameters
    ----------
    start_time: float
        start time in seconds (e.g. from time.time())
    end_time: float
        end time in seconds
    Return
    ----------
    elapsed_mins: int
        elapsed whole minutes
    elapsed_secs: int
        remaining seconds
    """
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
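# Quick check of epoch_time (editorial addition): 125.3 elapsed seconds should
# split into 2 whole minutes and 5 remaining seconds.
#     assert epoch_time(0.0, 125.3) == (2, 5)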
N_EPOCHS = 10
CLIP = 1
train_loss = 0
valid_loss = 0
def gated_transformers_main() -> Tuple[float, float, float, float]:
"""Run Training and Evaluating procedure
Return
----------
train_loss: float
training loss of the current epoch
valid_loss: float
validating loss of the current epoch
math.exp(train_loss): float
training PPL
math.exp(valid_loss): float
validating PPL
"""
best_valid_loss = float("inf")
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
valid_loss = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), "gated-tut6-model.pt")
print(f"Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s")
print(
f"\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}"
)
print(
f"\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}"
)
return train_loss, valid_loss, math.exp(train_loss), math.exp(valid_loss)
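# Usage sketch (editorial addition): run the full procedure and report
# perplexities (PPL = exp(cross-entropy loss)).
#
#     if __name__ == "__main__":
#         trn_loss, val_loss, trn_ppl, val_ppl = gated_transformers_main()
#         print(f"final train PPL {trn_ppl:.2f} | valid PPL {val_ppl:.2f}")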
|
[
"mnguyen0226@vt.edu"
] |
mnguyen0226@vt.edu
|
fc44a6c0a0541b435b013973d6089aa56bfdddb9
|
5666697be62a75f6d5676c04bd1849f22c900cc3
|
/WARSZTATY/Zadanie2.py
|
9a92a9885877471d368f14fe61d8b6c94248c830
|
[] |
no_license
|
bartoszkoper/workshop1
|
35ec9c6ec398bd69b71b5b34e58a926d67e79587
|
3f04be9d576c22438fe1af763b419f07b6017c23
|
refs/heads/master
| 2020-09-16T04:03:41.217149
| 2019-11-23T20:35:43
| 2019-11-23T20:35:43
| 223,646,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
"""#### Warsztat: Symulator LOTTO.
Jak wszystkim wiadomo, LOTTO to gra liczbowa polegająca na losowaniu 6 liczb z zakresu 1–49.
Zadaniem gracza jest poprawne wytypowanie losowanych liczb. Nagradzane jest trafienie 3, 4, 5 lub 6 poprawnych liczb.
Napisz program, który:
* zapyta o typowane liczby, przy okazji sprawdzi następujące warunki:
* czy wprowadzony ciąg znaków jest poprawną liczbą,
* czy użytkownik nie wpisał tej liczby już poprzednio,
* czy liczba należy do zakresu 1-49,
* po wprowadzeniu 6 liczb, posortuje je rosnąco i wyświetli na ekranie,
* wylosuje 6 liczb z zakresu i wyświetli je na ekranie,
* poinformuje gracza, czy trafił przynajmniej "trójkę"."""
from random import sample
def lotto():
    print("TROLOLOLOLOLOLO, welcome to LOTTO\n=================================")
    podane_liczby = []
    while len(podane_liczby) < 6:
        print(f"Numbers left to enter: {6 - len(podane_liczby)}")
        try:
            user_input = int(input("Enter a number: "))
            if user_input in podane_liczby:
                print("You have already entered that number")
            elif 1 <= user_input <= 49:
                podane_liczby.append(user_input)
            else:
                print("The number is not in the range 1 to 49.")
        except Exception as err:
            print("Something went wrong:", err)
    # Draw 6 distinct numbers: randint(1, 50) could both repeat a number and
    # return 50, so sample without replacement from 1-49 instead.
    lottomat = sample(range(1, 50), 6)  # the Lottomat draws its numbers
    print(f"Your numbers: {sorted(podane_liczby)}")
    print("Lottomat numbers:", sorted(lottomat))
    ile_trafione = len(set(lottomat) & set(podane_liczby))  # how many matched
    if ile_trafione >= 3:
        print(f"Well done, you matched: {ile_trafione} numbers.")
    else:
        print(f"Sorry, you lost another 5 PLN. You matched: {ile_trafione} numbers.")
lotto()
|
[
"bartosz.koper@outlook.com"
] |
bartosz.koper@outlook.com
|
5631925dad6cde26aa9cb49d8f88d3a19c44a9dd
|
993d84420b2e3005b1b59370db26c79d702e380a
|
/quantization/SLSQ/process_tune.py
|
79d45419625b19030d150e09795b287c8dc0d531
|
[
"MIT"
] |
permissive
|
SKKU-ESLAB/Auto-Compression
|
25588c5e87a141049b5816940d157a7a1c478e04
|
614048140f00e977e53178ce0417caaf31a29bc4
|
refs/heads/master
| 2022-12-14T20:33:25.242853
| 2022-12-01T07:13:45
| 2022-12-01T07:13:45
| 224,101,851
| 16
| 13
|
MIT
| 2022-12-01T07:13:46
| 2019-11-26T04:25:02
|
C
|
UTF-8
|
Python
| false
| false
| 6,537
|
py
|
import logging
import math
import operator
import time
import torch as t
from util import AverageMeter
__all__ = ['train', 'validate', 'PerformanceScoreboard']
logger = logging.getLogger()
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with t.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def train(train_loader, model, criterion, optimizer, lr_scheduler, epoch, monitors, alpha,args):
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
batch_time = AverageMeter()
total_sample = len(train_loader.sampler)
batch_size = train_loader.batch_size
steps_per_epoch = math.ceil(total_sample / batch_size)
logger.info('Training: %d samples (%d per mini-batch)', total_sample, batch_size)
print('Training: %d samples (%d per mini-batch)' %(total_sample, batch_size))
model.train()
end_time = time.time()
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs = inputs.to(args.device.type)
targets = targets.to(args.device.type)
outputs = model(inputs)
loss = criterion(outputs, targets)
sparsity_loss = 0.
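        # accumulate a sparsity-encouraging regularizer from every module that exposes
        # learnable pruning parameters p and c; it is scaled by alpha before being added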
for n, m in model.named_modules():
if hasattr(m, "p"):
sparsity_loss += t.exp(-m.p) * (1 - (m.p.detach() / (m.c.detach() + 1e-12)))
loss += sparsity_loss * alpha
acc1, acc5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(acc1.item(), inputs.size(0))
top5.update(acc5.item(), inputs.size(0))
if lr_scheduler is not None:
lr_scheduler.step(epoch=epoch, batch=batch_idx)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end_time)
end_time = time.time()
p = {}
c = {}
for n, m in model.named_modules():
if hasattr(m, "p"):
p[n] = m.p
c[n] = m.c
if (batch_idx + 1) % args.log.print_freq == 0:
for m in monitors:
m.update(epoch, batch_idx + 1, steps_per_epoch, 'Training', {
'Loss': losses,
'Top1': top1,
'Top5': top5,
'BatchTime': batch_time,
'LR': optimizer.param_groups[0]['lr'],
})
logger.info('==> Top1: %.3f Top5: %.3f Loss: %.3f\n',
top1.avg, top5.avg, losses.avg)
print('==> Top1: %.3f Top5: %.3f Loss: %.3f\n'
%(top1.avg, top5.avg, losses.avg))
return top1.avg, top5.avg, losses.avg
def validate(data_loader, model, criterion, epoch, monitors, args):
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
batch_time = AverageMeter()
total_sample = len(data_loader.sampler)
batch_size = data_loader.batch_size
steps_per_epoch = math.ceil(total_sample / batch_size)
logger.info('Validation: %d samples (%d per mini-batch)', total_sample, batch_size)
print('Validation: %d samples (%d per mini-batch)' %(total_sample, batch_size))
model.eval()
end_time = time.time()
for batch_idx, (inputs, targets) in enumerate(data_loader):
with t.no_grad():
inputs = inputs.to(args.device.type)
targets = targets.to(args.device.type)
outputs = model(inputs)
loss = criterion(outputs, targets)
acc1, acc5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(acc1.item(), inputs.size(0))
top5.update(acc5.item(), inputs.size(0))
batch_time.update(time.time() - end_time)
end_time = time.time()
if (batch_idx + 1) % args.log.print_freq == 0:
for m in monitors:
m.update(epoch, batch_idx + 1, steps_per_epoch, 'Validation', {
'Loss': losses,
'Top1': top1,
'Top5': top5,
'BatchTime': batch_time
})
logger.info('==> Top1: %.3f Top5: %.3f Loss: %.3f\n', top1.avg, top5.avg, losses.avg)
print('==> Top1: %.3f Top5: %.3f Loss: %.3f\n' %(top1.avg, top5.avg, losses.avg))
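    # after evaluation, measure the realized weight sparsity across all quantized layers
    # (fraction of quantized weights that are exactly zero)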
total_zero = 0.
total_numel = 0.
for n, m in model.named_modules():
if hasattr(m, "quan_w_fn"):
weight_zero = (m.quan_w_fn(m.weight.detach())==0).sum()
weight_numel = m.weight.detach().numel()
sparsity = weight_zero / weight_numel
print(n, sparsity)
total_zero += weight_zero
total_numel += weight_numel
sparsity = total_zero / total_numel
return top1.avg, top5.avg, losses.avg, sparsity
class PerformanceScoreboard:
def __init__(self, num_best_scores):
self.board = list()
self.num_best_scores = num_best_scores
def update(self, top1, top5, epoch, sparsity):
""" Update the list of top training scores achieved so far, and log the best scores so far"""
self.board.append({'top1': top1, 'top5': top5, 'epoch': epoch, 'sparsity' : sparsity})
# Keep scoreboard sorted from best to worst, and sort by top1, top5 and epoch
curr_len = min(self.num_best_scores, len(self.board))
self.board = sorted(self.board,
key=operator.itemgetter('top1', 'top5', 'epoch'),
reverse=True)[0:curr_len]
for idx in range(curr_len):
score = self.board[idx]
logger.info('Scoreboard best %d ==> Epoch [%d][Top1: %.3f Top5: %.3f] Sparsity : %.3f',
idx + 1, score['epoch'], score['top1'], score['top5'], score['sparsity'])
print('Scoreboard best %d ==> Epoch [%d][Top1: %.3f Top5: %.3f] Sparsity : %.3f'
%(idx + 1, score['epoch'], score['top1'], score['top5'], score['sparsity']))
def is_best(self, epoch):
return self.board[0]['epoch'] == epoch
|
[
"ilena7440@naver.com"
] |
ilena7440@naver.com
|
c6af8dd9dee7257067c8afe6edcb1d6b40606fd1
|
c2fd9c421b225862633f74f99a7a0dad635c5c67
|
/array/MatrixBlockSum.py
|
3ce867a1465eecbd7378e650c0259da5a7c1ce5c
|
[] |
no_license
|
yuhangxiaocs/LeetCodePy
|
3751881dbd78b581a1d75beea737aed28765988b
|
31012a004ba14ddfb468a91925d86bc2dfb60dd4
|
refs/heads/master
| 2020-12-20T19:36:55.421295
| 2020-11-24T17:01:15
| 2020-11-24T17:01:15
| 236,190,313
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
'''
Same idea as quickly computing the sum of an array segment: a pre_computation first
builds sum[i] = the sum of elements 0..i, so that sum(i...j) = sum[j] - sum[i-1].
This problem just extends that idea to two dimensions.
Runtime: 72 ms, faster than 98.47% of Python online submissions for Matrix Block Sum.
Memory Usage: 12.5 MB, less than 100.00% of Python online submissions for Matrix Block Sum.
'''
class Solution(object):
def matrixBlockSum(self, mat, K):
"""
:type mat: List[List[int]]
:type K: int
:rtype: List[List[int]]
"""
m, n = len(mat), len(mat[0])
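        # two in-place passes build 2D prefix sums: after them, mat[i][j]
        # holds the sum of the submatrix from (0, 0) to (i, j) inclusive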
for i in range(m):
for j in range(1, n):
mat[i][j] += mat[i][j - 1]
for i in range(n):
for j in range(1, m):
mat[j][i] += mat[j - 1][i]
res = [[0 for _ in range(n)] for __ in range(m)]
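        # for each cell, clamp the K-block corners to the matrix bounds and apply
        # inclusion-exclusion on the prefix sums to get the block sum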
for i in range(m):
for j in range(n):
rmax = i + K if i + K < m else m - 1
cmax = j + K if j + K < n else n - 1
mi, mid1, mid2 = 0, 0, 0
if i - K - 1 >= 0 and j - K - 1 >= 0:
mi = mat[i - K - 1][j - K - 1]
if i - K - 1 >= 0:
mid2 = mat[i - K - 1][cmax]
if j - K - 1 >= 0:
mid1 = mat[rmax][j - K - 1]
res[i][j] = mat[rmax][cmax] - mid1 - mid2 + mi
return res
if __name__ == '__main__':
mat = Solution().matrixBlockSum([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 1)
for i in mat:
print(i)
|
[
"1248618975@qq.com"
] |
1248618975@qq.com
|
2ce670f2db577a36adc48c27f56c09138c83a420
|
f5758bd67d18f30e7e6eea7d0342f9ad04acaecf
|
/shimaenaga/cli.py
|
2dfe613d5c21720efb060920120e20a683c28e23
|
[
"MIT"
] |
permissive
|
kk6/shimaenaga
|
f863e5d2307cd346ae70ca36710583beefdd52e9
|
e0fc200b1cfa13223cda14868456d92ae7b48767
|
refs/heads/master
| 2021-07-07T05:37:42.201017
| 2021-03-08T01:29:48
| 2021-03-08T01:29:48
| 244,536,499
| 0
| 0
|
MIT
| 2021-06-22T20:25:26
| 2020-03-03T03:53:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,003
|
py
|
import datetime
import pathlib
import typer
from livereload import Server
from loguru import logger
from .config import Config, SiteMeta
from .config import default_config as dc
from .config import parse_config
from .generators import generate_markdown_template, generate_article_template
from .initializer import initialize
from .project import Project
app = typer.Typer()
@app.command()
def init(
site_title: str = typer.Option(dc.sitemeta.title, prompt="What's your site title?"),
author: str = typer.Option(dc.sitemeta.author, prompt="What's your name?"),
language_code: str = typer.Option(
dc.sitemeta.language_code, prompt="Language code?"
),
per_page: int = typer.Option(dc.sitemeta.per_page, prompt="How many articles per page?"),
) -> None:
"""Initialize new site"""
config = Config(
theme=dc.theme,
sitemeta=SiteMeta(title=site_title, author=author, language_code=language_code, per_page=per_page),
)
initialize(config)
typer.echo("New site initial setup complete ✨")
@app.command()
def new(filename: str, title: str = "New title", page: bool = False) -> None:
"""Create new page from template"""
if page:
dirname = pathlib.Path("./pages")
path = generate_markdown_template(dirname, title, filename)
else:
local_time = datetime.date.today()
dirname = pathlib.Path(".")
path = generate_article_template(dirname, title, filename, local_time)
typer.echo(f"New markdown file created at {path}")
@app.command()
def build() -> None:
"""Build project"""
logger.info("Building project...")
config = parse_config("config.toml")
project = Project(config)
project.build()
logger.success("Build completed")
@app.command()
def serve() -> None:
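    """Serve the built site with live reload"""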
server = Server()
logger.info("Starting live-reload...")
server.watch("articles/", build)
server.watch("pages/", build)
server.watch("config.toml", build)
server.serve(root="dest")
|
[
"hiro.ashiya@gmail.com"
] |
hiro.ashiya@gmail.com
|
fc28e89d7f7e027449639d4297556009da6ceeaf
|
26e2cd7c32549670478c82292f1657cbb1bfe635
|
/api/person/addPerson/app.py
|
14d3573f0269b10b1196e2cd38738ea406090cc9
|
[] |
no_license
|
gungorerhan/bluetooth-localization
|
5869446c2d91041c65b5d03bf41be98eae61c281
|
381d7b10e9e6e3f3d2ad147b8e1203a44f8795ae
|
refs/heads/master
| 2023-01-05T15:58:10.098630
| 2020-07-12T16:09:02
| 2020-07-12T16:09:02
| 226,554,223
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
import sys
import logging
import rds_config
import pymysql
import json
#rds settings
rds_host = "ips-1.c6mdsdjlgnmm.eu-central-1.rds.amazonaws.com"
name = rds_config.db_username
password = rds_config.db_password
db_name = rds_config.db_name
logger = logging.getLogger()
logger.setLevel(logging.INFO)
try:
    conn = pymysql.connect(host=rds_host, user=name, passwd=password, db=db_name, connect_timeout=5)
except pymysql.MySQLError as e:
logger.error("ERROR: Unexpected error: Could not connect to MySQL instance.")
logger.error(e)
sys.exit()
logger.info("SUCCESS: Connection to RDS MySQL instance succeeded")
def handler(event, context):
# Parse event body
eventBody = event["body"]
eventBody = json.loads(eventBody)
personId = eventBody["personId"]
firstName = eventBody["firstName"]
lastName = eventBody["lastName"]
cardId = eventBody["cardId"]
    # Construct the body of the response object
responseBody = {}
responseBody['status'] = f'New person added with id={personId}'
    with conn.cursor() as cur:
        # use a parameterized query so the driver escapes the values (avoids SQL injection)
        cur.execute(
            "INSERT INTO Person (personId, firstName, lastName, cardId) VALUES (%s, %s, %s, %s);",
            (personId, firstName, lastName, cardId),
        )
        logger.info(f'New person added with id={personId}')
conn.commit()
# Construct http response object
responseObject = {}
responseObject['statusCode'] = 200
responseObject['headers'] = {}
responseObject['headers']['Content-Type'] = 'application/json'
responseObject['headers']['Access-Control-Allow-Origin'] = '*'
responseObject['body'] = json.dumps(responseBody)
# Return the response object
return responseObject
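# Minimal local smoke test (hypothetical event shape, mirroring an API Gateway proxy
# request; requires the database connection above to have succeeded):
# if __name__ == "__main__":
#     event = {"body": '{"personId": "1", "firstName": "Ada", "lastName": "Lovelace", "cardId": "42"}'}
#     print(handler(event, None))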
|
[
"erhangungor@zohomail.com"
] |
erhangungor@zohomail.com
|
e184f5bfa191e589dad273770f1c3706cea852fc
|
26c909d5ccf36193a72e9034707b69edbfd67789
|
/300_longest_increasing_subsequence.py
|
993b20880bfa3f9225777b54a31f7e9f1750c560
|
[] |
no_license
|
zeroohub/leetcode
|
39a835476eedea5bf8f434a15efb5e73495209f9
|
cfefa073d6c6f664a835b87369dbba0203b91e58
|
refs/heads/master
| 2020-03-22T03:51:22.247932
| 2019-02-21T11:07:13
| 2019-02-21T11:07:13
| 139,456,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
# -*- coding: utf-8 -*-
class Solution(object):
def lengthOfLIS(self, nums):
"""
TODO check solution
https://leetcode.com/problems/longest-increasing-subsequence/solution/
brute force, O(n^2)
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
dp = []
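        # dp[i] = length of the longest increasing subsequence ending at nums[i]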
big = 1
for i, n in enumerate(nums):
maxval = 1
for j in range(i):
if n > nums[j]:
maxval = max(maxval, dp[j]+1)
big = max(maxval, big)
dp.append(maxval)
return big
print(Solution().lengthOfLIS([1,3,6,7,9,4,10,5,6]))
|
[
"spamzero@yeah.net"
] |
spamzero@yeah.net
|
1b89119e377c53d6374d3709b42f98d8b21ffd19
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/pass_def_class.py
|
e29e17921ea9fb76908f29d73e9c72df07c9ddcd
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293
| 2023-07-26T13:21:11
| 2023-07-26T13:21:11
| 98,900,570
| 253
| 77
|
MIT
| 2020-10-25T01:12:53
| 2017-07-31T14:54:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 255
|
py
|
# def empty_func():
# SyntaxError: unexpected EOF while parsing
def empty_func():
pass
# class EmptyClass():
# SyntaxError: unexpected EOF while parsing
class EmptyClass():
pass
def empty_func_one_line(): pass
class EmptyClassOneLine(): pass
|
[
"nkmk.on@gmail.com"
] |
nkmk.on@gmail.com
|
826afca4df28ff30b6a1d904ee64e55471bcf448
|
d85b190bf504191f00197a47a9426811a3d7b55e
|
/practice1.py
|
e056048cef017c6e7f157d984a9f4e4d663e5cbf
|
[] |
no_license
|
0702ray/minecraft-2
|
c436561bb458a55d98c35f7544c233098b4fd3dc
|
832702b560889e59ed4877f420b735851738d78d
|
refs/heads/master
| 2022-11-29T02:52:37.800178
| 2020-08-06T05:34:37
| 2020-08-06T05:34:37
| 284,612,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 15:42:25 2020
@author: SCE
"""
from mcpi.minecraft import Minecraft
import time
mc=Minecraft.create()
time.sleep(5)
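# leave a trail of flowers (block id 38) at the player's position, one every 0.1 s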
a=0
while a<11:
a=a+1
x,y,z=mc.player.getTilePos()
    mc.setBlock(x, y, z, 38)
time.sleep(0.1)
|
[
"noreply@github.com"
] |
0702ray.noreply@github.com
|
ea93139613173aec84cb777c42e2e68be166d0a6
|
c04f8a554172bd6081c86c4ef89105b2b1b856f5
|
/multiple_processes.py
|
f4f7660072c49335a59f9d1c404d228c1f718011
|
[
"MIT"
] |
permissive
|
RKJenamani/pybullet-parallel
|
da5ae2cd6e811975ed138b5cc53cf239a5325fa2
|
4f6693bba0299a5abf31be07863c97dedbb45cff
|
refs/heads/main
| 2023-04-26T12:43:36.527479
| 2021-05-22T05:02:44
| 2021-05-22T05:02:44
| 369,637,938
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,535
|
py
|
import multiprocessing
import time
def setup_exp(p):
    mu = 0.65
    local_path = "./"
    # (x, y, z, roll, pitch, yaw) pose and URDF path for each object in the scene
    scene = [
        ([-0.0404755, -0.00112935, 0.183326, 0.000183599, 0.0274296, -3.12269], "models/024_bowl/model.urdf"),
        ([-0.031447, 0.00245332, 0.231471, 1.11077, 0.116883, 1.88827], "models/037_scissors/model.urdf"),
        ([-0.0342039, -0.0411717, 0.221495, -1.21418, -0.411491, 1.3281], "models/011_banana/model.urdf"),
        ([-0.00109234, 0.00217348, 0.0940739, 0.603044, -1.5319, 2.60045], "models/004_sugar_box/model.urdf"),
        ([0.0431414, -0.0814738, 0.100775, -1.56183, 0.0200383, -0.0332266], "models/010_potted_meat_can/model.urdf"),
        ([-0.0996265, -0.0109982, 0.229266, 1.09516, -0.0324135, 1.93206], "models/037_scissors/model.urdf"),
        ([0.0391189, -0.0793964, 0.142291, -3.03823, -0.084093, 1.51302], "models/009_gelatin_box/model.urdf"),
        ([-0.039549, -0.000712143, 0.141748, -0.00616443, 0.0036889, -1.53266], "models/008_pudding_box/model.urdf"),
        ([0.0518593, -0.113949, 0.206423, -2.99895, -0.0929695, 1.27618], "models/007_tuna_fish_can/model.urdf"),
        ([0.0575085, -0.102112, 0.171442, -3.01417, -0.111824, 1.44483], "models/007_tuna_fish_can/model.urdf"),
        ([0.000732725, 0.000258393, 0.0369799, -2.1136, -1.55321, 0.539161], "models/003_cracker_box/model.urdf"),
    ]
    object_ids = []
    for state, urdf in scene:
        xyz, rpy = state[:3], state[3:]
        body_id = p.loadURDF(local_path + urdf, xyz, p.getQuaternionFromEuler(rpy))
        p.changeDynamics(body_id, -1, lateralFriction=mu)
        object_ids.append(body_id)
def make_sim():
import pybullet
import pybullet_data
import pybullet_utils.bullet_client as bc
p = bc.BulletClient(connection_mode=pybullet.DIRECT)
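    # DIRECT mode runs the physics server headless (no GUI), so many
    # independent clients can be stepped in parallel processes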
# p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0,0,-10)
plane = p.loadURDF("plane.urdf")
p.setRealTimeSimulation(0)
setup_exp(p)
for _ in range(1000):
p.stepSimulation()
return None
def worker(inst):
    make_sim()
if __name__ == "__main__":
    # the __main__ guard keeps spawned child processes from re-running this benchmark loop
    for num_parallel in range(1, 13, 3):
        start_time = time.time()
        w = []
        for i in range(num_parallel):
            # args must be a tuple of positional arguments (a dict would pass only its keys)
            wi = multiprocessing.Process(target=worker, args=(i,))
            w.append(wi)
        for i in range(num_parallel):
            w[i].start()
        for i in range(num_parallel):
            w[i].join()
        end_time = time.time()
        print("Total time taken: {0:.2f} | Parallel Envs: {1} | Time taken per simulation: {2:.2f} ".format((end_time-start_time), num_parallel, (end_time-start_time)/num_parallel))
|
[
"rajatkj11@gmail.com"
] |
rajatkj11@gmail.com
|
2dfdb8b02f2200284bec6076c1f6e6d9b094ba8c
|
060ce17de7b5cdbd5f7064d1fceb4ded17a23649
|
/fn_googlesafebrowsing/setup.py
|
6f057bb8503335e605ed4a98ec83a02553343947
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-community-apps
|
74bbd770062a22801cef585d4415c29cbb4d34e2
|
6878c78b94eeca407998a41ce8db2cc00f2b6758
|
refs/heads/main
| 2023-06-26T20:47:15.059297
| 2023-06-23T16:33:58
| 2023-06-23T16:33:58
| 101,410,006
| 81
| 107
|
MIT
| 2023-03-29T20:40:31
| 2017-08-25T14:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import glob
import ntpath
def get_module_name(module_path):
"""
Return the module name of the module path
"""
return ntpath.split(module_path)[1].split(".")[0]
def snake_to_camel(word):
"""
Convert a word from snake_case to CamelCase
"""
return ''.join(x.capitalize() or '_' for x in word.split('_'))
setup(
name="fn_googlesafebrowsing",
display_name="Google Safe Browsing Function for IBM SOAR",
version="1.0.1",
license="MIT",
author="IBM SOAR",
author_email="",
url="https://github.com/resilient/resilient-community-apps",
description="IBM Security SOAR app for 'fn_googlesafebrowsing'",
long_description="""This app uses Google Safe Browsing to check artifacts with a URL type and adds a hit if the site is potentially unsafe. The hit contains a link to Google Transparency Report that gives information on the potentially unsafe url.'""",
install_requires=[
"resilient-circuits>=43.0.0"
],
python_requires='>=3.6',
packages=find_packages(),
include_package_data=True,
platforms="any",
classifiers=[
"Programming Language :: Python",
],
entry_points={
"resilient.circuits.components": [
# When setup.py is executed, loop through the .py files in the components directory and create the entry points.
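            # e.g. a hypothetical file components/funct_safebrowsing_lookup.py would yield the entry point:
            #   "FunctSafebrowsingLookupFunctionComponent = fn_googlesafebrowsing.components.funct_safebrowsing_lookup:FunctionComponent"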
"{}FunctionComponent = fn_googlesafebrowsing.components.{}:FunctionComponent".format(snake_to_camel(get_module_name(filename)), get_module_name(filename)) for filename in glob.glob("./fn_googlesafebrowsing/components/[a-zA-Z]*.py")
],
"resilient.circuits.configsection": ["gen_config = fn_googlesafebrowsing.util.config:config_section_data"],
"resilient.circuits.customize": ["customize = fn_googlesafebrowsing.util.customize:customization_data"],
"resilient.circuits.selftest": ["selftest = fn_googlesafebrowsing.util.selftest:selftest_function"]
}
)
|
[
"shane.curtin@ie.ibm.com"
] |
shane.curtin@ie.ibm.com
|
799d89d6eed5762ca6c85e634da9a39b18eebfff
|
2671293f01b2b06c7cf68070194205383ee58aee
|
/Scripts/tottagSmoother.py
|
1f783b4828743bc1831c50e2a3d6e406e83fc2d4
|
[] |
no_license
|
vanderbiltsealab/tottag_methods
|
63481e4250a29a8959bca3b316ec146b93b6e9d5
|
46c454031b24b503a32aefb18d07ab496d3d7450
|
refs/heads/master
| 2023-06-25T06:45:35.438413
| 2021-07-27T20:21:27
| 2021-07-27T20:21:27
| 279,338,761
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,044
|
py
|
#!/usr/bin/env python
import sys
# Class to handle the moving average.
# Works like a fixed-size queue, keeping the most recent smoothVal values stored.
class SmoothedGroup:
def __init__(self, time, mote, val, size, s):
self.stamps = []
self.data = []
self.size = size
self.stamps.append(time)
self.data.append(val)
self.mote = mote
self.lastTime = time
self.outfile = s
def addVal(self, time, val):
self.stamps.insert(0, time)
self.data.insert(0, val)
self.lastTime = time
if (len(self.data) >= self.size):
self.average()
def clear(self):
self.stamps = []
self.data = []
    # helper method to average the buffer and write out one line; pops the oldest value off
def average(self):
averageVal = 0
index = int(self.size/2)
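        # the smoothed sample is stamped with the middle timestamp of the window (centered moving average)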
for i in range(0, self.size):
averageVal += self.data[i]
self.data.pop()
self.stamps.pop()
averageVal /= self.size
self.outfile.write(str(self.stamps[index])+"\t"+self.mote+"\t"+str(round((int(averageVal)/25.4/12), 2))+"\n")
OUT_OF_RANGE_CODE = 999999
if len(sys.argv) < 3:
    print('USAGE: python tottagSmoother.py SMOOTHING_VALUE LOG_FILE_PATH [LOG_FILE_PATH ...]')
    sys.exit(1)
logs = sys.argv[2:]
smoothVal = int(sys.argv[1])
for i in logs:
outFile = i[:-4] + "-smoothed.log"
s = open(outFile, "w+")
first = {}
classDict = {}
with open(i) as f:
s.write(f.readline())
for line in f:
if line[0] != '#':
tokens = line.split('\t')
#this if only operates on the first recording from each mote.
#serves to initialize the class
if (first.setdefault(tokens[1], True) and int(tokens[2]) != OUT_OF_RANGE_CODE):
classDict[tokens[1]] = classDict.setdefault(tokens[1], SmoothedGroup(int(tokens[0]), tokens[1], int(tokens[2]), smoothVal, s))
first[tokens[1]] = False
elif (int(tokens[2]) != OUT_OF_RANGE_CODE):
#checks here for time skips
timeDiff = int(tokens[0]) - classDict[tokens[1]].lastTime
if (timeDiff == 1):
classDict[tokens[1]].addVal(int(tokens[0]), int(tokens[2]))
#If the skip is small, fills in time with current value
elif (timeDiff > 1 and timeDiff <= smoothVal):
                        for t in range(classDict[tokens[1]].lastTime + 1, int(tokens[0]) + 1):
                            classDict[tokens[1]].addVal(t, int(tokens[2]))
#If the skip is larger than the smoothVal, it starts the buffer over
else:
classDict[tokens[1]].clear()
classDict[tokens[1]].addVal(int(tokens[0]), int(tokens[2]))
s.close()
|
[
"virginia.salo@gmail.com"
] |
virginia.salo@gmail.com
|
935b4b4690a238df2f20c0ae83eafe2b1f8af080
|
0738bf4f07aaa81a42757d438cb56e1fe61eb482
|
/3. SQL/pset7/houses/roster.py
|
466758bf040e737f01d3dbc3d01eb6b60d25f02e
|
[] |
no_license
|
laisdeghaide/CS50x
|
b7072e50e19669d91e0724c9524b2e12d72c5ff0
|
443ab2003cd8507c3ba5cbd6b04cadf25fae27cd
|
refs/heads/master
| 2022-12-07T09:00:29.158469
| 2020-09-03T14:47:39
| 2020-09-03T14:47:39
| 277,606,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
from sys import argv
from cs50 import SQL
if len(argv) != 2:
print("Usage: python roster.py House")
exit()
db = SQL("sqlite:///students.db")
characteres = db.execute("SELECT * FROM students WHERE house = (?) ORDER BY last", argv[1])
for c in characteres:
    if c['middle'] is not None:
print("{} {} {}, born {}" .format(c['first'], c['middle'], c['last'], c['birth']))
else:
print("{} {}, born {}" .format(c['first'], c['last'], c['birth']))
|
[
"laisdeghaide@ufu.br"
] |
laisdeghaide@ufu.br
|
d8c6dde444542ce844fca80c31b2000669bb9b4d
|
3e5d508ae0609482421b529178a52e360a58c96f
|
/flappy_two_players.py
|
0c4640f2795610f087d3a774fd1513bcd337792c
|
[
"MIT"
] |
permissive
|
yasminemedhat/FlapPyBird
|
4a6729cf42f30262c3a5ef840eb2f86be391d5aa
|
a70b48340c94189690492fd34637f0e9b4041c93
|
refs/heads/master
| 2020-05-17T18:31:06.984521
| 2019-05-14T14:52:59
| 2019-05-14T14:52:59
| 183,885,988
| 0
| 0
|
MIT
| 2019-04-28T09:21:00
| 2019-04-28T09:21:00
| null |
UTF-8
|
Python
| false
| false
| 23,925
|
py
|
from itertools import cycle
import random
import pygame
from pygame.locals import *
import socket
import sys
from _thread import *
import time
from datetime import datetime
HOST = ''
SEND_PORT = 0
RECV_PORT = 0
ID = 0
isServer = False
# ToDo: Added
someone_still_playing = True
my_gameover = False
broadcast_port = 8888
player2_ip = 0
player2_send_port = 0
player2_recv_port = 0
player2_score = 0
recv_score_socket = None
send_score_socket = None
FPS = 30
SCREENWIDTH = 288
SCREENHEIGHT = 512
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
# image, sound and hitmask dicts
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
# red bird
(
'assets/sprites/redbird-upflap.png',
'assets/sprites/redbird-midflap.png',
'assets/sprites/redbird-downflap.png',
),
# blue bird
(
'assets/sprites/bluebird-upflap.png',
'assets/sprites/bluebird-midflap.png',
'assets/sprites/bluebird-downflap.png',
),
# yellow bird
(
'assets/sprites/yellowbird-upflap.png',
'assets/sprites/yellowbird-midflap.png',
'assets/sprites/yellowbird-downflap.png',
),
)
# list of backgrounds
BACKGROUNDS_LIST = (
'assets/sprites/background-day.png',
'assets/sprites/background-night.png',
)
# list of pipes
PIPES_LIST = (
'assets/sprites/pipe-green.png',
'assets/sprites/pipe-red.png',
)
try:
xrange
except NameError:
xrange = range
def main():
global SCREEN, FPSCLOCK
pygame.init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
# numbers sprites for score display
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
# game over sprite
IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()
# message sprite for welcome screen
IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
# winner sprite
IMAGES['winner'] = pygame.image.load('assets/sprites/winner.png').convert_alpha()
# loser sprite
IMAGES['loser'] = pygame.image.load('assets/sprites/loser.png').convert_alpha()
# tie sprite
IMAGES['tie'] = pygame.image.load('assets/sprites/tie.png').convert_alpha()
# sounds
if 'win' in sys.platform:
soundExt = '.wav'
else:
soundExt = '.ogg'
SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)
SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)
SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)
SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)
SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)
while True:
# select random background sprites
randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
# select random player sprites
randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
IMAGES['player'] = (
pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
)
# select random pipe sprites
pipeindex = random.randint(0, len(PIPES_LIST) - 1)
IMAGES['pipe'] = (
pygame.transform.flip(
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), False, True),
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
)
        # hitmask for pipes
HITMASKS['pipe'] = (
getHitmask(IMAGES['pipe'][0]),
getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
getHitmask(IMAGES['player'][0]),
getHitmask(IMAGES['player'][1]),
getHitmask(IMAGES['player'][2]),
)
global HOST
HOST = socket.gethostbyname_ex('')
HOST = HOST[-1][-1]
print(HOST)
# random generator that uses the timestamp
random.seed(datetime.now())
# generate a random sending port number
global SEND_PORT
SEND_PORT = random.randint(2 ** 10, 2 ** 16)
print(SEND_PORT)
# generate a random receiving port number
global RECV_PORT
RECV_PORT = random.randint(2 ** 10, 2 ** 16)
print(RECV_PORT)
# generate random player id
global ID
ID = random.randint(10, 100)
movementInfo = showWelcomeAnimation()
crashInfo = mainGame(movementInfo)
showGameOverScreen(crashInfo)
def showWelcomeAnimation():
"""Shows welcome screen animation of flappy bird"""
# index of player to blit on screen
playerIndex = 0
playerIndexGen = cycle([0, 1, 2, 1])
# iterator used to change playerIndex after every 5th iteration
loopIter = 0
playerx = int(SCREENWIDTH * 0.2)
playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2)
messagey = int(SCREENHEIGHT * 0.12)
basex = 0
# amount by which base can maximum shift to left
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# player shm for up-down motion on welcome screen
playerShmVals = {'val': 0, 'dir': 1}
Connect_to_second_player()
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
            # keypress check disabled in the two-player version (the game starts on the first event):
            # if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
            # make first flap sound and return values for mainGame
SOUNDS['wing'].play()
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
}
# adjust playery, playerIndex, basex
if (loopIter + 1) % 5 == 0:
playerIndex = next(playerIndexGen)
loopIter = (loopIter + 1) % 30
basex = -((-basex + 4) % baseShift)
playerShm(playerShmVals)
# draw sprites
SCREEN.blit(IMAGES['background'], (0, 0))
SCREEN.blit(IMAGES['player'][playerIndex],
(playerx, playery + playerShmVals['val']))
SCREEN.blit(IMAGES['message'], (messagex, messagey))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
pygame.display.update()
FPSCLOCK.tick(FPS)
def mainGame(movementInfo):
# ToDo Added
global someone_still_playing
score = playerIndex = loopIter = 0
playerIndexGen = movementInfo['playerIndexGen']
playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery']
basex = movementInfo['basex']
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe1 = getRandomPipe()
newPipe2 = getRandomPipe()
# list of upper pipes
upperPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
lowerPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
pipeVelX = -4
# player velocity, max velocity, downward accleration, accleration on flap
playerVelY = -9 # player's velocity along Y, default same as playerFlapped
playerMaxVelY = 10 # max vel along Y, max descend speed
playerMinVelY = -8 # min vel along Y, max ascend speed
playerAccY = 1 # players downward accleration
playerRot = 45 # player's rotation
playerVelRot = 3 # angular speed
playerRotThr = 20 # rotation threshold
playerFlapAcc = -9 # players speed on flapping
playerFlapped = False # True when player flaps
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
SOUNDS['wing'].play()
# check for crash here
crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex},
upperPipes, lowerPipes)
if crashTest[0]:
return {
'y': playery,
'groundCrash': crashTest[1],
'basex': basex,
'upperPipes': upperPipes,
'lowerPipes': lowerPipes,
'score': score,
'playerVelY': playerVelY,
'playerRot': playerRot
}
# check for score
playerMidPos = playerx + IMAGES['player'][0].get_width() / 2
for pipe in upperPipes:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
score += 1
SOUNDS['point'].play()
# playerIndex basex change
if (loopIter + 1) % 3 == 0:
playerIndex = next(playerIndexGen)
loopIter = (loopIter + 1) % 30
basex = -((-basex + 100) % baseShift)
# rotate the player
if playerRot > -90:
playerRot -= playerVelRot
# player's movement
if playerVelY < playerMaxVelY and not playerFlapped:
playerVelY += playerAccY
if playerFlapped:
playerFlapped = False
# more rotation to cover the threshold (calculated in visible rotation)
playerRot = 45
playerHeight = IMAGES['player'][playerIndex].get_height()
playery += min(playerVelY, BASEY - playery - playerHeight)
# move pipes to left
for uPipe, lPipe in zip(upperPipes, lowerPipes):
uPipe['x'] += pipeVelX
lPipe['x'] += pipeVelX
# add new pipe when first pipe is about to touch left of screen
if 0 < upperPipes[0]['x'] < 5:
newPipe = getRandomPipe()
upperPipes.append(newPipe[0])
lowerPipes.append(newPipe[1])
# remove first pipe if its out of the screen
if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
upperPipes.pop(0)
lowerPipes.pop(0)
# draw sprites
SCREEN.blit(IMAGES['background'], (0, 0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
showScore(score)
# ToDo: Added
if someone_still_playing:
start_new_thread(send_Score, (score,))
showOtherScore()
# Player rotation has a threshold
visibleRot = playerRotThr
if playerRot <= playerRotThr:
visibleRot = playerRot
playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot)
SCREEN.blit(playerSurface, (playerx, playery))
pygame.display.update()
FPSCLOCK.tick(FPS)
def showGameOverScreen(crashInfo):
"""crashes the player down ans shows gameover image"""
# ToDo: Added
global someone_still_playing
global my_gameover
score = crashInfo['score']
playerx = SCREENWIDTH * 0.2
playery = crashInfo['y']
playerHeight = IMAGES['player'][0].get_height()
playerVelY = crashInfo['playerVelY']
playerAccY = 2
playerRot = crashInfo['playerRot']
playerVelRot = 7
basex = crashInfo['basex']
upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes']
# play hit and die sounds
SOUNDS['hit'].play()
if not crashInfo['groundCrash']:
SOUNDS['die'].play()
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
if playery + playerHeight >= BASEY - 1:
return
# player y shift
if playery + playerHeight < BASEY - 1:
playery += min(playerVelY, BASEY - playery - playerHeight)
# player velocity change
if playerVelY < 15:
playerVelY += playerAccY
# rotate only when it's a pipe crash
if not crashInfo['groundCrash']:
if playerRot > -90:
playerRot -= playerVelRot
# draw sprites
SCREEN.blit(IMAGES['background'], (0, 0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
showScore(score)
# ToDo: Added game has ended parameter
if someone_still_playing and not my_gameover:
start_new_thread(send_Score, (score, True,))
my_gameover = True
show_win_lose(score)
showOtherScore()
playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot)
SCREEN.blit(playerSurface, (playerx, playery))
SCREEN.blit(IMAGES['gameover'], (50, 180))
FPSCLOCK.tick(FPS)
pygame.display.update()
# start_new_thread(close_connections, ())
# ToDo: Added
def show_win_lose(score):
if score < player2_score:
SCREEN.blit(IMAGES['loser'], (50, 180))
elif score > player2_score:
SCREEN.blit(IMAGES['winner'], (50, 180))
else:
SCREEN.blit(IMAGES['tie'], (50, 180))
def playerShm(playerShm):
"""oscillates the value of playerShm['val'] between 8 and -8"""
if abs(playerShm['val']) == 8:
playerShm['dir'] *= -1
if playerShm['dir'] == 1:
playerShm['val'] += 1
else:
playerShm['val'] -= 1
def getRandomPipe():
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
def showScore(score):
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))
Xoffset += IMAGES['numbers'][digit].get_width()
def showOtherScore():
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(player2_score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.9))
Xoffset += IMAGES['numbers'][digit].get_width()
def checkCrash(player, upperPipes, lowerPipes):
"""returns True if player collders with base or pipes."""
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= BASEY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe, lPipe in zip(upperPipes, lowerPipes):
# upper and lower pipe rects
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and upper/lower pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
lHitmask = HITMASKS['pipe'][1]
# if bird collided with upipe or lpipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if uCollide or lCollide:
return [True, False]
return [False, False]
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in xrange(rect.width):
for y in xrange(rect.height):
if hitmask1[x1 + x][y1 + y] and hitmask2[x2 + x][y2 + y]:
return True
return False
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in xrange(image.get_width()):
mask.append([])
for y in xrange(image.get_height()):
mask[x].append(bool(image.get_at((x, y))[3]))
return mask
def Connect_to_second_player():
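    # Peer discovery: listen for another player's UDP broadcast for up to 10 seconds.
    # If one arrives first, we act as the TCP client and connect to them; on timeout
    # we become the server, broadcast our own announcement and accept their connections.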
# create a UDP socket for broadcasting
my_udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
my_udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
my_udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    # raise an exception if no data is received within the timeout
# my_udp_socket.setblocking(0)
my_udp_socket.bind((HOST, broadcast_port))
# global PORT
my_udp_socket.settimeout(10)
# broadcast message
msg = 'Player ' + str(ID) + ' connect with me via port ' + str(SEND_PORT) + ' and port ' + str(RECV_PORT)
global player2_ip, player2_send_port, player2_id, player2_recv_port
# my_socket.sendto(msg.encode(),('<broadcast>',8888))
global isServer
try:
while True:
data, address = my_udp_socket.recvfrom(4096)
data = str(data.decode())
if (int(data.split(' ')[-1]) != RECV_PORT):
if (data.startswith("Player")):
isServer = False
player2_recv_port = int(data.split(' ')[-1])
player2_send_port = int(data.split(' ')[-4])
player2_ip = address[0]
player2_id = data.split(' ')[1]
my_udp_socket.sendto(msg.encode(), address)
print('player 2 ip:', player2_ip, " port: ", player2_send_port)
                    time.sleep(10)
break
except socket.timeout:
# If no data is received, you get here, but it's not an error
# Ignore and continue
isServer = True
global send_score_socket
global recv_score_socket
    # create two sockets: one for sending the score and one for receiving the other player's score
recv_score_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
send_score_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if isServer:
# listen for the other player tcp connection request
print('sending broadcast: ' + msg)
my_udp_socket.sendto(msg.encode(), ('255.255.255.255', broadcast_port))
print("trying to receive broadcast")
recv_score_socket.bind((HOST, RECV_PORT))
recv_score_socket.listen(1)
send_score_socket.bind((HOST, SEND_PORT))
send_score_socket.listen(1)
player2_send_conn, player2_send_addr = recv_score_socket.accept()
player2_ip = player2_send_addr[0]
player2_send_port = player2_send_addr[1]
print('Connection for receiving established with ', player2_send_addr[0], ' with port = ', player2_send_port)
recv_score_socket = player2_send_conn
player2_recv_conn, player2_recv_addr = send_score_socket.accept()
player2_ip = player2_recv_addr[0]
player2_recv_port = player2_recv_addr[1]
        print('Connection for sending established with ', player2_recv_addr[0], ' with port = ', player2_recv_port)
# start the sending thread
send_score_socket = player2_recv_conn
else:
# initiate the connection with the other player
print(player2_ip)
send_score_socket.connect((player2_ip, player2_recv_port))
print("Connection for sending initiated with player : ", player2_id)
recv_score_socket.connect((player2_ip, player2_send_port))
print("Connection for receiving initiated with player : ", player2_id)
start_new_thread(recv_thread, ())
def recv_thread():
while get_score():
continue
def get_score():
global player2_score
global someone_still_playing
global my_gameover
try:
score = recv_score_socket.recv(1024).decode()
if "score" in score:
print("score received: ", score)
player2_score = int(score.split()[-1]) # message format score: 10
return True
# ToDo: Added
elif "ended" in score:
someone_still_playing = False
recv_score_socket.close()
print("player to has ended message received and receive socket successfully closed")
return False
except socket.timeout:
print("timed out")
return False
except socket.error:
print("disconnected")
return False
# ToDo: Added
def send_Score(score, is_ended=False):
global send_score_socket
global someone_still_playing
global my_gameover
msg = "score: " + str(score)
try:
if not is_ended:
send_score_socket.send(msg.encode())
# print("score sent")
else:
msg = "my game has ended score: " + str(score)
send_score_socket.send(msg.encode())
print("last message: I finished message sent")
send_score_socket.close()
print("send score socket successfully closed")
except socket.error:
print("send score socket disconnected")
def close_connections():
recv_score_socket.close()
send_score_socket.close()
if __name__ == '__main__':
main()
|
[
"yasminemedhat97@gmail.com"
] |
yasminemedhat97@gmail.com
|
a27232b2e85807c91a084f8cc591aab8d2abf72d
|
4914d636f995ecdcb69fd8180b576c168f731b9d
|
/ert-backend/project/config/settings/production.py
|
abb7bf2f27daada1a43c7fcf67f4651c9cf947b5
|
[] |
no_license
|
kohrohit/againstCOVID
|
c1e840c710e359b5e89287a455c92d7b6e90fcdc
|
4efc13646eacb1f44d783fde934ddbe2d4446fda
|
refs/heads/backend
| 2022-04-20T19:33:07.461156
| 2020-04-21T18:57:46
| 2020-04-21T18:57:46
| 255,864,943
| 0
| 0
| null | 2020-04-21T19:02:20
| 2020-04-15T09:16:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
from .base import *
# AWS S3 settings for static files
AWS_STORAGE_BUCKET_NAME = 'drf-static'
AWS_S3_REGION_NAME = 'ap-south-1'
AWS_ACCESS_KEY_ID = get_secret("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = get_secret('AWS_SECRET_ACCESS_KEY')
# Tell django-storages the domain to use to refer to static files
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
AWS_LOCATION = 'static'
AWS_DEFAULT_ACL = None
# Tell the staticfiles app to use S3Boto3 storage when writing the collected static files (when
# you run `collectstatic`).
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
STATIC_URL = 'https://%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, AWS_LOCATION)
MEDIAFILES_LOCATION = 'media'
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': get_secret("DATABASE_NAME"),
'USER': get_secret("DATABASE_USER"),
'PASSWORD': get_secret("DATABASE_PASSWORD"),
'HOST': get_secret("DATABASE_HOST"),
'PORT': get_secret("DATABASE_PORT"),
}
}
# REDIS Settings
REDIS_CONFIG = {
'HOST': get_secret("REDIS_HOST"),
'PORT': 6379,
'PASSWORD': get_secret("REDIS_PASSWORD")
}
MIDDLEWARE.insert(0, 'config.utility.middleware.ErrorLoggingMiddleWare')
ELASTIC_APM['ENVIRONMENT'] = 'production'
|
[
"aishwarydhare@gmail.com"
] |
aishwarydhare@gmail.com
|
875ff98291902fbdf522257d68d7c5f0394167ca
|
36210015abfb240aef4ea006b14fca0b340e0893
|
/jasmin/protocols/smpp/factory.py
|
9a02404c5ce280c3198eca8e43e3c9124e5b1455
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
flopraden/jasmin
|
7456d603efb2527267f2f2e56e6e8de33901b704
|
39b5ba99657da1bebb36b8c5f1853824ebbbc9f8
|
refs/heads/master
| 2021-01-22T16:05:40.332605
| 2015-10-05T06:41:10
| 2015-10-05T06:41:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,884
|
py
|
#pylint: disable-msg=W0401,W0611
import logging
import re
from logging.handlers import TimedRotatingFileHandler
from datetime import datetime, timedelta
from OpenSSL import SSL
from twisted.internet.protocol import ClientFactory
from twisted.internet import defer, reactor, ssl
from .stats import SMPPClientStatsCollector, SMPPServerStatsCollector
from .protocol import SMPPClientProtocol, SMPPServerProtocol
from .error import *
from .validation import SmppsCredentialValidator
from jasmin.vendor.smpp.twisted.server import SMPPServerFactory as _SMPPServerFactory
from jasmin.vendor.smpp.twisted.server import SMPPBindManager as _SMPPBindManager
from jasmin.vendor.smpp.pdu import pdu_types, constants
from jasmin.vendor.smpp.twisted.protocol import DataHandlerResponse
from jasmin.routing.Routables import RoutableSubmitSm
LOG_CATEGORY_CLIENT_BASE = "smpp.client"
LOG_CATEGORY_SERVER_BASE = "smpp.server"
class SmppClientIsNotConnected(Exception):
"""
    An exception raised when trying to use the smpp object while
    it is still None (before bind() has been callbacked)
"""
class SMPPClientFactory(ClientFactory):
protocol = SMPPClientProtocol
def __init__(self, config, msgHandler = None):
self.reconnectTimer = None
self.smpp = None
self.connectionRetry = True
self.config = config
# Setup statistics collector
self.stats = SMPPClientStatsCollector().get(cid = self.config.id)
self.stats.set('created_at', datetime.now())
# Set up a dedicated logger
self.log = logging.getLogger(LOG_CATEGORY_CLIENT_BASE+".%s" % config.id)
if len(self.log.handlers) != 1:
self.log.setLevel(config.log_level)
_when = self.config.log_rotate if hasattr(self.config, 'log_rotate') else 'midnight'
handler = TimedRotatingFileHandler(filename=self.config.log_file,
when = _when)
formatter = logging.Formatter(config.log_format, config.log_date_format)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.propagate = False
if msgHandler is None:
self.msgHandler = self.msgHandlerStub
else:
self.msgHandler = msgHandler
def buildProtocol(self, addr):
"""Provision protocol
"""
proto = ClientFactory.buildProtocol(self, addr)
# Setup logger
proto.log = self.log
return proto
def getConfig(self):
return self.config
def msgHandlerStub(self, smpp, pdu):
self.log.warn("msgHandlerStub: Received an unhandled message %s ..." % pdu)
def startedConnecting(self, connector):
self.log.info("Connecting to %s ..." % connector.getDestination())
def getExitDeferred(self):
"""Get a Deferred so you can be notified on disconnect and exited
This deferred is called once disconnection occurs without a further
reconnection retrys
"""
return self.exitDeferred
def clientConnectionFailed(self, connector, reason):
"""Connection failed
"""
self.log.error("Connection failed. Reason: %s" % str(reason))
if self.config.reconnectOnConnectionFailure and self.connectionRetry:
self.log.info("Reconnecting after %d seconds ..." % self.config.reconnectOnConnectionFailureDelay)
self.reconnectTimer = reactor.callLater(self.config.reconnectOnConnectionFailureDelay, self.reConnect, connector)
else:
self.connectDeferred.errback(reason)
self.exitDeferred.callback(None)
self.log.info("Exiting.")
def clientConnectionLost(self, connector, reason):
"""Connection lost
"""
self.log.error("Connection lost. Reason: %s" % str(reason))
if self.config.reconnectOnConnectionLoss and self.connectionRetry:
self.log.info("Reconnecting after %d seconds ..." % self.config.reconnectOnConnectionLossDelay)
self.reconnectTimer = reactor.callLater(self.config.reconnectOnConnectionLossDelay, self.reConnect, connector)
else:
self.exitDeferred.callback(None)
self.log.info("Exiting.")
def reConnect(self, connector = None):
if connector is None:
self.log.error("No connector to retry !")
else:
# Reset deferred if it were called before
if self.connectDeferred.called is True:
self.connectDeferred = defer.Deferred()
self.connectDeferred.addCallback(self.bind)
# And try to connect again
connector.connect()
def _connect(self):
self.connectionRetry = True
if self.config.useSSL:
self.log.info('Establishing SSL connection to %s:%d' % (self.config.host, self.config.port))
reactor.connectSSL(self.config.host, self.config.port, self, CtxFactory(self.config))
else:
self.log.info('Establishing TCP connection to %s:%d' % (self.config.host, self.config.port))
reactor.connectTCP(self.config.host, self.config.port, self)
self.exitDeferred = defer.Deferred()
self.connectDeferred = defer.Deferred()
return self.connectDeferred
def connectAndBind(self):
self._connect()
self.connectDeferred.addCallback(self.bind)
return self.connectDeferred
def disconnect(self):
if self.smpp is not None:
self.log.info('Disconnecting SMPP client')
return self.smpp.unbindAndDisconnect()
else:
return None
def stopConnectionRetrying(self):
"""This will stop the factory from reconnecting
It is used whenever a service stop has been requested, the connectionRetry flag
is reset to True upon connect() call
"""
self.log.info('Stopped automatic connection retrying.')
if self.reconnectTimer and self.reconnectTimer.active():
self.reconnectTimer.cancel()
self.reconnectTimer = None
self.connectionRetry = False
def disconnectAndDontRetryToConnect(self):
self.log.info('Ordering a disconnect with no further reconnections.')
self.stopConnectionRetrying()
return self.disconnect()
def bind(self, smpp):
self.smpp = smpp
if self.config.bindOperation == 'transceiver':
return smpp.bindAsTransceiver()
elif self.config.bindOperation == 'receiver':
return smpp.bindAsReceiver()
elif self.config.bindOperation == 'transmitter':
return smpp.bindAsTransmitter()
else:
raise SMPPClientError("Invalid bind operation: %s" % self.config.bindOperation)
def getSessionState(self):
if self.smpp is None:
return None
else:
return self.smpp.sessionState
class CtxFactory(ssl.ClientContextFactory):
def __init__(self, config):
self.smppConfig = config
def getContext(self):
self.method = SSL.SSLv23_METHOD
ctx = ssl.ClientContextFactory.getContext(self)
if self.smppConfig.SSLCertificateFile:
ctx.use_certificate_file(self.smppConfig.SSLCertificateFile)
return ctx
class SMPPServerFactory(_SMPPServerFactory):
protocol = SMPPServerProtocol
def __init__(self, config, auth_portal, RouterPB = None, SMPPClientManagerPB = None):
self.config = config
# A dict of protocol instances for each of the current connections,
# indexed by system_id
self.bound_connections = {}
self._auth_portal = auth_portal
self.RouterPB = RouterPB
self.SMPPClientManagerPB = SMPPClientManagerPB
# Setup statistics collector
self.stats = SMPPServerStatsCollector().get(cid = self.config.id)
self.stats.set('created_at', datetime.now())
# Set up a dedicated logger
self.log = logging.getLogger(LOG_CATEGORY_SERVER_BASE+".%s" % config.id)
if len(self.log.handlers) != 1:
self.log.setLevel(config.log_level)
handler = TimedRotatingFileHandler(filename=self.config.log_file,
when = self.config.log_rotate)
formatter = logging.Formatter(config.log_format, config.log_date_format)
handler.setFormatter(formatter)
self.log.addHandler(handler)
self.log.propagate = False
self.msgHandler = self.submit_sm_event
def submit_sm_event(self, system_id, *args):
"""This event handler will deliver the submit_sm to the right smppc connector.
Note that Jasmin deliver submit_sm messages like this:
- from httpapi to smppc (handled in jasmin.protocols.http.server)
- from smpps to smppc (this event handler)
Note: This event handler MUST behave exactly like jasmin.protocols.http.server.Send.render
"""
self.log.debug('Handling submit_sm event for system_id: %s' % system_id)
# Args validation
if len(args) != 2:
self.log.error('(submit_sm_event/%s) Invalid args: %s' % (system_id, args))
raise SubmitSmInvalidArgsError()
if not isinstance(args[1], pdu_types.PDURequest):
self.log.error('(submit_sm_event/%s) Received an unknown object when waiting for a PDURequest: %s' % (system_id, args[1]))
raise SubmitSmInvalidArgsError()
if args[1].id != pdu_types.CommandId.submit_sm:
self.log.error('(submit_sm_event/%s) Received a non submit_sm command id: %s' % (system_id, args[1].id))
raise SubmitSmInvalidArgsError()
if not isinstance(args[0], SMPPServerProtocol):
self.log.error('(submit_sm_event/%s) Received an unknown object when waiting for a SMPPServerProtocol: %s' % (system_id, args[0]))
raise SubmitSmInvalidArgsError()
proto = args[0]
user = proto.user
SubmitSmPDU = args[1]
# Update CnxStatus
user.getCnxStatus().smpps['submit_sm_request_count']+= 1
# Basic validation
        if SubmitSmPDU.params['destination_addr'] is None or len(SubmitSmPDU.params['destination_addr']) < 1:
self.log.error('(submit_sm_event/%s) SubmitSmPDU have no defined destination_addr' % system_id)
raise SubmitSmWithoutDestinationAddrError()
# Make Credential validation
v = SmppsCredentialValidator('Send', user, SubmitSmPDU)
v.validate()
# Update SubmitSmPDU by default values from user MtMessagingCredential
SubmitSmPDU = v.updatePDUWithUserDefaults(SubmitSmPDU)
if self.RouterPB is None:
self.log.error('(submit_sm_event/%s) RouterPB not set: submit_sm will not be routed' % system_id)
return
# Routing
routedConnector = None # init
routable = RoutableSubmitSm(SubmitSmPDU, user)
route = self.RouterPB.getMTRoutingTable().getRouteFor(routable)
if route is None:
self.log.error("No route matched from user %s for SubmitSmPDU: %s" % (user, SubmitSmPDU))
raise SubmitSmRouteNotFoundError()
# Get connector from selected route
self.log.debug("RouterPB selected %s for this SubmitSmPDU" % route)
routedConnector = route.getConnector()
# QoS throttling
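        # A throughput quota of N msg/sec implies a minimum delay of 1/N seconds
        # between two consecutive submit_sm from the same user; anything faster
        # is rejected with SubmitSmThroughputExceededError.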
if user.mt_credential.getQuota('smpps_throughput') >= 0 and user.getCnxStatus().smpps['qos_last_submit_sm_at'] != 0:
qos_throughput_second = 1 / float(user.mt_credential.getQuota('smpps_throughput'))
qos_throughput_ysecond_td = timedelta( microseconds = qos_throughput_second * 1000000)
qos_delay = datetime.now() - user.getCnxStatus().smpps['qos_last_submit_sm_at']
if qos_delay < qos_throughput_ysecond_td:
self.log.error("QoS: submit_sm_event is faster (%s) than fixed throughput (%s) for user (%s), rejecting message." % (
qos_delay,
qos_throughput_ysecond_td,
user
))
raise SubmitSmThroughputExceededError()
user.getCnxStatus().smpps['qos_last_submit_sm_at'] = datetime.now()
# Pre-sending submit_sm: Billing processing
bill = route.getBillFor(user)
self.log.debug("SubmitSmBill [bid:%s] [ttlamounts:%s] generated for this SubmitSmPDU" %
(bill.bid, bill.getTotalAmounts()))
charging_requirements = []
u_balance = user.mt_credential.getQuota('balance')
u_subsm_count = user.mt_credential.getQuota('submit_sm_count')
if u_balance is not None and bill.getTotalAmounts() > 0:
            # Ensure the user has enough balance to pay for submit_sm and submit_sm_resp
charging_requirements.append({'condition': bill.getTotalAmounts() <= u_balance,
'error_message': 'Not enough balance (%s) for charging: %s' %
(u_balance, bill.getTotalAmounts())})
if u_subsm_count is not None:
            # Ensure the user has enough submit_sm_count to cover the bill action (decrement_submit_sm_count)
charging_requirements.append({'condition': bill.getAction('decrement_submit_sm_count') <= u_subsm_count,
'error_message': 'Not enough submit_sm_count (%s) for charging: %s' %
(u_subsm_count, bill.getAction('decrement_submit_sm_count'))})
if self.RouterPB.chargeUserForSubmitSms(user, bill, requirements = charging_requirements) is None:
self.log.error('Charging user %s failed, [bid:%s] [ttlamounts:%s] (check router log)' %
(user, bill.bid, bill.getTotalAmounts()))
raise SubmitSmChargingError()
# Get priority value from SubmitSmPDU to pass to SMPPClientManagerPB.perspective_submit_sm()
priority = 0
if SubmitSmPDU.params['priority_flag'] is not None:
priority = SubmitSmPDU.params['priority_flag'].index
if self.SMPPClientManagerPB is None:
self.log.error('(submit_sm_event/%s) SMPPClientManagerPB not set: submit_sm will not be submitted' % system_id)
return
########################################################
# Send SubmitSmPDU through smpp client manager PB server
self.log.debug("Connector '%s' is set to be a route for this SubmitSmPDU" % routedConnector.cid)
c = self.SMPPClientManagerPB.perspective_submit_sm(routedConnector.cid,
SubmitSmPDU,
priority,
pickled = False,
submit_sm_resp_bill = bill.getSubmitSmRespBill(),
source_connector = proto)
# Build final response
if not c.result:
self.log.error('Failed to send SubmitSmPDU to [cid:%s]' % routedConnector.cid)
raise SubmitSmRoutingError()
else:
self.log.debug('SubmitSmPDU sent to [cid:%s], result = %s' % (routedConnector.cid, c.result))
self.log.info('SMS-MT [uid:%s] [cid:%s] [msgid:%s] [prio:%s] [from:%s] [to:%s] [content:%s]'
% (user.uid,
routedConnector.cid,
c.result,
priority,
SubmitSmPDU.params['source_addr'],
SubmitSmPDU.params['destination_addr'],
re.sub(r'[^\x20-\x7E]+','.', SubmitSmPDU.params['short_message'])))
return DataHandlerResponse(status=pdu_types.CommandStatus.ESME_ROK,
message_id=c.result)
def buildProtocol(self, addr):
"""Provision protocol with the dedicated logger
"""
proto = _SMPPServerFactory.buildProtocol(self, addr)
# Setup logger
proto.log = self.log
return proto
def addBoundConnection(self, connection, user):
"""
Overloading _SMPPServerFactory to remove dependency with config.systems
Jasmin removed systems from config as everything about credentials is
managed through User object
"""
system_id = connection.system_id
self.log.debug('Adding SMPP binding for %s' % system_id)
if system_id not in self.bound_connections:
self.bound_connections[system_id] = SMPPBindManager(user)
self.bound_connections[system_id].addBinding(connection)
bind_type = connection.bind_type
self.log.info("Added %s bind for '%s'. Active binds: %s." % (bind_type,
system_id,
self.getBoundConnectionCountsStr(system_id)))
def removeConnection(self, connection):
"""
Overloading _SMPPServerFactory to remove dependency with config.systems
Jasmin removed systems from config as everything about credentials is
managed through User object
"""
if connection.system_id is None:
self.log.debug("SMPP connection attempt failed without binding.")
else:
system_id = connection.system_id
bind_type = connection.bind_type
self.bound_connections[system_id].removeBinding(connection)
self.log.info("Dropped %s bind for '%s'. Active binds: %s." % (bind_type,
system_id,
self.getBoundConnectionCountsStr(system_id)))
# If this is the last binding for this service then remove the BindManager
if self.bound_connections[system_id].getBindingCount() == 0:
self.bound_connections.pop(system_id)
def canOpenNewConnection(self, user, bind_type):
"""
Overloading _SMPPServerFactory to remove dependency with config.systems
Jasmin removed systems from config as everything about credentials is
managed through User object
This method will check for authorization and quotas before allowing a new
connection
"""
# Can bind ?
if not user.smpps_credential.getAuthorization('bind'):
self.log.warning('New bind rejected for username: "%s", reason: authorization failure.' % user.username)
return False
        # Still didn't reach max_bindings?
elif user.smpps_credential.getQuota('max_bindings') is not None:
bind_count = user.getCnxStatus().smpps['bound_connections_count']['bind_transmitter']
bind_count+= user.getCnxStatus().smpps['bound_connections_count']['bind_receiver']
bind_count+= user.getCnxStatus().smpps['bound_connections_count']['bind_transceiver']
if bind_count >= user.smpps_credential.getQuota('max_bindings'):
self.log.warning('New bind rejected for username: "%s", reason: max_bindings limit reached.' %
user.username)
return False
return True
def unbindAndRemoveGateway(self, user):
"""
Overloading _SMPPServerFactory to remove dependency with config.systems
Jasmin removed systems from config as everything about credentials is
managed through User object
"""
user.smpps_credential.setAuthorization('bind', False)
d = self.unbindGateway(user.username)
return d
class SMPPBindManager(_SMPPBindManager):
"Overloads _SMPPBindManager to add user tracking"
def __init__(self, user):
_SMPPBindManager.__init__(self, system_id = user.username)
self.user = user
def addBinding(self, connection):
_SMPPBindManager.addBinding(self, connection)
# Update CnxStatus
self.user.getCnxStatus().smpps['bind_count']+= 1
self.user.getCnxStatus().smpps['bound_connections_count'][str(connection.bind_type)]+= 1
def removeBinding(self, connection):
_SMPPBindManager.removeBinding(self, connection)
# Update CnxStatus
self.user.getCnxStatus().smpps['unbind_count']+= 1
self.user.getCnxStatus().smpps['bound_connections_count'][str(connection.bind_type)]-= 1
|
[
"fourat@gmail.com"
] |
fourat@gmail.com
|
583df475e250658c05e7fa939aedf0cc7539e694
|
d253e0611a80c5649abae4dc78995b3a6fd4b61c
|
/count.py
|
d01433412cacc94e16c70670cef51cfb66bf1e93
|
[] |
no_license
|
manoj652/sentiment-analysis-and-moodmapping-of-hindu-newspaper
|
8f3e75f116a6e85f20f2e8013dd59c751ede0cda
|
d54454dc0c9ba245d0cf488b873a554dcaa88df8
|
refs/heads/master
| 2021-01-24T11:04:45.017679
| 2018-02-27T04:46:21
| 2018-02-27T04:46:21
| 123,073,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,021
|
py
|
# from string import punctuation
# from operator import itemgetter
# N = 10
# words = {}
# words_gen = (word.strip(punctuation).lower() for line in open("test1.txt")
# for word in line.split())
# for word in words_gen:
# words[word] = words.get(word, 0) + 1
# top_words = sorted(words.iteritems(), key=itemgetter(1), reverse=True)[:N]
# for word, frequency in top_words:
# print "%s: %d" % (word, frequency)
# from string import punctuation
# def sort_items(x, y):
# """Sort by value first, and by key (reverted) second."""
# return cmp(x[1], y[1]) or cmp(y[0], x[0])
# N = 10
# words = {}
# words_gen = (word.strip(punctuation).lower() for line in open("test1.txt")
# for word in line.split())
# for word in words_gen:
# words[word] = words.get(word, 0) + 1
# top_words = sorted(words.iteritems(), cmp=sort_items, reverse=True)[:N]
# for word, frequency in top_words:
# print "%s: %d" % (word, frequency)
# from string import punctuation
# N = 10
# words = {}
# words_gen = (word.strip(punctuation).lower() for line in open("test1.txt")
# for word in line.split())
# for word in words_gen:
# words[word] = words.get(word, 0) + 1
# top_words = sorted(words.iteritems(),
# cmp=lambda x, y: cmp(x[1], y[1]) or cmp(y[0], x[0]),
# reverse=True)[:N]
# for word, frequency in top_words:
# print "%s: %d" % (word, frequency)
from string import punctuation
N = 10
words = {}
words_gen = (word.strip(punctuation).lower() for line in open("test1.txt")
for word in line.split())
for word in words_gen:
words[word] = words.get(word, 0) + 1
top_words = sorted(words.iteritems(),
key=lambda(word, count): (-count, word))[:N]
for word, frequency in top_words:
print "%s: %d" % (word, frequency)
# import urllib
# import operator
# txtFile = urllib.urlopen("test1.txt").readlines()
# txtFile = " ".join(txtFile) # this with .readlines() replaces new lines with spaces
# txtFile = "".join(char for char in txtFile if char.isalnum() or char.isspace()) # removes everything that's not alphanumeric or spaces.
# word_counter = {}
# for word in txtFile.split(" "): # split in every space.
# if len(word) > 0 and word != '\r\n':
# if word not in word_counter: # if 'word' not in word_counter, add it, and set value to 1
# word_counter[word] = 1
# else:
# word_counter[word] += 1 # if 'word' already in word_counter, increment it by 1
# for i,word in enumerate(sorted(word_counter,key=word_counter.get,reverse=True)[:10]):
# # sorts the dict by the values, from top to botton, takes the 10 top items,
# print "%s: %s - %s"%(i+1,word,word_counter[word])
|
[
"noreply@github.com"
] |
manoj652.noreply@github.com
|
e5eec9ae3eb7e318b857ee0890ed6aa5e2c7fb18
|
9d9d51385d4c4ac92dea36c85c3ccfde7925fbf5
|
/appionlib/apImage/imagefilter.py
|
59e419989fef0423974a7f1db6eac68b72751a90
|
[] |
no_license
|
leschzinerlab/Tiltpicker_import_patched
|
0ae14eae93018378fc70d9bb0e3cb449cde7e040
|
f2885c96c16edb7ff2ed140458d22aeafd645acd
|
refs/heads/master
| 2016-09-05T23:17:19.035803
| 2015-07-19T18:45:33
| 2015-07-19T18:45:33
| 39,341,623
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,146
|
py
|
#Part of the new pyappion
## pythonlib
import os
import time
## numpy
import numpy
import pyami.quietscipy
from scipy import ndimage
from numpy import linalg
## appion
from appionlib import apDisplay
from appionlib.apSpider import filters
## pyami
from pyami import imagefun, fftengine
ffteng = fftengine.fftEngine()
####
# This is a low-level file with NO database connections
# Please keep it this way
####
#=========================
def _processImage(imgarray, bin=1, apix=1.0, lowpass=0.0, highpass=0.0,
planeReg=True, median=0, invert=False, pixlimit=0, msg=True):
"""
standard processing for an image
"""
simgarray = imgarray.copy()
if median > 0:
simgarray = ndimage.median_filter(simgarray, size=median)
simgarray = binImg(simgarray, bin)
if planeReg is True:
simgarray = planeRegression(simgarray, msg)
#simgarray = highPassFilter(simgarray, apix, bin, highpass, msg=msg)
simgarray = fermiHighPassFilter(simgarray, apix, bin, highpass, msg=msg)
simgarray = pixelLimitFilter(simgarray, pixlimit)
simgarray = lowPassFilter(simgarray, apix, bin, lowpass, msg)
#simgarray = fermiLowPassFilter(simgarray, apix, bin, lowpass, msg)
if invert is True:
simgarray = invertImage(simgarray)
simgarray = 255.0*(normRange(simgarray)+1.0e-7)
return simgarray
#=========================
def preProcessImage(imgarray, bin=None, apix=None, lowpass=None, planeReg=None,
median=None, highpass=None, correct=False, invert=None, pixlimit=None, msg=None,
params={}):
"""
standard processing for an image
"""
startt = time.time()
#MESSAGING
if msg is None:
if 'background' in params:
msg = not params['background']
else:
msg = True
#BINNING
if bin is None:
if 'bin' in params:
bin = params['bin']
else:
bin = 1
#PLANE REGRESSION
if planeReg is None:
if 'planereg' in params:
planeReg = params['planereg']
else:
planeReg = False
#ANGSTROMS PER PIXEL
if apix is None:
if 'apix' in params:
apix = params['apix']
else:
apDisplay.printError("'apix' is not defined in preProcessImage()")
#MEDIAN FILTER
if median is None:
if 'median' in params:
median = params['median']
else:
median = 0
#LOW PASS FILTER
if lowpass is None:
if 'lowpass' in params and params['lowpass'] is not None:
lowpass = params['lowpass']
elif 'lp' in params and params['lp'] is not None:
lowpass = params['lp']
else:
lowpass = 0
#INVERT IMAGE
if invert is None:
if 'invert' in params:
invert = params['invert']
else:
invert = False
apDisplay.printWarning("'invert' is not defined in preProcessImage()")
#HIGH PASS FILTER
if highpass is None:
if 'highpass' in params:
highpass = params['highpass']
elif 'hp' in params:
highpass = params['hp']
else:
highpass = 0
#PIXEL LIMITATION FILTER
if pixlimit is None:
if 'pixlimit' in params:
pixlimit = params['pixlimit']
else:
pixlimit = 0
#HIGH PASS FILTER => PLANE REGRESSION
result = _processImage(imgarray, bin, apix, lowpass, highpass, planeReg, median, invert, pixlimit, msg)
if msg is True:
apDisplay.printMsg("filtered image in "+apDisplay.timeString(time.time()-startt))
return result
#=========================
def normRange(imgarray):
"""
normalize the range of an image between 0 and 1
"""
min1=imgarray.min()
max1=imgarray.max()
if min1 == max1:
return imgarray - min1
return (imgarray - min1)/(max1 - min1)
#=========================
def binImg(imgarray, bin=1, warn=True):
"""
returns a binned image of a 2D image
"""
if bin <= 1:
return imgarray
oldshape = numpy.asarray(imgarray.shape)
remain = oldshape % bin
if remain.any():
maxx = int(oldshape[0]/bin)*bin
maxy = int(oldshape[1]/bin)*bin
cutshape = numpy.asarray((maxx, maxy))
if warn is True:
apDisplay.printWarning("rescaling array to fit bin dimensions: "+str(oldshape)+" -> "+str(cutshape))
imgarray = frame_cut(imgarray, cutshape)
newshape = numpy.asarray(oldshape)/bin
tmpshape = (newshape[0], bin, newshape[1], bin)
f = bin * bin
binned = numpy.sum(numpy.sum(numpy.reshape(imgarray, tmpshape), 1), 2) / f
return binned
#=========================
def invertImage(imgarray):
"""
returns a contrast inverted image
"""
return -1.0*imgarray
#=========================
def filterImg(imgarray,apix=1.0,rad=0.0,bin=1):
#TEMPORARY ALIAS FOR lowPassFilter
return lowPassFilter(imgarray,apix=apix,bin=1,radius=rad)
#=========================
def pixelLimitFilter(imgarray, pixlimit=0):
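    """
    clip pixel values to within +/- pixlimit standard deviations of the mean;
    a pixlimit below 0.1 disables the filter
    """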
if pixlimit < 0.1:
return imgarray
mean1 = imgarray.mean()
std1 = imgarray.std()
upperbound = mean1 + pixlimit * std1
lowerbound = mean1 - pixlimit * std1
# print mean1,std1
imgarray2 = numpy.asarray(imgarray)
# print imgarray2
imgarray2 = numpy.where(imgarray2 > upperbound, upperbound, imgarray2)
imgarray2 = numpy.where(imgarray2 < lowerbound, lowerbound, imgarray2)
# print imgarray2
return imgarray2
#=========================
def lowPassFilter(imgarray, apix=1.0, bin=1, radius=0.0, msg=True):
"""
low pass filter image to radius resolution
"""
if radius is None or radius == 0:
if msg is True:
apDisplay.printMsg("skipping low pass filter")
return(imgarray)
sigma=float(radius/apix/float(bin))
return ndimage.gaussian_filter(imgarray, sigma=sigma/3.0)
#=========================
def fermiHighPassFilter(imgarray, apix=1.0, bin=1, radius=0.0, msg=True):
"""
Fermi high pass filter image to radius resolution
"""
if radius is None or radius == 0:
if msg is True:
apDisplay.printMsg("skipping high pass filter")
return(imgarray)
pixrad = float(radius/apix/float(bin))
filtimg = filters.fermiHighPassFilter(imgarray, pixrad)
return filtimg
#=========================
def fermiLowPassFilter(imgarray, apix=1.0, bin=1, radius=0.0, msg=True):
"""
Fermi low pass filter image to radius resolution
"""
if radius is None or radius == 0:
if msg is True:
apDisplay.printMsg("skipping low pass filter")
return imgarray
pixrad = float(radius/apix/float(bin))
if pixrad < 2.0:
apDisplay.printWarning("low pass filter radius "+str(round(pixrad,2))+" is less than 2 pixels; skipping filter")
return imgarray
filtimg = filters.fermiLowPassFilter(imgarray, pixrad)
return filtimg
#=========================
def highPassFilter(imgarray, apix=1.0, bin=1, radius=0.0, localbin=8, msg=True):
"""
high pass filter image to radius resolution
"""
if radius is None or radius < 1 or imgarray.shape[0] < 256:
if msg is True:
apDisplay.printMsg("skipping high pass filter")
return(imgarray)
try:
bimgarray = binImg(imgarray, localbin)
sigma=float(radius/apix/float(bin*localbin))
filtimg = ndimage.gaussian_filter(bimgarray, sigma=sigma)
expandimg = scaleImage(filtimg, localbin)
expandimg = frame_constant(expandimg, imgarray.shape)
filtimg = imgarray - expandimg
except:
apDisplay.printWarning("High Pass Filter failed")
return imgarray
return filtimg
#=========================
def maskHighPassFilter(imgarray, apix=1.0, bin=1, zero_res=0.0, one_res=0.0, msg=True):
"""
high pass filter that ensures the fft values within zero_radius is zero to avoid
interference of really strong structure factors, only works right for square image
"""
if one_res is None or one_res < 1 or zero_res < 1 or imgarray.shape[0] < 256:
if msg is True:
apDisplay.printMsg("skipping high pass filter")
return(imgarray)
shape = imgarray.shape
zero_radius = apix*min(shape)/zero_res/bin
one_radius = apix*min(shape)/one_res/bin
print zero_radius, one_radius
try:
filtimg = _maskHighPassFilter(imgarray,zero_radius, one_radius)
    except:
        apDisplay.printWarning("Mask High Pass Filter failed")
        return imgarray
return filtimg
#=========================
def _maskHighPassFilter(a,zero_radius,one_radius):
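    # Suppress the low-frequency center of the FFT: transform, swap quadrants
    # so DC is centered, apply the radial mask, swap back, inverse-transform.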
if zero_radius == 0 or zero_radius > one_radius:
return a
fft = ffteng.transform(a)
fft = imagefun.swap_quadrants(fft)
_center_mask(fft,zero_radius,one_radius)
bfft = imagefun.swap_quadrants(fft)
b = ffteng.itransform(bfft)
return b
#=========================
def _gradient(cs_shape,zeroradius):
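    # Radial ramp: 0 inside zeroradius, rising linearly to 1 at oneradius.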
oneradius = min(cs_shape[0]/2.0,cs_shape[1]/2.0)
a = numpy.indices(cs_shape)
cut = zeroradius/float(oneradius)
radii = numpy.hypot(a[0,:]-(cs_shape[0]/2.0-0.5),a[1,:]-(cs_shape[1]/2.0-0.5))/oneradius
def _grad(r):
return (r-cut)/(1-cut)
g = numpy.piecewise(radii,[radii < cut,numpy.logical_and(radii < 1, radii >=cut),
radii>=1-cut],[0,_grad,1])
return g
#=========================
def _center_mask(a, zero_radius,one_radius):
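    # Apply the radial ramp to the central square of the quadrant-swapped FFT.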
shape = a.shape
center = shape[0]/2, shape[1]/2
center_square = a[center[0]-one_radius:center[0]+one_radius, center[1]-one_radius:center[1]+one_radius]
cs_shape = center_square.shape
cs_center = cs_shape[0]/2, cs_shape[1]/2
circ = _gradient(cs_shape,zero_radius)
center_square[:] = center_square * circ.astype(center_square.dtype)
#=========================
def planeRegression(imgarray, msg=True):
"""
performs a two-dimensional linear regression and subtracts it from an image
essentially a fast high pass filter
"""
    ### create index arrays, e.g., [0, 1, 2, ..., N-1]
def retx(y,x):
return x
def rety(y,x):
return y
xarray = numpy.fromfunction(retx, imgarray.shape, dtype=numpy.float32)
yarray = numpy.fromfunction(rety, imgarray.shape, dtype=numpy.float32)
xsize = imgarray.shape[0]
ysize = imgarray.shape[1]
xarray = xarray/(ysize-1.0) - 0.5
yarray = yarray/(xsize-1.0) - 0.5
### get running sums
count = float(xsize*ysize)
xsum = xarray.sum()
xsumsq = (xarray*xarray).sum()
ysum = yarray.sum()
ysumsq = (yarray*yarray).sum()
xysum = (xarray*yarray).sum()
xzsum = (xarray*imgarray).sum()
yzsum = (yarray*imgarray).sum()
zsum = imgarray.sum()
zsumsq = (imgarray*imgarray).sum()
### create linear algebra matrices
leftmat = numpy.array( [[xsumsq, xysum, xsum], [xysum, ysumsq, ysum], [xsum, ysum, count]], dtype=numpy.float64)
rightmat = numpy.array( [xzsum, yzsum, zsum], dtype=numpy.float64)
    ### solve the linear system for the least-squares plane coefficients
resvec = linalg.solve(leftmat,rightmat)
### show solution
if msg is True:
apDisplay.printMsg("plane_regress: x-slope: %.3f, y-slope: %.3f, xy-intercept: %.3f"
%(resvec[0], resvec[1], resvec[2]))
### subtract plane from array
newarray = imgarray - xarray*resvec[0] - yarray*resvec[1] - resvec[2]
return newarray
#=========================
def scaleImage(imgdata, scale):
"""
scale an image
"""
if scale == 1.0:
return imgdata
return ndimage.zoom(imgdata, scale, order=1)
#=========================
def correctImage(imgdata, sessionname):
"""
Correct an image using the old method:
- no bias correction
- dark correction is not time dependent
"""
rawimgarray = imgdata['image']
from appionlib import apDatabase
darkarray, normarray = apDatabase.getDarkNorm(sessionname, imgdata['camera'])
correctedimgarray = normarray * (rawimgarray - darkarray)
return correctedimgarray
#=========================
def frame_cut(a, newshape):
mindimx = int( (a.shape[0] / 2.0) - (newshape[0] / 2.0) )
maxdimx = int( (a.shape[0] / 2.0) + (newshape[0] / 2.0) )
mindimy = int( (a.shape[1] / 2.0) - (newshape[1] / 2.0) )
maxdimy = int( (a.shape[1] / 2.0) + (newshape[1] / 2.0) )
return a[mindimx:maxdimx, mindimy:maxdimy]
#=========================
def frame_constant(a, shape, cval=0):
"""
    frame_constant creates an oversized copy of 'a' with new 'shape'
    and the contents of 'a' in the center. The boundary pixels are
    filled with the constant value 'cval'.
    >>> a = numpy.arange(16).reshape((4,4))
>>> frame_constant(a, (8,8), cval=42)
array(
[[42, 42, 42, 42, 42, 42, 42, 42],
[42, 42, 42, 42, 42, 42, 42, 42],
[42, 42, 0, 1, 2, 3, 42, 42],
[42, 42, 4, 5, 6, 7, 42, 42],
[42, 42, 8, 9, 10, 11, 42, 42],
[42, 42, 12, 13, 14, 15, 42, 42],
[42, 42, 42, 42, 42, 42, 42, 42],
[42, 42, 42, 42, 42, 42, 42, 42]])
"""
b = numpy.zeros(shape, dtype=a.dtype)
delta = (numpy.array(b.shape) - numpy.array(a.shape))
dy = delta[0] // 2
dx = delta[1] // 2
my = a.shape[0] + dy
mx = a.shape[1] + dx
b[dy:my, dx:mx] = a # center
b[:dy,dx:mx] = cval # top
b[my:,dx:mx] = cval # bottom
b[dy:my, :dx] = cval # left
b[dy:my, mx:] = cval # right
b[:dy, :dx] = cval # topleft
b[:dy, mx:] = cval # topright
b[my:, :dx] = cval # bottomleft
b[my:, mx:] = cval # bottomright
return b
#=========================
def spiderTransform(a, rot=0, shift=(0,0), mirror=False, order=2):
"""
rotates (in degrees) about an off-center pixel, then shifts (in pixels) and last mirrors an array
FROM http://www.wadsworth.org/spider_doc/spider/docs/man/apmq.html
UNTESTED
"""
### make a copy
b = a
### rotate is positive, but shifted by a half pixel
b = ndimage.shift(b, shift=(-0.5, -0.5), mode='wrap', order=order)
b = ndimage.rotate(b, angle=rot, reshape=False, mode='reflect', order=order)
b = ndimage.shift(b, shift=(0.5, 0.5), mode='wrap', order=order)
# shift is in rows/columns not x,y
rowcol = (shift[1],shift[0])
b = ndimage.shift(b, shift=rowcol, mode='reflect', order=order)
# mirror the image about the y-axis, i.e. flip left-right
if mirror is True:
b = numpy.fliplr(b)
return b
#=========================
def xmippTransform(a, rot=0, shift=(0,0), mirror=False, order=2):
"""
shift, mirror, then rotate (in degrees) about an off-center pixel
rotates (in degrees) then shifts (in pixels) then mirrors an array, just like SPIDER
FROM http://xmipp.cnb.uam.es/twiki/bin/view/Xmipp/AlignementParametersNote
"""
### make a copy
b = a
### shift is in rows/columns not x,y
rowcol = (shift[1],shift[0])
b = ndimage.shift(b, shift=rowcol, mode='reflect', order=order)
### mirror the image about the y-axis, i.e. flip left-right
if mirror is True:
b = numpy.fliplr(b)
### rotate is positive, but shifted by a half pixel
b = ndimage.shift(b, shift=(-0.5, -0.5), mode='wrap', order=order)
b = ndimage.rotate(b, angle=-1*rot, reshape=False, mode='reflect', order=order)
b = ndimage.shift(b, shift=(0.5, 0.5), mode='wrap', order=order)
return b
####
# This is a low-level file with NO database connections
# Please keep it this way
####
|
[
"michael.a.cianfrocco@gmail.com"
] |
michael.a.cianfrocco@gmail.com
|
e22511135a4e7da9ed8d487fd03d90013e30cf6e
|
bfee1e84d58ea829c75c83dc63d2cde7bc7008be
|
/echo.py
|
d9cb0d9885f2e104a8c75b41d3e5b81f40c32f98
|
[] |
no_license
|
social-coding-kr/cctv-app
|
be01aae87df359ddf4d3062fbe0f03c6f31a800c
|
bbc94b53142dbabcbdca23992b2e7292a77f2500
|
refs/heads/master
| 2020-04-22T16:58:22.856993
| 2016-02-15T13:21:54
| 2016-02-15T13:21:54
| 40,083,225
| 1
| 1
| null | 2015-10-24T19:30:33
| 2015-08-02T15:12:45
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 434
|
py
|
import socket
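# Minimal blocking TCP echo server: accept one client at a time and echo
# back everything it sends until the connection closes.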
host = ''
port = 9030
backlog = 5
size = 10240
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(backlog)
while 1:
client, address = s.accept()
while 1:
data = client.recv(size)
        if data:
client.send(data)
print(data)
else:
client.close()
print('----- NO MORE DATA -----')
break
|
[
"foryou8904@gmail.com"
] |
foryou8904@gmail.com
|
e5ceb4b64c19a9e21727c3123d215a1d28d6c014
|
10cdf6bb22488c165e480b2355d491abf8c1c6d9
|
/src/picross_processing.py
|
f1b8b4622e548cd79e3bc74b233cdd351151750c
|
[] |
no_license
|
TommyWoh/pic_picross
|
771fa35f8eba59193a430ed4cd052f8a8b4fd621
|
9568c15a1f1dcb1f4d0dbb5f80ecba85e6395fd5
|
refs/heads/master
| 2020-12-22T16:11:36.198212
| 2018-04-18T15:44:24
| 2018-04-18T15:44:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,975
|
py
|
#encoding:utf-8
import os
import cv2
import numpy as np
import sys
from constants import constans as co
from picross_verify import *
class PicrossProcessing:
__img = []
__box_size = 10
__row_hint = []
__column_hint = []
def __init__(self, img, box_size):
self.__img = img
self.__box_size = box_size
    #
    # Picross generation logic
    #
    # Compute the picross hint numbers
def __calc_pic_hint(self):
column_num = []
row_num = []
        # Row-direction pass
for i in range(0, len(self.__img)):
str_num = ""
count_flag = False
count = 0
for k in range(0, len(self.__img[0])):
if self.__img[i][k] == 0:
if count_flag:
count += 1
else:
count_flag = True
count = 1
else:
if count_flag:
str_num += str(count) + ","
count_flag = False
count = 0
if count != 0:
str_num += str(count) + ","
if str_num == "":
row_num.append('0')
else:
row_num.append(str_num[:-1])
        # Column-direction pass
for j in range(0, len(self.__img[0])):
str_num = ""
count_flag = False
count = 0
for k in range(0, len(self.__img)):
if self.__img[k][j] == 0:
if count_flag:
count += 1
else:
count_flag = True
count = 1
else:
if count_flag:
str_num += str(count) + ","
count_flag = False
count = 0
if count != 0:
str_num += str(count) + ","
if str_num == "":
column_num.append('0')
else:
column_num.append(str_num[:-1])
self.__row_hint = row_num
self.__column_hint = column_num
    # Return the length of the longest hint string
def __calc_pic_num_length(self, num_list):
max_num = 0
for i in range(0, len(num_list)):
tmp = len(num_list[i])
if max_num < tmp:
max_num = tmp
return max_num
    #
    # Picross drawing logic
    #
    # Draw the filled dots
def __draw_pic_dot(self, picross_img, hint_width, hint_height):
for y in range(0, len(self.__img)):
for x in range(0, len(self.__img[0])):
if self.__img[y][x] == 0:
piv_x = hint_width + x * self.__box_size
piv_y = hint_height + y * self.__box_size
cv2.rectangle(picross_img, (piv_x, piv_y), (piv_x + self.__box_size, piv_y + self.__box_size), (0, 0, 0), cv2.FILLED)
    # Draw the grid lines
def __draw_pic_line(self, picross_img, hint_width, hint_height):
row_length = hint_width + len(self.__img[0]) * self.__box_size
column_length = hint_height + len(self.__img) * self.__box_size
        # Draw the row lines
for y in range(0, len(self.__img) + 1):
if y % co.BOLD_LINE_SPAN == 0:
cv2.line(picross_img, (0, hint_height + y * self.__box_size), (row_length, hint_height + y * self.__box_size), (0, 0, 0), co.BOLD_LINE_SIZE)
else:
cv2.line(picross_img, (0, hint_height + y * self.__box_size), (row_length, hint_height + y * self.__box_size), (0, 0, 0), co.NORMAL_LINE_SIZE)
        # Draw the column lines
for x in range(0, len(self.__img[0]) + 1):
if x % co.BOLD_LINE_SPAN == 0:
cv2.line(picross_img, (hint_width + x * self.__box_size, 0), (hint_width + x * self.__box_size, column_length), (0, 0, 0), co.BOLD_LINE_SIZE)
else:
cv2.line(picross_img, (hint_width + x * self.__box_size, 0), (hint_width + x * self.__box_size, column_length), (0, 0, 0), co.NORMAL_LINE_SIZE)
    # Draw the hint numbers
def __draw_pic_hint(self, picross_img, hint_width, hint_height):
        # Draw the row hints
for y in range(0, len(self.__img)):
hint = self.__row_hint[y][::-1]
for i in range(0, len(hint)):
cv2.putText(picross_img, hint[i], \
(int(hint_width - co.ROW_HINT_LINE_WIDTH_MARGIN - co.ROW_HINT_MARGIN * i), \
int(hint_height + y * self.__box_size + co.ROW_HINT_LINE_HEIGHT_MARGIN)), \
cv2.FONT_HERSHEY_SIMPLEX, co.HINT_FONT_SIZE, (0, 0, 0),\
co.HINT_FONT_WIDTH, cv2.LINE_AA)
        # Draw the column hints
for x in range(0, len(self.__img[0])):
            # Convert to a vertical layout (one number per line)
hint = self.__column_hint[x].split(',')
            # Reverse the order
hint = hint[::-1]
for i in range(0, len(hint)):
if int(hint[i]) >= 10:
cv2.putText(picross_img, hint[i],\
(int(hint_width + x * self.__box_size + co.COLUMN_HINT_LINE_WIDTH_MARGIN - co.COLUMN_HINT_DIGIT_MARGIN),\
int(hint_height - co.COLUMN_HINT_LINE_HEIGHT_MARGIN - i * co.COLUMN_HINT_MARGIN)), \
cv2.FONT_HERSHEY_SIMPLEX, co.HINT_FONT_SIZE, (0, 0, 0),\
co.HINT_FONT_WIDTH, cv2.LINE_AA)
else:
cv2.putText(picross_img, hint[i],\
(int(hint_width + x * self.__box_size + co.COLUMN_HINT_LINE_WIDTH_MARGIN),\
int(hint_height - co.COLUMN_HINT_LINE_HEIGHT_MARGIN - i * co.COLUMN_HINT_MARGIN)), \
cv2.FONT_HERSHEY_SIMPLEX, co.HINT_FONT_SIZE, (0, 0, 0),\
co.HINT_FONT_WIDTH, cv2.LINE_AA)
    # Main drawing / GUI control entry point
def draw_main(self):
        # Derive the hint (edge) portions of the picross
self.__calc_pic_hint()
row_num_length = self.__calc_pic_num_length(self.__row_hint)
column_num_length = self.__calc_pic_num_length(self.__column_hint)
        # Verify that the picross is solvable
is_solved_flag = picross_verify(self.__img, self.__row_hint, self.__column_hint)
if is_solved_flag:
            # Length of the hint area
hint_width = row_num_length * co.HINT_MARGIN_WIDTH
hint_height = column_num_length * co.HINT_MARGIN_WIDTH
pic_width = len(self.__img[0]) * self.__box_size + hint_width
pic_height = len(self.__img) * self.__box_size + hint_height
            #
            # GUI setup
            #
size = (pic_height + co.WINDOW_MARGIN_HEIGHT, pic_width + co.WINDOW_MARGIN_WIDTH, 3)
            # Fill the canvas with white via fill()
picross_img = np.zeros(size, dtype=np.uint8)
picross_img.fill(255)
            # Draw the picross dot portion
            ## Create the directory for saved images
if not os.path.exists("./img"):
os.mkdir("./img")
            ## Create the blank answer sheet
self.__draw_pic_line(picross_img, hint_width, hint_height)
self.__draw_pic_hint(picross_img, hint_width, hint_height)
cv2.namedWindow("Picross Paper Image", cv2.WINDOW_AUTOSIZE)
cv2.imshow("Picross Paper Image",picross_img)
cv2.imwrite("./img/picross_paper.png", picross_img)
            ## Save the answer
self.__draw_pic_dot(picross_img, hint_width, hint_height)
cv2.namedWindow("Picross Answer Image", cv2.WINDOW_AUTOSIZE)
cv2.imshow("Picross Answer Image",picross_img)
cv2.imwrite("./img/picross_ans.png", picross_img)
else:
print("Error: Sorry, we could not make the solvable picross in this picture.")
|
[
"takowasakun@gmail.com"
] |
takowasakun@gmail.com
|
8dce9768a9717ae3aa41c814c7f3e62c03c93efb
|
2a1059be149fb2eb01e0d12a98752ecccada20a6
|
/F_flask/Part4_数据库/venv/Scripts/rst2s5.py
|
c33c7d678f2b9dc3af5134065496fe8269913ce3
|
[] |
no_license
|
Jhon-Chen/Code_Practice
|
0796c0eb4f3e4ba82e6da9cd5dd19afb5ccec51d
|
e4bfa900b95764880e2ca2fcfb0389113f570147
|
refs/heads/master
| 2022-12-11T10:02:14.895722
| 2019-09-08T01:46:32
| 2019-09-08T01:46:32
| 186,768,893
| 0
| 1
| null | 2022-12-08T02:34:33
| 2019-05-15T07:06:10
|
Python
|
UTF-8
|
Python
| false
| false
| 692
|
py
|
#!E:\GitHub\Code_Practice\F_flask\Part4_数据库\venv\Scripts\python.exe
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
|
[
"17368089403@163.com"
] |
17368089403@163.com
|
afa610a90ffd1caff0fe0608da948d6c705b9c3c
|
56953d87029820bcc695f428ea5d663e827aaf2d
|
/array.py
|
18f02be7025ce72a468d50accd3446e0b6a60378
|
[] |
no_license
|
anishakthi/pythonPrograms
|
d5b1230e8c04b35433125300ab6a869a6591c997
|
2dfc44be7fd9ab8a2a898b4c03d3a6bcdcf5f5d8
|
refs/heads/master
| 2021-01-19T19:39:48.375611
| 2017-04-16T18:29:23
| 2017-04-16T18:29:23
| 88,434,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
arr = [1,2,-3,4]
size = len(arr)
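# print the elements in reverse order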
for i in range(0, size):
print arr[size-1]
size -= 1
print sum(arr)
# printing values
for i in arr:
print i
|
[
"anishakthi@gmail.com"
] |
anishakthi@gmail.com
|
0d9fa0cc1c27be5db42061747766a34a70099a4d
|
98f94bef4b2ca15a47f16db107764eeb600e2f3c
|
/SoftUniExamTasks/Fundamentals Exams/03. Mid Exam Retake/02. Shoot for the Win.py
|
85f4f65b264b1a88ea02ea1004f89228cb968bb3
|
[
"MIT"
] |
permissive
|
Pittor052/SoftUniExamTasks
|
2cb382cf63f4bd630e20dcbbdde08b08e5246da1
|
7256a7f7a11c3308d3fde2e7ef5cc703cc8dba4a
|
refs/heads/main
| 2023-08-15T11:37:17.344707
| 2021-10-01T11:19:08
| 2021-10-01T11:19:08
| 396,364,687
| 0
| 0
| null | 2021-08-24T09:03:41
| 2021-08-15T13:44:31
|
Python
|
UTF-8
|
Python
| false
| false
| 664
|
py
|
targets = list(map(lambda x: int(x), input().split()))
shot_targets = []
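# Game loop: each command is an index to shoot. The shot target becomes -1;
# every remaining un-shot target larger than the shot value shrinks by it,
# while smaller or equal ones grow by it.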
while True:
command = input()
if command == 'End':
print(f"Shot targets: {targets.count(-1)} -> ", end="")
print(*targets, sep=" ")
break
index = int(command)
if not 0 <= index <= len(targets) - 1:
continue
test = targets[index]
targets[index] = -1
shot_targets.append(index)
for i in range(len(targets)):
if i in shot_targets:
continue
if test < targets[i]:
targets[i] -= test
continue
if test >= targets[i] and not index == i:
targets[i] += test
|
[
"lazar_off@yahoo.com"
] |
lazar_off@yahoo.com
|
ae3a181d26ac2e6179b21fa9b92f8445d21ec875
|
099c179887b43a2925d674210e782483bf440636
|
/city.py
|
ff1e7770e31763f24fa6615f0ef84aba6b4eb47b
|
[] |
no_license
|
c2huc2hu/Isometric-Game
|
31d50d43aed44a8c02ba93a22c7aa183bf9e3287
|
b1325ac31d3116d62502f5811dba01d68d31abd4
|
refs/heads/master
| 2021-01-21T07:30:27.228039
| 2014-10-21T03:43:13
| 2014-10-21T03:43:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,814
|
py
|
from product import Product
from unit import Unit
team_names = ["A team", "B team", "C team"] #make sure the version in main stays current too.
team_colours = [0xFF0000, 0x880088, 0xFF0088]
class City():
def __init__ (self, name, allegiance, x, y):
self.name = name
self.allegiance = allegiance
self.colour = team_colours [self.allegiance]
self.x = x
self.y = y
self.city_improvements = []
self.current_product = Product.NOTHING
self.current_progress = 0
#values that will change based on the terrain, but I'm setting them as constant for now
self.productivity = 3
def set_product (self, product):
"Begin producing something"
self.current_product = product
#decrease current_progress because building walls doesn't help you train units,
#but remember the previous production if they switch and switch back
def next_turn(self):
"Reset the city for the next turn"
if self.current_progress >= Product.cost [self.current_product]:
self.current_progress -= Product.cost [self.current_product]
if Product.isUnit(self.current_product):
print ("{:s} has produced product id {:s}".format (self.name, self.current_product))
return Unit (self.x, self.y, self.current_product)
elif Product.isBuilding(self.current_product):
self.city_improvements.append (self.current_product)
                self.current_product = Product.NOTHING
self.current_progress += self.productivity
return None
def __repr__ (self):
return "{:10s} ({:4}, {:4})".format(self.name, self.x, self.y)
def to_tile (self):
return "⌂{:1}".format (team_names [self.allegiance])
|
[
"c-squared@sympatico.ca"
] |
c-squared@sympatico.ca
|
0b7e389f6e7f6fedb1c12477f38df96c9300d1a4
|
7d42c29d5171f94734bb0b3efde95ef5454380fa
|
/utils/pushtx.py
|
5374d504bc36d290b1fed4adc1fc25c4627052fb
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
CPT-Jack-A-Castle/monero-python
|
6fb57d86d1ec08da7955edc7007eee11986ef7dc
|
aa8bf90561ee48da5f2c5265b2c5e0a05aeb1363
|
refs/heads/master
| 2023-06-15T06:25:58.541586
| 2021-02-07T19:38:35
| 2021-02-07T19:38:35
| 342,531,136
| 1
| 0
|
BSD-3-Clause
| 2021-07-18T07:06:27
| 2021-02-26T09:49:01
| null |
UTF-8
|
Python
| false
| false
| 2,301
|
py
|
#!/usr/bin/python
import argparse
import logging
import operator
import re
import sys
import six
from monero.backends.jsonrpc import JSONRPCDaemon
from monero.daemon import Daemon
from monero.transaction import Transaction
from monero import exceptions
def url_data(url):
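    # Parse a "host[:port]" URL into a dict, keeping only the groups that matched.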
gs = re.compile(
r'^(?P<host>[^:\s]+)(?::(?P<port>[0-9]+))?$'
).match(url).groupdict()
return dict(filter(operator.itemgetter(1), gs.items()))
argsparser = argparse.ArgumentParser(description="Push transaction to network")
argsparser.add_argument('daemon_rpc_url', nargs='?', type=url_data, default='127.0.0.1:18081',
help="Daemon RPC URL [host[:port]]")
argsparser.add_argument('-v', dest='verbosity', action='count', default=0,
help="Verbosity (repeat to increase; -v for INFO, -vv for DEBUG")
argsparser.add_argument('-p', dest='proxy_url', nargs='?', type=str, default=None,
help="Proxy URL")
argsparser.add_argument('-t', dest='timeout', type=int, default=30, help="Request timeout")
argsparser.add_argument('-i', dest='tx_filenames', nargs='+', default=None,
help="Files with transaction data. Will read from stdin if not given.")
argsparser.add_argument('--no-relay', dest='relay', action='store_false',
help="Do not relay the transaction (it will stay at the node unless mined or expired)")
args = argsparser.parse_args()
level = logging.WARNING
if args.verbosity == 1:
level = logging.INFO
elif args.verbosity > 1:
level = logging.DEBUG
logging.basicConfig(level=level, format="%(asctime)-15s %(message)s")
if args.tx_filenames:
blobs = [(f, open(f, 'rb').read()) for f in args.tx_filenames]
else:
blobs = [('transaction', sys.stdin.buffer.read() if six.PY3 else sys.stdin.read())]
d = Daemon(JSONRPCDaemon(timeout=args.timeout, proxy_url=args.proxy_url, **args.daemon_rpc_url))
for name, blob in blobs:
logging.debug("Sending {}".format(name))
tx = Transaction(blob=blob)
try:
res = d.send_transaction(tx, relay=args.relay)
except exceptions.TransactionBroadcastError as e:
print("{} not sent, reason: {}".format(name, e.details['reason']))
logging.debug(e.details)
continue
if res['not_relayed']:
print("{} not relayed".format(name))
else:
print("{} successfully sent".format(name))
|
[
"michal@salaban.info"
] |
michal@salaban.info
|
d5275baa59e82244b8069adabb2a2531dc8b92c0
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/src/datadog_api_client/v2/model/service_definition_v1_contact.py
|
e3eac879a4e43a733b865237536f79e440e84e2a
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187
| 2023-09-01T14:42:04
| 2023-09-01T14:42:04
| 193,793,657
| 82
| 36
|
Apache-2.0
| 2023-09-14T18:22:39
| 2019-06-25T22:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import Union
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
class ServiceDefinitionV1Contact(ModelNormal):
@cached_property
def openapi_types(_):
return {
"email": (str,),
"slack": (str,),
}
attribute_map = {
"email": "email",
"slack": "slack",
}
def __init__(self_, email: Union[str, UnsetType] = unset, slack: Union[str, UnsetType] = unset, **kwargs):
"""
Contact information about the service.
:param email: Service owner’s email.
:type email: str, optional
:param slack: Service owner’s Slack channel.
:type slack: str, optional
"""
if email is not unset:
kwargs["email"] = email
if slack is not unset:
kwargs["slack"] = slack
super().__init__(kwargs)
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
f05468c51978e5789810d01bfcc9deab3bf99acb
|
d5d1bdbcf32b3b79fd3cc31e50d1c7767e780f8b
|
/linked_list/linkedlist_ops.py
|
0670304f1a25595b2ec0b426393caed39f6151bd
|
[] |
no_license
|
bittu1990/python_code
|
335ff4d212f5a1acffc2466400924fbe45c98267
|
e288857f32d757841c1ab13de746a1aaf6f427db
|
refs/heads/master
| 2023-03-04T21:54:14.744940
| 2021-02-17T22:15:08
| 2021-02-17T22:15:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
# Linked list operations in Python
class Node:
def __init__(self,item):
self.item = item
self.next = None
class LinkedList:
def __init__(self):
self.head = None
# Insert at the beginning
def insertAtBeginning(self, data):
new_node = Node(data)
new_node.next = self.head
self.head = new_node
# Insert after a node
def insertAfter(self, node, data):
if node is None:
print("The given previous node must inLinkedList.")
return
new_node = Node(data)
new_node.next = node.next
node.next = new_node
# Insert at the end
def insertAtEnd(self, data):
new_node = Node(data)
if self.head is None:
self.head = new_node
return
last = self.head
while (last.next):
last = last.next
last.next = new_node
# Deleting a node
def deleteNode(self, position):
if self.head == None:
return
temp_node = self.head
if position == 0:
self.head = temp_node.next
temp_node = None
return
        # Walk to the node just before the given position
        for i in range(position - 1):
            temp_node = temp_node.next
            if temp_node is None:
                break
        # If the position is past the end of the list
        if temp_node is None:
            return
        if temp_node.next is None:
            return
        # Unlink the node at the given position
        next_node = temp_node.next.next
        temp_node.next = next_node
def printList(self):
temp_node = self.head
while (temp_node):
print(str(temp_node.item) + " ", end="")
temp_node = temp_node.next
if __name__ == '__main__':
llist = LinkedList()
llist.insertAtEnd(1)
llist.insertAtBeginning(2)
llist.insertAtBeginning(3)
llist.insertAtEnd(4)
llist.insertAfter(llist.head.next, 5)
print('Linked list:')
llist.printList()
print("\nAfter deleting an element:")
llist.deleteNode(3)
llist.printList()
|
[
"62112081+bittu1990@users.noreply.github.com"
] |
62112081+bittu1990@users.noreply.github.com
|
3c8813ce0926fcb66cf690f46afdc6009bda9ff8
|
d2c052050635a3cbcce28a5e9f164b8225581310
|
/binaryapi/ws/chanels/logout.py
|
f8349354410eb0c2be5c542315c032964408b179
|
[] |
no_license
|
victalejo/binaryapi
|
242862192a70893b2f3c1b1415ccd58f85f6edb5
|
2d658cc6368a88453b79f27f1d53ac6209f70602
|
refs/heads/master
| 2023-01-04T14:05:17.271043
| 2020-10-31T14:04:26
| 2020-10-31T14:04:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
"""Module for Binary logout websocket channel."""
from binaryapi.ws.chanels.base import Base
from typing import Optional, Any
# https://developers.binary.com/api/#logout
class Logout(Base):
"""Class for Binary logout websocket channel."""
name = "logout"
def __call__(self, passthrough: Optional[Any] = None, req_id: Optional[int] = None):
"""Method to send message to logout websocket channel.
Log Out (request)
Logout the session
:param passthrough: [Optional] Used to pass data through the websocket, which may be retrieved via the `echo_req` output field.
:type passthrough: Optional[Any]
:param req_id: [Optional] Used to map request to response.
:type req_id: Optional[int]
"""
data = {
"logout": int(1)
}
return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)
|
[
"mdn522@gmail.com"
] |
mdn522@gmail.com
|
e93485072f7dde66a96f36226d73af1ce6020ef3
|
195df01037252363ecfd9a64cefb486e4a41b342
|
/kwargs.py
|
16f0e0ec78154d3f24402d9fc3ca8ca08de03925
|
[] |
no_license
|
vitalikas/python
|
133e8222a14fad007d85806e868d1dde9f3682e4
|
79cb7b43a2e64eafd27e68f14bcb96e1f8b2fb68
|
refs/heads/master
| 2021-03-04T13:08:20.083551
| 2020-03-13T15:32:25
| 2020-03-13T15:32:25
| 246,035,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
from kwargs_constr import Tag
paragraph = Tag(
"p",
klass=("text-align-right", "text-nice"),
id="heading-text",
data_bind="not-above"
)
paragraph.text = "Some text inside tag"
print(paragraph)
|
[
"noreply@github.com"
] |
vitalikas.noreply@github.com
|
0ea5708e39aa0f5393ce8193792de58cd696611e
|
1c8a8c51803daa4503c45caddcec632a32a5b655
|
/Sina/a.py
|
aef44345d71ce23af62fe6a4c55bf9c118b11e61
|
[] |
no_license
|
Bridgegong/sina_news
|
2fe466a932bae4a2ff411e3f8eda8c8d62ad651f
|
a3ca8654737119eec93ef799f26b1e53a18decc4
|
refs/heads/master
| 2020-03-18T02:45:50.385553
| 2018-05-21T02:13:26
| 2018-05-21T02:13:26
| 134,206,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2018/4/25 14:24
# @Author : Bridge
# @Email : 13722450120@163.com
# @File : a.py
# @Software: PyCharm
from scrapy.cmdline import execute
execute(['scrapy','crawl','xinlang'])
|
[
"gongqf@xingyuanauto.com"
] |
gongqf@xingyuanauto.com
|
74ca69ba3a710702817368570c10f30f0588dc56
|
3f411730a385c03593a9331e59259e5758fe38c3
|
/molecule/default/tests/test_default.py
|
bc3ddcca7774eccf7b9b15234554046869096d2e
|
[
"BSD-3-Clause"
] |
permissive
|
tiware23/ansible-prometheus
|
aa3a766850e0d14afefb9232d94d2c4bdb7efdfa
|
c36e4f36a189a641e9fea9c9e47e7a30b0a93c85
|
refs/heads/master
| 2020-04-12T05:09:14.480660
| 2018-12-19T11:16:40
| 2018-12-19T11:16:40
| 162,316,859
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_config(host):
config = host.run("sudo grep 'localhost:9090' prometheus.yml |wc -l ")
assert config.rc == 0
def test_prometheus_config(host):
config = host.run("sudo systemctl status prometheus")
assert config.rc == 0
def test_prometheus_local_port(host):
local_port = host.socket("tcp://127.0.0.1:9090")
assert local_port.is_listening
def test_prometheus_running_and_enabled(host):
prometheus_service = host.service("prometheus")
assert prometheus_service.is_running
assert prometheus_service.is_enabled
|
[
"thiagocavalcante@MacBook-Pro-de-Thiago.local"
] |
thiagocavalcante@MacBook-Pro-de-Thiago.local
|
758e08d3d3232922ed1c3883e4c8f21bb48407bb
|
1f7704c5df44247097c17789998bb5ee9a62f876
|
/Wave1-labs/lab1of2.py
|
158f8d1d8b33e9090c4d648be255b7940d78228b
|
[] |
no_license
|
promisejeremiah/promisejeremiah-WEJAPA-internship-datascience-study-2
|
d16b40702096bec88f09b2c784d1b4c83cb5c659
|
3d55104fd8eb5a39fc269c4a249f361b3e1e4b66
|
refs/heads/master
| 2022-12-01T12:58:54.174912
| 2020-08-16T14:56:06
| 2020-08-16T14:56:06
| 281,957,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
floor1 = 9*7
floor2 = 5*7
total_floor_area = floor1 + floor2
print(total_floor_area)
# Answer 1: total number of tiles needed: total_floor_area = 98
pack_of_tile = 6
packs17_of_tiles = pack_of_tile * 17
left_over_of_tiles = packs17_of_tiles - total_floor_area
print(left_over_of_tiles)
# Answer 2: number of tiles left over: left_over_of_tiles = 4
|
[
"noreply@github.com"
] |
promisejeremiah.noreply@github.com
|
62f0d88f3927d57627424c035823594222bde7e5
|
e444fa5f3806305b32cb092a43f1f32340123727
|
/property/migrations/0002_property_prop_type.py
|
2e78ab72179b74b0ec5ea6081ff478f18690fc19
|
[] |
no_license
|
oronibrian/safeCubProperty
|
2bcdec4b94e6511ad3108bb37bb8de8d155b1690
|
2e54f6a7d17dd4c485e9fb3ec793c8bcceecc694
|
refs/heads/master
| 2020-04-21T21:12:40.057883
| 2019-02-13T12:04:32
| 2019-02-13T12:04:32
| 169,871,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
# Generated by Django 2.1.5 on 2019-02-08 18:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('property', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='property',
name='prop_type',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='property.PropertyType'),
preserve_default=False,
),
]
|
[
"brianoroni6@gmail.com"
] |
brianoroni6@gmail.com
|
0956bb50cd2c49f015a9317467b5b03862d8afd1
|
fd7a642a9af2801c5248665d4966f0c14ca682fc
|
/data/__init__.py
|
24b7e22d8afc7f0b22b43cc819be1d894c438f6f
|
[
"MIT"
] |
permissive
|
Tythos/kepler
|
fa267d7bd8752c1fbc17ae31665c8f117abae5c5
|
68444879db15d3932899f95784c06bcbf17f8726
|
refs/heads/master
| 2021-01-20T17:23:52.322960
| 2016-06-14T18:25:32
| 2016-06-14T18:25:32
| 60,046,249
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
"""Interface for package data stored in the "data" folder, providing methods to
both resolve and load data.
"""
import os
def get_path(data_path):
"""Returns the absolute path to the indicated data file stored under the
'data/' folder co-located with this module.
"""
return os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path
def get_text(data_path):
"""Returns the text contents (as a string) of the given file stored under
the 'data/' folder co-located with this module.
"""
    p = get_path(data_path)
    with open(p, 'r') as f:
        content = f.read()
    return content
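# Example usage (hypothetical file name; assumes it exists under data/):
#   text = get_text('example.txt')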
|
[
"code@tythos.net"
] |
code@tythos.net
|
f41b45fbd083edec87e2aa2772ee1f6e8c289ade
|
ae15890d6e1185eb4bdb2017b91e8c2d047c5f8e
|
/main.py
|
2cda78e748f3e97796b9a1e435eab2099186a769
|
[] |
no_license
|
shlomoko/cellebriteTest
|
feaf10d3bf48f18efd6fb0b2078cf1d3d530563a
|
81e3809f93b99901b3c312da965bb1b1d08c76f4
|
refs/heads/master
| 2020-04-14T06:25:10.900430
| 2018-12-31T18:31:43
| 2018-12-31T18:31:43
| 163,686,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
import pandas as pd
import re
import time
class TestParser:
df = pd.DataFrame(columns=['ids', 'values'])
filePath = ""
def __init__(self, filePath):
self.filePath = filePath
def parseFile(self):
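        # Each line drops a 4-character prefix; record ids are the tokens
        # matching '.{4}000.' and values are the text between the id markers.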
with open(self.filePath, 'rb') as f:
for line in f:
decodedLine = line.decode('utf_8', 'ignore').strip()[4:]
ids = re.findall(r'(.{4}000.)', decodedLine)
values = re.split(r'.{4}000.{2}', decodedLine)[1:]
self.__appendDf__(ids, values)
self.__cleanDf__()
def __appendDf__(self, ids, values):
lineDf = pd.DataFrame(data=[ids, values]).T
lineDf.columns = ['ids', 'values']
self.df = pd.merge(self.df, lineDf, on=['ids'], how='outer')
def __fromEpochToDate__(self, epoch):
if pd.isnull(epoch): return '-'
else: return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch))
def __cleanDf__(self):
self.df = self.df.dropna(axis=1, how='all')
del self.df['ids']
self.df.columns = ['first_name', 'last_name', 'phone', 'date']
self.df['date'] = self.df['date'].astype(float).apply(self.__fromEpochToDate__)
a = 'ex_v7'
parser = TestParser(a)
parser.parseFile()
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(parser.df)
# print(parser.df.value)
|
[
"shlomokoppel2@gmail.com"
] |
shlomokoppel2@gmail.com
|
e2b320628ffea84913ec9ca43c9ed390d0ea2cef
|
ae03ae6a546a81151457151f1dfeb226a2dc82e0
|
/python/showcase/setup.py
|
af36e6829eaad3ef88c50a51a40388a5d8fae86a
|
[
"Apache-2.0"
] |
permissive
|
NilsHullegien/Apache-Flink-IN4331-Group15
|
1991f7053b1eb7011b98da0e8852803165efd3b9
|
b96401c94af3fcc9075968c20f927d835c47ecec
|
refs/heads/master
| 2023-05-22T07:48:06.671233
| 2021-06-15T00:52:20
| 2021-06-15T00:52:20
| 370,631,557
| 0
| 0
|
Apache-2.0
| 2021-06-14T15:14:03
| 2021-05-25T09:09:19
|
Java
|
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from setuptools import setup
import io
import os
this_directory = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
setup(
name='showcase',
version='3.0.0',
packages=["showcase"],
url='https://github.com/apache/flink-statefun-playground',
license='https://www.apache.org/licenses/LICENSE-2.0',
license_files=["LICENSE", "NOTICE"],
author='Apache Software Foundation',
author_email='dev@flink.apache.org',
description='Python SDK for Apache Flink Stateful functions',
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=['protobuf>=3.11.3,<4.0.0',
'apache-flink-statefun==3.0.0',
'aiohttp'],
tests_require=['pytest'],
python_requires='>=3.8',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7']
)
|
[
"Y"
] |
Y
|
71b9ade4a56e5e81e5e9a596188e3d39ddf35892
|
f903b3465da8da0ac10af8058ff2e54ae26e31e2
|
/artnet/artnet_node_planboard.py
|
1042d5a9e3ba56523685dae742a1230d797a0eb0
|
[
"MIT"
] |
permissive
|
protolab-rosenheim/python-artnet
|
f2833878e0f4b0107ff7ff498541b44cd46a52c7
|
d4db8270f94b6cc0295fff33e026a64805d4cb96
|
refs/heads/master
| 2021-07-06T09:38:30.673518
| 2020-08-19T07:12:07
| 2020-08-19T07:12:07
| 150,841,578
| 4
| 0
|
MIT
| 2020-08-19T06:34:53
| 2018-09-29T08:13:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
import copy
import logging
from .artnet_node import ArtNetNode, ArtNetDMXPacket, PacketType
class ArtNetNodePlanboard(ArtNetNode):
def __init__(self, name, ip_address, port=6454):
ArtNetNode.__init__(self, name, ip_address, port, max_history_size=5)
# Contains a dict with universes and a list of their lines. E.g.: {1: [0, 1], 3: [2, 3]}
self.universe_to_lines = {}
def set_led_strip(self, extracted_lines):
logging.debug(self.universe_to_lines)
for universe, lines in self.universe_to_lines.items():
tmp_led_strip = copy.deepcopy(self.universe[universe])
logging.debug(tmp_led_strip)
board_coordinates = []
for line in lines:
sorted_columns = sorted(extracted_lines[line], key=lambda column: column['column_id'], reverse=True)
sorted_columns.pop(0)
board_coordinates.extend(sorted_columns)
for counter, color in enumerate(tmp_led_strip.led_strip):
if not board_coordinates[counter]['led_color']:
color.set_color('black')
else:
color.set_color(board_coordinates[counter]['led_color'], True)
self.send_queue.append(ArtNetDMXPacket(PacketType.ART_DMX,
self.sequence,
0,
int(universe),
tmp_led_strip.to_byte_array()).packet_to_byte_array())
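# Hypothetical usage sketch (assumed; self.universe and the LED-strip objects
# come from the parent ArtNetNode class, which is not shown here):
# node = ArtNetNodePlanboard('planboard', '192.168.0.42')
# node.universe_to_lines = {1: [0, 1], 3: [2, 3]}
# node.set_led_strip(extracted_lines)  # extracted_lines[line] is a list of
#                                      # dicts with 'column_id' and 'led_color'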
|
[
"michael.list@stud.fh-rosenheim.de"
] |
michael.list@stud.fh-rosenheim.de
|
73248824d501628d62f90cc9698b0b528a989850
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part007132.py
|
1f6089f2019b0f614d30131e3882286e759c213f
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher66122(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.3.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.3.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher66122._instance is None:
CommutativeMatcher66122._instance = CommutativeMatcher66122()
return CommutativeMatcher66122._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
        # State 66121
        # 'return' immediately followed by 'yield' turns this function into a
        # generator that yields nothing (an artifact of the matchpy generator).
        return
        yield
from collections import deque  # duplicate module-level import left by the code generator
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
dc15ab72d78039c72cb7652569799005745eaab3
|
012a587295049839a7784a1e7969094e2e5e3881
|
/assets/polls/models.py
|
c6e6b6cc9780b4783a2aa20411e207bcbc522085
|
[
"MIT"
] |
permissive
|
EmilyXu12/EmilyXu12.github.io
|
d9aef815c5590d2f180f8cc9cf02a19b859ccc1a
|
fd3359a9314be114fddf6a474acc1b87cc89f763
|
refs/heads/master
| 2022-12-11T09:05:55.883434
| 2020-08-30T02:45:23
| 2020-08-30T02:45:23
| 269,801,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
# Create your models here.
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
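# Usage sketch (assumed Django shell session, not part of the original file):
# >>> from django.utils import timezone
# >>> q = Question(question_text="What's new?", pub_date=timezone.now())
# >>> q.was_published_recently()
# True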
|
[
"yuan.yuk@husky.neu.edu"
] |
yuan.yuk@husky.neu.edu
|
5668ef2e1bb86d4b768ee317f984f8fe7bf11a41
|
23ac2442f3982b135f4450582a2616c70e7b0ae7
|
/pp.py
|
280b3ddc132881b4d81e24dbb4803de5f0ee21fc
|
[] |
no_license
|
RATED-R-SUNDRAM/CPP-Graph-ALgo
|
0e29cd87180a2a5bd8db992159f790f24f27343d
|
662efa7a2bdee4dae57235c2d500716f2ab39201
|
refs/heads/main
| 2023-04-07T14:40:56.522040
| 2021-04-15T15:05:56
| 2021-04-15T15:05:56
| 342,605,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
def fun():
    a = 1
    b = 10
    c = 1
    # 1 & 10 == 0 (no common set bits), so (a & b or 0) evaluates to 0,
    # and (a and c and 0) is 0 as well, so the branch is never taken.
    if ((a & b or 0) or (a and c and 0)):
        a = a + c
        b = b % 3
        a = a << 1
    print(a + b - c)
fun()
|
[
"ss6437p@gmail.com"
] |
ss6437p@gmail.com
|
466cb7ec79e01b61046b8004e4078b5bcab517a3
|
b9af2c53b739ad4353ee4867aeef20e377aa90bf
|
/bot.py
|
3692492765e9190aa76978f2e5f0b6cea27dd349
|
[
"MIT"
] |
permissive
|
rodrigolanes/loterias
|
57a70fbacf37e860762433fe40eb50e42fa1b800
|
6725a9a7f24c35dc753c9213bb0ec60ae1ade20c
|
refs/heads/master
| 2021-01-05T08:48:38.464079
| 2020-09-07T23:26:34
| 2020-09-07T23:26:34
| 240,961,156
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
import logging
import os
from dotenv import load_dotenv
from telegram.ext import CommandHandler, Filters, MessageHandler, Updater
from utils.format import formata_concurso_text
from utils.sqlite_helper import add_usuario, get_last_concurso
load_dotenv()
telegram_token = os.getenv("TELEGRAM_TOKEN")
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def start(update, context):
"""Send a message when the command /start is issued."""
mensagem = """Oi, Sou o bot da Boa Sorte!
Eu envio mensagem sobre os jogos da Mega Sena sempre que um novo resultado é publicado no site da Caixa Econômica Federal.
Aguarde pelo próximo concurso e enviarei o resultado."""
add_usuario(update.message.from_user.id)
update.message.reply_text(mensagem)
def help_command(update, context):
"""Send a message when the command /help is issued."""
update.message.reply_text('Help!')
def echo(update, context):
"""Echo the user message."""
update.message.reply_text(update.message.text)
def ultimo_concurso(update, context):
"""Responde o último consurso da mega_sena."""
update.message.reply_text(
formata_concurso_text(get_last_concurso()), parse_mode="MARKDOWN")
def main():
"""Start the bot."""
updater = Updater(
telegram_token, use_context=True)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help_command))
dp.add_handler(CommandHandler("ultimo", ultimo_concurso))
# on noncommand i.e message - echo the message on Telegram
dp.add_handler(MessageHandler(Filters.text & ~Filters.command, echo))
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
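# Expected environment (assumed, not part of the original file): a .env file
# next to bot.py, read by load_dotenv(), containing
#   TELEGRAM_TOKEN=<bot token issued by @BotFather>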
|
[
"rodrigolanes@gmail.com"
] |
rodrigolanes@gmail.com
|
7060cb7afe9007c628f318c3bc483c28a8c8b619
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/5GW5Kb2RpGwhHax2W_20.py
|
a667a9e58b60c21164ebaa897ae6cefc8bf30434
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
"""
Create a function that takes a two-dimensional list as an argument and returns
a one-dimensional list with the values of the original 2d list that are
arranged by spiralling through it (starting from the top-left).
### Examples
spiral_transposition([
[7, 2],
[5, 0]
])
➞ [7, 2, 0, 5]
spiral_transposition([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
➞ [1, 2, 3, 6, 9, 8, 7, 4, 5]
spiral_transposition([
[1, 1, 1],
[4, 5, 2],
[3, 3, 2]
])
➞ [1, 1, 1, 2, 2, 3, 3, 4, 5]
### Notes
If you do not understand the instructions, write the 3x3 list example on a
piece of paper and trace the output through it.
"""
def spiral_transposition(lst):
    # sentinel bounds: reaching -1 or len(...) means we left the grid
    rows, cols = (-1, len(lst)), (-1, len(lst[0]))
    r, c = 0, 0
    track, res = set(), []   # visited cells and collected values
    direct = 1               # 1=right, 2=down, 3=left, 4=up
    while len(res) < rows[1]*cols[1]:
        rn, cn = r, c
        if (r, c) not in track:
            res.append(lst[r][c])
            track.add((r, c))
        if direct == 1: cn += 1
        elif direct == 2: rn += 1
        elif direct == 3: cn -= 1
        else: rn -= 1
        if rn in rows or cn in cols or (rn, cn) in track:
            direct = direct + 1 if direct < 4 else 1   # turn clockwise
        else:
            r, c = rn, cn
    return res
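# Quick self-check against the docstring examples (assumed addition, not part
# of the original file):
if __name__ == "__main__":
    assert spiral_transposition([[7, 2], [5, 0]]) == [7, 2, 0, 5]
    assert spiral_transposition([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) == [1, 2, 3, 6, 9, 8, 7, 4, 5]
    assert spiral_transposition([[1, 1, 1], [4, 5, 2], [3, 3, 2]]) == [1, 1, 1, 2, 2, 3, 3, 4, 5]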
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
fa5c6ddf98c3a80daff815c2a86a680d44ea94a9
|
682319f56c17e949bab0d6e418838d33977dd760
|
/RP/dis_Factorial.py
|
a7effff9c1276fd9ca0e3f0f754704cb606138c8
|
[] |
no_license
|
DilipBDabahde/PythonExample
|
8eb70773a783b1f4b6cf6d7fbd2dc1302af8aa1b
|
669762a8d9ee81ce79416d74a4b6af1e2fb63865
|
refs/heads/master
| 2020-08-23T01:05:44.788080
| 2020-07-25T21:59:52
| 2020-07-25T21:59:52
| 216,511,985
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
'''
4. Write a program which accepts one number from the user and returns its factorial.
input: 5
output: 120
'''
def make_factorial(iNo):
    ifact = 1
    if iNo <= 0:
        print("invalid input")
        return 0
    while iNo != 0:
        ifact = ifact * iNo
        iNo = iNo - 1
    return ifact
def main():
    ival = int(input("Enter a number: "))
    result = make_factorial(ival)
    if result > 0:
        print("Factorial of given num is: ", result)
    else:
        print("try again")
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
DilipBDabahde.noreply@github.com
|
a558b468d4be4de4ce8099d14650f06b022724d4
|
9f64ea02913bdc5cf841ac1242ec0f237c2edf87
|
/utils/text_preprocess.py
|
1b08637dcf5ad2ca211206aa4b36493dcb9f4d71
|
[] |
no_license
|
zxp93/wsr
|
c23cca9c04af809e1bb7e9248737e891cf3ffcec
|
329944087982e6e951f2f6b18b31fb86a3845524
|
refs/heads/master
| 2023-06-13T21:57:23.639240
| 2021-07-12T05:29:47
| 2021-07-12T05:29:47
| 385,133,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,682
|
py
|
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
import string
import pandas as pd
def Preprocessing(text):
"""
1. lower
2. punctuations are replaced by ' '
3. tokenization
4. remove stopwords
5. stemming
6. lemmatization
"""
    text = text.lower()  # convert everything to lower case
    for c in string.punctuation:
        text = text.replace(c, " ")  # replace punctuation with spaces
    wordList = nltk.word_tokenize(text)  # tokenize
    filtered = [w for w in wordList if w not in stopwords.words('english')]  # remove stopwords
    # filtered = [w for w in wordList]
    # stem
    ps = PorterStemmer()
    filtered = [ps.stem(w) for w in filtered]  # stemming
    wl = WordNetLemmatizer()
    filtered = [wl.lemmatize(w) for w in filtered]  # lemmatization
    return " ".join(filtered)
if __name__ == "__main__":
# mapping = {'Financial': '0',
# 'Tools': '1',
# 'Messaging': '2',
# 'eCommerce': '3',
# 'Payments': '4',
# 'Social': '5',
# 'Enterprise': '6',
# 'Mapping': '7',
# 'Science': '8',
# 'Government': '9'}
df = pd.read_csv('../data/top_10_api.csv')
w = open('../data/top_10_api.txt', 'w', encoding='utf-8')
count = 0.0
sum_word = 0
for c, d in zip(df['Primary Category'], df['description']):
d = Preprocessing(d)
w.write(c + ',' + d + '\n')
count += 1
sum_word += len(d.split(' '))
w.close()
print(sum_word/count)
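# Example (assumed; requires the nltk corpora 'punkt', 'stopwords' and
# 'wordnet' to be downloaded first, e.g. nltk.download('stopwords')):
# >>> Preprocessing("The cats are running quickly!")
# something like 'cat run quickli'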
|
[
"zxpkpnm@gmail.com"
] |
zxpkpnm@gmail.com
|
89650fb76bc0e4a41e0cd0e7a034edd4a5070dc4
|
18f05db01e134c0f1869f97d6ea829ffadcc27b2
|
/figure5/cluster-on-2d.py
|
03f75968f63826e805ada337039f8ee147bd4e69
|
[] |
no_license
|
neusim/locust_AL_multipara_effect
|
d2f8662bbe89e196c5c7f23251190f30f434c50a
|
a4e6cb8374ad68a5e6cf70cae14f6928a9573743
|
refs/heads/master
| 2021-05-16T09:36:04.578248
| 2017-09-22T14:37:22
| 2017-09-22T14:37:22
| 104,484,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,321
|
py
|
execfile('../slow/slow_analy_head.py')  # Python 2; under Python 3 use exec(open(...).read())
idx = range(PN_number)
tb = 1100
te = 1350
use_num = 150 # use how many PNs
print('use %d PNs'%use_num)
use_idx = choice(idx,use_num) # PNs in use
print('PNs:',use_idx)
#use_slist = [0,18,42,72,108] # shifts
#use_slist = [0,12, 30, 60,90] # shifts
use_slist = [0, 12, 24, 36, 48] # shifts
print('use shifts',use_slist)
c0='b' #'sage'
c1='g' #'orange'
c2='r' #'royalblue'
c3='c' #'pink'
c4='m' #'rosybrown'
# for name, hex in matplotlib.colors.cnames.items():
# print(name)
if __name__ == '__main__':
c = 0
sprate_ls = [load_sf_avged_over_trial(c,s,tb,te)[use_idx] for s in use_slist]
for s in use_slist:
for t in range(trial_number):
sprate_ls.append(load_sf_from_file(c,s,t,1100,1350)[use_idx])
y=PCA(sprate_ls, 2)
    # class means (filled circles), one color per shift
    colors = [c0, c1, c2, c3, c4]
    for k, cc in enumerate(colors):
        plot(y[k, 0], y[k, 1], 'o', ms=6, c=cc)
    # individual trials: one marker/color pair per shift
    markers = ['8', 'x', '+', '^', '*']
    for ttt, (mk, cc) in enumerate(zip(markers, colors)):
        for i in range(len(use_slist) + ttt*trial_number, len(use_slist) + (ttt+1)*trial_number):
            plot(y[i, 0], y[i, 1], mk, ms=4, c=cc)
title('couple %d, use %d PNs'%(c,use_num))
savefig('classification_2d_c%d_%dPNs.jpg'%(c,use_num))
savefig('classification_2d_c%d_%dPNs.eps'%(c,use_num))
show()
# ----
c = 100
sprate_ls = [load_sf_avged_over_trial(c,s,tb,te)[use_idx] for s in use_slist]
for s in use_slist:
for t in range(trial_number):
sprate_ls.append(load_sf_from_file(c,s,t,1100,1350)[use_idx])
y=PCA(sprate_ls, 2)
    # same plotting scheme as above for the second coupling value
    for k, cc in enumerate(colors):
        plot(y[k, 0], y[k, 1], 'o', ms=6, c=cc)
    for ttt, (mk, cc) in enumerate(zip(markers, colors)):
        for i in range(len(use_slist) + ttt*trial_number, len(use_slist) + (ttt+1)*trial_number):
            plot(y[i, 0], y[i, 1], mk, ms=4, c=cc)
title('couple %d, use %d PNs'%(c,use_num))
savefig('classification_2d_c%d_%dPNs.jpg'%(c,use_num))
savefig('classification_2d_c%d_%dPNs.eps'%(c,use_num))
show()
|
[
"maoji.wang@cims.nyu.edu"
] |
maoji.wang@cims.nyu.edu
|
f69ff33856dc08396b6de73337a2ae395df10786
|
2d13c3f509cba7b5870aa7537bd5b8a31dea651c
|
/config/settings.py
|
17e4b53ee9cf77105e777e6ceba0615df18a7066
|
[] |
no_license
|
HUe1998/dashboard_with_chart
|
d72620c3f2e20cd8cf91bf68f901c625db17256d
|
242e0324f04927ff3d74c4067c8897b3db8e7eb2
|
refs/heads/main
| 2023-05-14T09:32:01.621167
| 2021-06-09T17:29:09
| 2021-06-09T17:29:09
| 374,452,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,640
|
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-z0#c#5x6=w@i@f8yga55z*%%&7ml1fdim)rtrnz5q@)9zwhg&^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['.herokuapp.com', 'localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'dashboard',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [str(BASE_DIR.joinpath('templates'))],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [str(BASE_DIR.joinpath('static'))]
STATIC_ROOT = str(BASE_DIR.joinpath('staticfiles'))
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_REDIRECT_URL = 'dashboard'
|
[
"himanshu.1998me@gmail.com"
] |
himanshu.1998me@gmail.com
|
544b4a184d2bb3e6c8bed77a782d8f10ece1ffda
|
ccc2e354dc59bc464a62273f583a981686168030
|
/utils/data_helper.py
|
5a4b5b9c56953b5aa89d7f22fcae1f22e99aab37
|
[] |
no_license
|
infovis2019-nfl/nfl_visualization
|
bd8b1adcc55f4b750dbe64cdcf66d3b4fd9c8318
|
50df1ad3773bcfffe452b9fa9622789d69f15554
|
refs/heads/master
| 2020-04-20T17:58:37.467408
| 2019-04-10T00:52:09
| 2019-04-10T00:52:09
| 169,005,058
| 0
| 0
| null | 2019-04-01T02:14:16
| 2019-02-03T23:21:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 253
|
py
|
def normalize(arr):
    # min-max scaling to [0, 1]; assumes max(arr) > min(arr)
    minValue = min(arr)
    maxValue = max(arr)
    normalizedArr = []
    for i in arr:
        normalizedValue = (i - minValue) / (maxValue - minValue)
        normalizedArr.append(normalizedValue)
    return normalizedArr
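# Equivalent vectorized sketch (assumed alternative, not part of the original
# file); like the loop above it assumes max(arr) > min(arr):
# import numpy as np
# def normalize_np(arr):
#     a = np.asarray(arr, dtype=float)
#     return ((a - a.min()) / (a.max() - a.min())).tolist()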
|
[
"jordanjay28@gmail.com"
] |
jordanjay28@gmail.com
|
dcd633771db3d305f2e7309cc2fbf6b31b683fe0
|
978b43297abb5fbcae8bc71d5047ce59c85e0835
|
/U_NetandPSP/PSP.py
|
593aae0bd0328dcc1ea8b49e816185f1088bab6e
|
[] |
no_license
|
SongJgit/Learning-segmentation
|
e76d94ca5c3998b30d9397314f1f04f7bfd4779d
|
caf2a3ef6f14bb044eca337d04567ba749fbf300
|
refs/heads/main
| 2023-07-04T04:03:44.811960
| 2021-08-11T01:02:57
| 2021-08-11T01:02:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,093
|
py
|
'''
Description: Learn PSP
version: 1.0
Author: SongJ
Date: 2021-07-29 19:43:47
LastEditors: SongJ
LastEditTime: 2021-08-09 11:16:20
'''
import numpy as np
import paddle.fluid as fluid
import paddle
from paddle.fluid.dygraph import to_variable
from paddle.fluid.dygraph import Layer
from paddle.fluid.dygraph import Conv2D
from paddle.fluid.dygraph import BatchNorm
from paddle.fluid.dygraph import Pool2D
from paddle.fluid.dygraph import Conv2DTranspose
from paddle.fluid.dygraph import Dropout
from resnet_dilated import ResNet50
class PSPModule(Layer):
def __init__(self, num_channels,bin_size_list):
super(PSPModule, self).__init__()
self.bin_size_list = bin_size_list
        num_filters = num_channels // len(bin_size_list)  # C/4
self.features = []
for i in range(len(bin_size_list)):
self.features.append(
fluid.dygraph.Sequential(
Conv2D(num_channels,num_filters,1),
BatchNorm(num_filters,act='relu')
)
)
def forward(self, inputs):
out = [inputs]
for idx, f in enumerate(self.features):
            # adaptive pooling shrinks the 2048-channel input to the 1x1 / 2x2 / 3x3 / 6x6 bins
x = fluid.layers.adaptive_pool2d(inputs,self.bin_size_list[idx])
x = f(x)
x = fluid.layers.interpolate(x,inputs.shape[2::], align_corners=True)
out.append(x)
out = fluid.layers.concat(out, axis=1)
return out
class PSPNet(Layer):
def __init__(self, num_classes=59, backbone='resnet50'):
super(PSPNet, self).__init__()
res = ResNet50(pretrained=False)
self.layer0 = fluid.dygraph.Sequential(
res.conv,
res.pool2d_max
)
self.layer1 = res.layer1
self.layer2 = res.layer2
self.layer3 = res.layer3
self.layer4 = res.layer4
        # PSP module: 2048 channels in -> 2048*2 out (input concatenated with four pooled branches)
num_channels = 2048
self.pspmodule = PSPModule(num_channels,[1,2,3,6])
num_channels *=2
        # classifier head: 2048*2 -> 512 -> num_classes
self.classifier = fluid.dygraph.Sequential(
Conv2D(num_channels=num_channels, num_filters=512, filter_size=3, padding=1),
BatchNorm(512,act='relu'),
            Dropout(0.1),  # 10% dropout
Conv2D(num_channels=512, num_filters=num_classes, filter_size=1)
)
# aux
def forward(self,inputs):
x = self.layer0(inputs)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.pspmodule(x)
x = self.classifier(x)
x= fluid.layers.interpolate(x,inputs.shape[2::],align_corners=True)
return x,1
def main():
with fluid.dygraph.guard(fluid.CPUPlace()):
model = PSPNet(num_classes=59)
x_data = np.random.rand(1,3,473,473).astype(np.float32)
model.train()
inputs = to_variable(x_data)
pred,aux = model(inputs)
        print(pred.shape, aux)  # aux is the placeholder int 1 returned by forward
if __name__ =='__main__':
main()
|
[
"songj95@outlook.com"
] |
songj95@outlook.com
|
73e14457dfe2f353b233083c289ec0843c364a15
|
1190c052fd3673a011acaf9b89d6f819b3acba56
|
/reg_tap4fun_v8.6.py
|
dfb9282d5631b78928f3bcba5bae124976a481c0
|
[] |
no_license
|
pblh123/kaggle-1
|
e6d9494994efe2da5d4b7e2d8976f713cad37077
|
ee30b6e8a8da0e097381aac6621231d8de8da5c4
|
refs/heads/master
| 2020-03-27T17:49:02.464979
| 2018-09-12T06:30:19
| 2018-09-12T06:30:19
| 146,876,478
| 0
| 0
| null | 2018-08-31T10:18:31
| 2018-08-31T10:18:30
| null |
UTF-8
|
Python
| false
| false
| 16,839
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 10 14:22:48 2018
@author: 1707500
"""
'''
V1.0 regression -> 85
v2.0 LASSO regression with the label transformed to ([prediction_pay_price]-[pay_price])/[prediction_pay_price] -> 66
v3.0 LASSO regression with the label transformed to ([prediction_pay_price]-[pay_price])/[prediction_pay_price] -> 66
'''
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import metrics
import lightgbm as lgb
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import seaborn as sns
from scipy import stats
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
import datetime
from sklearn.ensemble import RandomForestRegressor
# =============================================================================
# import sys
# sys.path.append('..')
# import gluonbook as gb
# from mxnet import autograd, gluon, init, nd
# from mxnet.gluon import loss as gloss, nn
# =============================================================================
'''
Basic data preprocessing
'''
def data_process(X):
# =============================================================================
# X['wood'] = X['wood_add_value'] - X['wood_reduce_value']
# X['stone'] = X['stone_add_value'] - X['stone_reduce_value']
# X['ivory'] = X['ivory_add_value'] - X['ivory_reduce_value']
# X['meat'] = X['meat_add_value'] - X['meat_reduce_value']
# X['magic'] = X['magic_add_value'] - X['magic_reduce_value']
# X['infantry'] = X['infantry_add_value'] - X['infantry_reduce_value']
# X['cavalry'] = X['cavalry_add_value'] - X['cavalry_reduce_value']
# X['shaman'] = X['shaman_add_value'] - X['shaman_reduce_value']
# X['wound_infantry'] = X['wound_infantry_add_value'] - X['wound_infantry_reduce_value']
# X['wound_cavalry'] = X['wound_cavalry_add_value'] - X['wound_cavalry_reduce_value']
# X['wound_shaman'] = X['wound_shaman_add_value'] - X['wound_shaman_reduce_value']
# X['general_acceleration'] = X['general_acceleration_reduce_value'] - X['general_acceleration_add_value']
# X['building_acceleration'] = X['building_acceleration_add_value'] - X['building_acceleration_reduce_value']
# X['reaserch_acceleration'] = X['reaserch_acceleration_add_value'] - X['reaserch_acceleration_reduce_value']
# X['training_acceleration'] = X['training_acceleration_add_value'] - X['training_acceleration_reduce_value']
# X['treatment_acceleration'] = X['treatment_acceleraion_add_value'] - X['treatment_acceleration_reduce_value']
# =============================================================================
X = X
# =============================================================================
# X = X.drop(['wood_reduce_value' ,'wood_add_value' ,'stone_add_value' ,'stone_reduce_value' ,'ivory_add_value' ,'ivory_reduce_value' ,'meat_add_value' ,'meat_reduce_value' ,'magic_add_value' ,'magic_reduce_value' ,'infantry_add_value' ,'infantry_reduce_value' ,'cavalry_add_value' ,'cavalry_reduce_value' ,'shaman_add_value' ,'shaman_reduce_value' ,'wound_infantry_add_value' ,'wound_infantry_reduce_value' ,'wound_cavalry_add_value' ,'wound_cavalry_reduce_value' ,'wound_shaman_add_value' ,'wound_shaman_reduce_value' ,'general_acceleration_add_value' ,'general_acceleration_reduce_value' ,'building_acceleration_add_value' ,'building_acceleration_reduce_value' ,'reaserch_acceleration_add_value' ,'reaserch_acceleration_reduce_value' ,'training_acceleration_add_value' ,'training_acceleration_reduce_value' ,'treatment_acceleraion_add_value' ,'treatment_acceleration_reduce_value'],axis=1)
# =============================================================================
return X
def get_dummies(X):
X = pd.get_dummies(X, columns = ['bd_training_hut_level' ,'bd_healing_lodge_level' ,'bd_stronghold_level' ,'bd_outpost_portal_level' ,'bd_barrack_level' ,'bd_healing_spring_level' ,'bd_dolmen_level' ,'bd_guest_cavern_level' ,'bd_warehouse_level' ,'bd_watchtower_level' ,'bd_magic_coin_tree_level' ,'bd_hall_of_war_level' ,'bd_market_level' ,'bd_hero_gacha_level' ,'bd_hero_strengthen_level' ,'bd_hero_pve_level','sr_scout_level' ,'sr_training_speed_level' ,'sr_infantry_tier_2_level' ,'sr_cavalry_tier_2_level' ,'sr_shaman_tier_2_level' ,'sr_infantry_atk_level' ,'sr_cavalry_atk_level' ,'sr_shaman_atk_level' ,'sr_infantry_tier_3_level' ,'sr_cavalry_tier_3_level' ,'sr_shaman_tier_3_level' ,'sr_troop_defense_level' ,'sr_infantry_def_level' ,'sr_cavalry_def_level' ,'sr_shaman_def_level' ,'sr_infantry_hp_level' ,'sr_cavalry_hp_level' ,'sr_shaman_hp_level' ,'sr_infantry_tier_4_level' ,'sr_cavalry_tier_4_level' ,'sr_shaman_tier_4_level' ,'sr_troop_attack_level' ,'sr_construction_speed_level' ,'sr_hide_storage_level' ,'sr_troop_consumption_level' ,'sr_rss_b_prod_level' ,'sr_rss_c_prod_level' ,'sr_rss_d_prod_level' ,'sr_rss_a_gather_level' ,'sr_rss_b_gather_level' ,'sr_rss_c_gather_level' ,'sr_rss_d_gather_level' ,'sr_troop_load_level' ,'sr_rss_e_gather_level' ,'sr_rss_e_prod_level' ,'sr_outpost_durability_level' ,'sr_outpost_tier_2_level' ,'sr_healing_space_level' ,'sr_gathering_hunter_buff_level' ,'sr_healing_speed_level' ,'sr_outpost_tier_3_level' ,'sr_alliance_march_speed_level' ,'sr_pvp_march_speed_level' ,'sr_gathering_march_speed_level' ,'sr_outpost_tier_4_level' ,'sr_guest_troop_capacity_level' ,'sr_march_size_level' ,'sr_rss_help_bonus_level' ])
# =============================================================================
# X = pd.get_dummies(X, columns = ['sr_rss_e_prod_level','sr_gathering_march_speed_level','sr_outpost_durability_level','bd_hall_of_war_level','sr_rss_e_gather_level','sr_healing_space_level','sr_rss_a_gather_level','sr_hide_storage_level'])
# =============================================================================
return X
def standardscaler(X):
standardscaler = preprocessing.StandardScaler()
features_new = standardscaler.fit_transform(X)
X = pd.DataFrame(features_new, columns=X.columns)
return X
tap_fun_train = pd.read_csv("D:/data/tap4fun/tap_fun_train.csv", sep = ',')
tap_fun_test = pd.read_csv("D:/data/tap4fun/tap_fun_test.csv", sep = ',')
'''
Exploratory data visualization
'''
# =============================================================================
# data_vis = tap_fun_train[tap_fun_train.prediction_pay_price > 3000]
# corrmat = data_vis.corr()
# f, ax = plt.subplots(figsize=(40, 40))
# sns.heatmap(corrmat, vmax=0.8, square=True)
#
#
#
# k = 20 # number of features to display in the correlation matrix
# cols = corrmat.nlargest(k, 'prediction_pay_price')['prediction_pay_price'].index
# cm = np.corrcoef(data_vis[cols].values.T)
# sns.set(font_scale=1.25)
# hm = sns.heatmap(cm, cbar=True, annot=True, \
# square=True, fmt='.2f', annot_kws={'size': 5}, yticklabels=cols.values, xticklabels=cols.values)
# plt.show()
# =============================================================================
tap_fun_train['xiangcha'] = tap_fun_train['prediction_pay_price']-tap_fun_train['pay_price']
tap_fun_train['label'] = tap_fun_train['prediction_pay_price']
tap_fun_train = tap_fun_train.fillna(0)
tap_fun_train['classification_label'] = tap_fun_train['xiangcha'].map(lambda x: 1 if x > 0 else 0)
data = pd.concat([tap_fun_train,tap_fun_test])
data = data.fillna(-1)
register_time = []
for i in data['register_time']:
# =============================================================================
# print(datetime.datetime.strptime(i,'%Y-%m-%d %H:%M:%S').strftime("%a"))
# =============================================================================
register_time.append(datetime.datetime.strptime(i,'%Y-%m-%d %H:%M:%S').strftime("%a"))
data['register_time'] = register_time
print(data['classification_label'].value_counts())
'''
Classification data preprocessing
'''
data = data_process(data)
# =============================================================================
# data = get_dummies(data)
# =============================================================================
print('dummies finish!!')
'''
Prepare the test sets for regression modeling
'''
# =============================================================================
# reg_data = get_dummies(reg_data)
# =============================================================================
reg_test_1 = data[(data.classification_label == -1)&(data.pay_price == 0)].drop(['user_id','register_time','classification_label','prediction_pay_price','xiangcha','label'],axis=1)
reg_test_1['intercept'] = 1.0
# =============================================================================
# reg_test_2 = data[(data.classification_label == -1)&(data.pay_price > 0)].drop(['user_id','register_time','classification_label','prediction_pay_price','xiangcha','label'],axis=1)
# =============================================================================
reg_test_2 = data[(data.classification_label == -1)&(data.pay_price > 0)&(data.pay_price < 3000)][['pay_price','ivory_add_value','stone_add_value','ivory_reduce_value','wood_add_value','general_acceleration_add_value','stone_reduce_value','training_acceleration_add_value','wood_reduce_value','meat_add_value','general_acceleration_reduce_value']]
reg_test_2['intercept'] = 1.0
reg_test_3 = data[(data.classification_label == -1)&(data.pay_price >= 3000)][['pay_price','pay_count','training_acceleration_add_value']]
# =============================================================================
# reg_test_3 = data[(data.classification_label == -1)&(data.pay_price >= 3000)][['pay_price']]
# =============================================================================
reg_test_3['intercept'] = 1.0
'''
Train the regression models
'''
reg_data = data
reg_data_1 = reg_data[(reg_data.label >= 0)&(reg_data.pay_price == 0)]
# =============================================================================
# reg_data = reg_data.drop((reg_data[(reg_data.pay_price >= 100) & (reg_data.avg_online_minutes < 35)]).index)
# reg_data_1 = reg_data_1.drop((reg_data_1[(reg_data_1.pay_price < 0.99) & (reg_data_1.avg_online_minutes > 840)]).index)
# =============================================================================
reg_target_1 = reg_data_1['label']
reg_features_1 = reg_data_1.drop(['user_id','register_time','classification_label','prediction_pay_price','xiangcha','label'],axis=1)
reg_features_1['intercept'] = 1.0
reg_data_2 = reg_data[(reg_data.label >= 0)&(reg_data.pay_price > 0)&(reg_data.pay_price < 3000)]
reg_data_2 = reg_data_2.drop((reg_data_2[(reg_data_2.pay_price >= 1000) & ((reg_data_2.pay_price/reg_data_2.prediction_pay_price) > 0.9)]).index)
reg_target_2 = reg_data_2['label']
# =============================================================================
# reg_features_2 = reg_data_2.drop(['user_id','register_time','classification_label','prediction_pay_price','xiangcha','label'],axis=1)
# =============================================================================
reg_features_2 = reg_data_2[['pay_price','ivory_add_value','stone_add_value','ivory_reduce_value','wood_add_value','general_acceleration_add_value','stone_reduce_value','training_acceleration_add_value','wood_reduce_value','meat_add_value','general_acceleration_reduce_value']]
reg_features_2['intercept'] = 1.0
reg_data_3 = reg_data[(reg_data.label >= 0)&(reg_data.pay_price >= 3000)]
reg_data_3 = reg_data_3.drop(reg_data_3[(reg_data_3.pay_price/reg_data_3.prediction_pay_price) > 0.9].index)
# =============================================================================
# reg_data_3 = reg_data_3.drop((reg_data_3[(reg_data_3.pay_price >= 1000) & (reg_data_3.pay_price == reg_data_3.prediction_pay_price)]).index)
# =============================================================================
reg_target_3 = reg_data_3['label']
reg_features_3 = reg_data_3[['pay_price','pay_count','training_acceleration_add_value']]
reg_features_3['intercept'] = 1.0
'''
'''
# =============================================================================
# from sklearn import linear_model
# reg = linear_model.Lasso(alpha=0.1, fit_intercept=True, normalize=True, precompute=True, copy_X=True, max_iter=1000, tol=0.0001, warm_start=True, positive=True, random_state=42, selection='cyclic')
# =============================================================================
from sklearn.linear_model import ElasticNet
reg_1 = ElasticNet(alpha=0.8, l1_ratio=0.8, fit_intercept=True, normalize=False, precompute=True, copy_X=True, max_iter=1000, tol=0.001, warm_start=True, positive=True, random_state=42, selection='cyclic')
reg_2 = ElasticNet(alpha=0.9, l1_ratio=1, fit_intercept=True, normalize=False, precompute=True, copy_X=True, max_iter=1000, tol=0.001, warm_start=True, positive=True, random_state=42, selection='cyclic')
reg_3 = ElasticNet(alpha=0.9, l1_ratio=1, fit_intercept=False, normalize=False, precompute=True, copy_X=True, max_iter=1000, tol=0.001, warm_start=True, positive=True, random_state=42, selection='cyclic')
'''
Fit and evaluate the regression models
'''
X_train,X_test,y_train,y_test = train_test_split(reg_features_1,reg_target_1,test_size=0.2,random_state=42)
reg_1.fit(reg_features_1, reg_target_1)  # note: fit on the full set, so the holdout RMSE below is optimistic
y_pre = reg_1.predict(X_test)
print(np.sqrt(metrics.mean_squared_error(y_test, y_pre)))
y_pred_1 = reg_1.predict(reg_test_1)
reg_df_1 = pd.DataFrame({
'user_id' : data[(data.classification_label == -1)&(data.pay_price == 0)]['user_id'],
'price':reg_test_1['pay_price'],
'prediction_pay_price' : y_pred_1
})
reg_df_1['prediction_pay_price'] = reg_df_1['prediction_pay_price'].map(lambda x: 0 if x < 0 else x)
X_train,X_test,y_train,y_test = train_test_split(reg_features_2,reg_target_2,test_size=0.2,random_state=42)
reg_2.fit(reg_features_2, reg_target_2)
y_pre = reg_2.predict(X_test)
print(np.sqrt(metrics.mean_squared_error(y_test, y_pre)))
y_pred_2 = reg_2.predict(reg_test_2)
reg_df_2 = pd.DataFrame({
'user_id' : data[(data.classification_label == -1)&(data.pay_price > 0)&(data.pay_price <3000)]['user_id'],
'price':reg_test_2['pay_price'],
'prediction_pay_price' : y_pred_2
})
reg_df_2['prediction_pay_price'] = reg_df_2['prediction_pay_price'].map(lambda x: 0.99 if x < 0 else x)
X_train,X_test,y_train,y_test = train_test_split(reg_features_3,reg_target_3,test_size=0.2,random_state=42)
reg_3.fit(reg_features_3, reg_target_3)
y_pre = reg_3.predict(X_test)
print(np.sqrt(metrics.mean_squared_error(y_test, y_pre)))
y_pred_3 = reg_3.predict(reg_test_3)
reg_df_3 = pd.DataFrame({
'user_id' : data[(data.classification_label == -1)&(data.pay_price >= 3000)]['user_id'],
'price':reg_test_3['pay_price'],
'prediction_pay_price' : y_pred_3
})
reg_df_3['prediction_pay_price'] = reg_df_3['prediction_pay_price'].map(lambda x: 3000 if x < 3000 else x)
reg_df_3['prediction_pay_price'] = reg_df_3['prediction_pay_price'].map(lambda x: x*1.8 if x < 20000 and x > 15000 else x)
reg_df = pd.concat([reg_df_1,reg_df_2,reg_df_3])
reg_df['prediction_pay_price'] = reg_df['prediction_pay_price'].map(lambda x: 0 if x < 0.99 else x)
reg_df['a'] = reg_df['prediction_pay_price'] - reg_df['price']
reg_df['a'] = reg_df['a'].map(lambda x: x if x < 0 else 0)
reg_df['prediction_pay_price'] = reg_df['prediction_pay_price'] - reg_df['a']
reg_df[['user_id','prediction_pay_price']].to_csv('D:/999github/kaggle/sub_sample.csv', index=False)
reg_df[['user_id','price','prediction_pay_price']].to_csv('D:/999github/kaggle/sub_sample_10.csv', index=False)
# =============================================================================
#
# '''
# Display regression feature weights
# '''
# importances = reg_1.coef_
# indices = np.argsort(importances)[::-1]
# print("Feature ranking:")
# for f in range(reg_features_1.shape[1]):
# print("%d. feature %d (%f): %s" % (f + 1, indices[f], importances[indices[f]] , reg_features_1.columns[indices[f]] ))
#
#
#
# importances = reg_2.coef_
# indices = np.argsort(importances)[::-1]
# print("Feature ranking:")
# for f in range(reg_features_2.shape[1]):
# print("%d. feature %d (%f): %s" % (f + 1, indices[f], importances[indices[f]] , reg_features_2.columns[indices[f]] ))
#
#
# importances = reg_3.coef_
# indices = np.argsort(importances)[::-1]
# print("Feature ranking:")
# for f in range(reg_features_3.shape[1]):
# print("%d. feature %d (%f): %s" % (f + 1, indices[f], importances[indices[f]] , reg_features_3.columns[indices[f]] ))
#
# =============================================================================
|
[
"noreply@github.com"
] |
pblh123.noreply@github.com
|
1230993ea613696925d7f4d532aaad2779d6e8ff
|
3cb720248739427782d3ff5ac19c71f865b716a9
|
/Decorator/base/base/settings.py
|
e385c8584e59a4168cb3d5e6a825ae417ff359bd
|
[] |
no_license
|
nidhi61/Aurora_repo
|
51b20350333e7e09718e1542f4df7b29b0372e7c
|
7c4c0ea53614eb6240893471c948a19fe2285be9
|
refs/heads/main
| 2023-08-07T00:09:14.743506
| 2021-10-01T18:45:24
| 2021-10-01T18:45:24
| 410,797,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
"""
Django settings for base project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-y+!os*5#j2kb2450j@_q3nr+d=+02dd$1-!-*^yoft7sh)svg_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'authApp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'base.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'base.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"abhishek.mehta2k@gmail.com"
] |
abhishek.mehta2k@gmail.com
|
6ab9f08d6457b9b944820808109b67b0941f1197
|
aeeb9321646de059d0c392a4afecd9b5d5aea571
|
/kladama/queries.py
|
2d58398310064911f607b12498b18dbd1a4a0933
|
[
"MIT"
] |
permissive
|
kladama/kladama-api-python
|
090fc68bd80b54533a4782bbf153f972b9f3c9c2
|
4d9650e986f3753b7e4303c2fc13a71cbcdfab28
|
refs/heads/master
| 2023-05-12T07:26:52.428423
| 2020-10-12T16:30:01
| 2020-10-12T16:30:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,238
|
py
|
import abc
from abc import ABC
# Base type
class QueryBase(ABC):
def __init__(self, parent):
self._parent = parent
@property
def parent(self):
return self._parent
@property
def url_path(self) -> str:
return '{0}{1}'.format(self._parent.url_path, self.sub_url)
@property
@abc.abstractmethod
def sub_url(self):
pass
class SingleResultQuery(QueryBase, ABC):
pass
# Filter Query types
class FilterQuery(QueryBase, ABC):
def __init__(self, parent: QueryBase, filter_value):
QueryBase.__init__(self, parent)
self._filter_value = filter_value
@property
def _collection_filter_value_path(self):
return ','.join(self._filter_value)
class ByDatesQuery(FilterQuery):
def __init__(self, parent, *dates):
FilterQuery.__init__(self, parent, dates)
@property
def sub_url(self):
dates_path = '/dates'
if len(self._filter_value) > 0:
return '{0}/{1}'.format(dates_path, self._collection_filter_value_path)
return dates_path
class ByDatePeriodQuery(QueryBase):
def __init__(self, parent, from_, to):
QueryBase.__init__(self, parent)
self._from = from_
self._to = to
@property
def sub_url(self):
return '/dates/{0}TO{1}'.format(self._from, self._to)
class ByKeyQuery(FilterQuery, SingleResultQuery):
@property
def sub_url(self):
return '/{0}'.format(self._filter_value)
class ByPhenomenaQuery(FilterQuery):
@property
def sub_url(self):
return '/phenom/{0}'.format(self._filter_value)
class BySourcesQuery(FilterQuery):
def __init__(self, parent, *sources):
FilterQuery.__init__(self, parent, sources)
@property
def sub_url(self):
return '/src/{0}'.format(self._collection_filter_value_path)
class ByStatusQuery(FilterQuery):
@property
def sub_url(self):
return '/status/{0}'.format(self._filter_value)
class BySubscriptionsQuery(FilterQuery):
def __init__(self, parent, *subscriptions):
FilterQuery.__init__(self, parent, subscriptions)
@property
def sub_url(self):
result = '/subsc'
if len(self._filter_value) > 0:
subscriptions = self._collection_filter_value_path
return '{0}/{1}'.format(result, subscriptions)
return result
class ByUserQuery(FilterQuery):
@property
def sub_url(self):
return '/user/{0}'.format(self._filter_value)
class ForecastQuery(QueryBase):
def __init__(self, parent):
QueryBase.__init__(self, parent)
@property
def sub_url(self):
return '/forecast'
class ObservedQuery(QueryBase):
def __init__(self, parent):
QueryBase.__init__(self, parent)
@property
def sub_url(self):
return '/observed'
# Result queries
class AroundQuery(QueryBase):
def __init__(self, parent, days, *dates):
QueryBase.__init__(self, parent)
self._days = days
self._dates = dates
    @property
    def sub_url(self) -> str:
        return '/{0}around/{1}'.format(self._days, ','.join(self._dates))
class LastQuery(QueryBase):
def __init__(self, parent):
QueryBase.__init__(self, parent)
@property
def sub_url(self):
return '/last'
class LastNQuery(QueryBase):
def __init__(self, parent, count: int):
QueryBase.__init__(self, parent)
self._count = count
@property
def sub_url(self):
return '/last{0}'.format(self._count)
class LastNYearsQuery(QueryBase):
def __init__(self, parent, years: int, *dates):
QueryBase.__init__(self, parent)
self._years = years
self._dates = dates
@property
def sub_url(self):
dates_path = ','.join(self._dates)
return '/{0}years{1}'.format(self._years, '/' + dates_path)
class PeriodQuery(QueryBase):
def __init__(self, parent, from_, to):
QueryBase.__init__(self, parent)
self._from = from_
self._to = to
@property
def sub_url(self):
return '/period/{0}TO{1}'.format(self._from, self._to)
class ResultsQuery(QueryBase):
def __init__(self, parent):
QueryBase.__init__(self, parent)
@property
def sub_url(self):
return '/results'
    def around(self, days: int, *dates):
        return AroundQuery(self, days, *dates)
def dates(self, *dates):
return ByDatesQuery(self, *dates)
@property
def last(self):
return LastQuery(self)
def last_n(self, count: int):
return LastNQuery(self, count)
def last_n_years(self, years: int, *dates):
return LastNYearsQuery(self, years, *dates)
def period(self, from_, to):
return PeriodQuery(self, from_, to)
# Queryable types
class ByKeyQueryable(QueryBase, ABC):
def by_key(self, name):
return ByKeyQuery(self, name)
class ByDatesQueryable(QueryBase, ABC):
def by_dates(self, *dates):
return ByDatesQuery(self, *dates)
class ByPhenomenaQueryable(QueryBase, ABC):
def by_phenomena(self, phenomena):
return ByPhenomenaQuery(self, phenomena)
class BySourcesQueryable(QueryBase, ABC):
def by_sources(self, *sources):
return BySourcesQuery(self, *sources)
class ByStatusQueryable(QueryBase, ABC):
def by_status(self, status):
return ByStatusQuery(self, status)
class BySubscriptionsQueryable(QueryBase, ABC):
def by_subsc(self, *subscriptions):
return BySubscriptionsQuery(self, *subscriptions)
class ByUserQueryable(QueryBase, ABC):
def by_user(self, user):
return ByUserQuery(self, user)
class ForecastQueryable(QueryBase, ABC):
@property
def forecast(self):
return ForecastQuery(self)
class ObservedQueryable(QueryBase, ABC):
@property
def observed(self):
return ObservedQuery(self)
# Specific queries
class GetSubscriptionQuery(ByKeyQuery):
@property
def results(self):
return ResultsQuery(self)
@property
def dates(self):
return ByDatesQuery(self)
def dates_since(self, from_):
return ByDatePeriodQuery(self, from_, 'NOW')
def dates_in(self, from_, to):
return ByDatePeriodQuery(self, from_, to)
class GetSubscriptionQueryable(QueryBase, ABC):
def by_subsc(self, subscription):
return GetSubscriptionQuery(self, subscription)
class AfterUserByKeyQueryable(ByUserQuery, ByKeyQueryable, ABC):
pass
class AfterUserBySubscriptionsQueryable(ByUserQuery):
def by_subsc(self, *subscriptions):
return BySubscriptionsQuery(self, *subscriptions)
class AfterUserGetSubscriptionQueryable(ByUserQuery, ByStatusQueryable):
def filter_by(self, subscription):
return GetSubscriptionQuery(self, subscription)
class AfterPhenomenaGetForecastObservedQueryable(ByPhenomenaQuery, ForecastQueryable, ObservedQueryable, ABC):
pass
class AfterSourcesGetForecastObservedQueryable(BySourcesQuery, ForecastQueryable, ObservedQueryable, ABC):
pass
class AfterUserByKeyStatusQueryable(AfterUserByKeyQueryable, ByStatusQueryable, ABC):
def by_key(self, key):
return GetSubscriptionQuery(self, key)
# Endpoint Query types
class EndpointQuery(QueryBase, ABC):
def __init__(self):
QueryBase.__init__(self, None)
@property
def url_path(self) -> str:
return self.sub_url
class AreaOfInterestQuery(EndpointQuery, ByKeyQueryable, ByUserQueryable):
@property
def sub_url(self):
return '/aoi'
def by_user(self, user):
return AfterUserByKeyQueryable(self, user)
class OrganizationQuery(EndpointQuery, ByKeyQueryable):
@property
def sub_url(self):
return '/org'
class PhenomenaQuery(EndpointQuery, ByKeyQueryable, BySourcesQueryable, ForecastQueryable, ObservedQueryable):
@property
def sub_url(self):
return '/phenom'
def by_sources(self, *sources):
return AfterSourcesGetForecastObservedQueryable(self, *sources)
class ScheduleQuery(EndpointQuery, ByUserQueryable):
@property
def sub_url(self):
return '/schedule'
def by_user(self, user):
return AfterUserBySubscriptionsQueryable(self, user)
class SourceQuery(EndpointQuery, ByKeyQueryable, ByPhenomenaQueryable, ForecastQueryable, ObservedQueryable):
@property
def sub_url(self):
return '/src'
def by_phenomena(self, phenomena):
return AfterPhenomenaGetForecastObservedQueryable(self, phenomena)
class SpatialOperationQuery(EndpointQuery):
@property
def sub_url(self):
return '/oper'
class SubscriptionQuery(EndpointQuery, ByKeyQueryable, ByUserQueryable, ByStatusQueryable):
@property
def sub_url(self):
return '/subsc'
def by_key(self, name):
return GetSubscriptionQuery(self, name)
def by_user(self, user):
return AfterUserByKeyStatusQueryable(self, user)
class UserQuery(EndpointQuery, ByKeyQueryable):
@property
def sub_url(self):
return '/user'
class VariableQuery(EndpointQuery, ByKeyQueryable, ByPhenomenaQueryable, BySourcesQueryable, ForecastQueryable,
ObservedQueryable):
@property
def sub_url(self):
return '/var'
def by_phenomena(self, phenomena):
return AfterPhenomenaGetForecastObservedQueryable(self, phenomena)
def by_sources(self, *sources):
return AfterSourcesGetForecastObservedQueryable(self, *sources)
# Query
class Queries:
@property
def aoi(self):
return AreaOfInterestQuery()
@property
def oper(self):
return SpatialOperationQuery()
@property
def org(self):
return OrganizationQuery()
@property
def phenom(self):
return PhenomenaQuery()
@property
def schedule(self):
return ScheduleQuery()
@property
def subsc(self):
return SubscriptionQuery()
@property
def src(self):
return SourceQuery()
@property
def user(self):
return UserQuery()
@property
def var(self):
return VariableQuery()
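# Usage sketch (assumed; the user and subscription keys are illustrative):
# q = Queries().subsc.by_user('some_user').by_key('some_subscription').results.last
# q.url_path  # -> '/subsc/user/some_user/some_subscription/results/last'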
|
[
"noreply@github.com"
] |
kladama.noreply@github.com
|
03badda6c736ae77fabe6eb4d69ce4e9211884cb
|
401f7294cc36a33400a8e563afc29bf43ad5eaf1
|
/FllSite/FLLapp/migrations/0002_auto_20161104_1845.py
|
f40335359d831dbebf02a4728f3a58229b54040d
|
[] |
no_license
|
PattonPowerPandas/Website
|
061663a67b947b7f885c59912dd6aeba371e5fe9
|
7f7f6bb3ac026904bc2726bc508a06cb4e735b6d
|
refs/heads/master
| 2021-01-19T00:15:18.729252
| 2016-11-18T23:35:04
| 2016-11-18T23:35:04
| 73,023,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-04 22:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FLLapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='center',
name='acceptsbirds',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='center',
name='acceptsdogscats',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='center',
name='acceptsprimates',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='center',
name='phone1',
field=models.SmallIntegerField(default=555),
),
migrations.AlterField(
model_name='center',
name='phone2',
field=models.SmallIntegerField(default=555),
),
migrations.AlterField(
model_name='center',
name='phone3',
field=models.SmallIntegerField(default=5555),
),
]
|
[
"noreply@github.com"
] |
PattonPowerPandas.noreply@github.com
|
46d488a81158109ea8212fb75a4625cf43db29e9
|
614fe05c42c0b9cd157b995f9f53fc33bb723ddc
|
/sample.py
|
cdd9698f0ac5c9541c6262c393474eb6d6bcfb16
|
[] |
no_license
|
ryanh121/Image-Captioning
|
1612a17f7e0c9dd1e95883da4e0ad09c3e896aae
|
bc791492de597d7ce989533bdabb6799e066f033
|
refs/heads/master
| 2020-05-22T22:15:24.158154
| 2019-05-14T04:09:53
| 2019-05-14T04:09:53
| 186,542,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,123
|
py
|
import torch
import numpy as np
import pickle
import os
from torchvision import transforms
from model import EncoderCNN, DecoderRNN
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from PIL import Image
from pycocotools.coco import COCO
print('finished loading module')
model_name = 'dropoutandlayer8'
start_model_idx = 0
end_model_idx = 14
idx2word_path = 'idx2word'
embed_size = 512
hidden_size = 512
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
coco = COCO('/projects/training/bawc/IC/annotations/captions_val2014.json')
img_ids = [203564, 179765, 322141, 16977]
img_paths = []
for img_id in img_ids:
img_paths.append('/projects/training/bawc/IC/val2014/' + coco.loadImgs(img_id)[0]['file_name'])
# img_paths.append('val2014/' + coco.loadImgs(img_id)[0]['file_name'])
def load_images(image_paths, transform=None):
images = []
original_images = []
for image_path in image_paths:
original_images.append(Image.open(image_path))
image = original_images[-1].convert('RGB')
if transform is not None:
image = transform(image)
images.append(image)
images = torch.stack(images)
return images, original_images
def plot(samples):
num = int(np.sqrt(len(img_ids)))
fig = plt.figure(figsize=(num*5, num*5), dpi = 300)
gs = gridspec.GridSpec(num, num)
#gs.update(wspace=0.02, hspace=0.02)
for i, (image, pred_caption) in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
#ax.set_aspect('equal')
plt.title(pred_caption, fontsize = 8)
plt.imshow(image)
return fig
transform = transforms.Compose([
transforms.Resize((224, 224), interpolation=2),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
# Load idx2word
with open(idx2word_path, 'rb') as f:
idx2word = pickle.load(f)
encoder = EncoderCNN(embed_size)
decoder = DecoderRNN(embed_size, hidden_size, len(idx2word), num_layers=1)
encoder = encoder.to(device)
decoder = decoder.to(device)
checkpoints = os.listdir('checkpoint')
for model_idx in range(start_model_idx,end_model_idx+1):
if checkpoints:
for cp in checkpoints:
name, num = cp[:-4].split('_')
num = int(num)
if name == model_name and model_idx == num:
state_dict = torch.load(
'checkpoint/{}_{}.tar'.format(model_name, num))
encoder.load_state_dict(state_dict['encoder_state_dict'])
decoder.load_state_dict(state_dict['decoder_state_dict'])
break
# test
decoder.eval()
encoder.eval()
with torch.no_grad():
# Prepare an image
images, original_images = load_images(img_paths, transform)
images = images.to(device)
# Generate an caption from the image
feature = encoder(images)
print('Encoder finished')
pred_ids = decoder.beam_search(feature)
print('beam search finished')
# Convert word_ids to words
pred_captions = []
for pred_id in pred_ids:
temp = []
for word_id in pred_id:
temp.append(idx2word[word_id])
if temp[-1] == '<end>':
#pred_captions.append(' '.join(temp))
break
if len(temp) > 8:
temp[len(temp)//2] = temp[len(temp)//2] + '\n'
pred_captions.append(' '.join(temp))
print('finished caption generation')
print(pred_captions)
print(images.size(),len(pred_captions))
result = zip(original_images,pred_captions)
fig = plot(result)
plt.savefig('{}_{}_NIC'.format(model_name,model_idx),bbox_inches='tight')
plt.close(fig)
# result = zip(original_images,['1','2','3','4'])
# fig = plot(result)
# plt.savefig('samplefig',bbox_inches='tight',dpi=400)
# plt.close(fig)
|
[
"noreply@github.com"
] |
ryanh121.noreply@github.com
|
68b6ffd46d0fbf12b15ab7c25f7a4336f5eadec5
|
32e730b561b771de91192b8942854c25b4989c0a
|
/src/models/supervised/K_nearest_neighbors.py
|
e4054df0ce63356dbb39f9ce9e2e05fc65138367
|
[] |
no_license
|
ramesht007/Machine-Learning-From-Scratch
|
4c14b953c6fb983539d34f150ec58d88e4126cef
|
0365c1a8ebdc081fe87082d3fa9858c98be9a3ea
|
refs/heads/main
| 2023-07-03T03:47:23.850864
| 2021-08-07T06:24:21
| 2021-08-07T06:24:21
| 391,897,143
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
import numpy as np
from src.utils.data_operation import euclidean_distance
class KNN():
""" K Nearest Neighbor classifier.
--------
input:
k : {int}
Number of nearest neighbors that will determine
the class or value of prediction.
"""
def __init__(self, k=3):
self.k = k
def fit(self, X, y):
# store the training samples for later use
self.X_train = X
self.y_train = y
def predict(self, X):
# this function can get single or multiple samples at a time
predicted_labels = [self._predict(x) for x in X]
return np.array(predicted_labels)
def _predict(self, x):
# this method will be passed with one sample at a time
# Compute distances
distances = [euclidean_distance(x, x_train) for x_train in self.X_train]
# k nearest samples, labels
k_samples_index = np.argsort(distances)[:self.k] # sort the distances and select top k samples
k_nearest_label = [self.y_train[i] for i in k_samples_index] # get the labels based on index from k_sample_index
# majority vote, most common class model
most_common = self._vote(np.array(k_nearest_label))
return most_common
def _vote(self, neighbor_labels):
""" Return the most common class among the neighbor samples """
counts = np.bincount(neighbor_labels.astype('int'))
return counts.argmax()
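# --- Added usage sketch (not part of the original file) ---
# A minimal demo of the KNN class above on hypothetical toy data; it
# assumes src.utils.data_operation.euclidean_distance is importable
# exactly as in the import at the top of the file.
if __name__ == "__main__":
    X_train = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
    y_train = np.array([0, 0, 1, 1])
    clf = KNN(k=3)
    clf.fit(X_train, y_train)
    print(clf.predict(np.array([[0.05, 0.1], [5.1, 5.0]])))  # -> [0 1]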
|
[
"36106177+ramesht007@users.noreply.github.com"
] |
36106177+ramesht007@users.noreply.github.com
|
e2765a1f14faec13301f2a8e52b72c76e99f7e6e
|
9ce3a5b7a85bf5d7a99145000b995b0ca3776809
|
/run.py
|
d8b50e02c4d1888bd03c4a18cfe4d91e94d86697
|
[
"Apache-2.0"
] |
permissive
|
ducha-aiki/image-matching-benchmark
|
29c659420cb3343f9a61e7e3fde6f6daf158c85c
|
f1ab8f9e1dc1545648b2e700e7179c0ef5ea6e89
|
refs/heads/master
| 2021-01-03T21:17:38.133514
| 2020-02-13T06:58:23
| 2020-02-13T06:58:23
| 240,239,775
| 3
| 0
|
Apache-2.0
| 2020-02-13T11:02:16
| 2020-02-13T11:02:15
| null |
UTF-8
|
Python
| false
| false
| 10,080
|
py
|
# Copyright 2020 Google LLC, University of Victoria, Czech Technical University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import os
from config import get_config, print_usage, validate_method
from utils.colmap_helper import is_colmap_complete
from utils.io_helper import load_json
from utils.queue_helper import (create_and_queue_jobs, create_sh_cmd,
estimate_runtime, is_job_complete,
create_job_key)
def create_eval_jobs(dep_list, mode, cfg, job_dict):
# Check if job is complete
if is_job_complete(mode, cfg):
print(' -- File {} already exists'.format(mode))
return []
# Check if other program is doing the same job
job_key = create_job_key(mode, cfg)
if job_key in job_dict:
print(' -- {} is already running on {}'.format(mode,
job_dict[job_key]))
return [job_dict[job_key]]
else:
# Update dependency
dep_str = None
if len(dep_list) > 0:
dep_str = ','.join(dep_list)
# Check if matches are computed -- queue (dependent on previous
# job)
print(' -- Computing {}'.format(mode))
cmd_list = [create_sh_cmd('compute_{}.py'.format(mode), cfg)]
job = create_and_queue_jobs(cmd_list, cfg, dep_str)
job_dict[job_key] = job
return [job]
def eval_viz_stereo(dep_list, cfg):
# Do this one for one run
if cfg.run > 0:
return
# Update dependency
dep_str = None
if len(dep_list) > 0:
dep_str = ','.join(dep_list)
# The checks on existing files run inside, as there are many of them
print(' -- Generating stereo visualizations')
cmd_list = [create_sh_cmd('viz_stereo.py', cfg)]
create_and_queue_jobs(cmd_list, cfg, dep_str)
def eval_viz_colmap(dep_list, cfg):
# Do this one for one run
if cfg.run > 0:
return
# Update dependency
dep_str = None
if len(dep_list) > 0:
dep_str = ','.join(dep_list)
# The checks on existing files run inside, as there are many of them
print(' -- Generating multi-view visualizations')
cmd_list = [create_sh_cmd('viz_colmap.py', cfg)]
create_and_queue_jobs(cmd_list, cfg, dep_str)
def eval_packing(dep_list, cfg):
# Update dependency
dep_str = None
if len(dep_list) > 0:
dep_str = ','.join(dep_list)
print(' -- Packing results')
cmd_list = [create_sh_cmd('pack_res.py', cfg)]
create_and_queue_jobs(cmd_list, cfg, dep_str)
def eval_multiview(dep_list, cfg, bag_size_list, bag_size_num):
colmap_jobs = []
# Update dependency
dep_str = None
if len(dep_list) > 0:
dep_str = ','.join(dep_list)
# COLMAP evaluation
#
# TODO: For COLMAP, should we queue twice?
cfg_bag = deepcopy(cfg)
cmd_list = []
cfg_list = []
print(' -- The multiview task will work on these bags {}'.format([
'{} (x{})'.format(b, n) for b, n in zip(bag_size_list, bag_size_num)
]))
for _bag_size, _num_in_bag in zip(bag_size_list, bag_size_num):
for _bag_id in range(_num_in_bag):
cfg_bag.bag_size = _bag_size
cfg_bag.bag_id = _bag_id
# Check if colmap evaluation is complete -- queue
if not is_colmap_complete(cfg_bag):
cmd_list += [create_sh_cmd('eval_colmap.py', cfg_bag)]
cfg_list += [deepcopy(cfg_bag)]
else:
print(' -- Multiview: bag size {} bag id {} results'
' already exists'.format(_bag_size, _bag_id))
# Check cfg_list to retrieve the estimated runtime. Queue
# cmd_list and reset both lists if we are expected to have
# less than 30 min of wall time after this job.
t_split = [float(t) for t in cfg.cc_time.split(':')]
if estimate_runtime(cfg_list) >= t_split[0] + \
t_split[1] / 60 - 0.5:
colmap_jobs += [create_and_queue_jobs(cmd_list, cfg, dep_str)]
cmd_list = []
cfg_list = []
# Queue any leftover jobs for this bag
if len(cmd_list) > 0:
colmap_jobs += [create_and_queue_jobs(cmd_list, cfg, dep_str)]
return colmap_jobs
def main(cfg):
''' Main routine for the benchmark '''
# Read data and splits
for dataset in ['phototourism']:
for subset in ['val', 'test']:
setattr(cfg, 'scenes_{}_{}'.format(dataset, subset),
'./json/data/{}_{}.json'.format(dataset, subset))
setattr(cfg, 'splits_{}_{}'.format(dataset, subset),
'./json/bag_size/{}_{}.json'.format(dataset, subset))
# Read the list of methods and datasets
method_list = load_json(cfg.json_method)
for i, method in enumerate(method_list):
print('Validating method {}/{}: "{}"'.format(
i + 1, len(method_list), method['config_common']['json_label']))
validate_method(method, is_challenge=cfg.is_challenge)
# Back up original config
cfg_orig = deepcopy(cfg)
job_dict = {}
# Loop over methods, datasets/scenes, and tasks
for method in method_list:
# accumulate packing dependencies over datasets and runs
all_stereo_jobs = []
all_multiview_jobs = []
all_relocalization_jobs = []
for dataset in ['phototourism']:
# Load data config
scene_list = load_json(
getattr(cfg_orig,
'scenes_{}_{}'.format(dataset, cfg_orig.subset)))
bag_size_json = load_json(
getattr(cfg_orig,
'splits_{}_{}'.format(dataset, cfg_orig.subset)))
bag_size_list = [b['bag_size'] for b in bag_size_json]
bag_size_num = [b['num_in_bag'] for b in bag_size_json]
for scene in scene_list:
print('Working on {}: {}/{}'.format(
method['config_common']['json_label'], dataset, scene))
# For each task
for task in ['stereo', 'multiview', 'relocalization']:
# Skip if the key does not exist or it is empty
cur_key = 'config_{}_{}'.format(dataset, task)
if cur_key not in method or not method[cur_key]:
print(
'Empty config for "{}", skipping!'.format(cur_key))
continue
# Append method to config
cfg = deepcopy(cfg_orig)
cfg.method_dict = deepcopy(method)
cfg.dataset = dataset
cfg.task = task
cfg.scene = scene
# Features
feature_jobs = create_eval_jobs([], 'feature', cfg,
job_dict)
# Matches
match_jobs = create_eval_jobs(feature_jobs, 'match', cfg,
job_dict)
# Filter
match_inlier_jobs = create_eval_jobs(
match_jobs, 'filter', cfg, job_dict)
# Empty dependencies
stereo_jobs = []
multiview_jobs = []
relocalization_jobs = []
num_runs = getattr(
cfg, 'num_runs_{}_{}'.format(cfg.subset, task))
for run in range(num_runs):
cfg.run = run
# Pose estimation and stereo evaluation
if task == 'stereo' and cfg.eval_stereo:
geom_model_jobs = create_eval_jobs(
match_inlier_jobs, 'model', cfg, job_dict)
stereo_jobs += create_eval_jobs(
geom_model_jobs, 'stereo', cfg, job_dict)
all_stereo_jobs += stereo_jobs
# Visualization for stereo
if task == 'stereo' and cfg.run_viz:
eval_viz_stereo(stereo_jobs, cfg)
# Multiview
if task == 'multiview' and cfg.eval_multiview:
multiview_jobs += eval_multiview(
match_inlier_jobs, cfg, bag_size_list,
bag_size_num)
all_multiview_jobs += multiview_jobs
# Visualization for colmap
if task == 'multiview' and cfg.run_viz:
eval_viz_colmap(multiview_jobs, cfg)
if task == 'relocalization' and cfg.eval_relocalization:
raise NotImplementedError(
'TODO relocalization task')
# Packing -- can be skipped with --skip_packing=True
# For instance, when only generating visualizations
if not cfg.skip_packing:
cfg = deepcopy(cfg_orig)
cfg.method_dict = deepcopy(method)
eval_packing(
all_stereo_jobs + all_multiview_jobs + all_relocalization_jobs,
cfg)
if __name__ == '__main__':
cfg, unparsed = get_config()
# If we have unparsed arguments, print usage and exit
if len(unparsed) > 0:
print_usage()
exit(1)
main(cfg)
|
[
"etru1927@gmail.com"
] |
etru1927@gmail.com
|
8bfcc4e0bbf644fede4572d63ea6c88246f3b3c2
|
18b99e3badc3648ef33db3261f94b504fdd03ea3
|
/Process10/PythonScripts/ChIPSeqPipeline.py
|
d5c0fdf70343c8754a005a0fc6cac34a93736b1e
|
[] |
no_license
|
inambioinfo/ChIPSeqQC
|
311b9ea75515f6dbd854cd2ae52fc3be6add7a70
|
8f902ecf521b6ec1df4853ab9930c557dc76f19f
|
refs/heads/master
| 2020-04-18T10:12:35.434219
| 2013-11-16T16:24:14
| 2013-11-16T16:24:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,487
|
py
|
#!/bin/env /home/mib-cri/software/PythonInstall/bin/python2.7
import argparse
import textwrap
import os
import ConfigParser
import sys
import subprocess
parser = argparse.ArgumentParser(
prog='ChIPSeqPipeline',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='\nChIP-Seq Analysis Pipeline\nWritten by Tom Carroll,Suraj Menon and Rory Stark\nCRUK, CRI\n2012\n')
group3 = parser.add_argument_group('Analysis Settings')
group = parser.add_argument_group('SLX and Project Management')
group1 = parser.add_argument_group('Coverage And Pileup')
group2 = parser.add_argument_group('Peak Calling')
ConfigArgs = parser.add_argument_group('Additional Config Arguments')
#argparse.RawDescriptionHelpFormatter
group3.add_argument("--genome",nargs=1,choices=["HG18","GRCh37","MM9","MM8"],dest="genome",default=None,help='Genome to use for analysis')
group3.add_argument('--excludedRegions', nargs=1,type=file,dest="excludedregions",default=None,metavar="",
help='Bedfile of genomic regions to exclude from analysis')
group3.add_argument('--useExcludedRegions',action='store_true', default=True,dest="useexcludedregions",
help='Remove reads mapping to blacklist regions')
group3.add_argument('--mapqFilter', nargs=1,dest="mapqfilter",metavar="",type=int,default=15,
help='MapQ quality filter setting')
group3.add_argument('--removeDuplicates', action='store_true', default=False,
dest='removeduplicates',
help='Remove duplicates from analysis')
group1.add_argument('--bothStrands', action='store_true', default=False,
dest='BothStrands',
help='Generate seperate BedGraph files for either strand')
group.add_argument('--notProjectDirectory', action='store_false', default=True,
dest='usepresentdir',
help='Use directory basename for project assignment')
group.add_argument('--workingDirectory',nargs=1,metavar="",
#default=os.getcwd(),
dest='workingdirectory',
default=None,
help='Directory for project')
group.add_argument('--tempDirectory',nargs=1,metavar="",
#default=os.getcwd(),
dest='tempdirectory',
default=None,
help='Temporary Directory')
group2.add_argument('--callMacsPeaks', nargs=1,choices=['Yes','No'],
dest='callmacspeaks',default="Yes",
help='Call MACS peaks')
group2.add_argument('--callSicerPeaks', nargs=1,choices=['Yes','No'],
dest='callsicerpeaks',default="No",
help='Call Sicer peaks')
group2.add_argument('--callTpicsPeaks', nargs=1,choices=['Yes','No'],
dest='calltpicspeaks',default="No",
help='Call T-PICS peaks')
group2.add_argument('--callMacsMotifs', nargs=1,choices=['Yes','No'],
dest='callmacsmotifs',default="No",
help='Call T-PICS peaks')
group.add_argument('--bamDirectory',nargs=1,metavar="",dest='bamdirectory',
# default=os.path.join(os.getcwd(),"bamFiles"),
default=None,
help='Directory for bam files (default: %(default)s)',
)
group.add_argument('--fastqDirectory',nargs=1,metavar="",dest='fastqdirectory',
#default=os.path.join(os.getcwd(),"FQFiles"),
default=None,
help='Directory for fastq files',
)
group.add_argument('--addSLXIDs',nargs="*",action='append', dest='SLXids',metavar="SLXID",
default=[],
help='SLXID/s to be added to the current project',
)
group.add_argument('--addProjects',nargs="*",action='append', dest='Projects',metavar="ProjectID",
default=[],
help='Project/s to be merged with the current project',
)
group.add_argument('--addMetadata',nargs="*",action='append', dest='metadata',metavar="SampleSheet.csv",
default=[],
help='SampleSheets containing metadata to be added to the current project',
)
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
ConfigArgs.add_argument('Config Variables',nargs=argparse.REMAINDER,help="Overwrite or include additional variables into config.ini")
results = parser.parse_args()
CmdLineOptions = vars(results)
AllKeys = CmdLineOptions.keys()
if str(CmdLineOptions["tempdirectory"]) == "None":
if str(CmdLineOptions["workingdirectory"]) == "None":
Temptemp = os.path.join(os.getcwd(),"Temp")
if str(CmdLineOptions["workingdirectory"]) != "None":
Temptemp = os.path.join(ConfigOptions["workingdirectory"],"Temp")
if str(CmdLineOptions["tempdirectory"]) != "None":
Temptemp = os.path.join(os.getcwd(),"Temp")
Temptemp = os.path.join(ConfigOptions["workingdirectory"],"Temp")
config = ConfigParser.ConfigParser()
if os.path.exists(os.path.join(Temptemp,"config.ini")):
config.read(os.path.join(Temptemp,"config.ini"))
print "\nLocal config file found"
else:
config.read("/lustre/mib-cri/carrol09/Work/MyPipe/Process10/Config/Config.ini")
print "\nUsing generic config\n"
ConfigOptions = {}
for section in config.sections():
for option in config.options(section):
ConfigOptions[option] = config.get(section, option)
for Key in AllKeys:
if Key in ConfigOptions:
#print Key+"\t"+ConfigOptions[Key]
if str(ConfigOptions[Key]) != str(CmdLineOptions[Key]) and CmdLineOptions[Key] is not None:
print "Overwriting config option for "+Key+" to "+str(CmdLineOptions[Key][0])+"\n"
if Key != "genome":
ConfigOptions[Key] = str(CmdLineOptions[Key])
if Key == "genome":
ConfigOptions[Key] = str(CmdLineOptions[Key][0])
if Key == "callmacspeaks":
ConfigOptions[Key] = str(CmdLineOptions[Key][0])
if Key == "callsicerpeaks":
ConfigOptions[Key] = str(CmdLineOptions[Key][0])
if Key == "calltpicspeaks":
ConfigOptions[Key] = str(CmdLineOptions[Key][0])
if Key == "callmacsmotifs":
ConfigOptions[Key] = str(CmdLineOptions[Key][0])
#ConfigOptions[Key] = CmdLineOptions[Key]
#print str(ConfigOptions[Key][0])+"\n"
if str(ConfigOptions["genome"]) == "None" and str(CmdLineOptions["genome"]) == "None":
print "No Genome set in config or as commandline argument\nplease see usage with {ChipSeqPipeline --help}\n"
sys.exit()
if str(ConfigOptions["workingdirectory"]) == "None" and str(CmdLineOptions["workingdirectory"]) == "None":
print "No working directory set in config or as commandline argument\nworking directory as "+os.getcwd()+"\n"
ConfigOptions["workingdirectory"] = os.getcwd()
if str(ConfigOptions["bamdirectory"]) == "None" and str(CmdLineOptions["bamdirectory"]) == "None":
print "No Bam directory set in config or as commandline argument\nSetting Bam directory as "+os.path.join(ConfigOptions["workingdirectory"],"bamFiles\n")
ConfigOptions["bamdirectory"] = os.path.join(ConfigOptions["workingdirectory"],"bamFiles")
if str(ConfigOptions["fastqdirectory"]) == "None" and str(CmdLineOptions["fastqdirectory"]) == "None":
print "No FastQ directory set in config or as commandline argument\nSetting working directory as "+os.path.join(ConfigOptions["workingdirectory"],"FQFiles\n")
ConfigOptions["fastqdirectory"] = os.path.join(ConfigOptions["workingdirectory"],"FQFiles")
if str(ConfigOptions["tempdirectory"]) == "None" and str(CmdLineOptions["tempdirectory"]) == "None":
print "No Temp directory set in config or as commandline argument\nSetting temp directory as "+os.path.join(ConfigOptions["workingdirectory"],"Temp\n")
ConfigOptions["tempdirectory"] = os.path.join(ConfigOptions["workingdirectory"],"Temp")
if not os.path.exists(ConfigOptions["tempdirectory"]):
os.makedirs(ConfigOptions["tempdirectory"])
ExtraSLXids = []
if CmdLineOptions["metadata"]:
metadata = CmdLineOptions["metadata"][0]
metaFile = open(os.path.join(ConfigOptions["tempdirectory"],"metadata.txt"),"w")
for meta in metadata:
metaFile.write(str(meta)+"\n")
metaFile.close()
ExtraMeta = []
if CmdLineOptions["SLXids"]:
ExtraSLXids = CmdLineOptions["SLXids"][0]
SLXFile = open(os.path.join(ConfigOptions["tempdirectory"],"Samples_SLXIDs.txt"),"w")
for SLXid in ExtraSLXids:
SLXFile.write(str(SLXid)+"\n")
SLXFile.close()
ExtraProjects = []
if CmdLineOptions["Projects"] or CmdLineOptions["usepresentdir"]:
ProjectFile = open(os.path.join(ConfigOptions["tempdirectory"],"Projects.txt"),"w")
if CmdLineOptions["Projects"]:
TempProjects=CmdLineOptions["Projects"][0]
for aProj in TempProjects:
ExtraProjects.append(aProj)
if CmdLineOptions["usepresentdir"]:
ExtraProjects.append(os.path.basename(os.getcwd()))
for project in ExtraProjects:
ProjectFile.write(str(project)+"\n")
ProjectFile.close()
if not ExtraProjects and not ExtraSLXids:
print "No Samples or Project specified!!! Can't do much"
sys.exit()
subprocess.call(["bash", "/lustre/mib-cri/carrol09/Work/MyPipe/Process10/BashScripts/GetLimsInfo.sh",ConfigOptions["tempdirectory"]])
if not os.path.exists(ConfigOptions["bamdirectory"]):
os.makedirs(ConfigOptions["bamdirectory"])
subprocess.call("lfs setstripe "+ConfigOptions["bamdirectory"],shell=True)
inifile = open(os.path.join(ConfigOptions["tempdirectory"],"config.ini"),'w')
OutConfig = ConfigParser.ConfigParser()
# add the settings to the structure of the file, and lets write it out...
OutConfig.add_section('Analysis Settings')
OutConfig.add_section('Peak Calling')
OutConfig.add_section('SLX and Project Management')
OutConfig.add_section('Executables')
OutConfig.add_section('Custom Scripts')
OutConfig.add_section('ExcludedRegions')
OutConfig.add_section('Genomes')
OutConfig.set('Analysis Settings','genome',str(ConfigOptions["genome"]))
OutConfig.set('Analysis Settings','excludedRegions',str(ConfigOptions["excludedregions"]))
OutConfig.set('Analysis Settings','mapQFilter',str(ConfigOptions["mapqfilter"]))
OutConfig.set('Analysis Settings','useExcludedRegionFilter',str(ConfigOptions["useexcludedregionfilter"]))
OutConfig.set('Analysis Settings','removeDuplicates',str(ConfigOptions["removeduplicates"]))
OutConfig.set('Peak Calling','callmacspeaks',str(ConfigOptions["callmacspeaks"]))
OutConfig.set('Peak Calling','callsicerpeaks',str(ConfigOptions["callsicerpeaks"]))
OutConfig.set('Peak Calling','calltpicspeaks',str(ConfigOptions["calltpicspeaks"]))
OutConfig.set('Peak Calling','callmacsmotifs',str(ConfigOptions["callmacsmotifs"]))
OutConfig.set('SLX and Project Management','workingdirectory',str(ConfigOptions["workingdirectory"]))
OutConfig.set('SLX and Project Management','bamdirectory',str(ConfigOptions["bamdirectory"]))
OutConfig.set('SLX and Project Management','fastqdirectory',str(ConfigOptions["fastqdirectory"]))
OutConfig.set('SLX and Project Management','tempdirectory',str(ConfigOptions["tempdirectory"]))
OutConfig.set('Executables','bwa',str(ConfigOptions["bwa"]))
OutConfig.set('Executables','python',str(ConfigOptions["python"]))
OutConfig.set('Executables','samtools',str(ConfigOptions["samtools"]))
OutConfig.set('Executables','picard',str(ConfigOptions["picard"]))
OutConfig.set('Executables','perl',str(ConfigOptions["perl"]))
OutConfig.set('Executables','rsync',str(ConfigOptions["rsync"]))
OutConfig.set('Executables','bedtools',str(ConfigOptions["bedtools"]))
OutConfig.set('Executables','java',str(ConfigOptions["java"]))
OutConfig.set('Custom Scripts','bam_processing_script',str(ConfigOptions["bam_processing_script"]))
OutConfig.set('Custom Scripts','metadata_script',str(ConfigOptions["metadata_script"]))
OutConfig.set('Custom Scripts','getgenome_script',str(ConfigOptions["getgenome_script"]))
OutConfig.set('Custom Scripts','bamlocations_script',str(ConfigOptions["bamlocations_script"]))
OutConfig.set('Custom Scripts','fastqlocations_script',str(ConfigOptions["fastqlocations_script"]))
OutConfig.set('Custom Scripts','sicer_cri_script',str(ConfigOptions["sicer_cri_script"]))
OutConfig.set('Custom Scripts','tpicszeta_cri_script',str(ConfigOptions["tpicszeta_cri_script"]))
OutConfig.set('ExcludedRegions','HG18',str(ConfigOptions["hg18"]))
OutConfig.set('ExcludedRegions','GRCh37',str(ConfigOptions["grch37"]))
OutConfig.set('Genomes','HG18',str(ConfigOptions["hg18"]))
OutConfig.set('Genomes','GRCh37',str(ConfigOptions["grch37"]))
OutConfig.write(inifile)
inifile.close()
subprocess.call(["/home/mib-cri/software/R-2.14.0/bin/Rscript","--vanilla","/lustre/mib-cri/carrol09/Work/MyPipe/Process10/RScripts/RMainPipeSetUp.r",str(ConfigOptions["tempdirectory"])])
#subprocess.call("bash /lustre/mib-cri/carrol09/Work/MyPipe/Process10/BashScripts/GetLimsInfoOld.sh",shell=True)
#subprocess.call(["bash", "/lustre/mib-cri/carrol09/Work/MyPipe/Process10/BashScripts/GetLimsInfoOld.sh"])
#subprocess.call(["bash",ConfigOptions["lims_info_script"]])
#subprocess.call(["bash",str(ConfigOptions["lims_info_script"]),str(ConfigOptions["tempdirectory"])])
#subprocess.call(["bash", "/lustre/mib-cri/carrol09/Work/MyPipe/Process10/BashScripts/GetLimsInfoOld.sh"])
#subprocess.call(["bash",ConfigOptions["lims_info_script"],ConfigOptions["tempdirectory"]])
#print(ConfigOptions["tempdirectory"])
#print(ConfigOptions["lims_info_script"])
|
[
"tc.infomatics@gmail.com"
] |
tc.infomatics@gmail.com
|
7184479c3765556523e2f0825cfcd578d81e8a89
|
755fec3e8280d4f07de7f637ea7e3acc7001a00d
|
/RostrosCNN/RostrosCNN.py
|
159a314d3818a884c19466e95cdbe84b76d48483
|
[] |
no_license
|
julianapads/RostrosCNN
|
980086976e96df523b95f2a73229305342056818
|
9d990e742d1c3fcb882a28487d8758a691f748e7
|
refs/heads/master
| 2020-09-09T00:21:32.875466
| 2019-11-12T19:42:15
| 2019-11-12T19:42:15
| 221,287,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 1 17:28:20 2019
@author: JuanMC
"""
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
CatDog=Sequential()
# Convolution
CatDog.add(Conv2D(32,(5,5), input_shape=(64,64,3), activation='relu'))
CatDog.add(MaxPooling2D(pool_size=(2,2)))
# Convolution
CatDog.add(Conv2D(32,(5,5), input_shape=(64,64,3), activation='relu'))
CatDog.add(MaxPooling2D(pool_size=(2,2)))
CatDog.add(Flatten())
CatDog.add(Dense( units=512, activation='relu' ))
CatDog.add(Dense( units=512, activation='relu' ))
CatDog.add(Dense( units=1, activation='sigmoid'))
# Training parameters
CatDog.compile(optimizer='adam', loss='binary_crossentropy',metrics=['accuracy'])
train_datagen=ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen=ImageDataGenerator(rescale=1./255)
training_set=train_datagen.flow_from_directory('C:/Users/julia/Desktop/training_set_short', target_size=(64,64), batch_size=32, class_mode='binary')
test_set=test_datagen.flow_from_directory('C:/Users/julia/Desktop/test_set_short', target_size=(64,64), batch_size=32, class_mode='binary')
CatDog.fit_generator(training_set, steps_per_epoch=38, epochs=150 , validation_data=test_set, validation_steps=3)
CatDog.save('Red_face5')
#int( np.ceil(1183/ 32) )
#int( np.ceil(60/ 32) )
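# --- Added sketch (not in the original file) ---
# The commented np.ceil lines above hint at how steps_per_epoch=38 and
# validation_steps=3 were derived. A more robust option is to compute
# them from the generators; Keras' flow_from_directory iterators expose
# a .samples attribute with the number of images found.
steps_per_epoch = int(np.ceil(training_set.samples / 32))
validation_steps = int(np.ceil(test_set.samples / 32))
print(steps_per_epoch, validation_steps)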
|
[
"noreply@github.com"
] |
julianapads.noreply@github.com
|
69c57df03fa273b49d12c88542be2ea63de291c0
|
df5a8c7785d7e8c3a3056da2296526ea6a6f6ec2
|
/Learn Python The Hard Way/ex10.py
|
0f2957dcf226de8564eaf4e2e5ca8406cf56ff91
|
[] |
no_license
|
ldpcosta/LearningBooks
|
1d6e06cf97bb882f2eaca4b007131aa86734c319
|
d0939e2b4da92db95bdcf941f830f319112babe6
|
refs/heads/master
| 2021-01-18T02:00:00.521121
| 2016-08-11T10:53:14
| 2016-08-11T10:53:14
| 62,816,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
#exercise 10 - more printing
tabby_cat = "\tI'm tabbed in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a \\ cat."
fat_cat = """
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
print tabby_cat
print persian_cat
print backslash_cat
print fat_cat
print """
These are some escape sequence examples:
\t Backslash \\ a
\t Double-quote \" a
\t Single-quote \' a
\t ASCII bell \a a
\t ASCII backspace \b a
\t ASCII formfeed \f a
\t ASCII linefeed \n a
\t Carriage return \r a
\t horizontal tab \t a
\t ASCII vertical tab \v a
"""
# A simple text "spinner": \r returns the cursor to the start of the line
# so each frame overwrites the previous one; sys.stdout.write avoids the
# newline that the print statement would append.
import sys
for i in range(30):
    for j in ["/","-","|","\\","|"]:
        sys.stdout.write("%s\r" % j)
        sys.stdout.flush()
|
[
"COSTAL@hbm.com"
] |
COSTAL@hbm.com
|
298940d3569217e9ed7accd8e0f7b762d3d7a94b
|
37dbc79767c29aadc0a2c60a7c20dcea1c345479
|
/RickyA/PythonExplorations/files/files/GripRunner.py
|
406350132e39dcac1134627e0f918786ed8f1ba4
|
[] |
no_license
|
Team100/2016-roboRIO-repo
|
47e20c875fa61cbc8aff3541e28a45c2a3366148
|
51385443e900fc9019451eb8d1e52ca59ebc8138
|
refs/heads/master
| 2021-03-22T02:15:20.020425
| 2019-01-04T04:21:11
| 2019-01-04T04:21:11
| 45,866,463
| 0
| 0
| null | 2019-01-04T04:21:12
| 2015-11-09T20:59:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
#!/usr/bin/python3
"""
Simple skeleton program for running an OpenCV pipeline generated by GRIP and using NetworkTables to send data.
Users need to:
1. Import the generated GRIP pipeline, which should be generated in the same directory as this file.
2. Set the network table server IP. This is usually the robots address (roborio-TEAM-frc.local) or localhost
3. Handle putting the generated code into NetworkTables
"""
import cv2
import numpy as np
from networktables import NetworkTable
from grip import GripPipeline # TODO change the default module and class, if needed
cx = None
ip = '127.0.0.1'
piLoc = 'http://raspberrypi.local:5802/?action=stream'
#cv2.namedWindow("Display")
#myImage = cv2.imread("C:/Users/Team 100/GRIP/CardboardVisionTarget/files/myPic.jpg", cv2.IMREAD_COLOR)
#cv2.imshow("Display", myImage)
#cv2.waitKey(0)
#def extra_processing(pipeline: GripPipeline):
# """
# Performs extra processing on the pipeline's outputs and publishes data to NetworkTables.
# :param pipeline: the pipeline that just processed an image
# :return: None
# """
# # TODO: Users need to implement this.
# # Useful for converting OpenCV objects (e.g. contours) to something NetworkTables can understand.
# pass
def main():
try:
NetworkTable.setTeam(100) # TODO set your team number
NetworkTable.setIPAddress(ip)
NetworkTable.setClientMode()
NetworkTable.initialize()
except:
pass
#print("Already Initialized")
sd = NetworkTable.getTable('SmartDashboard')
cap = cv2.VideoCapture(1)
pipeline = GripPipeline()
while True:
ret, frame = cap.read()
if ret:
pipeline.process(frame) # TODO add extra parameters if the pipeline takes more than just a single image
if pipeline.center is not None:
sd.putNumberArray("Center", pipeline.center)
cv2.rectangle(frame, (pipeline.center[0]-5, pipeline.center[1]-5), (pipeline.center[0]+5, pipeline.center[1]+5), (0,0,255), 1)
cv2.imshow("myFrame", frame)
#extra_processing(pipeline)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
Team100.noreply@github.com
|
34742c7fda1ef63e7c3eff473e6820fd8c28bc64
|
804892fd741a9148091f23e8f2bec6d3ea46b9f4
|
/try02.py
|
cf9ab3d99e4d1c068beb5337a5077ebfbdb14f97
|
[] |
no_license
|
waysman1/pyapi
|
2505a11ddc5dcb384089cae23063f135ffc7ee7e
|
722b417640596cba18a1fdd8215138c36be5228d
|
refs/heads/master
| 2020-06-26T03:55:09.826493
| 2019-08-01T14:31:54
| 2019-08-01T14:31:54
| 199,521,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
#!/usr/bin/env python3
import sys
#start with our infinite loop
while True:
try:
print("let 's divide x by y!")
x = int(input("What is the integer value of x?"))
y = int(input("What is the integer value of y?"))
print("The value of x/y: ", x/y)
except ZeroDivisionError as zerr:
print("Handling of a run time error:", zerr)
except:
print("oh wow. We did not produce code to handle this type of error yet.")
print(sys.exc_info()[0])
raise
|
[
"lovern.m.ways@verizon.com"
] |
lovern.m.ways@verizon.com
|
5197eee454e25481782e8dd7dc12d0ee79a3be2c
|
5d0add2ccde6732ff646c1cafba8650e8d30db8b
|
/interface/beer-control.py
|
e8fffa37dfc2b28e658ce10a0c1c42a34afdaa68
|
[] |
no_license
|
paulbaumgart/mash-lauter-control
|
4ae51eb8468d02de4221c4502af3160efbe9db20
|
d3d871344e7062dfa3b2aed7150a938768b22f99
|
refs/heads/master
| 2021-01-11T17:07:47.632080
| 2011-11-06T17:38:13
| 2011-11-06T17:38:13
| 957,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,100
|
py
|
from SerialCommunicator import SerialCommunicator, DeviceSyncError
from Recipe import Recipe
import os, ossaudiodev, sys, time, wave
def usage():
print 'Usage: python %s <recipe_file> <output_log_file>' % sys.argv[0]
sys.exit(1)
def play_sound(filename):
sound = wave.open(filename,'rb')
try:
dsp = ossaudiodev.open('/dev/dsp','w')
except IOError:
return
dsp.setparameters(ossaudiodev.AFMT_S16_NE,
sound.getnchannels(),
sound.getframerate())
sound_data = sound.readframes(sound.getnframes())
sound.close()
dsp.write(sound_data)
dsp.close()
try:
recipe_file_name = sys.argv[1]
log_file_name = sys.argv[2]
except IndexError:
usage()
s = SerialCommunicator()
try:
s.open()
except Exception, e:
print e
sys.exit(1)
time.sleep(1)
log_file = open(log_file_name, 'a')
current_status = s.read_current_status()
if current_status:
if current_status[:5] == 'ERROR':
print current_status
else:
print 'ERROR: Program already running. Reset the device and try again.'
sys.exit(1)
else:
recipe = Recipe(open(recipe_file_name, 'r').read())
print 'Sending recipe:'
print "\n".join(recipe.human_readable())
try:
s.write_recipe(recipe)
except DeviceSyncError as err:
print err
sys.exit(1)
had_error = False
while True:
current_status = s.read_current_status()
#current_status = 'SPARGING,HEA,0,0,31.00,24.25,40.00,40.00,1000,ON'
if current_status == 'PAUSED':
if had_error:
play_sound('interface/alarm.wav')
else:
play_sound('interface/ding.wav')
sys.stdin.flush()
raw_input('Paused. Press Enter to continue.')
s.serial.write('K')
had_error = False
else:
os.system("clear")
log_file.write(time.asctime() + ',' + current_status + "\n")
output = SerialCommunicator.human_readable_status(current_status)
if output[:5] == 'ERROR':
had_error = True
print output
|
[
"paul@baumgart.us"
] |
paul@baumgart.us
|
0a238db51960ba7e0593dd8c6e26e12a1597966b
|
b2ff53d368f543b4234a5c3a699d4dc51dc2ae00
|
/javahome/-boto3-9am-2018/resources-demo.py
|
f6e69f47ca605dbf7505d3a37dd518ae1670f8a8
|
[] |
no_license
|
KostivAndrii/sources
|
9710c1123aa628f758ee31bd29d0779db168d58b
|
263a7455abee59c05566977af603fb14a86aa98f
|
refs/heads/master
| 2022-12-21T14:19:40.341327
| 2019-08-07T11:59:58
| 2019-08-07T11:59:58
| 181,833,282
| 4
| 2
| null | 2022-12-16T22:58:33
| 2019-04-17T06:47:38
|
Java
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
# Find all running instances and stop
import boto3
"""
Boto3 client is low level object, it has all operations
Boto3 Resource is a wrapper around client, resources will not
have all operation(methods) which client has, resources,
can simplify your code.
"""
ec2 = boto3.resource('ec2')
# Filter to instances that are currently running, as the comment at the
# top of the file describes, then stop them in one call
ec2.instances.filter(
    Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]
).stop()
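# --- Added sketch (not in the original file) ---
# For contrast with the resource API above, here is roughly the same task
# with the low-level client; the calls shown are standard boto3 client
# methods. Guarded behind False so the demo is not executed twice.
if False:
    client = boto3.client('ec2')
    resp = client.describe_instances(
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
    ids = [i['InstanceId']
           for r in resp['Reservations'] for i in r['Instances']]
    if ids:
        client.stop_instances(InstanceIds=ids)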
|
[
"andrii_kostiv@epam.com"
] |
andrii_kostiv@epam.com
|
7e26db2c2d363b2bbc701f880fc9fdbada89ddd9
|
baa2c6f22ff563d417e34692bf3345077eb8fa5f
|
/tools/tests/embed/embed_flufl.py
|
76682fae93617336b7dc20193ee4870564a03a44
|
[
"BSD-3-Clause"
] |
permissive
|
ipython/ipython
|
c42ea223b6e391bb7dd39888cb959d4d5d6b21a1
|
e5103f971233fd66b558585cce7a4f52a716cd56
|
refs/heads/main
| 2023-08-30T18:27:18.436521
| 2023-08-29T12:16:00
| 2023-08-29T12:16:00
| 658,518
| 13,673
| 4,729
|
BSD-3-Clause
| 2023-09-12T20:22:09
| 2010-05-10T04:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 269
|
py
|
"""This tests that future compiler flags are passed to the embedded IPython."""
from __future__ import barry_as_FLUFL
from IPython import embed
embed(banner1='', header='check 1 <> 2 == True')
embed(banner1='', header='check 1 <> 2 cause SyntaxError', compile_flags=0)
|
[
"pivanov5@bloomberg.net"
] |
pivanov5@bloomberg.net
|
02558b646dbad0d6d01ecdca115a896aeb99a244
|
154c3188df0f9ba0dd03694d12ee61b3996ff160
|
/galeria/local/__init__.py
|
33e293417146be75834cf79572e08861f3485147
|
[] |
no_license
|
apbonillab/Galeria_Equipo3
|
10e5f6d808ad42695e497e0a00fa468fca384f14
|
6091eee36271a50c45417f31b59ef72100bf3e6e
|
refs/heads/master
| 2022-10-16T14:02:04.228708
| 2018-09-11T16:01:11
| 2018-09-11T16:01:11
| 145,240,831
| 0
| 1
| null | 2022-09-30T22:49:16
| 2018-08-18T17:47:40
|
Python
|
UTF-8
|
Python
| false
| false
| 58
|
py
|
/home/adrianab/PycharmProjects/galeria/galeria/__init__.py
|
[
"apbonillab@gmail.com"
] |
apbonillab@gmail.com
|
1f7dce71e7f172553640c9717607e06c6039adfb
|
a86cb1d0cc2c01ccc5b7d03d25a1b98d4f8b66ca
|
/day_04/dictionary_04.py
|
cbd49b836f3792192fbfe62c8fb33c5140b986be
|
[] |
no_license
|
yongseongCho/python_201911
|
020efd812df909f6d1150c6a15a9a4fa6ee946b6
|
f4696fac81a101d13a95ca0ca602e6478b4d2f58
|
refs/heads/master
| 2020-09-12T12:44:46.364259
| 2019-12-19T13:17:08
| 2019-12-19T13:17:08
| 222,429,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
# -*- coding: utf-8 -*-
dict_numbers={'one':1,'two':2,
'three':3,'four':4,
'five':5}
# Check the number of elements stored in the dict_numbers dictionary
c = len(dict_numbers)
print('Number of elements in the dictionary: ', c)
# The keys() method of a dictionary returns every key stored in it
# as a dict_keys object.
# To access the keys by index, convert the dict_keys object to a
# list with the list() cast.
keys = list(dict_numbers.keys())
print(keys)
print(keys[0])
print(keys[1])
print(keys[2])
# The values() method of a dictionary returns every value stored in it
# as a dict_values object.
# To access the values by index, convert the dict_values object to a
# list with the list() cast.
values = list(dict_numbers.values())
print(values)
print(values[0])
print(values[1])
print(values[2])
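# --- Added sketch (not in the original file) ---
# Complementing keys() and values() above: the items() method yields
# (key, value) pairs, the usual way to walk a dictionary in one loop.
for key, value in dict_numbers.items():
    print(key, '->', value)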
|
[
"noreply@github.com"
] |
yongseongCho.noreply@github.com
|
c8045660690f63186b8d944263713fcbde1a17ea
|
7dc6492844ab167c623d97ad6fdd3ef2fd62125b
|
/scripts/etl-challenge-1/xscript.py
|
bc137321aa089470ea532eeea0233e988d114315
|
[] |
no_license
|
NearTripleh/Airflow-snowflake
|
085bbda845f8485c793b16fba9bc6b678199b0b3
|
8c9fc21f7edc39dd4408aad0a917b1b0fdf5d58d
|
refs/heads/main
| 2023-06-13T00:26:46.423382
| 2021-07-14T20:00:42
| 2021-07-14T20:00:42
| 386,060,042
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,489
|
py
|
import pandas as pd
import os, sys, requests, io
from datetime import datetime
def create_insert(args):
PWD = os.environ['pWD']
file = PWD+'/scripts/etl-challenge-1/data.csv'
df = pd.read_csv(file,encoding = "utf-8")
df = df.head(100)
inserts = "INSERT INTO ETL_CHALLENGE_1.DATA_EXAMPLE "
for index,row in df.iterrows():
if index == 0:
inserts += "VALUES({0},'{1}','{2}','{3}','{4}','{5}','{6}')".format(
row['year'],row['industry_code_ANZSIC'],row['industry_name_ANZSIC'],row['rme_size_grp'],row['variable'],row['unit'],row['value'])
else:
inserts += ", ({0},'{1}','{2}','{3}','{4}','{5}','{6}')".format(
row['year'],row['industry_code_ANZSIC'],row['industry_name_ANZSIC'],row['rme_size_grp'],row['variable'],row['unit'],row['value'])
if not os.path.exists(PWD+'/dags/etl-challenge-1/'):
os.mkdir(PWD+'/dags/etl-challenge-1/')
with io.open(PWD+'/dags/etl-challenge-1/data.sql','w') as f:
f.write(inserts)
def download_data(args):
PWD = os.environ['pWD']
url = 'https://www.stats.govt.nz/assets/Uploads/Annual-enterprise-survey/Annual-enterprise-survey-2020-financial-year-provisional/Download-data/annual-enterprise-survey-2020-financial-year-provisional-size-bands-csv.csv'
r = requests.get(url)
open(PWD+'/scripts/etl-challenge-1/data.csv','wb').write(r.content)
def main():
globals()[sys.argv[1]](sys.argv)
if __name__ == "__main__":
main()
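# --- Added usage note (not in the original file) ---
# The globals()[sys.argv[1]](sys.argv) dispatch in main() picks a function
# in this module by name from the command line, so the script is run as:
#   python xscript.py download_data
#   python xscript.py create_insert
# (the 'pWD' environment variable must be set before either call).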
|
[
"adonovan.avila@gmail.com"
] |
adonovan.avila@gmail.com
|
5f434e40f19de6e5c8a1e60229ee6a8c75d55095
|
296c96b8c5d9a4ce379a1194c3f14d06ea488142
|
/Checking_for_longest_gene_set.py
|
3fefcd2a187e1beb2cad4d19a7122523b7504518
|
[] |
no_license
|
phhm/thefruitflygang
|
6bed7031de9da4517301a519cf3442963e83c217
|
064fd907d7d0c89ca9c178481c96e79070d45c89
|
refs/heads/master
| 2016-09-06T12:02:44.278226
| 2014-12-19T11:36:35
| 2014-12-19T11:36:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,235
|
py
|
Melanogaster = [23,1,2,11,24,22,19,6,10,7,25,20,5,8,18,12,13,14,15,16,17,21,3,4,9]
def Swap(List, left_border, right_border):
'''
Swaps a sequence of List elements between the
left_border and right_border
'''
i = List.index(left_border)
j = List.index(right_border)
templist = []
for number in reversed(List[i:j+1]):
templist.append(number)
return List[:i] + templist + List[j+1:]
def breakpoint_search(List):
'''
Function to determine Breakpoint positions.
Breakpoints are returned in a list containing each Breakpoint.
'''
# Creating borders to check for Breakpoints before the first, and behind the last element.
start = min(List) - 1
end = max(List) + 1
# copy the List, appending start and end
List_breakpoint_check = List[:]
List_breakpoint_check.append(end)
List_breakpoint_check.insert(0,start)
# Creates an empty list of Breakpoints, This is used to append the breakpoints found in the Genome.
# Checker is the value of the previous List element, starting at the first element of our List: start.
# Count is used to keep track of the index value inside our List while looping.
Breakpoints = []
checker = start
count = 0
# For-loop used to check if an element is consecutive with the previous value (either +1 or -1).
# Previous value is determined by checker and updated using "count".
for e in List_breakpoint_check[1:]:
# if element is consecutive with the previous value, skip to next value
if e == checker + 1 or e == checker -1:
count += 1
checker = List_breakpoint_check[count]
# if value is non-consecutive with the previous value, append it to Breakpoints
else:
Breakpoints.append(List_breakpoint_check.index(e))
count += 1
checker = List_breakpoint_check[count]
return Breakpoints
breakpoints_list = breakpoint_search(Melanogaster)
def Consecutive_genes_check(breakpoints_list):
'''
Checks the list of Breakpoints and returns the largest consecutive genes set:
returns the index of the first element of this consecutive gene sets + the length
'''
# Searches for the biggest difference in values in the Breakpoints List
# It evaluates each value in the list with its previous value:
# If breakpoints list is: [1,2,5], this will return [1,3] because 2-1=1 and 5-2=3
new_list = [j-i for i, j in zip(breakpoints_list[:-1], breakpoints_list[1:])]
# By choosing the largest number in new_list, the biggest consecutive gene set is found
# The same could be done for the smallest set of consecutive genes
longest_consecutive_list_length = max(new_list)
# Finding the index of the biggest number in new_list
# adding 1 gives the index in Melanogaster of the start of the longest gene-set
longest_start = new_list.index(longest_consecutive_list_length) + 1
return longest_start, longest_consecutive_list_length
a,b = Consecutive_genes_check(breakpoints_list)
print "This is what the functions depicted above are able to do:"
print "This is Melanogaster at the start of our Algorithm ", Melanogaster
print "This is our list of breakpoints: ", breakpoints_list
print "This is the amount of breakpoints we have at the start ", str(len(breakpoints_list))
print "This is the longest consecutive gene set in our Genome ", Melanogaster[a:a+b]
|
[
"jasper.linmans@hotmail.com"
] |
jasper.linmans@hotmail.com
|
03f120ae899bbdaad8234487b6738b00eac1367e
|
76cb5c642d1b447cefd6cde9a84b1b99e244c9bf
|
/IpManager.py
|
4d5691a9fd50bce134b203aa6a1c8dfb77707014
|
[] |
no_license
|
simingjiao/WebcrawlerforCOVID-19
|
19aaac79e0502b8f2d8a59dfb755442e11dbc338
|
278e982d51ff39e84a6eb3a8862c2815727da331
|
refs/heads/master
| 2022-04-25T13:38:49.479965
| 2020-04-26T01:38:52
| 2020-04-26T01:38:52
| 258,660,022
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,988
|
py
|
# Manage IPs: build and maintain your own IP proxy pool so the local IP does not get banned
import pickle
import pymysql
import SqlManager as Sql
import requests
import time
from bs4 import BeautifulSoup
import random
import json
import Htmlparser as parser
import re
import telnetlib
class IpManager():
def __init__(self):
self.tablename = 'newipmanager'
self.sqlman = Sql.sqlmanager()
self.db = 'twitter'
# Select all potentially valid IP addresses from the database
def getipfromsql(self):
ip = self.sqlman.getsthfromsql(self.db,self.tablename,'ip',sths_flag = 1,condition = 'where flag > 0')
return ip
# Convert the raw IP list into the proxy-dict form requests expects
def todict(self,iplist):
proxylist = []
for newip in iplist :
proxy = {}
proxy['http'] = newip
proxy['https'] = newip
proxylist.append(proxy)
return proxylist
# Reward a proxy that worked
def rewardthisproxy(self,proxy):
if type(proxy) == dict:
pro = proxy['http']
else:
pro = proxy
print(pro)
self.sqlman.updatesql(self.db,self.tablename,'flag', 'flag + 1 where ip = "'+ pro + '"')
# Penalize a proxy that failed
def punishthisproxy(self,proxy):
if type(proxy) == dict:
pro = proxy['http']
else:
pro = proxy
print(pro)
self.sqlman.updatesql(self.db, self.tablename, 'flag', 'flag - 1 where ip = "' + pro + '"')
# Delete a proxy
def deletethisproxy(self,proxy):
if type(proxy) == dict:
pro = proxy['http']
else:
pro = proxy
print(pro)
self.sqlman.deletesql(self.db, self.tablename, 'where ip = "' + pro + '"')
# Pick the requested number of IPs from all usable IPs in the database
def getproxyfromipsql(self, numbers = 3):
ip = self.getipfromsql()
iplist = []
for i in range(numbers):
newip = random.choices(ip)[0]
iplist.append(newip)
# print(iplist)
proxy = self.todict(iplist)
# print(proxy)
# print(proxy)
return proxy
def checkip(self, ip):
try:
ip = ip.replace('http://','')
h = ip.split(':')[0]
p = ip.split(':')[1]
print(h)
print(p)
telnetlib.Telnet(host= h,port= p,timeout=2)
print("代理ip有效!")
return True
except:
print("代理ip无效!")
return False
# Open this URL to check whether the proxy IP works
def checkip_(self,ip):
url = 'http://icanhazip.com'
proxy = {
"http": ip,
"https": ip,
}
print(proxy)
try:
print(1)
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0"}
print(2)
response = requests.get(url, proxies = proxy, headers = headers,timeout = 5)
print(3)
return response
except Exception as e:
print(str(e))
return None
def IPgetfrom89ip(self, page=10):
h = parser.html_parser()
for i in range(1, page):
proxy_url = 'http://www.89ip.cn/' + str(i) + '.html'
response = h.getnormalresponceofurl(proxy_url)
# print(response.text)
html = response.content
soup = BeautifulSoup(html, "html.parser")
tag1 = soup.find_all('tr')
for t in tag1:
tds = t.find_all('td')
# print(tds)
ip = ''
for td in tds[:2]:
# print(td.text)
ttext = td.text.split()[0]
if bool(re.search(r'\d', ttext)):
# print(td.text)
if len(ttext) > 5:
ip = ttext
else:
ip = ip + ':' + ttext
print(ip)
ip = "http://" + ip
if self.checkip(ip):
record = [ip, 2]
self.sqlman.Inserttosql_(self.db, self.tablename, record)
def IPclean(self):
self.sqlman.deletesql(self.db, self.tablename,'where flag < 2')
# ips = self.sqlman.getsthfromsql(self.db,self.tablename,'ip',1,0,'where flag > 1')
# for ip in ips:
# if not self.checkip(ip):
# self.punishthisproxy(ip)
if __name__ == '__main__':
i = IpManager()
# Select usable IPs from the database
# proxies = i.getproxyfromipsql(10)
# print(proxies)
# i.IPclean()
# Fetch new IPs from the web
i.IPgetfrom89ip(20)
|
[
"noreply@github.com"
] |
simingjiao.noreply@github.com
|
21f4047c9004ca6bdb817238921564195e4409bd
|
896c24c5bbc82e7759e3ab8f1c4de5f1bf44ce6c
|
/organization/migrations/0011_alter_userinformation_address.py
|
e6f5d33e6067de5bb6aa2504818eb57d05f9d03b
|
[] |
no_license
|
ajayradiantinfonet/lms
|
d92bd525d2043c6b1963596df53010cb9d98da42
|
16c742d20fcfc220e0e8c7fe3ebade54c516dcf4
|
refs/heads/main
| 2023-08-29T22:20:48.518174
| 2021-10-26T09:58:17
| 2021-10-26T09:58:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Generated by Django 3.2.4 on 2021-06-23 09:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization', '0010_alter_userinformation_phone'),
]
operations = [
migrations.AlterField(
model_name='userinformation',
name='address',
field=models.CharField(blank=True, max_length=225, null=True),
),
]
|
[
"ajaybabu0046@gmail.com"
] |
ajaybabu0046@gmail.com
|
0809874d62973c8e46501adf85a2889569c647eb
|
da1c56016a69b68fdb9010130a650778c363b4fa
|
/lcode/lab126/swap_bits.py
|
e535192b48efe3b2960a8450511188bcf956bd02
|
[] |
no_license
|
sandeepbaldawa/Programming-Concepts-Python
|
519ab60fc9ca5bf2f52e098bab0885218a1d9411
|
178a1d26443066f469987467bda0390f5422561e
|
refs/heads/master
| 2023-03-06T21:35:14.585039
| 2023-02-26T04:38:44
| 2023-02-26T04:38:44
| 40,800,329
| 12
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
'''
Bit manipulation: given a number (e.g. 73) in binary and two indices
i and j, swap the bits at positions i and j to produce a new number.
'''
# Below just toggles the bits when we know the bits are different:
# XOR with a mask that has 1s at positions i and j flips both at once.
def swap_bits(x, i, j):
    # Toggle bits only if the ith and jth bits differ
    if ((x >> i) & 1) != ((x >> j) & 1):
        bit_mask = ((1 << i) | (1 << j))
        x = x ^ bit_mask
    return x
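# --- Added example (not in the original file) ---
# 73 is 0b1001001. Bits 0 and 2 differ, so swapping them gives
# 0b1001100 = 76; bits 0 and 3 are both 1, so that swap is a no-op.
print(swap_bits(73, 0, 2))  # 76
print(swap_bits(73, 0, 3))  # 73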
|
[
"noreply@github.com"
] |
sandeepbaldawa.noreply@github.com
|
49c0cd88ff8e38f5ffa9e85e7d46e07391ed29c7
|
ef8c5c55b6ec3971adff9afe2db1f76556b87082
|
/code_examples.bak/VTK/visual_traits.py
|
cbd5ebbe2a06dd2a2a21f0eb871e662d4602aa0b
|
[] |
no_license
|
wbkifun/my_stuff
|
7007efc94b678234097abf0df9babfbd79dcf0ff
|
0b5ad5d4d103fd05989b514bca0d5114691f8ff7
|
refs/heads/master
| 2020-12-10T22:40:28.532993
| 2017-11-15T11:39:41
| 2017-11-15T11:39:41
| 5,178,225
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
from numpy import linspace, pi, cos, sin
from enthought.traits.api import HasTraits, Range, Instance, on_trait_change
from enthought.traits.ui.api import View, Item, HGroup
from enthought.mayavi.core.ui.api import SceneEditor, MlabSceneModel
def curve(n_turns):
phi = linspace(0, 2*pi, 2000)
return [ cos(phi) * (1 + 0.5*cos(n_turns*phi)),
sin(phi) * (1 + 0.5*cos(n_turns*phi)),
0.5*sin(n_turns*phi)]
class Visualization(HasTraits):
n_turns = Range(0, 30, 11)
scene = Instance(MlabSceneModel, ())
def __init__(self):
HasTraits.__init__(self)
x, y, z = curve(self.n_turns)
self.plot = self.scene.mlab.plot3d(x, y, z)
@on_trait_change('n_turns')
def update_plot(self):
x, y, z = curve(self.n_turns)
self.plot.mlab_source.set(x=x, y=y, z=z)
view = View(Item('scene', height=300, show_label=False,
editor=SceneEditor()),
HGroup('n_turns'), resizable=True)
Visualization().configure_traits()
|
[
"kh.kim@kiaps.org"
] |
kh.kim@kiaps.org
|
8405b24873b73b5ee1bd8ea55d70de82dc98f7d8
|
c6a9a6db04cc7c28678bb91c26805fa25c3e4d89
|
/cocos2d/build/android-build.py
|
47ffe5930e403f952ddaa308c9a6f1b8b6427e14
|
[
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"AFL-3.0",
"AFL-2.1",
"MIT-0",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
aftermisak/WSSParticleSystem
|
e51b799afff7bb0790bcde6bab4869d36226f894
|
94b0b0f68b6c08537efd4f2d6ab1af1228f36d02
|
refs/heads/master
| 2023-08-31T07:44:31.178492
| 2018-04-19T07:36:43
| 2018-04-19T07:36:43
| 132,569,861
| 0
| 0
|
Apache-2.0
| 2018-10-22T03:19:03
| 2018-05-08T07:18:26
|
C++
|
UTF-8
|
Python
| false
| false
| 3,413
|
py
|
#!/usr/bin/python
# android-build.py
# Build android
import sys
import os, os.path
import shutil
from optparse import OptionParser
CPP_SAMPLES = ['cpp-empty-test', 'cpp-tests', 'game-controller-test']
LUA_SAMPLES = ['lua-empty-test', 'lua-tests', 'lua-game-controller-test']
ALL_SAMPLES = CPP_SAMPLES + LUA_SAMPLES
def calculate_built_samples(args):
    ''' Compute the samples to be built
'cpp' for short of all cpp tests
'lua' for short of all lua tests
'''
if 'all' in args:
return ALL_SAMPLES
targets = []
if 'cpp' in args:
targets += CPP_SAMPLES
args.remove('cpp')
if 'lua' in args:
targets += LUA_SAMPLES
args.remove('lua')
targets += args
# remove duplicate elements, for example
# python android-build.py cpp hellocpp
targets = set(targets)
return list(targets)
def do_build(app_android_root, build_mode):
command = 'cocos compile -p android -s %s --ndk-mode %s' % (app_android_root, build_mode)
print command
if os.system(command) != 0:
raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
def build_samples(target, build_mode):
if build_mode is None:
build_mode = 'debug'
elif build_mode != 'release':
build_mode = 'debug'
build_targets = calculate_built_samples(target)
app_android_root = ''
target_proj_path_map = {
"cpp-empty-test": "tests/cpp-empty-test",
"game-controller-test": "tests/game-controller-test",
"cpp-tests": "tests/cpp-tests",
"lua-empty-test": "tests/lua-empty-test",
"lua-tests": "tests/lua-tests",
"lua-game-controller-test": "tests/lua-game-controller-test"
}
cocos_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
for target in build_targets:
if target in target_proj_path_map:
app_android_root = os.path.join(cocos_root, target_proj_path_map[target])
else:
print 'unknown target: %s' % target
continue
do_build(app_android_root, build_mode)
# -------------- main --------------
if __name__ == '__main__':
#parse the params
usage = """
This script is mainly used for building the tests built into cocos2d-x.
Usage: %prog [options] [cpp-empty-test|cpp-tests|lua-empty-test|lua-tests|cpp|lua|all]
If you are new to cocos2d-x, I recommend you start with cpp-empty-test, lua-empty-test.
You can combine these targets like this:
python android-build.py cpp-empty-test lua-empty-test
"""
parser = OptionParser(usage=usage)
parser.add_option("-n", "--ndk", dest="ndk_build_param",
help='It is not used anymore, because cocos console does not support it.')
parser.add_option("-p", "--platform", dest="android_platform",
help='This parameter is not used any more, just keep compatible.')
parser.add_option("-b", "--build", dest="build_mode",
help='The build mode for the java project, debug[default] or release. For more information, please refer to http://developer.android.com/tools/building/building-cmdline.html')
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
else:
try:
build_samples(args, opts.build_mode)
except Exception as e:
print e
sys.exit(1)
|
[
"1139454623@qq.com"
] |
1139454623@qq.com
|
cd1e22776ab074701e0dca59a09998094b33253b
|
fdf5af195051846dab4d2a1e798084ba1d4c2ee7
|
/mysite/settings.py
|
f9684a67aaf8a05918f3b49309698d3b5c878615
|
[] |
no_license
|
Takeru0909/first-blog
|
ddcf262294ca41afc42f0b9a2981f1fbbab48ce7
|
3529437ccb07464e44eb4e6be7bf0541d3cf943e
|
refs/heads/master
| 2023-06-28T08:51:55.400589
| 2021-07-29T04:21:43
| 2021-07-29T04:21:43
| 390,598,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,201
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.24.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#u_dn3(6ix-1dn@ndbrg#o4c91=j#4gc2#!g^5j-4*5cn7+76h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"shingutakeru0909@ezweb.ne.jp"
] |
shingutakeru0909@ezweb.ne.jp
|
78d626193a10189b8c18ca9679ff23376dfddb21
|
5fad1b7002394b0cc32261ec2c417a916716b68f
|
/temp_01/src/temp_08.py
|
4c9e32c0e27acdc9348f0eebc75b82dc4786ef9f
|
[] |
no_license
|
WenruiShen/PythonPractice
|
320a710e933cd99c82681db037a202097a80f182
|
c43c8b9db681bb5a7d60afe4a48fd361f457d198
|
refs/heads/master
| 2021-07-11T14:29:08.461917
| 2017-10-12T01:10:29
| 2017-10-12T01:10:29
| 106,627,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
import sys
'''
360 test:
7 20
4 5 2 12 5 12 12
4 20
10 10 10 10
4 12
5 5 5 5
'''
# First input line: the number of items and the time budget.
line_1_list = sys.stdin.readline().strip().split()
game_n = int(line_1_list[0])
max_t = int(line_1_list[1])

# Second input line: the time cost of each item.
item_time_list = list(map(int, sys.stdin.readline().strip().split()))

# Set the single largest item aside: it is scheduled last (and may run past
# the budget), so only the remaining items must fit strictly under max_t.
max_item_time = max(item_time_list)
item_time_list.remove(max_item_time)  # removes the first occurrence

max_sum_time = 0

def select_one_item(item_time_list, sel_id, sum_time):
    """Depth-first search: try adding item_time_list[sel_id] to sum_time,
    tracking the best total that stays strictly below max_t."""
    global max_sum_time
    sum_time_temp = sum_time + item_time_list[sel_id]
    if sum_time_temp >= max_t:
        # Adding this item overshoots the budget; the total so far is a candidate.
        max_sum_time = max(max_sum_time, sum_time)
        return
    # Still under budget, so the new total is a candidate as well.
    max_sum_time = max(max_sum_time, sum_time_temp)
    # Recurse only over later items so each item is used at most once.
    for next_id in range(sel_id + 1, len(item_time_list)):
        select_one_item(item_time_list, next_id, sum_time_temp)

for item_id in range(len(item_time_list)):
    select_one_item(item_time_list, item_id, 0)

# The largest item is started last and may exceed the budget, so add it
# whenever the selected total still leaves time before max_t.
if max_sum_time < max_t:
    max_sum_time = max_sum_time + max_item_time
print(max_sum_time)
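# Example run for the first test case above (the invocation is illustrative;
# the output is what the fixed logic in this file produces):
#   $ printf '7 20\n4 5 2 12 5 12 12\n' | python temp_08.py
#   31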
|
[
"wenrui.shen@ucdconnect.ie"
] |
wenrui.shen@ucdconnect.ie
|
f1d22991161fc9da201d5eaefae33f1d470f0aa7
|
76cf3f747e0a640016323d8d99886a260852dcc0
|
/hackerrank/leetcode75.py
|
7dd04d9a68252a5508a72396d05fa45ebd47a3bc
|
[] |
no_license
|
cocacolabe/TechInterviewProblems
|
be4c257c5a066cca472a95b200eddbf8c9940734
|
d16e6da62a6f7cdb5e18da3c0379882aac717d39
|
refs/heads/master
| 2021-06-13T18:48:10.062638
| 2017-02-11T18:47:25
| 2017-02-11T18:47:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
class Solution(object):
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # One-pass Dutch-national-flag variant: i marks the end of the 0s
        # region and j the end of the 0s+1s region. Every element is first
        # overwritten with 2, then demoted to 1 and/or 0 as needed.
        n = len(nums)
        i, j = 0, 0
        for k in range(n):
            v = nums[k]
            nums[k] = 2        # assume 2; corrected below if v was smaller
            if v < 2:          # v is 0 or 1: extend the 0s+1s region
                nums[j] = 1
                j += 1
            if v == 0:         # v is 0: extend the 0s region
                nums[i] = 0
                i += 1
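# A quick sanity check (illustrative driver, not part of the original
# submission; the sample input is an assumption):
if __name__ == '__main__':
    nums = [2, 0, 2, 1, 1, 0]
    Solution().sortColors(nums)
    print(nums)  # expected: [0, 0, 1, 1, 2, 2]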
|
[
"noreply@github.com"
] |
cocacolabe.noreply@github.com
|
bd1a32229bba4dc8c31dffdb97a8505ea9559002
|
5a281cb78335e06c631181720546f6876005d4e5
|
/murano-7.0.0/murano/cmd/cfapi.py
|
9bfd922e41c8a94789f7727076f26d8c5235d109
|
[
"Apache-2.0"
] |
permissive
|
scottwedge/OpenStack-Stein
|
d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8
|
7077d1f602031dace92916f14e36b124f474de15
|
refs/heads/master
| 2021-03-22T16:07:19.561504
| 2020-03-15T01:31:10
| 2020-03-15T01:31:10
| 247,380,811
| 0
| 0
|
Apache-2.0
| 2020-03-15T01:24:15
| 2020-03-15T01:24:15
| null |
UTF-8
|
Python
| false
| false
| 2,144
|
py
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import service
from murano.api.v1 import request_statistics
from murano.common import app_loader
from murano.common import cf_config as config
from murano.common import policy
from murano.common import wsgi
CONF = cfg.CONF
if os.name == 'nt':
# eventlet monkey patching causes subprocess.Popen to fail on Windows
# when using pipes due to missing non blocking I/O support
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
# If ../murano/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
root = os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir)
if os.path.exists(os.path.join(root, 'murano', '__init__.py')):
sys.path.insert(0, root)
def main():
try:
config.parse_args()
logging.setup(CONF, 'murano-cfapi')
request_statistics.init_stats()
policy.init()
launcher = service.ServiceLauncher(CONF)
cfapp = app_loader.load_paste_app('cloudfoundry')
cfport, cfhost = (config.CONF.cfapi.bind_port,
config.CONF.cfapi.bind_host)
launcher.launch_service(wsgi.Service(cfapp, cfport, cfhost))
launcher.wait()
except RuntimeError as e:
sys.stderr.write("ERROR: %s\n" % e)
sys.exit(1)
if __name__ == '__main__':
main()
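# Usage note (an assumption, inferred from the 'murano-cfapi' log domain
# above rather than stated in this file): this module is typically wired up
# as a console-script entry point, e.g. in setup.cfg:
#   [entry_points]
#   console_scripts =
#       murano-cfapi = murano.cmd.cfapi:main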
|
[
"Wayne Gong@minbgong-winvm.cisco.com"
] |
Wayne Gong@minbgong-winvm.cisco.com
|