blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
544cf17289d4bea37ac89d89533a462b179ec1c5 | e15f7d0a672950b90baab0eacf6b476c9bf551ff | /sqlite_database/clientTable2_db.py | 4303dd7420ab2f4f6e0a27b48f7572d4cc243b3b | [] | no_license | oomintrixx/EC500-hackthon | b75d46c9984411e49664bc1878ae9c5f8b7b6897 | 18eec9b138d86f7e77363e37c7afed1e7df21942 | refs/heads/main | 2023-05-01T00:26:30.646119 | 2021-05-05T21:24:25 | 2021-05-05T21:24:25 | 354,972,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,330 | py | import sqlite3
# table two: table used for store friends
# primary ID, username(string), ip address(string), port(int), public key(string)
def client_database2():
    """Create the friends table ``client_table2``.

    Columns: PRIMARY_ID (int), USERNAME (text), IP_ADDRESS (text),
    PORT (int), PUBLIC_KEY (text).

    Raises:
        sqlite3.OperationalError: if the table already exists.
    """
    conn = sqlite3.connect('client_table2.db')  # Opens Connection to SQLite database file.
    try:
        conn.cursor().execute('''CREATE TABLE client_table2
            (PRIMARY_ID INTEGER,
            USERNAME TEXT,
            IP_ADDRESS TEXT,
            PORT INTEGER,
            PUBLIC_KEY TEXT
            );''')
        conn.commit()
    finally:
        # Close the handle even if table creation fails, so it is not leaked.
        conn.close()
#create client table 2 with primary id, username, text, time
def create_cltable2(primaryid, username, ipaddress, port, publickey):
    """Insert one friend row (id, username, ip, port, public key) into client_table2."""
    conn = sqlite3.connect('client_table2.db')
    try:
        params = (primaryid, username, ipaddress, port, publickey)
        # Parameterized query: values are bound, never interpolated into SQL.
        conn.cursor().execute("INSERT INTO client_table2 VALUES (?,?,?,?,?)", params)
        conn.commit()
        #print('User Creation Successful')
    finally:
        # Close even when the INSERT raises, so the handle is not leaked.
        conn.close()
#retrieve all information stored inside client table 2
def retrieve_all():
    """Return every row stored in client_table2 as a list of tuples."""
    conn = sqlite3.connect('client_table2.db')
    try:
        # Fetch before closing; the original version never closed this
        # connection, leaking one handle per call.
        return conn.cursor().execute("SELECT * FROM client_table2").fetchall()
    finally:
        conn.close()
#return all information associated with this primary id
def retrieve_specific(primaryid):
    """Return all rows whose PRIMARY_ID equals *primaryid* (list of tuples)."""
    conn = sqlite3.connect('client_table2.db')
    try:
        cur = conn.cursor()
        # Named-parameter binding; the original leaked the connection here.
        cur.execute("SELECT * FROM client_table2 WHERE PRIMARY_ID =:PRIMARY_ID",
                    {'PRIMARY_ID': primaryid})
        return cur.fetchall()
    finally:
        conn.close()
#delete all information related to this username from the database
def delete_userFromUsername(username):
    """Delete every row whose USERNAME equals *username*."""
    conn = sqlite3.connect('client_table2.db')
    try:
        conn.cursor().execute(
            """DELETE FROM client_table2 WHERE USERNAME =:USERNAME """,
            {'USERNAME': username})
        conn.commit()
    finally:
        # Always release the handle, even if the DELETE fails.
        conn.close()
#delete all information related to this primary from the database
def delete_userFromID(primaryid):
    """Delete every row whose PRIMARY_ID equals *primaryid*."""
    conn = sqlite3.connect('client_table2.db')
    try:
        conn.cursor().execute(
            """DELETE FROM client_table2 WHERE PRIMARY_ID =:PRIMARY_ID """,
            {'PRIMARY_ID': primaryid})
        conn.commit()
    finally:
        # Always release the handle, even if the DELETE fails.
        conn.close()
#delete all information from database
def delete_all():
    """Remove every row from client_table2 (the table itself is kept)."""
    conn = sqlite3.connect('client_table2.db')
    try:
        conn.cursor().execute("""DELETE FROM client_table2""")
        conn.commit()
    finally:
        # Always release the handle, even if the DELETE fails.
        conn.close()
| [
"p.nuwapa@gmail.com"
] | p.nuwapa@gmail.com |
16473cbfdf7536d42e5ac55f5e31c8de2f2cc785 | f92a545c2b4db694bb82415c5385672c89ccbea2 | /tests/test_time_interval.py | 77381eab4c4a5fe9ec7eccbc270d1ee8681df425 | [] | no_license | fenixguard/candyshop_api | 28f21cb01ace8c12df04d143c4ed15ee1f95e550 | 60900fd4c8382c5e5ebf92adc19b5b0fb5be27aa | refs/heads/master | 2023-04-02T15:38:41.453440 | 2021-04-02T16:15:14 | 2021-04-02T16:15:14 | 349,173,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | import unittest
from application.model.time_interval import convert_time_str_to_int, convert_time_int_to_str
class TestConvertTime(unittest.TestCase):
    """Round-trip checks for the "HH:MM" <-> minutes-since-midnight helpers."""

    def test_str_to_int(self):  # "09:00-18:00" -> [540, 1080]
        # Each "HH:MM" string should map to minutes since midnight.
        self.assertEqual(convert_time_str_to_int(["09:00", "18:00"]), [540, 1080])

    def test_int_to_str(self):  # [540, 1080] -> "09:00-18:00"
        # A [start, end] minute pair should render as a single "HH:MM-HH:MM" string.
        self.assertEqual(convert_time_int_to_str([540, 1080]), "09:00-18:00")
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"fenixguard1993@gmail.com"
] | fenixguard1993@gmail.com |
14076432efe1e114fe9c26e59250ad20b2cd0070 | 2086fb4adee17692b41a5431fcab663bed407028 | /mnist/quantize_icml.py | ce5592c67bc8cd6e972b07d697de0c50726a9577 | [] | no_license | charbel-sakr/Precision-Analysis-of-Neural-Networks | 482bc6e959aeee806d2e76a24a5506b214aa4a1c | 18eb7d3f19ebf6569e4d4082a0f24a21e6ea232e | refs/heads/master | 2021-05-06T12:10:14.118298 | 2018-03-27T00:06:23 | 2018-03-27T00:06:23 | 113,064,121 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,325 | py | import theano
import theano.tensor as T
import numpy as np
import layers
import load_mnist
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
# Load MNIST and fold the validation split into the training set.
x_test, t_test, x_valid, t_valid, x_train, t_train = load_mnist.load()
x_train = np.concatenate((x_train,x_valid),axis=0)
t_train = np.concatenate((t_train,t_valid),axis=0)
# define symbolic Theano variables
x = T.matrix()
t = T.matrix()
lr = T.scalar()
B = T.scalar()  # quantization precision (bits), supplied at call time
#prepare weight
#BC architecture is 2X128C3 - MP2 - 2x256C3 - MP2 - 2x512C3 - MP2 - 2x1024FC - 10
# NOTE(review): the comment above describes a conv net, but feedForward below
# applies 4 fully-connected layers -- confirm which architecture the saved
# weights actually correspond to.
params = layers.loadMNIST('mnist_pretrained_plain.save')
def feedForward(x, params, B):
    """Quantized forward pass through the fully-connected network.

    Activations are quantized to B bits and each layer's weights/biases to
    B+2 bits. Hidden layers apply the slopedClipping non-linearity followed
    by activation quantization; the final layer returns raw pre-activations.

    Args:
        x: symbolic input matrix (one example per row).
        params: list of [weight, bias] pairs, one per layer.
        B: symbolic scalar, activation precision in bits.

    Returns:
        Symbolic output of the last linear layer (logits).

    Note:
        Unlike the original version, ``params`` is NOT mutated in place, so
        repeated calls with different B no longer re-quantize weights that
        were already quantized by a previous call. The loop also generalizes
        to any number of layers instead of hard-coding four.
    """
    a = layers.quantizeAct(x, B)
    z = a
    for l, layer_params in enumerate(params):
        # Quantize a copy of the layer's weight and bias; leave params intact.
        quantized = [layers.quantizeWeight(layer_params[0], B + 2),
                     layers.quantizeWeight(layer_params[1], B + 2)]
        z = layers.linOutermost(a, quantized)
        if l < len(params) - 1:
            # Hidden layers: non-linearity, then re-quantize the activations.
            a = layers.quantizeAct(layers.slopedClipping(z), B)
    return z
# Build the symbolic quantized forward pass and take the argmax class.
z = feedForward(x, params,B)
y = T.argmax(z, axis=1)
# compile theano function
predict = theano.function([x,B], y)
batch_size = 200
# test
labels = np.argmax(t_test, axis=1)
# NOTE(review): this loop rebinds the name B (previously the symbolic scalar).
# It is harmless because `predict` was already compiled above, but the
# shadowing is easy to misread.
for B in range(20):
    running_accuracy = 0.0
    batches = 0
    # Evaluate the 10k test images in fixed-size batches at precision B+1 bits.
    for start in range(0, 10000, batch_size):
        x_batch = x_test[start:start+batch_size]
        t_batch = labels[start:start+batch_size]
        running_accuracy += np.mean(predict(x_batch,B+1) == t_batch)
        batches += 1
    test_accuracy = running_accuracy/batches
    # Print the test ERROR rate (1 - accuracy) for this precision.
    print(repr(1.0-test_accuracy))
| [
"noreply@github.com"
] | charbel-sakr.noreply@github.com |
84223190f9fa6ce27b1641ee53ef42b67d36f770 | 6bcb44e305bb89a704555ce92ff4281071b4a248 | /main.py | 00b65ad70bf564ae5bd1124524a6c6b07d3ae2c3 | [] | no_license | rFriso/Geometry_and_mesh | b856dfb81d0fd76d36317d3b6203a2b6286502ac | 35740902bab319efaa8f1d6f3ca949d0f1957da4 | refs/heads/main | 2023-02-20T10:39:22.390131 | 2021-01-20T14:31:51 | 2021-01-20T14:31:51 | 331,297,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | #!/usr/bin/env python
## Description: airfoil geometry and mesh generator code based on salome library
import CST ## Parameterisation technique
## Geometry generation inputs
## Geometry generation inputs
wl = [-0.17, 0.5, 0.5, 1.3, 1.4] ## CST weights of lower surface
wu = [0.8, 1.8, 2, 3.3, 2.9] ## CST weights of upper surface
dz = 0.08 ## half trailing edge thickness
N = 1000 ## number of points discretizing the surfaces
c = 1 ## airfoil chord
yte = -1.4 ## y-coordinate of TE
s = 1 ## pitch of the cascade
## Mesh generation inputs
maxSizeElem = 0.01 ## Element max size
minSizeElem = 0.003 ## Element min size
BLthick = 0.003 ## Prism layer thickness (structured O-Grid)
nLayers = 3 ## number of layers in the structured grid
growthRatio = 1.2 ## layers growth ratio in the structured grid
## Advices:
## - keep the ratio maxSizeElem/minSizeElem approximately equal to 3
## - a large BLthick could lead to blows-up the process
## - use a growthRatio value between 1.1 - 1.2
## The exec'd scripts below run in this module's global namespace, so they
## read the input variables defined above and may define new globals
## (e.g. `salome`) used by later stages.
## Execute Salome library
exec(open("./salomeFiles.py").read())
## Airfoil geometry generation
exec(open("./fluidDomain.py").read())
## Name selection
exec(open("./salomeGeomGroups.py").read())
## Mesh generation
exec(open("./meshGeneration.py").read())
## Refresh the Salome object browser when a GUI session is present
## (`salome` is expected to be defined by the exec'd scripts above).
if salome.sg.hasDesktop():
    salome.sg.updateObjBrowser()
| [
"noreply@github.com"
] | rFriso.noreply@github.com |
76931673e3db556c89f0c03fa5617acd77989b43 | 03acfdb33a5a5b256b2e9b6c5f019c6150fc5765 | /productivityProject/activities/urls.py | 74cbe6bd28d53623fce9f54e83904dee29044116 | [] | no_license | argoebel/ProductivityApp | 251a9c6c2f0c3c124d41c39adccd822637607a9e | dcbc3db838e653eda2c6042624bd89c138e756ea | refs/heads/master | 2022-12-10T18:26:48.441363 | 2020-08-25T22:03:24 | 2020-08-25T22:03:24 | 285,477,562 | 0 | 0 | null | 2020-08-25T22:03:25 | 2020-08-06T04:58:26 | JavaScript | UTF-8 | Python | false | false | 512 | py | from rest_framework import routers
from django.urls import path
from .api import TaskAPIView, CreateTaskAPIView, CreateActivityAPIView, ActivityAPIView
# URL routes for the activities app: read endpoints plus create endpoints
# for both activities and tasks, each backed by a DRF APIView.
urlpatterns = [
    path('api/activities',
        ActivityAPIView.as_view(), name='get_activities'),
    path('api/activities/create',
        CreateActivityAPIView.as_view(), name='create_activity'),
    # Task reads are scoped to a single parent activity via its primary key.
    path('api/tasks/<int:pk>', TaskAPIView.as_view(), name='get_tasks'),
    path('api/tasks/create', CreateTaskAPIView.as_view(), name='create_task'),
]
| [
"argoebel@ualberta.ca"
] | argoebel@ualberta.ca |
9599100cda07d29f3eaeb8432bfe6710bb9b354b | a74b980fd95d5d810315f181449fc9d1710e6923 | /savecode/threeyears/idownclient/scan/plugin/zgrab2/zgrab2scanner/zgrab2scannerhttp.py | 5962809ec41cc9835857020f7ecd4321d0f27b59 | [
"Apache-2.0"
] | permissive | cbbbbbbbb/sspywork | b70f5539203b47b21eec2f0514ddca155affc2b8 | 8f05a6b91fc205960edd57f9076facec04f49a1a | refs/heads/master | 2023-03-22T19:45:13.024076 | 2021-03-08T01:24:21 | 2021-03-08T01:24:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,009 | py | """zgrab2 scanner http"""
# -*- coding:utf-8 -*-
import json
import signal
import os
import traceback
import uuid
import psutil
from datacontract.iscandataset.iscantask import IscanTask
from .zgrab2scannerbase import Zgrab2ScannerBase
from ..zgrab2parser import Zgrab2ParserHttp, Zgrab2ParserTls
from .....clientdatafeedback.scoutdatafeedback import PortInfo
class Zgrab2ScannerHttp(Zgrab2ScannerBase):
    """zgrab2 http scanner: drives the zgrab2 binary's `http` module against a
    set of hosts and parses the JSON-lines output into PortInfo objects."""

    def __init__(self, zgrab_path: str):
        # NOTE(review): zgrab_path is accepted but never stored/used here;
        # the binary path is passed per-call as `zgrab2path` instead -- confirm.
        Zgrab2ScannerBase.__init__(self, "zgrab2http")
        self._parser_http: Zgrab2ParserHttp = Zgrab2ParserHttp()
        self._parser_tls: Zgrab2ParserTls = Zgrab2ParserTls()

    def get_banner_http(
        self,
        task: IscanTask,
        level,
        pinfo_dict,
        port,
        *args,
        zgrab2path: str = "zgrab2",
        sudo: bool = False,
        timeout: float = 600,
    ) -> iter:
        """scan http services and get the banner

        pinfo_dict maps host (ip) -> PortInfo; all keys are scanned on `port`.
        Temporary host-list and result files are always removed in `finally`.
        """
        hostfi = None
        outfi = None
        try:
            if not isinstance(port, int) or port < 0 or port > 65535:
                raise Exception("Invalid port: {}".format(port))

            hosts: iter = pinfo_dict.keys()
            # for d in portinfo.domains:
            #     if not d in hosts:
            #         hosts.append(d)
            # if len(hosts) < 1:
            #     # scan ip is not good, only scan them when
            #     # no domain is available
            #     hosts.append(portinfo._host)
            # for h in portinfo.hostnames:
            #     if not h in hosts:
            #         hosts.append(h)

            hostfi = self._write_hosts_to_file(task, hosts)
            if hostfi is None:
                return
            outfi = self._scan_http(
                task,
                level,
                hostfi,
                port,
                *args,
                zgrab2path=zgrab2path,
                sudo=sudo,
                timeout=timeout,
            )
            if outfi is None or not os.path.isfile(outfi):
                return

            self._parse_result(task, level, pinfo_dict, outfi)

        except Exception:
            self._logger.error("Scan http error: {}".format(traceback.format_exc()))
        finally:
            # best-effort cleanup of both temp files
            if not hostfi is None and os.path.isfile(hostfi):
                os.remove(hostfi)
            if not outfi is None and os.path.isfile(outfi):
                os.remove(outfi)

    #################################
    # scan

    def _scan_http(
        self,
        task: IscanTask,
        level,
        host_file: str,
        port: int,
        *args,
        zgrab2path: str = "zgrab2",
        sudo: bool = False,
        timeout: float = 600,
    ) -> str:
        """
        scan the ips or domains, and write the output files to specified output directory.
        host_file: the full path of a file with list of ['1.1.1.1','www.xxx.com'] in the file per line
        port: '80' or '443'
        outfi: result file path (None on failure)
        """
        outfi: str = None
        exitcode = None
        try:
            enhanced_args = []
            # add hosts and ports to args
            enhanced_args.append("http")
            enhanced_args.append("--port=%s" % port)

            # zgrab2 http 192.168.40.114 --port=8020 --endpoint='/' --heartbleed
            # --extended-master-secret --extended-random --max-redirects=2
            # --session-ticket --follow-localhost-redirects --retry-https --timeout=30
            # --user-agent="Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36"
            # -f ./a.list -o ./a.json

            # NOTE(review): every membership test below (e.g. '"--endpoint=" in args')
            # checks whether some caller arg is EXACTLY that prefix string; a real
            # flag like "--endpoint='/x'" never matches, so the default is still
            # appended and zgrab2 receives the flag twice -- confirm intent
            # (a prefix check over the args would be `any(a.startswith(...))`).
            if not "--endpoint=" in args:
                enhanced_args.append("--endpoint='/'")
            if not "--max-size" in args:
                # Kb
                enhanced_args.append("--max-size=256")
            if not "--heartbleed" in args:
                enhanced_args.append("--heartbleed")
            if not "--extended-master-secret" in args:
                enhanced_args.append("--extended-master-secret")
            if not "--extended-random" in args:
                enhanced_args.append("--extended-random")
            if not "--max-redirects=" in args:
                enhanced_args.append("--max-redirects=1")
            if not "--session-ticket" in args:
                enhanced_args.append("--session-ticket")
            if not "--retry-https" in args:
                enhanced_args.append("--retry-https")
            if not "--timeout=" in args:
                enhanced_args.append("--timeout=30")
            if not "--user-agent=" in args:
                enhanced_args.append(
                    '--user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"'
                )

            enhanced_args.extend(args)

            # NOTE(review): precedence -- this parses as
            # (not "--input-file=" in args) or ("-f" in args), so when the caller
            # passes "-f" the extra "-f <host_file>" is STILL appended; the
            # intended guard was probably `not (... or ...)`. Same applies to the
            # "-o" branch below -- confirm.
            if not "--input-file=" in args or "-f" in args:
                enhanced_args.append("-f %s" % host_file)  # input file

            # generate a unique output path under the temp dir (lock guards
            # against concurrent name collisions)
            with self._outfile_locker:
                outfi = os.path.join(
                    self._tmpdir, "{}_{}.http".format(str(uuid.uuid1()), port)
                )
                while os.path.isfile(outfi):
                    outfi = os.path.join(
                        self._tmpdir, "{}_{}.http".format(str(uuid.uuid1()), port)
                    )

            # outfi = os.path.join(self._tmpdir, "{}_{}.http".format(task.taskid, port))
            if not "--output-file=" in args or "-o" in args:
                # here must use -o, use '--output-file' will cause exception 'No such file or directory'
                # this may be a bug
                enhanced_args.append("-o %s" % outfi)  # output file

            outdir = os.path.dirname(outfi)
            if not os.path.exists(outdir) or not os.path.isdir(outdir):
                os.makedirs(outdir)

            curr_process = None
            try:
                curr_process = self._run_process(
                    zgrab2path, *enhanced_args, rootDir=outdir, sudo=sudo
                )

                stdout, stderr = curr_process.communicate(timeout=timeout)
                exitcode = curr_process.wait(timeout=10)
                if not stdout is None:
                    self._logger.trace(stdout)
                if not stderr is None:
                    self._logger.trace(stderr)
                if exitcode != 0:
                    raise Exception("Scan HTTP error: %s\n%s" % (stdout, stderr))

                self._logger.info(
                    "Scan HTTP exitcode={}\ntaskid:{}\nbatchid:{}\nport:{}".format(
                        str(exitcode), task.taskid, task.batchid, port
                    )
                )
            finally:
                # always reap the child process, even on timeout/error
                if curr_process is not None:
                    curr_process.kill()

        except Exception:
            # on any failure drop the partial result file and return None
            if not outfi is None and os.path.isfile(outfi):
                os.remove(outfi)
            outfi = None
            self._logger.info(
                "Scan HTTP error\ntaskid:{}\nbatchid:{}\nport:{}".format(
                    task.taskid, task.batchid, port
                )
            )
        return outfi

    #################################
    # parse

    def _parse_result(self, task: IscanTask, level: int, pinfo_dict, outfi):
        """parse http infor and ssl info from the JSON-lines result file"""
        try:
            if not os.path.isfile(outfi):
                self._logger.error(
                    "Resultfi not exists:\ntaskid:{}\nresultfi:{}".format(
                        task.taskid, outfi
                    )
                )
                return

            # its' one json object per line
            linenum = 1
            with open(outfi, mode="r") as fs:
                while True:
                    try:
                        line = fs.readline()
                        if line is None or line == "":
                            break

                        sj = json.loads(line)
                        if sj is None:
                            continue

                        ip = sj.get("ip")
                        if ip is None or pinfo_dict.get(ip) is None:
                            self._logger.error(
                                "Unexpect error, cant get ip info from zgrab2 result"
                            )
                            continue

                        portinfo = pinfo_dict.get(ip)
                        # self._parser_http._parse_http(sj, portinfo)
                        self._parse_http(task, sj, portinfo)
                        # do not parse ssl certificate here,
                        # cuz already got tls information
                        # self._parser_tls._parse_cert(sj, portinfo)
                        # self._parse_tls(task, sj, portinfo)
                    except Exception:
                        # one bad line must not abort the rest of the file
                        self._logger.error(
                            "Parse one http banner json line error:\ntaskid:{}\nresultfi:{}\nlinenum:{}\nerror:{}".format(
                                task.taskid,
                                outfi,
                                linenum,
                                traceback.format_exc(),
                            )
                        )
                    finally:
                        linenum += 1

        except Exception:
            self._logger.error(
                "Parse http result error:\ntaskid:{}\nresultfi:{}".format(
                    task.taskid, outfi
                )
            )

    def _parse_http(self, task: IscanTask, sj, portinfo: PortInfo):
        """parse site(http) info from one zgrab2 result object into portinfo"""
        try:
            self._parser_http._parse_http(sj, portinfo)
        except Exception:
            self._logger.error(
                "Parse http site result error:\ntaskid:{}\nbatchid:{}\nerror:{}".format(
                    task.taskid, task.batchid, traceback.format_exc()
                )
            )

    def _parse_tls(self, task: IscanTask, sj, portinfo: PortInfo):
        """parse the TLS handshake/certificate info nested in an http result
        (currently unused -- see the commented-out call in _parse_result)"""
        try:
            if not sj.__contains__("data") or not sj["data"].__contains__("http"):
                return
            if sj["data"]["http"]["status"] != "success":
                return

            sjresp = sj["data"]["http"]["result"]["response"]
            if not sjresp.__contains__("request") or not sjresp["request"].__contains__(
                "tls_log"
            ):
                return

            sjtls = sjresp["request"]["tls_log"]
            sjhandshake = sjtls.get("handshake_log")
            if sjhandshake is None or len(sjhandshake) < 1:
                return

            self._parser_tls._parse_cert(sjhandshake, portinfo)
        except Exception:
            self._logger.error(
                "Parse http tls result error:\ntaskid:{}\nbatchid:{}\nerror:{}".format(
                    task.taskid, task.batchid, traceback.format_exc()
                )
            )
| [
"shiyuegege@qq.com"
] | shiyuegege@qq.com |
d5e939d7cc1fcb5358ac62026cdf76abfe52da49 | 53d7ed315fa6133a7302b22210a8771ebbfd7e34 | /DCP_063 ! Word Matrix.py | faf4c3b1d6080387ae5cca33dd5b30b3f2fbdc1f | [] | no_license | MaxTechniche/dcp | 64300bba48cbe7117aa7c7f2b879f20f5915e1ba | 3a8f4667ee82c591927deed7768faa481f4902f0 | refs/heads/master | 2023-01-02T05:08:48.261468 | 2020-10-25T02:19:49 | 2020-10-25T02:19:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | """
Good morning! Here's your coding interview problem for today.
This problem was asked by Microsoft.
Given a 2D matrix of characters and a target word, write a function that returns whether the word can be found in the matrix by going left-to-right, or up-to-down.
For example, given the following matrix:
[['F', 'A', 'C', 'I'],
['O', 'B', 'Q', 'P'],
['A', 'N', 'O', 'B'],
['M', 'A', 'S', 'S']]
and the target word 'FOAM', you should return true, since it's the leftmost column. Similarly, given the target word 'MASS', you should return true, since it's the last row.
"""
| [
"Max.Techniche@gmail.com"
] | Max.Techniche@gmail.com |
d597cb77ad0b3c478898bcf937ccdebef106cd20 | 168c6412acb7cdf144e93ce73bcdc6c4d8618444 | /lidarnet_fei/simladar_eval.py | 8d2ddd5cd0256f0b6e9befde00d6032132f8b0fa | [] | no_license | ZhenghaoFei/camera_simulate_lidar | c0e2cfa1a664401450e5705b6b5d9b22f0f62040 | b30391386cf4b0b7580dd4b5f5b961693c543fac | refs/heads/master | 2021-01-11T03:54:46.198327 | 2017-01-15T20:16:13 | 2017-01-15T20:16:13 | 71,275,543 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,296 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import numpy as np
import tensorflow as tf
import simladar
FLAGS = tf.app.flags.FLAGS

# Command-line flags controlling the evaluation run (TF 0.x/1.x flags API).
# NOTE(review): FLAGS.batch_size is read below but not defined in this file;
# presumably it is defined by the `simladar` module -- confirm.
tf.app.flags.DEFINE_string('eval_dir', './save',
                           """Directory where to write event logs.""")
tf.app.flags.DEFINE_string('eval_data', '../data/data_valid/',
                           """validation data directory""")
tf.app.flags.DEFINE_string('checkpoint_dir', './save/',
                           """Directory where to read model checkpoints.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
                            """How often to run the eval.""")
tf.app.flags.DEFINE_integer('num_examples', 1500,
                            """Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', False,
                            """Whether to run eval only once.""")
def eval_once(saver, summary_writer, valid_op, summary_op):
    """Run Eval once.

    Restores the latest checkpoint, averages `valid_op` (per-batch RMSE)
    over the evaluation set, prints it and writes it as a summary.

    Args:
        saver: Saver.
        summary_writer: Summary writer.
        valid_op: per-batch RMSE op to average.
        summary_op: Summary op.
    """
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/cifar10_train/model.ckpt-0,
            # extract global_step from it.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                 start=True))

            num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
            # true_count = 0  # Counts the number of correct predictions.
            # accuracy_sum = 0
            # total_sample_count = num_iter * FLAGS.batch_size
            sum_rmse = 0
            step = 0
            while step < num_iter and not coord.should_stop():
                sum_rmse += sess.run(valid_op)
                # true_count += np.sum(predictions)
                step += 1
            # mean of the per-batch RMSE values
            rmse = float(sum_rmse/num_iter)
            # Compute precision @ 1.
            # accuracy = accuracy_sum / total_sample_count
            print('%s: rmse = %.3f' % (datetime.now(), rmse))

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='rmse', simple_value=rmse)
            summary_writer.add_summary(summary, global_step)
        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
def evaluate():
    """Build the validation graph and evaluate RMSE repeatedly
    (every FLAGS.eval_interval_secs, or once if FLAGS.run_once)."""
    # with tf.Graph().as_default(),tf.device('/cpu:0') as g:
    # NOTE(review): tf.device() yields a device context, not a graph, yet `g`
    # is later passed to SummaryWriter as if it were a graph (the commented
    # line above shows the likely original intent) -- confirm.
    with tf.device('/cpu:0') as g:
        global_step = tf.Variable(0, trainable=False)
        # # Get images and labels for CIFAR-10.
        # eval_data = FLAGS.eval_data == 'test'
        # images, labels = cifar10.inputs(eval_data=eval_data)
        left_batch_validate, right_batch_validate, lidar_batch_validate = simladar.inputs(eval_data=True)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        # logits = cifar10.inference(images)
        keep_prob = tf.constant(1.)  # dropout (keep probability)
        pred_validate = simladar.inference(left_batch_validate, right_batch_validate, keep_prob)

        # Calculate predictions.
        # top_k_op = tf.nn.in_top_k(logits, labels, 1)
        # RMSE between ground-truth lidar and prediction over the batch.
        valid_op = tf.sqrt(tf.reduce_mean(tf.square(tf.sub(lidar_batch_validate, pred_validate))))

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            simladar.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, summary_writer, valid_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
def main(argv=None):  # pylint: disable=unused-argument
    # Entry point invoked by tf.app.run() after flag parsing.
    evaluate()

if __name__ == '__main__':
    tf.app.run()
| [
"penchen@ucdavis.edu"
] | penchen@ucdavis.edu |
027e34753a5633d392d90e6a3351c2c1ee646140 | 0fd66a4a28bdc7d967ec18d90eca5cc54b5cbdd4 | /middleware/legato/library/plugins/scripts/generator/fontsource.py | 85e614f4f6ad2d71a1b35a15aabe28e38d391989 | [
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"LicenseRef-scancode-public-domain"
] | permissive | fb321/gfx | b865539ea6acd9c99d11a3968424ae03b5dea438 | e59a8d65ef77d4b017fdc523305d4d29a066d92a | refs/heads/master | 2020-06-27T14:20:24.209933 | 2019-07-31T22:01:05 | 2019-07-31T22:01:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,638 | py | def generateFontSourceFile(font):
name = font.getName()
antialias = font.getAntialias()
height = font.getAdjustedHeight()
baseline = font.getBaseline()
style = ""
if antialias == True:
style += "Antialias"
if len(style) == 0:
style = "Plain"
if style.endswith(",") == True:
style = style[:-1]
fontData = font.generateFontData()
if fontData.glyphs.size() == 0:
return
fntSrc = File("generated/font/le_gen_font_" + name + ".c")
fntSrc.write('#include "gfx/legato/generated/le_gen_assets.h"')
fntSrc.writeNewLine()
fntSrc.write("/*********************************")
fntSrc.write(" * Legato Font Asset")
fntSrc.write(" * Name: %s" % (name))
fntSrc.write(" * Height: %d" % (height))
fntSrc.write(" * Baseline: %d" % (baseline))
fntSrc.write(" * Style: %s" % (style))
fntSrc.write(" * Glyph Count: %d" % (fontData.glyphs.size()))
fntSrc.write(" * Range Count: %d" % (fontData.ranges.size()))
fntSrc.writeNoNewline(" * Glyph Ranges: ")
idx = 0
for range in fontData.ranges:
start = range.getStartOrdinal()
end = range.getEndOrdinal()
if idx == 0:
if start != end:
fntSrc.write("0x%02X-0x%02X" % (start, end))
else:
fntSrc.write("0x%02X" % (start))
else:
if start != end:
fntSrc.write(" 0x%02X-0x%02X" % (start, end))
else:
fntSrc.write(" 0x%02X" % (start))
idx += 1
fntSrc.write(" *********************************/")
locIdx = font.getMemoryLocationIndex()
kerningData = fontData.getKerningDataArray()
kerningDataLength = len(kerningData)
fntSrc.write("/*********************************")
fntSrc.write(" * font glyph kerning table description")
fntSrc.write(" *")
fntSrc.write(" * unsigned int - number of glyphs")
fntSrc.write(" * for each glyph:")
fntSrc.write(" * unsigned short - codepoint * the glyph's codepoint")
fntSrc.write(" * short - width * the glyph's width in pixels")
fntSrc.write(" * short - height * the glyph's height in pixels")
fntSrc.write(" * short - advance * the glyph's advance value in pixels")
fntSrc.write(" * short - bearingX * the glyph's bearing value in pixels on the X axis")
fntSrc.write(" * short - bearingY * the glyph's bearing value in pixels on the Y axis")
fntSrc.write(" * unsigned short - flags * status flags for this glyph")
fntSrc.write(" * unsigned short - data row width * the size of a row of glyph data in bytes")
fntSrc.write(" * unsigned int - data table offset * the offset into the corresponding font data table")
fntSrc.write(" ********************************/")
if locIdx == 0: # internal flash = const
fntSrc.writeNoNewline("const ")
fntSrc.write("uint8_t %s_glyphs[%d] =" % (name, kerningDataLength))
fntSrc.write("{")
writeBinaryData(fntSrc, kerningData, kerningDataLength)
fntSrc.write("};")
fntSrc.writeNewLine()
if locIdx < 2:
glyphData = fontData.getGlyphDataArray()
glyphDataLength = len(glyphData)
fntSrc.write("/*********************************")
fntSrc.write(" * raw font glyph data")
fntSrc.write(" ********************************/")
if locIdx == 0: # internal flash = const
fntSrc.writeNoNewline("const ")
fntSrc.write("uint8_t %s_data[%d] =" % (name, glyphDataLength))
fntSrc.write("{")
writeBinaryData(fntSrc, glyphData, glyphDataLength)
fntSrc.write("};")
fntSrc.writeNewLine()
antialias = font.getAntialias()
bpp = 1
if antialias == True:
bpp = 8
memLocName = ""
if locIdx < 2:
memLocName = "LE_STREAM_LOCATION_ID_INTERNAL"
else:
memLocName = font.getMemoryLocationName()
fntSrc.write("leRasterFont %s =" % (name))
fntSrc.write("{")
fntSrc.write(" {")
fntSrc.write(" {")
fntSrc.write(" %s, // data location id" % (memLocName))
fntSrc.write(" (void*)%s_data, // data address pointer" % (name))
fntSrc.write(" %d, // data size" % (glyphDataLength))
fntSrc.write(" },")
fntSrc.write(" LE_RASTER_FONT,")
fntSrc.write(" },")
fntSrc.write(" %d," % (fontData.getMaxHeight()))
fntSrc.write(" %d," % (fontData.getMaxBaseline()))
fntSrc.write(" LE_FONT_BPP_%d, // bits per pixel" % (bpp))
fntSrc.write(" %s_glyphs, // glyph table" % (name))
fntSrc.write("};")
fntSrc.close()
global fileDict
fileDict[fntSrc.name] = fntSrc | [
"http://support.microchip.com"
] | http://support.microchip.com |
0d03fc791409f0a5c97a2b1af3d06de5bca7c727 | 228c54822400aaf033679fa49f2a7475e64b5a73 | /setup.py | a62e8fba4aa9b92759beae4d264dd203160b80a1 | [
"MIT"
] | permissive | kuangmeng/MRIUtils | 3111da0c212b8bd74b4b35bb6a5fbf641c4d28ff | 3a79e8104071deb0dc17c402ac878f94161d9b4a | refs/heads/master | 2023-06-01T14:51:24.547215 | 2021-06-23T07:22:15 | 2021-06-23T07:22:15 | 322,794,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | #!/usr/bin/env python
import setuptools

readme = 'README.md'  # source for the PyPI long description

# Package metadata for publishing `mriutils`.
# NOTE(review): open(readme, 'r') leaves the file handle to the garbage
# collector; harmless for a setup script but worth noting.
setuptools.setup(
    name="mriutils",
    version="1.2.18",
    author="Mengmeng Kuang",
    keywords="MRI-Analysis",
    author_email="kuangmeng@msn.com",
    description="A simple common utils and models package",
    long_description=open(readme, 'r').read(),
    long_description_content_type="text/markdown",
    url="https://github.com/kuangmeng/MRIUtils",
    packages=setuptools.find_packages(),
    data_files=[readme],
    install_requires=["requests"],
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Utilities"
    ],
    python_requires='>=3.5',
) | [
"kuangmeng@msn.com"
] | kuangmeng@msn.com |
2dea0bfd55ce4a3bff0ce13ca881d5ded7316f0a | 2f29702bfa169509ef166228afcf4c167c09decd | /BOJ_Python/Math1/BOJ_2839_SugarDilivery.py | 25aa74780be6febb23645667f713d82b8e0dbb0c | [] | no_license | maroro0220/PythonStudy | 53b2721c000fc6f86e1184772a3e837e47c250c1 | 985ff7f8ddf93ad25d8b3d46d1c7a740e37225ea | refs/heads/master | 2020-12-21T02:21:43.198573 | 2020-05-10T06:28:21 | 2020-05-10T06:28:21 | 236,277,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | '''
Problem
Sangkeun delivers sugar from a sugar factory. He must deliver exactly N kilograms of sugar to a candy store. Sugar from the factory comes in bags of 3 kilograms and 5 kilograms.
Sangkeun wants to carry as few bags as possible. For example, to deliver 18 kilograms he could take six 3-kilogram bags, but taking three 5-kilogram bags and one 3-kilogram bag means carrying fewer bags.
Write a program that computes the minimum number of bags Sangkeun needs to deliver exactly N kilograms of sugar.
Input
The first line contains N. (3 <= N <= 5000)
Output
Print the minimum number of bags. If exactly N kilograms cannot be made, print -1.
Sample input 1
18
Sample output 1
4
Sample input 2
4
Sample output 2
-1
Sample input 3
6
Sample output 3
2
Sample input 4
9
Sample output 4
3
Sample input 5
11
Sample output 5
3
'''
sugar = int(input())

# 4 and 7 (and anything below 3) cannot be written as 3a + 5b.
if sugar < 3 or sugar in (4, 7):
    print(-1)
else:
    # Greedily prefer 5 kg bags; trade one away at a time until the
    # remainder is divisible by 3. The first hit uses the fewest bags.
    for fives in range(sugar // 5, -1, -1):
        remainder = sugar - 5 * fives
        if remainder % 3 == 0:
            print(fives + remainder // 3)
            break
| [
"maro1339@gmail.com"
] | maro1339@gmail.com |
f0b918fd25848e32e351ba35139e4151c98faf97 | dd8ed0972bf55ae0ed76aa3cc47fd2b905318ff8 | /scholars.py | b08ca187bfb83613c95249996b6d761a1c147ee0 | [] | no_license | sky121/178ML | 55b61d612f7aefadddf562994df911ae53afd4ec | ebef8186a9e4a7a1d80130215779477e18291067 | refs/heads/main | 2023-01-25T04:07:07.553581 | 2020-12-11T04:19:48 | 2020-12-11T04:19:48 | 320,407,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,542 | py | import mltools as ml
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from mltools import kernel
class RandomForest:
def __init__(self, X, Y, nFeatures, maxDepth, minLeaf, number_of_learner):
(N, D) = X.shape
self.number_of_learner = number_of_learner
self.learners = [0]*self.number_of_learner
for i in range(self.number_of_learner):
(bstrp_x, bstrp_y) = ml.bootstrapData(X, Y)
self.learners[i] = ml.dtree.treeClassify(bstrp_x, bstrp_y, nFeatures=nFeatures, maxDepth=maxDepth, minLeaf=minLeaf)
def predict(self, data):
predictions = [0]*(self.number_of_learner)
num_of_data = len(data)
for i in range(self.number_of_learner):
predictions[i] = self.learners[i].predict(data) #prediction = [[data1, data2], [data1, data2], [data1,data2]]
final_prediction = [0]*num_of_data
for j in range(num_of_data):
final_prediction[j] = np.mean(np.array(predictions)[:, j])
return final_prediction
class AdaBoost:
def __init__(self, X, Y, numStumps = 100, learning_rate = 0.25):
self.AdaBoostClassifier = AdaBoostClassifier(n_estimators = numStumps, learning_rate = learning_rate)
self.AdaBoostClassifier.fit(X, Y)
def predict(self, data):
return self.AdaBoostClassifier.predict_proba(data)
class GradientBoost:
def __init__(self, X, Y):
self.GradientBoostingClassifier = GradientBoostingClassifier()
self.GradientBoostingClassifier.fit(X, Y)
def predict(self, data):
return self.GradientBoostingClassifier.predict_proba(data)
class BaggedTree(ml.base.classifier):
def __init__(self, learners):
"""Constructs a BaggedTree class with a set of learners. """
self.learners = learners
def predictSoft(self, X):
"""Predicts the probabilities with each bagged learner and average over the results. """
n_bags = len(self.learners)
preds = [self.learners[l].predictSoft(X) for l in range(n_bags)]
return np.mean(preds, axis=0)
class RandomForest2:
def __init__(self, X, Y, Nbags = 80, maxDepth = 20, nFeatures = 20):
self.bags = []
for i in range(Nbags):
Xi, Yi = ml.bootstrapData(X, Y, X.shape[0])
tree = ml.dtree.treeClassify(Xi, Yi, maxDepth = maxDepth, nFeatures = nFeatures)
self.bags.append(tree)
self.bt = BaggedTree(self.bags)
self.bt.classes = np.unique(Y)
def predict(self, data):
x1 = self.bt.predictSoft(data)[:,1]
return x1
class GradientBoost2:
def __init__(self, X, Y, nEns=100):
M = X.shape[0]
self.en = [None]*nEns
YHat = np.zeros((M,nEns))
f = np.zeros(Y.shape)
self.alpha = 0.5
for l in range(nEns): # this is a lot faster than the bagging loop:
dJ = 1.*Y - self.sigma(f)
self.en[l] = ml.dtree.treeRegress(X,dJ, maxDepth=3) # train and save learner
f -= (self.alpha)*((self.en)[l]).predict(X)
def sigma(self, z):
return np.exp(-z)/(1.+np.exp(-z))
def predict(self, data):
a = np.zeros((data.shape[0],39))
for l in range(39):
a[:,l] = -self.alpha*self.en[l].predict(data)
preds = self.sigma(a.sum(axis=1))
return preds | [
"skylarooz@gmail.com"
] | skylarooz@gmail.com |
e7d5f58ceda88c84bc2cf87e7f99fecb837cf9b3 | 49fbdb00e0ce9775974442c9f09fad29fab77a02 | /rest_api/urls.py | 27454a45284747b21c2e70e0acd37c934b2e690b | [] | no_license | Macaulayfamous/Basic_api_app | 9d009f627632ca247e6224aa4ea8ff8fde288421 | 625668f703334b2b9aa58710938fb8baceff9fff | refs/heads/main | 2023-06-18T04:00:43.649749 | 2021-07-15T06:39:42 | 2021-07-15T06:39:42 | 386,176,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py |
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from api.views import MovieViewSet, ActionViewSet, ComedyViewSet
from django.conf import settings
from django.conf.urls.static import static
router = routers.SimpleRouter()
router.register('', MovieViewSet)
router.register('action', ActionViewSet)
router.register('comedy', ComedyViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('', include(router.urls)),
]+static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"pythonfamous@gmail.com"
] | pythonfamous@gmail.com |
3731094ab99923201e31d46122ad87ceee945bfb | 0364bd3bfa82153b5d9e5b92894936390a1972ae | /inqoire/connection/admin.py | a640d1d10332b5e1fddd582bf9658971e0e1ea77 | [] | no_license | kraft99/inQoire | 8e9a05d8f033c302380ab7dceba48242f2fe57f3 | 5a88ce2e21cb45ec7b7412010157c716c864d825 | refs/heads/master | 2020-12-04T16:04:16.549607 | 2020-03-02T07:16:00 | 2020-03-02T07:16:00 | 231,828,965 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from django.contrib import admin
from .models import Connection
admin.site.register(Connection)
| [
"kraft.developer@gmail.com"
] | kraft.developer@gmail.com |
48e60a05f222102e719311be1a90a1be88b93574 | 78b82749360a90f74f6af2f94f9976d39d094663 | /homeassistant/components/bond/config_flow.py | 5996cd03baed1a2ecfbc573854ff27e9568e8d2f | [
"Apache-2.0"
] | permissive | klaashoekstra94/core | 0ef2d44a6dce5a93000c3085e042fa7b81e2e93e | 47b6755177b47b721cc077b4b182f2a2a491a785 | refs/heads/dev | 2023-02-13T21:25:38.068552 | 2021-11-10T18:40:49 | 2021-11-10T18:40:49 | 248,250,407 | 2 | 0 | Apache-2.0 | 2021-04-06T10:33:09 | 2020-03-18T14:14:00 | Python | UTF-8 | Python | false | false | 7,170 | py | """Config flow for Bond integration."""
from __future__ import annotations
from http import HTTPStatus
import logging
from typing import Any
from aiohttp import ClientConnectionError, ClientResponseError
from bond_api import Bond
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.components import zeroconf
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import AbortFlow, FlowResult
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import DiscoveryInfoType
from .const import DOMAIN
from .utils import BondHub
_LOGGER = logging.getLogger(__name__)
USER_SCHEMA = vol.Schema(
{vol.Required(CONF_HOST): str, vol.Required(CONF_ACCESS_TOKEN): str}
)
DISCOVERY_SCHEMA = vol.Schema({vol.Required(CONF_ACCESS_TOKEN): str})
TOKEN_SCHEMA = vol.Schema({})
async def async_get_token(hass: HomeAssistant, host: str) -> str | None:
"""Try to fetch the token from the bond device."""
bond = Bond(host, "", session=async_get_clientsession(hass))
try:
response: dict[str, str] = await bond.token()
except ClientConnectionError:
return None
return response.get("token")
async def _validate_input(hass: HomeAssistant, data: dict[str, Any]) -> tuple[str, str]:
"""Validate the user input allows us to connect."""
bond = Bond(
data[CONF_HOST], data[CONF_ACCESS_TOKEN], session=async_get_clientsession(hass)
)
try:
hub = BondHub(bond)
await hub.setup(max_devices=1)
except ClientConnectionError as error:
raise InputValidationError("cannot_connect") from error
except ClientResponseError as error:
if error.status == HTTPStatus.UNAUTHORIZED:
raise InputValidationError("invalid_auth") from error
raise InputValidationError("unknown") from error
except Exception as error:
_LOGGER.exception("Unexpected exception")
raise InputValidationError("unknown") from error
# Return unique ID from the hub to be stored in the config entry.
if not hub.bond_id:
raise InputValidationError("old_firmware")
return hub.bond_id, hub.name
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Bond."""
VERSION = 1
def __init__(self) -> None:
"""Initialize config flow."""
self._discovered: dict[str, str] = {}
async def _async_try_automatic_configure(self) -> None:
"""Try to auto configure the device.
Failure is acceptable here since the device may have been
online longer then the allowed setup period, and we will
instead ask them to manually enter the token.
"""
host = self._discovered[CONF_HOST]
if not (token := await async_get_token(self.hass, host)):
return
self._discovered[CONF_ACCESS_TOKEN] = token
_, hub_name = await _validate_input(self.hass, self._discovered)
self._discovered[CONF_NAME] = hub_name
async def async_step_zeroconf(
self, discovery_info: DiscoveryInfoType
) -> FlowResult:
"""Handle a flow initialized by zeroconf discovery."""
name: str = discovery_info[zeroconf.ATTR_NAME]
host: str = discovery_info[zeroconf.ATTR_HOST]
bond_id = name.partition(".")[0]
await self.async_set_unique_id(bond_id)
for entry in self._async_current_entries():
if entry.unique_id != bond_id:
continue
updates = {CONF_HOST: host}
if entry.state == ConfigEntryState.SETUP_ERROR and (
token := await async_get_token(self.hass, host)
):
updates[CONF_ACCESS_TOKEN] = token
new_data = {**entry.data, **updates}
if new_data != dict(entry.data):
self.hass.config_entries.async_update_entry(
entry, data={**entry.data, **updates}
)
self.hass.async_create_task(
self.hass.config_entries.async_reload(entry.entry_id)
)
raise AbortFlow("already_configured")
self._discovered = {CONF_HOST: host, CONF_NAME: bond_id}
await self._async_try_automatic_configure()
self.context.update(
{
"title_placeholders": {
CONF_HOST: self._discovered[CONF_HOST],
CONF_NAME: self._discovered[CONF_NAME],
}
}
)
return await self.async_step_confirm()
async def async_step_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle confirmation flow for discovered bond hub."""
errors = {}
if user_input is not None:
if CONF_ACCESS_TOKEN in self._discovered:
return self.async_create_entry(
title=self._discovered[CONF_NAME],
data={
CONF_ACCESS_TOKEN: self._discovered[CONF_ACCESS_TOKEN],
CONF_HOST: self._discovered[CONF_HOST],
},
)
data = {
CONF_ACCESS_TOKEN: user_input[CONF_ACCESS_TOKEN],
CONF_HOST: self._discovered[CONF_HOST],
}
try:
_, hub_name = await _validate_input(self.hass, data)
except InputValidationError as error:
errors["base"] = error.base
else:
return self.async_create_entry(
title=hub_name,
data=data,
)
if CONF_ACCESS_TOKEN in self._discovered:
data_schema = TOKEN_SCHEMA
else:
data_schema = DISCOVERY_SCHEMA
return self.async_show_form(
step_id="confirm",
data_schema=data_schema,
errors=errors,
description_placeholders=self._discovered,
)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
try:
bond_id, hub_name = await _validate_input(self.hass, user_input)
except InputValidationError as error:
errors["base"] = error.base
else:
await self.async_set_unique_id(bond_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=hub_name, data=user_input)
return self.async_show_form(
step_id="user", data_schema=USER_SCHEMA, errors=errors
)
class InputValidationError(exceptions.HomeAssistantError):
"""Error to indicate we cannot proceed due to invalid input."""
def __init__(self, base: str) -> None:
"""Initialize with error base."""
super().__init__()
self.base = base
| [
"noreply@github.com"
] | klaashoekstra94.noreply@github.com |
2d90bff06855f9bae090f63724ebaf7df1c195fc | f077cc46fea261ea5d76342877229721c8e5b44a | /bin/main.py | adcad06e2d91b56a1687fbc7a56e86cefe095d42 | [] | no_license | bjherger/ticketfly_data_challenge | c95d3ad72c692b8decc49977263614a278ea3ff8 | 9cf5aeeda51787189ce994995fb6b58b04bd05d9 | refs/heads/master | 2021-01-24T11:08:30.047655 | 2016-11-22T22:11:41 | 2016-11-22T22:11:41 | 70,256,616 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py | #!/usr/bin/env python
"""
coding=utf-8
Response to Ticketfly data challenge
"""
import logging
import numpy as np
import pandas as pd
import patsy
import statsmodels.api as sm
logging.basicConfig(level=logging.DEBUG)
def create_training_df():
"""
Read in tables from file, and join them to create a single observation table
:return:
"""
# Read in tables from CSV
email_df = pd.read_csv('../data/input/email_table.csv')
email_opened_df = pd.read_csv('../data/input/email_opened_table.csv')
link_df = pd.read_csv('../data/input/link_clicked_table.csv')
# Perform joins to create one observations df
observation_df = pd.merge(email_df, email_opened_df, on='email_id')
observation_df = pd.merge(observation_df, link_df, on='email_id')
# Feature typing
observation_df['opened_flag'] = observation_df['opened_flag'] == 'Yes'
observation_df['CTA_link_click'] = observation_df['CTA_link_click'] == 'Yes'
observation_df['row_weight'] = 1
logging.info('Observation dataframe columns: \n%s' % observation_df.columns)
logging.info('Observation dataframe shape: \n%s' % str(observation_df.shape))
logging.info('Observation dataframe description: \n%s' % observation_df.describe(include='all'))
# Return observations DF
return observation_df
def q1(df):
# print df[['email_version', 'opened_flag', 'row_weight']].groupby(by=['email_version', 'opened_flag']).count()
print df[['opened_flag', 'row_weight', 'CTA_link_click']].groupby(by=['opened_flag']).agg(
{'row_weight': np.sum, 'CTA_link_click': np.sum})
# print df['opened_flag'].value_counts()
# print df[df['opened_flag']]['CTA_link_click'].value_counts()
# print df[~df['opened_flag']]['CTA_link_click'].value_counts()
print df[(df['opened_flag']) & (df['CTA_link_click'])].shape
print df[(df['opened_flag']) & ~(df['CTA_link_click'])].shape
print df[(~df['opened_flag']) & (df['CTA_link_click'])].shape
print df[(~df['opened_flag']) & ~(df['CTA_link_click'])].shape
def q2(df):
print df['CTA_link_click'].value_counts()
y, X = patsy.dmatrices('CTA_link_click ~ user_past_purchases + user_country + np.sin(hour) + np.cos(hour)',
data=df, return_type='dataframe')
print y
print X
mod = sm.Logit(y['CTA_link_click[True]'], X)
res = mod.fit()
print res.summary()
def main():
"""
Main function documentation template
:return: None
:rtype: None
"""
# Create dataset
df = create_training_df()
q1(df)
q2(df)
# Main section
if __name__ == '__main__':
main()
| [
"brendan.herger@capitalone.com"
] | brendan.herger@capitalone.com |
686a6b61c56e2ebc9b0eb54718bd8ea9f10202ce | 6571ba31ee91c1e24073231a1625f2a6fe2d426f | /mysite/settings.py | 5d0ee942a0904886f2646a95579995da063cd816 | [] | no_license | alyasamba/django-journal | 41376efbcbcd6b148af7783c6d83289aa8bd6131 | 00447d88102e5c546dd96ee5d8cd1fcc524635fb | refs/heads/master | 2021-06-08T14:06:16.125870 | 2016-12-14T17:01:22 | 2016-12-14T17:01:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qsg@j7yppj&#_g@hn3-y^+0p71$%8422x*7ikld(5=!73=2ih@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'tinymce',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
TINYMCE_DEFAULT_CONFIG = {
'plugins': "table,spellchecker,paste,searchreplace",
'theme': "advanced",
'cleanup_on_startup': True,
'custom_undo_redo_levels': 10,
'content_css': '//fonts.googleapis.com/css?family=Roboto+Slab|Roboto:400,400i,500,700',
'theme_advanced_buttons3_add': "search,replace,table,spellchecker",
}
TINYMCE_SPELLCHECKER = True
TINYMCE_COMPRESSOR = True
| [
"kaushikskalmady@gmail.com"
] | kaushikskalmady@gmail.com |
198ed41b5675e5534cc3b177590fa2b0b589576d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/10/usersdata/132/9933/submittedfiles/testes.py | 7261bb0dd0a251c9ed449627f60c5cd59e8963a4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | # -*- coding: utf-8 -*-
from __future__ import division
maior=0
dia=1
for i in range(1,31,1):
n=input('numero de discos vendidos')
if n>maior:
maior=n
dia=i
print(dia)
print(maior)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
0a30aaf77f75e4687957aa58e4ba2fd7b68f29b2 | 058d94f394a985627d3953fc06d8581edea886fd | /src/dvtests/__init__.py | 3d3d923c67fecaf20093ae106634d6f866795b51 | [
"MIT"
] | permissive | gdcc/dataverse_tests | 7c3d01158a4ba8f519509b312decaf81dbea5441 | d37791f588969973f1bb651e83154247ffdb9d49 | refs/heads/master | 2023-04-15T08:48:32.037168 | 2022-07-20T08:18:42 | 2022-07-20T08:18:42 | 233,846,862 | 2 | 4 | MIT | 2022-07-20T08:08:31 | 2020-01-14T13:24:51 | Python | UTF-8 | Python | false | false | 494 | py | """Find out more at https://github.com/AUSSDA/dataverse_tests.
Copyright 2022 Stefan Kasberger
Licensed under the MIT License.
"""
from requests.packages import urllib3
urllib3.disable_warnings() # noqa
__author__ = "Stefan Kasberger"
__email__ = "mail@stefankasberger.at"
__copyright__ = "Copyright (c) 2022 Stefan Kasberger"
__license__ = "MIT License"
# __version__ = "0.1.0"
__url__ = "https://github.com/gdcc/dataverse_tests"
__description__ = "Dataverse tests."
__name__ = "dvtests"
| [
"mail@stefankasberger.at"
] | mail@stefankasberger.at |
98ba95f8dbff24f9396d835c83b3232a91d917cc | 83dab2b5adaf537c525a04584e21501871fc8a4e | /model/write_data.py | e0fff521dd14697fc28f71a5ea1191330a5d6955 | [] | no_license | Najah-Shanableh/lead-public | 7852e2371d186c9e097fde01b3e0d7c1b2fc044e | f538249c43e0444b45b5ef4e58aa46811e825a58 | refs/heads/master | 2021-05-30T06:47:44.511409 | 2015-04-21T15:33:08 | 2015-04-21T15:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #!/usr/bin/python
import sys
import yaml
import util
import os
directory = sys.argv[1]
if not os.path.exists(directory):
os.makedirs(directory)
with open(sys.argv[2]) as f:
params = yaml.load(f)
params['data']['directory'] = directory
engine = util.create_engine()
data_name = params['data'].pop('name')
data = util.get_class(data_name)(**params['data'])
data.read_sql()
data.write()
| [
"eric@k2co3.net"
] | eric@k2co3.net |
bed8f7ef017694c7daa9fefa05467436adf4e1cf | 77588c0b2905f76896d221f18dc5e4b4e6338b42 | /standart_methods/method__new__.py | 5c0e0fd8be1a6f42730b3b6636404f51c4e5ffd0 | [] | no_license | konstantin1985/learn_python | 702a96ed171ce9fd27f51b0c0356b981bfed5f8b | ecc0d79b0bd5cda9846287da4c815ddb173c6ada | refs/heads/master | 2021-01-10T08:28:27.178267 | 2015-11-16T14:33:24 | 2015-11-16T14:33:24 | 46,173,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | #http://stackoverflow.com/questions/674304/pythons-use-of-new-and-init - Great explanation
| [
"konstantin@linux-rswc.site"
] | konstantin@linux-rswc.site |
0b5bbe3d5d84622d94ea279b1eb39216ea4a5707 | c49a6e67a63a541f8d420e725af155505d1e7f84 | /Design/lru-cache*.py | 8d4594e47dd5afbac9564423dc042919a58233fb | [] | no_license | wttttt-wang/leetcode_withTopics | b41ed0f8a036fd00f3b457e5b56efe32f872ca13 | e2837f3d6c23f012148a2d1f9d0ef6d34d4e6912 | refs/heads/master | 2021-09-05T05:03:47.519344 | 2018-01-24T08:28:58 | 2018-01-24T08:28:58 | 112,893,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | """
LRU Cache
@ Design: 1. Two hashMap + One linkedList
2. should be careful when dealing with hashMap in case of 'keyError'
3. reminder to update self.tail
"""
class ListNode(object):
def __init__(self, val):
self.val, self.next = val, None
class LRUCache(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
self.k2v, self.k2node = {}, {}
self.head, self.capacity = ListNode(0), capacity
self.tail = self.head
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key not in self.k2v:
return -1
val = self.k2v[key]
node = self.removeNode(self.k2node[key].next)
self.addAhead(node, val)
return val
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if key in self.k2v:
node = self.removeNode(self.k2node[key].next)
self.addAhead(node, value)
return
if len(self.k2v) == self.capacity:
self.removeNode(self.tail)
self.addAhead(ListNode(key), value)
def removeNode(self, node):
if node.next:
self.k2node[node.next.val] = self.k2node[node.val]
else:
self.tail = self.k2node[node.val]
self.k2node[node.val].next = node.next
self.k2node.pop(node.val)
self.k2v.pop(node.val)
return node
def addAhead(self, node, value):
if self.head.next:
self.k2node[self.head.next.val] = node
else:
self.tail = node
node.next = self.head.next
self.head.next = node
self.k2node[node.val] = self.head
self.k2v[node.val] = value
| [
"wttttt@Wttttt-de-MacBookPro.local"
] | wttttt@Wttttt-de-MacBookPro.local |
90db8927ca3b2e6e98fbb58229d85981f53b2c12 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03583/s935139311.py | 7cb09551156a49be9231f099550afb736cbc3172 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | N = int(input())
flag = 0
for a in range(1,3501):
if flag ==1:
break
for b in range(1,3501):
if 4*a*b - a*N - b*N != 0:
if a*b*N // (4*a*b - a*N - b* N) > 0 and a*b*N % (4*a*b - a*N - b* N) ==0:
c = int(a*b*N / (4*a*b - a*N - b* N))
print(a, b, c)
flag = 1
break | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e488527b0e94108d5d12c1cd9ba81d6348b88f36 | 7ccafccf2f52708386592ff03609a01495078976 | /p0-titanic-survival-exploration/titanic_survival_rev01.py | cee2f7f7cdb654ba13c3cec154e4b073593a5d2a | [] | no_license | nitin2016/machine-learning-nd-1 | 09e9c82b44254e5b4ae3d05f34eb466e1cb3d2a7 | 69ec26e3e0ead5d878f19ba7e306f005340eee89 | refs/heads/master | 2020-04-23T08:51:29.123252 | 2019-02-13T15:49:30 | 2019-02-13T15:49:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 14:17:59 2018
@author: User
"""
| [
"niall@changex.org"
] | niall@changex.org |
0a9fa1d2e3e4513278a11b5942e704c8b477555b | 1bf4cf8c61c5399979838e12beccd207a363396a | /tests/druid_import_test.py | cad352c0ee2da88f233720dd9035312793238d9b | [
"Apache-2.0"
] | permissive | moshebeeri/datap | 2da33e6eeed21c441a19c56e5acc59ad198af499 | 9ff99bb435728cd69f2589e3ee858a06768ea85e | refs/heads/main | 2023-08-19T07:35:32.917031 | 2021-09-25T07:42:14 | 2021-09-25T07:42:14 | 312,567,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from service.druid import Druid
class TestImport:
def test_it(self):
druid = Druid(start=None, end=None)
assert True | [
"moshe.beeri@gmail.com"
] | moshe.beeri@gmail.com |
62b681e01ba371318445c3f5ce2d06bd47fea7e4 | 87ac19d29284e0ec46e5f8f57bc23bf3901827e6 | /Week_07/libs/union_find.py | 72476257020849472a6803ef420a26da2d0e3398 | [] | no_license | ws2823147532/algorithm008-class01 | c0939aedc91b6f2f52df117430c8066ecc6c28ca | dd00020d013ad7bf594341fe60f7229f3ea08930 | refs/heads/master | 2022-11-12T15:14:55.699458 | 2020-07-04T00:37:24 | 2020-07-04T00:37:24 | 255,079,618 | 0 | 0 | null | 2020-04-12T12:40:13 | 2020-04-12T12:40:13 | null | UTF-8 | Python | false | false | 1,042 | py | class UnionFind:
"""
并查集
"""
def __init__(self, n):
self.p = [i for i in range(n)]
self.count = n
def union(self, i, j):
"""
把j当做i的parent
"""
p1 = self.parent(i)
p2 = self.parent(j)
if p1 != p2:
self.p[p1] = p2
self.count -= 1
def parent(self, i):
root = i
while self.p[root] != root:
root = self.p[root]
# while self.p[i] != i:
# x, i, self.p[x] = i, self.p[i], root
while self.p[i] != i:
# i, self.p[i] = self.p[i], root # 注意:这里不能用这种写法,因为修改了i的值之后,self.p[i]就会立刻指向修改后的i的位置。这里和普通的a b交换有区别
self.p[i], i = root, self.p[i] # 可以
# x = i
# i = self.p[i]
# self.p[x] = root
return root
uf = UnionFind(6)
uf.union(1, 0)
uf.union(3, 2)
uf.union(4, 0)
uf.union(4, 2)
uf.union(4, 1)
print(uf)
| [
"shang.wang@renren-inc.com"
] | shang.wang@renren-inc.com |
720b5a826589b2a2d5604841f4151d6cc8627e71 | 0c4309d55acb30fb3270400ba9243764193573a0 | /parte_2/semana_3/tipo_triangulo.py | 41c2700a6416da1c51e7a021d3d6f6a33feaa62f | [] | no_license | mwoitek/python-coursera | 8936e39eece19bb40caa1dab98b14529dc836db7 | 90d5d390868d0d0147d837939ee0fab2450c646c | refs/heads/master | 2022-04-25T20:02:45.984640 | 2020-04-30T01:16:57 | 2020-04-30T01:16:57 | 244,276,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | class Triangulo:
def __init__(self, lado1, lado2, lado3):
self.a = lado1
self.b = lado2
self.c = lado3
def perimetro(self):
return self.a + self.b + self.c
def tipo_lado(self):
teste1 = self.a == self.b
teste2 = self.a == self.c
teste3 = self.b == self.c
if teste1 and teste2 and teste3:
return "equilátero"
elif (not teste1) and (not teste2) and (not teste3):
return "escaleno"
else:
return "isósceles"
| [
"woitek@usp.br"
] | woitek@usp.br |
0bf39985c39b47434a9632b18b956c25b3069897 | eb7afa613940f5a3f202352a94dd996edcb6bed5 | /boto3_type_annotations/boto3_type_annotations/s3control/__init__.py | 4045ce4f030398c8486740f70ed511fb671b3a79 | [
"MIT"
] | permissive | alliefitter/boto3_type_annotations | e4da614e27a1d2ad3c9c653c50b8e30108180da5 | 2a88aa562b1aee6e8a6cc30402980884b3707fbb | refs/heads/master | 2020-04-05T22:05:12.689913 | 2019-11-28T03:32:13 | 2019-11-28T03:32:13 | 157,244,330 | 131 | 11 | MIT | 2023-04-21T17:17:03 | 2018-11-12T16:38:57 | Python | UTF-8 | Python | false | false | 91 | py | from boto3_type_annotations.s3control.client import Client
__all__ = (
'Client'
)
| [
"afitter@cellcontrol.com"
] | afitter@cellcontrol.com |
5f481220f5ee531759795b3696ba705afea9bc70 | ba0e8b50564ae5a3c4548c01516792c6512a0a08 | /smartcab/simulator.py | f0b7fd6888a42754da73e2972108637b282d4679 | [] | no_license | joshnewnham/udacity_smartcab | 7ff7027fc79fbbe0f375132b887cb0145de9f7f2 | f339c80e016992e9496d268a7b7ff547488c4eff | refs/heads/master | 2016-09-12T20:38:19.017417 | 2016-04-27T21:59:21 | 2016-04-27T21:59:21 | 57,077,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,667 | py | import os
import time
import random
import pygame
class Simulator(object):
    """PyGame-based simulator that visualizes and drives a dynamic environment.

    Owns the PyGame window and the main loop: it polls input events, steps the
    environment at a fixed interval (``update_delay``), and redraws roads,
    traffic lights and agents every frame.
    """

    # Named RGB palette shared by all drawing code (roads, lights, agents).
    # Agent objects reference these entries by name via their ``color`` attribute.
    colors = {
        'black'   : (  0,   0,   0),
        'white'   : (255, 255, 255),
        'red'     : (255,   0,   0),
        'green'   : (  0, 255,   0),
        'blue'    : (  0,   0, 255),
        'cyan'    : (  0, 200, 200),
        'magenta' : (200,   0, 200),
        'yellow'  : (255, 255,   0),
        'orange'  : (255, 128,   0)
    }
def __init__(self, env, size=None, frame_delay=10, update_delay=1.0):
self.env = env
self.size = size if size is not None else ((self.env.grid_size[0] + 1) * self.env.block_size, (self.env.grid_size[1] + 1) * self.env.block_size)
self.width, self.height = self.size
self.frame_delay = frame_delay
self.bg_color = self.colors['white']
self.road_width = 5
self.road_color = self.colors['black']
self.quit = False
self.start_time = None
self.current_time = 0.0
self.last_updated = 0.0
self.update_delay = update_delay
pygame.init()
self.screen = pygame.display.set_mode(self.size)
self.agent_sprite_size = (32, 32)
self.agent_circle_radius = 10 # radius of circle, when using simple representation
for agent in self.env.agent_states:
agent._sprite = pygame.transform.smoothscale(pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.agent_sprite_size)
agent._sprite_size = (agent._sprite.get_width(), agent._sprite.get_height())
self.font = pygame.font.Font(None, 28)
self.paused = False
def run(self, n_trials=1):
self.quit = False
for trial in xrange(n_trials):
print "Simulator.run(): Trial {}".format(trial) # [debug]
self.env.reset()
self.current_time = 0.0
self.last_updated = 0.0
self.start_time = time.time()
while True:
self.current_time = time.time() - self.start_time
#print "Simulator.run(): current_time = {:.3f}".format(self.current_time)
try:
# Handle events
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.quit = True
elif event.type == pygame.KEYDOWN:
if event.key == 27: # Esc
self.quit = True
elif event.unicode == u' ':
self.paused = True
if self.paused:
self.pause()
# Update environment
if self.current_time - self.last_updated >= self.update_delay:
self.env.step()
self.last_updated = self.current_time
# Render and sleep
self.render()
pygame.time.wait(self.frame_delay)
except KeyboardInterrupt:
self.quit = True
finally:
if self.quit or self.env.done:
self.env.on_finished()
break
if self.quit:
self.env.on_finished()
break
    def render(self):
        """Draw one frame: roads, traffic lights, agents, destinations, HUD text."""
        # Clear screen
        self.screen.fill(self.bg_color)
        # Draw elements
        # * Static elements
        for road in self.env.roads:
            pygame.draw.line(self.screen, self.road_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), self.road_width)
        for intersection, traffic_light in self.env.intersections.iteritems():
            pygame.draw.circle(self.screen, self.road_color, (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size), 10)
            # The green bar's orientation shows which travel direction is open.
            if traffic_light.state:  # North-South is open
                pygame.draw.line(self.screen, self.colors['green'],
                    (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size - 15),
                    (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size + 15), self.road_width)
            else:  # East-West is open
                pygame.draw.line(self.screen, self.colors['green'],
                    (intersection[0] * self.env.block_size - 15, intersection[1] * self.env.block_size),
                    (intersection[0] * self.env.block_size + 15, intersection[1] * self.env.block_size), self.road_width)
        # * Dynamic elements
        for agent, state in self.env.agent_states.iteritems():
            # Compute precise agent location here (back from the intersection some)
            agent_offset = (2 * state['heading'][0] * self.agent_circle_radius, 2 * state['heading'][1] * self.agent_circle_radius)
            agent_pos = (state['location'][0] * self.env.block_size - agent_offset[0], state['location'][1] * self.env.block_size - agent_offset[1])
            agent_color = self.colors[agent.color]
            if hasattr(agent, '_sprite') and agent._sprite is not None:
                # Draw agent sprite (image), properly rotated
                rotated_sprite = agent._sprite if state['heading'] == (1, 0) else pygame.transform.rotate(agent._sprite, 180 if state['heading'][0] == -1 else state['heading'][1] * -90)
                self.screen.blit(rotated_sprite,
                    pygame.rect.Rect(agent_pos[0] - agent._sprite_size[0] / 2, agent_pos[1] - agent._sprite_size[1] / 2,
                        agent._sprite_size[0], agent._sprite_size[1]))
            else:
                # Draw simple agent (circle with a short line segment poking out to indicate heading)
                pygame.draw.circle(self.screen, agent_color, agent_pos, self.agent_circle_radius)
                pygame.draw.line(self.screen, agent_color, agent_pos, state['location'], self.road_width)
            # Show the agent's next waypoint label next to the agent, if any.
            if agent.get_next_waypoint() is not None:
                self.screen.blit(self.font.render(agent.get_next_waypoint(), True, agent_color, self.bg_color), (agent_pos[0] + 10, agent_pos[1] + 10))
            # Destination: a filled dot inside a thin ring, in the agent's color.
            if state['destination'] is not None:
                pygame.draw.circle(self.screen, agent_color, (state['destination'][0] * self.env.block_size, state['destination'][1] * self.env.block_size), 6)
                pygame.draw.circle(self.screen, agent_color, (state['destination'][0] * self.env.block_size, state['destination'][1] * self.env.block_size), 15, 2)
        # * Overlays
        text_y = 10
        for text in self.env.status_text.split('\n'):
            self.screen.blit(self.font.render(text, True, self.colors['red'], self.bg_color), (100, text_y))
            text_y += 20
        # Flip buffers
        pygame.display.flip()
    def pause(self):
        """Block until any key is pressed, then resume without skewing timers."""
        abs_pause_time = time.time()
        pause_text = "[PAUSED] Press any key to continue..."
        self.screen.blit(self.font.render(pause_text, True, self.colors['cyan'], self.bg_color), (100, self.height - 40))
        pygame.display.flip()
        print pause_text  # [debug]
        while self.paused:
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    self.paused = False
            pygame.time.wait(self.frame_delay)
        # Erase the banner by re-rendering it in the background color.
        self.screen.blit(self.font.render(pause_text, True, self.bg_color, self.bg_color), (100, self.height - 40))
        # Shift the trial start time forward so time spent paused doesn't count.
        self.start_time += (time.time() - abs_pause_time)
| [
"josh@wemakeplay.com"
] | josh@wemakeplay.com |
8fe3bd1dd3226d26dac5e8615714e7b61fbb87a2 | 261fa90a0ab6b844682465356fee1d5f490774d7 | /02_matplotlib/06_axis.py | 7a085813afb58ff177ab889e8c38e236fd44e6b6 | [] | no_license | lofues/Data_Science | 85d7fcd6e2e7f3dad6392010b30272bb8ca9d1b3 | d91a05325bf597f641d9af1afcf26575489c4960 | refs/heads/master | 2020-09-03T12:43:01.998302 | 2019-11-07T09:17:19 | 2019-11-07T09:17:19 | 219,464,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import numpy as np
import matplotlib.pyplot as mp

# Demo: customize axis tick locators and spines on the current axes.
ax = mp.gca()
# Major ticks every 1 unit, minor ticks every 0.1 unit on the x-axis.
ax.xaxis.set_major_locator(mp.MultipleLocator(1))
ax.xaxis.set_minor_locator(mp.MultipleLocator(0.1))
# Show only x in [1, 10]
mp.xlim(1,10)
# Hide the y-axis ticks entirely
mp.yticks([])
# Remove the left/right/top spines and move the bottom spine to y = 0.5.
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position(('data',0.5))
mp.tight_layout()
mp.show()
"junfuwang@163.com"
] | junfuwang@163.com |
c49df3b7148427ccab8d418470e85866a904febc | 82395399e18144a2f495c47cf33517c44df2364e | /backend/rpi/sms_receive.py | 1c6530077397156df13dc96d629eb6268cd46fa9 | [] | no_license | roxana-andreea/BachelorProject | 945218f7ec6c34a988b6b1d4ad306c8b4dcb58ac | 44ce97466ec82fd23cddcc2b2531994c828882d3 | refs/heads/master | 2021-01-01T05:57:36.481475 | 2017-07-23T08:13:03 | 2017-07-23T08:13:03 | 97,319,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | #!/usr/bin/python3
import sys
import logging
import requests
# Process exit codes for the failure modes handled at the bottom of the script.
ERROR_SMS_FORMAT = 101
ERROR_PINCODE_VALIDATION = 102
ERROR_UNKNOWN_ACTION = 103
ERROR_UNKNOWN_PARAMETER = 104
ERROR_FEATURE_ACTIVATION = 105
# Backend API endpoint and HTTP basic-auth credentials used by get_user_info().
URL = 'http://vss.lupu.online:8080'
LOGIN = 'test@domain1'
PASSWORD = 'secret1'
# NOTE(review): USER_ID and DEVICE_ID are defined but never used below.
USER_ID = 1
DEVICE_ID = 1
def debug(str):
    """Echo a debug message to stdout and mirror it to the file logger."""
    console_line = "[DEBUG] {}".format(str)
    print(console_line)
    logger.debug(str)
def info(str):
    """Echo an info message to stdout and mirror it to the file logger."""
    console_line = "[INFO] {}".format(str)
    print(console_line)
    logger.info(str)
def error(str):
    """Echo an error message to stdout and mirror it to the file logger."""
    console_line = "[ERROR] {}".format(str)
    print(console_line)
    logger.error(str)
def print_response(res):
    """Log the status line, headers and decoded JSON body of a response."""
    # Lazily built so each line is formatted right before it is logged,
    # matching the original one-call-at-a-time order.
    line_makers = (
        lambda: 'response={} {}'.format(res.status_code, res.reason),
        lambda: 'headers={}'.format(res.headers),
        lambda: 'json={}'.format(res.json()),
    )
    for make_line in line_makers:
        debug(make_line())
def get_user_info():
    """Fetch the first user record from the API.

    Returns a (sms_feature_enabled, pincode) tuple taken from the first
    item of the `_items` array in the JSON response.
    """
    response = requests.get(
        '{}/users'.format(URL),
        headers={'Content-Type': 'application/json'},
        auth=(LOGIN, PASSWORD),
    )
    print_response(response)
    first_user = response.json()["_items"][0]
    return first_user['sms'], first_user['pincode']
"""Logging"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create a file handler
handler = logging.FileHandler('/var/log/rpi-sms.log')
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
# IN20160618_233927_00_+40753174860_00.txt
filename = sys.argv[1]
dirname = "/var/spool/gammu/inbox/"
phone = filename.split('_')[3]
debug('filename={}'.format(filename))
debug('dirname={}'.format(dirname))
sms=open(dirname+filename)
text = sms.readline()
sms.close()
info("Received SMS from number: '{}' with text: '{}'".format(phone, text))
actions = ['START', 'STOP', 'UP', 'DOWN']
parameters = ['ENGINE', 'ALARM']
sms, pincode = get_user_info()
info("For user with login '{}', the sms feature is: '{}' and pincode is: '{}'".format(LOGIN,sms,pincode))
if sms == False:
error('SMS Feature not enabled')
exit(ERROR_FEATURE_ACTIVATION)
try:
action = text.split()[0]
parameter = text.split()[1]
pin = text.split()[2]
except IndexError:
error('SMS Format not validated')
exit(ERROR_SMS_FORMAT)
else:
debug('action={}'.format(action))
debug('parameter={}'.format(parameter))
debug('pin from SMS={}'.format(pin))
debug('pin from API={}'.format(pincode))
# String[] commands = {"LOCK CAR", "UNLOCK CAR", "START ENGINE", "STOP ENGINE", "UP WINDOWS", "DOWN WINDOWS", "OPEN TRUNK", "ALARM START"};
# import pdb; pdb.set_trace()
if str(pincode) == pin:
info('Pincode validated')
if action in ['START','STOP','LOCK','UNLOCK','UP','DOWN','OPEN','CLOSE']:
if parameter in ['ENGINE','CAR','WINDOWS','TRUNK','ALARM']:
info('Received action {} for {}'.format(action,parameter))
else:
error('Parameter not implemented')
exit(ERROR_UNKNOWN_PARAMETER)
else:
error('Action not implemented')
exit(ERROR_UNKNOWN_ACTION)
else:
error('Pincode not validated')
exit(ERROR_PINCODE_VALIDATION)
exit(0) | [
"roxana.cazacu93@gmail.com"
] | roxana.cazacu93@gmail.com |
466f0141c621c5aa74cf85f313b58d9f62a6e995 | 6ab9a3229719f457e4883f8b9c5f1d4c7b349362 | /leetcode/00098_validate_binary_search_tree.py | 68b7dceaba479602951e0e5e99a36a25a8abc2fc | [] | no_license | ajmarin/coding | 77c91ee760b3af34db7c45c64f90b23f6f5def16 | 8af901372ade9d3d913f69b1532df36fc9461603 | refs/heads/master | 2022-01-26T09:54:38.068385 | 2022-01-09T11:26:30 | 2022-01-09T11:26:30 | 2,166,262 | 33 | 15 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isValidBST(self, root: "TreeNode", left: "int | None" = None, right: "int | None" = None) -> bool:
        """Return True iff the tree rooted at `root` is a valid BST.

        `left`/`right` are the exclusive lower/upper bounds inherited from
        ancestors (None means unbounded). Equal values are rejected.

        Fix vs. original: the annotations were evaluated eagerly — `root: TreeNode`
        raised NameError at import time whenever TreeNode wasn't defined, and
        `left: int = None` mis-declared an optional parameter as plain int.
        String annotations keep them lazy and honest.
        """
        if not root:
            return True
        if left is not None and root.val <= left:
            return False
        if right is not None and root.val >= right:
            return False
        # Left subtree gets root.val as its new upper bound, right subtree
        # gets it as its new lower bound.
        return (self.isValidBST(root.left, left, root.val)
                and self.isValidBST(root.right, root.val, right))
"mistermarin@gmail.com"
] | mistermarin@gmail.com |
94b3e93b823ead0b984d54042c90c619c2be22f8 | 873c87eb406fb0c8320c47431fc6d696c40dd29a | /Lecture9/sqlite_demo.py | c46affce7151571f415edf8679876bc5102e9d72 | [] | no_license | borislavtotev/SoftUni-Python | ae16d0ab07168529963c307e7612d4ad6b071b35 | 76e59ef698f50bec1c8fe2f4b48cf59fb0ee243b | refs/heads/master | 2021-01-10T14:19:21.871744 | 2016-02-16T18:21:28 | 2016-02-16T18:21:28 | 51,778,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,274 | py | import sys
import os
import csv
import sqlite3
from product import Product
from sale import Sale
def main():
    """Load the CSV catalog and sales, import both into SQLite, then run an
    interactive by-city sales query (prompts are in Bulgarian)."""
    product_catalog = load_product_catalog('./catalog.csv')
    sales = load_sells('./sales-example.csv')
    print(product_catalog)
    print(sales)
    db_filname = "sales-example.db"
    # isolation_level=None puts sqlite3 into autocommit mode.
    with sqlite3.connect(db_filname, isolation_level=None) as connection:
        create_tables(connection)
        load_catalog_into_db(product_catalog, connection)
        load_sale_into_db(sales, connection)
        city_name = input("Въведете име на град: ")
        current_city_sales = sales_by_city(city_name, connection)
        if current_city_sales:
            print(current_city_sales)
        else:
            print("Няма продажби в {}".format(city_name))
def sales_by_city(city_name, connection):
    """Return all rows from `sale` for `city_name`, ordered by timestamp."""
    query = 'select * from sale where city_name = ? order by sale_timestamp'
    return connection.cursor().execute(query, [city_name]).fetchall()
def create_tables(connection):
    """Create the `sale` and `catalog` tables if they do not exist yet."""
    ddl_statements = (
        """
    create table if not exists sale (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        item_key varchar(200) NOT NULL,
        country varchar(3),
        city_name varchar(60),
        sale_timestamp TEXT,
        price NUMERIC
    );
    """,
        """
    create table if not exists catalog (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        item_key varchar(200),
        category varchar(200)
    );
    """,
    )
    cursor = connection.cursor()
    for statement in ddl_statements:
        cursor.execute(statement)
def load_catalog_into_db(product_catalog, connection):
    """Insert every product from the {id: Product} dict into `catalog`."""
    insert_sql = "insert into catalog (item_key, category) values (?, ?)"
    cursor = connection.cursor()
    for product in product_catalog.values():
        cursor.execute(insert_sql, [product.id, product.category])
        print("{} inserted".format(product.category))
def load_sale_into_db(sales, connection):
    """Insert each Sale into `sale`; timestamps are stored as ISO-8601 text."""
    insert_sql = """
        insert into sale (item_key, country, city_name, sale_timestamp, price) values(?, ?, ?, ?, ?)"""
    cursor = connection.cursor()
    for sale in sales:
        row = [sale.product_id, sale.country, sale.city,
               sale.sell_datetime.isoformat(), sale.price]
        cursor.execute(insert_sql, row)
        print("{} imported.".format(sale.city))
def load_product_catalog(file):
    """Read `file` as Excel-dialect CSV and return {first_column: Product(row)}.

    Raises ValueError when the path is missing or unreadable.
    """
    if not (os.access(file, os.R_OK) and os.path.isfile(file)):
        raise ValueError("Inaccessible file '{}'".format(file))
    with open(file) as handle:
        reader = csv.reader(handle, dialect='excel')
        # Last row wins on duplicate ids, matching the original loop.
        return {row[0]: Product(*row) for row in reader}
def load_sells(file):
    """Read `file` as Excel-dialect CSV and return a list of Sale objects.

    Raises ValueError when the path is missing or unreadable.
    """
    if not (os.access(file, os.R_OK) and os.path.isfile(file)):
        raise ValueError("Inaccessible file '{}'".format(file))
    with open(file) as handle:
        return [Sale(*row) for row in csv.reader(handle, dialect='excel')]
if __name__ == "__main__":
sys.exit(main()) | [
"borislav.totev@gmail.com"
] | borislav.totev@gmail.com |
c88e8ccfcfc48ac220ee895c1127e4d392ed80c4 | f9f350f77ad134da266a2be1c9fba0c61b302e41 | /Brightness changing/src/BrightnessChange.py | 1a01dc5de2983d93f43a0566e3ad20c433569a3d | [] | no_license | shriyagayatri/Image-Manipulation | 440d849c6e85de0e9ad619cc8219b514ef54dada | 975cece2596bdd81c788d25873eb1f4a441463a2 | refs/heads/master | 2020-04-01T04:06:05.048755 | 2018-12-06T08:47:51 | 2018-12-06T08:47:51 | 152,849,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from PIL import Image
image = Image.open("colourful.jpg")
# image.point creates a new copy of the original image
image.point(lambda x: x*0).save("dark.jpg")
image.point(lambda x: x*0.1).save("darkest.jpg")
image.point(lambda x: x*0.5).save("darker.jpg")
image.point(lambda x: x*1.5).save("bright.jpg")
image.point(lambda x: x*5.5).save("brighter.jpg")
image.point(lambda x: x*105.5).save("brightest.jpg")
| [
"noreply@github.com"
] | shriyagayatri.noreply@github.com |
857a7552a4d0c642273110e00bb8e7a7fefad271 | 48c9615d646ba9e44ec7b6d2f2b977b92d3e2db7 | /MainRequest.py | 45d314d6deccc76c14f3b5fd8826d11390c6a3a5 | [] | no_license | yukunqi/ProxySpider | 8ca5780360b66fed14d9e2f0cce7c1fb02fad504 | 1ef72a83f82e6791f8c1ece064d349d1f01fb204 | refs/heads/master | 2020-03-09T23:11:39.901841 | 2018-04-11T09:53:52 | 2018-04-11T09:53:52 | 129,052,998 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,285 | py | import pymongo
import random
import time
import requests
from config import *
from ip_spider.log_format import logger
from pymongo.errors import ServerSelectionTimeoutError
from pymongo.errors import ConnectionFailure
from ip_spider.UAS import *
from ip_proxy.delete_not_update_ip import check
from ip_proxy.work_spider import execute_spider
class MainRequest(object):
    """Keeps an in-memory pool of proxy IPs backed by MongoDB and performs
    HTTP GETs through a rotating proxy picked from that pool.

    host/port/database_name/collection_name and the timeout/filter settings
    come from the imported `config` module; `logger` comes from
    ip_spider.log_format.
    """
    def __init__(self,mongo_instance=None,current_ip_proxy=None,ipList=None):
        # mongo_instance/current_ip_proxy/ipList may be pre-seeded by the
        # caller; _connect() below (re)populates ipList and current_ip.
        self.mongo=mongo_instance
        self.current_ip=current_ip_proxy
        self.ipList=ipList
        self.host=host
        self.port=port
        self.database=database_name
        self.collection_name=collection_name
        self._connect()
    def _connect(self):
        """Connect to MongoDB, load the IP list and pick an initial proxy.

        Raises pymongo ConnectionFailure when the server cannot be reached
        within database_connect_time_out milliseconds.
        """
        try:
            self.client = pymongo.MongoClient(self.host,self.port, serverSelectionTimeoutMS=database_connect_time_out,connectTimeoutMS=database_connect_time_out)
            # server_info() forces a round-trip so a bad host fails fast here.
            self.client.server_info()
            msg = 'host: {} port: {} database_name : {} MongoDB数据库连接成功'.format(host, port, self.database)
            logger.info(msg)
            self.db = self.client[self.database]
            self.get_all_IP(self.collection_name)
            self.getRandomOne()
        except ServerSelectionTimeoutError as e:
            msg = 'host: {} port: {} database_name : {} MongoDB数据库连接失败 原因: 可能配置文件出错或者连接超时 超时时间为: {} 毫秒'.format(host, port, self.database, database_connect_time_out)
            raise ConnectionFailure(msg)
    def get_all_IP(self,collection_name):
        """Load every stored proxy IP into self.ipList and return the list.

        Sorted by insert_time descending, then response_time ascending; if the
        collection is empty, triggers a full pool refresh first.
        """
        collection = self.db[collection_name]
        data = collection.find().sort("insert_time", pymongo.DESCENDING).sort("response_time", pymongo.ASCENDING)
        ips = []
        for i in data:
            ips.append(i.get('ip'))
        if len(ips) == 0:
            logger.info("数据库内暂无IP")
            # Empty pool: crawl fresh IPs into the database before continuing.
            self.update_ip_pool()
        self.ipList=ips
        return ips
    def getRandomOne(self):
        """Pick (and remember as current_ip) a random IP from the cached list."""
        self.current_ip=random.choice(self.ipList)
        return self.current_ip
    def get_one(self,index):
        """Return the IP at `index`, wrapping to index 0 when out of range.

        Raises IndexError when the list is missing or empty.
        """
        # NOTE(review): the local name `len` shadows the builtin here.
        len=self.getSize()
        if self.ipList is None or len == 0:
            raise IndexError
        if index >= len:
            index = 0
        return self.ipList[index]
    def getSize(self):
        """Return the number of cached IPs; raises if the list was never loaded."""
        if self.ipList is None:
            raise Exception
        return len(self.ipList)
    def update_ipList(self):
        """Refresh the in-memory IP list from the database."""
        self.get_all_IP(self.collection_name)
    def update_ip_pool(self):
        """Drop stale IPs, crawl fresh ones, then reload the in-memory list."""
        logger.info("开始执行更新IP代理池中的IP并从网上抓取新的IP放入池中")
        start_time = time.time()
        check()
        execute_spider()
        end_time = time.time()
        logger.info("刷新数据库中IP带内存中来")
        self.update_ipList()
        logger.info("IP代理池更新完毕.. 使用时间为 {} 秒".format(end_time - start_time))
    def _request_with_proxy(self,url,use_proxy):
        """GET `url`, optionally through self.current_ip, rotating to another
        random proxy on each failure.

        After proxy_timeout seconds of unsuccessful retries the whole pool is
        assumed dead: sleep proxy_timeout seconds, rebuild the pool, then
        raise requests.exceptions.ProxyError.
        """
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "User-Agent": random.choice(PC_USER_AGENTS)
        }
        # Record when the retry loop started.
        start_time = time.time()
        while True:
            # Compare the current time against the loop start; once past the
            # configured limit, raise requests.exceptions.ProxyError.
            end_time = time.time()
            if int(end_time - start_time) > proxy_timeout:
                logger.info(
                    "request with proxy 方法时间执行过长 可能原因: IP池内IP全部失效或被目标网站封掉IP其他异常错误 当前ip为 {} 程序进行休息状态 休息时长为: {} 秒".format(
                        self.current_ip, proxy_timeout))
                time.sleep(proxy_timeout)
                self.update_ip_pool()
                msg = "IP代理池休息完毕并更新 请重新进行数据抓取 可能原因: 查找历史日志 当前ip为 {}".format(self.current_ip)
                raise requests.exceptions.ProxyError(msg)
            proxy = {
                'http': self.current_ip,
                'https': self.current_ip
            }
            if use_proxy:
                try:
                    response = requests.get(url, proxies=proxy, timeout=request_timeout, headers=headers)
                    code = response.status_code
                    msg = "doing http request successfully current proxy ip is {} status_code :{}".format(self.current_ip, code)
                    logger.info(msg)
                    # 404 is treated as a final answer, not a proxy failure.
                    if code == 404:
                        msg = " 404 Client Error: Not Found for url:{}".format(url)
                        logger.info(msg)
                        return response
                    response.raise_for_status()
                    # custom_filter_str in the body signals a blocked/ban page:
                    # treat it as a dead proxy and rotate.
                    if code == 200 and custom_filter_str != '' and custom_filter_str in response.text:
                        raise Exception
                    return response
                except requests.HTTPError as e:
                    logger.info(e)
                    self.current_ip = self.getRandomOne()
                    msg = "random pick a ip from ipList new ip is {}".format(self.current_ip)
                    logger.info(msg)
                except Exception as e:
                    msg = "ip is {} can't use ".format(self.current_ip)
                    logger.info(msg)
                    self.current_ip = self.getRandomOne()
                    msg = "random pick a ip from ipList new ip is {}".format(self.current_ip)
                    logger.info(msg)
            else:
                # Direct request; on failure the current proxy is still rotated
                # even though it was not used for this request.
                print("no use proxy")
                try:
                    response = requests.get(url, timeout=request_timeout, headers=headers)
                    return response
                except Exception as e:
                    msg = "ip is {} can't use ".format(self.current_ip)
                    logger.info(msg)
                    self.current_ip = self.getRandomOne()
                    msg = "random pick a ip from ipList new ip is {}".format(self.current_ip)
                    logger.info(msg)
| [
"372563150@qq.com"
] | 372563150@qq.com |
24de50616429b8a6e2fbfb2b1db5e0a18e75c0cd | de7596dc6a55592ca9ce925086e194b81733fd37 | /backend/chk_shared_screen1__14206/settings.py | cea2dbec60f4c6dbaa6a2c5133c04f2c01283f2f | [] | no_license | crowdbotics-apps/chk-shared-screen1--14206 | 666f9eb4a8622062676312d2309e81214275d41d | 74e4672c57fa78e90f5ddabef384dfb55e068975 | refs/heads/master | 2023-01-01T00:13:30.182764 | 2020-10-29T12:46:23 | 2020-10-29T12:46:23 | 308,324,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,125 | py | """
Django settings for chk_shared_screen1__14206 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging

# All deployment-specific values are read from environment variables via
# django-environ; required ones (e.g. SECRET_KEY) raise if missing.
env = environ.Env()

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")

ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1

# Trust the platform's reverse proxy to report HTTPS.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
LOCAL_APPS = [
    'home',
    'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    # start fcm_django push notifications
    'fcm_django',
    # end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'chk_shared_screen1__14206.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'chk_shared_screen1__14206.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Default is local SQLite; a DATABASE_URL env var (e.g. Postgres on Heroku)
# overrides it below.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# Served by WhiteNoise with hashed, compressed filenames.

STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# allauth / users
# Email-based login; usernames are not required.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)

REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}

# Custom user model
AUTH_USER_MODEL = "users.User"

# SMTP (SendGrid) outgoing email; falls back to the console backend below
# when credentials are absent.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True

# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
    "FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications

# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}

if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
3aef8b9e1763961935e6b39d7220c0188eb8a48e | 0bb8d664064ceead1d18f6b2ddbd6b59df42caab | /object_detection_3.py | 4844bfd6441ac3fe877175562e3ee20e45fdfd98 | [] | no_license | pavan3175/Streaming-Data-Face-Recognition | cd508fb05dc02b94e07fc1e4f2192fb25fafd630 | d4e06f1950ff6ce5fc73f3fe5a33fde59b7de930 | refs/heads/master | 2020-06-09T23:25:28.937390 | 2019-06-24T15:14:08 | 2019-06-24T15:14:08 | 193,526,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | import numpy
from imageai.Detection import VideoObjectDetection
import os
import cv2

execution_path = 'C:/Users/pparepal/Documents/vc_python'

# Live stream from the default camera (device 0).
c1 = cv2.VideoCapture(0)

detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
detector.loadModel()

# Fix: a cv2.VideoCapture object must be passed via `camera_input`;
# `input_file_path` expects a path string to a video file, so passing the
# capture object there fails inside ImageAI.
video_path = detector.detectObjectsFromVideo(camera_input=c1,
                output_file_path=os.path.join(execution_path, "traffic_detected_1")
                , frames_per_second=20, log_progress=True, minimum_percentage_probability=30)
print(video_path)
"noreply@github.com"
] | pavan3175.noreply@github.com |
0e9d7d2932323728b9a906cd927a4f90ec8474e4 | 5faeeef55289a7499009a15814b6e786d3872d17 | /Day 8/better_caeser_cypher.py | abf3f0cac7a857701654aeffeb742bf8204ea8d7 | [] | no_license | sanskarlather/100-days-of-code-with-python | 1536c6dcf6e4b9a8dd8ca64631e762f941c29508 | 2c6d2f2b655650b9aaecdb0e4005f6e59708384d | refs/heads/master | 2023-03-02T18:05:23.559453 | 2021-02-09T05:19:01 | 2021-02-09T05:19:01 | 325,955,981 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | from caeser_cypher_logo import logo
print(logo)  # ASCII-art banner imported from caeser_cypher_logo
n=0  # menu choice; 3 means quit (see the while loop at the bottom)
def salad(n):
    """Caesar-cipher menu action: 1 = encode, 2 = decode, 3 = quit message.

    Prompts the user for a string and a shift amount, then returns the
    shifted text (the empty string for n == 3 or any other choice).
    Only lowercase a-z is handled correctly; other characters may shift
    out of the letter range.
    """
    test = ""
    if n == 3:
        print("Thank You for trying this out")
    elif n == 1:
        s = input("Enter the string\n")
        a = int(input("by how much\n"))
        for ch in s:
            shifted = ord(ch) + a
            # Wrap anything past 'z' (122) back to the start of the alphabet.
            test += chr(shifted - 26) if shifted > 122 else chr(shifted)
    elif n == 2:
        s = input("Enter the string\n")
        a = int(input("by how much\n"))
        for ch in s:
            shifted = ord(ch) - a
            # Fix: wrap anything below 'a' (97). The original compared
            # against 96, so a shift landing exactly on 96 produced '`'
            # instead of wrapping to 'z'.
            test += chr(shifted + 26) if shifted < 97 else chr(shifted)
    return test
# Menu loop: keep prompting until the user picks 3 (quit). salad() returns
# the transformed text, which is the empty string for choice 3.
while n!=3:
    n=int(input("Enter\n1. for Encoding\n2. for Decoding\n3. to quit\n"))
    print(salad(n))
| [
"lathersanskar@gmail.com"
] | lathersanskar@gmail.com |
9a1baa646b31fef3fc681bd1e82233b44d8cf874 | 0e3c196a4fcf83a5fac79181911633945b9e2029 | /firstsecond.py | 546a694ecbd7da8044ef1f2b465aed3146bae3a2 | [] | no_license | eesa-syed/Python-assignments | 4a46e8b2f4b718e5dec428eece629ba74856943a | ba4917e9bedbfc03468a750262c4a64dec140504 | refs/heads/master | 2023-08-10T22:48:43.525097 | 2021-10-02T01:49:49 | 2021-10-02T01:49:49 | 281,324,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | x=lambda n: str(n)+'tsnrhtdd'[n%5*(n%100^15>4>n%10)::4]
print(x(22))  # x() appends the English ordinal suffix; for 22 this prints "22nd"
"syedeesa2000@gmail.com"
] | syedeesa2000@gmail.com |
f475dbb8fbc4a6317422ec3ccc14a64b41538747 | cd2ce3913595d68e57411123bcfdc89e2be0f9d8 | /pages/views.py | d21f9d049ec9038d10168288e3aab5bc071ad24b | [] | no_license | hotblaze18/django_btredb_realtor_app | 43007b25947b9a242b62a21b1bbfb13af4e4eaa2 | ec5eafc6a8475f3a955210e6dbdfcb8208420ff3 | refs/heads/master | 2022-07-21T15:28:56.687110 | 2020-05-14T09:35:59 | 2020-05-14T09:35:59 | 263,867,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | from django.shortcuts import render
from django.http import HttpResponse
from listings.models import Listing
from realtors.models import Realtor
from listings.choices import state_choices, bedroom_choices, price_choices
# Create your views here.
def index(request):
    """Home page: the three newest published listings plus search choices."""
    latest_listings = Listing.objects.filter(is_published=True).order_by('-list_date')[:3]
    context = {
        'listings': latest_listings,
        'state_choices': state_choices,
        'bedroom_choices': bedroom_choices,
        'price_choices': price_choices
    }
    return render(request, 'pages/index.html', context)
def about(request):
    """About page: all realtors (newest hires first) plus the current MVP."""
    # Get all realtors.
    realtors = Realtor.objects.order_by('-hire_date')
    # Get MVP. Fix: .first() returns None when no realtor has is_mvp=True;
    # the original indexed [0] and raised IndexError (HTTP 500) in that case.
    mvp_realtor = Realtor.objects.filter(is_mvp=True).first()
    context = {
        'mvp_realtor': mvp_realtor,
        'realtors': realtors,
    }
    return render(request, 'pages/about.html', context)
| [
"arora.pranav.2000@gmail.com"
] | arora.pranav.2000@gmail.com |
ae90dcd8b47936e8e2df0f50925d7909def355c7 | 27d2e5298d91cca2b7ab01382080b38ab9f65fa4 | /download_and_preprocess/index_db.py | c0d06bca5bd35f3e11fbed0ccdfb0d27de5dfa2c | [] | no_license | jhabib/midsmumbler | 6a67ffd05d65f59cfc8038ac43d3515287f18f5c | 5463b62a32e34c5873def493947cd3ec93deaff7 | refs/heads/master | 2021-01-12T14:25:19.071624 | 2016-09-26T04:16:49 | 2016-09-26T04:16:49 | 69,205,473 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | import sqlite3 as sq
import pickle
import cPickle as cp
import sys
import os
import time
if __name__ == '__main__':
    # Usage: python index_db.py <directory containing the bigram .db files>
    if len(sys.argv) != 2:
        print 'enter database directory path'
    else:
        db_path_prefix = sys.argv[1]
        # The three database shards to index.
        db_names = ['gpfs1_bigrams.db',
                    'gpfs2_bigrams.db',
                    'gpfs3_bigrams.db'
                    ]
        for name in db_names:
            db_path = os.path.join(db_path_prefix, name)
            print db_path
            with sq.connect(db_path) as conn:
                try:
                    print 'creating index ... '
                    cur = conn.cursor()
                    # Speeds up lookups by the second word of each bigram.
                    cur.execute("CREATE INDEX index_SecondWord ON bigram_counts (SecondWord)")
                    conn.commit()
                except sq.Error as e:
                    # e.g. the index already exists or the table is missing:
                    # roll back and abort the whole run with a nonzero status.
                    if conn:
                        conn.rollback()
                    print 'error occurred: %s' % e.args[0]
                    sys.exit(1)
| [
"mjawadhabib@gmail.com"
] | mjawadhabib@gmail.com |
7d905ed750c1b7499c31f6b380eb109e6df3bde0 | cea16e5eedf3a562bf908940fae49649121be7b7 | /env/lib/python3.7/site-packages/geocodio/__init__.py | 3da58b71986497e00240932d3d306ee341416625 | [] | no_license | petlgunjr/afterwardzazurefunction | f896ac799030688443f8eb7b226a95908e755743 | 823a9187f0ef29208ae43de59bd60c09f52bba67 | refs/heads/master | 2020-12-23T21:51:48.951790 | 2020-03-06T16:40:01 | 2020-03-06T16:40:01 | 237,285,522 | 0 | 0 | null | 2020-01-30T19:32:05 | 2020-01-30T19:09:12 | Python | UTF-8 | Python | false | false | 185 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'David Stanley'
__email__ = 'davidstanley01@gmail.com'
__version__ = '1.0.0'

from . client import Client

# Fix: __all__ must contain attribute *names* (strings). Listing the class
# object itself made `from geocodio import *` raise TypeError.
__all__ = ["Client"]
"petlgunjr@gmail.com"
] | petlgunjr@gmail.com |
ad3dcefde33d8c112e1d0d30f7c3f571c95ded40 | 3e9245f87debdfa1c322d3fb94c559b386b59440 | /Ex37.py | e1fcb1e4ef28c01834ddcb94dae168931f50bb09 | [] | no_license | Keikoyao/learnpythonthehardway | cb33328202f59b43df94ff158eabdb42c9e2a593 | dee000340757b8d11c3685eb6496999e56764840 | refs/heads/master | 2020-06-26T12:04:36.542227 | 2017-07-27T20:49:12 | 2017-07-27T20:49:12 | 97,019,490 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | # -- coding: utf-8 --
#while-loop: use when you want to loop without a known bound; when you have a
#limited target, use a for-loop instead.
#The best way to adjust your code is to print out the key variables and see what's wrong.
#Run and test your code step by step; don't wait to debug until you have a very long script.
#Keywords
#remove() deletes the first matching value
a = [0, 2, 2, 3]
a.remove(2)
# result: a = [0, 2, 3]
#del removes the element at a specific index
a = [3,2,2,1]
del a[1]
#result: [3,2,1]
#pop returns the removed element
a=[4,3,5]
a.pop(1)
#result: 3, a=[4,5]
#global makes variables global (a no-op here at module scope)
global dummy
#assert: raise AssertionError when the condition is false
assert 1 == 1
'''assert condition 相当于
if not condition
raise AssertionError()
'''
'''
yield
break
exec
class
raise
continue
finally
lambda
Escape Sequences
\\
\'
\"
\a
\b
\f
\n
\r
\t
\v
string Formats
%d
%i
%o
%u
%x
%X
%e
%E
%f
%F
%g
%G
%c
%r
%s
%%
operators
//
()
[]
{}
@
;
//=
%=
**=
'''
| [
"noreply@github.com"
] | Keikoyao.noreply@github.com |
adda6ac14ccc16b6a992911ec08f26f39d3c83e6 | d1ac319bf0249be8f147c08917720a60a08be3c2 | /knowledge/models.py | 49d279332a2e69056c218a8a1194a7ffddf03d1e | [] | no_license | eunki7/diquest_demo | 7361041df8bbd75fb2e7a6619fe5622920f2b0cc | 4ed82e5c2041251d5b8e3c6d63fabb50fbcd7278 | refs/heads/master | 2023-03-08T12:47:41.553527 | 2019-04-30T07:22:50 | 2019-04-30T07:22:50 | 172,851,287 | 0 | 3 | null | 2023-02-15T20:51:07 | 2019-02-27T05:44:31 | JavaScript | UTF-8 | Python | false | false | 2,030 | py | from django.contrib.auth.models import User
from django.core.validators import FileExtensionValidator
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
import os
class Knowledge(models.Model):
title = models.CharField('TITLE', max_length=50)
slug = models.SlugField('SLUG', unique=True, allow_unicode=True, help_text='one word for title alias.')
description = models.CharField('DESCRIPTION', max_length=100, blank=True, help_text='simple description text.')
content = models.TextField('CONTENT')
create_date = models.DateTimeField('Create Date', auto_now_add=True)
modify_date = models.DateTimeField('Modify Date', auto_now=True)
attach_file = models.FileField(
upload_to='files/%Y/%m',
name='file',
default=None,
null=True,
blank=True,
validators=[FileExtensionValidator(["pdf", "zip"], message="Zip 또는 PDF 파일만 허용 가능")]
)
owner = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
class Meta:
verbose_name = 'knowledge'
verbose_name_plural = 'knowledges'
db_table = 'knowledge_details'
ordering = ('-modify_date',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('knowledge:knowledge_detail', args=(self.slug,))
def get_previous_knowledge(self):
return self.get_previous_by_modify_date()
def get_next_knowledge(self):
return self.get_next_by_modify_date()
def get_file_absolute_url(self):
return self.file.url
def get_file_pdf_id_url(self):
return reverse('knowledge:knowledge_pdf_viewer', args=(self.id,))
def get_file_extension(self):
_, extension = os.path.splitext(self.file.name)
return extension.replace('.', '')
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.title, allow_unicode=True)
super(Knowledge, self).save(*args, **kwargs)
| [
"outsider7224@hanmail.net"
] | outsider7224@hanmail.net |
a3724100878ac05f674751bd7c7e91ab5a262726 | 7191eedd5ecbf39854864decfa3415ff32a2e644 | /DataDashboard/pagination.py | 4223ec7f96a54f37b8c096f997469fee80c31009 | [] | no_license | GID-Dashboard/GISDashboard | 385755c4d7b886af80fce084ed11217980ee8e62 | c382f4ae0b2b16de8be66e40262a8e5c61000b69 | refs/heads/master | 2022-07-14T22:23:15.288757 | 2020-05-12T02:54:24 | 2020-05-12T02:54:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from rest_framework import pagination
class StandardResultsSetPagination(pagination.PageNumberPagination):
page_size = 7 # change the records per page from here
page_size_query_param = 'page_size' | [
"jdjwright@gmail.com"
] | jdjwright@gmail.com |
0b6256f4cd64720d720282b2af5c3ad1b14c828a | 31164e93151ba3224672075460a73f0b86ec4341 | /scrapbook/migrations/0016_auto__del_unique_photo_entry__order__del_unique_entry__order_book.py | 54593d165bf9f671f9474a02b1c8739fa6b80e77 | [] | no_license | tylerbrandt/Scrappy | 5817414691a6a8bd4ce51f881f8378df76dd534a | d39ae9f544cd079626cf2c6ba44f6a63ba317e4a | refs/heads/master | 2021-01-22T05:16:07.785933 | 2012-05-21T17:12:36 | 2012-05-21T17:12:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,924 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Entry', fields ['_order', 'book']
db.delete_unique('scrapbook_entry', ['_order', 'book_id'])
# Removing unique constraint on 'Photo', fields ['entry', '_order']
db.delete_unique('scrapbook_photo', ['entry_id', '_order'])
def backwards(self, orm):
# Adding unique constraint on 'Photo', fields ['entry', '_order']
db.create_unique('scrapbook_photo', ['entry_id', '_order'])
# Adding unique constraint on 'Entry', fields ['_order', 'book']
db.create_unique('scrapbook_entry', ['_order', 'book_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'scrapbook.book': {
'Meta': {'object_name': 'Book'},
'cover': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scrapbook.Photo']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'scrapbook.checkin': {
'Meta': {'object_name': 'Checkin'},
'checkin_id': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'venueURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'venue_name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'scrapbook.entry': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Entry'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scrapbook.Book']", 'null': 'True', 'blank': 'True'}),
'checkin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scrapbook.Checkin']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'cover_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'coverphoto'", 'null': 'True', 'to': "orm['scrapbook.Photo']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'scrapbook.photo': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Photo'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scrapbook.Entry']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
}
}
complete_apps = ['scrapbook'] | [
"tyler@Tylers-MacBook-Air.local"
] | tyler@Tylers-MacBook-Air.local |
7e401ebea09f60f6706a105e4e6ce60eaae91355 | c4c427bb2c5cf463690ea7132e95a9e7b8fe0146 | /112. Path Sum.py | 0b60340f373382a225a2cadf9816413c2acc478b | [] | no_license | taimo22/LeetCode_answers | fff572b58c70bcdf81b8eb0bd83ce9cda0e245c3 | 2132a330f5927667b9a99f90f9d58c0ffbd5904e | refs/heads/main | 2023-02-24T06:31:50.320543 | 2021-01-31T00:25:55 | 2021-01-31T00:25:55 | 334,540,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | #112. Path Sum
#https://leetcode.com/problems/path-sum/solution/
#my ans
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def hasPathSum(self, root: TreeNode, sum: int) -> bool:
stack = []
if root is not None:
stack.append((root.val, root))
while stack != []:
current_sum, root = stack.pop()
if root is not None and (root.right is None) and (root.left is None) and (current_sum==sum):
return True
if root is not None:
stack.append((current_sum + getattr(root.left, "val", 0), root.left))
stack.append((current_sum + getattr(root.right, "val", 0), root.right))
return False
| [
"45529701+taimo22@users.noreply.github.com"
] | 45529701+taimo22@users.noreply.github.com |
0e03ddfe7452fb12cceb00307946b405aad8bcab | 88dbfaa5134199ecb75b5220dbd1aa2d91f80d56 | /tests/test_encodings.py | 6e00b86ea702be04bab8fd0efe11cf141ff593a5 | [
"MIT"
] | permissive | AMDmi3/jsonslicer | 60f4ce71eb54f50f713a1315540ff1c38e8fbec8 | 2d72bf2fc52a210123e6145fed5a1bcf2ce6300f | refs/heads/master | 2022-10-28T02:13:28.527564 | 2022-10-25T16:43:47 | 2022-10-25T16:59:05 | 165,882,846 | 48 | 7 | MIT | 2020-05-20T07:19:48 | 2019-01-15T16:14:51 | C++ | UTF-8 | Python | false | false | 3,908 | py | # Copyright (c) 2019 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from .common import run_js
SKIP_MISMATCHING_PATH_ENCODING = False
class TestJsonSlicerEncodings(unittest.TestCase):
def test_encodings_bytes_unicode_default(self):
self.assertEqual(
run_js(b'{"foo":"bar"}', ('foo',)),
['bar']
)
def test_encodings_bytes_unicode_unicode(self):
self.assertEqual(
run_js(b'{"foo":"bar"}', ('foo',), encoding='utf-8'),
['bar']
)
@unittest.skipIf(SKIP_MISMATCHING_PATH_ENCODING, 'Tests for mismatching path encoding skipped')
def test_encodings_bytes_unicode_binary(self):
self.assertEqual(
run_js(b'{"foo":"bar"}', ('foo',), binary=True),
[b'bar']
)
def test_encodings_unicode_unicode_default(self):
self.assertEqual(
run_js('{"foo":"bar"}', ('foo',)),
['bar']
)
def test_encodings_unicode_unicode_unicode(self):
self.assertEqual(
run_js('{"foo":"bar"}', ('foo',), encoding='utf-8'),
['bar']
)
@unittest.skipIf(SKIP_MISMATCHING_PATH_ENCODING, 'Tests for mismatching path encoding skipped')
def test_encodings_unicode_unicode_binary(self):
self.assertEqual(
run_js('{"foo":"bar"}', ('foo',), binary=True),
[b'bar']
)
@unittest.skipIf(SKIP_MISMATCHING_PATH_ENCODING, 'Tests for mismatching path encoding skipped')
def test_encodings_bytes_bytes_default(self):
self.assertEqual(
run_js(b'{"foo":"bar"}', (b'foo',)),
['bar']
)
@unittest.skipIf(SKIP_MISMATCHING_PATH_ENCODING, 'Tests for mismatching path encoding skipped')
def test_encodings_bytes_bytes_unicode(self):
self.assertEqual(
run_js(b'{"foo":"bar"}', (b'foo',), encoding='utf-8'),
['bar']
)
def test_encodings_bytes_bytes_binary(self):
self.assertEqual(
run_js(b'{"foo":"bar"}', (b'foo',), binary=True),
[b'bar']
)
@unittest.skipIf(SKIP_MISMATCHING_PATH_ENCODING, 'Tests for mismatching path encoding skipped')
def test_encodings_unicode_bytes_default(self):
self.assertEqual(
run_js('{"foo":"bar"}', (b'foo',)),
['bar']
)
@unittest.skipIf(SKIP_MISMATCHING_PATH_ENCODING, 'Tests for mismatching path encoding skipped')
def test_encodings_unicode_bytes_unicode(self):
self.assertEqual(
run_js('{"foo":"bar"}', (b'foo',), encoding='utf-8'),
['bar']
)
def test_encodings_unicode_bytes_binary(self):
self.assertEqual(
run_js('{"foo":"bar"}', (b'foo',), binary=True),
[b'bar']
)
if __name__ == '__main__':
unittest.main()
| [
"amdmi3@amdmi3.ru"
] | amdmi3@amdmi3.ru |
18665ea34c033295b8e4700a027f68063c854ab4 | dc99adb79f15b3889a7ef6139cfe5dfc614889b8 | /Aplikace_1_0/Source/libs/datastore/permanent_datastore.py | 6935c3ec09a6f86e8c847f2670ed1d8ef4f13de6 | [] | no_license | meloun/ew_aplikace | 95d1e4063a149a10bb3a96f372691b5110c26b7b | f890c020ad8d3d224f796dab3f1f222c1f6ba0eb | refs/heads/master | 2023-04-28T06:43:12.252105 | 2023-04-18T19:59:36 | 2023-04-18T19:59:36 | 2,674,595 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,752 | py | # -*- coding: utf-8 -*-
import libs.datastore.datastore as datastore
import libs.db.db_json as db_json
class PermanentDatastore(datastore.Datastore):
def __init__(self, filename, default_data):
#create datastore, default dictionary
datastore.Datastore.__init__(self, default_data)
#create db, restore: permanents from default dict
self.db = db_json.Db(filename, self.GetAllPermanents())
#update datastore from db
self.Update(self.db.load())
#consistency check, if not consistent then update the datastore
self.consistency_check = self.UpdateConsistencyDict(self.data, default_data)
print "I: Dstore: consistency check: ", self.consistency_check
if(self.consistency_check == False):
self.db.dump(self.GetAllPermanents())
def Update(self, update_dict):
#update data
datastore.Datastore.Update(self, update_dict)
#update file with permanents datapoints
self.db.dump(self.GetAllPermanents())
#update consistency
def UpdateConsistencyDict(self, destination, source):
ret = True
for k,v in source.iteritems():
if isinstance(v, dict):
#print "UCD----UCL", k, v
if self.UpdateConsistencyDict(destination[k], v) == False:
ret = False
elif isinstance(v, list):
if self.UpdateConsistencyList(destination[k], v) == False:
ret = False
else:
if k not in destination:
print "----NOT MATCH", k, v
destination[k] = v
ret = False
#else:
# print "-MATCH", k, v
return ret
def UpdateConsistencyList(self, destination, source):
ret = True
for i in range(len(source)):
if isinstance(source[i], dict):
#print "UCL----UCD", source[i]
if self.UpdateConsistencyDict(destination[i], source[i]) == False:
ret = False
elif isinstance(source[i], list):
#print "UCL----UCL", source[i]
if self.UpdateConsistencyList(destination[i], source[i]) == False:
ret = False
return ret
def Set(self, name, value, section = "GET_SET", permanent = True):
#update data
changed = datastore.Datastore.Set(self, name, value, section)
#update file
if changed and permanent and self.IsPermanent(name):
#print "zapis", name, value
self.db.dump(self.GetAllPermanents())
def SetItem(self, name, keys, value, section = "GET_SET", permanent = True, changed = True):
if(value == datastore.Datastore.GetItem(self, name, keys, section)):
return
#set item
datastore.Datastore.SetItem(self, name, keys, value, section, changed)
#store permanents to the file
if permanent and self.IsPermanent(name):
#print "zapis", name, keys, value, section
self.db.dump(self.GetAllPermanents())
if __name__ == "__main__":
mydatastore = PermanentDatastore('conf/conf_work.json', {"a":1, "b":2})
| [
"lubos.melichar@gmail.com"
] | lubos.melichar@gmail.com |
0e91cee1855fba255ec0b1aea83b7a10dc18e972 | ee85dbdd084da9efef362ec866718596c1bc1de0 | /code/probesim/strategy/mwscapprox.py | 0d56bebe05c57fa1f9b2f0e8cd873b4a1774d6df | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | carolinegsun/NetPAS | 2eea5feaf7f28fb8d608dd7f3e13db4d3a93694d | 8070af340091336a99c7503f88a0750b25b5811c | refs/heads/master | 2022-04-09T14:33:50.586359 | 2020-03-04T21:37:53 | 2020-03-04T21:37:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,214 | py | import random
import sys
from .. import util as ps
from ..heapqup import heapqup
# ---- helper functions ----
def number_path_set(path_list):
'''
This function takes a path list and numbers it (that is, add
a unique integer ID to each of the path in the set).
This function returns a hash map (dictionary).
'''
id_path_dict = dict()
i = 1
for path in path_list:
id_path_dict[i] = path
i += 1
return id_path_dict
def create_edge_dictionaries(id_path_dict):
id_edge_set_dict = dict()
all_edge_dict = dict()
for pid in id_path_dict:
for edge in ps.edgesin(id_path_dict[pid]):
id_edge_set_dict.setdefault(pid, set()).add(edge)
all_edge_dict.setdefault(edge, set()).add(pid)
return id_edge_set_dict, all_edge_dict
# ---- main simulation function ----
def simulate(graph, paths, *args, **kwargs):
"""
Runs the min-wt-set-cover probing strategy.
:param graph: Network topology
:param paths: Possible probing paths
:param args: Captures extra arguments
:param kwargs: The following optional keyword arguments are supported:
-- Strategy specific --
alpha: Probe/load tradeoff parameter [ISCC'17]
A setting of alpha=0 [default] will minimize probe (path) count
A setting of alpha=1 will minimize average edge load
k: Number of paths to probe per timestep in expectation
If specified, each path will be probed with probability min(1, float(k)/len(cover))
The default value is None, which will probe the entire cover for a single timestep (ignoring maxsteps), returning [cover]
-- Simulation options --
trialseed: random seed for the trial (randomly chosen if none is provided)
stopOnceCovered: end the trial when all target edges are covered
totalcov: target edges to cover (all probeable edges by default)
maxsteps: maximum number of timesteps (50 by default)
:return: a list of lists of (src,dst) pairs indicating which paths are probed at which time
if k is None, the list has length 1 and the set at index 0 contains paths in the computed cover
"""
alpha = float(kwargs.get('alpha', 0))
if alpha < 0 or alpha > 1:
raise ValueError
# trialseed = kwargs.get('trialseed', random.randint(-sys.maxint - 1, sys.maxint))
trialseed = kwargs.get('trialseed', None)
if trialseed is None:
trialseed = random.randint(-sys.maxint - 1, sys.maxint)
all_edges = ps.all_edge_set(paths)
stopOnceCovered = kwargs.get('stopOnceCovered', False)
if stopOnceCovered:
totalcov = kwargs.get('totalcov', all_edges)
else:
totalcov = None
maxsteps = kwargs.get('maxsteps', 50)
# FIRST, establish set cover using min-wt set-cover approximation algorithm
cover = list()
rgen = random.Random(trialseed)
path_list = ps.plfrompd(paths)
# assigning unique integer ID to each path -- keys are path IDs, values are lists of nodes (paths) -OK
id_path_dict = number_path_set(path_list)
id_edge_set_dict, all_edge_dict = create_edge_dictionaries(id_path_dict) # OK
score = lambda p: len(id_edge_set_dict[p]) / float(alpha * (len(id_path_dict[p]) - 1) + 1)
# - the heap is created from a dictionary, assumed to be unordered
# - the heap itself contains scores, which are totally & predictably order-able items
# - extraction of a path of max score is done using a reproducible tiebreaker using the seeded RNG
# (use of RNG object with lambda function parameter was tested)
max_PQ = heapqup({path_id: score(path_id) for path_id in id_path_dict}, reverse=True, tiebreak=lambda pathset: rgen.choice(sorted(pathset)))
all_path_edge_set = ps.all_edge_set(paths)
# while there are still edges to cover
while len(all_path_edge_set) > 0:
# get the path that has the most uncovered edges (uses reproducible tie-break)
curr_path_id = max_PQ.poll()
# this set will be used to update
# the priorities of the paths that were
# affected by the removal of the path selected above
affected_path_set = set()
# add path to probing sequence
curr_path = id_path_dict[curr_path_id]
cover.append((curr_path[0], curr_path[-1]))
# iterate through all edges in this path
# (iteration order doesn't matter here)
for edge in id_edge_set_dict[curr_path_id].copy():
# if the edge is in the set of uncovered
# edges, remove it
if edge in all_path_edge_set:
all_path_edge_set.remove(edge)
# get the path id set that corresponds to this edge
path_id_set_of_edge = all_edge_dict[edge]
# for each path in all the paths that contain this
# edge, delete the edge from the path's uncovered edge set
# and note this happened in an affected-path set
for pid in path_id_set_of_edge:
if edge in id_edge_set_dict[pid]:
id_edge_set_dict[pid].remove(edge)
affected_path_set.add(pid)
# update priorities for paths affected
for path_id in affected_path_set:
max_PQ[path_id] = score(path_id)
# NEXT, run the simulation to generate a probing sequence
# OR, simply return the cover as a single timestep if k is None
k = kwargs.get('k', None)
if k is None:
return [list(cover)]
seq = []
p = float(k)/len(cover)
cover.sort() # sort cover so that a predictable order of paths is used in random path selection
for _ in range(maxsteps):
# iteration here should proceed in order through the list cover
current = list(filter(lambda _: rgen.random() < p, cover))
seq.append(current)
if stopOnceCovered:
new_edges = ps.all_edge_set([paths[s][d] for s, d in current])
totalcov -= new_edges
if len(totalcov) == 0:
break
return seq | [
"noreply@github.com"
] | carolinegsun.noreply@github.com |
baff32f8d4e959a29d65143fcf22a0cd1dad34df | b913dee6f00862aa64b0320a8ec2a4bd10b657cf | /book/migrations/0001_initial.py | f7f8c0e3a058d75ccfb452dd048ec35f72bf7bbe | [] | no_license | carolinamaciel1/API_BOOKS-DJANGO_REST | ffb116975fbe72046a45d0119ec34193fa30276c | 48dbf1d64a4e4408790452f966a5e36dd8787fa0 | refs/heads/master | 2022-12-11T08:28:04.598906 | 2019-07-04T16:19:59 | 2019-07-04T16:19:59 | 194,790,373 | 0 | 0 | null | 2022-12-08T05:50:28 | 2019-07-02T04:52:12 | Python | UTF-8 | Python | false | false | 1,851 | py | # Generated by Django 2.2.3 on 2019-07-02 05:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('author', models.CharField(max_length=200)),
('publishing_company', models.CharField(blank=True, max_length=150, null=True)),
('registration_date', models.DateField()),
],
),
migrations.CreateModel(
name='CopyBook',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_register', models.DateField()),
('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.Book')),
],
),
migrations.CreateModel(
name='RentBook',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_initial_rent', models.DateField()),
('delivery_date_forecast', models.DateField()),
('date_devolution', models.DateField()),
('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='book.CopyBook')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"carolinamaciel37@gmail.com"
] | carolinamaciel37@gmail.com |
d5c800c6b1fa2c7ae68a564d4fff250af173b219 | 20448841fe75541d2f5b9879178e0f874daede38 | /Set .union() Operation.py | eb2344ff7dd432ac632bcd147dc8ff2a148d56e9 | [] | no_license | dnskvamsi/Hacker_rank_python_solutions | d421bfe231c3d6913fc5285c254ec67ea2c76845 | d118734f2cee1df64df02e0421fff68aa5c51cc7 | refs/heads/main | 2023-05-03T23:28:20.035678 | 2021-05-26T10:59:31 | 2021-05-26T10:59:31 | 370,945,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
s1=int(input())
stu=list(map(int,input().split(" ")))
s2=int(input())
stu2=list(map(int,input().split(" ")))
k=set(stu).union(set(stu2))
print(len(list(k)))
| [
"noreply@github.com"
] | dnskvamsi.noreply@github.com |
216717ee4e222b17a12889ece746e76bc0bf1825 | e75479bc0ec06b726c01e7be7cb6b1326d4fa617 | /EjerciciosPythonSewan/Parte_1/Ejercicio_8.py | d2e35c3f78dc142110f1cf8fc8a38ab045a8296c | [] | no_license | alvaromartinezsanchez/EjerciciosPython | d92a0d8189710e929f6ec78258b5b3b048a21ead | cc278281498fce1037feaba1cb5b752846821da3 | refs/heads/master | 2022-11-12T22:35:39.365687 | 2020-06-22T08:57:28 | 2020-06-22T08:57:28 | 274,087,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,816 | py | """Bandeja de entrada
En este ejercicio vamos a crear una bandeja de entrada de mensajes
enviados a usuarios, así como tres funciones, una para enviar mensajes,
otra obtener los pendientes de leer y otra para leer un mensaje.
-Cada mensaje tendrá tres campos, origen (nombre del usuarios que lo envió),
contenido, y si se ha leído o no
-Los usuarios se crean de forma dinámica al enviar un mensaje
-La función para enviar mensajes recibe el nombre del usuario que lo
envía y el contenido
-La función para leer un mensaje muestra el contenido, quien lo envía
lo marca como leído.
-La función para obtener el listado de los mensajes pendientes por
leer para un usuario dado, sólo muestra los que no están leídos,
un resumen del contenido del mensaje, y un identificador de este. """
bandejaEntrada=[]
bandejaEntradaUsuario=[]
Num_Mensaje=0
def enviarMensaje(origen, contenido):
global Num_Mensaje
Mensaje={"Id": Num_Mensaje, "Origen": origen, "Contenido": contenido, "Leido" : False}
bandejaEntrada.append(Mensaje)
Num_Mensaje+=1
return bandejaEntrada
def leerMensaje(id):
mensaje=""
for i in range(len(bandejaEntrada)):
if bandejaEntrada[i]["Id"]==id:
mensaje+="ID Origen Contenido Leido \n \n"
mensaje+= str(bandejaEntrada[i]["Id"])+" "
mensaje+= bandejaEntrada[i]["Origen"]+" "
mensaje+= bandejaEntrada[i]["Contenido"]+" "
bandejaEntrada[i]["Leido"]="True"+" "
mensaje+= bandejaEntrada[i]["Leido"]
return mensaje
def verMensajesNoLeidos(usuario):
Resumen=""
for i in range(len(bandejaEntrada)):
if bandejaEntrada[i]["Origen"]==usuario and bandejaEntrada[i]["Leido"]==False:
Resumen=bandejaEntrada[i]["Contenido"][0:15]
Mensaje={"Id": bandejaEntrada[i]["Id"],"Origen": bandejaEntrada[i]["Origen"],"Resumen":Resumen }
bandejaEntradaUsuario.append(Mensaje)
verTodosLosMensajes(bandejaEntradaUsuario)
def verTodosLosMensajes(bandeja):
mensaje=iter(bandeja)
for c in range(len(bandeja)):
print(next(mensaje))
print(" ")
enviarMensaje("Alvaro","Este es el contenido del mensaje")
enviarMensaje("Manolo","Parece que funciona....")
enviarMensaje("Alvaro","No lo dudaba en ningun momento..!!")
enviarMensaje("Luis","Si eso parece, ahora faltan las demas funciones")
enviarMensaje("Maria","Y a mi queeeeeeeeeeeeeeeeeeeeeee...")
print("----------BANDEJA ENTRADA--TODOS LOS MENSAJES----------\n")
verTodosLosMensajes(bandejaEntrada)
print("--------------LEER MENSAJE- Por Id Mensaje---------------")
print(leerMensaje(1))
print()
print("-------VER MENSAJES NO LEIDOS--Por Nombre Origen--------\n")
verMensajesNoLeidos("Alvaro") | [
"noreply@github.com"
] | alvaromartinezsanchez.noreply@github.com |
179495f51c0ca3686b172e62eca34a2ff82cb3eb | 5883449aa14eb5e8b3fa6ad4d03d1dfacc40ccee | /Amazon_Framework/DentOsTestbedLib/src/dent_os_testbed/lib/iptables/linux/linux_ip_tables_impl.py | 48c2c6fbe7b5cc156f72e7c3ec5682b0261dd382 | [
"Apache-2.0"
] | permissive | tld3daniel/testing | 826183f30d65f696e8476d4a584c4668355e0cb3 | e4c8221e18cd94e7424c30e12eb0fb82f7767267 | refs/heads/master | 2023-09-01T12:39:26.845648 | 2021-08-11T15:53:16 | 2021-08-11T15:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,339 | py | from dent_os_testbed.lib.iptables.linux.linux_ip_tables import LinuxIpTables
class LinuxIpTablesImpl(LinuxIpTables):
"""
iptables [-t table] {-A|-C|-D} chain rule-specification
ip6tables [-t table] {-A|-C|-D} chain rule-specification
iptables [-t table] -I chain [rulenum] rule-specification
iptables [-t table] -R chain rulenum rule-specification
iptables [-t table] -D chain rulenum
iptables [-t table] -S [chain [rulenum]]
iptables [-t table] {-F|-L|-Z} [chain [rulenum]] [options...]
iptables [-t table] -N chain
iptables [-t table] -X [chain]
iptables [-t table] -P chain target
iptables [-t table] -E old-chain-name new-chain-name
rule-specification = [matches...] [target]
match = -m matchname [per-match-options]
target = -j targetname [per-target-options]
"""
def format_update_rules(self, command, *argv, **kwarg):
"""
-A, --append chain rule-specification
Append one or more rules to the end of the selected chain. When the source and/or destination names resolve
to more than one address, a rule will be added for each possible address combination.
-C, --check chain rule-specification
Check whether a rule matching the specification does exist in the selected chain. This command uses the same
logic as -D to find a matching entry, but does not alter the existing
iptables configuration and uses its exit code to indicate success or failure.
-D, --delete chain rule-specification
-D, --delete chain rulenum
Delete one or more rules from the selected chain. There are two versions of this command: the rule
can be specified as a number in the chain (starting at 1 for the first rule) or a rule to match.
-I, --insert chain [rulenum] rule-specification
Insert one or more rules in the selected chain as the given rule number. So, if the rule number is 1,
the rule or rules are inserted at the head of the chain. This is also the
default if no rule number is specified.
-R, --replace chain rulenum rule-specification
Replace a rule in the selected chain. If the source and/or destination names resolve to multiple
addresses, the command will fail. Rules are numbered starting at 1.
"""
params = kwarg["params"]
cmd = "iptables "
cmd += "-t {} ".format(params["table"]) if "table" in params else ""
cmd += "--{} ".format(command)
cmd += "{} ".format(params["chain"]) if "chain" in params else ""
if "in-interface" in params:
cmd += "-i {} ".format(params["in-interface"])
if "source" in params:
cmd += "-s {} ".format(params["source"])
if "destination" in params:
cmd += "-d {} ".format(params["destination"])
if "protocol" in params:
cmd += "-p {} ".format(params["protocol"])
if "dport" in params:
cmd += "--dport {} ".format(params["dport"])
if "sport" in params:
cmd += "--sport {} ".format(params["sport"])
if "mac-source" in params:
cmd += "-m mac --mac-source {} ".format(params["mac-source"])
if "target" in params:
cmd += "-j {} ".format(params["target"])
return cmd
def format_show_rules(self, command, *argv, **kwarg):
"""
-L, --list [chain]
List all rules in the selected chain. If no chain is selected, all chains are listed. Like every other
iptables command, it applies to the specified table (filter is the default), so NAT rules get listed by
iptables -t nat -n -L
Please note that it is often used with the -n option, in order to avoid long reverse DNS lookups.
It is legal to specify the -Z (zero) option as well, in which case the chain(s) will be atomically listed
and zeroed. The exact output is affected by the other arguments given. The exact rules are suppressed
until you use iptables -L -v or iptables-save(8).
-S, --list-rules [chain]
Print all rules in the selected chain. If no chain is selected, all chains are printed like iptables-save.
Like every other iptables command, it applies to the specified table (filter is the default).
-F, --flush [chain]
Flush the selected chain (all the chains in the table if none is given). This is equivalent to deleting
all the rules one by one.
-Z, --zero [chain [rulenum]]
Zero the packet and byte counters in all chains, or only the given chain, or only the given rule in a chain.
It is legal to specify the -L, --list (list) option as well, to see the counters immediately before they are
cleared. (See above.)
"""
params = kwarg["params"]
############# Implement me ################
cmd = "iptables "
cmd += "-t {} ".format(params["table"]) if "table" in params else ""
cmd += "{} ".format(params["cmd_options"]) if "cmd_options" in params else ""
cmd += "--{} ".format(command)
if "chain" in params:
cmd += "{} ".format(params["chain"])
return cmd
def parse_show_rules(self, command, output, *argv, **kwarg):
lines = output.split("\n")
chain = None
chains = {}
rules = []
for line in lines:
if line.startswith("Chain"):
if chain is not None:
chains[chain] = rules
rules = []
chain = line.split(" ")[1]
continue
if line.startswith("num"):
continue
r = {}
t = line.split()
if len(t) < 10:
continue
"""
num pkts bytes target prot opt in out source destination
1 6432 353K ACCEPT all -- * * 127.0.0.1 127.0.0.1
2 0 0 ACCEPT tcp -- swp+ * 0.0.0.0/0 10.2.96.0/19 tcp spt:8883
"""
r["num"] = t.pop(0)
r["packets"] = t.pop(0)
r["bytes"] = t.pop(0)
r["target"] = t.pop(0)
r["keys"] = {}
r["keys"]["ipproto"] = t.pop(0)
r["keys"]["opt"] = t.pop(0)
r["keys"]["in"] = t.pop(0)
r["keys"]["out"] = t.pop(0)
r["keys"]["srcIp"] = t.pop(0)
r["keys"]["dstIp"] = t.pop(0)
if t:
more = t.pop(0)
if more in ["tcp", "udp"]:
while t:
l4port = t.pop(0)
if l4port.startswith("dpt"):
r["keys"]["dstPort"] = l4port.split(":")[1]
if l4port.startswith("spt"):
r["keys"]["srcPort"] = l4port.split(":")[1]
rules.append(r)
if chain is not None:
chains[chain] = rules
return chains
def format_update_chain(self, command, *argv, **kwarg):
"""
-N, --new-chain chain
Create a new user-defined chain by the given name. There must be no target of that name already.
-X, --delete-chain [chain]
Delete the optional user-defined chain specified. There must be no references to the chain.
If there are, you must delete or replace the referring rules before the chain can be deleted.
The chain must be empty, i.e. not contain any rules. If no argument is given, it will attempt
to delete every non-builtin chain in the table.
-P, --policy chain target
Set the policy for the built-in (non-user-defined) chain to the given target. The policy target
must be either ACCEPT or DROP.
-E, --rename-chain old-chain new-chain
Rename the user specified chain to the user supplied name. This is cosmetic, and has no effect
on the structure of the table.
"""
params = kwarg["params"]
cmd = "iptables {} ".format(command)
############# Implement me ################
return cmd
| [
"muchetan@amazon.com"
] | muchetan@amazon.com |
db04e4251289a2b13df6f327d687283cde1e585e | aaa762ce46fa0347cdff67464f56678ea932066d | /AppServer/lib/django-0.96/django/core/mail.py | b9966c2af023eea017e2bf5a0f22fe9c3067243a | [
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-3-Clause",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1"
] | permissive | obino/appscale | 3c8a9d8b45a6c889f7f44ef307a627c9a79794f8 | be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f | refs/heads/master | 2022-10-01T05:23:00.836840 | 2019-10-15T18:19:38 | 2019-10-15T18:19:38 | 16,622,826 | 1 | 0 | Apache-2.0 | 2022-09-23T22:56:17 | 2014-02-07T18:04:12 | Python | UTF-8 | Python | false | false | 4,253 | py | # Use this module for e-mailing.
from django.conf import settings
from email.MIMEText import MIMEText
from email.Header import Header
from email.Utils import formatdate
import smtplib
import socket
import time
import random
# Cache the hostname, but do it lazily: socket.getfqdn() can take a couple of
# seconds, which slows down the restart of the server.
class CachedDnsName(object):
    """Lazily resolve and memoize the local fully-qualified domain name.

    socket.getfqdn() can take a couple of seconds, so the lookup is
    deferred until first use and the result is cached on the instance.
    """
    def __str__(self):
        return self.get_fqdn()

    def get_fqdn(self):
        # EAFP: return the cached value if present, resolve it otherwise.
        try:
            return self._fqdn
        except AttributeError:
            self._fqdn = socket.getfqdn()
            return self._fqdn
# Module-level singleton; used below when composing Message-ID headers.
DNS_NAME = CachedDnsName()
class BadHeaderError(ValueError):
    """Raised by SafeMIMEText when a header value contains a newline
    (which would otherwise allow mail header injection)."""
    pass
class SafeMIMEText(MIMEText):
    # MIMEText subclass that validates header values before setting them.
    # NOTE: Python 2-era code (old-style `raise cls, msg` below); the code
    # is kept byte-identical.
    def __setitem__(self, name, val):
        "Forbids multi-line headers, to prevent header injection."
        # CR/LF in a header value would let an attacker smuggle extra
        # headers into the message.
        if '\n' in val or '\r' in val:
            raise BadHeaderError, "Header values can't contain newlines (got %r for header %r)" % (val, name)
        # Subjects are encoded with the configured charset so non-ASCII works.
        if name == "Subject":
            val = Header(val, settings.DEFAULT_CHARSET)
        MIMEText.__setitem__(self, name, val)
def send_mail(subject, message, from_email, recipient_list, fail_silently=False, auth_user=None, auth_password=None):
    """
    Send a single message to every address in ``recipient_list``.

    All recipients appear together in the 'To' header. When ``auth_user``
    or ``auth_password`` is None, the corresponding EMAIL_HOST_USER /
    EMAIL_HOST_PASSWORD setting supplies the credential. Returns the
    number of e-mails sent (delegates to send_mass_mail).
    """
    user = settings.EMAIL_HOST_USER if auth_user is None else auth_user
    password = settings.EMAIL_HOST_PASSWORD if auth_password is None else auth_password
    datatuple = [[subject, message, from_email, recipient_list]]
    return send_mass_mail(datatuple, fail_silently, user, password)
def send_mass_mail(datatuple, fail_silently=False, auth_user=None, auth_password=None):
    """
    Given a datatuple of (subject, message, from_email, recipient_list), sends
    each message to each recipient list. Returns the number of e-mails sent.
    If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
    If auth_user and auth_password are set, they're used to log in.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
    """
    # Fall back to the configured SMTP credentials when none are given.
    if auth_user is None:
        auth_user = settings.EMAIL_HOST_USER
    if auth_password is None:
        auth_password = settings.EMAIL_HOST_PASSWORD
    # One SMTP connection is opened up front and reused for the whole batch.
    try:
        server = smtplib.SMTP(settings.EMAIL_HOST, settings.EMAIL_PORT)
        if auth_user and auth_password:
            server.login(auth_user, auth_password)
    except:
        # Legacy bare except: any connection/login failure is swallowed
        # when fail_silently is set, re-raised otherwise.
        if fail_silently:
            return
        raise
    num_sent = 0
    for subject, message, from_email, recipient_list in datatuple:
        # Entries with no recipients are skipped rather than erroring out.
        if not recipient_list:
            continue
        from_email = from_email or settings.DEFAULT_FROM_EMAIL
        msg = SafeMIMEText(message, 'plain', settings.DEFAULT_CHARSET)
        msg['Subject'] = subject
        msg['From'] = from_email
        msg['To'] = ', '.join(recipient_list)
        msg['Date'] = formatdate()
        # Random component keeps Message-IDs unique across processes.
        try:
            random_bits = str(random.getrandbits(64))
        except AttributeError: # Python 2.3 doesn't have random.getrandbits().
            random_bits = ''.join([random.choice('1234567890') for i in range(19)])
        msg['Message-ID'] = "<%d.%s@%s>" % (time.time(), random_bits, DNS_NAME)
        try:
            server.sendmail(from_email, recipient_list, msg.as_string())
            num_sent += 1
        except:
            # A message that fails to send is silently skipped (and not
            # counted) when fail_silently is set.
            if not fail_silently:
                raise
    # NOTE(review): if quit() fails with fail_silently set, this returns
    # None instead of num_sent - existing behaviour, kept as-is.
    try:
        server.quit()
    except:
        if fail_silently:
            return
        raise
    return num_sent
def mail_admins(subject, message, fail_silently=False):
    """Send a message to every address listed in the ADMINS setting."""
    recipients = [admin[1] for admin in settings.ADMINS]
    send_mail(settings.EMAIL_SUBJECT_PREFIX + subject, message,
              settings.SERVER_EMAIL, recipients, fail_silently)
def mail_managers(subject, message, fail_silently=False):
    """Send a message to every address listed in the MANAGERS setting."""
    recipients = [manager[1] for manager in settings.MANAGERS]
    send_mail(settings.EMAIL_SUBJECT_PREFIX + subject, message,
              settings.SERVER_EMAIL, recipients, fail_silently)
| [
"root@lucid64.hsd1.ca.comcast.net"
] | root@lucid64.hsd1.ca.comcast.net |
c6ab6b5a881525b9fd0cbc34430c66323cd4da68 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/hard.py | 22f87c30a5d7750ec2b410cb445691e2da331f1d | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 2,797 | py | ii = [('BentJDO2.py', 1), ('EmerRN.py', 2), ('LyelCPG2.py', 3), ('MarrFDI.py', 16), ('RogePAV2.py', 32), ('CoolWHM2.py', 9), ('KembFFF.py', 9), ('GodwWSL2.py', 11), ('ChanWS.py', 6), ('RogePAV.py', 41), ('SadlMLP.py', 8), ('FerrSDO3.py', 2), ('WilbRLW.py', 21), ('WilbRLW4.py', 13), ('RennJIT.py', 25), ('ProuWCM.py', 8), ('AubePRP2.py', 1), ('CookGHP.py', 5), ('ShawHDE.py', 1), ('MartHSI2.py', 6), ('LeakWTI2.py', 6), ('UnitAI.py', 1), ('KembFJ1.py', 7), ('WilkJMC3.py', 24), ('WilbRLW5.py', 11), ('LeakWTI3.py', 7), ('PettTHE.py', 21), ('MarrFDI3.py', 2), ('TennAP.py', 1), ('PeckJNG.py', 4), ('KnowJMM.py', 6), ('BailJD2.py', 8), ('AubePRP.py', 6), ('ChalTPW2.py', 3), ('GellWPT.py', 3), ('AdamWEP.py', 16), ('FitzRNS3.py', 18), ('WilbRLW2.py', 15), ('ClarGE2.py', 11), ('GellWPT2.py', 7), ('WilkJMC2.py', 10), ('CarlTFR.py', 61), ('SeniNSP.py', 8), ('LyttELD.py', 6), ('CoopJBT2.py', 8), ('TalfTAC.py', 3), ('GrimSLE.py', 4), ('RoscTTI3.py', 2), ('AinsWRR3.py', 20), ('CookGHP2.py', 4), ('KiddJAE.py', 20), ('AdamHMM.py', 1), ('BailJD1.py', 8), ('RoscTTI2.py', 4), ('CoolWHM.py', 6), ('MarrFDI2.py', 1), ('CrokTPS.py', 17), ('ClarGE.py', 16), ('LandWPA.py', 12), ('BuckWGM.py', 13), ('IrviWVD.py', 5), ('LyelCPG.py', 21), ('GilmCRS.py', 9), ('DaltJMA.py', 2), ('WestJIT2.py', 15), ('DibdTRL2.py', 11), ('AinsWRR.py', 11), ('CrocDNL.py', 39), ('MedwTAI.py', 12), ('LandWPA2.py', 5), ('WadeJEB.py', 9), ('FerrSDO2.py', 4), ('TalfTIT.py', 3), ('NewmJLP.py', 3), ('GodwWLN.py', 4), ('CoopJBT.py', 19), 
('KirbWPW2.py', 14), ('SoutRD2.py', 3), ('BackGNE.py', 28), ('LeakWTI4.py', 10), ('LeakWTI.py', 7), ('MedwTAI2.py', 9), ('SoutRD.py', 2), ('DickCSG.py', 2), ('BuckWGM2.py', 5), ('WheeJPT.py', 9), ('MereHHB3.py', 10), ('HowiWRL2.py', 16), ('BailJD3.py', 16), ('MereHHB.py', 1), ('WilkJMC.py', 6), ('HogaGMM.py', 3), ('MartHRW.py', 19), ('MackCNH.py', 3), ('WestJIT.py', 11), ('BabbCEM.py', 13), ('FitzRNS4.py', 54), ('CoolWHM3.py', 7), ('DequTKM.py', 2), ('FitzRNS.py', 61), ('BentJRP.py', 21), ('EdgeMHT.py', 6), ('BowrJMM.py', 3), ('LyttELD3.py', 8), ('FerrSDO.py', 8), ('RoscTTI.py', 5), ('ThomGLG.py', 8), ('StorJCC.py', 2), ('KembFJ2.py', 7), ('LewiMJW.py', 18), ('BabbCRD.py', 2), ('BellCHM.py', 11), ('SomeMMH.py', 1), ('HaliTBC.py', 4), ('WilbRLW3.py', 30), ('AinsWRR2.py', 3), ('MereHHB2.py', 5), ('BrewDTO.py', 1), ('JacoWHI.py', 6), ('ClarGE3.py', 16), ('RogeSIP.py', 2), ('MartHRW2.py', 11), ('DibdTRL.py', 13), ('FitzRNS2.py', 70), ('HogaGMM2.py', 2), ('MartHSI.py', 13), ('EvarJSP.py', 5), ('NortSTC.py', 9), ('SadlMLP2.py', 8), ('BowrJMM2.py', 3), ('LyelCPG3.py', 23), ('BeckWRE.py', 2), ('TaylIF.py', 22), ('WordWYR.py', 6), ('DibdTBR.py', 4), ('ThomWEC.py', 2), ('KeigTSS.py', 4), ('KirbWPW.py', 13), ('WaylFEP.py', 2), ('BentJDO.py', 3), ('ClarGE4.py', 25), ('HowiWRL.py', 19)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
069695756d382ef840438429813535c14a8fbcd3 | bdee746158783e3daeac940cb1744486e2864355 | /geokey/core/tests/logger/test_log_videofile.py | 5603d3bc2d78c7071e1a2843ad1697fedea80413 | [
"Apache-2.0"
] | permissive | NeolithEra/geokey | c64a9bf2b5a3a9ae2f01adc60b33efd65b04395d | 16d31b5207de9f699fc01054baad1fe65ad1c3ca | refs/heads/master | 2020-12-13T18:03:53.008668 | 2019-11-02T10:12:26 | 2019-11-02T10:12:26 | 234,489,286 | 0 | 0 | NOASSERTION | 2020-01-17T06:52:14 | 2020-01-17T06:52:13 | null | UTF-8 | Python | false | false | 4,300 | py | """Tests for logger: model VideoFile."""
from django.test import TestCase
from geokey.core.models import LoggerHistory
from geokey.users.tests.model_factories import UserFactory
from geokey.projects.tests.model_factories import ProjectFactory
from geokey.categories.tests.model_factories import CategoryFactory
from geokey.contributions.tests.model_factories import (
LocationFactory,
ObservationFactory,
)
from geokey.contributions.tests.media.model_factories import VideoFileFactory
class LogVideoFileTest(TestCase):
    """Test model VideoFile."""
    def setUp(self):
        """Set up test."""
        # Build the minimal object chain a video file needs:
        # user -> project -> category/location -> observation -> videofile.
        self.user = UserFactory.create()
        self.project = ProjectFactory.create(**{
            'creator': self.user})
        self.category = CategoryFactory.create(**{
            'creator': self.user,
            'project': self.project})
        self.location = LocationFactory.create(**{
            'creator': self.user})
        self.observation = ObservationFactory.create(**{
            'creator': self.user,
            'location': self.location,
            'project': self.project,
            'category': self.category})
        self.videofile = VideoFileFactory.create(**{
            'creator': self.user,
            'contribution': self.observation})
    def test_log_create(self):
        """Test when video file is created gets created."""
        # Creating one more media file should add exactly one history entry.
        log_count_init = LoggerHistory.objects.count()
        videofile = VideoFileFactory.create(**{
            'creator': self.user,
            'contribution': self.observation})
        log = LoggerHistory.objects.last()
        log_count = LoggerHistory.objects.count()
        # NOTE(review): assertNotEqual on log.user contrasts with the
        # assertEqual checks below - confirm the logger is indeed expected
        # to record a different (or absent) user for this action.
        self.assertNotEqual(log.user, {
            'id': str(self.user.id),
            'display_name': self.user.display_name})
        self.assertEqual(log.project, {
            'id': str(self.project.id),
            'name': self.project.name})
        self.assertEqual(log.usergroup, None)
        self.assertEqual(log.category, {
            'id': str(self.category.id),
            'name': self.category.name})
        self.assertEqual(log.field, None)
        self.assertEqual(log.location, {
            'id': str(self.location.id),
            'name': self.location.name})
        self.assertEqual(log.observation, {
            'id': str(self.observation.id)})
        self.assertEqual(log.comment, None)
        self.assertEqual(log.mediafile, {
            'id': str(videofile.id),
            'name': videofile.name,
            'type': 'VideoFile'})
        self.assertEqual(log.subset, None)
        self.assertEqual(log.action, {
            'id': 'created',
            'class': 'MediaFile'})
        self.assertEqual(log_count, log_count_init + 1)
        self.assertEqual(log.historical, None)
    def test_log_delete(self):
        """Test when video file gets deleted."""
        # Capture id/name before deleting; the log must still reference them.
        mediafile_id = self.videofile.id
        mediafile_name = self.videofile.name
        log_count_init = LoggerHistory.objects.count()
        self.videofile.delete()
        log = LoggerHistory.objects.last()
        log_count = LoggerHistory.objects.count()
        # NOTE(review): same assertNotEqual pattern as in test_log_create.
        self.assertNotEqual(log.user, {
            'id': str(self.user.id),
            'display_name': self.user.display_name})
        self.assertEqual(log.project, {
            'id': str(self.project.id),
            'name': self.project.name})
        self.assertEqual(log.usergroup, None)
        self.assertEqual(log.category, {
            'id': str(self.category.id),
            'name': self.category.name})
        self.assertEqual(log.field, None)
        self.assertEqual(log.location, {
            'id': str(self.location.id),
            'name': self.location.name})
        self.assertEqual(log.observation, {
            'id': str(self.observation.id)})
        self.assertEqual(log.comment, None)
        self.assertEqual(log.mediafile, {
            'id': str(mediafile_id),
            'name': mediafile_name,
            'type': 'VideoFile'})
        self.assertEqual(log.subset, None)
        # Deletion is logged as a soft-delete: status field set to 'deleted'.
        self.assertEqual(log.action, {
            'id': 'deleted',
            'class': 'MediaFile',
            'field': 'status',
            'value': 'deleted'})
        self.assertEqual(log_count, log_count_init + 1)
        self.assertEqual(log.historical, None)
| [
"noreply@github.com"
] | NeolithEra.noreply@github.com |
6818fa3cae3acad1fdf03d2dc50d5db778b3fdb6 | ad10f4d1530fe4ededfbb93ee31042c9e5c24e9a | /Data Structure/dataframe/21_dataframe에 현재 시간 기준 column 추가하기.py | ca7472dc6d67464724dfd8b9ff597bcc767ee050 | [] | no_license | WinterBlue16/Function-for-work | 0d76ea2c326e547ad0cc3171f4a5a09d02de5a58 | 38603549b448198c12b48c95147516dbbc3f28f2 | refs/heads/master | 2022-07-15T20:30:26.178739 | 2022-07-04T13:42:01 | 2022-07-04T13:42:01 | 238,364,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | """
dataframe에 현재 시간을 기준으로 한 column을 추가합니다.
"""
import pandas as pd
def add_datetime_col(df):
df['created_at'] = pd.to_datetime('now')
return df
| [
"leekh090163@gmail.com"
] | leekh090163@gmail.com |
d8779e568e33d24c95da2bb92c27147b6a00ed80 | 7773119bdfe6db21dac503fefe23d4456b58edb5 | /data/scripts/summarize.py | 234b16d69886455bbefba8dd3b9318d9cff0f8c3 | [
"MIT"
] | permissive | JoogsWasTaken/no-noscript | 1f87077a3fbbdbf61a51e88649b03fbb35ee144b | f423c9a3ad1a67d884c9ad51eb999a6b9bf48baa | refs/heads/master | 2022-12-16T07:06:52.922105 | 2020-09-10T07:40:47 | 2020-09-10T07:40:47 | 277,366,058 | 0 | 0 | MIT | 2020-09-10T07:41:34 | 2020-07-05T19:05:23 | Python | UTF-8 | Python | false | false | 3,061 | py | """
Creates a table containing the sanitized results of the benchmark.
This assumes that the main table has been split up.
This file is probably the jankiest out of all.
"""
from util import parse_csv_line, get_paths, as_bool, append_to_filename
from util import benchmark_columns as columns
import os
from math import floor, ceil
def median(lst):
    """
    Return the median of a list of numeric values.

    The list is sorted in place (matching the original behaviour).
    For an odd number of elements the middle value is returned; for an
    even number, the mean of the two middle values. Raises IndexError
    on an empty list.
    """
    lst.sort()
    l = len(lst)
    mid = l // 2
    if l % 2 == 0:
        # BUGFIX: the old code computed (lst[floor(l/2)] + lst[ceil(l/2)])/2,
        # but floor(l/2) == ceil(l/2) when l is even, so it returned a single
        # element instead of averaging the two middle values.
        return (lst[mid - 1] + lst[mid]) / 2
    else:
        return lst[mid]
# Build the summary CSV: read the per-run benchmark tables (JS on / JS off),
# compute per-URL medians and write them to "<benchmark>_results".
bm_file_path, _, _, _ = get_paths()

# File handles, initialized to None so the `finally` block can test them
# safely even when one of the open() calls below fails.
js_file = None
# BUGFIX: this was previously bound as `no_js_file`, but the finally block
# checks `nojs_file`; an early IOError then raised a NameError during
# cleanup instead of closing the files.
nojs_file = None
out_file = None

try:
    # Prepare header for output file.
    out_file = open(append_to_filename(bm_file_path, "_results"), "w")
    csv_header = [ "url", "noscript", "scripts" ]

    # Append headers for the median values.
    for x in [ "js", "no_js" ]:
        csv_header.append("median_load_" + x)
        csv_header.append("median_domload_" + x)
        csv_header.append("median_idle_" + x)

    out_file.write(",".join(csv_header) + "\n")

    js_file = open(append_to_filename(bm_file_path, "_js"), "r")
    nojs_file = open(append_to_filename(bm_file_path, "_no_js"), "r")

    # Skip CSV headers.
    next(js_file)
    next(nojs_file)

    while True:
        # Both files are read in lockstep; a None from either means EOF.
        js_line = next(js_file, None)
        nojs_line = next(nojs_file, None)

        if js_line is None or nojs_line is None:
            break

        js_row = parse_csv_line(js_line)
        nojs_row = parse_csv_line(nojs_line)

        # Column 6 onwards hold the raw timing samples; convert to floats.
        for i in range(6, len(js_row)):
            js_row[i] = float(js_row[i])
            nojs_row[i] = float(nojs_row[i])

        out_row = [
            # col 1: url
            js_row[columns["url"]],
            # col 2: noscript exists?
            as_bool(js_row[columns["noscript"]]) or as_bool(nojs_row[columns["noscript"]]),
            # col 3: script exists?
            (int(js_row[columns["scriptCount"]]) > 0) or (int(nojs_row[columns["scriptCount"]]) > 0),
            # col 4: median load (js on)
            median(js_row[6:11]),
            # col 5: median domload (js on)
            median(js_row[11:16]),
            # col 6: median idle (js on)
            median(js_row[16:21]),
            # col 7: median load (js off)
            median(nojs_row[6:11]),
            # col 8: median domload (js off)
            median(nojs_row[11:16]),
            # col 9: median idle (js off)
            median(nojs_row[16:21])
        ]

        out_file.write(",".join([ str(x) for x in out_row ]) + "\n")
except IOError as e:
    print("File IO error: {}".format(e))
finally:
    # Close whichever handles were successfully opened.
    if js_file is not None:
        js_file.close()
    if nojs_file is not None:
        nojs_file.close()
    if out_file is not None:
        out_file.close()
"mjugl@hs-mittweida.de"
] | mjugl@hs-mittweida.de |
bbb18f7782294604bc2614f3e8036877cec6f4c2 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq1646.py | a4be0a640c12c3a50a9fd916217514b0b775b2e0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,350 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=63
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the fixed 5-qubit benchmark circuit and append a measurement.

    The ``# number=NN`` comments are generated gate ids - kept verbatim.
    NOTE(review): the parameter ``n`` is unused; the gate list below is
    fixed and always addresses the five qubits in ``input_qubit``.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=3
    c.append(cirq.H.on(input_qubit[1])) # number=4
    c.append(cirq.H.on(input_qubit[0])) # number=57
    c.append(cirq.CZ.on(input_qubit[4],input_qubit[0])) # number=58
    c.append(cirq.H.on(input_qubit[0])) # number=59
    c.append(cirq.Z.on(input_qubit[4])) # number=55
    c.append(cirq.CNOT.on(input_qubit[4],input_qubit[0])) # number=56
    c.append(cirq.H.on(input_qubit[2])) # number=50
    c.append(cirq.CZ.on(input_qubit[4],input_qubit[2])) # number=51
    c.append(cirq.H.on(input_qubit[2])) # number=52
    c.append(cirq.H.on(input_qubit[2])) # number=5
    c.append(cirq.H.on(input_qubit[3])) # number=6
    c.append(cirq.H.on(input_qubit[4])) # number=21
    # The same generated gate sequence is repeated twice.
    for i in range(2):
        c.append(cirq.H.on(input_qubit[0])) # number=1
        c.append(cirq.H.on(input_qubit[1])) # number=2
        c.append(cirq.H.on(input_qubit[2])) # number=7
        c.append(cirq.H.on(input_qubit[3])) # number=8
        c.append(cirq.H.on(input_qubit[0])) # number=17
        c.append(cirq.H.on(input_qubit[1])) # number=18
        c.append(cirq.H.on(input_qubit[2])) # number=19
        c.append(cirq.H.on(input_qubit[3])) # number=20
        c.append(cirq.H.on(input_qubit[0])) # number=28
        c.append(cirq.Z.on(input_qubit[3])) # number=42
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=29
        c.append(cirq.H.on(input_qubit[0])) # number=30
        c.append(cirq.H.on(input_qubit[0])) # number=43
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=44
        c.append(cirq.H.on(input_qubit[0])) # number=45
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=35
        c.append(cirq.H.on(input_qubit[0])) # number=60
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=61
        c.append(cirq.H.on(input_qubit[0])) # number=62
        c.append(cirq.X.on(input_qubit[0])) # number=39
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=40
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=37
        c.append(cirq.H.on(input_qubit[0])) # number=46
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=47
        c.append(cirq.H.on(input_qubit[0])) # number=48
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=27
        c.append(cirq.X.on(input_qubit[1])) # number=10
        c.append(cirq.X.on(input_qubit[2])) # number=11
        c.append(cirq.X.on(input_qubit[3])) # number=12
        c.append(cirq.X.on(input_qubit[0])) # number=13
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=22
        c.append(cirq.Y.on(input_qubit[2])) # number=41
        c.append(cirq.X.on(input_qubit[1])) # number=23
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=24
        c.append(cirq.rx(1.0398671683382215).on(input_qubit[2])) # number=31
        c.append(cirq.X.on(input_qubit[2])) # number=15
        c.append(cirq.X.on(input_qubit[3])) # number=16
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a string of '0'/'1' digits."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    # Build the circuit, compile it for the Sycamore gate set, sample it
    # 2000 times and dump the measurement histogram plus the circuit itself.
    qubit_count = 5
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Output goes to ../data relative to the current working directory.
    writefile = open("../data/startCirq1646.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
3dc4a8e1627230c5c9466c36212543c6ef2ea1d1 | 200745cbb772f0ac7122b58301575401053044f6 | /dataset/mini_imagenet.py | e6c98ce8a7aa925ea9c7cee8e66853ebf602928c | [] | no_license | kadimakipp/GAN | 7ad6c19d4b25810d8f67e84eed848703bdf3d63a | e68912469c4df8f47e2c66b64cbf468ff68005bc | refs/heads/master | 2020-07-27T13:17:19.820690 | 2019-09-24T03:41:32 | 2019-09-24T03:41:32 | 209,102,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,250 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: kipp
@contact: kaidma.kipp@gmail.com
@site:
@software: PyCharm
@file: mini_imagenet.py
@time: 2019/9/23 下午1:51
# Shallow men believe in luck.
Strong men believe in cause and effect.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import pandas as pd
import torch
import torchvision
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import torch.nn.functional as F
from PIL import Image
"""
there are 100 classes with 600 samples of 84×84 color images per class. These 100 classes are divided into 64, 16,
and 20 classes respectively for sampling tasks for meta-training, meta-validation, and meta-test.
"""
class MiniImagenet(Dataset):
    """Mini-ImageNet dataset reader.

    Expects ``root`` to contain an ``images`` directory plus train/val/test
    CSV index files (the regenerated split under ``root/new-csv`` when
    ``new_csv`` is True).

    :param root: dataset root directory
    :param transform: list of torchvision transforms (composed internally)
    :param new_csv: read the re-split CSVs from ``root/new-csv``
    :param train: split to load - "train", "val", "test", "val&test";
        any other value loads the union of all three splits
    :param ten_class: keep only the first ten classes
    """
    def __init__(self, root, transform, new_csv=True, train="train", ten_class=False):
        super(MiniImagenet, self).__init__()
        self.transform = transforms.Compose(transform)
        self.__img_dir = os.path.join(root, "images")
        if new_csv:
            root = os.path.join(root, "new-csv")
        __test_csv = os.path.join(root, "test.csv")
        __val_csv = os.path.join(root, "val.csv")
        __train_csv = os.path.join(root, "train.csv")
        if train == "train":
            self.csv = pd.read_csv(__train_csv)
        elif train == "val":
            self.csv = pd.read_csv(__val_csv)
        elif train == "test":
            self.csv = pd.read_csv(__test_csv)
        elif train == "val&test":
            csv_val = pd.read_csv(__val_csv)
            csv_test = pd.read_csv(__test_csv)
            self.csv = csv_val.append(csv_test, ignore_index=True)
        else:
            # Fallback: load all three splits combined.
            csv_val = pd.read_csv(__val_csv)
            csv_test = pd.read_csv(__test_csv)
            self.csv = csv_val.append(csv_test, ignore_index=True)
            csv_train = pd.read_csv(__train_csv)
            self.csv = self.csv.append(csv_train, ignore_index=True)
        print('origin dataset len ',len(self.csv))
        # Map each class label to a contiguous integer id (first 10 only
        # when ten_class is set).
        class_name = self.csv.drop_duplicates(['label'])
        self.class_name = {}
        for i, name in enumerate(class_name['label']):
            if ten_class:
                if i >= 10:
                    break
            self.class_name.update({name: i})
        if ten_class:
            # Drop every row whose label is outside the kept ten classes.
            self.csv = self.csv[self.csv.apply(lambda x: x['label'] in self.class_name.keys(), axis=1)]
        self.csv = self.csv.to_numpy()

    @staticmethod
    def reconstruct_miniimagenet(csv, root):
        """Re-split the full index into train/test/val CSVs under
        ``root/new-csv`` with 500/50/50 samples per class, shuffled."""
        new_csv = os.path.join(root, "new-csv")
        print(new_csv)
        train = pd.DataFrame()
        test = pd.DataFrame()
        val = pd.DataFrame()
        class_name = csv.drop_duplicates(['label'])
        for name in class_name["label"]:
            # BUGFIX: compare labels by value (==), not identity (is).
            # `is` only worked by accident because the compared objects
            # happened to be the same interned strings.
            temp = csv[csv.apply(lambda x: x['label'] == name, axis=1)]
            train = train.append(temp[:500], ignore_index=True)
            test = test.append(temp[500:550], ignore_index=True)
            val = val.append(temp[550:], ignore_index=True)
        train = train.reindex(np.random.permutation(train.index))
        test = test.reindex(np.random.permutation(test.index))
        val = val.reindex(np.random.permutation(val.index))
        train.to_csv(os.path.join(new_csv, 'train.csv'), index=False, header=True)
        test.to_csv(os.path.join(new_csv, 'test.csv'), index=False, header=True)
        val.to_csv(os.path.join(new_csv, 'val.csv'), index=False, header=True)
        print("reconstruct mini imagenet dataset ")

    def __len__(self):
        return len(self.csv)

    def __getitem__(self, index):
        # Each row is a (filename, label) pair; labels become integer ids.
        image, label = self.csv[index]
        label = self.class_name[label]
        image = Image.open(os.path.join(self.__img_dir, image))
        image = self.transform(image)
        return image, label
class miniImagenet(object):
    """Convenience factory that builds DataLoaders over MiniImagenet."""
    def __init__(self):
        # Dataset location and loader defaults.
        self.root = "/media/kipp/work/Datas/mini-imagenet"
        self.num_work = 4
        self.shuffle = True

    def Transform(self, img_size):
        """Return the preprocessing pipeline for a square image of img_size."""
        return [
            # transforms.RandomCrop(224),
            # transforms.RandomHorizontalFlip(0.5),
            # transforms.RandomAffine(5),
            transforms.Resize((img_size, img_size), Image.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]

    def get_loader(self, batch_size, img_size, mode="test"):
        """Build a DataLoader over the split selected by ``mode``."""
        dataset = MiniImagenet(self.root, self.Transform(img_size), train=mode)
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=self.shuffle,
            num_workers=self.num_work,
        )
import matplotlib.pyplot as plt
def main():
    """Visual smoke test: display the first few samples of the test split."""
    plt.figure()
    factory = miniImagenet()
    loader = factory.get_loader(1, 224)
    print(len(loader))
    for index, (images, labels) in enumerate(loader):
        if index > 10:
            break
        print(labels)
        # CHW tensor in [-1, 1] -> HWC array in [0, 1] for matplotlib.
        frame = images[0].numpy().transpose(1, 2, 0)
        frame = (frame + 1) / 2
        plt.imshow(frame)
        plt.show()
if __name__ == "__main__":
    # CLI entry point: expose main() through python-fire.
    import fire
    fire.Fire(main)
"kadima.kipp@gmail.com"
] | kadima.kipp@gmail.com |
d61ff3962723856798b1916d2bb9a409e310377d | feaab31dfbfe42faec531c7f29c8136c82bc8210 | /middlewares.py | 87132648e9bd2b2d0a4137f4c8c25b11fa707492 | [] | no_license | alexsanya/words-learn-assistant | fb9682847d31d4dd3987c96d52719aaee193c80b | 44e08eff47c8f8e7c5eb23ba27219cb935da04dc | refs/heads/main | 2023-06-16T07:50:02.067050 | 2021-07-17T08:17:34 | 2021-07-17T08:17:34 | 381,112,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from aiogram import types
from aiogram.dispatcher.handler import CancelHandler
from aiogram.dispatcher.middlewares import BaseMiddleware
class AccessMiddleware(BaseMiddleware):
def __init__(self, access_id: int):
self.access_id = access_id
super().__init__()
async def on_process_message(self, message: types.Message, _):
if int(message.from_user.id) != int(self.access_id):
await message.answer("Access Denied")
print(f"Acces denied for user {message.from_user.id}")
raise CancelHandler() | [
"alexander.koval@ix.co"
] | alexander.koval@ix.co |
865c2f33699b4dea02382da9927653019fae2dfd | db57afdfe41a7497dc2fa1da53d06255a4c3a09b | /Project - Space Invaders/archive/V01.py | 3db63a18c9ab656738e85620ccfaa00a4b207f86 | [] | no_license | Varil426/AGH-Introduction-to-CS | 8a9236b33e7424446d4631404e26612d798e7896 | 0b8385f049273760b965af4a31aa523ea0459a5e | refs/heads/master | 2023-01-09T10:37:43.656832 | 2020-10-09T06:49:17 | 2020-10-09T06:49:17 | 302,556,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,960 | py | import pygame
import time
import random
# Initialize all pygame modules before any display/surface calls.
pygame.init()
# Build tag shown in the window title.
version = "0.1"
# Frame delay constant (33 ms ~ 30 FPS); presumably consumed by the main
# loop outside this excerpt - confirm.
refreshRate = 33
# Main-loop flag; the game runs while this stays True.
process = True
# Background placeholder; importImages() replaces it with a loaded Surface.
bg = 0
# Window dimensions and the margins reserved around the playable area.
screenWidth = 1280
screenHeight = 720
spacingVertical = 50
spacingHorizontal = 380
gameWidth = screenWidth - spacingHorizontal
gameHeight = screenHeight - spacingVertical * 2
# Off-screen surface that gameplay is drawn onto before blitting to the window.
gameArea = pygame.Surface((gameWidth, gameHeight))
pygame.display.set_caption("Space Invaders " + version)
window = pygame.display.set_mode((screenWidth, screenHeight))
class UI(object):
    # Heads-up display: currently just the player's health bar.
    def __init__(self):
        # The bar lives on its own surface, slightly narrower than the play area.
        self.healthBarLength = gameWidth - 100
        self.healthBar = pygame.Surface((self.healthBarLength, 20))
    def draw(self):
        # NOTE(review): `font` and a `player` instance are defined outside
        # this excerpt; draw() assumes both exist - confirm.
        window.blit(font.render("Health", False, (255,255,255)), (20, screenHeight - 45))
        # Clear the bar, draw a 1px white outline, then fill it proportionally
        # to the remaining health (0-100).
        pygame.draw.rect(self.healthBar, (0, 0, 0), (0, 0, self.healthBarLength, 20))
        pygame.draw.rect(self.healthBar, (255, 255, 255), (0, 0, self.healthBarLength, 20), 1)
        pygame.draw.rect(self.healthBar, (255, 255, 255), (0, 0, int(self.healthBarLength * player.health/100), 20))
class player(object):
    # The player's ship: position, stats and shooting limits.
    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.health = 100              # hit points; rendered by UI as a 0-100 bar
        self.velocity = 15             # horizontal movement step in pixels
        self.damage = 25               # damage per bullet (applied in colision())
        self.maxBullets = 10           # presumably caps live bullets - confirm in shoot handler
        self.bulletsVelocity = 15      # speed of the player's projectiles
        self.timeBetweenShots = 0.5    # presumably min seconds between shots - confirm
        self.lastBullet = 0            # timestamp of the last shot fired
    def draw(self):
        # Red rectangle placeholder sprite, drawn on the game surface.
        pygame.draw.rect(gameArea, (255, 0, 0), (self.x, self.y, self.width, self.height))
class enemy(object):
    # A single alien: position, size, health and movement/shot speeds.
    def __init__(self, x, y, width, height, health):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.health = health
        self.velocity = 5          # horizontal marching step (see AI.AIControls)
        self.bulletVelocity = 10   # speed of projectiles this enemy fires
    def draw(self):
        # Green rectangle placeholder sprite, drawn on the game surface.
        pygame.draw.rect(gameArea, (0, 255, 0), (self.x, self.y, self.width, self.height))
class projectile(object):
    # A bullet (player's or enemy's): fixed 10x20 rectangle of a given color.
    def __init__(self, x, y, velocity, color):
        self.x = x
        self.y = y
        self.velocity = velocity   # vertical speed; sign/direction handled by caller
        self.width = 10
        self.height = 20
        self.color = color
    def draw(self):
        pygame.draw.rect(gameArea, self.color, (self.x, self.y, self.width, self.height))
class AI(object):
    # Drives the alien grid: side-to-side marching plus random shots.
    def __init__(self):
        self.enemiesDirection = 1   # +1 = moving right, -1 = moving left
        self.counter = 1            # minimum seconds between enemy shots
        self.lastShot = 0           # timestamp of the last enemy shot
    def AIControls(self):
        # Flip direction when the edge-most enemy of any row would step
        # outside the playable area on the next move.
        if self.enemiesDirection > 0:
            for row in enemies:
                index = len(row) - 1
                if index > -1:
                    if row[index].x + row[index].velocity * self.enemiesDirection > gameWidth - row[index].width:
                        self.enemiesDirection = -1
        else:
            for row in enemies:
                if len(row) > 0:
                    if row[0].x + row[0].velocity * self.enemiesDirection < 0:
                        self.enemiesDirection = 1
        # March every surviving enemy one step in the current direction.
        for row in enemies:
            for enemy in row:
                enemy.x += enemy.velocity * self.enemiesDirection
        # At most once per `counter` seconds, pick a random surviving enemy
        # and fire a red projectile from the middle of its lower edge.
        # NOTE(review): the while loop below never terminates if all four
        # rows are empty - confirm the game ends before that can happen.
        if time.time() - self.lastShot > self.counter:
            row = random.randrange(0, 4)
            while len(enemies[row]) == 0:
                row = random.randrange(0, 4)
            column = random.randrange(0, len(enemies[row]))
            selected = enemies[row][column]
            enemyBullets.append(projectile(selected.x + selected.width//2, selected.y + selected.height, selected.bulletVelocity, (255, 0, 0)))
            self.lastShot = time.time()
def importImages():
    """Load the background image into module-level `bg` (replaces the 0 placeholder)."""
    global bg
    bg = pygame.image.load('assets/img/bg.jpg')
def generateEnemies():
    """Fill the global `enemies` grid with 4 rows x 8 columns of invaders."""
    startingHeight = 20     # y of the first row; colision() relies on the same value
    stepVertical = 100      # row spacing; must match colision()'s band height
    startingWidth = 75
    stepHorizontal = 100
    for i in range(0, 4):
        for j in range(0, 8):
            enemies[i].append(enemy(startingWidth + j*stepHorizontal, startingHeight + stepVertical*i, 50, 50, 25))
def killEnemy(index, enemy):
    """Remove the given enemy from row `index` of the global enemy grid."""
    enemies[index].remove(enemy)
def removeBullet(bullet):
    """Delete a player bullet from the global `bullets` list."""
    bullets.remove(bullet)
def removeEnemyBullet(bullet):
    """Delete an enemy bullet from the global `enemyBullets` list."""
    enemyBullets.remove(bullet)
def colision():
    """Resolve collisions: player bullets vs enemies, enemy bullets vs player.

    A bullet's y coordinate selects which enemy row to test (rows sit 100px
    apart starting at y=20; values outside the first three bands — including
    y <= 20 and exact band edges — fall through to row 3, exactly as in the
    original branch layout).  Lists are iterated over snapshots because hits
    remove elements from them mid-loop, which previously made the loop skip
    the element after every removal.
    """
    startingHeight = 20
    stepVertical = 100
    for bullet in bullets[:]:  # snapshot: removeBullet() mutates `bullets`
        if bullet.y > startingHeight and bullet.y < startingHeight + stepVertical:
            rowIndex = 0
        elif bullet.y > startingHeight + stepVertical * 1 and bullet.y < startingHeight + stepVertical * 2:
            rowIndex = 1
        elif bullet.y > startingHeight + stepVertical * 2 and bullet.y < startingHeight + stepVertical * 3:
            rowIndex = 2
        else:
            rowIndex = 3
        for enemy in enemies[rowIndex][:]:  # snapshot: killEnemy() mutates the row
            if enemy.y < bullet.y and enemy.y + enemy.height > bullet.y and enemy.x < bullet.x + bullet.width and enemy.x + enemy.width > bullet.x:
                enemy.health -= player.damage
                if enemy.health <= 0:
                    killEnemy(rowIndex, enemy)
                removeBullet(bullet)
                # The bullet is spent: stop before a second overlapping enemy
                # triggers another removeBullet() (a ValueError in the original).
                break
    for bullet in enemyBullets[:]:  # snapshot: removeEnemyBullet() mutates the list
        if bullet.y + bullet.height > player.y and bullet.x + bullet.width > player.x and bullet.x < player.x + player.width:
            removeEnemyBullet(bullet)
            player.health -= 25
def refreshGameWindow():
    """Advance and redraw one frame.

    Moves both bullet lists (culling bullets that left the play area), draws
    every sprite and the HUD, then runs collision handling and the enemy AI
    before flipping the display.
    """
    window.blit(bg, (0, 0))
    gameArea.fill((0, 0, 0))
    # Iterate snapshots: the removal helpers mutate these lists mid-loop,
    # which previously made the loop skip the element after each removal.
    for bullet in bullets[:]:
        if bullet.y - bullet.velocity < 0:
            removeBullet(bullet)  # left the top of the play area
        else:
            bullet.y -= bullet.velocity
            bullet.draw()
    for bullet in enemyBullets[:]:
        if bullet.y > gameHeight:
            removeEnemyBullet(bullet)  # fell off the bottom
        else:
            bullet.y += bullet.velocity
            bullet.draw()
    for row in enemies:
        for enemy in row:
            enemy.draw()
    player.draw()
    # White border around the play area, then the HUD overlays.
    pygame.draw.rect(gameArea, (255, 255, 255), (0, 0, gameWidth, gameHeight), 1)
    UI.draw()
    window.blit(gameArea, (1, spacingVertical))
    window.blit(UI.healthBar, (100, screenHeight - 35))
    colision()
    AI.AIControls()
    pygame.display.update()
def controls():
    """Poll the keyboard: arrow keys move the player, Z fires (rate/cap limited)."""
    pressedKeys = pygame.key.get_pressed()
    if pressedKeys[pygame.K_LEFT] and player.x - player.velocity > 0:
        player.x -= player.velocity
    if pressedKeys[pygame.K_RIGHT] and player.x + player.velocity + player.width < gameWidth:
        player.x += player.velocity
    # Fire only while under the on-screen bullet cap and past the cooldown.
    if pressedKeys[pygame.K_z] and len(bullets) < player.maxBullets and time.time() - player.lastBullet > player.timeBetweenShots:
        player.lastBullet = time.time()
        bullets.append(projectile(player.x + player.width//2, player.y, player.bulletsVelocity, (255, 255, 255)))
# Instantiate the singletons; note each instance shadows its class name, so
# the classes cannot be instantiated again past this point.
UI = UI()
font = pygame.font.SysFont("Arial", 30)
player = player(gameWidth//2, gameHeight - 55, 50, 50)
enemies = [[],[],[],[]]     # 4 rows of invaders, filled by generateEnemies()
AI = AI()
bullets = []                # live player bullets
enemyBullets = []           # live enemy bullets
importImages()
generateEnemies()
# Main loop: one frame per iteration until the window is closed.
while process:
    pygame.time.delay(refreshRate)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            process = False
    controls()
    refreshGameWindow()
pygame.quit() | [
"bartlomiejkregielewski@gmail.com"
] | bartlomiejkregielewski@gmail.com |
778cd820b7d00da1bca2f49da9c60917cf10e046 | b7236bb633e982a2ca75b5c70783d31d6b490434 | /gui_helloworld.py | 26d98996e2009fb09fe1e81672bd2c28fc0ce509 | [] | no_license | wuwenlong/Python | f88acce7b24bcc195f25c2f855183c10cfd86a63 | 1667711e24ccb3279229792be2c4a4f4ace7ff3e | refs/heads/master | 2021-03-30T22:26:08.011685 | 2018-03-13T09:11:43 | 2018-03-13T09:11:43 | 125,022,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import *
import tkinter.messagebox as messagebox
class Application(Frame):
    """Minimal Tkinter demo: a name entry plus a button that pops a greeting."""
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()
    def createWidgets(self):
        # An entry for the name and a button (labelled "您好", i.e. "hello")
        # wired to the greeting dialog.
        self.nameInput = Entry(self)
        self.nameInput.pack()
        self.alertButton = Button(self, text='您好', command=self.hello)
        self.alertButton.pack()
    def hello(self):
        # Fall back to 'world' when the entry is empty.
        name = self.nameInput.get() or 'world'
        messagebox.showinfo('Message', 'Hello, %s' % name)
app = Application()
# Set the window title:
app.master.title('Hello World')
# Enter the Tk main event loop:
app.mainloop()
| [
"noreply@github.com"
] | wuwenlong.noreply@github.com |
441f545bbea3f22988f3da110207b42ccbea0f02 | 3c64f78ae8b0158f8365b283b64da22b710039b9 | /pset6-python/hello/hello.py | 7378cd527055237f231371d757bdfad8bcab094b | [] | no_license | tomasgoiba/cs50 | 08c58582bf0e42579fe58d9ede3af994ba44f262 | 4955f0ed6b8879cff7e21772da4ad6720c14af38 | refs/heads/master | 2023-03-24T21:33:36.627373 | 2021-03-21T14:08:41 | 2021-03-21T14:08:41 | 349,728,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | # Prompt user for their name
# Read the user's name from stdin (the prompt itself ends with a newline).
name = input("What is your name?\n")
# Print "hello" followed by the name of the user
print(f"hello, {name}") | [
"tomas_goiba@hotmail.com"
] | tomas_goiba@hotmail.com |
b42ca631e747f23c790aa736ca62349ae55f51f3 | 7c94fef9b1dd109efb9f7851871130b3e0f27b65 | /services/spaceapps/spaceapps/mini wifi drone/py_wifi_drone-master_PYTHON3/droneconfig.py | 5f17acbb5b105303e659656807044406d79953d1 | [] | no_license | turtlesallthewayup/spaceapps_webapp | c2eb14cd1a999bbe8ead32555b4592348881afb8 | 486ed9058c5d73dd47d7e195591c63b301496b5f | refs/heads/master | 2020-08-21T16:11:18.021549 | 2019-11-04T00:04:41 | 2019-11-04T00:04:41 | 216,195,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,684 | py | HANDSHAKE_DATA = bytearray([0x49, 0x54, 0x64, 0x00, 0x00, 0x00, 0x5D, 0x00,
0x00, 0x00, 0x81, 0x85, 0xFF, 0xBD, 0x2A, 0x29,
0x5C, 0xAD, 0x67, 0x82, 0x5C, 0x57, 0xBE, 0x41,
0x03, 0xF8, 0xCA, 0xE2, 0x64, 0x30, 0xA3, 0xC1,
0x5E, 0x40, 0xDE, 0x30, 0xF6, 0xD6, 0x95, 0xE0,
0x30, 0xB7, 0xC2, 0xE5, 0xB7, 0xD6, 0x5D, 0xA8,
0x65, 0x9E, 0xB2, 0xE2, 0xD5, 0xE0, 0xC2, 0xCB,
0x6C, 0x59, 0xCD, 0xCB, 0x66, 0x1E, 0x7E, 0x1E,
0xB0, 0xCE, 0x8E, 0xE8, 0xDF, 0x32, 0x45, 0x6F,
0xA8, 0x42, 0xEE, 0x2E, 0x09, 0xA3, 0x9B, 0xDD,
0x05, 0xC8, 0x30, 0xA2, 0x81, 0xC8, 0x2A, 0x9E,
0xDA, 0x7F, 0xD5, 0x86, 0x0E, 0xAF, 0xAB, 0xFE,
0xFA, 0x3C, 0x7E, 0x54, 0x4F, 0xF2, 0x8A, 0xD2,
0x93, 0xCD])
START_DRONE_DATA = bytearray([0xCC, 0x7F, 0x7F, 0x0, 0x7F, 0x0, 0x7F, 0x33])
FLY_DRONE_DATA = bytearray([0xCC, 0x80, 0x80, 0x7f, 0x80, 0x01, 0xFE, 0x33])
LAND_DRONE_DATA = bytearray([0xCC, 0x80, 0x80, 0x7f, 0x80, 0x02, 0xFE, 0x33])
# VIDEO CONFIG DATA
VIDEO_INITIALIZE = [
bytearray([0x49, 0x54, 0x64, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00,
0x00, 0x0F, 0x32, 0x81, 0x95, 0x45, 0x2E, 0xF5, 0xE1, 0xA9,
0x28, 0x10, 0x86, 0x63, 0x17, 0x36, 0xC3, 0xCA, 0xE2, 0x64,
0x30, 0xA3, 0xC1, 0x5E, 0x40, 0xDE, 0x30, 0xF6, 0xD6, 0x95,
0xE0, 0x30, 0xB7, 0xC2, 0xE5, 0xB7, 0xD6, 0x5D, 0xA8, 0x65,
0x9E, 0xB2, 0xE2, 0xD5, 0xE0, 0xC2, 0xCB, 0x6C, 0x59, 0xCD,
0xCB, 0x66, 0x1E, 0x7E, 0x1E, 0xB0, 0xCE, 0x8E, 0xE8, 0xDF,
0x32, 0x45, 0x6F, 0xA8, 0x42, 0xB7, 0x33, 0x0F, 0xB7, 0xC9,
0x57, 0x82, 0xFC, 0x3D, 0x67, 0xE7, 0xC3, 0xA6, 0x67, 0x28,
0xDA, 0xD8, 0xB5, 0x98, 0x48, 0xC7, 0x67, 0x0C, 0x94, 0xB2,
0x9B, 0x54, 0xD2, 0x37, 0x9E, 0x2E, 0x7A]),
bytearray([0x49, 0x54, 0x64, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00,
0x00, 0x54, 0xB2, 0xD1, 0xF6, 0x63, 0x48, 0xC7, 0xCD, 0xB6,
0xE0, 0x5B, 0x0D, 0x1D, 0xBC, 0xA8, 0x1B, 0xCA, 0xE2, 0x64,
0x30, 0xA3, 0xC1, 0x5E, 0x40, 0xDE, 0x30, 0xF6, 0xD6, 0x95,
0xE0, 0x30, 0xB7, 0xC2, 0xE5, 0xB7, 0xD6, 0x5D, 0xA8, 0x65,
0x9E, 0xB2, 0xE2, 0xD5, 0xE0, 0xC2, 0xCB, 0x6C, 0x59, 0xCD,
0xCB, 0x66, 0x1E, 0x7E, 0x1E, 0xB0, 0xCE, 0x8E, 0xE8, 0xDF,
0x32, 0x45, 0x6F, 0xA8, 0x42, 0xB7, 0x33, 0x0F, 0xB7, 0xC9,
0x57, 0x82, 0xFC, 0x3D, 0x67, 0xE7, 0xC3, 0xA6, 0x67, 0x28,
0xDA, 0xD8, 0xB5, 0x98, 0x48, 0xC7, 0x67, 0x0C, 0x94, 0xB2,
0x9B, 0x54, 0xD2, 0x37, 0x9E, 0x2E, 0x7A])
]
STREAM_START = bytearray([0x49, 0x54, 0x64, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00,
0x80, 0x86, 0x38, 0xC3, 0x8D, 0x13, 0x50, 0xFD, 0x67, 0x41, 0xC2,
0xEE, 0x36, 0x89, 0xA0, 0x54, 0xCA, 0xE2, 0x64, 0x30, 0xA3, 0xC1,
0x5E, 0x40, 0xDE, 0x30, 0xF6, 0xD6, 0x95, 0xE0, 0x30, 0xB7, 0xC2,
0xE5, 0xB7, 0xD6, 0x5D, 0xA8, 0x65, 0x9E, 0xB2, 0xE2, 0xD5, 0xE0,
0xC2, 0xCB, 0x6C, 0x59, 0xCD, 0xCB, 0x66, 0x1E, 0x7E, 0x1E, 0xB0,
0xCE, 0x8E, 0xE8, 0xDF, 0x32, 0x45, 0x6F, 0xA8, 0x42, 0xEB, 0x20,
0xBE, 0x38, 0x3A, 0xAB, 0x05, 0xA8, 0xC2, 0xA7, 0x1F, 0x2C, 0x90,
0x6D, 0x93, 0xF7, 0x2A, 0x85, 0xE7, 0x35, 0x6E, 0xFF, 0xE1, 0xB8,
0xF5, 0xAF, 0x09, 0x7F, 0x91, 0x47, 0xF8, 0x7E])
HEARTBEAT = bytearray([0x49, 0x54, 0x64, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0xeb,
0xdf, 0xee, 0xab, 0x01, 0x76, 0x64, 0x3e, 0x47, 0x07, 0x60, 0x2d, 0xe5, 0xe9,
0x86, 0x59, 0xac, 0xef, 0x63, 0xf7, 0x71, 0x57, 0xab, 0x2f, 0x53, 0xe3, 0xf7,
0x68, 0xec, 0xd9, 0xe1, 0x85, 0x47, 0xb8, 0xc2, 0x2e, 0x21, 0xd0, 0x1b, 0xfb,
0x6b, 0x3d, 0xe3, 0x25, 0xa2, 0x7b, 0x8f, 0xb3, 0xac, 0xef, 0x63, 0xf7, 0x71,
0x57, 0xab, 0x2f, 0x53, 0xe3, 0xf7, 0x68, 0xec, 0xd9, 0xe1, 0x85, 0xb7, 0x33,
0x0f, 0xb7, 0xc9, 0x57, 0x82, 0xfc, 0x3d, 0x67, 0xe7, 0xc3, 0xa6, 0x67, 0x28,
0xda, 0xd8, 0xb5, 0x98, 0x48, 0xc7, 0x67, 0x0c, 0x94, 0xb2, 0x9b, 0x54, 0xd2,
0x37, 0x9e, 0x2e, 0x7a])
HEARTBEAT_RATE = 5.0
| [
"joaopedrokaspary@hotmail.com"
] | joaopedrokaspary@hotmail.com |
96a7c7a80cfe0a8eee5bbb51004ddcf3219a0bae | 5eaa3e43afd9069829dfff3dc4b4a9f4072131ac | /Vize Çalışması 1.py | 944fa44f126f15c347cc5b3a305cf3e444129612 | [] | no_license | volkanyildiz/Image_Processing | d55112059b9519f7bcac65b0b136943c6594f548 | 4db59084921d117121f14e87128fcf2bd7907d44 | refs/heads/master | 2020-03-31T18:28:38.395781 | 2019-01-01T19:47:41 | 2019-01-01T19:47:41 | 152,460,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py |
# coding: utf-8
# In[1]:
liste = [0,1,2,3,4,5,6,7,8,9]
for i in range(len(liste)):
print(liste[i])
# In[2]:
print(liste)
# In[3]:
import random
# In[4]:
s= random.randint(5,10)
print(s)
# In[5]:
s
# In[6]:
for i in range(10):
print(i)
# In[12]:
liste2=[]
for i in range(10):
liste2.append(random.randint(0,10))
print (liste2)
# In[13]:
import numpy as np
# In[20]:
x=4
liste2=np.arange(x)
liste3=liste2+7
print(liste2)
print(liste3)
# In[17]:
s= np.arange(20)
s
# In[23]:
import matplotlib.pyplot as plt
# In[24]:
img= plt.imread("turtle.jpg")
plt.imshow(img)
# plt.show()
# In[26]:
plt.imshow(img)
plt.show()
# In[27]:
print(img.shape)
# In[36]:
def fonksiyon1(img):
    """Print basic stats of an RGB image array: rank, shape, per-channel min/max.

    Output labels are Turkish (Kırmızı/Yeşil/Mavi = red/green/blue) and are
    kept byte-for-byte identical to the original.
    """
    print("Resmin boyutu = ",img.ndim)
    print("Resmin Shape değeri = ",img.shape)
    # Channel 0 = red, 1 = green, 2 = blue.
    for kanal, isim in enumerate(("Kırmızı", "Yeşil", "Mavi")):
        print(isim + " için min değer = ",img[:,:,kanal].min())
        print(isim + " için max değer = ",img[:,:,kanal].max())
# In[37]:
fonksiyon1(img)
# In[4]:
s= random.randint(5,6)
print(s)
# In[5]:
s= random.randint(5,6)
print(s)
# In[6]:
s= random.randint(5,6)
print(s)
| [
"noreply@github.com"
] | volkanyildiz.noreply@github.com |
3c6920fd556e9c8e818a39d2f5644c70aa619222 | a8e8ae98c26a54a99ea840a10140e4c5c4080f27 | /external/workload-automation/wa/workloads/stress_ng/__init__.py | 9cf1a7d70eb25e29226a15e28fdd39399af418d4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] | permissive | ARM-software/lisa | c51ea10d9f1ec1713a365ca0362f176c6a333191 | be8427f24d7565c0668cd51ed7ed55867fcec889 | refs/heads/main | 2023-08-30T20:55:20.646965 | 2023-08-29T15:15:12 | 2023-08-29T16:19:20 | 47,548,304 | 200 | 131 | Apache-2.0 | 2023-09-14T11:03:27 | 2015-12-07T11:32:56 | Jupyter Notebook | UTF-8 | Python | false | false | 5,840 | py | # Copyright 2015, 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=attribute-defined-outside-init
import os
from wa import Workload, Parameter, ConfigError, Executable
from wa.framework.exception import WorkloadError
from wa.utils.exec_control import once
from wa.utils.serializer import yaml
class StressNg(Workload):
    name = 'stress-ng'
    description = """
    Run the stress-ng benchmark.
    stress-ng will stress test a computer system in various selectable ways. It
    was designed to exercise various physical subsystems of a computer as well
    as the various operating system kernel interfaces.
    stress-ng can also measure test throughput rates; this can be useful to
    observe performance changes across different operating system releases or
    types of hardware. However, it has never been intended to be used as a
    precise benchmark test suite, so do NOT use it in this manner.
    The official website for stress-ng is at:
    http://kernel.ubuntu.com/~cking/stress-ng/
    Source code are available from:
    http://kernel.ubuntu.com/git/cking/stress-ng.git/
    """
    parameters = [
        Parameter('stressor', kind=str, default='cpu',
                  allowed_values=['cpu', 'io', 'fork', 'switch', 'vm', 'pipe',
                                  'yield', 'hdd', 'cache', 'sock', 'fallocate',
                                  'flock', 'affinity', 'timer', 'dentry',
                                  'urandom', 'sem', 'open', 'sigq', 'poll'],
                  description='''
                  Stress test case name. The cases listed in
                  allowed values come from the stable release
                  version 0.01.32. The binary included here
                  compiled from dev version 0.06.01. Refer to
                  man page for the definition of each stressor.
                  '''),
        Parameter('extra_args', kind=str, default="",
                  description='''
                  Extra arguments to pass to the workload.
                  Please note that these are not checked for validity.
                  '''),
        Parameter('threads', kind=int, default=0,
                  description='''
                  The number of workers to run. Specifying a negative
                  or zero value will select the number of online
                  processors.
                  '''),
        Parameter('duration', kind=int, default=60,
                  description='''
                  Timeout for test execution in seconds
                  ''')
    ]
    @once
    def initialize(self, context):
        """Install the stress-ng binary on the target; requires a rooted device."""
        # NOTE(review): 'premissions' typo in the message below (runtime string,
        # left untouched here).
        if not self.target.is_rooted:
            raise WorkloadError('stress-ng requires root premissions to run')
        resource = Executable(self, self.target.abi, 'stress-ng')
        host_exe = context.get_resource(resource)
        StressNg.binary = self.target.install(host_exe)
    def setup(self, context):
        """Build the stress-ng command line and the on-target output paths."""
        self.log = self.target.path.join(self.target.working_directory,
                                         'stress_ng_output.txt')
        self.results = self.target.path.join(self.target.working_directory,
                                             'stress_ng_results.yaml')
        self.command = ('{} --{} {} {} --timeout {}s --log-file {} --yaml {} '
                        '--metrics-brief --verbose'
                        .format(self.binary, self.stressor, self.threads,
                                self.extra_args, self.duration, self.log,
                                self.results))
        # Allow some slack over the stress-ng --timeout before giving up.
        self.timeout = self.duration + 10
    def run(self, context):
        """Execute the prepared stress-ng command on the target (as root)."""
        self.output = self.target.execute(self.command, timeout=self.timeout,
                                          as_root=True)
    def extract_results(self, context):
        """Pull the log and YAML results from the target and register artifacts."""
        self.host_file_log = os.path.join(context.output_directory,
                                          'stress_ng_output.txt')
        self.host_file_results = os.path.join(context.output_directory,
                                              'stress_ng_results.yaml')
        self.target.pull(self.log, self.host_file_log)
        self.target.pull(self.results, self.host_file_results)
        context.add_artifact('stress_ng_log', self.host_file_log, 'log', "stress-ng's logfile")
        context.add_artifact('stress_ng_results', self.host_file_results, 'raw', "stress-ng's results")
    def update_output(self, context):
        """Parse the YAML results and publish the bogo-ops throughput metric."""
        with open(self.host_file_results, 'r') as stress_ng_results:
            results = yaml.load(stress_ng_results)
        try:
            metric = results['metrics'][0]['stressor']
            throughput = results['metrics'][0]['bogo-ops']
            context.add_metric(metric, throughput, 'ops')
        # For some stressors like vm, if test duration is too short, stress_ng
        # may not able to produce test throughput rate.
        except TypeError:
            msg = '{} test throughput rate not found. Please increase test duration and retry.'
            self.logger.warning(msg.format(self.stressor))
    def validate(self):
        """Reject configurations stress-ng cannot report on (short vm runs)."""
        if self.stressor == 'vm' and self.duration < 60:
            raise ConfigError('vm test duration needs to be >= 60s.')
    @once
    def finalize(self, context):
        """Remove the installed binary from the target if uninstall is enabled."""
        if self.uninstall:
            self.target.uninstall('stress-ng')
| [
"douglas.raillard@arm.com"
] | douglas.raillard@arm.com |
2dd191df2926ec7646f8237bd963367f6f5fc723 | fff333e6a307969e0ab2eecd8cd68104eaaae506 | /ex15.py | 2a553a5716b03ef23d3d3a48f0ae17bc8ef91274 | [] | no_license | anjalirmenon/pythonhardway | b01ef055755e49426eab674c56769a93b2e75a82 | e1ceeb7122605048d997e12ba9a6871c86ea3879 | refs/heads/master | 2021-01-01T18:42:15.293336 | 2012-06-30T09:41:36 | 2012-06-30T09:41:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from sys import argv
script, filename = argv          # argv[0] is this script, argv[1] the file to show
text = open(filename,'r')
print "here is your file %r:" % filename
print text.read()
print "type the filename again:"
filen = raw_input("->")
textn = open(filen,'r')          # NOTE(review): neither file handle is ever closed
print textn.read()
| [
"anjali@anjali-Inspiron-1525.(none)"
] | anjali@anjali-Inspiron-1525.(none) |
a456e6512b512c884477e8b40179be2c3e94e3d0 | d3de9296f9ce0d881f7d44647872714cea9de1cd | /Chapter_9/Exercise_1.py | 8ebe2822b662b9afd9325511cb3ec042d1d2e0e6 | [] | no_license | NBarnfield/CS-3-Coding-Exercises | baf7ac6832ab6bf05cabc66f437dec051844985b | b7bf2216a70d57c3f39a145f0242605c49365bd7 | refs/heads/master | 2021-04-06T12:31:45.667659 | 2018-07-16T22:36:32 | 2018-07-16T22:36:32 | 125,324,488 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # Ask user for file name input.
# NOTE(review): `file` shadows the builtin of the same name.
file = input("Enter file: ")
# Attempt to open the file, otherwise quit on failure.
try:
    fhand = open(file, 'r')
except IOError:
    print("File name {} is not valid.".format(file))
    quit()
# Word -> occurrence-count dictionary built from the whole file.
flat_land_text = dict()
# Strip each of the lines and feed it into the dictionary
for line in fhand:
    line = line.rstrip()
    text_list = line.split()
    for word in text_list:
        flat_land_text[word] = flat_land_text.get(word, 0) + 1
print("The end result of all value pairs is ", flat_land_text)
# Ask user for input and check to see if it is in the dictionary. Print boolean value.
while True:
    user_input = input("What word would you like to test to see if it is in the dictionary? Enter DONE! to finish.")
    if user_input == 'DONE!': break
    print(user_input in flat_land_text)
| [
"22405972+NBarnfield@users.noreply.github.com"
] | 22405972+NBarnfield@users.noreply.github.com |
6559f83f0d0cbcf7125aa5a36c0b754ee5fa8a2d | a416e5d08d5623d92a7becdf287ca2bb6dcc1e37 | /Widgets/FileDialogWidget.py | d8ef54e7503ce59a819fcc1fedfcc282e1b16f44 | [] | no_license | DaGnYMilleR/DiskUsage | 7199e856e31e4f486a80510504cf09e3a13a47c4 | 6e4aab17be110a5e4b793edb335ae04d213ec232 | refs/heads/master | 2022-12-20T21:58:10.161703 | 2020-09-22T03:50:23 | 2020-09-22T03:50:23 | 265,805,864 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class FileDialogWidget(QWidget):
    """Top-level greeting window offering a button to choose the working directory.

    The button is exposed as `open_file_dialog_button`; presumably the owning
    window connects its clicked signal — TODO confirm against the caller.
    """

    def __init__(self, parent=None):
        super().__init__(parent, Qt.Window)
        layout = QVBoxLayout()
        self.setLayout(layout)
        greeting = QLabel('Hello! This is disk usage app \n choose working directory')
        greeting.setFont(QFont('SansSerif', 20))
        greeting.setGeometry(0, 0, 300, 100)
        self.open_file_dialog_button = QPushButton("...Browse...")
        layout.addWidget(greeting, alignment=Qt.AlignTop)
        layout.addWidget(self.open_file_dialog_button, 200)
        self.setGeometry(500, 300, 300, 300)
        self.show()
"danil.lunyow@mail.com"
] | danil.lunyow@mail.com |
4d0e6b0ee336732c1d3d260b1010a25768e65c70 | c29e2304a90b8e836c84e6cb5d85821c534197e7 | /db_monitor_query/settings.py | f36e673a302535c9c8422942830d80716f9e9f2e | [] | no_license | devin1982/MySQLGuard | f5312ff20fdd13b9ddad7610c6c665180867c46f | 3289e17a7ba20415189c27281ed1a6bd5ddf6dd7 | refs/heads/master | 2021-04-15T15:18:27.315448 | 2016-06-12T02:46:13 | 2016-06-12T02:46:13 | 60,685,929 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,526 | py | """
Django settings for db_monitor_query project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and load
# it from the environment instead.
SECRET_KEY = 'o(1%%qzfdb+a$sapiw@7i_r&5m)7*8$q!=!fnseeb3tn$bat6%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
# NOTE(review): wildcard accepts any Host header; restrict in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'querystat',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'pagination.middleware.PaginationMiddleware',
)
ROOT_URLCONF = 'db_monitor_query.urls'
WSGI_APPLICATION = 'db_monitor_query.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
#}
# MySQL connection. USER/PASSWORD/HOST are blank here — NOTE(review): populate
# them from the environment or a secret store rather than committing them.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'db_monitor',
        'USER':'',
        'PASSWORD':'',
        'HOST':'',
        'PORT':'3306',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
#LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
#for session
SESSION_COOKIE_AGE = 60*10
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_SAVE_EVERY_REQUEST = True
STATIC_ROOT='/root/p_web/db_monitor_query/static'
| [
"351430851@qq.com"
] | 351430851@qq.com |
5be631217ba587316fab5111465242cefa9532a0 | 6c706fb7923a560ccbc1b196799f2b81d15212e5 | /kinonh/kinonh.py | 4890f67ae775775e781e0b4c001ed54449a34292 | [] | no_license | Torak28/Misc | 99051186bd9426a5a07091c3f7d2ce5cb49e6b43 | 0c1107f9dfe1c6e4e9f8154ab81c186b45cc0da7 | refs/heads/master | 2023-06-11T15:55:06.127824 | 2023-05-30T18:25:38 | 2023-05-30T18:25:38 | 161,793,978 | 0 | 0 | null | 2023-05-29T13:39:27 | 2018-12-14T14:17:50 | HTML | UTF-8 | Python | false | false | 556 | py | import requests
import re
from bs4 import BeautifulSoup
import os
link = 'https://www.kinonh.pl/artykul.do?id=2412'   # article page listing the audio links
r = r'href="(.+?)"'                                  # regex capturing each href target
base = 'https://www.kinonh.pl/'
page = requests.get(link)
page.encoding = 'utf-8'
if page.status_code == 200:
    soup = BeautifulSoup(page.text, 'html.parser')
    scripts = soup.find_all("a", {"class": "audio"})
    for script in scripts:
        matches = re.findall(r, str(script))
        for i in range(len(matches)):
            # Percent-encode spaces — NOTE(review): capped at 3 replacements,
            # so URLs with more spaces stay broken; urllib.parse.quote would
            # handle all of them.
            link = str(matches[i]).replace(' ', '%20', 3)
            # NOTE(review): shelling out with an unquoted, page-derived URL is
            # command-injection prone; prefer subprocess.run([...]) or
            # downloading via requests.
            cmd = 'wget ' + base + link
            os.system(cmd)
# 13 lectures
"zelechowski28@gmail.com"
] | zelechowski28@gmail.com |
c42a78a810297a0b2065cc571e5e48246c02022b | 318c76cec15ebbcd382eab914d3ae6bb4c4d3b74 | /Choropleth-Map.py | fe0a54bafc13e2a38361eb2919e6992f0cd2d623 | [] | no_license | Sneha46749/Webmaps-using-Python-and-Folium | c6c47da6a376484b65924adbe28b0552ec942de7 | 3a0c0d6889c529bd3d54eb6a28d0d1cd42d55bfc | refs/heads/master | 2022-11-29T16:02:06.255232 | 2020-08-08T16:00:44 | 2020-08-08T16:00:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | import folium
import pandas
# Volcano coordinates and elevations from the CSV (columns LAT/LON/ELEV).
data = pandas.read_csv("Volcanoes.txt")
lat = list(data["LAT"]) #converting the data from dataframe list to python list
lon = list(data["LON"])
elev = list(data["ELEV"])
# Base map centred over the data region with a terrain tile layer.
map2 = folium.Map(location=[36.58, -99.09], zoom_start=6, tiles="Stamen Terrain")
fg=folium.FeatureGroup(name="My Map")
def color_producer(elevation):
    """Map an elevation (metres) to a marker colour band.

    < 1000 -> green, 1000-2999 -> orange, >= 3000 -> red.
    """
    if elevation >= 3000:
        return 'red'
    if elevation >= 1000:
        return 'orange'
    return 'green'
# One circle marker per volcano, coloured by elevation band; the popup shows metres.
for lt, ln, el in zip(lat, lon, elev): #zip function is used when we iterate through two lists at a time
    fg.add_child(folium.CircleMarker(location=[lt, ln], popup=str(el)+ " m",
    fill_color=color_producer(el), color = 'grey',
    fill_opacity = 0.7)) #popup attribute takes a string input
# Country polygons from world.json, shaded by their POP2005 population.
fg.add_child(folium.GeoJson(data = open("world.json", "r" , encoding = "utf-8-sig").read(),
style_function = lambda x: {'fillColor' : 'green' if x['properties']['POP2005'] < 10000000
else 'orange' if 10000000<= x['properties']['POP2005'] < 20000000 else 'red'}))
map2.add_child(fg)
map2.save("Map2.html")
| [
"sneha46749@gmail.com"
] | sneha46749@gmail.com |
bea089a19723769c6d90932cf4642c10f5085e80 | dc82ab2928c5a357acf6294097fcd07b21d061a2 | /dtreesc.py | dbd362f19f2508a396e2800857c2a988eaa78eb4 | [] | no_license | akshat-khare/mlass3 | ab6b32c650459c8b117626bb3447c8f0064294ba | 4691c5dab68eb67b4eeb6aa9bab7d4036aa4f1e2 | refs/heads/master | 2020-05-04T20:21:57.138520 | 2019-04-16T00:59:59 | 2019-04-16T00:59:59 | 179,435,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,213 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
# --- Parse the training CSV given as argv[1] --------------------------------
# Each row: column 0 is an id (ignored), columns 1-23 are the 23 features and
# column 24 is the 0/1 class label.  The first two lines are headers.
trainfname = open(sys.argv[1], 'r')
xarrtrainori = []   # raw feature columns: xarrtrainori[i][j] = feature i of row j
yarrtrain = []      # one label per row
# Per-feature type code (see comment below).
xidentifier= [3,1,2,2,3,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3]
medianarr = [0.0]*23    # training medians of the continuous features (filled later)
xnumchildhelper=[]      # number of children a split on each feature produces
for i in range(23):
    if(xidentifier[i]!=2):
        xnumchildhelper.append(2)   # binary / median-binarised continuous
    else:
        if(i==2):
            xnumchildhelper.append(7)   # feature 2: 7 categories
        elif(i==3):
            xnumchildhelper.append(4)   # feature 3: 4 categories
        elif(i>=5 and i<=10):
            xnumchildhelper.append(12)  # features 5-10: 12 values after the +2 shift
print(xnumchildhelper)
# 1 means binary, 2 means categorical, 3 means continuous
for i in range(23):
    xarrtrainori.append([]);
numtrain=0
for line in trainfname:
    numtrain+=1
    if(numtrain<=2):
        continue    # skip the two header lines
    linearr= line.split(',')
    # print(linearr)
    for i in range(23):
        xarrtrainori[i].append(int(linearr[i+1]))
    yarrtrain.append(int(linearr[24]))
    # if(numtrain>600):
    #     break
numtrain = numtrain-2   # header lines do not count as examples
print("parsing done")
# print(xarrtrainori)
# print("y is")
# print(yarrtrain)
# In[2]:
# --- Parse the validation CSV (argv[2]); identical layout to training -------
valifname = open(sys.argv[2], 'r')
xarrvaliori = []    # raw validation feature columns, indexed [feature][row]
yarrvali = []       # validation labels
for i in range(23):
    xarrvaliori.append([]);
numvali=0
for line in valifname:
    numvali+=1
    if(numvali<=2):
        continue    # skip the two header lines
    linearr= line.split(',')
    # print(linearr)
    for i in range(23):
        xarrvaliori[i].append(int(linearr[i+1]))
    yarrvali.append(int(linearr[24]))
    # if(numtrain>600):
    #     break
numvali = numvali-2     # header lines do not count as examples
print("parsing done")
# print(xarrtrainori)
# print("y is")
# print(yarrtrain)
# In[3]:
# --- Parse the test CSV (argv[3]); identical layout to training -------------
testfname = open(sys.argv[3], 'r')
xarrtestori = []    # raw test feature columns, indexed [feature][row]
yarrtest = []       # test labels
for i in range(23):
    xarrtestori.append([]);
numtest=0
for line in testfname:
    numtest+=1
    if(numtest<=2):
        continue    # skip the two header lines
    linearr= line.split(',')
    # print(linearr)
    for i in range(23):
        xarrtestori[i].append(int(linearr[i+1]))
    yarrtest.append(int(linearr[24]))
    # if(numtrain>600):
    #     break
numtest = numtest-2     # header lines do not count as examples
print("parsing done")
# print(xarrtrainori)
# print("y is")
# print(yarrtrain)
# In[4]:
# --- Discretise the training features into xarrtrain ------------------------
xarrtrain=[]
for i in range(23):
    xarrtrain.append([])
for i in range(23):
    if(i>=5 and i<=10):
        # Shift features 5-10 up by 2 to make them 0-based category ids
        # (presumably raw values start at -2 — TODO confirm in the data dict).
        for j in range(numtrain):
            xarrtrain[i].append(xarrtrainori[i][j]+2)
    elif(i==1):
        # Feature 1 is binary; shift down by 1 (presumably raw {1,2} -> {0,1}
        # — TODO confirm against the data dictionary).
        for j in range(numtrain):
            xarrtrain[i].append(xarrtrainori[i][j]-1)
    elif(xidentifier[i]!=3):
        # Remaining categorical features are used as-is.
        for j in range(numtrain):
            xarrtrain[i].append(xarrtrainori[i][j])
    else:
        # Continuous feature: binarise at its training median.
        templist=[]
        for j in range(numtrain):
            templist.append(xarrtrainori[i][j])
        # templist = xarrtrainori[i]
        templist.sort()
        # print(templist)
        median=0.0
        if(numtrain%2==1):
            median=templist[int(numtrain/2)]
        else:
            median = (0.5*(templist[int(numtrain/2)] + templist[int(numtrain/2)-1]))
        medianarr[i] = median   # remembered so validation/test reuse the threshold
        print("median for "+str(i) + " is "+ str(median))
        for j in range(numtrain):
            if(xarrtrainori[i][j]>median):
                xarrtrain[i].append(1)
            else:
                xarrtrain[i].append(0)
# print(xarrtrain)
# print(yarrtrain)
# In[5]:
# --- Discretise the validation features into xarrvali -----------------------
# Same transforms as training; continuous features are thresholded with the
# TRAINING medians (medianarr) so both splits share thresholds.
xarrvali=[]
for i in range(23):
    xarrvali.append([])
for i in range(23):
    if(i>=5 and i<=10):
        for j in range(numvali):
            xarrvali[i].append(xarrvaliori[i][j]+2)
    elif(i==1):
        for j in range(numvali):
            xarrvali[i].append(xarrvaliori[i][j]-1)
    elif(xidentifier[i]!=3):
        for j in range(numvali):
            xarrvali[i].append(xarrvaliori[i][j])
    else:
        # templist=[]
        # for j in range(numvali):
        #     templist.append(xarrvaliori[i][j])
        # # templist = xarrvaliori[i]
        # templist.sort()
        # print(templist)
        median=medianarr[i]     # threshold learned on the training split
        # if(numvali%2==1):
        #     median=templist[int(numvali/2)]
        # else:
        #     median = (0.5*(templist[int(numvali/2)] + templist[int(numvali/2)+1]))
        print("median for "+str(i) + " is "+ str(median))
        for j in range(numvali):
            if(xarrvaliori[i][j]>median):
                xarrvali[i].append(1)
            else:
                xarrvali[i].append(0)
# print(xarrvali)
# print(yarrvali)
# In[6]:
import numpy as np
# Validation features as a NumPy matrix transposed to shape (numvali, 23):
# one row per example, one column per feature.
xarrvalinp = np.array(xarrvali).reshape((23,numvali))
xarrvalinp = np.transpose(xarrvalinp)
# In[7]:
# --- Discretise the test features into xarrtest -----------------------------
# Same transforms as training; continuous features reuse the TRAINING medians.
xarrtest=[]
for i in range(23):
    xarrtest.append([])
for i in range(23):
    if(i>=5 and i<=10):
        for j in range(numtest):
            xarrtest[i].append(xarrtestori[i][j]+2)
    elif(i==1):
        for j in range(numtest):
            xarrtest[i].append(xarrtestori[i][j]-1)
    elif(xidentifier[i]!=3):
        for j in range(numtest):
            xarrtest[i].append(xarrtestori[i][j])
    else:
        # templist=[]
        # for j in range(numtest):
        #     templist.append(xarrtestori[i][j])
        # # templist = xarrtestori[i]
        # templist.sort()
        # print(templist)
        median=medianarr[i]     # threshold learned on the training split
        # if(numtest%2==1):
        #     median=templist[int(numtest/2)]
        # else:
        #     median = (0.5*(templist[int(numtest/2)] + templist[int(numtest/2)+1]))
        print("median for "+str(i) + " is "+ str(median))
        for j in range(numtest):
            if(xarrtestori[i][j]>median):
                xarrtest[i].append(1)
            else:
                xarrtest[i].append(0)
# print(xarrtest)
# print(yarrtest)
# In[8]:
# print(xarrtrain[4])
# print(xarrtrainori[0])
# In[9]:
debug=0
# 1 is true 0 is false: set debug=1 to enable the verbose trace prints in the
# tree-building helpers below.
import math
def printtree(thisnode):
    """Dump the (sub)tree rooted at thisnode to stdout, depth-first.

    A node with childlist None is reported as childless; an empty childlist
    marks a leaf and prints its label before the trailing separator.
    """
    print(thisnode.xsplit)
    print("|")
    if thisnode.childlist is None:
        print("None is child")
        return
    if not thisnode.childlist:
        print("Leaf " + str(thisnode.yleaf))
    for child in thisnode.childlist:
        printtree(child)
    print("--")
class Node:
    """A node of the decision tree built over the 23 discretised attributes."""

    def __init__(self, ylist, target):
        # Two-element class-count pair (fed to entropy()); copied defensively.
        self.ylist = list(ylist)
        self.childlist = None       # populated by setchild() when the node splits
        # Indices of the training rows that reach this node; copied defensively.
        self.target = list(target)
        self.yleaf = None           # predicted label once the node becomes a leaf
        self.spliton = None         # attribute index this node splits on
        self.splitvalue = None
        # Per-attribute marker: -1 means "not yet split on along this path".
        self.xsplit = [-1] * 23

    def setchild(self, childlist):
        """Replace the children with a copy of the given list."""
        self.childlist = list(childlist)

    def updatexsplit(self, index, val):
        """Record a split marker for a single attribute."""
        self.xsplit[index] = val

    def setxsplit(self, xsplit):
        """Copy the given markers element-wise into this node's xsplit."""
        for i, v in enumerate(xsplit):
            self.xsplit[i] = v

    def setyleaf(self, val):
        self.yleaf = val

    def setspliton(self, val):
        self.spliton = val

    def setsplitval(self, val):
        self.splitvalue = val
def entropy(arr):
    """Binary Shannon entropy (natural log) of a two-bin count vector.

    arr: [count_of_class_0, count_of_class_1].
    Returns ln(2) for an empty node (maximum uncertainty, by this code's
    convention), 0.0 for a pure node, and -sum(p * ln(p)) otherwise.
    """
    total = (arr[0] + arr[1]) * 1.0
    if (arr[0] + arr[1]) == 0:
        if debug == 1:
            print("Zero Zero case in entropy----------")
        # FIX: math.log already computes the natural log; the original
        # divided every term by math.log(math.exp(1)) == 1.0, a no-op.
        return math.log(2)
    elif arr[0] == 0 or arr[1] == 0:
        if debug == 1:
            print("Zero one or one zero entropy case------")
        return 0.0
    result = 0.0
    for i in range(2):
        p = (1.0 * arr[i]) / total
        result += -p * math.log(p)
    return result
def test(arr,thisnode):
    """Classify one sample by walking the part-b tree.

    arr: the sample's 23 feature values; thisnode: current subtree root.
    A node with exactly one child is a leaf wrapper -- return its label.
    Otherwise descend into the child indexed by the sample's value of the
    node's split feature (categorical values double as child indices).
    """
    if(len(thisnode.childlist)==1):
        return thisnode.childlist[0].yleaf
    else:
        temp = thisnode.spliton
        return test(arr, thisnode.childlist[arr[temp]])
def choosebestattr(thisnode):
    """Return the index (0..22) of the unused feature with maximal information gain.

    Uses module globals: xarrtrain (feature columns), yarrtrain (labels),
    xnumchildhelper (number of distinct values per categorical feature).
    Features already used on the path to this node score -inf.
    """
    itarr =[]  # information gain per feature
    for i in range(23):
        if((thisnode.xsplit[i])>=0):
            # Feature already consumed on the path to this node.
            itarr.append(float('-inf'))
            continue
        tempinf=[]
        numtarget=len(thisnode.target)
        yattarr=[]   # per attribute value: [count_y0, count_y1]
        numattr=[]   # per attribute value: sample count
        for j in range(xnumchildhelper[i]):
            yattarr.append([0,0])
            numattr.append(0)
        for j in thisnode.target:
            tempk = xarrtrain[i][j]
            numattr[tempk]=numattr[tempk]+1
            if(yarrtrain[j]==0):
                yattarr[tempk][0]=yattarr[tempk][0]+1
            else:
                yattarr[tempk][1]=yattarr[tempk][1]+1
        # Weighted child entropy H(Y | X_i), summed over attribute values.
        for j in range(xnumchildhelper[i]):
            temp = ( (1.0*(numattr[j])) / (1.0*numtarget) ) * (entropy(yattarr[j]) )
            tempinf.append(temp)
        temp=0.0
        for j in range(xnumchildhelper[i]):
            temp+=tempinf[j]
        if(debug==1): print("hb is")
        if(debug==1): print(temp)
        # Information gain = H(Y) - H(Y | X_i).
        temp = entropy(thisnode.ylist)-temp
        # print(entropy(thisnode.ylist))
        itarr.append(temp)
    # Scan indices 22 down to 0 with a strict ">": ties keep the first
    # maximum encountered in that descending order.
    tempval=itarr[0]
    maxone=0
    for i in range(23):
        if(itarr[22-i]>tempval):
            tempval=itarr[22-i]
            maxone=22-i
    if(debug==1): print(itarr)
    if(debug==1): print("max inf gain is "+str(tempval))
    return maxone
def allfeatureexplored(thisnode):
    """Return 1 while some feature is still unused (xsplit entry < 0), else 0.

    NOTE: the name is inverted -- a return of 0 means "all features explored".
    """
    for feature_index in range(23):
        if thisnode.xsplit[feature_index] < 0:
            # False -- at least one feature remains.
            return 1
    # True -- every feature has been consumed on this path.
    return 0
def grownode(thisnode):
    """Recursively expand a part-b tree node in place.

    Leaves are encoded as a single child whose yleaf carries the label:
    - no class-0 samples (including the empty node) -> leaf 1,
    - no class-1 samples -> leaf 0,
    - all 23 features already used on this path -> majority-class leaf,
    - otherwise split on the best attribute and recurse into one child per
      attribute value.  Uses globals Node, xarrtrain, yarrtrain,
      xnumchildhelper, debug.
    """
    if(thisnode.ylist[0]==0):
        # Pure (or empty) node: every sample is class 1.
        tempnode = Node(thisnode.ylist,thisnode.target)
        tempnode.setchild([])
        tempnode.yleaf=1
        tempnode.setxsplit(thisnode.xsplit)
        temp=[]
        temp.append(tempnode)
        thisnode.setchild(temp)
        return
    elif(thisnode.ylist[1]==0):
        # Pure node: every sample is class 0.
        tempnode = Node(thisnode.ylist,thisnode.target)
        tempnode.setchild([])
        tempnode.yleaf=0
        tempnode.setxsplit(thisnode.xsplit)
        temp=[]
        temp.append(tempnode)
        thisnode.setchild(temp)
        return
    elif(allfeatureexplored(thisnode)==0):
        # No features left: majority-class leaf (ties go to class 0).
        tempnode = Node(thisnode.ylist,thisnode.target)
        tempnode.setchild([])
        if(tempnode.ylist[1]>tempnode.ylist[0]):
            tempnode.yleaf=1
        else:
            tempnode.yleaf=0
        tempnode.setxsplit(thisnode.xsplit)
        temp=[]
        temp.append(tempnode)
        thisnode.setchild(temp)
        return
    else:
        bestattr=choosebestattr(thisnode)
        if(debug==1): print("best attr is "+str(bestattr))
        tempnumchild = xnumchildhelper[bestattr]
        tempchildarr=[]
        # One child per attribute value; partition the samples accordingly.
        for i in range(tempnumchild):
            temptarget=[]
            tempylist=[0,0]
            for j in thisnode.target:
                if(xarrtrain[bestattr][j]==i):
                    temptarget.append(j)
                    if(yarrtrain[j]==0):
                        tempylist[0]= tempylist[0]+1
                    else:
                        tempylist[1]= tempylist[1]+1
            tempnode = Node(tempylist,temptarget)
            tempnode.setxsplit(thisnode.xsplit)
            tempnode.updatexsplit(bestattr,i)
            # print("bestarr is "+str(bestattr))
            # print("i is "+str(i))
            # print(tempnode.xsplit)
            # print(i)
            # print(tempylist)
            grownode(tempnode)
            tempchildarr.append(tempnode)
        # print("before setting child")
        # printtree(thisnode)
        thisnode.setspliton(bestattr)
        thisnode.setchild(tempchildarr)
        # print("after setting child")
        # printtree(thisnode)
        return
# Seed the part-b tree: the root covers every training sample, with its
# class counts in tempysplit.
tempysplit=[0,0]
temptarget=[]
for i in range(numtrain):
    temptarget.append(i)
    if(yarrtrain[i]==0):
        tempysplit[0]= tempysplit[0]+1
    else:
        tempysplit[1]= tempysplit[1]+1
root = Node(tempysplit,temptarget)
# In[23]:
# Part-c training features: same column shifts as before, but continuous
# columns (xidentifier == 3) are kept raw -- part c thresholds them on the
# fly at each split instead of pre-binarizing.
xarrtrainc=[]
for i in range(23):
    xarrtrainc.append([])
for i in range(23):
    if(i>=5 and i<=10):
        for j in range(numtrain):
            xarrtrainc[i].append(xarrtrainori[i][j]+2)
    elif(i==1):
        for j in range(numtrain):
            xarrtrainc[i].append(xarrtrainori[i][j]-1)
    elif(xidentifier[i]!=3):
        for j in range(numtrain):
            xarrtrainc[i].append(xarrtrainori[i][j])
    else:
        # Identical to the branch above; kept separate to mirror the
        # structure of the part-a/b preprocessing where this branch
        # binarized against a median.
        for j in range(numtrain):
            xarrtrainc[i].append(xarrtrainori[i][j])
# print(xarrtrain)
# print(yarrtrain)
# In[24]:
# part c
debug=0
medianarr=[0.0]*23        # reset; part c picks a fresh median at each continuous split
maxsplitallowed=30        # cap on splits per continuous feature along one path
maxsplitarrmy=[0]*23      # bookkeeping: deepest split count observed per feature
def allfeatureexploredc(thisnode, numsplit):
    """Count the features that can still be split at this node (part c).

    Categorical features (xidentifier != 3) count while unused on this path
    (xsplit entry < 0); continuous features count while their per-path split
    quota (maxsplitallowed) is not exhausted.  Side effect: records the
    largest numsplit value seen per feature in the global maxsplitarrmy.
    Returns 0 when nothing is splittable; despite the name, the result is a
    count, not a boolean flag.
    """
    # print("hello")
    temp=0
    for i in range(23):
        if(xidentifier[i]!=3):
            if(thisnode.xsplit[i]<0):
                #False
                temp+=1
                # return 1
        else:
            # Continuous feature: track the deepest split count observed.
            if(numsplit[i]>maxsplitarrmy[i]):
                # print(numsplit[i])
                # print(i)
                # print("update")
                maxsplitarrmy[i]=numsplit[i]
            if(numsplit[i]<maxsplitallowed):
                temp+=1
                # return 1
    #True
    return temp
# Child arity per feature for part c: categorical features keep their value
# count, continuous features always split into two (<= median / > median).
xnumchildhelperc=[]
for i in range(23):
    if(xidentifier[i]!=3):
        xnumchildhelperc.append(xnumchildhelper[i])
    else:
        xnumchildhelperc.append(2)
def choosebestattrc(thisnode,numsplit):
    """Pick the best split feature for a part-c node.

    Categorical features behave as in part b; continuous features
    (xidentifier == 3) are evaluated as a binary split at the median of the
    samples reaching this node.  Returns (best_feature_index, medianarr)
    where medianarr holds the candidate median per continuous feature.
    """
    #print("---------------choose best attribute--------------")
    itarr =[]
    # Local medianarr intentionally shadows the module-level one; the chosen
    # entry is stored on the node by the caller (grownodec).
    medianarr = [0.0]*23
    for i in range(23):
        if(xidentifier[i]!=3):
            if((thisnode.xsplit[i])>=0):
                # Categorical feature already used on this path.
                itarr.append(float('-inf'))
                continue
        else:
            #print(numsplit)
            if(numsplit[i] >= maxsplitallowed):
                # Continuous feature out of split quota on this path.
                itarr.append(float('-inf'))
                continue
        tempinf=[]
        numtarget=len(thisnode.target)
        yattarr=[]
        numattr=[]
        tempmedian=0.0
        if(xidentifier[i]==3):
            # Median of this feature over the samples at this node; even
            # counts average the two middle values.
            templist =[]
            for j in thisnode.target:
                templist.append(xarrtrainc[i][j])
            templist.sort()
            #print(len(xarrtrainc[i]))
            #print(len(templist))
            #print(numtarget)
            # tempmedianindex = templist[int(numtarget/2)]
            # print(tempmedianindex)
            if(numtarget%2==1):
                tempmedian=templist[int(numtarget/2)]
            else:
                tempmedian=0.5*(templist[int(numtarget/2)]+templist[int(numtarget/2) -1])
            medianarr[i]=tempmedian
        for j in range(xnumchildhelperc[i]):
            yattarr.append([0,0])
            numattr.append(0)
        for j in thisnode.target:
            if(xidentifier[i]==3):
                # Continuous: bucket 1 above the median, bucket 0 otherwise.
                if(xarrtrainc[i][j]>tempmedian):
                    tempk=1
                else:
                    tempk=0
            else:
                tempk = xarrtrain[i][j]
            numattr[tempk]=numattr[tempk]+1
            if(yarrtrain[j]==0):
                yattarr[tempk][0]=yattarr[tempk][0]+1
            else:
                yattarr[tempk][1]=yattarr[tempk][1]+1
        # Weighted child entropy, then gain = H(Y) - H(Y | X_i).
        for j in range(xnumchildhelperc[i]):
            temp = ( (1.0*(numattr[j])) / (1.0*numtarget) ) * (entropy(yattarr[j]) )
            tempinf.append(temp)
        temp=0.0
        for j in range(xnumchildhelperc[i]):
            temp+=tempinf[j]
        if(debug==1): print("hb is")
        if(debug==1): print(temp)
        temp = entropy(thisnode.ylist)-temp
        # print(entropy(thisnode.ylist))
        itarr.append(temp)
    # Descending scan with strict ">": ties keep the first maximum found
    # from index 22 downward.
    tempval=itarr[0]
    maxone=0
    for i in range(23):
        if(itarr[22-i]>tempval):
            tempval=itarr[22-i]
            maxone=22-i
    if(debug==1): print(itarr)
    if(debug==1): print("max inf gain is "+str(tempval))
    return maxone, medianarr
def grownodec(thisnode, numsplit):
    """Recursively expand a part-c tree node (continuous features allowed).

    numsplit[i] counts how many times continuous feature i has been split on
    the path to this node (capped by maxsplitallowed).  Leaves are encoded as
    a single child whose yleaf carries the label, exactly as in part b.
    """
    if(thisnode.ylist[0]==0):
        # Pure (or empty) node: every sample is class 1.
        tempnode = Node(thisnode.ylist,thisnode.target)
        tempnode.setchild([])
        tempnode.yleaf=1
        tempnode.setxsplit(thisnode.xsplit)
        temp=[]
        temp.append(tempnode)
        thisnode.setchild(temp)
        return
    elif(thisnode.ylist[1]==0):
        # Pure node: every sample is class 0.
        tempnode = Node(thisnode.ylist,thisnode.target)
        tempnode.setchild([])
        tempnode.yleaf=0
        tempnode.setxsplit(thisnode.xsplit)
        temp=[]
        temp.append(tempnode)
        thisnode.setchild(temp)
        return
    # BUG FIX: allfeatureexploredc() returns the COUNT of still-splittable
    # features (0..23), so the original test `<= 23` was always true and
    # every impure node immediately became a majority-class leaf.  Mirroring
    # part b (`allfeatureexplored(thisnode) == 0`), we stop only when no
    # feature remains splittable.  (The call also keeps its side effect of
    # updating the global maxsplitarrmy bookkeeping.)
    elif(allfeatureexploredc(thisnode,numsplit)==0):
        tempnode = Node(thisnode.ylist,thisnode.target)
        tempnode.setchild([])
        if(tempnode.ylist[1]>tempnode.ylist[0]):
            tempnode.yleaf=1
        else:
            tempnode.yleaf=0
        tempnode.setxsplit(thisnode.xsplit)
        temp=[]
        temp.append(tempnode)
        thisnode.setchild(temp)
        return
    else:
        bestattr, medianarr=choosebestattrc(thisnode,numsplit)
        if(debug==1): print("best attr is "+str(bestattr))
        tempnumchild = xnumchildhelperc[bestattr]
        tempchildarr=[]
        for i in range(tempnumchild):
            temptarget=[]
            tempylist=[0,0]
            for j in thisnode.target:
                if(xidentifier[bestattr]==3):
                    # Continuous: child 1 gets samples above the median,
                    # child 0 the rest.
                    if((xarrtrainc[bestattr][j]>medianarr[bestattr] and i==1) or (xarrtrainc[bestattr][j]<=medianarr[bestattr] and i==0 ) ):
                        temptarget.append(j)
                        if(yarrtrain[j]==0):
                            tempylist[0]= tempylist[0]+1
                        else:
                            tempylist[1]= tempylist[1]+1
                else:
                    if(xarrtrainc[bestattr][j]==i):
                        temptarget.append(j)
                        if(yarrtrain[j]==0):
                            tempylist[0]= tempylist[0]+1
                        else:
                            tempylist[1]= tempylist[1]+1
            tempnode = Node(tempylist,temptarget)
            tempnode.setxsplit(thisnode.xsplit)
            tempnode.updatexsplit(bestattr,i)
            # print("bestarr is "+str(bestattr))
            # print("i is "+str(i))
            # print(tempnode.xsplit)
            # print(i)
            # print(tempylist)
            # Continuous splits consume one unit of this feature's quota.
            numsplitnew=[]
            for j in range(23):
                numsplitnew.append(numsplit[j])
            if(xidentifier[bestattr]==3):
                numsplitnew[bestattr] = numsplitnew[bestattr]+1
            grownodec(tempnode,numsplitnew)
            tempchildarr.append(tempnode)
        # print("before setting child")
        # printtree(thisnode)
        thisnode.setspliton(bestattr)
        if(xidentifier[bestattr]==3):
            # Remember the threshold so testc() can route samples later.
            thisnode.setsplitval(medianarr[bestattr])
        thisnode.setchild(tempchildarr)
        # print("after setting child")
        # printtree(thisnode)
        return
# Seed and train the part-c tree: the root covers every training sample,
# and grownodec starts with a zeroed per-feature split-count vector.
tempysplit=[0,0]
temptarget=[]
for i in range(numtrain):
    temptarget.append(i)
    if(yarrtrain[i]==0):
        tempysplit[0]= tempysplit[0]+1
    else:
        tempysplit[1]= tempysplit[1]+1
root = Node(tempysplit,temptarget)
grownodec(root, [0]*23)
print("done")
if(debug==1): printtree(root)
# In[25]:
def testc(arr,thisnode):
    """Classify one sample with the part-c tree.

    Single-child nodes are leaf wrappers.  Continuous split features
    (xidentifier == 3) branch on the node's stored threshold -- child 1 if
    the sample's value exceeds it, child 0 otherwise; categorical features
    index the child list by the raw feature value.
    """
    if(len(thisnode.childlist)==1):
        return thisnode.childlist[0].yleaf
    else:
        temp = thisnode.spliton
        if(xidentifier[temp]==3):
            temp2=arr[temp]
            temp3=thisnode.splitvalue
            if(temp2>temp3):
                return testc(arr,thisnode.childlist[1])
            else:
                return testc(arr,thisnode.childlist[0])
        else:
            return testc(arr, thisnode.childlist[arr[temp]])
# In[26]:
# Evaluate the part-c tree on the TRAINING split (sanity check).
debug=0
# print(len(root.childlist))
# test([1]*23,root)
numright=0
numwrong=0
for i in range(numtrain):
    # Assemble the i-th sample's 23-feature row from the column-major arrays.
    temp=[]
    for j in range(23):
        temp.append(xarrtrainc[j][i])
    ypred = testc(temp,root)
    if(debug==1): print(ypred)
    if(ypred==yarrtrain[i]):
        if(debug==1): print("right")
        numright+=1
    else:
        if(debug==1): print("wrong")
        numwrong+=1
# print("hell")
print((numright*1.0)/(1.0*numtrain))
# In[27]:
# Part-c validation features: same shifts, continuous columns kept raw.
xarrvalic=[]
for i in range(23):
    xarrvalic.append([])
for i in range(23):
    if(i>=5 and i<=10):
        for j in range(numvali):
            xarrvalic[i].append(xarrvaliori[i][j]+2)
    elif(i==1):
        for j in range(numvali):
            xarrvalic[i].append(xarrvaliori[i][j]-1)
    elif(xidentifier[i]!=3):
        for j in range(numvali):
            xarrvalic[i].append(xarrvaliori[i][j])
    else:
        # Continuous columns stay raw; the tree thresholds them itself.
        for j in range(numvali):
            xarrvalic[i].append(xarrvaliori[i][j])
# print(xarrvali)
# print(yarrvali)
# In[28]:
# Evaluate the part-c tree on the VALIDATION split.
# test([1]*23,root)
numright=0
numwrong=0
for i in range(numvali):
    temp=[]
    for j in range(23):
        temp.append(xarrvalic[j][i])
    ypred = testc(temp,root)
    # print(ypred)
    if(ypred!=0 and ypred!=1):
        # Defensive check: a well-formed tree only predicts 0 or 1.
        print("error")
    if(ypred==yarrvali[i]):
        # print("right")
        numright+=1
    else:
        # print("wrong")
        numwrong+=1
# print("hell")
print(numright)
print(numwrong)
print(numvali)
print((numright*1.0)/(1.0*numvali))
# In[29]:
# Part-c test features: same shifts, continuous columns kept raw.
xarrtestc=[]
for i in range(23):
    xarrtestc.append([])
for i in range(23):
    if(i>=5 and i<=10):
        for j in range(numtest):
            xarrtestc[i].append(xarrtestori[i][j]+2)
    elif(i==1):
        for j in range(numtest):
            xarrtestc[i].append(xarrtestori[i][j]-1)
    elif(xidentifier[i]!=3):
        for j in range(numtest):
            xarrtestc[i].append(xarrtestori[i][j])
    else:
        for j in range(numtest):
            xarrtestc[i].append(xarrtestori[i][j])
# print(xarrtest)
# print(yarrtest)
# In[30]:
# test([1]*23,root)
# Evaluate the part-c tree on the held-out TEST split.
debug=0
numright=0
numwrong=0
for i in range(numtest):
    # Assemble the i-th sample's 23-feature row from the column-major arrays.
    temp=[]
    for j in range(23):
        temp.append(xarrtestc[j][i])
    ypred = testc(temp,root)
    if(debug==1): print(ypred)
    if(ypred!=0 and ypred!=1):
        # Defensive check: a well-formed tree only predicts 0 or 1.
        print("error")
    if(ypred==yarrtest[i]):
        if(debug==1): print("right")
        numright+=1
    else:
        if(debug==1): print("wrong")
        numwrong+=1
# print("hell")
print(numright)
print(numwrong)
# BUG FIX: this section reports on the test set, but previously printed
# numvali (the validation-set size) here, mislabelling the counts above.
print(numtest)
print((numright*1.0)/(1.0*numtest))
def numnodes(root):
    """Count the nodes in a (sub)tree.

    A node with exactly one child is a leaf wrapper and counts as 1;
    otherwise the node counts itself plus all of its subtrees.
    """
    if len(root.childlist) == 1:
        return 1
    return 1 + sum(numnodes(child) for child in root.childlist)
print(numnodes(root))
# Feature-type summary: 0 marks continuous features (xidentifier == 3),
# 1 marks categorical ones.
mytemp = [0 if xidentifier[k] == 3 else 1 for k in range(23)]
print(mytemp)
print(maxsplitarrmy)
"akshat.khare08@gmail.com"
] | akshat.khare08@gmail.com |
84cf08f129e4d543d9128851991feaf46e16f436 | dd8a536628852156f380acc89af88959d09d484e | /models/common.py | 684aff58b9c0344094f2688dcafaa1009b238c0a | [
"MIT"
] | permissive | yairkit/flowstep3d | b53c720007acc6ad5dcf5a42d3b7e5b223817e8b | d339a8872365ba5a93cce02650ce06b64b41057a | refs/heads/main | 2023-06-04T20:10:50.244276 | 2021-06-24T14:37:48 | 2021-06-24T14:37:48 | 354,534,639 | 21 | 6 | null | null | null | null | UTF-8 | Python | false | false | 7,493 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.pointnet2 import pointnet2_utils as pointutils
class FlowEmbedding(nn.Module):
    """Flow embedding layer: correlates two point clouds.

    For each point in cloud 1 it gathers ``nsample`` neighbours from cloud 2
    (kNN, optionally radius-limited), concatenates the positional offsets
    with both points' features, runs them through a shared Conv2d MLP and
    max-pools over the neighbourhood dimension.
    """
    def __init__(self, radius, nsample, in_channel, mlp, pooling='max', corr_func='concat', knn=True, use_instance_norm=False):
        super(FlowEmbedding, self).__init__()
        self.radius = radius
        self.nsample = nsample
        self.knn = knn
        self.pooling = pooling
        self.corr_func = corr_func
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        # BUG FIX: this used `corr_func is 'concat'` -- identity comparison
        # against a string literal, which only worked via CPython string
        # interning (and raises SyntaxWarning on Python 3.8+).
        # NOTE(review): for any other corr_func, last_channel stays unbound
        # and the loop below raises NameError -- 'concat' is the only
        # supported mode here.
        if corr_func == 'concat':
            last_channel = in_channel * 2 + 3
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias=False))
            if use_instance_norm:
                self.mlp_bns.append(nn.InstanceNorm2d(out_channel, affine=True))
            else:
                self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel

    def forward(self, pos1, pos2, feature1, feature2):
        """
        Input:
            pos1: (batch_size, 3, npoint)
            pos2: (batch_size, 3, npoint)
            feature1: (batch_size, channel, npoint)
            feature2: (batch_size, channel, npoint)
        Output:
            pos1: (batch_size, 3, npoint)
            feat1_new: (batch_size, mlp[-1], npoint)
        """
        pos1_t = pos1.permute(0, 2, 1).contiguous()
        pos2_t = pos2.permute(0, 2, 1).contiguous()
        B, N, C = pos1_t.shape
        if self.knn:
            dist, idx = pointutils.knn(self.nsample, pos1_t, pos2_t)
            # Neighbours beyond the radius fall back to the nearest one.
            tmp_idx = idx[:, :, 0].unsqueeze(2).repeat(1, 1, self.nsample).to(idx.device)
            idx[dist > self.radius] = tmp_idx[dist > self.radius]
        else:
            # If the ball neighborhood points are less than nsample,
            # than use the knn neighborhood points
            idx, cnt = pointutils.ball_query(self.radius, self.nsample, pos2_t, pos1_t)
            _, idx_knn = pointutils.knn(self.nsample, pos1_t, pos2_t)
            cnt = cnt.view(B, -1, 1).repeat(1, 1, self.nsample)
            idx = idx_knn[cnt > (self.nsample - 1)]
        pos2_grouped = pointutils.grouping_operation(pos2, idx)  # [B, 3, N, S]
        pos_diff = pos2_grouped - pos1.view(B, -1, N, 1)  # [B, 3, N, S]
        feat2_grouped = pointutils.grouping_operation(feature2, idx)  # [B, C, N, S]
        if self.corr_func == 'concat':
            feat_diff = torch.cat([feat2_grouped, feature1.view(B, -1, N, 1).repeat(1, 1, 1, self.nsample)], dim=1)
        feat1_new = torch.cat([pos_diff, feat_diff], dim=1)  # [B, 2*C+3,N,S]
        for i, conv in enumerate(self.mlp_convs):
            bn = self.mlp_bns[i]
            feat1_new = F.relu(bn(conv(feat1_new)))
        # Max-pool over the neighbourhood (sample) dimension.
        feat1_new = torch.max(feat1_new, -1)[0]  # [B, mlp[-1], npoint]
        return pos1, feat1_new
class PointNetSetAbstraction(nn.Module):
    """PointNet++ set-abstraction layer.

    Optionally downsamples the cloud with farthest-point sampling, groups
    each sampled point's neighbourhood, and applies a shared Conv2d MLP
    followed by max (or mean) aggregation over the neighbourhood.
    """
    def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all,
                 return_fps=False, use_xyz=True, use_act=True, act=F.relu, mean_aggr=False, use_instance_norm=False):
        super(PointNetSetAbstraction, self).__init__()
        self.npoint = npoint
        self.radius = radius
        self.nsample = nsample
        self.group_all = group_all
        self.use_xyz = use_xyz
        self.use_act = use_act
        self.mean_aggr = mean_aggr
        self.act = act
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        # When xyz coordinates are concatenated to the features, the first
        # conv sees three extra input channels.
        last_channel = (in_channel + 3) if use_xyz else in_channel
        for out_channel in mlp:
            self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias=False))
            if use_instance_norm:
                self.mlp_bns.append(nn.InstanceNorm2d(out_channel, affine=True))
            else:
                self.mlp_bns.append(nn.BatchNorm2d(out_channel))
            last_channel = out_channel
        if group_all:
            self.queryandgroup = pointutils.GroupAll(self.use_xyz)
        else:
            self.queryandgroup = pointutils.QueryAndGroup(radius, nsample, self.use_xyz)
        self.return_fps = return_fps

    def forward(self, xyz, points, fps_idx=None):
        """
        Input:
            xyz: input points position data, [B, C, N]
            points: input points data, [B, D, N]
        Return:
            new_xyz: sampled points position data, [B, S, C]
            new_points: sample points feature data, [B, S, D']
        """
        B, C, N = xyz.shape
        xyz = xyz.contiguous()
        xyz_t = xyz.permute(0, 2, 1).contiguous()
        # FIX: `self.group_all == False` -> `not self.group_all`, and
        # `fps_idx == None` -> `fps_idx is None` (identity test; `==` on a
        # tensor argument does not test for "no index supplied").  Also
        # removed the unused `device` local.
        if (not self.group_all) and (self.npoint != -1):
            if fps_idx is None:
                fps_idx = pointutils.furthest_point_sample(xyz_t, self.npoint)  # [B, N]
            new_xyz = pointutils.gather_operation(xyz, fps_idx)  # [B, C, N]
        else:
            new_xyz = xyz
        new_points, _ = self.queryandgroup(xyz_t, new_xyz.transpose(2, 1).contiguous(), points)  # [B, 3+C, N, S]
        # new_xyz: sampled points position data, [B, C, npoint]
        # new_points: sampled points data, [B, C+D, npoint, nsample]
        for i, conv in enumerate(self.mlp_convs):
            if self.use_act:
                bn = self.mlp_bns[i]
                new_points = self.act(bn(conv(new_points)))
            else:
                new_points = conv(new_points)
        # Aggregate over the neighbourhood (last) dimension.
        if self.mean_aggr:
            new_points = torch.mean(new_points, -1)
        else:
            new_points = torch.max(new_points, -1)[0]
        if self.return_fps:
            return new_xyz, new_points, fps_idx
        else:
            return new_xyz, new_points
class PointNetFeaturePropogation(nn.Module):
    """Propagates features from a sparse point set back to a denser one.

    Each dense point receives an inverse-distance-weighted blend of its
    three nearest sparse neighbours' features, optionally refined by a
    shared Conv1d MLP.
    """

    def __init__(self, in_channel, mlp):
        super(PointNetFeaturePropogation, self).__init__()
        self.mlp_convs = nn.ModuleList()
        self.mlp_bns = nn.ModuleList()
        # mlp=None disables the refinement MLP entirely.
        self.apply_mlp = mlp is not None
        channels = in_channel
        if self.apply_mlp:
            for width in mlp:
                self.mlp_convs.append(nn.Conv1d(channels, width, 1))
                self.mlp_bns.append(nn.BatchNorm1d(width))
                channels = width

    def forward(self, pos1, pos2, feature1, feature2):
        """
        Input:
            pos1: input points position data, [B, C, N]
            pos2: sampled input points position data, [B, C, S]
            feature1: input points data, [B, D, N]
            feature2: input points data, [B, D, S]
        Return:
            feat_new: upsampled points data, [B, D', N]
        """
        dense_xyz = pos1.permute(0, 2, 1).contiguous()
        sparse_xyz = pos2.permute(0, 2, 1).contiguous()
        B, C, N = pos1.shape
        dists, idx = pointutils.three_nn(dense_xyz, sparse_xyz)
        # Clamp tiny distances so the inverse-distance weights stay finite.
        dists[dists < 1e-10] = 1e-10
        weight = 1.0 / dists
        weight = weight / torch.sum(weight, -1, keepdim=True)  # [B,N,3]
        interpolated_feat = torch.sum(
            pointutils.grouping_operation(feature2, idx) * weight.view(B, 1, N, 3),
            dim=-1)  # [B,C,N,3]
        if feature1 is None:
            feat_new = interpolated_feat
        else:
            # Skip connection: concatenate the dense set's own features.
            feat_new = torch.cat([interpolated_feat, feature1], 1)
        if self.apply_mlp:
            for conv, bn in zip(self.mlp_convs, self.mlp_bns):
                feat_new = F.relu(bn(conv(feat_new)))
        return feat_new
| [
"yairkit@gmail.com"
] | yairkit@gmail.com |
0e477dbc10cac89afc1d95e800229c48b8241efd | d86c9efc218512c45ab5e07da9fa956bd2a1aa88 | /gpcconstants.py | c1610071c1a3a84fb86a1336ae692ad467926dca | [] | no_license | qtxbryan/FYPJDownloader | 0f886a59a1913339230668378ad92537393692e0 | b77e0707119dca09b7def868d3f7d2113be98173 | refs/heads/master | 2020-04-06T17:33:44.452786 | 2018-11-15T07:12:09 | 2018-11-15T07:12:09 | 157,664,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | # categories retrieved from Google Play Store
# Google Play category identifiers accepted by the crawler.  Keys map to
# themselves, so the dict also serves as a lookup of valid category names.
CATEGORY = {
    'ANDROID_WEAR': 'ANDROID_WEAR',
    'ART_AND_DESIGN': 'ART_AND_DESIGN',
    'AUTO_AND_VEHICLES': 'AUTO_AND_VEHICLES',
    'BEAUTY': 'BEAUTY',
    'BOOKS_AND_REFERENCE': 'BOOKS_AND_REFERENCE',
    'BUSINESS': 'BUSINESS',
    'COMICS': 'COMICS',
    'COMMUNICATION': 'COMMUNICATION',
    'DATING': 'DATING',
    'EDUCATION': 'EDUCATION',
    'ENTERTAINMENT': 'ENTERTAINMENT',
    'EVENTS': 'EVENTS',
    'FINANCE': 'FINANCE',
    'FOOD_AND_DRINK': 'FOOD_AND_DRINK',
    'HEALTH_AND_FITNESS': 'HEALTH_AND_FITNESS',
    'HOUSE_AND_HOME': 'HOUSE_AND_HOME',
    'LIBRARIES_AND_DEMO': 'LIBRARIES_AND_DEMO',
    'LIFESTYLE': 'LIFESTYLE',
    'MAPS_AND_NAVIGATION': 'MAPS_AND_NAVIGATION',
    'MEDICAL': 'MEDICAL',
    'MUSIC_AND_AUDIO': 'MUSIC_AND_AUDIO',
    'NEWS_AND_MAGAZINES': 'NEWS_AND_MAGAZINES',
    'PARENTING': 'PARENTING',
    'PERSONALIZATION': 'PERSONALIZATION',
    'PHOTOGRAPHY': 'PHOTOGRAPHY',
    'PRODUCTIVITY': 'PRODUCTIVITY',
    'SHOPPING': 'SHOPPING',
    'SOCIAL': 'SOCIAL',
    'SPORTS': 'SPORTS',
    'TOOLS': 'TOOLS',
    'TRAVEL_AND_LOCAL': 'TRAVEL_AND_LOCAL',
    'VIDEO_PLAYERS': 'VIDEO_PLAYERS',
    'WEATHER': 'WEATHER',
    'GAME': 'GAME',
    'GAME_ACTION': 'GAME_ACTION',
    'GAME_ADVENTURE': 'GAME_ADVENTURE',
    'GAME_ARCADE': 'GAME_ARCADE',
    'GAME_BOARD': 'GAME_BOARD',
    'GAME_CARD': 'GAME_CARD',
    'GAME_CASINO': 'GAME_CASINO',
    'GAME_CASUAL': 'GAME_CASUAL',
    'GAME_EDUCATIONAL': 'GAME_EDUCATIONAL',
    'GAME_MUSIC': 'GAME_MUSIC',
    'GAME_PUZZLE': 'GAME_PUZZLE',
    'GAME_RACING': 'GAME_RACING',
    'GAME_ROLE_PLAYING': 'GAME_ROLE_PLAYING',
    'GAME_SIMULATION': 'GAME_SIMULATION',
    'GAME_SPORTS': 'GAME_SPORTS',
    'GAME_STRATEGY': 'GAME_STRATEGY',
    'GAME_TRIVIA': 'GAME_TRIVIA',
    'GAME_WORD': 'GAME_WORD',
    'FAMILY': 'FAMILY',
    'FAMILY_ACTION': 'FAMILY_ACTION',
    'FAMILY_BRAINGAMES': 'FAMILY_BRAINGAMES',
    'FAMILY_CREATE': 'FAMILY_CREATE',
    'FAMILY_EDUCATION': 'FAMILY_EDUCATION',
    'FAMILY_MUSICVIDEO': 'FAMILY_MUSICVIDEO',
    'FAMILY_PRETEND': 'FAMILY_PRETEND'
}
# Human-readable collection name -> Google Play internal collection id.
COLLECTION = {
    'TOP_FREE': 'topselling_free',
    'NEW_FREE': 'topselling_new_free',
    'GROSSING': 'topgrossing',
    'TRENDING': 'movers_shakers'
}
# Filesystem location of the google-play-api checkout used by the scraper.
GSCRAPPER_DIR = '/root/GooglePlayCrawler/google-play-api-master'
| [
"qtxbryan@gmail.com"
] | qtxbryan@gmail.com |
88b3e6880ce673410ca83591864b5b4b37ea19a7 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/socket/socket_socketpair.py | 8ad13087f52f0fcdc2e9f9dab9e50f2b5dca1853 | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Parent/child communication through a socket pair.
"""
#end_pymotw_header
import socket
import os
# Python 2 example: create a pair of connected sockets, then fork.  Each
# process keeps one end of the pair and closes the other, yielding a simple
# bidirectional parent/child IPC channel.
parent, child = socket.socketpair()
pid = os.fork()
if pid:
    # Parent process (os.fork returned the child's pid).
    print 'in parent, sending message'
    child.close()
    parent.sendall('ping')
    response = parent.recv(1024)
    print 'response from child:', response
    parent.close()
else:
    # Child process (os.fork returned 0).
    print 'in child, waiting for message'
    parent.close()
    message = child.recv(1024)
    print 'message from parent:', message
    child.sendall('pong')
    child.close()
| [
"350840291@qq.com"
] | 350840291@qq.com |
e16dcee46b6896c565cd3e4f89addc19e1a257c8 | ce3d4ee5d9c42f72b79258ffcba3dd8625b36cec | /venv/lib/python3.7/site-packages/environs.py | 9464c16b99c45e58f9875ece81abef5e7955c981 | [
"MIT"
] | permissive | danigfavero/ada | b2be55874343a77d650dad499591ef1ebc8f0d5f | 3c3842023c22510eedf207b23c418f22389622aa | refs/heads/master | 2020-08-19T01:40:17.614537 | 2019-10-17T22:09:13 | 2019-10-17T22:09:13 | 215,860,483 | 0 | 0 | MIT | 2019-10-17T18:32:14 | 2019-10-17T18:32:11 | null | UTF-8 | Python | false | false | 10,521 | py | # -*- coding: utf-8 -*-
import contextlib
import inspect
import functools
import json as pyjson
import os
import re
try:
import urllib.parse as urlparse
except ImportError:
# Python 2
import urlparse
try:
from collections.abc import Mapping
except ImportError:
# Python 2
from collections import Mapping
import marshmallow as ma
from dotenv import load_dotenv
from dotenv.main import _walk_to_root
__version__ = "4.2.0"
__all__ = ["EnvError", "Env"]
# Numeric components of the installed marshmallow version, e.g. (2, 19, 5);
# non-numeric parts such as "rc1" are dropped.
MARSHMALLOW_VERSION_INFO = tuple(
    int(piece) for piece in ma.__version__.split(".") if piece.isdigit()
)
class EnvError(ValueError):
    """Raised when an environment variable is missing or fails to parse."""
    pass
# Matches proxied values of the form "{{ OTHER_VAR }}"; group 1 captures the
# referenced variable name.
_PROXIED_PATTERN = re.compile(r"\s*{{\s*(\S*)\s*}}\s*")
def _field2method(field_or_factory, method_name, preprocess=None):
    """Build an Env parser method from a marshmallow field class or factory.

    The generated method looks the variable up via Env._get_from_environ,
    optionally preprocesses the raw string, deserializes it with the field,
    records the field and parsed value on the Env instance, and re-raises
    marshmallow validation failures as EnvError.
    """
    def method(self, name, default=ma.missing, subcast=None, **kwargs):
        # `missing` (marshmallow kwarg) and `default` are synonyms here.
        # NOTE(review): `or default` means a falsy missing kwarg (0, "",
        # False) silently falls back to default -- presumably intentional.
        missing = kwargs.pop("missing", None) or default
        if isinstance(field_or_factory, type) and issubclass(
            field_or_factory, ma.fields.Field
        ):
            field = field_or_factory(missing=missing, **kwargs)
        else:
            # Factory (e.g. _make_list_field) handles subcast itself.
            field = field_or_factory(subcast=subcast, missing=missing, **kwargs)
        parsed_key, raw_value, proxied_key = self._get_from_environ(name, ma.missing)
        self._fields[parsed_key] = field
        if raw_value is ma.missing and field.missing is ma.missing:
            raise EnvError(
                'Environment variable "{}" not set'.format(proxied_key or parsed_key)
            )
        # An empty string is a legitimately *set* value, so keep it.
        if raw_value or raw_value == "":
            value = raw_value
        else:
            value = field.missing
        if preprocess:
            value = preprocess(value, subcast=subcast, **kwargs)
        try:
            value = field.deserialize(value)
        except ma.ValidationError as err:
            raise EnvError(
                'Environment variable "{}" invalid: {}'.format(name, err.args[0])
            )
        else:
            self._values[parsed_key] = value
            return value
    method.__name__ = str(method_name)  # cast to str for Py2 compat
    return method
def _func2method(func, method_name):
    """Wrap a plain parser callable into an Env method named *method_name*."""
    def method(self, name, default=ma.missing, subcast=None, **kwargs):
        parsed_key, raw_value, proxied_key = self._get_from_environ(name, default)
        if raw_value is ma.missing:
            missing_name = proxied_key or parsed_key
            raise EnvError('Environment variable "{}" not set'.format(missing_name))
        parsed = func(raw_value, **kwargs)
        # Record a generic Field so dump() can serialize the value later.
        self._fields[parsed_key] = ma.fields.Field(**kwargs)
        self._values[parsed_key] = parsed
        return parsed
    method.__name__ = str(method_name)  # cast to str for Py2 compat
    return method
# From webargs
def _dict2schema(dct):
    """Generate a `marshmallow.Schema` class given a dictionary of
    `Fields <marshmallow.fields.Field>`.
    """
    attrs = dict(dct)
    if MARSHMALLOW_VERSION_INFO[0] < 3:
        # marshmallow 2 needs strict mode so invalid data raises on dump.
        attrs["Meta"] = type(str("Meta"), (object,), {"strict": True})
    return type(str(""), (ma.Schema,), attrs)
def _make_list_field(**kwargs):
    """Build a List field, honouring an optional *subcast* inner type."""
    inner_type = kwargs.pop("subcast", None)
    if inner_type:
        inner_field = ma.Schema.TYPE_MAPPING[inner_type]
    else:
        inner_field = ma.fields.Field
    return ma.fields.List(inner_field, **kwargs)
def _preprocess_list(value, **kwargs):
    """Split comma-separated strings into a list; pass real iterables through."""
    if ma.utils.is_iterable_but_not_string(value):
        return value
    return value.split(",")
def _preprocess_dict(value, **kwargs):
    """Turn "k1=v1,k2=v2" strings into a dict; pass Mappings through as-is.

    Keys and values are stripped of surrounding whitespace and values are
    run through the optional *subcast* callable.
    """
    if isinstance(value, Mapping):
        return value
    subcast = kwargs.get("subcast")
    result = {}
    items = value.split(",") if value else []
    for item in items:
        key, val = item.split("=")
        key = key.strip()
        val = val.strip()
        result[key] = subcast(val) if subcast else val
    return result
def _preprocess_json(value, **kwargs):
    """Deserialize a JSON-encoded environment value."""
    parsed = pyjson.loads(value)
    return parsed
def _dj_db_url_parser(value, **kwargs):
    """Parse a database URL using the optional dj-database-url package.

    Raises RuntimeError with installation instructions when the package
    is not available (it is imported lazily here).
    """
    try:
        import dj_database_url
    except ImportError:
        message = (
            "The dj_db_url parser requires the dj-database-url package. "
            "You can install it with: pip install dj-database-url"
        )
        raise RuntimeError(message)
    return dj_database_url.parse(value, **kwargs)
def _dj_email_url_parser(value, **kwargs):
    """Parse an email URL using the optional dj-email-url package.

    Raises RuntimeError with installation instructions when the package
    is not available (it is imported lazily here).
    """
    try:
        import dj_email_url
    except ImportError:
        message = (
            "The dj_email_url parser requires the dj-email-url package. "
            "You can install it with: pip install dj-email-url"
        )
        raise RuntimeError(message)
    return dj_email_url.parse(value, **kwargs)
class URLField(ma.fields.URL):
    """URL field whose deserialized value is a urlparse result object."""

    def _serialize(self, value, attr, obj):
        # Stored values are ParseResult objects; re-join them for output.
        return value.geturl()

    # Override deserialize rather than _deserialize: the parent's URL
    # validation must run against the raw string before we parse it.
    def deserialize(self, value, attr=None, data=None):
        validated = super(URLField, self).deserialize(value, attr, data)
        return urlparse.urlparse(validated)
class Env(object):
    """An environment variable reader.

    Typed accessors (``env.int``, ``env.bool``, ...) are generated from
    ``default_parser_map`` via __getattr__; every successful parse is
    recorded so the whole environment can later be serialized with dump().
    """
    __call__ = _field2method(ma.fields.Field, "__call__")
    # Name -> bound parser method; copied per instance in __init__ so that
    # add_parser() on one Env does not affect others.
    default_parser_map = dict(
        bool=_field2method(ma.fields.Bool, "bool"),
        str=_field2method(ma.fields.Str, "str"),
        int=_field2method(ma.fields.Int, "int"),
        float=_field2method(ma.fields.Float, "float"),
        decimal=_field2method(ma.fields.Decimal, "decimal"),
        list=_field2method(_make_list_field, "list", preprocess=_preprocess_list),
        dict=_field2method(ma.fields.Dict, "dict", preprocess=_preprocess_dict),
        json=_field2method(ma.fields.Field, "json", preprocess=_preprocess_json),
        datetime=_field2method(ma.fields.DateTime, "datetime"),
        date=_field2method(ma.fields.Date, "date"),
        timedelta=_field2method(ma.fields.TimeDelta, "timedelta"),
        uuid=_field2method(ma.fields.UUID, "uuid"),
        url=_field2method(URLField, "url"),
        dj_db_url=_func2method(_dj_db_url_parser, "dj_db_url"),
        dj_email_url=_func2method(_dj_email_url_parser, "dj_email_url"),
    )
    def __init__(self):
        """Set up per-instance parse records and a private parser map."""
        self._fields = {}
        self._values = {}
        self._prefix = None
        self.__parser_map__ = self.default_parser_map.copy()
    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self._values)
    __str__ = __repr__
    @staticmethod
    def read_env(path=None, recurse=True, stream=None, verbose=False, override=False):
        """Read a .env file into os.environ.

        If .env is not found in the directory from which this method is called,
        the default behavior is to recurse up the directory tree until a .env
        file is found. If you do not wish to recurse up the tree, you may pass
        False as a second positional argument.
        """
        # By default, start search from the same file this function is called
        if path is None:
            frame = inspect.currentframe().f_back
            caller_dir = os.path.dirname(frame.f_code.co_filename)
            start = os.path.join(os.path.abspath(caller_dir))
        else:
            start = path
        if recurse:
            for dirname in _walk_to_root(start):
                check_path = os.path.join(dirname, ".env")
                if os.path.exists(check_path):
                    return load_dotenv(
                        check_path, stream=stream, verbose=verbose, override=override
                    )
        else:
            if path is None:
                start = os.path.join(start, ".env")
            return load_dotenv(start, stream=stream, verbose=verbose, override=override)
    @contextlib.contextmanager
    def prefixed(self, prefix):
        """Context manager for parsing envvars with a common prefix."""
        try:
            old_prefix = self._prefix
            if old_prefix is None:
                self._prefix = prefix
            else:
                # Nested prefixed() blocks concatenate their prefixes.
                self._prefix = "{}{}".format(old_prefix, prefix)
            yield self
        finally:
            # explicitly reset the stored prefix on completion and exceptions
            self._prefix = None
            self._prefix = old_prefix
    def __getattr__(self, name, **kwargs):
        """Resolve unknown attributes as parser methods (e.g. ``env.int``)."""
        try:
            return functools.partial(self.__parser_map__[name], self)
        except KeyError:
            raise AttributeError("{} has no attribute {}".format(self, name))
    def add_parser(self, name, func):
        """Register a new parser method with the name ``name``. ``func`` must
        receive the input value for an environment variable.
        """
        self.__parser_map__[name] = _func2method(func, method_name=name)
        return None
    def parser_for(self, name):
        """Decorator that registers a new parser method with the name ``name``.
        The decorated function must receive the input value for an environment variable.
        """
        def decorator(func):
            self.add_parser(name, func)
            return func
        return decorator
    def add_parser_from_field(self, name, field_cls):
        """Register a new parser method with name ``name``, given a marshmallow ``Field``."""
        self.__parser_map__[name] = _field2method(field_cls, method_name=name)
    def dump(self):
        """Dump parsed environment variables to a dictionary of simple data types (numbers
        and strings).
        """
        schema = _dict2schema(self._fields)()
        dump_result = schema.dump(self._values)
        # marshmallow 2 returns a MarshalResult namedtuple; 3 returns the dict.
        return dump_result.data if MARSHMALLOW_VERSION_INFO[0] < 3 else dump_result
    def _get_from_environ(self, key, default, proxied=False):
        """Access a value from os.environ. Handles proxied variables, e.g. SMTP_LOGIN={{MAILGUN_LOGIN}}.

        Returns a tuple (envvar_key, envvar_value, proxied_key). The ``envvar_key`` will be different from
        the passed key for proxied variables. proxied_key will be None if the envvar isn't proxied.
        The ``proxied`` flag is recursively passed if a proxy lookup is required to get a
        proxy env key.
        """
        env_key = self._get_key(key, omit_prefix=proxied)
        value = os.environ.get(env_key, default)
        if hasattr(value, "strip"):
            match = _PROXIED_PATTERN.match(value)
            if match:  # Proxied variable
                proxied_key = match.groups()[0]
                return (
                    key,
                    self._get_from_environ(proxied_key, default, proxied=True)[1],
                    proxied_key,
                )
        return env_key, value, None
    def _get_key(self, key, omit_prefix=False):
        """Apply the active prefix to *key* unless omit_prefix is set."""
        return self._prefix + key if self._prefix and not omit_prefix else key
| [
"grey.pedrinho@gmail.com"
] | grey.pedrinho@gmail.com |
bf85a2582b197720eae98996b3d095ad30983e9a | c64658390d26702a3237ec776b9c4fa19183dd51 | /Using_TransferLearning/VGG16_Model.py | 071d8dbbfb7c244631249429bb8a179264a480b6 | [] | no_license | 5a7man/ML_Competiton | 49b4ac941276eea0457c857d17f88e043681584a | 8872216480c47f71ee63bbe54410ef247eb169db | refs/heads/master | 2023-01-20T15:08:18.189907 | 2020-12-01T16:54:58 | 2020-12-01T16:54:58 | 316,051,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,423 | py | # -*- coding: utf-8 -*-
"""TL_vgg16_kerasAPI.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/10QTvjCnU8hkrF7gOIaDqy2ncXYCMprmV
"""
import numpy as np
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import Model
# Data Importing
# Pre-extracted training arrays staged on Google Drive (Colab runtime).
# NOTE(review): assumed to be uint8 RGB 32x32 images with integer (sparse)
# class labels, matching the model input/loss below -- confirm.
x_train = np.load('/content/drive/My Drive/Compition/xs.npy')
y_train = np.load('/content/drive/My Drive/Compition/ys.npy')
# Scale pixel intensities to [0, 1].
x_train = x_train/255.0
def build_model():
    """Build a transfer-learning classifier on a frozen VGG16 backbone.

    The ImageNet-pretrained convolutional base is kept fixed; a new head of
    three Dense/BatchNorm/Dropout stages plus a 9-way softmax is trained on
    top of it.
    """
    base = VGG16(include_top=False, input_shape=(32, 32, 3), weights='imagenet')
    # Freeze every convolutional layer -- only the new head is trainable.
    for layer in base.layers:
        layer.trainable = False

    head = Flatten()(base.output)
    # (units, dropout rate) for each fully-connected stage of the head.
    for units, drop_rate in ((500, 0.3), (250, 0.25), (70, 0.2)):
        head = Dense(units, activation='relu')(head)
        head = BatchNormalization()(head)
        head = Dropout(drop_rate)(head)
    head = Dense(9, activation='softmax')(head)

    return Model(inputs=base.inputs, outputs=head)
model = build_model()

# Stop once validation accuracy has stalled for 20 epochs, restoring the best
# weights; independently cut the learning rate 5x after 12 stagnant epochs.
early_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 20, restore_best_weights = True)
lr_sechudle = ReduceLROnPlateau(monitor = 'val_accuracy', factor = 0.2, patience = 12 )  # (sic: "sechudle")

model.compile(optimizer=Adam(1e-3),
              loss = 'sparse_categorical_crossentropy',  # integer class labels
              metrics = ['accuracy'])
model.summary()
# Augmentation pipeline shared by both streams; the last 20% of the arrays is
# reserved as the validation split.
generator = ImageDataGenerator(rotation_range=20,
                               zoom_range = 0.2,
                               horizontal_flip = True,
                               vertical_flip = True,
                               width_shift_range = 0.1,
                               height_shift_range = 0.1,
                               validation_split = 0.2)

# FIX: the training flow previously passed no ``subset`` argument, so it drew
# batches from the *whole* dataset while validation used subset='validation'
# of the same arrays -- the validation samples were also trained on (data
# leakage inflating val_accuracy).  Restrict training to the 'training' subset.
train_flow = generator.flow(x_train, y_train, batch_size=25, subset='training')
val_flow = generator.flow(x_train, y_train, batch_size=25, subset='validation')

# NOTE(review): augmentation is still applied to the validation stream; use a
# separate plain ImageDataGenerator for validation if a clean estimate is needed.
model.fit(train_flow,
          validation_data = val_flow,
          steps_per_epoch = len(train_flow),  # batches in the training subset
          epochs = 100,
          verbose = 2,
          callbacks=[early_stopping, lr_sechudle])

model.save('model.h5')
"58083322+5a7man@users.noreply.github.com"
] | 58083322+5a7man@users.noreply.github.com |
c15c8e1573fdf0576a1e0922a58fb761e4bf722a | 83684f905274bea5ec0aa81e57abe6d6af5f65c6 | /v8-addon/product_stone_search_ept/py/product_box.py | a99910b56d48ae8dd6527630d8abac5f67878099 | [] | no_license | arpanv/pansuriya | f47064679aa2c98c1e9d3a6f0605c98a685e00cf | b8615b70c33b79d8b2454cef4151d3f83c3bc77f | refs/heads/master | 2020-04-06T06:54:00.271098 | 2014-09-17T14:06:08 | 2014-09-17T14:06:08 | 24,102,473 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | import math
import re
from openerp import tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class product_box_ept(osv.osv):
    """OpenERP/Odoo model (legacy v7-style API) describing a product box.

    A box groups products through the reverse side of the ``box_id_ept``
    many2one field declared on ``product.product``.
    """
    _name = 'product.box.ept'        # technical model name in the ORM registry
    _description = 'Product Box'
    # Declarative column definitions (legacy ``_columns`` API).
    _columns = {
                'name':fields.char('Name',required=True),
                'code':fields.char('Code'),
                'product_ids':fields.one2many('product.product','box_id_ept',string="Products"),
                }
# Legacy OpenERP registration: instantiating the class at import time adds the
# model to the ORM pool (unnecessary in the new-style API, harmless here).
product_box_ept()
"sohil@sohil.(none)"
] | sohil@sohil.(none) |
e1cfb7649de923193677b19c2893f29c7ad311f6 | fafa93f61ecf526c924639b4cf4a6775a388377f | /calibration.py | 2c0fa727180b59f3fc9ebe4c4d3756bed448358c | [] | no_license | Gavinwxy/Image-based-Localization-and-Tracking-of-a-Contiuum-Flexible-Robot | fbc3d34d07f13b5da704e91c5bc085f0a44a3d89 | 47aba0e2ca0bb201fe3f3417887c02645c094ae7 | refs/heads/master | 2020-04-27T08:26:05.058061 | 2019-08-23T15:07:41 | 2019-08-23T15:07:41 | 174,171,145 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,219 | py | import cv2
import numpy as np
import glob
import matplotlib.pyplot as plt
from utils import imshow, imshow2
# ---------------------------------------------------------------------------
# Calibration target geometry
# ---------------------------------------------------------------------------
# Termination criteria for iterative refinement (unused below; kept for
# compatibility with downstream scripts that import this module).
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Object points of the 9-row x 4-column asymmetric circle grid.  Row r lies at
# x = r; circles within a row are 2 units apart in y, with odd rows offset by
# 1 unit.  This formula reproduces the 36 hand-written coordinates of the
# original script exactly.
objp = np.zeros((36, 3), np.float32)
for idx in range(36):
    row, col = divmod(idx, 4)
    objp[idx] = (row, 2 * col + row % 2, 0)

# ---------------------------------------------------------------------------
# Blob detector used to locate the circle centres
# ---------------------------------------------------------------------------
blobParams = cv2.SimpleBlobDetector_Params()
blobParams.minThreshold = 8
blobParams.maxThreshold = 255
blobParams.filterByArea = True
blobParams.minArea = 64      # may be adjusted to suit the experiment
blobParams.maxArea = 700     # may be adjusted to suit the experiment
blobParams.filterByCircularity = True
blobParams.minCircularity = 0.1
blobParams.filterByConvexity = True
blobParams.minConvexity = 0.87
blobParams.filterByInertia = True
blobParams.minInertiaRatio = 0.5
blobDetector = cv2.SimpleBlobDetector_create(blobParams)


def sorted_blob_centers(gray):
    """Detect blobs in *gray* and return their centres sorted by descending x,
    as a float32 array of shape (N, 1, 2).

    NOTE(review): centres are keyed by their raw x coordinate, so two blobs
    with exactly equal x would collide and one be dropped -- identical to the
    behaviour of the original per-image loops.
    """
    centers = {kp.pt[0]: kp.pt[1] for kp in blobDetector.detect(gray)}
    coords = [[[x, centers[x]]] for x in sorted(centers, reverse=True)]
    return np.array(coords, dtype=np.float32)


def reference_centers(image_path):
    """Blob centres for a single reference frame (results were previously
    saved to blob_loc_*.npy; the saves remain disabled below)."""
    gray = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    return sorted_blob_centers(gray)


def calibrate_view(image_glob, preview_ms=5000):
    """Run the single-view calibration pipeline over *image_glob*.

    Shows each detection overlay for *preview_ms* milliseconds as a visual
    sanity check, then returns a dict with the RMS reprojection error,
    intrinsic matrix, distortion coefficients and per-image extrinsics.

    FIX: the original script accumulated ``obj_points``/``gray`` globally and
    calibrated the *left* view with the object-point list and image size left
    over from processing the *right* view; each view is now self-contained.
    """
    img_points = []
    obj_points = []
    image_size = None
    for frame in glob.glob(image_glob):
        img = cv2.imread(frame)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        image_size = gray.shape[::-1]
        coords = sorted_blob_centers(gray)
        obj_points.append(objp)
        img_points.append(coords)
        preview = cv2.drawChessboardCorners(img, (4, 9), coords, True)
        cv2.imshow('img', preview)
        cv2.waitKey(preview_ms)
    cv2.destroyAllWindows()
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, image_size, None, None)
    return {'ret': ret, 'intr_mat': mtx, 'distortion_coeff': dist,
            'R': rvecs, 'T': tvecs}


# Accurate blob locations for the two reference frames (left = cam2, right = cam1).
acc_coords_left = reference_centers('cam2_11.png')
#np.save('blob_loc_left.npy', acc_coords_left)
acc_coords_right = reference_centers('cam1_14.png')
#np.save('blob_loc_right.npy', acc_coords_right)

# Per-view calibration.
data_left = calibrate_view('./data_calib/cam2/*.png')
#np.save('calibration_coeff_left.npy', data_left)
data_right = calibrate_view('./data_calib/cam1/*.png')
#np.save('calibration_coeff_right.npy', data_right)
"xiaoyang.wong318@gmail.com"
] | xiaoyang.wong318@gmail.com |
1fff82cd038d8994320954689957150347257e93 | 48cbea4784808788e1df99662c2e9d305aa27526 | /AppImageBuilder/app_dir/builder.py | be9ac9ce6583bcd849094c8efb15c3f995677f82 | [
"MIT"
] | permissive | blanksteer/appimage-builder | 5bc0aaecf5db89c3f496c2bd7808cfbf9d9a422c | 377cb8bba7d7972c0bb695b9c7c13ecdf28a83a3 | refs/heads/master | 2022-12-24T17:18:10.265744 | 2020-09-30T18:15:24 | 2020-09-30T18:15:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,750 | py | # Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import logging
import os
from AppImageBuilder.app_dir.runtime.generator import RuntimeGenerator
from AppImageBuilder.app_dir.bundlers.file_bundler import FileBundler
from .app_info.bundle_info import BundleInfo
from .app_info.desktop_entry_generator import DesktopEntryGenerator
from .app_info.icon_bundler import IconBundler
from .app_info.loader import AppInfoLoader
from AppImageBuilder.app_dir.bundlers.factory import BundlerFactory
class BuilderError(RuntimeError):
    """Raised for unrecoverable failures while generating the AppDir."""
    pass
class Builder:
    """Drives AppDir generation from a recipe.

    Loads the AppDir configuration, instantiates the bundlers named in the
    recipe, bundles their dependencies, generates the runtime and finally
    writes the bundle metadata (icon, desktop entry and bundle info).
    """

    def __init__(self, recipe):
        self.recipe = recipe
        self.bundlers = []
        self.generator = None
        self._load_config()

    def _load_config(self):
        """Read the recipe and prepare paths, app info and bundlers."""
        self.app_dir_conf = self.recipe.get_item('AppDir')
        self.cache_dir = os.path.join(os.path.curdir, 'appimage-builder-cache')

        self._load_app_dir_path()
        self._load_app_info_config()

        factory = BundlerFactory(self.app_dir_path, self.cache_dir)
        factory.runtime = self.recipe.get_item('AppDir/runtime/generator', "wrapper")
        # Only instantiate the bundlers actually mentioned in the AppDir section.
        for name in factory.list_bundlers():
            if name not in self.app_dir_conf:
                continue
            self.bundlers.append(factory.create(name, self.app_dir_conf[name]))

        self.file_bundler = FileBundler(self.recipe)

    def _load_app_dir_path(self):
        # Resolve the target AppDir and make sure it exists.
        self.app_dir_path = os.path.abspath(self.recipe.get_item('AppDir/path'))
        os.makedirs(self.app_dir_path, exist_ok=True)

    def _load_app_info_config(self):
        self.app_info = AppInfoLoader().load(self.recipe)

    def build(self):
        """Run the complete AppDir generation pipeline."""
        logging.info("=================")
        logging.info("Generating AppDir")
        logging.info("=================")

        self._bundle_dependencies()
        self._generate_runtime()
        self._write_bundle_information()

    def _bundle_dependencies(self):
        logging.info("")
        logging.info("Bundling dependencies")
        logging.info("---------------------")
        for bundler in self.bundlers:
            bundler.run()

    def _generate_runtime(self):
        logging.info("")
        logging.info("Generating runtime")
        logging.info("__________________")
        RuntimeGenerator(self.recipe).generate()

    def _write_bundle_information(self):
        logging.info("")
        logging.info("Generating metadata")
        logging.info("___________________")

        self._bundle_app_dir_icon()
        self._generate_app_dir_desktop_entry()
        self._generate_bundle_info()

    def _bundle_app_dir_icon(self):
        IconBundler(self.app_dir_path, self.app_info.icon).bundle_icon()

    def _generate_app_dir_desktop_entry(self):
        DesktopEntryGenerator(self.app_dir_path).generate(self.app_info)

    def _generate_bundle_info(self):
        BundleInfo(self.app_dir_path, self.bundlers).generate()
| [
"contact@azubieta.net"
] | contact@azubieta.net |
369617deec4aeef5623b3948ced0bd4485769dd6 | 3b69a93cccdd3116744e074ef9821df34cbc1dd1 | /DataStorer.py | 7bc75b99b07d4d2b3a851221c2e6a695b067df73 | [] | no_license | muirt/bbqbadass_server | 46feceeb83abc8941c2d3e450d32bfdaad700715 | 2de69ce30bd267c1e94fd6db19fcbe0d51ada367 | refs/heads/master | 2021-01-10T06:52:17.828220 | 2016-03-17T23:45:53 | 2016-03-17T23:45:53 | 52,071,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | import time
import configuration
import CurrentIO
def StoreData():
    """Sample the configured input pins and the control output once.

    Builds DB-ready dicts keyed by the current epoch timestamp.
    NOTE(review): the persistence calls (``recorder.addInputReading`` /
    ``recorder.addOutputReading``) are commented out and ``recorder`` is not
    imported, so this function currently reads the IO state but stores
    nothing -- confirm whether the recorder wiring is still pending.
    """
    currentTime = int(time.time())  # epoch seconds shared by all readings
    dbEntry = {'inputs': [], 'time': currentTime }
    for input in configuration.InputList:  # NOTE: 'input' shadows the builtin
        name = input.Name
        state = CurrentIO.getInputState(name)
        inputEntry = {'name': name, 'state': state}  # unused while append below is disabled
        #dbEntry['inputs'].append(inputEntry)
        
        #recorder.addInputReading(dbEntry)
    ##get output states and put them in the output database
    outputEntry = { 'state': CurrentIO.getOutputState('controlOutput'), 'time': currentTime}
    #recorder.addOutputReading(outputEntry)
| [
"root@a10Lime.(none)"
] | root@a10Lime.(none) |
952b2a59ada3549de630d1106ce4e69903fc3d0c | dc585ad3a87189c93033a3840a5d5a677d89db0f | /ElasticBeamImpact/ElasticBeamEqns/lib/functions.py | 6d95379c017fdc76596c1043f85998a8b5154c35 | [] | no_license | obokhove/SurfsUP20142019 | cfc318445c94dcadec7737d2831bebe79d206e94 | 5bddcc42a5788c6e81e6a31a41c5bdb5da46f899 | refs/heads/master | 2023-06-24T15:18:35.479647 | 2023-06-22T08:02:31 | 2023-06-22T08:02:31 | 234,537,676 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | """
Auxiliary functions.
"""
import firedrake as fd
from . import beam
from . import parameters as prm
import os
import numpy as np
fd.parameters["form_compiler"]["cpp_optimize"] = True
def write_energy_to_file(filename, E_tab, time_tab):
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
outfile = open(filename, 'w')
for ind in range(len(time_tab)):
outfile.write(str(time_tab[ind]) + '\t' + str(E_tab[ind]) + '\n')
outfile.close()
def initialize_energies(t_len):
E = dict()
E['p'] = np.zeros(t_len)
E['k'] = np.zeros(t_len)
E['t'] = np.zeros(t_len)
return E
def update_energies(B, E, i):
E['p'][i] = B.E_pot()
E['k'][i] = B.E_kin()
E['t'][i] = E['p'][i] + E['k'][i]
def write_energies_to_files(E, t_tab):
write_energy_to_file("energy/Ep", E['p'], t_tab)
write_energy_to_file("energy/Ek", E['k'], t_tab)
write_energy_to_file("energy/Et", E['t'], t_tab)
def time_evolution():
params = prm.dim2nondim(prm.parameters)
B = beam.Beam(**params)
E = initialize_energies(len(B.t))
update_energies(B, E, 0)
n = 0
n_modulo = 100
for i, t in enumerate(B.t):
if i==0: continue
B.evolve_time()
if n%n_modulo == 0:
print('time = ', t * params['T'])
B.output_data()
update_energies(B, E, i)
n+=1
write_energies_to_files(E, B.t)
B.write_raw() | [
"noreply@github.com"
] | obokhove.noreply@github.com |
86f23652ca781acfd6d1f493185c442a6d2bd25b | 9ebd37765d98c245f9e90b719b03680bbf2f69e1 | /sources/BadParser.py | fa3d4a388a31e1061161f341926078be69666e09 | [] | no_license | icYFTL/ShadowServants-Brute-Python | e25964ad1e819f3185a7c55916fcb374153245c0 | ee5d0e2fdd6dfdad57bf03e8f99607c25a2bc3c1 | refs/heads/master | 2020-04-23T17:38:01.912148 | 2019-03-15T14:07:08 | 2019-03-15T14:07:08 | 171,338,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | # Version 1.2 alpha
'''
How to use:
Example:
type_of_proxy = 1 # 1 - http, 2 - https, 3 - socks4, 4 - socks5
proxycount = 5000 # There's a limit. Read https://www.proxy-list.download/
a = BadParser(type_of_proxy,proxycount)
data = a.Grab()
# data = [1.1.1.1:8000, ...]
'''
import requests
class BadParser:
    """Fetch an elite-anonymity proxy list from www.proxy-list.download.

    :param kind: proxy protocol -- 1=http, 2=https, 3=socks4, 4=socks5
    :param count: maximum number of proxies to collect overall, or the
        string ``'max'`` for no client-side limit (the remote API caps
        the total anyway)
    """

    # Map of the numeric protocol selector to the API's ``type`` parameter.
    _KINDS = {1: 'http', 2: 'https', 3: 'socks4', 4: 'socks5'}

    def __init__(self, kind, count):
        print('[BadParser v.1.2 alpha]\n')
        try:
            self.kind = self._KINDS[kind]
        except KeyError:
            # FIX: an unknown kind previously left ``self.kind`` unset and the
            # failure only surfaced later as an AttributeError inside Grab().
            raise ValueError('kind must be 1 (http), 2 (https), 3 (socks4) '
                             'or 4 (socks5), got {!r}'.format(kind))
        self.count = count
        self.handled = 0     # proxies collected so far, across Grab() calls
        self.proxy_list = []

    def Grab(self):
        """Download, parse and accumulate proxies; return the full list.

        Repeated calls keep accumulating until ``count`` entries have been
        collected in total, matching the original counter semantics.
        """
        print('Work initiated. Getting data from server.')
        r = requests.get('https://www.proxy-list.download/api/v1/get?type={}&anon=elite'.format(self.kind))
        print('Getting done. Parsing started.')
        entries = [line for line in r.text.split('\r\n') if line != '']
        if self.count != 'max':
            # Only take as many entries as are still missing from the quota.
            remaining = max(int(self.count) - int(self.handled), 0)
            entries = entries[:remaining]
        self.proxy_list.extend(entries)
        self.handled += len(entries)
        print('\nTotal parsed: {}\nWork done.\n'.format(self.handled))
        return self.proxy_list
| [
"savap0@yandex.ru"
] | savap0@yandex.ru |
cb34ff3cdf7cedcf98d716929411450445415a15 | 6c5757d8d39bf60eb248e2b2a20091270129317d | /custom_functions.py | 66a69ac57b3b194de2f396423e38527f68b4d179 | [] | no_license | juin-hongg/attendance_system | b5f7ef2d582423fa27828d42759ec9b14a626f15 | f8ea0e621a0955ec8d226973bf0675c1caf8950f | refs/heads/master | 2023-01-28T04:47:26.228831 | 2020-12-09T13:13:44 | 2020-12-09T13:13:44 | 319,960,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,896 | py | import pyowm
from datetime import datetime, timedelta
def get_elapsed_time(cst, now=None):
    """
    This function gets the time elapsed since the current class start time
    :param cst: current class start time, as a datetime.timedelta measured
        from midnight
    :param now: optional datetime.time to compare against; defaults to the
        current wall-clock time (parameter added for testability, callers
        passing only ``cst`` behave exactly as before)
    :return: 0 if *now* is EARLIER than the current class start time, else
        the total time elapsed in seconds
    """
    if now is None:
        # current time
        now = datetime.now().time()
    # convert the Time object to a Timedelta to facilitate subtraction
    curr_time = timedelta(hours=now.hour, minutes=now.minute, seconds=now.second)
    # 0 if the current time is EARLIER than the current class start time
    return (curr_time - cst).total_seconds() if (curr_time > cst) else 0
def set_current_weather(cur, unit_code, year, sem, week, cst):
    """Fetch the current weather for Kuala Lumpur and log it for a class.

    Retrieves the current weather status through the OpenWeatherMap API
    (pyowm) and inserts one row into the WEATHER table so the web dashboard
    can correlate attendance with weather.

    :param cur: Database Cursor (the INSERT is executed but not committed here)
    :param unit_code: the unit code of the current class
    :param year: current year
    :param sem: current semester
    :param week: current week of the current semester
    :param cst: current class start time
    :return: None
    """
    # NOTE(review): the API key is hard-coded; move it to configuration.
    # ``OWM(...)`` / ``weather_at_place`` is pyowm's legacy (<3.0) interface --
    # confirm the pinned pyowm version before upgrading the dependency.
    owm = pyowm.OWM("38a7ee6448c1e85e81a48536316502db")
    weather = (owm.weather_at_place("Kuala Lumpur, Malaysia")).get_weather()
    status = weather.get_status()

    # NOTE(review): the query is assembled with str.format, so any quote in an
    # interpolated value (including the externally-sourced weather string)
    # breaks the statement -- SQL injection risk.  Prefer the driver's
    # parameter binding (placeholder style depends on the DB-API driver in
    # use; confirm before switching).
    query = "INSERT " \
            "INTO " \
            "WEATHER " \
            "(" \
            "UNIT_CODE, " \
            "YEAR, " \
            "SEMESTER, " \
            "WEEK, " \
            "CLASS_DATETIME, " \
            "WEATHER" \
            ") " \
            "VALUES " \
            "(" \
            "'{}', '{}', '{}', '{}', '{}', '{}'" \
            ")".format(unit_code, year, sem, week, cst, status)

    cur.execute(query)
def get_start_date(cur, year, sem):
    """
    This function is to get the start date of the current semester
    :param cur: Database Cursor
    :param year: current year
    :param sem: current semester
    :return: the start date of the current semester as a datetime.datetime
    :raises IndexError: if no SEMESTER row matches *year*/*sem*
        (``fetchall()[0]`` on an empty result set)
    """
    # NOTE(review): query text is assembled with str.format rather than the
    # driver's parameter binding; the values come from internal configuration,
    # but switching to placeholders would be safer (placeholder style depends
    # on the DB-API driver in use -- confirm before changing).
    query = "SELECT " \
            "START_DATE " \
            "FROM " \
            "SEMESTER " \
            "WHERE " \
            "YEAR = '{}' " \
            "AND " \
            "SEMESTER = '{}'".format(year, sem)
    cur.execute(query)

    # The stored date is an ISO 'YYYY-MM-DD' string; parse it to a Datetime
    # object so callers can do timedelta arithmetic on it.
    start_date = datetime.strptime(cur.fetchall()[0][0], '%Y-%m-%d')

    # start date in Datetime type
    return start_date
| [
"juinhong4@gmail.com"
] | juinhong4@gmail.com |
24cf381ade836b3bbdf9c48d26ed4710c70d97f3 | 1d4a0cf3d970d9e1c81f9395a59088768e1b179e | /LeanEuler/CleanTaxParser/CleanTaxParser.py | 03dd4e93b1057f210280af61507905b64cebe98f | [
"Apache-2.0"
] | permissive | idaks/LeanEuler | a3577bceab736ccc1f105f4c29b432b77ea0a379 | a8fba6c08e25b1cd293dac9013256354cd8a95e2 | refs/heads/master | 2021-07-01T17:59:08.227590 | 2019-04-23T15:14:06 | 2019-04-23T15:14:06 | 118,785,114 | 1 | 0 | Apache-2.0 | 2019-03-13T07:09:56 | 2018-01-24T15:38:16 | Python | UTF-8 | Python | false | false | 19,224 | py | # Generated from CleanTax.g4 by ANTLR 4.7.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
    """Return the serialized ATN (augmented transition network) for the
    CleanTax grammar as an opaque string.

    Machine-generated by ANTLR 4.7.1 from CleanTax.g4 -- do not edit by
    hand; regenerate from the grammar instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\16")
        buf.write("N\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b")
        buf.write("\t\b\4\t\t\t\3\2\6\2\24\n\2\r\2\16\2\25\3\2\3\2\3\3\3")
        buf.write("\3\6\3\34\n\3\r\3\16\3\35\3\3\7\3!\n\3\f\3\16\3$\13\3")
        buf.write("\3\4\3\4\6\4(\n\4\r\4\16\4)\3\4\3\4\3\5\3\5\6\5\60\n\5")
        buf.write("\r\5\16\5\61\3\5\7\5\65\n\5\f\5\16\58\13\5\3\6\3\6\3\6")
        buf.write("\3\6\3\6\3\6\3\7\3\7\5\7B\n\7\3\b\3\b\3\t\3\t\6\tH\n\t")
        buf.write("\r\t\16\tI\3\t\3\t\3\t\2\2\n\2\4\6\b\n\f\16\20\2\3\3\2")
        buf.write("\6\7\2M\2\23\3\2\2\2\4\31\3\2\2\2\6%\3\2\2\2\b-\3\2\2")
        buf.write("\2\n9\3\2\2\2\fA\3\2\2\2\16C\3\2\2\2\20E\3\2\2\2\22\24")
        buf.write("\5\4\3\2\23\22\3\2\2\2\24\25\3\2\2\2\25\23\3\2\2\2\25")
        buf.write("\26\3\2\2\2\26\27\3\2\2\2\27\30\5\b\5\2\30\3\3\2\2\2\31")
        buf.write("\33\7\3\2\2\32\34\7\r\2\2\33\32\3\2\2\2\34\35\3\2\2\2")
        buf.write("\35\33\3\2\2\2\35\36\3\2\2\2\36\"\3\2\2\2\37!\5\6\4\2")
        buf.write(" \37\3\2\2\2!$\3\2\2\2\" \3\2\2\2\"#\3\2\2\2#\5\3\2\2")
        buf.write("\2$\"\3\2\2\2%\'\7\4\2\2&(\7\r\2\2\'&\3\2\2\2()\3\2\2")
        buf.write("\2)\'\3\2\2\2)*\3\2\2\2*+\3\2\2\2+,\7\5\2\2,\7\3\2\2\2")
        buf.write("-/\t\2\2\2.\60\7\r\2\2/.\3\2\2\2\60\61\3\2\2\2\61/\3\2")
        buf.write("\2\2\61\62\3\2\2\2\62\66\3\2\2\2\63\65\5\n\6\2\64\63\3")
        buf.write("\2\2\2\658\3\2\2\2\66\64\3\2\2\2\66\67\3\2\2\2\67\t\3")
        buf.write("\2\2\28\66\3\2\2\29:\7\b\2\2:;\7\r\2\2;<\5\f\7\2<=\7\r")
        buf.write("\2\2=>\7\t\2\2>\13\3\2\2\2?B\5\16\b\2@B\5\20\t\2A?\3\2")
        buf.write("\2\2A@\3\2\2\2B\r\3\2\2\2CD\7\f\2\2D\17\3\2\2\2EG\7\n")
        buf.write("\2\2FH\7\f\2\2GF\3\2\2\2HI\3\2\2\2IG\3\2\2\2IJ\3\2\2\2")
        buf.write("JK\3\2\2\2KL\7\13\2\2L\21\3\2\2\2\n\25\35\")\61\66AI")
        return buf.getvalue()
class CleanTaxParser ( Parser ):
grammarFileName = "CleanTax.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'taxonomy'", "'('", "')'", "'articulation'",
"'articulations'", "'['", "']'", "'{'", "'}'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "RCC_BASIC_5", "TEXT", "WHITESPACE" ]
RULE_ct_input = 0
RULE_tax_desc = 1
RULE_tax_sub_desc = 2
RULE_articulations_desc = 3
RULE_articulation = 4
RULE_relation = 5
RULE_rcc5_rel = 6
RULE_rcc32_rel = 7
ruleNames = [ "ct_input", "tax_desc", "tax_sub_desc", "articulations_desc",
"articulation", "relation", "rcc5_rel", "rcc32_rel" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
RCC_BASIC_5=10
TEXT=11
WHITESPACE=12
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.1")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class Ct_inputContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def articulations_desc(self):
return self.getTypedRuleContext(CleanTaxParser.Articulations_descContext,0)
def tax_desc(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CleanTaxParser.Tax_descContext)
else:
return self.getTypedRuleContext(CleanTaxParser.Tax_descContext,i)
def getRuleIndex(self):
return CleanTaxParser.RULE_ct_input
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCt_input" ):
listener.enterCt_input(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCt_input" ):
listener.exitCt_input(self)
def ct_input(self):
localctx = CleanTaxParser.Ct_inputContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_ct_input)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 17
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 16
self.tax_desc()
self.state = 19
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==CleanTaxParser.T__0):
break
self.state = 21
self.articulations_desc()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Tax_descContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TEXT(self, i:int=None):
if i is None:
return self.getTokens(CleanTaxParser.TEXT)
else:
return self.getToken(CleanTaxParser.TEXT, i)
def tax_sub_desc(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CleanTaxParser.Tax_sub_descContext)
else:
return self.getTypedRuleContext(CleanTaxParser.Tax_sub_descContext,i)
def getRuleIndex(self):
return CleanTaxParser.RULE_tax_desc
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTax_desc" ):
listener.enterTax_desc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTax_desc" ):
listener.exitTax_desc(self)
def tax_desc(self):
localctx = CleanTaxParser.Tax_descContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_tax_desc)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 23
self.match(CleanTaxParser.T__0)
self.state = 25
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 24
self.match(CleanTaxParser.TEXT)
self.state = 27
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==CleanTaxParser.TEXT):
break
self.state = 32
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CleanTaxParser.T__1:
self.state = 29
self.tax_sub_desc()
self.state = 34
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Tax_sub_descContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TEXT(self, i:int=None):
if i is None:
return self.getTokens(CleanTaxParser.TEXT)
else:
return self.getToken(CleanTaxParser.TEXT, i)
def getRuleIndex(self):
return CleanTaxParser.RULE_tax_sub_desc
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTax_sub_desc" ):
listener.enterTax_sub_desc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTax_sub_desc" ):
listener.exitTax_sub_desc(self)
def tax_sub_desc(self):
localctx = CleanTaxParser.Tax_sub_descContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_tax_sub_desc)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 35
self.match(CleanTaxParser.T__1)
self.state = 37
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 36
self.match(CleanTaxParser.TEXT)
self.state = 39
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==CleanTaxParser.TEXT):
break
self.state = 41
self.match(CleanTaxParser.T__2)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Articulations_descContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TEXT(self, i:int=None):
if i is None:
return self.getTokens(CleanTaxParser.TEXT)
else:
return self.getToken(CleanTaxParser.TEXT, i)
def articulation(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(CleanTaxParser.ArticulationContext)
else:
return self.getTypedRuleContext(CleanTaxParser.ArticulationContext,i)
def getRuleIndex(self):
return CleanTaxParser.RULE_articulations_desc
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArticulations_desc" ):
listener.enterArticulations_desc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArticulations_desc" ):
listener.exitArticulations_desc(self)
def articulations_desc(self):
localctx = CleanTaxParser.Articulations_descContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_articulations_desc)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 43
_la = self._input.LA(1)
if not(_la==CleanTaxParser.T__3 or _la==CleanTaxParser.T__4):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 45
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 44
self.match(CleanTaxParser.TEXT)
self.state = 47
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (_la==CleanTaxParser.TEXT):
break
self.state = 52
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==CleanTaxParser.T__5:
self.state = 49
self.articulation()
self.state = 54
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ArticulationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TEXT(self, i:int=None):
if i is None:
return self.getTokens(CleanTaxParser.TEXT)
else:
return self.getToken(CleanTaxParser.TEXT, i)
def relation(self):
return self.getTypedRuleContext(CleanTaxParser.RelationContext,0)
def getRuleIndex(self):
return CleanTaxParser.RULE_articulation
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArticulation" ):
listener.enterArticulation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArticulation" ):
listener.exitArticulation(self)
def articulation(self):
localctx = CleanTaxParser.ArticulationContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_articulation)
try:
self.enterOuterAlt(localctx, 1)
self.state = 55
self.match(CleanTaxParser.T__5)
self.state = 56
self.match(CleanTaxParser.TEXT)
self.state = 57
self.relation()
self.state = 58
self.match(CleanTaxParser.TEXT)
self.state = 59
self.match(CleanTaxParser.T__6)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RelationContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def rcc5_rel(self):
return self.getTypedRuleContext(CleanTaxParser.Rcc5_relContext,0)
def rcc32_rel(self):
return self.getTypedRuleContext(CleanTaxParser.Rcc32_relContext,0)
def getRuleIndex(self):
return CleanTaxParser.RULE_relation
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRelation" ):
listener.enterRelation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRelation" ):
listener.exitRelation(self)
def relation(self):
    """Parse one 'relation' (rule index 10): either a single RCC_BASIC_5
    token or an RCC-32 style disjunction introduced by the T__7 literal.

    ANTLR-generated state-machine code; do not edit by hand.
    """
    localctx = CleanTaxParser.RelationContext(self, self._ctx, self.state)
    self.enterRule(localctx, 10, self.RULE_relation)
    try:
        self.state = 63
        self._errHandler.sync(self)
        # One token of lookahead decides between the two alternatives.
        token = self._input.LA(1)
        if token in [CleanTaxParser.RCC_BASIC_5]:
            self.enterOuterAlt(localctx, 1)
            self.state = 61
            self.rcc5_rel()
            pass
        elif token in [CleanTaxParser.T__7]:
            self.enterOuterAlt(localctx, 2)
            self.state = 62
            self.rcc32_rel()
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Rcc5_relContext(ParserRuleContext):
    """ANTLR-generated parse-tree context for a single basic RCC-5 relation.

    Do not edit by hand; regenerate from the CleanTax grammar instead.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def RCC_BASIC_5(self):
        # The single RCC_BASIC_5 token matched by this rule.
        return self.getToken(CleanTaxParser.RCC_BASIC_5, 0)

    def getRuleIndex(self):
        return CleanTaxParser.RULE_rcc5_rel

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRcc5_rel" ):
            listener.enterRcc5_rel(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRcc5_rel" ):
            listener.exitRcc5_rel(self)
def rcc5_rel(self):
    """Parse one 'rcc5_rel' (rule index 12): a single RCC_BASIC_5 token.

    ANTLR-generated state-machine code; do not edit by hand.
    """
    localctx = CleanTaxParser.Rcc5_relContext(self, self._ctx, self.state)
    self.enterRule(localctx, 12, self.RULE_rcc5_rel)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 65
        self.match(CleanTaxParser.RCC_BASIC_5)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Rcc32_relContext(ParserRuleContext):
    """ANTLR-generated parse-tree context for an RCC-32 relation, i.e. a
    disjunction of one or more basic RCC-5 relations.

    Do not edit by hand; regenerate from the CleanTax grammar instead.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def RCC_BASIC_5(self, i:int=None):
        # Without an index return every RCC_BASIC_5 token in this context,
        # otherwise return only the i-th one.
        if i is None:
            return self.getTokens(CleanTaxParser.RCC_BASIC_5)
        else:
            return self.getToken(CleanTaxParser.RCC_BASIC_5, i)

    def getRuleIndex(self):
        return CleanTaxParser.RULE_rcc32_rel

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRcc32_rel" ):
            listener.enterRcc32_rel(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRcc32_rel" ):
            listener.exitRcc32_rel(self)
def rcc32_rel(self):
    """Parse one 'rcc32_rel' (rule index 14): the T__7 literal, one or more
    RCC_BASIC_5 tokens, then the T__8 literal.

    ANTLR-generated state-machine code; do not edit by hand.
    """
    localctx = CleanTaxParser.Rcc32_relContext(self, self._ctx, self.state)
    self.enterRule(localctx, 14, self.RULE_rcc32_rel)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 67
        self.match(CleanTaxParser.T__7)   # opening literal of the disjunction
        self.state = 69
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # One-or-more loop over RCC_BASIC_5 tokens (ANTLR '+' closure).
        while True:
            self.state = 68
            self.match(CleanTaxParser.RCC_BASIC_5)
            self.state = 71
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if not (_la==CleanTaxParser.RCC_BASIC_5):
                break
        self.state = 73
        self.match(CleanTaxParser.T__8)   # closing literal of the disjunction
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
| [
"sahil1105@hotmail.com"
] | sahil1105@hotmail.com |
5a71efd94cdeda2ade61ec8e122438f31aa20dbc | db7b7ec5177533020c028b717166d0f446b983ce | /py_lab2/my_pkg/bin_conv.py | 018dabf37cadc5844053544d2bd51ce538d02ef7 | [] | no_license | BREG001/osp_repo_sub | 63c97dc54eac778bd841f163df9bd367df451ed1 | 0588f41ccffc0b2aab0fb0cba79ca87586cef3a9 | refs/heads/master | 2021-05-18T15:08:35.890106 | 2020-04-27T16:07:44 | 2020-04-27T16:07:44 | 251,288,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | #!/usr/bin/python3
def bin_conv(bits=None):
    """Convert a binary number to octal, decimal and hexadecimal and print
    each result in the original ``=> OCT>`` / ``=> DEC>`` / ``=> HEX>`` format.

    Parameters
    ----------
    bits : int or str, optional
        The binary digits, written as they look (e.g. ``1011`` or ``"1011"``).
        When omitted the number is read interactively from stdin, matching
        the original behaviour.

    Raises
    ------
    ValueError
        If the input contains a character other than 0 or 1 (the original
        silently produced garbage for digits 2-9).

    Notes
    -----
    Fixes over the original implementation: the builtins ``bin``/``oct``/
    ``hex`` are no longer shadowed, the unused ``asdf`` variable is gone,
    and an input of 0 now prints ``0`` for hex instead of an empty string.
    """
    if bits is None:
        bits = input("input binary number:")

    # int(..., 2) both validates the digits and yields the decimal value.
    value = int(str(bits), 2)

    print("=> OCT> %o" % value)
    print("=> DEC> %d" % value)
    # format(..., "X") gives uppercase hex, matching the original output.
    print("=> HEX>", format(value, "X"))
if __name__=='__main__':
    # Run the interactive converter only when executed as a script,
    # not when imported as part of the my_pkg package.
    bin_conv()
| [
"ws4368@naver.com"
] | ws4368@naver.com |
c2ac30e4850c35d33c5fa9e7dce9116a572785b8 | 4a638dcdd81396dd699296d0202f199504bcd8c6 | /overpass.py | 22213c204bf8422de83862ac459a994e3427a337 | [] | no_license | lydiaxing/bwsi2017 | a7c2285dbb5bedccd40837ba575dcf30e365dc43 | 317a6ebf523e40f0e3138d669057d40239903e63 | refs/heads/master | 2021-01-22T01:43:38.836633 | 2017-09-03T00:46:27 | 2017-09-03T00:46:27 | 102,231,888 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,835 | py | #!/usr/bin/python
import rospy
from std_msgs.msg import Bool
from sensor_msgs.msg import LaserScan
from ackermann_msgs.msg import AckermannDriveStamped
from nav_msgs.msg import Odometry
import numpy as np
import math
class OverPass_Node:
    """ROS safety node for an Ackermann-steered car.

    Subscribes to the laser scan, odometry, and the muxed drive command;
    republishes the drive command on /ackermann_cmd when the path ahead is
    clear, otherwise publishes a stop/back-up command that steers away from
    the closest obstacle.
    """

    def __init__(self):
        #threshold distance where the robot is still seeing itself (meters)
        self.thresholdRobotSelfSeeing = 0.5
        #number of laser scan points to average together
        self.k = 20
        rospy.Subscriber("/scan", LaserScan, self.laser_callback)
        rospy.Subscriber("/ackermann_cmd_mux/output", AckermannDriveStamped,self.ackermann_cmd_input_callback)
        self.cmd_pub = rospy.Publisher("/ackermann_cmd", AckermannDriveStamped, queue_size=10)
        self.odom_cb = rospy.Subscriber("/odom", Odometry,self.odom_callback, queue_size=10)
        # Latest safety verdict from the laser callback.
        self.SAFE = True
        # Half-width of the safety box in front of the car (meters).
        self.SIDE_DISTANCE = 0.26
        # Length of the safety box; recomputed from speed in the drive callback.
        self.FRONT_DISTANCE = 0
        self.currentSpeed = 0
        self.currentAngleOffset = 0
        self.turnOffset = 0
        self.currentOdom = Odometry()
        # Smallest range seen in the last scan and the beam index it came from.
        self.minRange = 1000
        self.currentStep = 0

    def odom_callback(self,msg):
        # Cache the latest odometry message for use by the other callbacks.
        self.currentOdom = msg

    def laser_callback(self, msg):
        """Scan the latest laser message and update self.SAFE.

        Beam indices are converted to angles with a factor of 4, i.e. the
        scan appears to be sampled at 4 beams per degree — TODO confirm
        against the actual LIDAR configuration.
        """
        self.currentRange = 0
        self.currentAngle = 0
        self.posX = 0
        self.posY = 0
        self.counter = 0
        # Shift the scan window by the current steering angle (in beam steps).
        self.turnOffset = int(math.degrees(self.currentAngleOffset) * 4)
        #rospy.loginfo(self.turnOffset)
        self.minRange = 1000
        # First pass: find the minimum range over the forward arc.
        for x in range(80 ,1000):
            if self.minRange > msg.ranges[x] :
                self.minRange = msg.ranges[x]
        # Second pass: remember which beam produced that minimum.
        for x in range(80,1000):
            if self.minRange == msg.ranges[x]:
                self.currentStep = x
                break
        # Convert each beam in the (steering-shifted) forward arc to
        # Cartesian coordinates and count the points that fall inside the
        # SIDE_DISTANCE x FRONT_DISTANCE safety box.
        for x in range(180 - self.turnOffset,900 - self.turnOffset) :
            self.currentRange = msg.ranges[x]
            self.currentAngle = (x - 180 - self.turnOffset) / 4
            self.currentAngle = math.radians(self.currentAngle)
            self.posX = self.currentRange * math.cos(self.currentAngle)
            self.posY = self.currentRange * math.sin(self.currentAngle)
            if self.posX < self.SIDE_DISTANCE and self.posX > -self.SIDE_DISTANCE and self.posY < self.FRONT_DISTANCE :
                self.counter = self.counter + 1
        # More than two points inside the box means an obstacle, not noise.
        if self.counter > 2 :
            #rospy.loginfo("not safe")
            self.SAFE = False
        else :
            self.SAFE = True

    def ackermann_cmd_input_callback(self, msg):
        """Gate the muxed drive command through the safety check.

        The safety-box length grows exponentially with the measured forward
        speed (the 1.794144 base is presumably an empirically tuned braking
        model — confirm before changing).
        """
        self.currentSpeed = msg.drive.speed
        if self.SAFE:
            self.FRONT_DISTANCE = 0.5 * (1.794144**self.currentOdom.twist.twist.linear.x) + 0.1
        elif self.currentOdom.twist.twist.linear.x < 0.01:
            # Once stopped, shrink the box back so the car can resume.
            self.FRONT_DISTANCE = 0.5 * (1.794144**self.currentOdom.twist.twist.linear.x) + 0.1
        if self.currentOdom.twist.twist.linear.x < 0 :
            self.FRONT_DISTANCE = .5
        #rospy.loginfo(self.currentSpeed)
        #rospy.loginfo(self.currentOdom.twist.twist.linear.x)
        #self.currentAngleOffset = msg.drive.steering_angle
        #rospy.loginfo(self.FRONT_DISTANCE)
        #print("ackermann_callback")
        #print(self.SAFE)
        if self.SAFE :
            # Path is clear: forward the command unchanged.
            self.cmd_pub.publish(msg)
        else:
            # Obstacle ahead: stop, and back up while steering away from the
            # side where the closest obstacle was seen (beam 540 is the
            # middle of the scanned arc).
            back_up_msg = AckermannDriveStamped()
            back_up_msg.drive.speed = 0
            if msg.drive.speed != 0 and self.currentOdom.twist.twist.linear.x < 0.01 :
                back_up_msg.drive.speed = -0.5
                if self.currentStep > 540 :
                    back_up_msg.drive.steering_angle = 3
                else :
                    back_up_msg.drive.steering_angle = -3
            if self.currentOdom.twist.twist.linear.x > 0:
                if self.currentStep > 540 :
                    back_up_msg.drive.steering_angle = -3
                else :
                    back_up_msg.drive.steering_angle = 3
            #back_up_msg.drive.steering_angle = 0
            back_up_msg.header.stamp = rospy.Time.now()
            self.cmd_pub.publish(back_up_msg)
if __name__ == '__main__':
    rospy.init_node("OverPassNode")
    # Bug fix: the class is named OverPass_Node; the previous call to
    # OverPassNode() raised NameError as soon as the node started.
    node = OverPass_Node()
    rospy.spin()
| [
"xinglydia@gmail.com"
] | xinglydia@gmail.com |
6b9a1e5837577c0b9ee09f0991941d0b6c85a2b7 | b436ac4d46fb9a8019948fde91c2590d49c86c4d | /protein-translation/protein_translation.py | 7c33c784516594e08fe6a888fb97756968ba1c45 | [] | no_license | SirObi/exercism-python | c3fa4a3a3b28d6e1bd342101603515bb233eebfc | 02b1b6e46e08211d6885a2b192f05bec27ac3605 | refs/heads/master | 2021-06-11T01:38:44.365160 | 2019-07-06T18:56:27 | 2019-07-06T18:56:27 | 167,589,338 | 0 | 0 | null | 2021-04-20T17:56:27 | 2019-01-25T17:52:21 | Python | UTF-8 | Python | false | false | 854 | py |
# Codons are related - this is best represented as a tree/graph (simplified here)
# Codon -> amino-acid lookup.  Codons sharing a two-letter prefix are grouped:
# a plain string value applies to every codon with that prefix, while a
# nested dict selects the amino acid by the third base.
MAP = {
    "AU": "Methionine",
    "UA": "Tyrosine",
    "UC": "Serine",
    "UG": {
        "G": "Tryptophan",
        "U": "Cysteine",
        "C": "Cysteine"
    },
    "UU": {
        "U": "Phenylalanine",
        "C": "Phenylalanine",
        "A": "Leucine",
        "G": "Leucine"
    }
}

# Stop codons terminate translation.  A frozenset makes membership exact;
# the previous comma-separated string relied on fragile substring matching.
# `codon in STOP_CODON` keeps working for callers.
STOP_CODON = frozenset({"UAA", "UAG", "UGA"})


def proteins(strand):
    """Translate an RNA *strand* into a list of amino-acid names.

    The strand is read one codon (3 bases) at a time; translation stops at
    the first stop codon and any trailing partial codon is ignored.

    Raises KeyError for codons outside the supported MAP table, matching
    the original behaviour.
    """
    translation = []
    for codon in (strand[i:i + 3] for i in range(0, len(strand) - 2, 3)):
        if codon in STOP_CODON:
            break
        protein = MAP[codon[0:2]]
        if isinstance(protein, dict):
            protein = protein[codon[2]]
        translation.append(protein)
    return translation
| [
"obi.orciuch@onfido.com"
] | obi.orciuch@onfido.com |
a63330c736bf3f049319c6b98f4b620ef70fc6f8 | 0393de557686f3a7c81d1a60bbfb3895d1e18cb1 | /StreamLink/usr/lib/python3.8/site-packages/streamlink/plugins/albavision.py | 6ee7957f8ee4687e5df8a97c396b32b3a77bf92b | [] | no_license | yazidzebiri/eePlugins | e91193b419ab34131a952da00e2f8e1b08cad420 | 36fa3dd9a8d10b4a452a33962add68f1a12d6b58 | refs/heads/master | 2023-06-17T16:02:37.079183 | 2021-07-07T12:44:05 | 2021-07-07T12:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,926 | py | """
Support for the live streams on Albavision sites
- http://www.tvc.com.ec/envivo
- http://www.rts.com.ec/envivo
- http://www.elnueve.com.ar/en-vivo
- http://www.atv.pe/envivo/ATV
- http://www.atv.pe/envivo/ATVMas
"""
import logging
import re
import time
from streamlink import PluginError
from streamlink.compat import quote, range, urlencode, urlparse
from streamlink.plugin import Plugin
from streamlink.stream import HLSStream
from streamlink.utils import update_scheme
log = logging.getLogger(__name__)
class Albavision(Plugin):
    """Streamlink plugin for live streams on Albavision TV sites.

    The sites hide the HLS playlist behind an hourly rotating token that is
    embedded, scrambled, in the page; transform_token unscrambles it.
    """
    _url_re = re.compile(r"https?://(?:www\.)?(tvc.com.ec|rts.com.ec|elnueve.com.ar|atv.pe)/en-?vivo(?:/ATV(?:Mas)?)?")
    _token_input_re = re.compile(r"Math.floor\(Date.now\(\) / 3600000\),'([a-f0-9OK]+)'")
    _live_url_re = re.compile(r"LIVE_URL = '(.*?)';")
    _playlist_re = re.compile(r"file:\s*'(http.*m3u8)'")
    _token_url_re = re.compile(r"https://.*/token/.*?\?rsk=")
    # Per-channel player page URLs; the unscrambled token is appended as iut=.
    _channel_urls = {
        'ATV': 'http://dgrzfw9otv9ra.cloudfront.net/player_atv.html?iut=',
        'ATVMas': 'http://dgrzfw9otv9ra.cloudfront.net/player_atv_mas.html?iut=',
        'Canal5': 'http://dxejh4fchgs18.cloudfront.net/player_televicentro.html?iut=',
        'Guayaquil': 'http://d2a6tcnofawcbm.cloudfront.net/player_rts.html?iut=',
        'Quito': 'http://d3aacg6baj4jn0.cloudfront.net/reproductor_rts_o_quito.html?iut=',
    }

    def __init__(self, url):
        super(Albavision, self).__init__(url)
        self._page = None

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url) is not None

    @property
    def page(self):
        # Lazily fetch and cache the landing page; several helpers scrape it.
        if not self._page:
            self._page = self.session.http.get(self.url)
        return self._page

    def _get_token_url(self, channelnumber):
        """Return the token-service URL with the unscrambled token appended,
        or None if either piece cannot be scraped from the page."""
        token = self._get_live_url_token(channelnumber)
        if token:
            m = self._token_url_re.findall(self.page.text)
            token_url = m and m[channelnumber]
            if token_url:
                return token_url + token
        else:
            log.error("Could not find site token")

    @staticmethod
    def transform_token(token_in, date):
        """Unscramble *token_in* using *date* (hours since the epoch) as the
        permutation seed.  A valid token unscrambles to '<token>OK'; returns
        None (after logging) otherwise."""
        token_out = list(token_in)
        offset = len(token_in)
        for i in range(offset - 1, -1, -1):
            p = (i * date) % offset
            # swap chars at p and i
            token_out[i], token_out[p] = token_out[p], token_out[i]
        token_out = ''.join(token_out)
        if token_out.endswith("OK"):
            return token_out[:-2]
        else:
            log.error("Invalid site token: {0} => {1}".format(token_in, token_out))

    def _get_live_url_token(self, channelnumber):
        # The page embeds the scrambled token next to an hourly timestamp;
        # try the current hour first, then the previous one in case the page
        # was generated just before the hour rolled over.
        m = self._token_input_re.findall(self.page.text)
        log.debug("Token input: {0}".format(m[channelnumber]))
        if m:
            date = int(time.time() // 3600)
            return self.transform_token(m[channelnumber], date) or self.transform_token(m[channelnumber], date - 1)

    def _get_token(self, channelnumber):
        """Exchange the scraped token URL for the final playback token."""
        token_url = self._get_token_url(channelnumber)
        if token_url:
            res = self.session.http.get(token_url)
            data = self.session.http.json(res)
            if data['success']:
                return data['token']

    def _get_streams(self):
        m = self._live_url_re.search(self.page.text)
        playlist_url = m and update_scheme(self.url, m.group(1))
        player_url = self.url
        live_channel = None
        p = urlparse(player_url)
        channelnumber = 0
        # Map the site hostname (and path, for ATV) to a channel name.
        if p.netloc.endswith("tvc.com.ec"):
            live_channel = "Canal5"
        elif p.netloc.endswith("rts.com.ec"):
            live_channel = "Guayaquil"
        elif p.netloc.endswith("atv.pe"):
            if p.path.endswith(("ATVMas", "ATVMas/")):
                live_channel = "ATVMas"
                channelnumber = 1
            else:
                live_channel = "ATV"
        token = self._get_token(channelnumber)
        log.debug("token {0}".format(token))
        if playlist_url:
            log.debug("Found playlist URL in the page")
        else:
            # No LIVE_URL in the page: fetch the channel's player page and
            # scrape the playlist URL from there instead.
            if live_channel:
                log.debug("Live channel: {0}".format(live_channel))
                player_url = self._channel_urls[live_channel] + quote(token)
                page = self.session.http.get(player_url, raise_for_status=False)
                if "block access from your country." in page.text:
                    raise PluginError("Content is geo-locked")
                m = self._playlist_re.search(page.text)
                playlist_url = m and update_scheme(self.url, m.group(1))
            else:
                log.error("Could not find the live channel")
        if playlist_url:
            stream_url = "{0}?{1}".format(playlist_url, urlencode({"iut": token}))
            return HLSStream.parse_variant_playlist(self.session, stream_url, headers={"referer": player_url})


# Streamlink discovers the plugin class through this module attribute.
__plugin__ = Albavision
| [
"zdzislaw22@windowslive.com"
] | zdzislaw22@windowslive.com |
def countingSort(arr, exp1):
    """Stable counting sort of *arr* (non-negative ints), in place, keyed on
    the decimal digit selected by *exp1* (1 -> units, 10 -> tens, ...).

    Uses integer division throughout: the original float division
    (arr[i] / exp1) loses precision for large integers.
    """
    n = len(arr)
    output = [0] * n      # sorted result for this digit pass
    count = [0] * 10      # histogram of the current digit

    # Count how often each digit value occurs.
    for value in arr:
        count[(value // exp1) % 10] += 1

    # Prefix sums: count[d] becomes the index one past the last slot for d.
    for d in range(1, 10):
        count[d] += count[d - 1]

    # Fill the output back-to-front so equal digits keep their relative
    # order — the stability radix sort depends on.
    for i in range(n - 1, -1, -1):
        digit = (arr[i] // exp1) % 10
        count[digit] -= 1
        output[count[digit]] = arr[i]

    # Copy back so the caller's list is sorted in place.
    arr[:] = output


def radixSort(arr):
    """LSD radix sort of a list of non-negative integers, in place."""
    if not arr:
        # max() would raise ValueError on an empty list.
        return
    max1 = max(arr)
    exp = 1
    # Integer division stops the loop right after the most significant
    # digit.  The original `max1 / exp > 0` (float division) only
    # terminated once the quotient underflowed to 0.0, running roughly
    # 300 useless extra passes.
    while max1 // exp > 0:
        countingSort(arr, exp)
        exp *= 10
# Driver code to test above
arr = [170, 45, 75, 90, 802, 24, 2, 66]
radixSort(arr)
for value in arr:
    # The original `print(arr[i]),` carried a Python-2 trailing comma that,
    # under Python 3, just built a throwaway 1-tuple around print()'s None.
    print(value)
| [
"noreply@github.com"
] | Avinash18046.noreply@github.com |
7518016cfc9c6146f9aeac73199a889b77b08bb5 | cf9ba2393abe81c173dc1b822f03aa9eb05b0a61 | /Topsisz.py | 877ef6592c6ea970b1f70875ab4a92251ea29645 | [
"MIT"
] | permissive | zabhitak/TOPSIS-Abhinav-101803706 | 5b2066b47109608f9ba759713f1e14fa45fdf151 | 08b3a4b9887ba30027284f60ccc134bcbbb28d4f | refs/heads/master | 2023-01-07T01:48:02.911109 | 2020-11-12T05:17:11 | 2020-11-12T05:17:11 | 311,130,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,297 | py | import sys
import pandas as pd
import numpy as np
import os
class myexception(Exception):
    """Raised for invalid TOPSIS input: bad file, mismatched weight/impact
    counts, or non-numeric data.  The lowercase name is part of the public
    interface and is kept for backward compatibility."""
    pass
def normalised_mat(filename, weights, impact):
    """Read the decision matrix from *filename* and vector-normalise it.

    Parameters
    ----------
    filename : str
        CSV whose first column identifies each alternative; the remaining
        columns are numeric criteria.
    weights : str
        Comma-separated weights, e.g. "1,1,1,2" (validated here, applied in
        weight_assign).
    impact : list of str
        One '+' or '-' per criterion column.

    Returns
    -------
    numpy.ndarray of float
        The criteria matrix with every column divided by its Euclidean norm.

    Raises
    ------
    myexception
        If there are fewer than three columns, the weight/impact counts do
        not match the criterion columns, or any criterion value is
        non-numeric.
    """
    dataset = pd.read_csv(filename)
    if dataset.shape[1] < 3:
        raise myexception("Input file must contain three or more columns")

    # Work on floats: the original kept the CSV's integer dtype, so writing
    # the fractional normalised values back into the array truncated every
    # entry to zero.
    column_values = dataset.iloc[:, 1:].values.astype(float)
    n_criteria = column_values.shape[1]

    # `weights` is still the raw comma-separated string at this point, so
    # count the weights after splitting.  The original chained comparison
    # `len(weights) != len(impact) != len(...)` measured the string length
    # and never compared weights against the column count.
    if len(weights.split(',')) != n_criteria or len(impact) != n_criteria:
        raise myexception("Number of weights, number of impacts and number of columns (from 2nd to last columns) must be same")

    # Every criterion column (i.e. everything after the identifier column)
    # must be fully numeric.
    is_numeric = dataset.iloc[:, 1:].apply(
        lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
    if not is_numeric.all():
        raise myexception("For the Given Dataset from column2 all values must be numeric")

    # Vector normalisation: divide each column by its Euclidean norm.
    norms = np.sqrt(np.square(column_values).sum(axis=0))
    return column_values / norms
def weight_assign(column_values, weights):
    """Scale every criterion column by its normalised weight.

    *weights* is the comma-separated weight string.  The weights are first
    divided by their sum so they total 1, then each matrix entry is
    multiplied by the weight of its column.  Returns a list of row lists.
    """
    raw = [float(w) for w in weights.split(',')]
    total = sum(raw)
    norm_weights = [w / total for w in raw]
    return [
        [cell * w for cell, w in zip(row, norm_weights)]
        for row in column_values
    ]
def performance_score(weighted_column, impacts):
    """Compute the TOPSIS closeness score for each alternative.

    For every criterion the ideal-best / ideal-worst values are the column
    max / min for a '+' impact and min / max for a '-' impact.  Each row's
    score is S- / (S+ + S-), its relative closeness to the ideal solution;
    higher is better.  (The dead `q[:,0]` statement of the original was
    removed and the distance computation vectorised.)

    Parameters
    ----------
    weighted_column : sequence of rows
        The weighted, normalised decision matrix.
    impacts : list of str
        One '+' or '-' per criterion (validated by the caller).

    Returns
    -------
    list of float
        One closeness score per alternative.
    """
    matrix = np.array(weighted_column, dtype=float)
    n_cols = matrix.shape[1]

    ideal_best = np.zeros(n_cols, dtype=float)
    ideal_worst = np.zeros(n_cols, dtype=float)
    for j in range(n_cols):
        if impacts[j] == '+':
            ideal_best[j] = matrix[:, j].max()
            ideal_worst[j] = matrix[:, j].min()
        else:  # '-': smaller values are better
            ideal_best[j] = matrix[:, j].min()
            ideal_worst[j] = matrix[:, j].max()

    # Euclidean distance of every row from the ideal best / worst points.
    dist_best = np.sqrt(np.square(matrix - ideal_best).sum(axis=1))
    dist_worst = np.sqrt(np.square(matrix - ideal_worst).sum(axis=1))

    return list(dist_worst / (dist_worst + dist_best))
def adding_data(score, filename):
    """Re-read the original CSV and append Topsis_Score and Rank columns.

    Rank 1 belongs to the highest score; ties share the larger rank number
    (pandas 'max' method), matching the original behaviour.
    """
    result = pd.read_csv(filename)
    result['Topsis_Score'] = score
    result['Rank'] = result['Topsis_Score'].rank(method='max', ascending=False)
    return result
def main():
    """Command-line entry point.

    Usage: python topsis.py <input.csv> "<weights>" "<impacts>" <result.csv>
    e.g.   python topsis.py inputfile.csv "1,1,1,2" "+,+,-,+" result.csv
    """
    if len(sys.argv)!=5:
        raise myexception("You have missed an input file..,Use : python topsis.py inputfile.csv “1,1,1,2” “+,+,-,+” result.csv")
    # Resolve the input file relative to the current working directory.
    path=os.getcwd()
    filename = os.path.join(path,sys.argv[1])
    if not os.path.exists(filename):
        raise myexception("file does not exists")
    weights = sys.argv[2]
    impacts = sys.argv[3]
    result = sys.argv[4]
    impacts=impacts.split(',')
    # Every impact must be either '+' or '-'.
    for i in range(0,len(impacts)):
        if impacts[i]=='-':
            continue
        elif impacts[i]=='+':
            continue
        else:
            raise myexception("Impacts must be either +ve or -ve.")
    data_table = normalised_mat(filename,weights,impacts) # normalise the decision matrix
    wt_given = weight_assign(data_table,weights) # multiply each value by its column's weight
    Performance = performance_score(wt_given,impacts) # TOPSIS closeness score per alternative
    new_dataset = adding_data(Performance,filename) # append Topsis_Score and Rank columns
    new_dataset.to_csv(result)
if __name__=='__main__':
    # Run the TOPSIS pipeline only when executed as a script.
    main()
"zabhitak1292000@gmail.com"
] | zabhitak1292000@gmail.com |
c94a6cceb87bc3039ad2a7bc7a6bb86363be9e9f | 29e43a0ff67fff987bfe9fb51b85ab0f60cf0ff9 | /inward/apps.py | 165441933d1ec467ddd331f06c172fe449c6f92c | [
"MIT"
] | permissive | kwabena-aboah/dms-efile | 5c42b9016d4416f7af6c701272e1bee59c093fcf | 0e9fe6b059dcb441dac29d7b2710f66e8f4d855c | refs/heads/master | 2022-12-10T05:12:56.038469 | 2022-02-19T18:37:10 | 2022-02-19T18:37:10 | 179,473,534 | 0 | 0 | MIT | 2022-12-08T01:43:42 | 2019-04-04T10:12:20 | Python | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class InwardConfig(AppConfig):
    """Django application configuration for the 'inward' app."""
    name = 'inward'
| [
"obed30mintah@yahoo.com"
] | obed30mintah@yahoo.com |
8b4a81d4fc267ceeff9bff7fb3ff4286888a5000 | c5c7542a3b532740e20495aed22b91d58a66e576 | /hbaselines/goal_conditioned/sac.py | 342b00f6a291b006d697ef5e1df87914b7c73b52 | [
"MIT"
] | permissive | jesbu1/h-baselines | 964c80d1bb54bcd070de8ba55f940bc836078df1 | f6f775bb18de22527f2d01d73bd733ed2e435ba3 | refs/heads/master | 2022-12-07T03:18:37.077556 | 2020-09-03T06:41:49 | 2020-09-03T06:41:49 | 282,724,302 | 0 | 0 | MIT | 2020-07-26T20:04:03 | 2020-07-26T20:04:03 | null | UTF-8 | Python | false | false | 12,889 | py | """SAC-compatible goal-conditioned hierarchical policy."""
import numpy as np
from hbaselines.goal_conditioned.base import GoalConditionedPolicy as \
BaseGoalConditionedPolicy
from hbaselines.fcnet.sac import FeedForwardPolicy
class GoalConditionedPolicy(BaseGoalConditionedPolicy):
    """SAC-compatible goal-conditioned hierarchical policy.

    Specialises the base goal-conditioned policy by using the SAC
    feed-forward policy at every level of the hierarchy and by computing
    goal log-probabilities with SAC's ``logp_action`` op.
    """

    def __init__(self,
                 sess,
                 ob_space,
                 ac_space,
                 co_space,
                 buffer_size,
                 batch_size,
                 actor_lr,
                 critic_lr,
                 verbose,
                 tau,
                 gamma,
                 layer_norm,
                 layers,
                 act_fun,
                 use_huber,
                 target_entropy,
                 num_levels,
                 meta_period,
                 intrinsic_reward_type,
                 intrinsic_reward_scale,
                 relative_goals,
                 off_policy_corrections,
                 hindsight,
                 subgoal_testing_rate,
                 connected_gradients,
                 use_fingerprints,
                 fingerprint_range,
                 centralized_value_functions,
                 cg_weights,
                 scope=None,
                 env_name="",
                 num_envs=1):
        """Instantiate the goal-conditioned hierarchical policy.

        Parameters
        ----------
        sess : tf.compat.v1.Session
            the current TensorFlow session
        ob_space : gym.spaces.*
            the observation space of the environment
        ac_space : gym.spaces.*
            the action space of the environment
        co_space : gym.spaces.*
            the context space of the environment
        buffer_size : int
            the max number of transitions to store
        batch_size : int
            SGD batch size
        actor_lr : float
            actor learning rate
        critic_lr : float
            critic learning rate
        verbose : int
            the verbosity level: 0 none, 1 training information, 2 tensorflow
            debug
        tau : float
            target update rate
        gamma : float
            discount factor
        layer_norm : bool
            enable layer normalisation
        layers : list of int or None
            the size of the neural network for the policy
        act_fun : tf.nn.*
            the activation function to use in the neural network
        use_huber : bool
            specifies whether to use the huber distance function as the loss
            for the critic. If set to False, the mean-squared error metric is
            used instead
        target_entropy : float
            target entropy used when learning the entropy coefficient. If set
            to None, a heuristic value is used.
        num_levels : int
            number of levels within the hierarchy. Must be greater than 1. Two
            levels correspond to a Manager/Worker paradigm.
        meta_period : int
            meta-policy action period
        intrinsic_reward_type : str
            the reward function to be used by the lower-level policies. See the
            base goal-conditioned policy for a description.
        intrinsic_reward_scale : float
            the value that the intrinsic reward should be scaled by
        relative_goals : bool
            specifies whether the goal issued by the higher-levels policies is
            meant to be a relative or absolute goal, i.e. specific state or
            change in state
        off_policy_corrections : bool
            whether to use off-policy corrections during the update procedure.
            See: https://arxiv.org/abs/1805.08296
        hindsight : bool
            whether to include hindsight action and goal transitions in the
            replay buffer. See: https://arxiv.org/abs/1712.00948
        subgoal_testing_rate : float
            rate at which the original (non-hindsight) sample is stored in the
            replay buffer as well. Used only if `hindsight` is set to True.
        connected_gradients : bool
            whether to use the connected gradient update actor update procedure
            to the higher-level policy. See: https://arxiv.org/abs/1912.02368v1
        cg_weights : float
            weights for the gradients of the loss of the lower-level policies
            with respect to the parameters of the higher-level policies. Only
            used if `connected_gradients` is set to True.
        use_fingerprints : bool
            specifies whether to add a time-dependent fingerprint to the
            observations
        fingerprint_range : (list of float, list of float)
            the low and high values for each fingerprint element, if they are
            being used
        centralized_value_functions : bool
            specifies whether to use centralized value functions
        """
        # Delegate everything to the base class, fixing the per-level policy
        # class to the SAC feed-forward policy and forwarding the
        # SAC-specific target_entropy through additional_params.
        super(GoalConditionedPolicy, self).__init__(
            sess=sess,
            ob_space=ob_space,
            ac_space=ac_space,
            co_space=co_space,
            buffer_size=buffer_size,
            batch_size=batch_size,
            actor_lr=actor_lr,
            critic_lr=critic_lr,
            verbose=verbose,
            tau=tau,
            gamma=gamma,
            layer_norm=layer_norm,
            layers=layers,
            act_fun=act_fun,
            use_huber=use_huber,
            num_levels=num_levels,
            meta_period=meta_period,
            intrinsic_reward_type=intrinsic_reward_type,
            intrinsic_reward_scale=intrinsic_reward_scale,
            relative_goals=relative_goals,
            off_policy_corrections=off_policy_corrections,
            hindsight=hindsight,
            subgoal_testing_rate=subgoal_testing_rate,
            connected_gradients=connected_gradients,
            cg_weights=cg_weights,
            use_fingerprints=use_fingerprints,
            fingerprint_range=fingerprint_range,
            centralized_value_functions=centralized_value_functions,
            scope=scope,
            env_name=env_name,
            num_envs=num_envs,
            meta_policy=FeedForwardPolicy,
            worker_policy=FeedForwardPolicy,
            additional_params=dict(
                target_entropy=target_entropy,
            ),
        )

    # ======================================================================= #
    #                       Auxiliary methods for HIRO                        #
    # ======================================================================= #

    # TODO
    def _log_probs(self, meta_actions, worker_obses, worker_actions):
        """Calculate the log probability of the next goal by the meta-policies.

        Parameters
        ----------
        meta_actions : array_like
            (batch_size, m_ac_dim, num_samples) matrix of candidate higher-
            level policy actions
        worker_obses : array_like
            (batch_size, w_obs_dim, meta_period + 1) matrix of lower-level
            policy observations
        worker_actions : array_like
            (batch_size, w_ac_dim, meta_period) list of lower-level policy
            actions

        Returns
        -------
        array_like
            (batch_size, num_samples) fitness associated with every state /
            action / goal pair

        Helps
        -----
        * _sample_best_meta_action(self):
        """
        fitness = []
        batch_size, goal_dim, num_samples = meta_actions.shape
        _, _, meta_period = worker_actions.shape

        # Loop through the elements of the batch.
        for i in range(batch_size):
            # Extract the candidate goals for the current element in the batch.
            # The worker observations and actions from the meta period of the
            # current batch are also collected to compute the log-probability
            # of a given candidate goal.
            goals_per_sample = meta_actions[i, :, :].T
            worker_obses_per_sample = worker_obses[i, :, :].T
            worker_actions_per_sample = worker_actions[i, :, :].T

            # This will be used to store the cumulative log-probabilities of a
            # given candidate goal for the entire meta-period.
            fitness_per_sample = np.zeros(num_samples)

            # Create repeated representations of each worker action for each
            # candidate goal.
            tiled_worker_actions_per_sample = np.tile(
                worker_actions_per_sample, (num_samples, 1))

            # Create repeated representations of each worker observation for
            # each candidate goal. The indexing of worker_obses_per_sample is
            # meant to do the following:
            #   1. We remove the last observation since it does not correspond
            #      to any action for the current meta-period.
            #   2. Unlike the TD3 implementation, we keep the trailing context
            #      (goal) terms since they are needed to compute the log-prob
            #      of a given action when feeding to logp_action.
            tiled_worker_obses_per_sample = np.tile(
                worker_obses_per_sample[:-1, :], (num_samples, 1))

            # Create repeated representations of each candidate goal for each
            # worker observation in a meta period.
            tiled_goals_per_sample = np.tile(
                goals_per_sample, meta_period).reshape(
                (num_samples * meta_period, goal_dim))

            # If relative goals are being used, update the later goals to match
            # what they would be under the relative goals difference approach.
            if self.relative_goals:
                goal_diff = worker_obses_per_sample[:-1, :] - np.tile(
                    worker_obses_per_sample[0, :], (meta_period, 1))
                tiled_goals_per_sample += \
                    np.tile(goal_diff, (num_samples, 1))[:, :goal_dim]

            # Compute the log-probability of each action using the logp_action
            # attribute of the SAC lower-level policy.
            # self.policy[-1] is the lowest-level (worker) policy.
            normalized_error = self.sess.run(
                self.policy[-1].logp_action,
                feed_dict={
                    self.policy[-1].obs_ph: tiled_worker_obses_per_sample,
                    self.policy[-1].action_ph: tiled_worker_actions_per_sample,
                }
            )

            # Sum the different normalized errors to get the fitness of each
            # candidate goal.
            for j in range(num_samples):
                fitness_per_sample[j] = np.sum(
                    normalized_error[j * meta_period: (j+1) * meta_period])

            fitness.append(fitness_per_sample)

        return np.array(fitness)

    # ======================================================================= #
    #                      Auxiliary methods for HRL-CG                       #
    # ======================================================================= #

    def _setup_connected_gradients(self):
        """Create the connected gradients meta-policy optimizer."""
        # Not yet implemented for the SAC variant (see the TD3 variant for
        # the reference implementation).
        raise NotImplementedError  # TODO

    def _connected_gradients_update(self,
                                    obs0,
                                    actions,
                                    rewards,
                                    obs1,
                                    terminals1,
                                    update_actor=True):
        """Perform the gradient update procedure for the HRL-CG algorithm.

        This procedure is similar to update_from_batch, expect it runs the
        self.cg_optimizer operation instead of the policy object's optimizer,
        and utilizes some information from the worker samples as well.

        Parameters
        ----------
        obs0 : list of array_like
            (batch_size, obs_dim) matrix of observations for every level in the
            hierarchy
        actions : list of array_like
            (batch_size, ac_dim) matrix of actions for every level in the
            hierarchy
        obs1 : list of array_like
            (batch_size, obs_dim) matrix of next step observations for every
            level in the hierarchy
        rewards : list of array_like
            (batch_size,) vector of rewards for every level in the hierarchy
        terminals1 : list of numpy bool
            (batch_size,) vector of done masks for every level in the hierarchy
        update_actor : bool
            specifies whether to update the actor policy of the meta policy.
            The critic policy is still updated if this value is set to False.

        Returns
        -------
        [float, float]
            higher-level policy critic loss
        float
            higher-level policy actor loss
        """
        # Not yet implemented for the SAC variant.
        raise NotImplementedError  # TODO
| [
"noreply@github.com"
] | jesbu1.noreply@github.com |
f370e68f5f151f81a8bdc822000422bb3a00eb2f | 20f951bd927e4e5cde8ef7781813fcf0d51cc3ea | /fossir/modules/events/payment/testing/fixtures.py | cd8ec800a3c8878de5cf225e634d8c0ddd435ece | [] | no_license | HodardCodeclub/SoftwareDevelopment | 60a0fbab045cb1802925d4dd5012d5b030c272e0 | 6300f2fae830c0c2c73fe0afd9c684383bce63e5 | refs/heads/master | 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 714 | py |
import pytest
from fossir.modules.events.payment.models.transactions import PaymentTransaction, TransactionStatus
@pytest.fixture
def create_transaction():
"""Returns a callable which lets you create transactions"""
def _create_transaction(status, **params):
params.setdefault('amount', 10)
params.setdefault('currency', 'USD')
params.setdefault('provider', '_manual')
params.setdefault('data', {})
return PaymentTransaction(status=status, **params)
return _create_transaction
@pytest.fixture
def dummy_transaction(create_transaction):
    """Gives you a dummy successful transaction."""
    # Built via the create_transaction factory fixture, so all the default
    # amount/currency/provider/data values apply.
    return create_transaction(status=TransactionStatus.successful)
| [
"hodardhazwinayo@gmail.com"
] | hodardhazwinayo@gmail.com |
b2a469d2a6e34aba032faf3f8067fe1c2bbcc24c | 8573030aa0d57dae152041c05dbedc6de0c187c9 | /bluetooth_serial_test.py | 3614100769033bcaad02259bc74f0275a2268d6a | [] | no_license | aradicaldreamer/Singing_Tree | a691ae8b0519761e669710bcea859fb5597282da | d27e04251e455efc4067875db266ad3451d145c9 | refs/heads/master | 2021-09-11T13:31:12.981443 | 2018-04-08T00:02:15 | 2018-04-08T00:02:15 | 112,475,605 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | #! /usr/bin/python
import serial
#import pyfirmata
from time import sleep
bluetoothSerial = serial.Serial("/dev/rfcomm0", baudrate=115200)
from pyfirmata import Arduino, util
board = Arduino('/dev/rfcomm0')
board.digital[4].write(255)
| [
"31594961+aradicaldreamer@users.noreply.github.com"
] | 31594961+aradicaldreamer@users.noreply.github.com |
27ab1bab32d6bdc3f021bf7ca29e7d2821c32dac | 4dc4f06a2c3bdff4dabb8e27434dcd42479f1b4a | /coursera/functions.py | 6f9f8a99cabe50208f6a2cfacc4f46492a15cd57 | [] | no_license | fgokdata/exercises-python | 4a259eab02e8b64ee5a2ef3382f33a111a3fbcaf | 51689a89829f37c6a6646d009df32fde7f80ab9b | refs/heads/master | 2023-03-16T04:48:35.734158 | 2021-03-07T20:59:14 | 2021-03-07T20:59:14 | 325,111,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | def printAll(*args): # All the arguments are 'packed' into args which can be treated like a tuple
print("No of arguments:", len(args))
for argument in args:
print(argument)
#printAll with 3 arguments
printAll('Horsefeather','Adonis','Bone')
#printAll with 4 arguments
printAll('Sidecar','Long Island','Mudslide','Carriage')
##############
def printDictionary(**args):
    """Print every keyword argument as 'key : value', one per line,
    in the order the arguments were given."""
    for name, value in args.items():
        print(name + " : " + value)


printDictionary(Country='Canada', Province='Ontario', City='Toronto')
"fgokdata@gmail.com"
] | fgokdata@gmail.com |
730187e0dca39b4ce449a65b80b8caee1e53e2d0 | 264b75d3e5862022b4d5a27f062b684e3a85b5be | /car_dealer/serializers.py | 941316cda8f26a97a731d9f69a92ee1158083f69 | [] | no_license | greatday4april/database-project | 9a7c0ad5d6aefd10ecaab78ba8b61a0abadee55f | b449d8de4fd38655fd600888c1aa460b74659940 | refs/heads/master | 2023-04-18T16:11:00.237546 | 2021-05-15T20:05:50 | 2021-05-15T20:05:50 | 367,564,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,345 | py | from typing import DefaultDict
from rest_framework import serializers
from car_dealer.models import Customer, PurchaseBill, Service, ServiceAppointment, ServicePackage, ServicePerformed
import datetime
class CustomerSerializer(serializers.ModelSerializer):
    """Serializes every field of the Customer model (DRF ModelSerializer)."""
    class Meta:
        model = Customer
        fields = '__all__'
class PurchaseBillSerializer(serializers.ModelSerializer):
    """Serializes every field of the PurchaseBill model (DRF ModelSerializer)."""
    class Meta:
        model = PurchaseBill
        fields = '__all__'
class ServiceItemSerializer(serializers.ModelSerializer):
    """Serializes every field of the ServicePerformed model.

    NOTE(review): class name says "ServiceItem" but the model is
    ServicePerformed -- confirm the naming is intentional.
    """
    class Meta:
        model = ServicePerformed
        fields = '__all__'
class ServiceAppointmentSerializer(serializers.ModelSerializer):
    """Serializes a ServiceAppointment plus three computed, read-only fields:
    the itemized services (line_items), their combined labor time
    (estimated_time), and the dollar total (total_cost)."""
    estimated_time = serializers.SerializerMethodField()
    line_items = serializers.SerializerMethodField()
    total_cost = serializers.SerializerMethodField()

    def get_total_cost(self, appt: ServiceAppointment):
        # Dollar total over every line item (package services + extras).
        total = sum(entry['cost'] for entry in self.get_line_items_impl(appt))
        return '${}'.format(total)

    def get_line_items(self, appt: ServiceAppointment):
        # Same entries as get_line_items_impl, but with labor_time rendered
        # as a string so the result is JSON-friendly.
        rendered = []
        for entry in self.get_line_items_impl(appt):
            entry['labor_time'] = str(entry['labor_time'])
            rendered.append(entry)
        return rendered

    def get_line_items_impl(self, appt: ServiceAppointment):
        # Services bundled in the appointment's package (looked up by name),
        # followed by any individually performed services for this appointment.
        package_services = list(
            Service.objects.filter(
                name__in=appt.service_package.service_names).all())
        extra_services = [
            performed.service
            for performed in ServicePerformed.objects.filter(appt=appt)
        ]
        return [
            {
                "item": svc.name,
                "type": str(svc.type),
                "labor_time": svc.labor_time,
                "cost": svc.cost,
            }
            for svc in package_services + extra_services
        ]

    def get_estimated_time(self, appt: ServiceAppointment):
        # Total of all labor times, rendered as "H:MM:SS"-style text.
        total = sum(
            (entry['labor_time'] for entry in self.get_line_items_impl(appt)),
            datetime.timedelta())
        return str(total)

    class Meta:
        model = ServiceAppointment
        fields = '__all__'
class SaleStatsSerializer(serializers.Serializer):
    """Serializer for per-vehicle sale statistics.

    ``begin_date`` / ``end_date`` are write-only inputs (the view selects the
    bills); ``stats`` is the computed, read-only aggregate.
    """
    stats = serializers.SerializerMethodField(read_only=True)
    begin_date = serializers.DateField(write_only=True)
    end_date = serializers.DateField(write_only=True)

    def get_stats(self, bills):
        """Aggregate PurchaseBills into per-vehicle totals.

        Returns a list of ``{'vehicle', 'profit', 'sale_number'}`` dicts,
        one per distinct "<year> <make> <model>" vehicle description.
        """
        # FIX: use collections.defaultdict -- typing.DefaultDict is an
        # annotation alias and should not be instantiated.  Also dropped the
        # dead `bill: PurchaseBill = bill` self-assignment.
        sale_counts = defaultdict(int)
        revenue = defaultdict(float)
        for bill in bills:
            vehicle = '{} {} {}'.format(bill.vin.year, bill.vin.make, bill.vin.model)
            revenue[vehicle] += bill.price
            sale_counts[vehicle] += 1
        # NOTE(review): 'profit' is the summed sale price (gross revenue),
        # not a margin -- key name kept for API compatibility.
        return [
            {
                'vehicle': vehicle,
                'profit': total,
                'sale_number': sale_counts[vehicle],
            }
            for vehicle, total in revenue.items()
        ]
| [
"greatday4april@gmail.com"
] | greatday4april@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.