hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
df510c16faa10d361dd9e9908330f9b25302d7cf | 2,098 | py | Python | library/twisted/mod/regex.py | Kelbec-Nef/EVE-bot-discord | 30432beb482ca56a10fda8aefdb4c1b9802e4ceb | [
"MIT"
] | 59 | 2016-06-27T13:12:18.000Z | 2021-06-23T12:09:16.000Z | modules/regex.py | andimiller/twistedbot | f462385ab951f1b74e19a2ebb6b3d58da4e2a03f | [
"MIT"
] | 112 | 2016-07-06T15:32:09.000Z | 2022-03-08T07:22:54.000Z | modules/regex.py | andimiller/twistedbot | f462385ab951f1b74e19a2ebb6b3d58da4e2a03f | [
"MIT"
] | 59 | 2016-07-04T22:22:26.000Z | 2020-05-15T20:38:08.000Z | import re
import datetime
def sub(message, regex):
    """Apply an sed-style substitution ("s/pattern/replacement/[flags]") to message.

    The expression is split on '/' characters that are not escaped with a
    backslash.  The optional trailing flags field controls the replacement
    count: "g" replaces every occurrence, a digit string replaces that many,
    anything else replaces only the first occurrence.
    """
    parts = re.split("(?<!\\\\)/", regex)
    # Default to a single replacement.  This also covers expressions with no
    # trailing slash (e.g. "s/a/b"): previously `count` was left unbound in
    # that case and the function raised NameError.
    count = 1
    if len(parts) > 3:
        flags = parts[3].strip()
        if "g" in flags:
            count = 0
        elif flags.isdigit():
            count = int(flags)
    return re.sub(parts[1], parts[2], message, count)
def substitute(tbot, user, channel, msg):
    """Apply a user's own s/pattern/replacement/ expression to their last message."""
    if user not in tbot.messages:
        tbot.msg(channel, "Uh %s... you haven't said anything yet" % user)
        return
    corrected = sub(tbot.messages[user], msg)
    # only replay the message when the substitution actually changed something
    if corrected != tbot.messages[user]:
        tbot.messages[user] = corrected
        tbot.msg(channel, "<%s> %s" % (user, corrected))
substitute.rule="^s\/.*"
def directedsubstitute(tbot, user, channel, msg):
    """Apply an s/// expression to another user's last message ("nick: s/a/b/")."""
    target, expression = re.compile("^(.*?): (.*)").match(msg).groups()
    if target not in tbot.messages:
        tbot.msg(channel, "%s: %s doesn't exist! You don't have to correct them!" % (user, target))
        return
    corrected = sub(tbot.messages[target], expression)
    # only announce when the substitution actually changed the message
    if corrected != tbot.messages[target]:
        tbot.messages[target] = corrected
        tbot.msg(channel, "%s thinks %s meant: %s" % (user, target, corrected))
directedsubstitute.rule="^.*?: s/.*"
def lastmsg(tbot, user, channel, msg):
    """Report the last message recorded for the nick given after !lastmsg."""
    tokens = msg.split()
    if len(tokens) < 2:
        return
    target = tokens[1]
    if target in tbot.messages:
        tbot.msg(channel, "%s: I last saw %s say: %s" % (user, target, tbot.messages[target]))
lastmsg.rule="^!lastmsg"
def seen(tbot, user, channel, msg):
    """Report when and where the nick given after !seen was last observed."""
    tokens = msg.split()
    if len(tokens) > 1 and tokens[1] in tbot.seen:
        tbot.msg(channel, tbot.seen[tokens[1]])
seen.rule="^!seen"
def storemessage(tbot, user, channel, msg):
    """Record each non-substitution message and a last-seen note for the sender."""
    # lazily create the per-bot state dicts the other handlers read
    if not hasattr(tbot, "seen"):
        tbot.seen = {}
    if not hasattr(tbot, "messages"):
        tbot.messages = {}
    # substitution commands are not stored as "last message"
    if not msg.startswith("s/"):
        tbot.messages[user] = msg
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    tbot.seen[user] = "I last saw %s at %s in %s. " % (user, timestamp, channel)
storemessage.rule=".*"
| 34.966667 | 122 | 0.582459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.14061 |
df51d4ecd268e7723e07531a6a92ba19cedaab25 | 257 | py | Python | 02/01/isupper.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | null | null | null | 02/01/isupper.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | 39 | 2017-07-31T22:54:01.000Z | 2017-08-31T00:19:03.000Z | 02/01/isupper.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | null | null | null | s = 'abc'; print(s.isupper(), s)
s = 'Abc'; print(s.isupper(), s)
s = 'aBc'; print(s.isupper(), s)
s = 'abC'; print(s.isupper(), s)
s = 'abc'; print(s.isupper(), s)
s = 'ABC'; print(s.isupper(), s)
s = 'abc'; print(s.capitalize().isupper(), s.capitalize())
| 32.125 | 58 | 0.568093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.136187 |
df526964a362846faf5e5acce4b5cfd42b2a61de | 562 | py | Python | tests/test_scriptfields.py | ttimasdf/pyes | e5dd8bfde7ca72b9bdf52f31ee7dbbf9681891ac | [
"BSD-3-Clause"
] | 175 | 2015-01-04T00:41:48.000Z | 2022-01-12T08:42:28.000Z | tests/test_scriptfields.py | yoloseem/pyes | d146d7cbe8a883b7b6a821e4c41acb16d2a5e3d0 | [
"BSD-3-Clause"
] | 35 | 2015-01-23T16:17:33.000Z | 2021-05-17T12:12:29.000Z | tests/test_scriptfields.py | yoloseem/pyes | d146d7cbe8a883b7b6a821e4c41acb16d2a5e3d0 | [
"BSD-3-Clause"
] | 69 | 2015-01-10T17:28:26.000Z | 2021-10-13T06:55:56.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pyes import scriptfields
class ScriptFieldsTest(unittest.TestCase):
    """Tests for the pyes.scriptfields module."""

    def test_scriptfieldserror_imported(self):
        # the module must expose its error type so callers can catch it
        self.assertTrue(hasattr(scriptfields, 'ScriptFieldsError'))

    def test_ignore_failure(self):
        # ignore_failure=True must survive a serialization round trip
        script_fields = scriptfields.ScriptFields("a_field", "return _source.field", ignore_failure=True)
        payload = script_fields.serialize()
        self.assertIn("ignore_failure", payload.get("a_field", {}))


if __name__ == '__main__':
    unittest.main()
| 33.058824 | 98 | 0.729537 | 403 | 0.717082 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.192171 |
df53e4881da04ab4f402645f9a8a2b4862a0381d | 1,049 | py | Python | flow/urls.py | Xinghui-Wu/FlowMeter | c2780fe7f8b8d2d6136296e53d1ee95e5660afc4 | [
"MIT"
] | null | null | null | flow/urls.py | Xinghui-Wu/FlowMeter | c2780fe7f8b8d2d6136296e53d1ee95e5660afc4 | [
"MIT"
] | null | null | null | flow/urls.py | Xinghui-Wu/FlowMeter | c2780fe7f8b8d2d6136296e53d1ee95e5660afc4 | [
"MIT"
] | 1 | 2021-05-17T13:01:26.000Z | 2021-05-17T13:01:26.000Z | """FlowMeter URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path
import flow
from . import views
# Route table for the flow app; each view callable is defined in flow/views.py.
urlpatterns = [
    path('', views.flow_view),
    path('select/', views.select_device),
    path('sniff/', views.sniff),
    path('get_flow/', views.get_flow),
    path('address/', views.address_analyze),
    path('name/', views.name_analyze),
    path('burst/', views.burst_analyze)
]
| 32.78125 | 77 | 0.701621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 687 | 0.654909 |
df586b1e318707fb32e47861376aae613e4a3f07 | 15,033 | py | Python | psp/gcp_utils.py | amckenna41/DCBLSTM_PSP | feec5bbf700782484af3b4873fd742ff9f7a7272 | [
"MIT"
] | 1 | 2021-05-06T09:48:45.000Z | 2021-05-06T09:48:45.000Z | psp/gcp_utils.py | amckenna41/DCBLSTM_PSP | feec5bbf700782484af3b4873fd742ff9f7a7272 | [
"MIT"
] | null | null | null | psp/gcp_utils.py | amckenna41/DCBLSTM_PSP | feec5bbf700782484af3b4873fd742ff9f7a7272 | [
"MIT"
] | null | null | null | ################################################################################
######## Google Cloud Platform Utilities ########
################################################################################
#Importing required libraries and dependancies
import numpy as np
import pandas as pd
import os
import glob
import subprocess
from google.cloud import storage, exceptions
from googleapiclient import errors
from googleapiclient import discovery
from google.cloud import pubsub_v1
from google.oauth2 import service_account
from oauth2client.client import GoogleCredentials
import tensorflow as tf
from tensorflow.keras.utils import plot_model
import pickle
import json
from subprocess import Popen, PIPE
try:
from _globals import *
except:
from . _globals import *
#module-level handle to the GCS bucket; populated by initialise_bucket()
#and read by the upload/download helpers below
BUCKET = None
def initialise_bucket(bucket):
    """
    Description:
        Initialise the GCP storage client and module-level bucket handle.
    Args:
        :bucket (str): name of GCP Storage bucket (a leading 'gs://' is stripped).
    Returns:
        None
    """
    global BUCKET
    client = storage.Client()
    name = bucket.replace('gs://', '')
    BUCKET = client.get_bucket(name)
def save_history(history, model_output_folder):
    """
    Description:
        Save model training history locally as a pickle and upload it to
        GCP Storage under the model's output folder.
    Args:
        :history (tf.keras History): history object returned by model.fit().
        :model_output_folder (str): output folder where model assets and results are stored.
    Returns:
        None
    """
    #serialise the history dict to a local pickle file
    try:
        with open('history.pckl', 'wb') as f:
            pickle.dump(history.history, f)
    except (pickle.PicklingError, AttributeError, OSError) as e:
        #PicklingError: unpicklable entry; AttributeError: object without a
        #.history attribute; OSError: local file could not be written.
        #(the previous handler referenced the never-imported `traceback`
        #module and caught UnpicklingError, which dump() cannot raise)
        print('Error creating history pickle: {}'.format(e))
        return

    #upload history pickle to the bucket
    blob_path = os.path.join(model_output_folder, 'history.pckl')
    upload_file(blob_path, 'history.pckl')
def upload_directory(local_path, gcs_folder_path):
    """
    Description:
        Upload a directory recursively to GCP Storage.
    Args:
        :local_path (str): local path to directory.
        :gcs_folder_path (str): blob folder path on GCP.
    Returns:
        None
    Reference:
        https://stackoverflow.com/questions/25599503/how-to-upload-folder-on-google-cloud-storage-using-python-api
    """
    if not os.path.isdir(local_path):
        raise OSError('Path to local directory not found.')

    #upload files directly; recurse into sub-directories
    for entry in glob.glob(local_path + '/**'):
        if os.path.isfile(entry):
            remote_path = os.path.join(gcs_folder_path, entry[1 + len(local_path):])
            upload_file(remote_path, entry)
        else:
            upload_directory(entry, gcs_folder_path + "/" + os.path.basename(entry))
def upload_file(blob_path, filepath):
    """
    Description:
        Upload a local file as a blob to the GCP Storage bucket.
    Args:
        :blob_path (str): path of blob object in bucket.
        :filepath (str): local filepath of object.
    Returns:
        None
    """
    #initialise blob in bucket
    blob = BUCKET.blob(blob_path)

    #upload blob to specified bucket
    try:
        blob.upload_from_filename(filepath)
    except Exception as e:
        #fixed: Python 3 exceptions have no .message attribute, so the old
        #handler itself raised AttributeError; str(e) is portable
        print("Error uploading blob {} to storage bucket: {}.".format(blob_path, e))
def download_file(blob_path, filepath):
    """
    Description:
        Download blob object from GCP Storage bucket to local dir.
    Args:
        :blob_path (str): path of blob object in bucket.
        :filepath (str): local filepath for downloaded blob.
    Returns:
        None
    """
    #initialise blob in bucket
    blob = BUCKET.blob(blob_path)

    #download blob from GCP Storage bucket to local filepath
    try:
        blob.download_to_filename(filepath)
        print('Blob {} downloaded to {}.'.format(blob_path, filepath))
    except Exception as e:
        #fixed: Python 3 exceptions have no .message attribute, so the old
        #handler itself raised AttributeError; str(e) is portable
        print("Error downloading blob {} from storage bucket: {}.".format(blob_path, e))
def get_job_hyperparmeters(project_id, job_name):
    """
    Description:
        Fetch the results of a completed AI-Platform hyperparameter tuning
        job, export them to CSV, upload the CSV to GCP Storage and return
        them as a DataFrame.
    Args:
        :project_id (str): name of GCP project.
        :job_name (str): name of GCP AI-Platform job.
    Returns:
        :df (pandas DataFrame): dataframe of hyperparameters and their
            associated results, or None if the job details could not be
            fetched or the job has not completed.
    """
    #build job resource name and an authenticated AI-Platform client
    job_id = '{}/jobs/{}'.format(project_id, job_name)
    credentials = GoogleCredentials.get_application_default()
    ml = discovery.build('ml', 'v1', credentials=credentials)

    #request job details using the Google API client
    try:
        request = ml.projects().jobs().get(name=job_id).execute()
        if request['state'] != "SUCCEEDED":     #only completed jobs have results
            print('Hyperparameter tuning job not completed.')
            return
    except errors.HttpError as err:
        print('Error getting job details')
        print(err._get_reason())
        return      #fixed: `request` is unbound here, so falling through crashed

    trials = request['trainingOutput']['trials']

    #dataframe columns are the hyperparameter names of the first trial
    col = list(trials[0]['hyperparameters'])

    #collect every successful trial's value for each hyperparameter, column by column
    row = []
    for cols in col:
        for trial in trials:
            if trial['state'] == "SUCCEEDED":
                row.append(trial['hyperparameters'][cols])

    #split the flat list into one array per hyperparameter and build the dataframe
    row_np = np.split(np.asarray(row), len(col))
    df = pd.DataFrame(row_np).T
    df.columns = col

    #append evaluation score and trial ID for each trial
    eval_score = [t['finalMetric']['objectiveValue'] for t in trials]
    trial_id = [t['trialId'] for t in trials]
    df['eval_score'] = eval_score
    df['trial_id'] = trial_id

    #sort dataframe by the evaluation score
    #fixed: the sorted result was previously discarded (no assignment/inplace)
    df = df.sort_values('eval_score')

    #move evaluation score and trial ID to the front of the dataframe
    eval_col = df.pop('eval_score')
    df.insert(0, 'eval_score', eval_col)
    trial_col = df.pop('trial_id')
    df.insert(0, 'trial_id', trial_col)

    #export dataframe to csv
    df_filename = job_name + "_hyperparameter_tuning"
    #fixed: previously written as the undefined name `df_file_name` (NameError)
    df.to_csv(df_filename, encoding='utf-8', index=False)

    #upload csv to GCP Storage
    blob_path = job_name + ".csv"
    upload_file(blob_path, df_filename)

    return df
def list_bucket_objects():
    """
    Description:
        List all objects in the GCP Storage bucket.
    Args:
        None
    Returns:
        None
    """
    #NOTE(review): BUCKET_NAME is assumed to come from `from _globals import *`
    #at the top of this module — confirm.
    try:
        #fixed: storage_client was previously referenced without being defined
        storage_client = storage.Client()
        blobs = storage_client.list_blobs(BUCKET_NAME)
    except Exception as e:
        print("Error listing blobs from {} bucket: {}".format(BUCKET_NAME, e))
        return

    #print bucket blobs
    for blob in blobs:
        print(blob.name)
def delete_blob(blob_name):
    """
    Description:
        Delete blob from GCP storage bucket.
    Args:
        :blob_name (str): name of blob to delete.
    Returns:
        None
    """
    #get blob from bucket using GCP storage API
    blob = BUCKET.blob(blob_name)

    #delete blob
    try:
        blob.delete()
        print("Blob {} deleted from {} bucket.".format(blob_name, BUCKET_NAME))
    except Exception as e:
        #fixed: Python 3 exceptions have no .message attribute, so the old
        #handler itself raised AttributeError; str(e) is portable
        print("Error deleting blob {} from {} bucket: {}.".format(blob_name, BUCKET_NAME, e))
def get_model_output(job_name):
    """
    Description:
        Output results from metrics captured when training model. Output to CSV,
        upload to GCP Storage and return as a one-row DataFrame.
    Args:
        :job_name (str): name of AI Platform training job.
    Returns:
        :model_output_df (pandas DataFrame): one-row dataframe of the captured metrics.
    """
    #local filepath and GCP blob name
    model_output_csv_blob = os.path.join(job_name, 'output_results.csv')
    #converting model_output dictionary to pandas Dataframe
    #NOTE(review): model_output is not defined in this module; presumably it
    #comes from the `from _globals import *` import above — confirm.
    model_output_df = pd.DataFrame(model_output, index=[0])
    #exporting Dataframe to CSV
    model_output_df.to_csv('output_results.csv',index=False)
    #uploading blob to cloud storage
    upload_file(os.path.join(job_name, 'output_results.csv'),'output_results.csv')
    return model_output_df
def visualise_model(model, save_folder):
    """
    Description:
        Visualise a Keras model's layers, connections and data types,
        saving the diagram as model.png inside save_folder.
    Args:
        :model (Keras.model): Keras model to visualise.
        :save_folder (str): directory in which to store the model image.
    Returns:
        None
    """
    output_png = os.path.join(save_folder, 'model.png')
    plot_model(model, to_file=output_png, show_shapes=True, show_dtype=True)
def get_trainable_parameters(model):
    """
    Description:
        Calculate the number of trainable and non-trainable parameters in Keras model.
    Args:
        :model (Keras.model): Keras model to calculate parameters for.
    Returns:
        :trainable_params (int): number of trainable parameters.
        :non_trainable_params (int): number of non-trainable parameters.
        :total_params (int): total number of trainable + non-trainable parameters.
    """
    #NOTE(review): count_params is not visible in this module's imports;
    #presumably it comes from `from _globals import *` or should be imported
    #from keras utilities — confirm before relying on this function.
    trainable_params = count_params(model.trainable_weights)
    non_trainable_params = count_params(model.non_trainable_weights)
    total_params = trainable_params + non_trainable_params
    return trainable_params, non_trainable_params, total_params
def append_all_output(output_results_df, all_results="all_results.csv"):
    """
    Description:
        Append training results/parameters of the current job to a CSV
        containing results/parameters of all previous jobs, then re-upload
        the combined CSV to the bucket.
    Args:
        :output_results_df (pandas DataFrame): results of the current job.
        :all_results (str): filepath of the csv containing results of previous jobs.
    Returns:
        None
    """
    #check if results file exists in bucket, if so then download locally
    if (BUCKET.blob(all_results).exists()):
        download_file(all_results, all_results)
    else:
        return

    #read existing results and append results of the current training job
    all_results_df = pd.read_csv(all_results)
    #fixed: DataFrame.append was deprecated and removed in pandas 2.0;
    #pd.concat is the supported equivalent
    all_results_df = pd.concat([all_results_df, output_results_df])

    #export combined results to csv and upload the updated file to the bucket
    all_results_df.to_csv(all_results, index=False)
    upload_file(all_results, all_results)
def parse_json_arch(arch_json):
    """
    Description:
        Parse model architecture JSON. (Stub — not yet implemented.)
    Args:
        :arch_json (str): filepath to model json.
    Returns:
        None
    """
    #TODO: implement JSON architecture parsing
    pass
def get_job_status(job_name):
    """
    Description:
        Get the training status of a GCP AI-Platform job by parsing the
        output of `gcloud ai-platform jobs describe`.
    Args:
        :job_name (str): name of training job.
    Returns:
        :status (str): training status of job.
        :err_message (str): error message of job (empty unless status is FAILED).
    """
    proc = subprocess.Popen(
        ["gcloud", "ai-platform", "jobs", "describe", job_name],
        stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = proc.communicate(b"input data that is passed to subprocess' stdin")

    lines = output.decode('UTF-8').split("\n")

    #parse job status ("state: <STATUS>") from the command-line output
    status = ""
    for line in lines:
        if "state:" in line:
            status = line.strip()
            status = status[status.find(':') + 1:].strip()

    #parse the error message, only present in the output for failed jobs
    err_message = ""
    if status == "FAILED":
        for line in lines:
            if "errorMessage:" in line:
                err_message = line.strip()

    return status, err_message
def setup_tpu():
    """
    Description:
        Initialize TPU for training.
    Args:
        None
    Returns:
        :strategy (TPUStrategy): TPU training strategy object.
    References:
        [1]: https://www.tensorflow.org/guide/tpu
        [2]: https://cloud.google.com/tpu/docs/tpus
    """
    #resolve the TPU cluster, connect to it and initialise the TPU system
    #before creating the distribution strategy — this order is required
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)
    return strategy
class StepDecay():
    """
    Description:
        Step decay learning-rate scheduler: the rate is multiplied by
        `factor` once every `dropEvery` epochs.
    Args:
        :initAlpha (float): initial learning rate (default=0.0005).
        :factor (float): drop factor (default=0.8).
        :dropEvery (int): number of epochs between drops (default=40).
    Returns:
        Result from step decay function.
    """
    def __init__(self, initAlpha=0.0005, factor=0.8, dropEvery=40):
        self.initAlpha = initAlpha
        self.factor = factor
        self.dropEvery = dropEvery

    def __call__(self, epoch):
        #number of completed drop intervals at this (0-based) epoch
        drops = np.floor((epoch + 1) / self.dropEvery)
        return float(self.initAlpha * (self.factor ** drops))
class ExponentialDecay():
    """
    Description:
        Exponential decay learning-rate scheduler:
        lr(epoch) = initAlpha * exp(-k * epoch).
    Args:
        :initAlpha (float): initial learning rate (default=0.0005).
        :k (float): decay exponent (default=0.8).
    Returns:
        Result from exponential decay function.
    """
    def __init__(self, initAlpha=0.0005, k=0.8):
        self.initAlpha = initAlpha
        self.k = k

    def __call__(self, epoch):
        #fixed: previously referenced the never-imported `math` module and the
        #bare name `k` (instead of self.k), raising NameError on every call
        return float(self.initAlpha * np.exp(-self.k * epoch))
class TimedBased():
    """
    Description:
        Time-based decay learning-rate scheduler:
        lr(epoch) = lr / (1 + decay * epoch), where decay = initAlpha / epochs.
    Args:
        :initAlpha (float): initial learning rate (default=0.01).
        :epochs (int): total number of training epochs (default=100).
    Returns:
        Result from time-based decay function.
    """
    def __init__(self, initAlpha=0.01, epochs=100):
        self.initAlpha = initAlpha
        self.epochs = epochs
        #per-epoch decay rate derived from the schedule length
        #(previously recomputed into an unused local on every call)
        self.decay = self.initAlpha / self.epochs

    def __call__(self, lr, epochs):
        #fixed: the current-epoch argument was previously ignored and the
        #constant total epoch count used instead, which made the returned
        #rate independent of training progress
        return (lr * 1) / (1 + self.decay * epochs)
| 33.03956 | 123 | 0.656622 | 1,779 | 0.11834 | 0 | 0 | 0 | 0 | 0 | 0 | 7,928 | 0.527373 |
df58cc87adff05d56c9092da2fe099ff0776f22b | 6,033 | py | Python | Glassdoor scraping/main.py | stancld/MSc-Project | 31a57ed58a902fe998649b948c61e70ca78729a4 | [
"MIT"
] | 2 | 2021-05-27T12:43:20.000Z | 2022-02-24T07:01:55.000Z | Glassdoor scraping/main.py | stancld/MSc-Project | 31a57ed58a902fe998649b948c61e70ca78729a4 | [
"MIT"
] | 5 | 2021-03-19T08:52:22.000Z | 2021-09-22T19:21:44.000Z | Glassdoor scraping/main.py | stancld/MSc-Project | 31a57ed58a902fe998649b948c61e70ca78729a4 | [
"MIT"
] | 2 | 2020-09-29T03:27:38.000Z | 2020-11-07T05:41:10.000Z | # import libraries
import time
import datetime
from argparse import ArgumentParser
from datetime import date
import re
import json
import numpy as np
import pandas as pd
import django
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from GlassdoorScraper import GlassdoorScraper
from set_django_db import set_django_db
# parameters/arguments
parser = ArgumentParser()
parser.add_argument(
'--chrome_driver_path',
default='/mnt/c/Data/UCL/@MSC Project - Data and sources/chromedriver.exe',
help='An absolute path to the ChromeDriver.'
)
parser.add_argument(
'--headless',
action='store_true',
help='If --headless is passed in, the `headless` browsing is used.'
)
parser.add_argument(
'--email',
help='Email used for log in to the Glassdoor account.'
)
parser.add_argument(
'-p', '--password',
help='Password used for log in to the Glassdoor account.'
)
parser.add_argument(
'-c', '--credentials',
default='/mnt/c/Data/UCL/@MSc Project - Data and sources/credentials.json',
help='Path to credential file containing email and password\
used for log in to the Glassdoor account.'
)
parser.add_argument(
'--companies',
help="An absolute path to the list of companies (txt file)."
)
parser.add_argument(
'-u', '--url',
help='An absolute path to the list of URL address (txt file)\
to the landing page of reviews for a given company.'
)
parser.add_argument(
'--location',
default='London',
help="A location we are interested in.\
Default='London'"
)
parser.add_argument(
'--max_review_age',
help='An indication how old reviews are to be scraped.\
Define if min_date is not provided.'
)
parser.add_argument(
'--min_date',
help="An indication up to which date reviews are to be scraped.\
format='yyyy-mm-dd'\
Define iff max_review_age is not provided."
)
parser.add_argument(
'--mysite_path',
default='/mnt/c/Data/UCL/@MSc Project/DB/mysite/',
help='An absolute path to the django application containing models for the DB.\
This is required iff output_path is not passed in.'
)
parser.add_argument(
'--output_path',
help='An absolute path of the output csv/xlsx file storing the scraped data.\
This is required iff mysite_path is not passed in.'
)
parser.add_argument(
'-l', '--limit',
help='A number of pages to be scraped.\
This is an ideal option for testing, otherwise no limit is passed.'
)
args = parser.parse_args()
# some value assignments and sanity checks
## credentials: either a json file via --credentials, or --email/--password
if args.credentials:
    try:
        with open(args.credentials) as f:
            credentials = json.loads(f.read())
        args.email = credentials['email']
        args.password = credentials['password']
    except FileNotFoundError:
        raise Exception('The filepath given does not exist.')
else:
    # fixed: the original no-op assignment `args.email, args.password = ...`
    # could never raise ValueError, so missing credentials slipped through
    if not (args.email and args.password):
        raise Exception('Neither filepath to the credentials, nor email and password are specified.\
            Please, provide either path to the file with credentials or email/password directly to cmd.')

## file path to the txt file with companies
if args.companies:
    try:
        with open(args.companies, 'r') as f:
            companies = [line.strip() for line in f]
        print(f"{len(companies)} companies are to be scraped.")
    except FileNotFoundError:
        raise Exception('The filepath given does not exist or the format of the file is not appropriate')
else:
    raise Exception('Filepath to the text file containing companies must be provided.')

## if urls are provided there must be exactly one per company;
## otherwise fall back to None so the scraper searches by company name
if args.url:
    try:
        with open(args.url, 'r') as f:
            urls = [line.strip() for line in f]
    except FileNotFoundError:
        raise Exception('The filepath given does not exist or the format of the file is not appropriate')
    if len(urls) != len(companies):
        raise Exception('The companies file and the url file must have the same number of lines.')
else:
    # fixed: this branch previously raised unconditionally, which made the
    # optional --url argument effectively mandatory
    urls = [None for company in companies]

## min_date | max_review_age are mutually exclusive
if (args.min_date != None) & (args.max_review_age != None):
    raise Exception('Only one parameter out of min_date and max_review_age can be specified!')

## output file (if given) must be a csv/xlsx/xls path
if args.output_path:
    file_type = args.output_path.split('.')[-1]
    if file_type not in ['csv', 'xlsx', 'xls']:
        raise Exception('Invalid file path format.')

## page limit: cast to int when given, otherwise no limit
if args.limit:
    try:
        args.limit = int(args.limit)
    except Exception:
        raise TypeError('Limit must be a type of an integer!.')
else:
    args.limit = float(np.inf)
#######################
##### APPLICATION #####
#######################

def main():
    """Run the Glassdoor scraper for every company (and optional URL) given."""
    if args.mysite_path:
        # import the Review/Company django models so results go to the DB
        set_django_db(mysite_path=args.mysite_path)
        from tables_daniel.models import Review, Company
    else:
        # fixed: Company was previously left undefined on this branch,
        # raising NameError at the GlassdoorScraper(...) call below
        Review = None
        Company = None

    scraper = GlassdoorScraper(
        chrome_driver_path=args.chrome_driver_path,
        email=args.email,
        password=args.password,
        headless_browsing=args.headless,
        review_writer=Review,
        company_reader=Company,
        max_review_age=args.max_review_age,
        min_date=args.min_date
    )

    for company_name, url in zip(companies, urls):
        scraper.getOnReviewsPage(
            company_name=company_name,
            location=args.location,
            url=url
        )
        scraper.acceptCookies()
        scraper.scrape(
            company_name=company_name,
            location=args.location,
            limit=args.limit
        )

if __name__=='__main__':
    main()
df599e9d10321b7bdbba2162c39d2da1b3bddbed | 468 | py | Python | week12/api/urls.py | yestemir/web | 5bdead66c26a3c466701e25ecae9720f04ad4118 | [
"Unlicense"
] | null | null | null | week12/api/urls.py | yestemir/web | 5bdead66c26a3c466701e25ecae9720f04ad4118 | [
"Unlicense"
] | 13 | 2021-03-10T08:46:52.000Z | 2022-03-02T08:13:58.000Z | week12/api/urls.py | yestemir/web | 5bdead66c26a3c466701e25ecae9720f04ad4118 | [
"Unlicense"
] | null | null | null | from django.urls import path
#from api.views import company_list, company_details, company_vacancies, vacancies_list, vacancy_detail
from api.views import company_list, company_details
# Routes for the api app; the commented-out paths correspond to the view
# imports disabled at the top of this file.
urlpatterns = [
    path('companies/', company_list),
    path('companies/<int:company_id>/', company_details),
    #path('companies/<int:company_id>/vacancies/', company_vacancies),
    #path('vacancies/', vacancies_list),
    #path('vacancies/<int:vacancy_id>', vacancy_detail)
]
| 39 | 103 | 0.758547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.634615 |
df5accf9e59165fe020819b87f459f1232b0ff9c | 904 | py | Python | ex02_randomness_test/tests/test_main.py | ittigorn-tra/exercises | c1edc8eea7483eb3f61109bf3540e94e04b1867c | [
"MIT"
] | null | null | null | ex02_randomness_test/tests/test_main.py | ittigorn-tra/exercises | c1edc8eea7483eb3f61109bf3540e94e04b1867c | [
"MIT"
] | null | null | null | ex02_randomness_test/tests/test_main.py | ittigorn-tra/exercises | c1edc8eea7483eb3f61109bf3540e94e04b1867c | [
"MIT"
] | null | null | null | from logging import getLogger
from lottery_config import LotteryConfig
from main import draw_lottery
logger = getLogger()
def test_draw_lottery():
    """Draw repeatedly and check return type, size, bounds and strict ordering."""
    for attempt in range(10000):
        results = draw_lottery()
        logger.info(f'Test #{str(attempt).ljust(4)} Draw Results : {results}')

        # return type must match the requirement
        assert isinstance(results, list)

        # draw properties must match the configuration
        assert len(results) == LotteryConfig.DRAW_SIZE.value
        assert min(results) >= LotteryConfig.BALL_MIN_NO.value
        assert max(results) <= LotteryConfig.BALL_MAX_NO.value

        # balls must be strictly increasing (implies no duplicates)
        previous = LotteryConfig.BALL_MIN_NO.value - 1
        for ball in results:
            assert ball > previous
            previous = ball
| 33.481481 | 86 | 0.698009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.234513 |
df5bcc25e0eb5cf59dcd57c576a359b2d756670b | 1,308 | py | Python | .ipynb_checkpoints/get-checkpoint.py | Ferruolo/delphi | 369d74053ff211f7114b01d51621a388693a0dc7 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/get-checkpoint.py | Ferruolo/delphi | 369d74053ff211f7114b01d51621a388693a0dc7 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/get-checkpoint.py | Ferruolo/delphi | 369d74053ff211f7114b01d51621a388693a0dc7 | [
"MIT"
] | null | null | null |
import requests
import io
import dask
from bs4 import BeautifulSoup as BS
import nltk
import pandas
import numpy as np
def News(ticker):
    """Scrape WSJ news headlines for `ticker`, tokenize them and return a dask dataframe."""
    #fixed: `import dask` at module level does not make the dask.dataframe
    #subpackage available; it must be imported explicitly
    import dask.dataframe
    B = BS(requests.get(f"https://www.wsj.com/market-data/quotes/{ticker}", headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}).content, features="html.parser")
    News = B.find('ul', {'id': "newsSummary_c"})
    News = [a.getText() for a in News.find_all('a')]
    News = [nltk.word_tokenize(h) for h in News]
    return dask.dataframe.from_array(np.asarray(News))
api_key = 'MZE3U0MSR1DCE53Z'

def daily(ticker, outputsize = 'compact'):
    """Return the Alpha Vantage daily adjusted time series for `ticker` as a DataFrame."""
    url = f'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&&symbol={ticker}&apikey={api_key}&outputsize={outputsize}&datatype=csv'
    raw = requests.get(url).content.decode('utf-8')
    return pandas.read_csv(io.StringIO(raw))
def intraday_data(ticker, time='1min', outputsize = 'compact'):
    """Return the Alpha Vantage intraday time series for `ticker` as a DataFrame."""
    url = f'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={ticker}&interval={time}&apikey={api_key}&outputsize={outputsize}&datatype=csv'
    raw = requests.get(url).content.decode('utf-8')
    return pandas.read_csv(io.StringIO(raw))
def tickers():
    """Return the first column (ticker symbols) of NYSE_TICKERS.csv as a Series."""
    frame = pandas.read_csv("NYSE_TICKERS.csv")
    return frame.iloc[:, 0]
| 39.636364 | 250 | 0.714832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 586 | 0.448012 |
df5bcf4144a389293dde7bcc338b8d079f6fde6d | 6,587 | py | Python | glom/test/test_path_and_t.py | justinvanwinkle/glom | 6451e9800b50078b60ebb480be83385cc70b1b3a | [
"BSD-3-Clause"
] | null | null | null | glom/test/test_path_and_t.py | justinvanwinkle/glom | 6451e9800b50078b60ebb480be83385cc70b1b3a | [
"BSD-3-Clause"
] | null | null | null | glom/test/test_path_and_t.py | justinvanwinkle/glom | 6451e9800b50078b60ebb480be83385cc70b1b3a | [
"BSD-3-Clause"
] | null | null | null |
from pytest import raises
from glom import glom, Path, S, T, A, PathAccessError, GlomError, BadSpec
def test_list_path_access():
    # an integer Path segment indexes into a sequence
    target = list(range(10))
    assert glom(target, Path(1)) == 1
def test_path():
    # Path segments may be dotted strings, arbitrary objects (as dict keys)
    # and negative sequence indices
    sentinel = object()
    target = {'a': {'b.b': [None, {sentinel: [None, None, 'd']}]}}
    assert glom(target, Path('a', 'b.b', 1, sentinel, -1)) == 'd'
def test_empty_path_access():
    """An empty Path is an identity spec, both alone and nested in other specs."""
    target = {}

    assert glom(target, Path()) is target
    assert glom(target, (Path(), Path(), Path())) is target

    dup_dict = glom(target, {'target': Path(),
                             'target2': Path()})
    # FIX: these two checks were bare expression statements and asserted
    # nothing; they now actually verify identity.
    assert dup_dict['target'] is target
    assert dup_dict['target2'] is target
def test_path_t_roundtrip():
    """repr() of T and Path specs is canonical and round-trippable."""
    # check that T repr roundtrips
    assert repr(T['a'].b.c()) == "T['a'].b.c()"
    assert repr(T[1:]) == "T[1:]"
    assert repr(T[::3, 1:, 1:2, :2:3]) == "T[::3, 1:, 1:2, :2:3]"
    # check that Path repr roundtrips
    assert repr(Path('a', 1, 'b.b', -1.0)) == "Path('a', 1, 'b.b', -1.0)"
    # check that Path repr roundtrips when it contains Ts
    assert repr(Path(T['a'].b, 'c', T['d'].e)) == "Path(T['a'].b, 'c', T['d'].e)"
    # check that T instances containing Path access revert to repring with Path
    assert repr(Path(T['a'].b, 'c', T['d'].e).path_t) == "Path(T['a'].b, 'c', T['d'].e)"
    # check that Paths containing only T objects reduce to a T (joining the T objects)
    assert repr(Path(T['a'].b, T.c())) == "T['a'].b.c()"
    # check that multiple nested paths reduce
    assert repr(Path(Path(Path('a')))) == "Path('a')"
    # check builtin repr (builtins render by bare name, not with quotes)
    assert repr(T[len]) == 'T[len]'
    assert repr(T.func(len, sum)) == 'T.func(len, sum)'
def test_path_access_error_message():
    """PathAccessError messages name the failing part, its index, and the cause."""
    # test fuzzy access
    with raises(GlomError) as exc_info:
        glom({}, 'a.b')
    assert ("PathAccessError: could not access 'a', part 0 of Path('a', 'b'), got error: KeyError"
            in exc_info.exconly())
    ke = repr(KeyError('a'))  # py3.7+ changed the keyerror repr
    assert repr(exc_info.value) == "PathAccessError(" + ke + ", Path('a', 'b'), 0)"
    # test multi-part Path with T, catchable as a KeyError
    with raises(KeyError) as exc_info:
        # don't actually use glom to copy your data structures please
        glom({'a': {'b': 'c'}}, Path('a', T.copy(), 'd'))
    assert ("PathAccessError: could not access 'd', part 3 of Path('a', T.copy(), 'd'), got error: KeyError"
            in exc_info.exconly())
    ke = repr(KeyError('d'))  # py3.7+ changed the keyerror repr
    assert repr(exc_info.value) == "PathAccessError(" + ke + ", Path('a', T.copy(), 'd'), 3)"
    # test AttributeError (attribute access on a plain dict)
    with raises(GlomError) as exc_info:
        glom({'a': {'b': 'c'}}, Path('a', T.b))
    assert ("PathAccessError: could not access 'b', part 1 of Path('a', T.b), got error: AttributeError"
            in exc_info.exconly())
    ae = repr(AttributeError("'dict' object has no attribute 'b'"))
    assert repr(exc_info.value) == "PathAccessError(" + ae + ", Path(\'a\', T.b), 1)"
def test_t_picklability():
    """T and S specs survive a pickle round-trip with identical repr and behavior."""
    import pickle

    class TargetType(object):
        def __init__(self):
            # attribute chain ending in a callable dict-of-callables, so the
            # spec below exercises attribute, call, index, and kwarg-call steps
            self.attribute = lambda: None
            self.attribute.method = lambda: {'key': lambda x: x * 2}

    spec = T.attribute.method()['key'](x=5)
    rt_spec = pickle.loads(pickle.dumps(spec))
    assert repr(spec) == repr(rt_spec)
    assert glom(TargetType(), spec) == 10
    s_spec = S.attribute
    assert repr(s_spec) == repr(pickle.loads(pickle.dumps(s_spec)))
def test_a_forbidden():
    """Invalid uses of the assignment spec A raise BadSpec."""
    with raises(BadSpec):
        A()  # cannot assign to a function call
    with raises(BadSpec):
        glom(1, A)  # cannot assign without a destination
def test_s_magic():
    """S reads values from the scope; missing names raise PathAccessError."""
    assert glom(None, S.test, scope={'test': 'value'}) == 'value'

    with raises(PathAccessError):
        glom(1, S.a)  # ref to 'a' which doesn't exist in scope

    with raises(PathAccessError):
        glom(1, A.b.c)
    # (a dead trailing `return` statement was removed)
def test_path_len():
    """len() of a Path counts its access segments."""
    cases = [
        (Path(), 0),
        (Path('a', 'b', 'c'), 3),
        (Path.from_text('1.2.3.4'), 4),
        (Path(T), 0),
        (Path(T.a.b.c), 3),
        (Path(T.a()['b'].c.d), 5),
    ]
    for path, expected in cases:
        assert len(path) == expected
def test_path_getitem():
    """Indexing a Path yields single-segment Paths; out-of-range raises IndexError."""
    path = Path(T.a.b.c)
    assert path[0] == Path(T.a)
    assert path[1] == Path(T.b)
    assert path[2] == Path(T.c)
    assert path[-1] == Path(T.c)
    assert path[-2] == Path(T.b)

    with raises(IndexError, match='Path index out of range'):
        path[4]

    with raises(IndexError, match='Path index out of range'):
        path[-14]
    # (a dead trailing `return` statement was removed)
def test_path_slices():
    """Slicing a Path behaves like slicing a sequence of its segments."""
    path = Path(T.a.b, 1, 2, T(test='yes'))

    assert path[::] == path

    # positive indices
    assert path[3:] == Path(2, T(test='yes'))
    assert path[1:3] == Path(T.b, 1)
    assert path[:3] == Path(T.a.b, 1)

    # positive indices backwards
    assert path[2:1] == Path()

    # negative indices forward
    assert path[-1:] == Path(T(test='yes'))
    assert path[:-2] == Path(T.a.b, 1)
    assert path[-3:-1] == Path(1, 2)

    # negative indices backwards
    assert path[-1:-3] == Path()

    # slicing and stepping
    assert path[1::2] == Path(T.b, 2)
def test_path_values():
    """Path.values() returns the raw segment values (call segments as (args, kwargs))."""
    path = Path(T.a.b, 1, 2, T(test='yes'))
    assert path.values() == ('a', 'b', 1, 2, ((), {'test': 'yes'}))
    assert Path().values() == ()


def test_path_items():
    """Path.items() pairs each segment value with its operation tag.

    Tags seen here: '.' attribute access, 'P' plain path/index segment,
    '(' call.
    """
    path = Path(T.a, 1, 2, T(test='yes'))

    assert path.items() == (('.', 'a'),
                            ('P', 1), ('P', 2),
                            ('(', ((), {'test': 'yes'})))
    assert Path().items() == ()


def test_path_eq():
    """Paths compare by segments; unrelated objects are never equal."""
    assert Path('a', 'b') == Path('a', 'b')
    assert Path('a') != Path('b')
    assert Path() != object()


def test_path_eq_t():
    """A Path wrapping a T spec compares equal to that T spec."""
    assert Path(T.a.b) == T.a.b
    assert Path(T.a.b.c) != T.a.b
def test_startswith():
    """Path.startswith accepts T specs, Paths and dotted strings; other types raise."""
    ref = T.a.b[1]
    assert Path(ref).startswith(T)
    assert Path(ref).startswith(T.a.b)
    assert Path(ref).startswith(ref)
    assert Path(ref).startswith(ref.c) is False

    assert Path('a.b.c').startswith(Path())
    assert Path('a.b.c').startswith('a.b.c')

    with raises(TypeError):
        assert Path('a.b.c').startswith(None)
    # (a dead trailing `return` statement was removed)
def test_from_t_identity():
    """from_t() on a Path built from a T returns the very same Path object."""
    ref = Path(T.a.b)
    assert ref.from_t() == ref
    assert ref.from_t() is ref


def test_t_dict_key():
    """A T spec may be used as a key in a dict spec (evaluated against the target)."""
    target = {'a': 'A'}
    assert glom(target, {T['a']: 'a'}) == {'A': 'A'}


def test_t_dunders():
    """Dunder attributes must go through T.__(); direct access raises with a hint."""
    with raises(AttributeError) as exc_info:
        T.__name__

    assert 'use T.__("name__")' in str(exc_info.value)

    assert glom(1, T.__('class__')) is int
| 27.560669 | 108 | 0.570366 | 164 | 0.024898 | 0 | 0 | 0 | 0 | 0 | 0 | 1,803 | 0.273721 |
df5d5ba2560d1eb8c0481b8f8f3df57ed776d13a | 83 | py | Python | dissononce/dh/x448/private.py | dineshks1/dissononce | 154297aba0e9fdedad9279278f748bd8e4f790c6 | [
"MIT"
] | 34 | 2019-04-18T03:35:51.000Z | 2022-03-20T13:35:04.000Z | dissononce/dh/x448/private.py | dineshks1/dissononce | 154297aba0e9fdedad9279278f748bd8e4f790c6 | [
"MIT"
] | 2 | 2019-04-24T06:42:33.000Z | 2019-07-17T19:40:40.000Z | dissononce/dh/x448/private.py | dineshks1/dissononce | 154297aba0e9fdedad9279278f748bd8e4f790c6 | [
"MIT"
] | 16 | 2019-05-02T08:29:17.000Z | 2021-12-06T22:50:37.000Z | from dissononce.dh import private
class PrivateKey(private.PrivateKey):
    """X448 private key: a marker subclass of the generic DH PrivateKey.

    Adds no behavior of its own; presumably exists so X448 keys are a
    distinct type -- TODO confirm against the dissononce DH interfaces.
    """
    pass
| 13.833333 | 37 | 0.783133 | 46 | 0.554217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
df5dac3db1c3737b475d3ae31df6900df6cb8e93 | 6,850 | py | Python | main.py | BXRSRUDIOS/Shooter-Game | 0df64e0e5852f51b7ad4f5bb8883e7207d867fc6 | [
"MIT"
] | null | null | null | main.py | BXRSRUDIOS/Shooter-Game | 0df64e0e5852f51b7ad4f5bb8883e7207d867fc6 | [
"MIT"
] | null | null | null | main.py | BXRSRUDIOS/Shooter-Game | 0df64e0e5852f51b7ad4f5bb8883e7207d867fc6 | [
"MIT"
] | null | null | null | from pygame import *
from random import randint
import json
#fonts and captions
font.init()
font1 = font.SysFont('Comic Sans', 60)
win = font1.render('Dam you actually poggers', True, (255, 255, 255))
lose = font1.render('Yeah you suck, you lost', True, (180, 0, 0))
font2 = font.SysFont('Comic Sans', 36)
ammo_lose = font1.render('You ran out of Ammo :(', True, (180, 0, 0))
#we need the following images:
img_back = "galaxy.jpg" #game background
img_hero = "rocket.png" #hero
img_bullet = "bullet.png" #bullet
img_enemy = "ufo.png" #enemy
img_powerup = "Ruby_Icon.png"
# very cool images yummy images
score = 0 #ships destroyed
lost = 0 #ships missed
max_lost = 3 #lose if you miss that many
#parent class for other sprites
class GameSprite(sprite.Sprite):
    """Base class for all game sprites: loads and scales an image, tracks
    position via ``rect`` and a per-sprite ``speed``."""

    #class constructor
    def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed):
        # player_image: path to the image file; player_x/player_y: initial
        # position; size_x/size_y: scaled sprite size; player_speed: pixels
        # moved per update tick.
        #Call for the class (Sprite) constructor:
        sprite.Sprite.__init__(self)
        #every sprite must store the image property
        self.image = transform.scale(image.load(player_image), (size_x, size_y))
        self.speed = player_speed
        #every sprite must have the rect property that represents the rectangle it is fitted in
        self.rect = self.image.get_rect()
        self.rect.x = player_x
        self.rect.y = player_y

    #method drawing the character on the window (module-level `window` surface)
    def reset(self):
        window.blit(self.image, (self.rect.x, self.rect.y))
dday = 2   # enemy-count control: `range(1, dday)` spawns dday-1 enemies per level
level = 1  # current level number (shown in the HUD)
#main player class
class Player(GameSprite):
    """The rocket controlled with the arrow keys; can fire bullets upward."""

    def update(self):
        """Move horizontally while an arrow key is held, clamped to the window."""
        pressed = key.get_pressed()
        if pressed[K_LEFT] and self.rect.x > 5:
            self.rect.x -= self.speed
        if pressed[K_RIGHT] and self.rect.x < win_width - 80:
            self.rect.x += self.speed

    def fire(self):
        """Spawn a bullet at the nose of the ship and add it to the bullet group."""
        shot = Bullet(img_bullet, self.rect.centerx, self.rect.top, 15, 20, -15)
        bullets.add(shot)
#enemy sprite class
class Enemy(GameSprite):
    """A UFO that falls down the screen; counts a miss and respawns when it
    passes the bottom edge."""

    def update(self):
        """Advance downward; recycle to a random spot at the top once off-screen."""
        global lost
        self.rect.y += self.speed
        if self.rect.y > win_height:
            # Missed: count it and respawn along the top edge.
            lost += 1
            self.rect.y = 0
            self.rect.x = randint(80, win_width - 80)
#bullet sprite class
class Bullet(GameSprite):
    """A projectile fired by the player; removed once it leaves the top edge."""

    def update(self):
        """Move upward (speed is negative) and self-destruct off-screen."""
        new_y = self.rect.y + self.speed
        self.rect.y = new_y
        if new_y < 0:
            self.kill()
#Create a window
win_width = 700
win_height = 500
display.set_caption("Shooter")
window = display.set_mode((win_width, win_height))
background = transform.scale(image.load(img_back), (win_width, win_height))

# Create the player and the initial enemy wave (range(1, dday) -> dday-1 enemies).
ship = Player(img_hero, 5, win_height - 100, 80, 100, 10)
monsters = sprite.Group()
powerups = sprite.Group()
for i in range(1, dday):
    monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(1, 5))
    monsters.add(monster)

bullets = sprite.Group()

#the finish variable: as soon as True is there, sprites stop working in the main loop
finish = False

#Main game loop flag: reset by the window close button
run = True
score_req = 1      # score needed to clear the current level
total_score = 0    # score accumulated across levels
ammo = 20          # bullets remaining
# Load the persisted best score (written back by update_stats_on_lose()).
with open('best_score.json', "r") as f:
    data = json.load(f)
    best_play = data["best_score"]
def restart():
    """Reset per-level state and start the next, harder level.

    Clears score/missed counters, removes all live bullets and enemies,
    bumps the level/enemy count, grants 20 more ammo, re-reads the best
    score from disk and spawns the next enemy wave.
    """
    global score, lost, dday, level, score_req, ammo, best_play
    score = 0
    lost = 0
    # NOTE(review): these renders are immediately overwritten by the next
    # frame's HUD draw in the main loop -- likely redundant.
    text = font2.render("Score: " + str(score), 1, (255, 255, 255))
    window.blit(text, (10, 20))
    text_lose = font2.render("Missed: " + str(lost), 1, (255, 255, 255))
    window.blit(text_lose, (10, 50))
    # Remove every in-flight bullet and enemy from their groups.
    for i in bullets:
        i.kill()
    for i in monsters:
        i.kill()
    dday = dday + 1     # one more enemy next level
    level += 1
    ammo += 20
    # Refresh the best score in case update_stats_on_lose() persisted a new one.
    with open("best_score.json", "r") as f:
        data = json.load(f)
        best_play = data["best_score"]
    score_req = score_req + randint(1, 5)
    for i in range(1, dday):
        monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(1, 5))
        monsters.add(monster)
def update_stats_on_lose():
    """Persist a new best score (if beaten) and zero the run's progress.

    The zeroed values (level=0, score_req=0, dday=1, ammo=0) are chosen so
    the subsequent restart() increments land the player back at level 1 --
    presumably intentional; confirm against restart().
    """
    global best_play, total_score, level, score_req, dday, ammo
    if best_play < total_score:
        best_play = total_score
        # NOTE(review): `data` is the module-level dict loaded from
        # best_score.json at startup; mutated and re-written here.
        data["best_score"] = total_score
        with open('best_score.json', "w") as f:
            json.dump(data, f)
    total_score = 0
    level = 0
    score_req = 0
    dday = 1
    ammo = 0
# Main game loop.
# FIXES vs. the original:
#  * `fire_sound.play()` referenced an object never created anywhere in this
#    file (no mixer/sound setup exists), so the first SPACE press raised a
#    NameError -- the call is removed.
#  * `amount_of_powerups_claimed` was incremented without ever being
#    initialised (NameError on first powerup hit) -- initialised here.
#  * the out-of-ammo check used `ammo == 0`, which is skipped once ammo goes
#    negative (firing decrements before the check) -- now `ammo <= 0`.
amount_of_powerups_claimed = 0
while run:
    #"Close" button press event
    for e in event.get():
        if e.type == QUIT:
            run = False
        #event of pressing the space bar - the sprite shoots
        elif e.type == KEYDOWN:
            if e.key == K_SPACE:
                ammo -= 1
                ship.fire()

    if not finish:
        #update the background
        window.blit(background, (0, 0))

        # Draw the HUD text.
        text_level = font2.render("Level: " + str(level), 1, (255, 255, 255))
        window.blit(text_level, (10, 80))
        text_ammo = font2.render("Ammo: " + str(ammo), 1, (255, 255, 255))
        window.blit(text_ammo, (560, 10))
        text_highscore = font2.render("Best Score Ever: " + str(best_play), 1, (255, 255, 255))
        window.blit(text_highscore, (10, 140))
        text_score_overall = font2.render("Total Score: " + str(total_score), 1, (255, 255, 255))
        window.blit(text_score_overall, (10, 110))
        text = font2.render("This Level Score: " + str(score), 1, (255, 255, 255))
        window.blit(text, (10, 20))

        # Bullet/enemy and bullet/powerup collisions (both colliders removed).
        spriteslist = sprite.groupcollide(
            monsters, bullets, True, True
        )
        spriteslist2 = sprite.groupcollide(
            powerups, bullets, True, True
        )
        if spriteslist2:
            amount_of_powerups_claimed += 1
        if ammo <= 0:
            update_stats_on_lose()
            window.blit(ammo_lose, (10, 200))
            finish = True
        if spriteslist:
            score += 1
            total_score += 1
            # Replace the destroyed enemy with a fresh one at the top.
            monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(1, 5))
            monsters.add(monster)
        if score >= score_req:
            window.blit(win, (10, 200))
            finish = True
        text_lose = font2.render("Missed: " + str(lost), 1, (255, 255, 255))
        window.blit(text_lose, (10, 50))
        if lost >= 3:
            update_stats_on_lose()
            window.blit(lose, (10, 200))
            finish = True

        #launch sprite movements
        ship.update()
        monsters.update()
        bullets.update()
        powerups.update()
        #update them in a new location in each loop iteration
        ship.reset()
        monsters.draw(window)
        bullets.draw(window)
        powerups.draw(window)
        display.update()

    if finish == True:
        # Pause briefly on the end-of-round caption, then start the next level.
        time.delay(1000)
        restart()
        finish = False
    #the loop is executed each 0.05 sec
    time.delay(50)
| 29.148936 | 94 | 0.625109 | 1,719 | 0.250949 | 0 | 0 | 0 | 0 | 0 | 0 | 1,499 | 0.218832 |
df5faeaade38723b81e813a93ac912c8c14cd092 | 863 | py | Python | lib_bgp_data/collectors/traceroutes/tables.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
] | 16 | 2018-09-24T05:10:03.000Z | 2021-11-29T19:18:59.000Z | lib_bgp_data/collectors/traceroutes/tables.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
] | 4 | 2019-10-09T18:54:17.000Z | 2021-03-05T14:02:50.000Z | lib_bgp_data/collectors/traceroutes/tables.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
] | 3 | 2018-09-17T17:35:18.000Z | 2020-03-24T16:03:31.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from ...utils.database import Generic_Table
class ROAs_Table(Generic_Table):
    """ROAs (Route Origin Authorizations) table class."""
    # FIX(doc): the docstring previously said "Announcements table class",
    # which described a different table.

    __slots__ = []

    name = "roas"
    columns = ["asn", "prefix", "max_length", "created_at"]

    def _create_tables(self):
        """Creates tables if they do not exist"""
        # UNLOGGED: skips WAL for faster bulk loads; data is not crash-safe.
        sql = """CREATE UNLOGGED TABLE IF NOT EXISTS roas (
              asn bigint,
              prefix cidr,
              max_length integer,
              created_at bigint
              ) ;"""
        self.execute(sql)

    def create_index(self):
        """Creates a bunch of indexes to be used on the table"""
        logging.debug("Creating index on roas")
        # GIST index with inet_ops enables fast prefix containment queries.
        sql = """CREATE INDEX IF NOT EXISTS roas_index
              ON roas USING GIST(prefix inet_ops)"""
        self.execute(sql)
| 24.657143 | 64 | 0.579374 | 752 | 0.871379 | 0 | 0 | 0 | 0 | 0 | 0 | 518 | 0.600232 |
df616c1c677615ca6b5710038c78f36aa08da811 | 5,880 | py | Python | install/app_store/tk-multi-about/v0.2.7/python/tk_multi_about/dialog.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-multi-about/v0.2.7/python/tk_multi_about/dialog.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-multi-about/v0.2.7/python/tk_multi_about/dialog.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z | # Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import tank
import unicodedata
import os
import sys
import threading
from tank.platform.qt import QtCore, QtGui
from tank.platform import restart
from .ui.dialog import Ui_Dialog
class AppDialog(QtGui.QWidget):
    """Main "About" dialog: shows the current work context, running apps and
    environment, with buttons to jump to Shotgun, the file system, support,
    and to reload the engine."""

    def __init__(self, app):
        # app: the Toolkit application instance providing context/shotgun access.
        QtGui.QWidget.__init__(self)

        # set up the UI
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)

        self._app = app

        # set up the browsers
        self.ui.context_browser.set_app(self._app)
        self.ui.context_browser.set_label("Your Current Work Context")
        self.ui.context_browser.enable_search(False)
        self.ui.context_browser.action_requested.connect( self.show_in_sg )

        self.ui.app_browser.set_app(self._app)
        self.ui.app_browser.set_label("Currently Running Apps")
        self.ui.app_browser.action_requested.connect( self.show_app_in_app_store )

        self.ui.environment_browser.set_app(self._app)
        self.ui.environment_browser.set_label("The Current Environment")
        self.ui.environment_browser.enable_search(False)
        self.ui.environment_browser.action_requested.connect( self.show_engine_in_app_store )

        self.ui.jump_to_fs.clicked.connect( self.show_in_fs )
        self.ui.support.clicked.connect( self.open_helpdesk )
        self.ui.reload_apps.clicked.connect( self.reload )
        self.ui.close.clicked.connect( self.close )

        # load data from shotgun
        self.setup_context_list()

        self.setup_apps_list()

        self.setup_environment_list()

        # When there is no file system locations, hide the "Jump to the File System" button.
        if not self._app.context.filesystem_locations:
            self.ui.jump_to_fs.setVisible(False)

    ########################################################################################
    # make sure we trap when the dialog is closed so that we can shut down
    # our threads. Nuke does not do proper cleanup on exit.

    def closeEvent(self, event):
        # Tear down the browser widgets (and their worker threads) explicitly.
        self.ui.context_browser.destroy()
        self.ui.app_browser.destroy()
        self.ui.environment_browser.destroy()

        # okay to close!
        event.accept()

    ########################################################################################
    # basic business logic

    def setup_context_list(self):
        """Clear and (re)load the context browser."""
        self.ui.context_browser.clear()
        self.ui.context_browser.load({})

    def setup_apps_list(self):
        """Clear and (re)load the running-apps browser."""
        self.ui.app_browser.clear()
        self.ui.app_browser.load({})

    def setup_environment_list(self):
        """Clear and (re)load the environment browser."""
        self.ui.environment_browser.clear()
        self.ui.environment_browser.load({})

    def open_helpdesk(self):
        """Open the Shotgun support site in the default browser."""
        QtGui.QDesktopServices.openUrl(QtCore.QUrl("http://support.shotgunsoftware.com"))

    def reload(self):
        """
        Reload templates and restart engine.
        """
        restart()

    def show_in_fs(self):
        """
        Jump from context to FS
        """
        # launch one window for each location on disk
        paths = self._app.context.filesystem_locations
        for disk_location in paths:

            # get the setting
            system = sys.platform

            # run the app
            # NOTE(review): "linux2" is the Python 2 value of sys.platform;
            # Python 3 reports "linux".
            if system == "linux2":
                cmd = 'xdg-open "%s"' % disk_location
            elif system == "darwin":
                cmd = 'open "%s"' % disk_location
            elif system == "win32":
                cmd = 'cmd.exe /C start "Folder" "%s"' % disk_location
            else:
                raise Exception("Platform '%s' is not supported." % system)

            exit_code = os.system(cmd)
            if exit_code != 0:
                self._app.log_error("Failed to launch '%s'!" % cmd)

    def show_in_sg(self):
        """
        Jump to shotgun
        """
        curr_selection = self.ui.context_browser.get_selected_item()
        if curr_selection is None:
            return

        data = curr_selection.sg_data

        # steps do not have detail pages in shotgun so omit those
        if data["type"] == "Step":
            return

        sg_url = "%s/detail/%s/%d" % (self._app.shotgun.base_url, data["type"], data["id"])
        QtGui.QDesktopServices.openUrl(QtCore.QUrl(sg_url))

    def show_app_in_app_store(self):
        """
        Jump to app store
        """
        curr_selection = self.ui.app_browser.get_selected_item()
        if curr_selection is None:
            return

        doc_url = curr_selection.data.get("documentation_url")
        if doc_url is None:
            QtGui.QMessageBox.critical(self,
                                       "No Documentation found!",
                                       "Sorry, this app does not have any associated documentation!")
        else:
            QtGui.QDesktopServices.openUrl(QtCore.QUrl(doc_url))

    def show_engine_in_app_store(self):
        """
        Jump to app store
        """
        curr_selection = self.ui.environment_browser.get_selected_item()
        if curr_selection is None:
            return

        doc_url = curr_selection.data.get("documentation_url")
        if doc_url:
            QtGui.QDesktopServices.openUrl(QtCore.QUrl(doc_url))
| 34.385965 | 101 | 0.584354 | 5,236 | 0.890476 | 0 | 0 | 0 | 0 | 0 | 0 | 1,715 | 0.291667 |
df61b88c89a2f284a18232a1fc105ff2477b2b45 | 4,665 | py | Python | FHIR_Tester_backend/services/monkey/MonkeyInterpreter.py | ideaworld/FHIR_Tester | 62844af2de510b65535df5ae60da03a082097df0 | [
"MIT"
] | null | null | null | FHIR_Tester_backend/services/monkey/MonkeyInterpreter.py | ideaworld/FHIR_Tester | 62844af2de510b65535df5ae60da03a082097df0 | [
"MIT"
] | 4 | 2020-06-05T17:40:18.000Z | 2022-02-11T03:38:16.000Z | FHIR_Tester_backend/services/monkey/MonkeyInterpreter.py | bowen1993/FHIR_Tester | 62844af2de510b65535df5ae60da03a082097df0 | [
"MIT"
] | 1 | 2016-11-22T01:04:16.000Z | 2016-11-22T01:04:16.000Z | from CodeGenerator import *
class MonkeyInterpreter:
    """Translates a parsed "monkey" test program (a list of action dicts)
    into a runnable Selenium/PhantomJS Python script accumulated in
    ``self.code_str``.  (Python 2 code: note the `print` statement below.)"""

    def __init__(self, prog, filename="", identify="", base_path=""):
        # prog: list of action dicts; filename: where save() writes the
        # generated script; identify: id prefixed to screenshot file names;
        # base_path: directory where screenshots are stored.
        self.prog = prog
        self.func_table = {}
        self.code_str = ''
        self.filename = filename
        self.url = ''
        self.driver = ''
        self.indent = 0
        self._code_init()
        self.base_path = base_path
        self.id = identify
        self.image_index = 0

    def _code_init(self):
        # Boilerplate header of the generated script: selenium imports,
        # a desktop user agent for PhantomJS, and a `steps` screenshot log.
        self.code_str = """from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import time
user_agent = (
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Safari/602.1.50'"
)
steps = []
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = user_agent
"""

    def save(self):
        """Write the generated code to self.filename; return (ok, message)."""
        if len(self.filename) > 0:
            code_file = open(self.filename, 'w')
            code_file.write(self.code_str)
            code_file.close()
            return (True, 'Success')
        else:
            return (False, 'No filename or empty filename')

    def curr_indent(self):
        # Current indentation prefix for emitted lines (self.indent spaces).
        return ' '*self.indent

    def add_screenshot(self, filename, hint):
        """Append code that saves a screenshot and logs it as a (hint, path) step."""
        step_info_dict = {
            'basepath': self.base_path,
            'filename':filename,
            'id':self.id,
            'hint':hint
        }
        self.code_str += "driver.get_screenshot_as_file('%(basepath)s/%(id)s_%(filename)s')\nsteps.append(('%(hint)s','%(basepath)s/%(id)s_%(filename)s'))\n" % step_info_dict

    def get_screenshot_code(self, filename, hint):
        """Like add_screenshot, but return the snippet instead of appending it."""
        step_info_dict = {
            'basepath': self.base_path,
            'filename':filename,
            'id':self.id,
            'hint':hint
        }
        return "driver.get_screenshot_as_file('%(basepath)s/%(id)s_%(filename)s')\nsteps.append(('%(hint)s','%(basepath)s/%(id)s_%(filename)s'))\n" % step_info_dict

    def translate(self):
        """Translate every action in self.prog, appending generated code.

        A screenshot is emitted after each action once the script has
        navigated somewhere (i.e. contains a driver.get( call).
        """
        for index, action in enumerate(self.prog):
            print action
            if action['type'] == 'auth':
                self.code_str += self.transAuth(action)
            elif 'single' in action['type']:
                self.code_str += self.transSingleAction(action)
            elif 'target' in action['type']:
                self.code_str += self.transTargetAction(action)
            elif 'command' in action['type']:
                self.code_str += self.transCommandAction(action)
            elif 'judge' in action['type']:
                self.code_str += self.transJudgeAction(action)
            elif 'repeat' in action['type']:
                self.code_str += self.transRepeat(action)
            elif 'task' in action['type']:
                self.code_str += self.transTask(action)
            if "driver.get(" in self.code_str:
                self.add_screenshot("%d.png"%self.image_index, action['move'])
                self.image_index += 1
        self.code_str += "driver.close()"

    def transAuth(self, action):
        """Emit a basic-auth dialog authentication step."""
        username = action['username']
        password = action['password']
        return "driver.switch_to.alert.authenticate('%s','%s')" % (username, password)

    def transSingleAction(self, action):
        # Dispatch to a generator function (from CodeGenerator's star import)
        # named after the move; DoGenomicAuth additionally consumes screenshots.
        move = action['move']
        if move == 'DoGenomicAuth':
            is_success, increase,stmt_str = globals()[move](self.get_screenshot_code, self.image_index)
            self.image_index += increase
        else:
            is_success, stmt_str = globals()[move]()
        if is_success:
            return stmt_str

    def transTargetAction(self, action):
        """Emit an action aimed at a page element (optionally with a value)."""
        move = action['move']
        args = {
            'target':action['target']
        }
        if 'value' in action:
            args['value'] = action['value']
        is_success, stmt_str = globals()[move](**args)
        if is_success:
            return stmt_str

    def transCommandAction(self, action):
        """Emit a command action that only takes a value."""
        move = action['move']
        args = {
            'value':action['value']
        }
        is_success, stmt_str = globals()[move](**args)
        if is_success:
            return stmt_str

    def transJudgeAction(self, action):
        """Emit an assertion comparing a target against an expected value."""
        args = {
            'target':action['target'],
            'value':action['expect'],
            'is_equal' : action['is_equal']
        }
        is_success, stmt_str = Judge(**args)
        if is_success:
            return stmt_str

    def transRepeat(self, action):
        # Not implemented: repeat blocks currently generate no code.
        return ""

    def transTask(self, action):
        # Not implemented: task blocks currently generate no code.
        return ""

    def get_code_content(self):
        """Return the generated script accumulated so far."""
        return self.code_str
df61ee34acb06ae49cb7ec1c9a176bab65771caa | 3,619 | py | Python | arviz/plots/backends/matplotlib/distcomparisonplot.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | [
"Apache-2.0"
] | 1,159 | 2018-04-03T08:50:54.000Z | 2022-03-31T18:03:52.000Z | arviz/plots/backends/matplotlib/distcomparisonplot.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | [
"Apache-2.0"
] | 1,656 | 2018-03-23T14:15:05.000Z | 2022-03-31T14:00:28.000Z | arviz/plots/backends/matplotlib/distcomparisonplot.py | sudojarvis/arviz | 73531be4f23df7d764b2e3bec8c5ef5cb882590d | [
"Apache-2.0"
] | 316 | 2018-04-03T14:25:52.000Z | 2022-03-25T10:41:29.000Z | """Matplotlib Density Comparison plot."""
import matplotlib.pyplot as plt
import numpy as np
from ...distplot import plot_dist
from ...plot_utils import _scale_fig_size
from . import backend_kwarg_defaults, backend_show
def plot_dist_comparison(
    ax,
    nvars,
    ngroups,
    figsize,
    dc_plotters,
    legend,
    groups,
    textsize,
    labeller,
    prior_kwargs,
    posterior_kwargs,
    observed_kwargs,
    backend_kwargs,
    show,
):
    """Matplotlib Density Comparison plot.

    Draws one row per variable: one small panel per group (prior/posterior/
    observed) plus one wide panel overlaying every group's density.

    Parameters mirror the public ``plot_dist_comparison`` API; ``dc_plotters``
    yields ``(var_name, sel, isel, data)`` tuples per group; ``ax``, if given,
    must be an array of shape ``(nvars, ngroups + 1)``.
    Returns the array of axes.
    """
    if backend_kwargs is None:
        backend_kwargs = {}

    backend_kwargs = {
        **backend_kwarg_defaults(),
        **backend_kwargs,
    }
    # FIX: a second, unreachable `if backend_kwargs is None` check (dead code,
    # since backend_kwargs is always a dict by this point) was removed.

    if prior_kwargs is None:
        prior_kwargs = {}

    if posterior_kwargs is None:
        posterior_kwargs = {}

    if observed_kwargs is None:
        observed_kwargs = {}

    (figsize, _, _, _, linewidth, _) = _scale_fig_size(figsize, textsize, 2 * nvars, ngroups)
    backend_kwargs.setdefault("figsize", figsize)

    def _fill_dist_kwargs(kwargs, color):
        # Shared per-group styling defaults: line color/width and hist alpha.
        # (setdefault is equivalent to the previous get-then-assign pattern.)
        kwargs.setdefault("plot_kwargs", {})
        kwargs["plot_kwargs"].setdefault("color", color)
        kwargs["plot_kwargs"].setdefault("linewidth", linewidth)
        kwargs.setdefault("hist_kwargs", {})
        kwargs["hist_kwargs"].setdefault("alpha", 0.5)

    _fill_dist_kwargs(posterior_kwargs, "C0")
    _fill_dist_kwargs(prior_kwargs, "C1")
    _fill_dist_kwargs(observed_kwargs, "C2")

    if ax is None:
        axes = np.empty((nvars, ngroups + 1), dtype=object)
        fig = plt.figure(**backend_kwargs)
        gs = fig.add_gridspec(ncols=ngroups, nrows=nvars * 2)
        for i in range(nvars):
            for j in range(ngroups):
                axes[i, j] = fig.add_subplot(gs[2 * i, j])
            # Wide overlay panel spans the full second row of each variable.
            axes[i, -1] = fig.add_subplot(gs[2 * i + 1, :])

    else:
        axes = ax
        if ax.shape != (nvars, ngroups + 1):
            raise ValueError(
                "Found {} shape of axes, which is not equal to data shape {}.".format(
                    axes.shape, (nvars, ngroups + 1)
                )
            )

    for idx, plotter in enumerate(dc_plotters):
        group = groups[idx]
        kwargs = (
            prior_kwargs
            if group.startswith("prior")
            else posterior_kwargs
            if group.startswith("posterior")
            else observed_kwargs
        )
        for idx2, (
            var_name,
            sel,
            isel,
            data,
        ) in enumerate(plotter):
            label = f"{group}"
            # Per-group panel, then the shared overlay panel in the last column.
            plot_dist(
                data,
                label=label if legend else None,
                ax=axes[idx2, idx],
                **kwargs,
            )
            plot_dist(
                data,
                label=label if legend else None,
                ax=axes[idx2, -1],
                **kwargs,
            )
            if idx == 0:
                axes[idx2, -1].set_xlabel(labeller.make_label_vert(var_name, sel, isel))

    if backend_show(show):
        plt.show()

    return axes
| 29.663934 | 97 | 0.584692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 523 | 0.144515 |
df622c6b331e5a543976b1e74365b5b1c5deac19 | 1,027 | py | Python | migrations/versions/1a869ac514c_.py | isabella232/comport | 117123862415261095a917ed7f2037c1f986b474 | [
"BSD-3-Clause"
] | 35 | 2015-11-14T18:32:45.000Z | 2022-01-23T15:15:05.000Z | migrations/versions/1a869ac514c_.py | codeforamerica/comport | 117123862415261095a917ed7f2037c1f986b474 | [
"BSD-3-Clause"
] | 119 | 2015-11-20T22:45:34.000Z | 2022-02-10T23:02:36.000Z | migrations/versions/1a869ac514c_.py | isabella232/comport | 117123862415261095a917ed7f2037c1f986b474 | [
"BSD-3-Clause"
] | 19 | 2015-11-20T20:41:52.000Z | 2022-01-26T04:12:34.000Z | """empty message
Revision ID: 1a869ac514c
Revises: 52887f8e06b
Create Date: 2015-09-29 11:06:57.293537
"""
# revision identifiers, used by Alembic.
revision = '1a869ac514c'
down_revision = '52887f8e06b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the `interesteds` table (contact registrations: name, agency,
    location, phone, email, comments)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('interesteds',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=False),
    sa.Column('agency', sa.String(length=255), nullable=False),
    sa.Column('location', sa.String(length=255), nullable=False),
    sa.Column('phone', sa.String(length=255), nullable=False),
    sa.Column('email', sa.String(length=255), nullable=False),
    sa.Column('comments', sa.String(length=255), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###


def downgrade():
    """Drop the `interesteds` table (reverse of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('interesteds')
    ### end Alembic commands ###
| 28.527778 | 65 | 0.686465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.418695 |
df656759ef9cfb9cda2ab717e57721e5c9991075 | 6,213 | py | Python | site_scons/grouptest.py | svn2github/Escript | 9c616a3b164446c65d4b8564ecd04fafd7dcf0d2 | [
"Apache-2.0"
] | null | null | null | site_scons/grouptest.py | svn2github/Escript | 9c616a3b164446c65d4b8564ecd04fafd7dcf0d2 | [
"Apache-2.0"
] | 1 | 2019-01-14T03:07:43.000Z | 2019-01-14T03:07:43.000Z | site_scons/grouptest.py | svn2github/Escript | 9c616a3b164446c65d4b8564ecd04fafd7dcf0d2 | [
"Apache-2.0"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
class GroupTest(object):
    """One named group of unit tests, rendered as a shell function in the
    generated test-runner script."""
    # Class-level registry of every group name created so far.
    _allfuncs = []
def __init__(self, name, exec_cmd, evars, python_dir, working_dir, test_list, single_process_tests=[]):
self.name=name
self.python_dir=python_dir
self.working_dir=working_dir
self.test_list=test_list
self.exec_cmd=exec_cmd
self.evars=evars
self.mkdirs=[]
self.single_process_tests=single_process_tests
self._allfuncs.append(name)
    def makeDir(self,dirname):
        """Record a directory the generated script must create before running."""
        self.mkdirs.append(dirname)
    #stdloc means that the files are in standard locations so don't use prefix
    def makeHeader(build_platform, prefix, stdloc):
        """Build the boilerplate header of the generated test shell script.

        The header parses the wrapper options (-n nodes, -p procs), locates
        the build directory, and sets up the escript runner variables.
        Used as a staticmethod (see assignment after the def).
        """
        res="""#!/bin/sh
#############################################
# This file is autogenerated by scons.
# It will be regenerated each time scons is run
#############################################

failed () {
    echo "Execution failed for $@"
    exit 1
}

if [ $# -lt 2 ]; then
    echo "Usage: $0 build_dir wrapper_options [groupname]"
    echo Runs all or a group of unit tests. Options must be a single string.
    exit 2
fi

case "$1" in
   /*) ;;
   *) echo "build_dir needs to be an absolute path"; exit 4;;
esac

NUMPROCS=1
NUMNODES=1
while getopts ':n:p:' option $2
do
    case "$option" in
    "n") NUMNODES=$OPTARG ;;
    "p") NUMPROCS=$OPTARG ;;
    esac
done
MPIPROD=$(($NUMPROCS * $NUMNODES))
"""
        res+="BUILD_DIR=$1"+"/"+build_platform
        res+="\nif [ ! -d $BUILD_DIR ]\nthen\n echo Can not find build directory $BUILD_DIR\n exit 2\nfi\n"
        # stdloc: escript is on PATH; otherwise everything is under `prefix`.
        if stdloc:
            res+="""MPITYPE=`run-escript -c | grep mpi=`
export OLD_PYTHON=$PYTHONPATH
BATCH_ROOT=`pwd`
BINRUNNER="run-escript -b $2"
PYTHONRUNNER="run-escript $2"
PYTHONTESTRUNNER="run-escript $2 $BATCH_ROOT/tools/testrunner.py"
"""
        else:
            res+="""MPITYPE=`{0}/bin/run-escript -c | grep mpi=`
BATCH_ROOT=`pwd`
export LD_LIBRARY_PATH={0}/lib:$LD_LIBRARY_PATH
export OLD_PYTHON={0}:$PYTHONPATH
BINRUNNER="{0}/bin/run-escript -b $2"
PYTHONRUNNER="{0}/bin/run-escript $2"
PYTHONTESTRUNNER="{0}/bin/run-escript $2 {0}/tools/testrunner.py"
""".format(prefix)
        # macOS needs DYLD_LIBRARY_PATH mirrored from LD_LIBRARY_PATH.
        if build_platform=='darwin':
            res+="export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH:$DYLD_LIBRARY_PATH\n"
        return res
    makeHeader=staticmethod(makeHeader)
def makeString(self):
res="%s () {\n"%self.name
tt="\t"
build_dir = self.working_dir.replace("$BATCH_ROOT", "$BUILD_DIR")
for d in self.mkdirs:
res=res+tt+"if [ ! -d "+str(d)+" ]\n"+tt+"then\n"+tt+"\tmkdir -p "+str(d)+"\n"+tt+"fi\n"
for v in self.evars:
res=res+tt+"export "+str(v[0])+"="+str(v[1])+"\n"
res=res+tt+"if [ ! -d "+str(self.working_dir)+" ]\n"+tt+"then\n"+tt+"\tmkdir -p "+str(self.working_dir)+"\n"+tt+"fi\n"
if len(self.python_dir)>0:
res=res+tt+"export PYTHONPATH="+self.python_dir+":$OLD_PYTHON"+"\n"+tt+"cd "+self.working_dir+"\n"
else:
res=res+tt+"export PYTHONPATH=$OLD_PYTHON"+"\n"+tt+"cd "+self.working_dir+"\n"
if len(self.single_process_tests) > 0:
res+=tt+"if [ $MPIPROD -le 1 ]; then\n"
#res+=tt+'if [ "$MPITYPE" == "mpi=none" ]; then\n'
tt+="\t"
for t in self.single_process_tests:
res=res+tt+"echo Starting "+t+"\n"+tt+"date\n"
skipoutputfile = ""
failoutputfile = ""
cmd = self.exec_cmd
exit_on_failure = " || failed %s"%t
if "examples" not in build_dir and "PYTHONRUNNER" in self.exec_cmd \
and "/tools/" not in build_dir:
skipoutputfile = " -skipfile={0}/{1}".format(build_dir, t.replace(".py", ".skipped"))
failoutputfile = " -failfile={0}/{1}".format(build_dir, t.replace(".py", ".failed"))
cmd = cmd.replace("PYTHONRUNNER", "PYTHONTESTRUNNER")
res += "".join([tt, cmd, t, failoutputfile, skipoutputfile, exit_on_failure, "\n"])
res += tt+"echo Completed "+t+"\n"
tt="\t"
res+=tt+"fi\n"
for t in self.test_list:
res=res+tt+"echo Starting "+t+"\n"+tt+"date\n"
skipoutputfile = ""
failoutputfile = ""
cmd = self.exec_cmd
exit_on_failure = " || failed %s"%t
if "examples" not in build_dir and "PYTHONRUNNER" in self.exec_cmd \
and "/tools/" not in build_dir:
skipoutputfile = " -skipfile={0}/{1}".format(build_dir, t.replace(".py", ".skipped"))
failoutputfile = " -failfile={0}/{1}".format(build_dir, t.replace(".py", ".failed"))
cmd = cmd.replace("PYTHONRUNNER", "PYTHONTESTRUNNER")
res += "".join([tt, cmd, t, failoutputfile, skipoutputfile, exit_on_failure, "\n"])
res += tt+"echo Completed "+t+"\n"
res=res+"}\n"
return res
def makeFooter(self):
res="if [ $# -gt 2 ]; then\n\teval $3\nelse\n\t"
res+="\n\t".join(self._allfuncs)
res+="\nfi\nfind $BUILD_DIR -name '*.failed' | xargs cat; find $BUILD_DIR -name '*.failed' | xargs cat | diff -q - /dev/null >/dev/null\n"
return res
| 39.573248 | 146 | 0.574441 | 5,283 | 0.850314 | 0 | 0 | 0 | 0 | 0 | 0 | 3,269 | 0.526155 |
df6602339dcbf026d961630c8f4e5ef6a2e16385 | 2,708 | py | Python | setup.py | wusung/ipython-notebook-tabs | ee7b3922c7d01cf6182bce63ef69e8a20f7229f9 | [
"Apache-2.0"
] | null | null | null | setup.py | wusung/ipython-notebook-tabs | ee7b3922c7d01cf6182bce63ef69e8a20f7229f9 | [
"Apache-2.0"
] | null | null | null | setup.py | wusung/ipython-notebook-tabs | ee7b3922c7d01cf6182bce63ef69e8a20f7229f9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
# Backwards compatibility for Python 2.x
try:
from itertools import ifilter
filter = ifilter
except ImportError:
pass
from os.path import join as pjoin, splitext, split as psplit
def get_version():
    '''
    Version slurping without importing bookstore, since dependencies may not be
    met until setup is run.

    Reads kyper/__init__.py and returns the value assigned to __version__.
    Raises Exception if no __version__ assignment is found.
    '''
    version_regex = re.compile(r"__version__\s+=\s+"
                               r"['\"](\d+.\d+.\d+\w*)['\"]$")
    # Use a context manager so the file handle is closed deterministically
    # (the previous code leaked the open() handle).
    with open("kyper/__init__.py") as init_file:
        for line in init_file:
            match = version_regex.match(line)
            if match:
                return match.group(1)
    raise Exception("Bookstore version not set")
# Resolve the package version once, up front.
version = get_version()

# Utility for publishing the bookstore, courtesy kennethreitz/requests
# ("python setup.py publish" builds an sdist and uploads it, then exits).
if sys.argv[-1] == 'publish':
    print("Publishing bookstore {version}".format(version=version))
    os.system('python setup.py sdist upload')
    sys.exit()

# Explicit package list (kept in sync with the on-disk layout).
packages = ['kyper',
            'kyper.nbformat',
            'kyper.nbformat.v1',
            'kyper.nbformat.v2',
            'kyper.nbformat.v3',
            'kyper.nbformat.v4',
            'kyper.utils',]

# Runtime dependencies come straight from requirements.txt, one per line.
requires = []
with open('requirements.txt') as reqs:
    requires = reqs.read().splitlines()

# NOTE(review): the open() handles for README.rst and LICENSE below are never
# closed; harmless in a one-shot setup script, but worth knowing.
setup(name='module-tabs',
      version=version,
      description='IPython notebook storage on OpenStack Swift + Rackspace.',
      long_description=open('README.rst').read(),
      author='Wusung Peng',
      author_email='wusung.peng@kyperdata.com',
      url='https://git.kyper.co/wusung.peng/ipython-notebook-fix.git',
      packages=find_packages(),
      package_data = {
          # If any package contains *.txt or *.rst files, include them:
          '': ['*.json', '*.json'],
      },
      include_package_data=True,
      install_requires=requires,
      license=open('LICENSE').read(),
      zip_safe=False,
      classifiers=(
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'Framework :: IPython',
          'Environment :: OpenStack',
          'License :: OSI Approved :: Apache Software License',
          'Natural Language :: English',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Topic :: System :: Distributed Computing',
      ),
      scripts={pjoin('bin/kyper-nbconvert'): 'kyper-nbconvert'},
      )
| 29.758242 | 79 | 0.621492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,277 | 0.471566 |
df69bf1dcf080485d6aacd665889f3da20ecf986 | 1,791 | py | Python | list7/decoder.py | luk9400/kodowanie | 042ac9eb324b55ec02ad0108e77cf8c80c16d82c | [
"MIT"
] | null | null | null | list7/decoder.py | luk9400/kodowanie | 042ac9eb324b55ec02ad0108e77cf8c80c16d82c | [
"MIT"
] | null | null | null | list7/decoder.py | luk9400/kodowanie | 042ac9eb324b55ec02ad0108e77cf8c80c16d82c | [
"MIT"
] | null | null | null | from sys import argv
# Codebook: the 16 codewords of an extended Hamming(8,4) code, one 8-char
# bit-string per 4-bit data nibble. Minimum distance 4, so decoding can
# correct any single-bit error and detect (but not correct) double-bit errors.
G = [
    "00000000",
    "11010010",
    "01010101",
    "10000111",
    "10011001",
    "01001011",
    "11001100",
    "00011110",
    "11100001",
    "00110011",
    "10110100",
    "01100110",
    "01111000",
    "10101010",
    "00101101",
    "11111111",
]
# Running count of 8-bit blocks in which a 2-bit (uncorrectable) error was
# detected; incremented by from_hamming(), reported by decode().
errors = 0
def from_hamming(bits):
    """Decode one 8-bit extended-Hamming block.

    Returns the 4 data bits (positions 2, 4, 5, 6) when the block matches a
    codeword exactly or is within one bit of it (single-error correction).
    Returns None and bumps the module-level ``errors`` counter when a
    two-bit (uncorrectable) error is detected.
    """
    for code in G:
        # Hamming distance between the received block and this codeword.
        mismatches = sum(1 for got, want in zip(bits, code) if got != want)
        if mismatches == 0:
            # Exact match: extract the data bits as received.
            return bits[2] + bits[4] + bits[5] + bits[6]
        if mismatches == 1:
            # Single-bit error: the codeword's own data bits are the fix.
            return code[2] + code[4] + code[5] + code[6]
        if mismatches == 2:
            # Distance 2 to some codeword implies no codeword is within
            # distance 1 (minimum distance is 4), so the block is lost.
            global errors
            errors += 1
            return None
    return None
def decode(bitstring):
    """Decode a Hamming-encoded bit string, 8 bits at a time.

    Each complete 8-bit block yields 4 data bits; blocks with uncorrectable
    errors are replaced by "0000". Trailing bits that do not fill a whole
    block are discarded. Prints a summary of detected 2-bit errors.
    """
    pieces = []
    usable = len(bitstring) - len(bitstring) % 8  # drop any partial tail block
    for start in range(0, usable, 8):
        data = from_hamming(bitstring[start:start + 8])
        pieces.append(data if data is not None else "0000")
    print(f"W {errors} blokach napotkano 2 błędy")
    return "".join(pieces)
def main():
    """CLI entry point: decode argv[1] into argv[2].

    Reads the input file as raw bytes, expands it to a '0'/'1' bit string,
    Hamming-decodes it, and writes the recovered bytes to the output file.
    Silently does nothing unless exactly two file arguments are supplied.
    """
    if len(argv) != 3:
        return
    with open(argv[1], "rb") as src, open(argv[2], "wb") as dst:
        payload = src.read()
        # One byte -> eight '0'/'1' characters (same expansion as hex pairs).
        bitstring = "".join(format(byte, "08b") for byte in payload)
        result = decode(bitstring)
        decoded = bytes(
            int(result[pos:pos + 8], 2) for pos in range(0, len(result), 8)
        )
        dst.write(decoded)
main()
| 19.467391 | 82 | 0.45952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.132738 |
df6ab267b27cf156ebd687d010ea932b0b451fd7 | 1,520 | py | Python | kfdata/tests/test_attribute.py | kylef-archive/KFData.py | 685d58255c9f8518834e395d94d3b75d3dd3eceb | [
"BSD-3-Clause"
] | 1 | 2015-11-08T13:23:39.000Z | 2015-11-08T13:23:39.000Z | kfdata/tests/test_attribute.py | kylef/KFData.py | 685d58255c9f8518834e395d94d3b75d3dd3eceb | [
"BSD-3-Clause"
] | null | null | null | kfdata/tests/test_attribute.py | kylef/KFData.py | 685d58255c9f8518834e395d94d3b75d3dd3eceb | [
"BSD-3-Clause"
] | null | null | null | import unittest
from kfdata.attributes import Attribute, NumberAttribute, BooleanAttribute
class AttributeTests(unittest.TestCase):
    """Unit tests for the basic Attribute model object."""

    def setUp(self):
        # A fresh, indexed, required attribute for every test.
        self.attribute = Attribute(name='firstName', is_indexed=True, is_optional=False)

    def test_creation(self):
        attr = self.attribute
        self.assertEqual(attr.name, 'firstName')
        self.assertTrue(attr.is_indexed)
        self.assertFalse(attr.is_optional)
        self.assertFalse(attr.is_transient)

    def test_str(self):
        self.assertEqual(str(self.attribute), 'firstName')

    def test_repr(self):
        self.assertEqual(repr(self.attribute), '<Attribute firstName>')

    def test_equality(self):
        # Attributes with identical name/flags compare equal.
        twin = Attribute(name='firstName', is_indexed=True, is_optional=False)
        self.assertEqual(self.attribute, twin)

    def test_inequality(self):
        # A differing is_optional flag breaks equality.
        other = Attribute(name='firstName', is_indexed=True, is_optional=True)
        self.assertNotEqual(self.attribute, other)
class NumberAttributeTests(unittest.TestCase):
    """Unit tests for NumberAttribute bounds and defaults."""

    def test_creation(self):
        bounded = NumberAttribute('age', minimum_value=5, maximum_value=10)
        self.assertEqual(bounded.name, 'age')
        # Numeric attributes default to zero.
        self.assertEqual(bounded.default_value, 0)
        self.assertEqual(bounded.minimum_value, 5)
        self.assertEqual(bounded.maximum_value, 10)
class BooleanAttributeTests(unittest.TestCase):
    """Unit tests for BooleanAttribute defaults."""

    def test_default(self):
        # Boolean attributes default to False.
        self.assertEqual(BooleanAttribute('isHuman').default_value, False)
| 35.348837 | 88 | 0.718421 | 1,420 | 0.934211 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.063816 |
df6acc88067fd0d6c4d32d01e3f30d88b50e8f50 | 8,589 | py | Python | fucker.py | MacsedSub/ZBH-Fucker | c39381ef5331fd11924c0ec7f0b76bea46ed98b3 | [
"MIT"
] | 4 | 2020-01-21T09:49:29.000Z | 2021-02-07T00:39:03.000Z | fucker.py | MacsedSub/ZBH-Fucker | c39381ef5331fd11924c0ec7f0b76bea46ed98b3 | [
"MIT"
] | null | null | null | fucker.py | MacsedSub/ZBH-Fucker | c39381ef5331fd11924c0ec7f0b76bea46ed98b3 | [
"MIT"
] | 2 | 2020-12-08T07:20:10.000Z | 2020-12-29T10:29:36.000Z | import threading # 多线程模块
import queue # 队列模块
import requests
import time
import random
import json
import os
import re
import traceback
from urllib.parse import quote
from config import fakeVisitorCount
from config import startUserID
from config import spreadToken
from config import UAs
from config import province
from config import city
from config import scode
class ProtectThread(threading.Thread):
    """Watchdog thread: restarts worker threads that have died.

    WARNING(review): this whole script fabricates page views through rotating
    proxies — i.e. traffic fraud against the target site. Documented here for
    maintenance only; do not extend.
    """
    def __init__(self,num):
        threading.Thread.__init__(self,name = 'pro' + str(num))
        self.num = num
        self.setName('pro' + str(num))
    def run(self):
        # Runtime strings below are user-facing (Chinese) and left untouched.
        print('守护进程' + str(self.num) +'已启动')
        # Expected thread names: the IP fetcher plus one FakeVisitor per slot.
        initThreadsName = ['IP']
        for i in range(1,fakeVisitorCount):
            initThreadsName.append(str(i))
        while True:
            print('守护进程' + str(self.num) + '正在进行守护')
            nowThreadsName=[]  # names of the threads currently alive
            now=threading.enumerate()  # snapshot of live threads
            for i in now:
                nowThreadsName.append(i.getName())  # record each live name
            for ip in initThreadsName:
                if ip in nowThreadsName:
                    pass  # expected thread is still alive, nothing to do
                else:
                    # Thread is missing: restart the matching worker.
                    # NOTE(review): the 'pro1'/'pro2' branches are unreachable,
                    # since initThreadsName never contains those names.
                    if ip == 'IP':
                        print ('==='+ 'IPGeter不在线,正在重新启动' + '===')
                        IPThread = IPGeter(IPList,UserIDList,currentUserID)
                        IPThread.start()
                        IPThread.join()
                    elif ip == 'pro1':
                        print ('==='+ '保护进程1不在线,正在重新启动' + '===')
                        protectT = ProtectThread(1)
                        protectT.start()
                        ProtectTs.append(protectT)
                        protectT.join()
                    elif ip == 'pro2':
                        print ('==='+ '保护进程2不在线,正在重新启动' + '===')
                        protectT = ProtectThread(2)
                        protectT.start()
                        ProtectTs.append(protectT)
                        protectT.join()
                    elif ip != 'MainThread':
                        print ('==='+ 'FakeVisitor进程'+ ip + ' 不在线,正在重新启动' + '===')
                        VisitorT = FakeVisitor(int(ip),IPList,UserIDList)
                        VisitorT.start()
                        VisitorTs.append(VisitorT)
                        VisitorT.join()
            time.sleep(1)  # re-check periodically for threads that went down
class IPGeter(threading.Thread):  # producer thread: refills proxy IPs and user ids
    """Keeps the shared proxy-IP pool and user-ID pool topped up.

    ``ips`` and ``users`` are shared lists consumed by FakeVisitor threads;
    this thread refills them whenever either runs empty.
    """
    # Store the shared pools and the next user id to hand out.
    def __init__(self,ips,users,currentUserID):
        threading.Thread.__init__(self,name = 'IP')
        self.ips=ips
        self.users = users
        self.currentUserID = currentUserID
        self.setName('IP')
    def run(self):
        print('启动IP获取者')
        # Endless producer loop.
        while True:
            if len(self.ips) == 0 or len(self.users) == 0:
                if len(self.ips) == 0:
                    # Proxy pool exhausted: scrape a fresh batch of proxies.
                    print('IP池耗尽,正在获取新的代理')
                    url = 'http://www.66ip.cn/nmtq.php?getnum=20&isp=0&anonymoustype=4&start=&ports=&export=&ipaddress=&area=0&proxytype=0&api=66ip'
                    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'}
                    # Matches "a.b.c.d:port" proxy entries in the response body.
                    ip_port_format = '[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}:[0-9]{1,5}'
                    try:
                        response = requests.get(url,headers=headers)
                    except requests.ConnectionError as e:
                        print('IP代理请求异常,稍后会重试,若一直异常,请检查你的代理设置 WARNING:该请求不可通过终端socks5代理发送')
                    else:
                        if response.status_code == 200:
                            # Site responds in GBK; decode then regex-harvest.
                            rawData = response.content.decode('gbk')
                            proxyAll = re.findall(ip_port_format,rawData)
                            for ip in proxyAll:
                                self.ips.append(ip)
                            print('获取代理成功')
                        else:
                            print('获取代理请求错误状态码:' + str(response.status_code))
                            print('获取代理失败,1秒后重试')
                if len(self.users) == 0:
                    # User-id pool exhausted: hand out the next contiguous batch.
                    print('用户ID耗尽,正在获取新的用户ID')
                    for i in range(0,fakeVisitorCount):
                        self.users.append(self.currentUserID + i)
                    self.currentUserID = self.currentUserID + fakeVisitorCount
                    print('用户ID获取成功')
            else:
                time.sleep(1)
        # Unreachable: the while True above never exits.
        print('IP获取者退出')
class FakeVisitor(threading.Thread):
    """Worker thread that fabricates visits through a proxy.

    Pops one proxy IP and one user id from the shared pools, binds the user
    to the spread token, then fires five page-view-log requests; four or
    more successes count the user toward the shared ``successCount`` total.
    WARNING(review): this is traffic-fraud automation; documented only.
    """
    def __init__(self,num,ips,users):
        threading.Thread.__init__(self,name = str(num))
        self.ips=ips
        self.users = users
        self.num = num
        self.setName(str(num))
    def run(self):
        print('FakeVisitor ' + str(self.num) + '已启动')
        global successCount
        while True:
            if len(self.ips) == 0 or len(self.users) == 0:
                # Pools empty: wait for IPGeter to refill them.
                print('FakeVisitor ' + str(self.num) + '暂无可用IP/用户ID 1秒后重新获取')
                time.sleep(1)
            else:
                # Take one proxy and one user id (NOTE(review): list pop here
                # is not protected by the lock; races are possible).
                ip = self.ips[0]
                del self.ips[0]
                user = self.users[0]
                del self.users[0]
                proxies = {'http':'http://' + ip , 'https' : 'https://' + ip}
                url = 'http://admin.zhinengdayi.com/front/spread/bindUserAndSpread?frontUserId='+str(user)+'&spreadToken='+spreadToken
                headers = {'User-Agent': random.sample(UAs,1)[0]}
                try:
                    response = requests.get(url,headers = headers,proxies = proxies)
                except requests.ConnectionError as e:
                    print('ID:' + str(user) + ' 绑定ID-Token请求异常,将会更换IP&ID重试')
                else:
                    if response.status_code == 200:
                        # Binding succeeded: log five fake page views.
                        recordCount = 0
                        url = 'http://admin.zhinengdayi.com/weixin/api/user/addUserViewLog?userId='+ str(user) +'&userProvince='+ quote(province) +'&userCity='+ quote(city) +'&sCode='+ scode + '&infoId=&majorId=&viewSourceUrl=&pageUrl=http%3A%2F%2Fweixin.zhinengdayi.com%2Fbuild%2Findex.html%3Fscode%3D'+ scode +'%23%2F%3Fuid%3D'+ str(user) +'%26spreadToken%3D' + spreadToken
                        headers = {'User-Agent': random.sample(UAs,1)[0]}
                        for i in range(1,6):
                            print('ID:' + str(user) + ' 正在进行第' + str(i) + '次增加访问记录请求')
                            try:
                                response = requests.get(url,headers = headers,proxies = proxies)
                            except requests.ConnectionError as e:
                                print('ID:' + str(user) + ' 的第' + str(i) + '次增加访问记录请求异常,将会重试')
                            else:
                                if response.status_code == 200:
                                    recordCount = recordCount + 1
                            time.sleep(random.randint(2,5))
                        if recordCount >= 4:
                            # 4/5 view logs landed: count this as a valid user.
                            lock.acquire()
                            successCount = successCount + 1
                            print('成功增加一个有效用户,总共已完成' + str(successCount) + '个,ID是' + str(user))
                            lock.release()
                    else:
                        print(str(user) + '的访问请求失败,将会更换ID重试')
                time.sleep(random.randint(1,3))
# Shared state: proxy pool, user-id pool, worker registries, and counters.
IPList = []
UserIDList = []
VisitorTs = []
ProtectTs = []
currentUserID = startUserID
successCount = 0
# Guards successCount updates across FakeVisitor threads.
lock = threading.Lock()
# Producer thread that keeps the two pools refilled.
IPThread = IPGeter(IPList,UserIDList,currentUserID)
IPThread.start()
# One consumer thread per visitor slot.
for i in range(1,fakeVisitorCount):
    VisitorT = FakeVisitor(i,IPList,UserIDList)
    VisitorT.start()
    VisitorTs.append(VisitorT)
# NOTE(review): range(1,2) starts only watchdog 'pro1', although
# ProtectThread.run also has a branch for 'pro2'.
for i in range(1,2):
    protectT = ProtectThread(i)
    protectT.start()
    ProtectTs.append(protectT)
# Block the main thread on the (never-ending) workers.
IPThread.join()
for VisitorT in VisitorTs:
    VisitorT.join()
for protectT in ProtectTs:
    protectT.join()
df6be0074b0c935074c3035e1e691a3882bcdeeb | 5,973 | py | Python | hs_composite_resource/tests/test_composite_resource_user_zone.py | tommac7/hydroshare | 87c4543a55f98103d2614bf4c47f7904c3f9c029 | [
"BSD-3-Clause"
] | null | null | null | hs_composite_resource/tests/test_composite_resource_user_zone.py | tommac7/hydroshare | 87c4543a55f98103d2614bf4c47f7904c3f9c029 | [
"BSD-3-Clause"
] | null | null | null | hs_composite_resource/tests/test_composite_resource_user_zone.py | tommac7/hydroshare | 87c4543a55f98103d2614bf4c47f7904c3f9c029 | [
"BSD-3-Clause"
] | null | null | null | import os
from django.test import TransactionTestCase
from django.contrib.auth.models import Group
from django.conf import settings
from hs_core import hydroshare
from hs_core.models import BaseResource
from hs_core.hydroshare.utils import resource_file_add_process, resource_file_add_pre_process
from hs_core.views.utils import create_folder
from hs_core.testing import TestCaseCommonUtilities
from hs_file_types.models import GenericLogicalFile
class CompositeResourceTest(TestCaseCommonUtilities, TransactionTestCase):
    """Composite-resource tests against a federated iRODS user zone.

    These tests run only when the federated iRODS docker environment is
    available (asserted in setUp/tearDown/test bodies).
    """
    def setUp(self):
        """Create a test user, its iRODS user-zone account, and stage a tif file there."""
        super(CompositeResourceTest, self).setUp()
        super(CompositeResourceTest, self).assert_federated_irods_available()
        self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
        self.user = hydroshare.create_account(
            'user1@nowhere.com',
            username='user1',
            first_name='Creator_FirstName',
            last_name='Creator_LastName',
            superuser=False,
            groups=[self.group]
        )

        super(CompositeResourceTest, self).create_irods_user_in_user_zone()

        self.raster_file_name = 'small_logan.tif'
        self.raster_file = 'hs_composite_resource/tests/data/{}'.format(self.raster_file_name)

        # transfer this valid tif file to user zone space for testing
        # only need to test that tif file stored in iRODS user zone space can be used to create a
        # composite resource and the file gets set to GenericLogicalFile type
        # Other relevant tests are adding a file to resource, deleting a file from resource
        # and deleting composite resource stored in iRODS user zone
        # Other detailed tests don't need to be retested for irods user zone space scenario since
        # as long as the tif file in iRODS user zone space can be read with metadata extracted
        # correctly, other functionalities are done with the same common functions regardless of
        # where the tif file comes from, either from local disk or from a federated user zone
        irods_target_path = '/' + settings.HS_USER_IRODS_ZONE + '/home/' + self.user.username + '/'
        file_list_dict = {self.raster_file: irods_target_path + self.raster_file_name}
        super(CompositeResourceTest, self).save_files_to_user_zone(file_list_dict)

    def tearDown(self):
        """Remove the iRODS user-zone account created in setUp."""
        super(CompositeResourceTest, self).tearDown()
        super(CompositeResourceTest, self).assert_federated_irods_available()
        super(CompositeResourceTest, self).delete_irods_user_in_user_zone()

    def test_file_add_to_composite_resource(self):
        """Files added from the user zone become GenericLogicalFile aggregations."""
        # only do federation testing when REMOTE_USE_IRODS is True and irods docker containers
        # are set up properly
        super(CompositeResourceTest, self).assert_federated_irods_available()

        # test that when we add file to an existing composite resource, the added file
        # automatically set to genericlogicalfile type
        self.assertEqual(BaseResource.objects.count(), 0)
        self.composite_resource = hydroshare.create_resource(
            resource_type='CompositeResource',
            owner=self.user,
            title='Test Composite Resource With Files Added From Federated Zone',
            auto_aggregate=False
        )
        # there should not be any GenericLogicalFile object at this point
        self.assertEqual(GenericLogicalFile.objects.count(), 0)

        # add a file to the resource (referenced by its federated iRODS path)
        fed_test_file_full_path = '/{zone}/home/{username}/{fname}'.format(
            zone=settings.HS_USER_IRODS_ZONE, username=self.user.username,
            fname=self.raster_file_name)
        res_upload_files = []
        resource_file_add_pre_process(resource=self.composite_resource, files=res_upload_files,
                                      source_names=[fed_test_file_full_path], user=self.user,
                                      folder=None)
        resource_file_add_process(resource=self.composite_resource, files=res_upload_files,
                                  source_names=[fed_test_file_full_path], user=self.user,
                                  auto_aggregate=False)

        # there should be one resource at this point
        self.assertEqual(BaseResource.objects.count(), 1)
        self.assertEqual(self.composite_resource.resource_type, "CompositeResource")
        self.assertEqual(self.composite_resource.files.all().count(), 1)
        res_file = self.composite_resource.files.first()

        # create the generic aggregation (logical file)
        GenericLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)

        # check that the resource file is associated with GenericLogicalFile
        res_file = self.composite_resource.files.first()
        self.assertEqual(res_file.has_logical_file, True)
        self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
        # there should be 1 GenericLogicalFile object at this point
        self.assertEqual(GenericLogicalFile.objects.count(), 1)

        # test adding a file to a folder (Note the UI does not support uploading a iRODS file
        # to a specific folder)
        # create the folder
        new_folder = "my-new-folder"
        new_folder_path = os.path.join("data", "contents", new_folder)
        create_folder(self.composite_resource.short_id, new_folder_path)
        resource_file_add_pre_process(resource=self.composite_resource, files=res_upload_files,
                                      source_names=[fed_test_file_full_path], user=self.user,
                                      folder=new_folder)
        resource_file_add_process(resource=self.composite_resource, files=res_upload_files,
                                  source_names=[fed_test_file_full_path], user=self.user,
                                  folder=new_folder, auto_aggregate=False)
        self.assertEqual(self.composite_resource.files.all().count(), 2)
        self.composite_resource.delete()
| 51.051282 | 99 | 0.697807 | 5,519 | 0.923991 | 0 | 0 | 0 | 0 | 0 | 0 | 1,712 | 0.286623 |
df6c0d5895a889ba43d3854f304957c6a4db6d15 | 2,310 | py | Python | tests/test_compat.py | eisensheng/pytest-catchlog | f6b05b0afb8f8934b33e0c78a495ebfd8b3599d6 | [
"MIT"
] | 85 | 2015-02-24T20:14:30.000Z | 2021-04-29T09:07:59.000Z | tests/test_compat.py | eisensheng/pytest-catchlog | f6b05b0afb8f8934b33e0c78a495ebfd8b3599d6 | [
"MIT"
] | 67 | 2015-04-22T16:07:47.000Z | 2020-03-19T14:14:19.000Z | tests/test_compat.py | eisensheng/pytest-catchlog | f6b05b0afb8f8934b33e0c78a495ebfd8b3599d6 | [
"MIT"
] | 25 | 2015-05-17T15:22:55.000Z | 2019-01-07T09:21:24.000Z | # -*- coding: utf-8 -*-
import pytest
def test_camel_case_aliases(testdir):
    """Deprecated camelCase caplog aliases still work but emit warnings.

    The generated test calls caplog.setLevel()/atLevel(); the run must pass,
    capture no log output, and report both aliases as deprecated under -rw.
    """
    testdir.makepyfile('''
        import logging

        logger = logging.getLogger(__name__)

        def test_foo(caplog):
            caplog.setLevel(logging.INFO)
            logger.debug('boo!')

            with caplog.atLevel(logging.WARNING):
                logger.info('catch me if you can')
        ''')
    result = testdir.runpytest()
    assert result.ret == 0

    # No captured-log section should appear on a passing run.
    with pytest.raises(pytest.fail.Exception):
        result.stdout.fnmatch_lines(['*- Captured *log call -*'])

    # -rw surfaces the deprecation warnings for both camelCase aliases.
    result = testdir.runpytest('-rw')
    assert result.ret == 0
    result.stdout.fnmatch_lines('''
        =*warning summary*=
        *WL1*test_camel_case_aliases*caplog.setLevel()*deprecated*
        *WL1*test_camel_case_aliases*caplog.atLevel()*deprecated*
    ''')
def test_property_call(testdir):
    """caplog.text/records/record_tuples work both as properties and calls.

    Calling them (legacy syntax) must equal the property form, and each
    legacy call must be reported as deprecated under -rw.
    """
    testdir.makepyfile('''
        import logging

        logger = logging.getLogger(__name__)

        def test_foo(caplog):
            logger.info('boo %s', 'arg')

            assert caplog.text    == caplog.text()    == str(caplog.text)
            assert caplog.records == caplog.records() == list(caplog.records)
            assert (caplog.record_tuples ==
                    caplog.record_tuples() == list(caplog.record_tuples))
        ''')
    result = testdir.runpytest()
    assert result.ret == 0

    # -rw lists a deprecation warning per legacy call-style access.
    result = testdir.runpytest('-rw')
    assert result.ret == 0
    result.stdout.fnmatch_lines('''
        =*warning summary*=
        *WL1*test_property_call*caplog.text()*deprecated*
        *WL1*test_property_call*caplog.records()*deprecated*
        *WL1*test_property_call*caplog.record_tuples()*deprecated*
    ''')
def test_records_modification(testdir):
    """caplog.records can be cleared in place via the legacy call syntax.

    Deleting from caplog.records() must empty both access forms, and new
    log calls must repopulate them.
    """
    testdir.makepyfile('''
        import logging

        logger = logging.getLogger(__name__)

        def test_foo(caplog):
            logger.info('boo %s', 'arg')
            assert caplog.records
            assert caplog.records()

            del caplog.records()[:]  # legacy syntax
            assert not caplog.records
            assert not caplog.records()

            logger.info('foo %s', 'arg')
            assert caplog.records
            assert caplog.records()
        ''')
    result = testdir.runpytest()
    assert result.ret == 0
| 28.518519 | 77 | 0.59697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,645 | 0.712121 |
df6d8794143baa565bed712b5539fdd5723cacc2 | 968 | py | Python | ectrello/commands/board.py | hadi-alnehlawi/ECTRELLI-PYPI | c06c25a6b2ae823d16b426190f58005414a756fd | [
"MIT"
] | null | null | null | ectrello/commands/board.py | hadi-alnehlawi/ECTRELLI-PYPI | c06c25a6b2ae823d16b426190f58005414a756fd | [
"MIT"
] | null | null | null | ectrello/commands/board.py | hadi-alnehlawi/ECTRELLI-PYPI | c06c25a6b2ae823d16b426190f58005414a756fd | [
"MIT"
] | null | null | null | import click
from trello.client import Client
# SECURITY(review): Trello API credentials are hardcoded in source and have
# been committed to version control — they should be revoked and loaded from
# environment variables / a config file instead.
key = "0581b1db0a42258051a8a25fb301e247"
token = "e6985b1a4afdfb4168814ca486e76ff704e171d5751ce9db8c96731f8b1cc0cb"
# Module-level Trello client shared by the CLI command below.
client = Client(key, token)

# Help text rendered by click for the --show option.
show_help = """ \n
TEXT=all show all boards.\n
TEXT=first show first board.\n
TEXT=last show last board.\n
"""
@click.command()
@click.option("--show", required=False, help=show_help)
def cli(show):
    """
    Show board of your trello\n
    ex: trellocli board --show all
    """
    # No option (or "all"): dump the whole board list.
    if show is None or show == "all":
        print(client.get_boards())
    elif show in ("first", "last"):
        boards = client.get_boards()
        position = 0 if show == "first" else -1
        print(client.get_board(id=boards[position].id))
    elif show == "--help":
        print("helping ")
    else:
        # Any other value is treated as a board id to look up directly.
        print(client.get_board(id=show))
| 24.820513 | 74 | 0.637397 | 0 | 0 | 0 | 0 | 661 | 0.682851 | 0 | 0 | 320 | 0.330579 |
df6daf1f508afcbdd19a3d77ece59a5c6974f4e9 | 1,706 | py | Python | main.py | prozum/mastermixer | e858ea60e10d3e3d02c934b3b1d277856acbdb4c | [
"MIT"
] | null | null | null | main.py | prozum/mastermixer | e858ea60e10d3e3d02c934b3b1d277856acbdb4c | [
"MIT"
] | null | null | null | main.py | prozum/mastermixer | e858ea60e10d3e3d02c934b3b1d277856acbdb4c | [
"MIT"
] | null | null | null | from sqlite3 import dbapi2 as sqlite3
from libliquor import Actuator,Ranger,Mixer
# Application configuration: DEBUG skips all hardware interaction.
config = dict(
    DEBUG=True,
    DATABASE='/tmp/flaskr.db',
)

# Setup objects
# Hardware objects exist ONLY when DEBUG is False; main() relies on the
# DEBUG branch breaking out of its loop before touching `mixer`.
if not config["DEBUG"]:
    motor = Actuator(10)
    piston = Actuator(11)
    valves = [Actuator(12), Actuator(13)]
    rangers = [Ranger(0), Ranger(1)]
    mixer = Mixer(motor,piston,valves,rangers)
def connect_db():
    """Open a connection to the configured SQLite database.

    Rows are returned as sqlite3.Row objects so columns can be accessed
    by name as well as by index.
    """
    connection = sqlite3.connect(config['DATABASE'])
    connection.row_factory = sqlite3.Row
    return connection
def get_db():
    """Return a database connection.

    NOTE(review): despite the Flask-style wording this used to carry, it
    always opens a brand-new connection; there is no application context or
    caching here. Callers are responsible for closing the connection.
    """
    return connect_db()
def make_recipe():
    """Pop the oldest order from the queue and compute its recipe.

    Returns a list with one entry per ingredient: the ingredient's share of
    the drink (its stat divided by the sum of stats) scaled by the drink's
    load factor (Stat1 / 100).

    Raises TypeError if there are no pending orders (fetchone() is None) —
    same behaviour as before, just noted explicitly.
    """
    db = get_db()
    try:
        # Oldest order first.
        order, product = db.execute(
            'SELECT Id, P_Id FROM Orders ORDER BY Id ASC LIMIT 1').fetchone()
        # Parameterized queries instead of string concatenation (avoids SQL
        # injection and quoting bugs); same SQL semantics for integer ids.
        db.execute('DELETE FROM Orders WHERE Id = ?', (order,))
        db.commit()
        data = list(db.execute(
            'SELECT Stat1, Stat2, Stat3 FROM Products WHERE Id = ?',
            (product,)).fetchone())
    finally:
        # get_db() hands out a fresh connection each call; close it so
        # repeated orders don't leak connections.
        db.close()
    load = data[0] / 100.0          # Stat1 is the total load, in percent
    parts = data[1:]                # remaining stats are ingredient weights
    total = float(sum(parts))
    return [part / total * load for part in parts]
def show_recipe(recipe, max=250):
    """Print a recipe: the raw ratio list, then one line per liquid.

    Each ratio is scaled by `max` (the dispensable volume) for display.
    """
    print(recipe)
    for index, ratio in enumerate(recipe):
        print("liquid " + str(index) + ":" + str(ratio * max))
def main():
    """Main control loop: process orders until interrupted.

    In DEBUG mode, computes and prints one recipe, then exits.
    In hardware mode, mixes and serves each order, pausing 10s between them.
    """
    # BUGFIX: `time` was referenced below but never imported anywhere in this
    # module, so the hardware path crashed with NameError on time.sleep().
    import time
    while 1:
        recipe = make_recipe()
        if config["DEBUG"]:
            show_recipe(recipe)
            break
        # `mixer` only exists when config["DEBUG"] is False (see module setup).
        mixer.mix_drink(recipe)
        mixer.serve()
        time.sleep(10)
if __name__ == "__main__":
main() | 25.088235 | 123 | 0.613716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.240914 |
df6ed2183ff909d2ec6fe9857be8189e0bccd3ed | 572 | py | Python | conftest.py | Shy/migration-env-demo | 3290dbbc933c727f6c92b08cb476d34b95b16d6d | [
"MIT"
] | 1 | 2018-05-17T17:41:02.000Z | 2018-05-17T17:41:02.000Z | conftest.py | Shy/migration-env-demo | 3290dbbc933c727f6c92b08cb476d34b95b16d6d | [
"MIT"
] | 4 | 2018-05-29T04:27:11.000Z | 2018-05-29T15:47:20.000Z | conftest.py | Shy/migration-env-demo | 3290dbbc933c727f6c92b08cb476d34b95b16d6d | [
"MIT"
] | null | null | null | # conftest.py
import pytest
import os
from myapp import create_app, create_contentful
from dotenv import load_dotenv
# Load variables from a local .env file into the process environment.
load_dotenv()

# Contentful credentials come from the environment (None if unset).
SPACE_ID = os.getenv("SPACE_ID")
DELIVERY_API_KEY = os.getenv("DELIVERY_API_KEY")
# Dedicated Contentful environment used by CI test runs.
TESTING_ENV = "circle_testing"
@pytest.fixture
def app():
    """Flask app wired to the Contentful testing environment, debug on."""
    flask_app = create_app(SPACE_ID, DELIVERY_API_KEY, environment_id=TESTING_ENV)
    flask_app.debug = True
    return flask_app
@pytest.fixture
def contentful_client():
    """Contentful client bound to the same testing environment as `app`."""
    return create_contentful(
        SPACE_ID, DELIVERY_API_KEY, environment_id=TESTING_ENV
    )
| 21.185185 | 76 | 0.769231 | 0 | 0 | 0 | 0 | 320 | 0.559441 | 0 | 0 | 57 | 0.09965 |
df6fa3edddb6528b81a9ef12ac57b8e80546f328 | 1,454 | py | Python | db.py | L1mPeX/Casino-Bot | b08be7be75854cc2e386906a9ef8ee3ab768ae78 | [
"MIT"
] | 2 | 2020-11-21T17:33:32.000Z | 2021-08-17T16:52:57.000Z | db.py | L1mPeX/Casino-Bot | b08be7be75854cc2e386906a9ef8ee3ab768ae78 | [
"MIT"
] | null | null | null | db.py | L1mPeX/Casino-Bot | b08be7be75854cc2e386906a9ef8ee3ab768ae78 | [
"MIT"
] | null | null | null | #!/usr/bin/python3.6
import sqlite3
def make_connect():
    """Open the bot database and return (connection, cursor)."""
    connection = sqlite3.connect("bot.db")
    return connection, connection.cursor()
def create_tables():
    """Create every Casino-Bot table, ignoring ones that already exist.

    Behaviour matches the original copy-pasted try/except blocks: each
    CREATE TABLE is attempted independently and database errors (typically
    "table already exists") are swallowed. Other exception types now
    propagate instead of being hidden by a bare ``except:``, and the
    connection is closed when done (it used to leak).
    """
    ddl_statements = (
        "CREATE TABLE users(user_id INTEGER, bal FLOAT, count_games INTEGER, sum_games FLOAT, ref_id INTEGER, ref_sum FLOAT)",
        "CREATE TABLE qiwi_popoln(user_id INTEGER, sum FLOAT, phone INTEGER, random_code INTEGER)",
        "CREATE TABLE withdraw(user_id INTEGER, sum FLOAT, num TEXT, type INTEGER)",
        "CREATE TABLE dice(hash INTEGER, sum_bet FLOAT, creator_user_id INTEGER, player_user_id INTEGER, creator_value INTEGER, player_value INTEGER, type TEXT)",
        "CREATE TABLE history_dice(user_id INTEGER, hash INTEGER, sum_win FLOAT)",
        "CREATE TABLE ban(user_id INTEGER, S INTEGER)",
        "CREATE TABLE nontrueusers(user_id INTEGER, S INTEGER)",
        "CREATE TABLE quetions(hash TEXT, quetion TEXT, answer TEXT)",
        "CREATE TABLE support(user_id INTEGER, quetion TEXT)",
    )
    conn, cursor = make_connect()
    try:
        for ddl in ddl_statements:
            try:
                cursor.execute(ddl)
            except sqlite3.Error:
                # Same best-effort semantics as before: an existing table
                # (or any other sqlite failure) is silently skipped.
                pass
    finally:
        conn.close()
df709514b8d346cc0a38a3bd3f1378bb77a71fa2 | 4,561 | py | Python | tests/unit-tests/test_rst_lists.py | hazemelraffiee/confluencebuilder | c283e7fb513c156b9b6e0ba3694fc3e0468a74c9 | [
"BSD-2-Clause"
] | 90 | 2016-07-21T00:39:19.000Z | 2019-03-08T08:27:17.000Z | tests/unit-tests/test_rst_lists.py | hazemelraffiee/confluencebuilder | c283e7fb513c156b9b6e0ba3694fc3e0468a74c9 | [
"BSD-2-Clause"
] | 124 | 2016-10-18T20:06:48.000Z | 2019-03-08T04:41:53.000Z | tests/unit-tests/test_rst_lists.py | hazemelraffiee/confluencebuilder | c283e7fb513c156b9b6e0ba3694fc3e0468a74c9 | [
"BSD-2-Clause"
] | 39 | 2016-07-21T00:39:52.000Z | 2019-03-06T14:33:31.000Z | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2020-2022 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from tests.lib.testcase import ConfluenceTestCase
from tests.lib.testcase import setup_builder
from tests.lib import parse
import os
class TestConfluenceRstLists(ConfluenceTestCase):
    @classmethod
    def setUpClass(cls):
        """Point the shared test harness at the 'common' dataset's 'lists' page."""
        super(TestConfluenceRstLists, cls).setUpClass()

        # cls.datasets is provided by ConfluenceTestCase — presumably the
        # root folder of bundled test documents; TODO confirm upstream.
        cls.dataset = os.path.join(cls.datasets, 'common')
        cls.filenames = [
            'lists',
        ]
@setup_builder('confluence')
def test_storage_rst_lists(self):
out_dir = self.build(self.dataset, filenames=self.filenames)
with parse('lists', out_dir) as data:
root_tags = data.find_all(recursive=False)
self.assertEqual(len(root_tags), 3)
# ##########################################################
# bullet list
# ##########################################################
bullet_list = root_tags.pop(0)
self.assertEqual(bullet_list.name, 'ul')
items = bullet_list.find_all('li', recursive=False)
self.assertEqual(len(items), 4)
self.assertEqual(items[0].text.strip(), 'first bullet')
self.assertEqual(items[2].text.strip(), 'third item')
self.assertEqual(items[3].text.strip(), 'forth item')
complex_list = items[1]
complex_tags = complex_list.find_all(recursive=False)
self.assertEqual(complex_tags[0].name, 'p')
self.assertEqual(complex_tags[0].text.strip(), 'second item')
self.assertEqual(complex_tags[1].name, 'p')
self.assertEqual(complex_tags[1].text.strip(),
'second paragraph in the second item')
self.assertEqual(complex_tags[2].name, 'p')
self.assertEqual(complex_tags[2].text.strip(),
'third paragraph in the second item')
self.assertEqual(complex_tags[3].name, 'ul')
sublist = complex_tags[3].find_all('li', recursive=False)
self.assertEqual(len(sublist), 3)
# ##########################################################
# enumerated list
# ##########################################################
enumerated_list = root_tags.pop(0)
self.assertEqual(enumerated_list.name, 'ol')
css_style = 'list-style-type: decimal'
self.assertTrue(enumerated_list.has_attr('style'))
self.assertTrue(css_style in enumerated_list['style'])
items = enumerated_list.find_all('li', recursive=False)
self.assertEqual(len(items), 2)
self.assertEqual(items[0].text.strip(), 'enumerated a1')
self.assertEqual(items[1].text.strip(), 'enumerated a2')
# ##########################################################
# enumerated list (styled)
# ##########################################################
enumerated_list = root_tags.pop(0)
self.assertEqual(enumerated_list.name, 'ol')
css_style = 'list-style-type: decimal'
self.assertTrue(enumerated_list.has_attr('style'))
self.assertTrue(css_style in enumerated_list['style'])
items = enumerated_list.find_all('li', recursive=False)
self.assertEqual(len(items), 4)
css_style = 'list-style-type: lower-alpha'
sublist1 = items[0].find('ol', recursive=False)
self.assertIsNotNone(sublist1)
self.assertTrue(sublist1.has_attr('style'))
self.assertTrue(css_style in sublist1['style'])
css_style = 'list-style-type: upper-alpha'
sublist2 = items[1].find('ol', recursive=False)
self.assertIsNotNone(sublist2)
self.assertTrue(sublist2.has_attr('style'))
self.assertTrue(css_style in sublist2['style'])
css_style = 'list-style-type: decimal'
sublist3 = items[2].find('ol', recursive=False)
self.assertIsNotNone(sublist3)
self.assertTrue(sublist3.has_attr('style'))
self.assertTrue(css_style in sublist3['style'])
css_style = 'list-style-type: lower-roman'
sublist4 = items[3].find('ol', recursive=False)
self.assertIsNotNone(sublist4)
self.assertTrue(sublist4.has_attr('style'))
self.assertTrue(css_style in sublist4['style'])
| 40.362832 | 80 | 0.557772 | 4,278 | 0.937952 | 0 | 0 | 4,218 | 0.924797 | 0 | 0 | 1,057 | 0.231747 |
df72a98051b9545e547ff5e6b00c007b9064d190 | 422 | py | Python | src/cira_am/config.py | quistian/dns-firewall | d99e77a763f06bb0ec81192dffa534c0c3b3e30c | [
"BSD-2-Clause"
] | 1 | 2021-11-16T15:23:46.000Z | 2021-11-16T15:23:46.000Z | src/cira_am/config.py | quistian/dns-firewall | d99e77a763f06bb0ec81192dffa534c0c3b3e30c | [
"BSD-2-Clause"
] | null | null | null | src/cira_am/config.py | quistian/dns-firewall | d99e77a763f06bb0ec81192dffa534c0c3b3e30c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
'''
Global Variables convention:
* start with UpperCase
* have no _ character
* may have mid UpperCase words
'''
# --- runtime behaviour flags ---
Debug = True      # developer diagnostics
Silent = True     # suppress routine output
Verbose = False   # extra per-request detail

# --- account / request defaults ---
CustomerName = 'customer_name'
AuthHeader = {'Content-Type': 'application/json'}

# --- CIRA D-Zone firewall service endpoints ---
BaseURL = "https://firewall-api.d-zone.ca"
AuthURL = 'https://firewall-auth.d-zone.ca/auth/realms/D-ZoneFireWall/protocol/openid-connect/token'
| 19.181818 | 100 | 0.7109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.748815 |
df7519102fb79f7448397cf68ad58cb1a141e426 | 652 | py | Python | 安装三方库.py | gaoguole/ctpgao | bb5a521d4e12f0cb318999e599f2e110b03915da | [
"MIT"
] | 73 | 2020-10-12T02:15:04.000Z | 2022-03-30T10:00:25.000Z | 安装三方库.py | TiantangX/ctpgao | bb5a521d4e12f0cb318999e599f2e110b03915da | [
"MIT"
] | 5 | 2020-10-15T08:56:44.000Z | 2020-11-19T04:16:30.000Z | 安装三方库.py | TiantangX/ctpgao | bb5a521d4e12f0cb318999e599f2e110b03915da | [
"MIT"
] | 16 | 2020-10-12T02:15:22.000Z | 2021-12-23T00:35:22.000Z | import os
# Install the third-party dependencies through the Tsinghua PyPI mirror,
# one package at a time, in the original order.
tsinghua_mirror = "https://pypi.tuna.tsinghua.edu.cn/simple"
for package in ("tqsdk", "numba", "janus", "redis", "aioredis", "schedule", "pyqt5"):
    os.system("pip install %s -i %s" % (package, tsinghua_mirror))
# PyQt5-tools is fetched from the Douban mirror, which is plain HTTP and
# therefore needs the trusted-host flag.
os.system("pip install PyQt5-tools -i http://pypi.douban.com/simple --trusted-host=pypi.douban.com")
df772962564e76e30a2b4e2a05a98491303c2ed2 | 5,776 | py | Python | smartystreets_python_sdk/client_builder.py | jasonrfarkas/smartystreets-python-sdk | bcb94efc09c795222eb1bd85544073a6cc063a46 | [
"Apache-2.0"
] | null | null | null | smartystreets_python_sdk/client_builder.py | jasonrfarkas/smartystreets-python-sdk | bcb94efc09c795222eb1bd85544073a6cc063a46 | [
"Apache-2.0"
] | null | null | null | smartystreets_python_sdk/client_builder.py | jasonrfarkas/smartystreets-python-sdk | bcb94efc09c795222eb1bd85544073a6cc063a46 | [
"Apache-2.0"
] | null | null | null | import smartystreets_python_sdk as smarty
from smartystreets_python_sdk.us_street import Client as USStreetClient
from smartystreets_python_sdk.us_zipcode import Client as USZIPClient
from smartystreets_python_sdk.us_extract import Client as USExtractClient
from smartystreets_python_sdk.us_autocomplete import Client as USAutocompleteClient
from smartystreets_python_sdk.international_street import Client as InternationalStreetClient
class ClientBuilder:
    """
    The ClientBuilder class helps you build a client object for one of the
    supported SmartyStreets APIs.

    You can use ClientBuilder's methods to customize settings like maximum
    retries or timeout duration. These methods are chainable (each returns
    ``self``), so you can usually get set up with one line of code; a
    ``build_*_client`` call then produces the configured client.
    """

    # Default endpoint for each API. Declared once at class level instead of
    # being rebuilt on every instance; ``self.X`` reads still resolve here,
    # so existing callers are unaffected.
    INTERNATIONAL_STREET_API_URL = "https://international-street.api.smartystreets.com/verify"
    US_AUTOCOMPLETE_API_URL = "https://us-autocomplete.api.smartystreets.com/suggest"
    US_EXTRACT_API_URL = "https://us-extract.api.smartystreets.com"
    US_STREET_API_URL = "https://us-street.api.smartystreets.com/street-address"
    US_ZIP_CODE_API_URL = "https://us-zipcode.api.smartystreets.com/lookup"

    def __init__(self, signer):
        """
        :param signer: credential object used to sign outgoing requests;
            ``None`` disables request signing (see build_sender()).
        """
        self.signer = signer
        self.serializer = smarty.NativeSerializer()
        self.http_sender = None
        self.max_retries = 5
        self.max_timeout = 10000
        self.url_prefix = None
        self.proxy = None
        self.debug = None
        self.header = None

    def retry_at_most(self, max_retries):
        """
        Sets the maximum number of times to retry sending the request to the API. (Default is 5)

        Returns self to accommodate method chaining.
        """
        self.max_retries = max_retries
        return self

    def with_max_timeout(self, max_timeout):
        """
        The maximum time (in milliseconds) to wait for a connection, and also to wait for
        the response to be read. (Default is 10000)

        Returns self to accommodate method chaining.
        """
        self.max_timeout = max_timeout
        return self

    def with_sender(self, sender):
        """
        Replaces the default sender chain entirely. (See build_sender())

        Returns self to accommodate method chaining.
        """
        self.http_sender = sender
        return self

    def with_serializer(self, serializer):
        """
        Changes the Serializer from the default.

        Returns self to accommodate method chaining.
        """
        self.serializer = serializer
        return self

    def with_base_url(self, base_url):
        """
        This may be useful when using a local installation of the SmartyStreets APIs.
        base_url is a string that defaults to the URL for the API corresponding to the Client object being built.

        Returns self to accommodate method chaining.
        """
        self.url_prefix = base_url
        return self

    def with_proxy(self, host, username=None, password=None):
        """
        Assigns a proxy through which to send all Lookups.

        :param host: The proxy host including port, but not scheme. (example: localhost:8080)
        :param username: Username to authenticate with the proxy server
        :param password: Password to authenticate with the proxy server
        :return: Returns self to accommodate method chaining.
        """
        self.proxy = smarty.Proxy(host, username, password)
        return self

    def with_custom_header(self, custom_header):
        """
        Create custom headers when necessary.

        :param custom_header: Input your custom headers
        :return: Returns self to accommodate method chaining
        """
        self.header = custom_header
        return self

    def with_debug(self):
        """
        Enables debug mode, which will print information about the HTTP request and response to the console.

        Returns self to accommodate method chaining.
        """
        self.debug = True
        return self

    def build_international_street_api_client(self):
        self.ensure_url_prefix_not_null(self.INTERNATIONAL_STREET_API_URL)
        return InternationalStreetClient(self.build_sender(), self.serializer)

    def build_us_autocomplete_api_client(self):
        self.ensure_url_prefix_not_null(self.US_AUTOCOMPLETE_API_URL)
        return USAutocompleteClient(self.build_sender(), self.serializer)

    def build_us_extract_api_client(self):
        self.ensure_url_prefix_not_null(self.US_EXTRACT_API_URL)
        return USExtractClient(self.build_sender(), self.serializer)

    def build_us_street_api_client(self):
        self.ensure_url_prefix_not_null(self.US_STREET_API_URL)
        return USStreetClient(self.build_sender(), self.serializer)

    def build_us_zipcode_api_client(self):
        self.ensure_url_prefix_not_null(self.US_ZIP_CODE_API_URL)
        return USZIPClient(self.build_sender(), self.serializer)

    def build_sender(self):
        """
        Builds the default nested sender chain, innermost first:
        requests -> status-code check -> custom headers -> signing ->
        retries -> URL prefixing. A sender supplied via with_sender()
        short-circuits all of this.
        """
        if self.http_sender is not None:
            return self.http_sender

        sender = smarty.RequestsSender(self.max_timeout, self.proxy)
        sender.debug = self.debug

        sender = smarty.StatusCodeSender(sender)

        if self.header is not None:
            sender = smarty.CustomHeaderSender(self.header, sender)

        if self.signer is not None:
            sender = smarty.SigningSender(self.signer, sender)

        if self.max_retries > 0:
            sender = smarty.RetrySender(self.max_retries, sender)

        sender = smarty.URLPrefixSender(self.url_prefix, sender)

        return sender

    def ensure_url_prefix_not_null(self, url):
        # fall back to the API's default endpoint when no base URL was set
        if self.url_prefix is None:
            self.url_prefix = url
| 38 | 113 | 0.691136 | 5,337 | 0.923996 | 0 | 0 | 0 | 0 | 0 | 0 | 2,213 | 0.383137 |
df77d20b06e6fc4fab68bc80bf0e87b46ceb6892 | 14,294 | py | Python | src/util.py | uguryagmur/RealTimeObjectDetection | e54d153266c6b4f54a49266fe05ceca13ebe3cdd | [
"MIT"
] | null | null | null | src/util.py | uguryagmur/RealTimeObjectDetection | e54d153266c6b4f54a49266fe05ceca13ebe3cdd | [
"MIT"
] | null | null | null | src/util.py | uguryagmur/RealTimeObjectDetection | e54d153266c6b4f54a49266fe05ceca13ebe3cdd | [
"MIT"
] | null | null | null | """Utility functions for Object Detection Networks"""
from __future__ import division
import cv2
import torch
import numpy as np
from PIL import Image, ImageDraw
def xyxy2xywh(box: torch.Tensor) -> torch.Tensor:
    """
    Convert bounding boxes from corner (xyxy) to center (xywh) format.

    Arguments:
        box (torch.Tensor): bounding box tensor in xyxy format; trailing
            attributes beyond the first four channels are copied through

    Output:
        output (torch.Tensor): bounding box tensor in xywh format
    """
    output = torch.zeros(box.size())
    x1, y1 = box[..., 0], box[..., 1]
    x2, y2 = box[..., 2], box[..., 3]
    output[..., 0] = (x2 + x1) / 2   # center x
    output[..., 1] = (y2 + y1) / 2   # center y
    output[..., 2] = x2 - x1         # width
    output[..., 3] = y2 - y1         # height
    output[..., 4:] = box[..., 4:]   # pass extra attributes through unchanged
    return output
def xywh2xyxy(box: torch.Tensor) -> torch.Tensor:
    """
    Convert bounding boxes from center (xywh) to corner (xyxy) format.

    Arguments:
        box (torch.Tensor): bounding box tensor in xywh format; trailing
            attributes beyond the first four channels are copied through

    Output:
        output (torch.Tensor): bounding box tensor in xyxy format
    """
    output = torch.zeros(box.size())
    cx, cy = box[..., 0], box[..., 1]
    half_w, half_h = box[..., 2] / 2, box[..., 3] / 2
    output[..., 0] = cx - half_w     # left
    output[..., 1] = cy - half_h     # top
    output[..., 2] = cx + half_w     # right
    output[..., 3] = cy + half_h     # bottom
    output[..., 4:] = box[..., 4:]   # pass extra attributes through unchanged
    return output
def xywh2YOLO(box: torch.Tensor, stride: float,
              anchor: tuple):
    """
    Encode one xywh bounding box into the raw YOLO regression targets for a
    detection layer with the given stride and anchor.

    Arguments:
        box (torch.Tensor): bounding box tensor in xywh format
        stride (float): stride of the detection layer
        anchor (tuple): (width, height) of the matched anchor box

    Outputs:
        y_coor (int), x_coor (int): grid cell that contains the box center
        y (float), x (float): fractional center offsets within that cell
        w (float), h (float): log-space size targets relative to the anchor
    """
    grid_x = box[..., 0].item() / stride
    grid_y = box[..., 1].item() / stride
    x_coor = int(grid_x)
    y_coor = int(grid_y)
    x = grid_x - x_coor
    y = grid_y - y_coor
    # 1e-16 protects the log against zero-sized boxes
    w = torch.log(box[..., 2] / anchor[0] + 1e-16).item()
    h = torch.log(box[..., 3] / anchor[1] + 1e-16).item()
    return y_coor, x_coor, y, x, w, h
def draw_boxes(img: torch.Tensor, bbox: torch.Tensor,
               from_tensor=False):
    """
    Draws the given bounding boxes on the sample image and shows it to the
    user, to check whether the incoming data is correct or not.

    Arguments:
        img (torch.Tensor, PIL.Image): image sample (CHW tensor in [0, 1]
            when `from_tensor` is True, otherwise a PIL image)
        bbox (torch.Tensor): bounding boxes (xywh + attributes) for the
            image; only rows whose attribute at index 5 equals 1 are drawn
        from_tensor (bool): flag for whether the input is a tensor or PIL image
    """
    if from_tensor:
        img = img.transpose(0, 1).transpose(1, 2).numpy() * 255
        img = Image.fromarray(np.uint8(img))
    draw = ImageDraw.Draw(img)
    for b in bbox:
        if b[5] != 1:
            continue
        box = b[:4].numpy()
        # BUG fix: this used to be assigned to `bbox`, clobbering the very
        # iterable being looped over; use a dedicated local instead.
        rect = [int(box[0] - box[2] / 2),
                int(box[1] - box[3] / 2),
                int(box[0] + box[2] / 2),
                int(box[1] + box[3] / 2)]
        draw.rectangle(rect, outline='red')
    img.show()
def confidence_mask(tensor: torch.Tensor, confidence: float) -> torch.Tensor:
    """
    Zero out every detection whose objectness score (channel 4) does not
    exceed the given confidence threshold.

    Arguments:
        tensor (torch.Tensor): raw YOLO output, shape [batch, boxes, attrs]
        confidence (float): objectness threshold
    """
    keep = (tensor[:, :, 4] > confidence).float()
    return tensor * keep.unsqueeze(2)
def bbox_iou(box1: torch.Tensor, box2: torch.Tensor) -> torch.Tensor:
    """
    Compute the IoU between two (broadcastable) sets of boxes in xyxy format.

    Arguments:
        box1 (torch.Tensor): first set of boxes
        box2 (torch.Tensor): second set of boxes

    Returns:
        iou (torch.Tensor): elementwise intersection / union
    """
    # corners of the intersection rectangle
    ix1 = torch.max(box1[..., 0], box2[..., 0])
    iy1 = torch.max(box1[..., 1], box2[..., 1])
    ix2 = torch.min(box1[..., 2], box2[..., 2])
    iy2 = torch.min(box1[..., 3], box2[..., 3])

    # the +1 keeps the inclusive-pixel area convention used in this module;
    # clamp handles disjoint boxes (negative extents become zero area)
    inter_area = torch.clamp(ix2 - ix1 + 1, min=0) * \
        torch.clamp(iy2 - iy1 + 1, min=0)

    area1 = (box1[..., 2] - box1[..., 0] + 1) * (box1[..., 3] - box1[..., 1] + 1)
    area2 = (box2[..., 2] - box2[..., 0] + 1) * (box2[..., 3] - box2[..., 1] + 1)

    return inter_area / (area1 + area2 - inter_area)
def bbox_iou_wh(wh1: tuple, wh2: tuple) -> float:
    """
    Returns the IoU between two boxes given only their width and height,
    treating both as anchored at the same origin (anchor matching).

    Arguments:
        wh1 (tuple, list): (width, height) of the first box
        wh2 (tuple, list): (width, height) of the second box

    Returns:
        iou (float): intersection/union for the given w-h pairs
    """
    w1, h1 = wh1[0], wh1[1]
    # BUG fix: h2 was previously read from wh2[0] (the width) instead of wh2[1]
    w2, h2 = wh2[0], wh2[1]
    intersect_area = min(w1, w2) * min(h1, h2)
    union_area = w1 * h1 + w2 * h2 - intersect_area
    return intersect_area / union_area
def predict_transform(prediction, inp_dim, anchors, num_class,
                      CUDA, TRAIN=False) -> torch.Tensor:
    """
    Reorganize the raw output of a YOLO detection layer into a flat
    [batch, #boxes, 5 + #classes] prediction tensor, applying the sigmoid
    activations and (unless training) decoding the boxes back into the
    input-image pixel space.

    Arguements:
        prediction (torch.Tensor) : output tensor of the Detection Layer
        inp_dim (int) : input image dimension (square, in pixels)
        anchors (list) : anchor (width, height) pairs for this layer
        num_class (int) : number of classes detectable by the Darknet
        CUDA (bool): move the offset/anchor tensors to the GPU
        TRAIN (bool): when True, skip offset/anchor decoding so the raw
            (sigmoid-activated) values can be compared against targets

    Returns:
        detection (torch.Tensor): masked and reorganized form of the detection
    """
    batch_size = prediction.size(0)
    # how much this layer downsampled the input; grid_size is the
    # detection map's spatial resolution
    stride = inp_dim // prediction.size(2)
    grid_size = inp_dim // stride
    bbox_attrs = 5 + num_class
    num_anchors = len(anchors)

    # [B, A*attrs, G, G] -> [B, G*G*A, attrs]: one row per predicted box
    prediction = prediction.view(
        batch_size, bbox_attrs*num_anchors, grid_size*grid_size)
    prediction = prediction.transpose(1, 2).contiguous()
    prediction = prediction.view(
        batch_size, grid_size*grid_size*num_anchors, bbox_attrs)

    # sigmoid the center x/y offsets, the objectness and the class scores
    # (channels 2:4, the raw w/h, stay in log space for now)
    prediction[:, :, 0] = torch.sigmoid(prediction[:, :, 0])
    prediction[:, :, 1] = torch.sigmoid(prediction[:, :, 1])
    prediction[:, :, 4:] = torch.sigmoid(prediction[:, :, 4:])

    # for training no offset addition: targets are compared in grid space
    if not TRAIN:
        # scale the anchor boxes down into grid units to match the predictions
        anchors = [(a[0]/stride, a[1]/stride) for a in anchors]

        # log space transform height and the width
        anchors = torch.FloatTensor(anchors)

        # add the per-cell center offsets so x/y become absolute grid coords
        grid = torch.arange(grid_size)
        b, a = torch.meshgrid(grid, grid)
        x_offset = a.reshape(-1, 1)
        y_offset = b.reshape(-1, 1)

        if CUDA:
            anchors = anchors.cuda()
            x_offset = x_offset.cuda()
            y_offset = y_offset.cuda()

        # one (x, y) offset row per anchor per grid cell
        x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(
            1, num_anchors).view(-1, 2).unsqueeze(0)

        prediction[:, :, :2] += x_y_offset
        # decode w/h: exp of the raw value times the matched anchor size
        anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)
        prediction[:, :, 2:4] = torch.exp(prediction[:, :, 2:4])*anchors
        # finally rescale everything from grid units to input-image pixels
        prediction[:, :, :4] *= stride

    return prediction  # -> shape = [batch, #_of_boxes, 5 + #_of_classes]
def write_results(prediction, num_class, confidence=0.6,
                  nms_conf=0.4) -> torch.Tensor:
    """
    Turn the decoded Darknet predictions into final detections: apply
    confidence thresholding and per-class non-max suppression.

    Arguments:
        prediction (torch.Tensor) : output of the Darknet network,
            shape [batch, #boxes, 5 + num_class] (xywh format)
        num_class (int) : number of classes detectable by the Darknet
        confidence (float) : objectness threshold for keeping a box
        nms_conf (float) : non-max suppression IoU threshold (default=0.4)

    Returns:
        output (torch.Tensor, int): rows of
            (batch_index, x1, y1, x2, y2, objectness, class_conf, class_idx);
            returns the int 0 when there is no detection at all
    """
    # zero out boxes below the objectness threshold
    prediction = confidence_mask(prediction, confidence)

    # transform box attributes from center (xywh) to corner (xyxy) coordinates
    box_corner = prediction.new(prediction.shape)
    box_corner[:, :, 0] = (prediction[:, :, 0] - prediction[:, :, 2]/2)
    box_corner[:, :, 1] = (prediction[:, :, 1] - prediction[:, :, 3]/2)
    box_corner[:, :, 2] = (prediction[:, :, 0] + prediction[:, :, 2]/2)
    box_corner[:, :, 3] = (prediction[:, :, 1] + prediction[:, :, 3]/2)
    prediction[:, :, :4] = box_corner[:, :, :4]

    # obtaining the batch_size of the input tensor
    batch_size = prediction.size(0)
    # becomes True once the first detection row has been written to `output`
    write = False

    for ind in range(batch_size):
        image_pred = prediction[ind]

        # collapse the per-class scores to (best score, best class index)
        max_conf, max_conf_score = torch.max(image_pred[:, 5:5 +
                                                        num_class], 1)
        max_conf = max_conf.float().unsqueeze(1)
        max_conf_score = max_conf_score.float().unsqueeze(1)
        seq = (image_pred[:, :5], max_conf, max_conf_score)
        image_pred = torch.cat(seq, 1)

        # keep only rows that survived the confidence mask (objectness != 0)
        non_zero_ind = (torch.nonzero(image_pred[:, 4]))
        try:
            image_pred_ = image_pred[non_zero_ind.squeeze(), :].view(-1, 7)
        except IndexError:
            continue

        # if there is no detection for this batch image
        if image_pred_.shape[0] == 0:
            continue

        # get the various classes detected in the image
        # -1 index holds the class index
        img_classes = torch.unique(image_pred_[:, -1])

        for cls in img_classes:
            # perform NMS
            # get the detections with one particular class
            cls_mask = image_pred_ * \
                (image_pred_[:, -1] == cls).float().unsqueeze(1)
            class_mask_ind = torch.nonzero(cls_mask[:, -2]).squeeze()
            image_pred_class = image_pred_[class_mask_ind].view(-1, 7)

            # sort the class detections by objectness, best first
            conf_sort_index = torch.sort(
                image_pred_class[:, 4], descending=True)[1]
            image_pred_class = image_pred_class[conf_sort_index]
            idx = image_pred_class.size(0)  # Number of detections

            for i in range(idx):
                # IoU of the current box against all lower-ranked boxes;
                # the list shrinks while iterating, hence the except/break
                try:
                    ious = bbox_iou(image_pred_class[i].unsqueeze(
                        0), image_pred_class[i+1:])
                except (IndexError, ValueError):
                    break

                # zero out all lower-ranked detections with IoU >= threshold
                iou_mask = (ious < nms_conf).float().unsqueeze(1)
                image_pred_class[i+1:] *= iou_mask

                # drop the suppressed (zeroed) entries
                non_zero_ind = torch.nonzero(image_pred_class[:, 4]).squeeze()
                image_pred_class = image_pred_class[non_zero_ind].view(-1, 7)

            # prefix each surviving row with the image's index in the batch
            batch_ind = image_pred_class.new(
                image_pred_class.size(0), 1).fill_(ind)
            seq = batch_ind, image_pred_class

            if not write:
                output = torch.cat(seq, 1)
                write = True
            else:
                out = torch.cat(seq, 1)
                output = torch.cat((output, out))

    # `output` only exists if at least one detection was written
    try:
        return output
    except NameError:
        return 0
def letterbox_image(img, inp_dim) -> np.ndarray:
    """
    Resize an image to fit inside `inp_dim` with its aspect ratio unchanged,
    padding the remaining border with gray (value 128).

    Arguments:
        img (np.ndarray) : image to be reshaped, HWC layout
        inp_dim (list) : (width, height) target dimensions

    Returns:
        canvas (np.ndarray): resized and padded image, centered on the canvas
    """
    orig_h, orig_w = img.shape[0], img.shape[1]
    target_w, target_h = inp_dim
    scale = min(target_w / orig_w, target_h / orig_h)
    new_w = int(orig_w * scale)
    new_h = int(orig_h * scale)
    resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)

    canvas = np.full((inp_dim[1], inp_dim[0], 3), 128)
    top = (target_h - new_h) // 2
    left = (target_w - new_w) // 2
    canvas[top:top + new_h, left:left + new_w, :] = resized
    return canvas
def prep_image(img, inp_dim, mode='BGR') -> torch.Tensor:
    """
    Prepare an image for the network: letterbox to `inp_dim`, put channels
    first (reversing BGR to RGB when needed), scale to [0, 1] and add a
    batch dimension.

    Arguments:
        img (np.ndarray) : input image, HWC layout
        inp_dim (int) : square network input size
        mode (str) : 'BGR' (OpenCV images, channels get reversed) or 'RGB'

    Returns:
        img (torch.Tensor): [1, 3, inp_dim, inp_dim] float tensor
    """
    assert mode in ('BGR', 'RGB')
    # scale the image while protecting the aspect ratio
    padded = letterbox_image(img, (inp_dim, inp_dim))
    # move channels to the front for the pytorch tensor
    if mode == 'RGB':
        chw = padded.transpose((2, 0, 1)).copy()
    else:
        chw = padded[:, :, ::-1].transpose((2, 0, 1)).copy()
    # normalize to [0, 1] and prepend the batch axis
    return torch.from_numpy(chw).float().div(255.0).unsqueeze(0)
def load_classes(names_file_path: str) -> list:
    """Load the class labels from a dataset names file.

    Arguments:
        names_file_path (str) : path of the names file of the dataset

    Returns:
        names (list): class labels for the detected objects, one per line
    """
    # context manager fixes the file-handle leak of the original version
    with open(names_file_path, "r") as fp:
        # the file ends with a newline, so drop the final empty entry
        return fp.read().split("\n")[:-1]
| 34.694175 | 81 | 0.591787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,718 | 0.400028 |
df7a2150324a753c57fa8af8c06a9e46c60e1d9b | 5,512 | py | Python | model/warp.py | askerlee/rift | d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2 | [
"MIT"
] | 11 | 2022-02-14T08:31:04.000Z | 2022-03-29T08:20:17.000Z | model/warp.py | askerlee/rift | d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2 | [
"MIT"
] | 3 | 2022-02-14T11:19:15.000Z | 2022-03-19T05:11:25.000Z | model/warp.py | askerlee/rift | d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
# Compute device used when building and storing the cached warping grids.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Precomputed coordinate grids dictionary, with (tensor device, tensor size)
# strings as the keys, so a grid is only built once per flow shape.
backwarp_tenGrid = {}

# backwarp and multiwarp perform backward warping using the forward flow.
# The "ten" prefix presumably abbreviates "tensor".
def backwarp(tenInput, tenFlow):
    """Backward-warp `tenInput` with the per-pixel flow field `tenFlow`.

    A normalized [-1, 1] base grid matching the flow's device and size is
    built once and cached in `backwarp_tenGrid`; it is then shifted by the
    (normalized) flow and sampled bilinearly.
    """
    cache_key = (str(tenFlow.device), str(tenFlow.size()))
    if cache_key not in backwarp_tenGrid:
        n, _, h, w = tenFlow.shape
        xs = torch.linspace(-1.0, 1.0, w, device=device).view(
            1, 1, 1, w).expand(n, -1, h, -1)
        ys = torch.linspace(-1.0, 1.0, h, device=device).view(
            1, 1, h, 1).expand(n, -1, -1, w)
        backwarp_tenGrid[cache_key] = torch.cat([xs, ys], 1).to(device)

    # rescale the pixel-space flow into normalized grid coordinates;
    # channel 0 is x (horizontal), channel 1 is y (vertical)
    norm_flow = torch.cat([tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0),
                           tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0)], 1)

    grid = (backwarp_tenGrid[cache_key] + norm_flow).permute(0, 2, 3, 1)
    return torch.nn.functional.grid_sample(input=tenInput, grid=grid, mode='bilinear',
                                           padding_mode='border', align_corners=True)
# Warp images with multiple groups of flow, and combine them into one group with flow group attention.
# If M==1, multiwarp falls back to backwarp.
def multiwarp(img0, img1, multiflow, multimask_score, M):
    """Warp img0 (and optionally img1) with M flow groups, then fuse the M
    warped copies into one image per side using flow-group attention.
    When M == 1 this degenerates to a plain backwarp of each image.
    img1 may be None, in which case only img0 is warped and the second
    return value is None."""
    img0_warped_list = []
    img1_warped_list = []
    multimaskm0_score_list = []
    multimaskm1_score_list = []
    # multiflow at dim=1:
    # flowm0_1, flowm0_2, ..., flowm0_M, flowm1_1, flowm1_2, ..., flowm1_M
    # m0 means flow from middle to img0, m1 means flow from middle to img1.
    # Each block has 2 channels.
    for i in range(M):
        # mid -> 0 flow to warp img0, which approximates mid.
        img0_warped = backwarp(img0, multiflow[:, i*2 : i*2+2])
        img0_warped_list.append(img0_warped)
        # Warp the mask scores. The scores are generated mostly based on
        # unwarped images, and there's misalignment between warped images and unwarped
        # scores. Therefore, we need to warp the mask scores as well.
        # But doing so only leads to very slight improvement (~0.02 psnr).
        maskm0_score_warped = backwarp(multimask_score[:, [i]], multiflow[:, i*2 : i*2+2])
        multimaskm0_score_list.append(maskm0_score_warped)
        if img1 is not None:
            # m->1 flow to warp img1, which approximates mid.
            # m->1 flow starts from the 2*M-th channel.
            img1_warped = backwarp(img1, multiflow[:, 2*M+i*2 : 2*M+i*2+2])
            img1_warped_list.append(img1_warped)
            maskm1_score_warped = backwarp(multimask_score[:, [i+M]], multiflow[:, i*2+2*M : i*2+2*M+2])
            multimaskm1_score_list.append(maskm1_score_warped)
        else:
            # placeholder so both lists stay index-aligned.
            img1_warped_list.append(None)

    if M == 1:
        # single group: no attention fusion needed.
        return img0_warped_list[0], img1_warped_list[0]

    # multimask_score: 2*M+1 channels. 2*M for M groups of ML/MR flow attention scores,
    # ML: 0.5 -> 0, MR: 0.5 -> 1.
    # ML_0, ML_1, ..., ML_M, MR_0, ..., MR_M, ML~MR weight
    # 1: mask, for the warp0-warp1 combination weight.
    # For sofi, the global mask scores may be bidirectional. In that case, there are totally 2*M+2 channels.
    if img1 is not None:
        assert multimask_score.shape[1] == 2*M+1 or multimask_score.shape[1] == 2*M+2

    # img0_warped_list, img1_warped_list are two lists, each of length M.
    # => [16, M, 3, 224, 224]
    warped_img0s = torch.stack(img0_warped_list, dim=1)
    multimaskm0_score = torch.stack(multimaskm0_score_list, dim=1)
    # warp0_attn: [16, M, 1, 224, 224]; softmax over the M groups so each
    # pixel is a convex combination of the M warped candidates.
    warp0_attn = torch.softmax(multimaskm0_score, dim=1)
    img0_warped = (warp0_attn * warped_img0s).sum(dim=1)
    if img1 is not None:
        warped_img1s = torch.stack(img1_warped_list, dim=1)
        multimaskm1_score = torch.stack(multimaskm1_score_list, dim=1)
        warp1_attn = torch.softmax(multimaskm1_score, dim=1)
        img1_warped = (warp1_attn * warped_img1s).sum(dim=1)
    else:
        img1_warped = None
    return img0_warped, img1_warped
# Use flow group attention to combine multiple flow groups into one.
def multimerge_flow(multiflow, multimask_score, M):
    """Fuse M flow groups into a single bidirectional flow using flow-group
    attention.

    `multiflow` stores M two-channel mid->img0 flows followed by M
    two-channel mid->img1 flows; the first 2*M channels of
    `multimask_score` are the per-group attention logits (m->0 first,
    then m->1). For M == 1 the input flow is returned unchanged.
    """
    if M == 1:
        # a single group needs no merging
        return multiflow

    batch = multiflow.shape[0]
    spatial = list(multiflow.shape[2:])
    # [B, 2*M, H, W] -> [B, M, 2, H, W] per direction
    flows_m0 = multiflow[:, :2 * M].reshape([batch, M, 2] + spatial)
    flows_m1 = multiflow[:, 2 * M:].reshape([batch, M, 2] + spatial)

    # group-attention weights [B, M, 1, H, W]; multiflow is unwarped,
    # so the scores need no warping either
    attn_m0 = torch.softmax(multimask_score[:, :M], dim=1).unsqueeze(dim=2)
    attn_m1 = torch.softmax(multimask_score[:, M:2 * M], dim=1).unsqueeze(dim=2)

    # attention-weighted sum over the M groups -> [B, 2, H, W] each
    flow_m0 = (attn_m0 * flows_m0).sum(dim=1)
    flow_m1 = (attn_m1 * flows_m1).sum(dim=1)
    return torch.cat([flow_m0, flow_m1], dim=1)
| 48.778761 | 126 | 0.646589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,811 | 0.328556 |
df7cecff3f2a6d3cc5c868cae77bf8a3ce21f622 | 1,302 | py | Python | api/models.py | UAACC/pro | c5424574427ac3521cb70b0d62b841fa128f5166 | [
"MIT"
] | null | null | null | api/models.py | UAACC/pro | c5424574427ac3521cb70b0d62b841fa128f5166 | [
"MIT"
] | null | null | null | api/models.py | UAACC/pro | c5424574427ac3521cb70b0d62b841fa128f5166 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractUser
class Author(models.Model):
username = models.CharField(max_length=50, unique=True)
display_name = models.CharField(max_length=50, null=True, blank=True)
email = models.EmailField(null=True, blank=True)
github = models.URLField(null=True, blank=True)
bio = models.TextField(null=True, blank=True)
is_approved = models.BooleanField(default=False)
class Post(models.Model):
    title = models.CharField(max_length=256)
    # Short summary; empty by default.
    description = models.CharField(max_length=256, default="")
    # Author of the post; deleting the author cascades to their posts.
    # Reachable from Author via the "posts" reverse accessor.
    authorId = models.ForeignKey(Author, on_delete=models.CASCADE, related_name="posts")
    # Publication timestamp; defaults to the creation time.
    published = models.DateTimeField(default=timezone.now)
class Comment(models.Model):
    # Comment body; empty by default.
    content = models.CharField(max_length=256, default="")
    # Comment author; removed together with the author.
    authorId = models.ForeignKey(Author, on_delete=models.CASCADE)
    # Parent post; reachable from Post via the "comments" reverse accessor.
    postId = models.ForeignKey(Post, on_delete=models.CASCADE, related_name="comments")
# class FriendRequest(models.Model):
#     from_user = models.ForeignKey(
#         Author, related_name='from_user', on_delete=models.CASCADE
#     )
#     to_user = models.ForeignKey(
#         Author, related_name='to_user', on_delete=models.CASCADE
#     )
df7d22a849c97e60605eb8e66647c84ea4d9d627 | 3,705 | py | Python | miniworld/model/network/interface/Interfaces.py | miniworld-project/miniworld_core | c591bad232b78eae99e8f55cb1b907c1e228484b | [
"MIT"
] | 5 | 2019-05-11T14:57:15.000Z | 2021-07-05T00:35:25.000Z | miniworld/model/network/interface/Interfaces.py | miniworld-project/miniworld_core | c591bad232b78eae99e8f55cb1b907c1e228484b | [
"MIT"
] | 27 | 2017-03-17T07:11:02.000Z | 2019-05-26T23:36:56.000Z | miniworld/model/network/interface/Interfaces.py | miniworld-project/miniworld_core | c591bad232b78eae99e8f55cb1b907c1e228484b | [
"MIT"
] | 6 | 2017-05-03T12:11:33.000Z | 2020-04-03T11:44:27.000Z | from collections import defaultdict
from miniworld.errors import InterfaceUnknown
from miniworld.model.Objects import Objects
from miniworld.model.network.interface.Interface import INTERFACE_NORMAL_CLASSES_TYPES, INTERFACE_NAME_TYPE_MAPPING, AP, Mesh
__author__ = 'Nils Schmidt'
class Interfaces(Objects):
    """
    Collection of the interfaces the OS has.

    Behaves like a python list and offers factory methods for creating
    several interfaces at once.
    """

    def __init__(self, interfaces):
        """
        Parameters
        ----------
        interfaces : list<Interface>
        """
        self.interfaces = self.data = interfaces
        super(Interfaces, self).__init__(interfaces)

    @staticmethod
    def interface_name_to_type(interface_class_name):
        """ Resolve an interface name (case-insensitive) to its type.

        Parameters
        ----------
        interface_class_name : str

        Returns
        -------
        type

        Raises
        ------
        InterfaceUnknown
            If no interface type is registered under that name.
        """
        lowered = interface_class_name.lower()
        interface_type = INTERFACE_NAME_TYPE_MAPPING.get(lowered)
        if interface_type is None:
            raise InterfaceUnknown("The interface name '%s' is unknown!" % lowered)
        return interface_type

    @staticmethod
    def factory_from_interface_names(interface_names):
        """ See py:meth:`.factory`

        Examples
        --------
        >>> Interfaces.factory_from_interface_names(["mesh"])
        [Mesh(1)]

        Raises
        ------
        InterfaceUnknown
        """
        if not interface_names:
            return Interfaces([])
        interface_types = [Interfaces.interface_name_to_type(name)
                           for name in interface_names]
        return Interfaces.factory(interface_types)

    @staticmethod
    def factory(interface_types):
        """
        Create one interface instance per entry of `interface_types`.

        Instances of the same type are numbered starting at 1; the running
        count is passed to the `Interface` constructor (needed to tell e.g.
        two `AP` objects apart).

        Parameters
        ----------
        interface_types: iterable<type>
            List of `Interface` types (uninitialized!)

        Returns
        -------
        Interfaces
        """
        counter = defaultdict(lambda: 1)
        created = []
        for interface_type in interface_types:
            # instantiate with the current per-type count, then bump it
            created.append(interface_type(counter[interface_type]))
            counter[interface_type] += 1
        return Interfaces(created)

    def filter_mgmt(self):
        """Return only the management interfaces."""
        from miniworld.model.network.interface.Interface import Management
        return self.filter_type(Management)

    def filter_hub_wifi(self):
        """Return only the HubWiFi interfaces."""
        from miniworld.model.network.interface.Interface import HubWiFi
        return self.filter_type(HubWiFi)

    def filter_normal_interfaces(self):
        """Return only the "normal" interfaces (see INTERFACE_NORMAL_CLASSES_TYPES)."""
        return self.filter_type(fun=lambda interface: type(interface) in INTERFACE_NORMAL_CLASSES_TYPES)

    def iter_node_classes(self):
        # NOTE: returns a list despite the iter_ prefix (kept for compatibility)
        return [interface.node_class for interface in self]

    def iter_node_classes_names(self):
        # NOTE: returns a list despite the iter_ prefix (kept for compatibility)
        return [interface.node_class_name for interface in self]
if __name__ == '__main__':
from miniworld.util import DictUtil
interfaces = Interfaces.factory([Mesh, Mesh, AP, Mesh, AP])
d = {}
d[interfaces] = 1
print(type(d.items()[0][0]))
print(type(d.items()[0][1]))
print(d)
print(DictUtil.to_fully_staffed_matrix_2(d))
# for i in Interfaces.factory([Mesh, Mesh, AP, Mesh, AP]):
# print i
| 29.173228 | 128 | 0.634008 | 3,046 | 0.822132 | 0 | 0 | 1,960 | 0.529015 | 0 | 0 | 1,387 | 0.374359 |
df7edd38a1a100de662fe567b387d768f816c859 | 1,354 | py | Python | tests/test_distgit/test_convert_source_url_to_https.py | sosiouxme/doozer | 9658169207773f9c228ad011f611867a4a9d5a77 | [
"Apache-2.0"
] | null | null | null | tests/test_distgit/test_convert_source_url_to_https.py | sosiouxme/doozer | 9658169207773f9c228ad011f611867a4a9d5a77 | [
"Apache-2.0"
] | null | null | null | tests/test_distgit/test_convert_source_url_to_https.py | sosiouxme/doozer | 9658169207773f9c228ad011f611867a4a9d5a77 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, print_function, unicode_literals
import unittest
from doozerlib import distgit
class TestDistgitConvertSourceURLToHTTPS(unittest.TestCase):
def test_conversion_from_ssh_source(self):
source = "git@github.com:myorg/myproject.git"
actual = distgit.convert_source_url_to_https(source)
expected = "https://github.com/myorg/myproject"
self.assertEqual(actual, expected)
def test_conversion_from_already_https_source(self):
source = "https://github.com/myorg/myproject"
actual = distgit.convert_source_url_to_https(source)
expected = "https://github.com/myorg/myproject"
self.assertEqual(actual, expected)
def test_conversion_from_https_source_with_dotgit_suffix(self):
source = "https://github.com/myorg/myproject.git"
actual = distgit.convert_source_url_to_https(source)
expected = "https://github.com/myorg/myproject"
self.assertEqual(actual, expected)
def test_conversion_from_https_source_with_dotgit_elsewhere(self):
source = "https://foo.gitlab.com/myorg/myproject"
actual = distgit.convert_source_url_to_https(source)
expected = "https://foo.gitlab.com/myorg/myproject"
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
| 31.488372 | 72 | 0.728951 | 1,183 | 0.873708 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.228951 |
df808bedd7a5f0f877dd0614ac4bd0dfb3f89587 | 441 | py | Python | KnowledgeMapping/SpiderExp/4-Function/openSSLExp/3.py | nickliqian/ralph_doc_to_chinese | be120ce2bb94a8e8395630218985f5e51ae087d9 | [
"MIT"
] | 8 | 2018-05-22T01:11:33.000Z | 2020-03-19T01:44:55.000Z | KnowledgeMapping/SpiderExp/4-Function/openSSLExp/3.py | yangliangguang/keep_learning | 47ab39c726cb28713ad22bf4cf39d6b146715910 | [
"MIT"
] | null | null | null | KnowledgeMapping/SpiderExp/4-Function/openSSLExp/3.py | yangliangguang/keep_learning | 47ab39c726cb28713ad22bf4cf39d6b146715910 | [
"MIT"
] | 3 | 2018-07-25T09:31:53.000Z | 2019-09-14T14:05:31.000Z | import requests
import ssl
from requests.adapters import HTTPAdapter, PoolManager
class MyAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize):
super(MyAdapter, self).__init__()
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, ssl_version=ssl.PROTOCOL_SSLv3)
s = requests.Session()
s.mount('https://', MyAdapter()) # 所有的https连接都用ssl.PROTOCOL_SSLV3去连接
s.get('https://xxx.com')
| 29.4 | 110 | 0.75737 | 236 | 0.511931 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.177874 |
df81e1d9539d033063a2a56dbeb862075f49347e | 798 | py | Python | weighmail/observers/__init__.py | gremmie/weighmail | d20b5ab3ef9556a3e7a4a06875c4ca69c22fe31d | [
"BSD-3-Clause"
] | null | null | null | weighmail/observers/__init__.py | gremmie/weighmail | d20b5ab3ef9556a3e7a4a06875c4ca69c22fe31d | [
"BSD-3-Clause"
] | null | null | null | weighmail/observers/__init__.py | gremmie/weighmail | d20b5ab3ef9556a3e7a4a06875c4ca69c22fe31d | [
"BSD-3-Clause"
] | null | null | null | """Base observer class for weighmail operations.
"""
class BaseObserver(object):
"""Base observer class; does nothing."""
def searching(self, label):
"""Called when the search process has started for a label"""
pass
def labeling(self, label, count):
"""Called when the labelling process has started for a given label
label - the label we are working on
count - number of messages to label
"""
pass
def done_labeling(self, label, count):
"""Called when finished labelling for a given label
label - the label we were working on
count - number of messages that were labelled
"""
pass
def done(self):
"""Called when completely finished"""
pass
| 24.181818 | 74 | 0.601504 | 742 | 0.929825 | 0 | 0 | 0 | 0 | 0 | 0 | 538 | 0.674185 |
df84089eac65b59082205d83c06cd6c272f90434 | 145 | py | Python | banks/apps.py | lauriefacer/Facer | ff9f5ca4af302926fed49a4b9b442ff00ea212be | [
"MIT"
] | null | null | null | banks/apps.py | lauriefacer/Facer | ff9f5ca4af302926fed49a4b9b442ff00ea212be | [
"MIT"
] | null | null | null | banks/apps.py | lauriefacer/Facer | ff9f5ca4af302926fed49a4b9b442ff00ea212be | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class BanksConfig(AppConfig):
name = 'banks'
class DatatableView(AppConfig):
name = 'datatableview'
| 16.111111 | 33 | 0.737931 | 106 | 0.731034 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.151724 |
df84e85ea375a45ff61026ef8e92ba96e7e0f291 | 426 | py | Python | assessment/migrations/0010_assessment_open_status.py | sravanireddy1102/peerevaluationsystem | 3dc46447a6b3b581e8738db58e7b861c421cdc0f | [
"MIT"
] | 2 | 2020-06-17T15:17:19.000Z | 2020-10-06T08:03:29.000Z | assessment/migrations/0010_assessment_open_status.py | sravanireddy1102/peerevaluationsystem | 3dc46447a6b3b581e8738db58e7b861c421cdc0f | [
"MIT"
] | 3 | 2021-03-30T13:33:22.000Z | 2021-06-04T23:21:52.000Z | assessment/migrations/0010_assessment_open_status.py | YichengShen/EaglesPeerEvaluationSystem | 853e35bf569efb87dc56064e6ec44a4e3c95c537 | [
"MIT"
] | 1 | 2022-01-02T07:01:37.000Z | 2022-01-02T07:01:37.000Z | # Generated by Django 3.0.2 on 2020-04-29 20:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assessment', '0009_auto_20200426_1103'),
]
operations = [
migrations.AddField(
model_name='assessment',
name='open_status',
field=models.BooleanField(default=True, verbose_name='open status'),
),
]
| 22.421053 | 80 | 0.622066 | 333 | 0.78169 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.286385 |
df8883bf266f2a644d25c10b6a4793fb6917cb40 | 950 | py | Python | opyoid/bindings/instance_binding/instance_binding_to_provider_adapter.py | illuin-tech/opyoid | a2ca485e1820ba0d12a86ba91100aa097a1e5736 | [
"MIT"
] | 37 | 2020-08-25T07:22:41.000Z | 2022-03-18T03:05:53.000Z | opyoid/bindings/instance_binding/instance_binding_to_provider_adapter.py | illuin-tech/opyoid | a2ca485e1820ba0d12a86ba91100aa097a1e5736 | [
"MIT"
] | 18 | 2020-10-04T17:33:24.000Z | 2021-12-16T16:28:35.000Z | opyoid/bindings/instance_binding/instance_binding_to_provider_adapter.py | illuin-tech/opyoid | a2ca485e1820ba0d12a86ba91100aa097a1e5736 | [
"MIT"
] | 2 | 2021-01-26T19:58:15.000Z | 2021-11-30T01:10:25.000Z | from opyoid.bindings.binding import Binding
from opyoid.bindings.binding_to_provider_adapter import BindingToProviderAdapter
from opyoid.bindings.registered_binding import RegisteredBinding
from opyoid.injection_context import InjectionContext
from opyoid.provider import Provider
from opyoid.utils import InjectedT
from .from_instance_provider import FromInstanceProvider
from .instance_binding import InstanceBinding
class InstanceBindingToProviderAdapter(BindingToProviderAdapter[InstanceBinding]):
"""Creates a Provider from an InstanceBinding."""
def accept(self, binding: Binding[InjectedT], context: InjectionContext[InjectedT]) -> bool:
return isinstance(binding, InstanceBinding)
def create(self,
binding: RegisteredBinding[InstanceBinding[InjectedT]],
context: InjectionContext[InjectedT]) -> Provider[InjectedT]:
return FromInstanceProvider(binding.raw_binding.bound_instance)
| 45.238095 | 96 | 0.807368 | 528 | 0.555789 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.051579 |
df8935cb314bb24e9c12282a7a762c31e2106abf | 307 | py | Python | backend/src/asset_manager/data/repos/license.py | JonathanLoscalzo/asset-license-dev-demo | 522c1d531e508ae7e85b212f804eee505d284b2b | [
"Xnet",
"X11"
] | null | null | null | backend/src/asset_manager/data/repos/license.py | JonathanLoscalzo/asset-license-dev-demo | 522c1d531e508ae7e85b212f804eee505d284b2b | [
"Xnet",
"X11"
] | null | null | null | backend/src/asset_manager/data/repos/license.py | JonathanLoscalzo/asset-license-dev-demo | 522c1d531e508ae7e85b212f804eee505d284b2b | [
"Xnet",
"X11"
] | null | null | null | from pymongo.database import Database
from asset_manager.data.repos.base import MongoRepository
from asset_manager.data.schemas.license import LicenseMongo
class LicenseRepository(MongoRepository[LicenseMongo]):
def __init__(self, db: Database):
super().__init__(db, "licenses", LicenseMongo)
| 34.111111 | 59 | 0.801303 | 148 | 0.482085 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.032573 |
df8a62c0847934ac75248a075f1ad448f85f7372 | 1,347 | py | Python | build_utils/salt_utils.py | maharg101/gdl-100-provision | 71651d1f6dd40de841f99cb9a6d1accb16ea39c1 | [
"Apache-2.0"
] | null | null | null | build_utils/salt_utils.py | maharg101/gdl-100-provision | 71651d1f6dd40de841f99cb9a6d1accb16ea39c1 | [
"Apache-2.0"
] | 1 | 2021-06-01T21:55:56.000Z | 2021-06-01T21:55:56.000Z | build_utils/salt_utils.py | maharg101/gdl-100-provision | 71651d1f6dd40de841f99cb9a6d1accb16ea39c1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
salt_utils.py
Description: Utility methods for configuration of Salt Cloud.
Written by: maharg101 on 27th March 2018
"""
import io
import yaml
def generate_openstack_conf(params):
"""
Generate an openstack.conf file-like given the supplied params dict.
:param params: A dictionary containing parameters to use. See utils.populate_params.
:return: StringIO populated with the generated openstack configuration.
"""
openstack_conf_data = dict(
openstack=dict(
driver='openstack',
region_name=params['OS_REGION_NAME'],
auth=dict(
username=params['OS_USERNAME'],
password=params['OS_PASSWORD'],
project_id=params['OS_PROJECT_ID'],
auth_url=params['OS_AUTH_URL'],
user_domain_name=params['OS_USER_DOMAIN_NAME'],
project_domain_name=params['OS_PROJECT_DOMAIN_NAME'],
),
networks=[
dict(name='public', nat_source=True, routes_externally=True, routes_ipv4_externally=True),
dict(name=params['network_name'], nat_destination=True, default_interface=True),
]
)
)
openstack_conf = io.StringIO(yaml.dump(openstack_conf_data, default_flow_style=False))
return openstack_conf
| 34.538462 | 106 | 0.64588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 546 | 0.405345 |
df8bdf1b9de14fdae803f684cecb7551ddf157c2 | 6,102 | py | Python | Scripts/Topology_Generator/top_gen.py | LABORA-INF-UFG/eXP-RAN | b36df30d9f8a39bc76343a1f6f206181dc424561 | [
"MIT"
] | 6 | 2021-02-26T13:50:58.000Z | 2022-03-08T13:30:16.000Z | Scripts/Topology_Generator/top_gen.py | LABORA-INF-UFG/eXP-RAN | b36df30d9f8a39bc76343a1f6f206181dc424561 | [
"MIT"
] | 1 | 2021-09-19T17:45:11.000Z | 2021-09-19T17:45:11.000Z | Scripts/Topology_Generator/top_gen.py | LABORA-INF-UFG/eXP-RAN | b36df30d9f8a39bc76343a1f6f206181dc424561 | [
"MIT"
] | 1 | 2020-06-20T02:11:16.000Z | 2020-06-20T02:11:16.000Z | # -*- coding: utf-8 -*-
import json
import math
import random
import argparse
import networkx as nx
class Node:
def __init__ (self, nodeNumber, nodeType):
self.nodeNumber = nodeNumber
self.nodeType = nodeType
def add_vm (self, vm):
if not hasattr(self,'vms'):
self.vms = []
self.vms.append(vm)
class VM:
def __init__ (self, vmNumber, cpu, ram):
self.vmNumber = vmNumber
self.cpu = cpu
self.ram = ram
class Link:
def __init__ (self, linkNumber, fromNode, toNode, capacity, delay):
self.linkNumber = linkNumber
self.fromNode = fromNode
self.toNode = toNode
self.delay = delay
self.capacity = capacity
def serialize (obj):
return obj.__dict__
def gen_vms_profile ():
vms_profile = []
processor = [1, 2, 4, 8]
memory = [1024, 2048, 4096, 8192]
for vcpus in processor:
for ram in memory:
vms_profile.append({'cpu': vcpus, 'ram': ram})
return vms_profile
def gen_link_profile ():
links_profile = []
mm_wave_bw = [0.9, 1.25, 1.5, 2, 3, 4, 8]
micro_wave_bw = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.25, 1.5, 2]
copper_bw = [1, 10, 40]
smf_fiber_1310_bw = [1, 10, 40, 100]
smf_fiber_1550_bw = [1, 10, 40, 100]
tbe_bw = [200, 400]
mm_wave_delay = range(1, 21)
micro_wave_delay = range(1, 101)
copper_delay = []
n = 0.05
for i in range(50):
copper_delay.append(n + (i * 0.02))
smf_fiber_1310_delay = range(1, 201)
smf_fiber_1550_delay = range(1, 351)
tbe_delay = range (1, 51)
for bw in mm_wave_bw:
for delay in mm_wave_delay:
dist = delay * 0.3
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
for bw in micro_wave_bw:
for delay in micro_wave_delay:
dist = delay * 0.3
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
for bw in copper_bw:
if(bw == 1):
for delay in copper_delay:
dist = delay * 0.02
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
elif(bw == 10):
delay = 0.275
dist = 0.055
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
else:
delay = 0.15
dist = 0.03
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
for bw in smf_fiber_1310_bw:
if(bw == 1):
for delay in smf_fiber_1310_delay:
dist = delay * 0.2
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
else:
delay = 50
dist = 10
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
for bw in smf_fiber_1550_bw:
if(bw == 1):
for delay in smf_fiber_1550_delay:
dist = delay * 0.2
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
else:
delay = 200
dist = 40
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
for bw in tbe_bw:
for delay in tbe_delay:
dist = delay * 0.2
links_profile.append({'delay': delay, 'bw': bw, 'dist': dist})
return links_profile
def draw_nodes (graph, vms_profile):
node_list = []
node_type = ["MECHost", "BaseStation", "Forwarding"]
nodeCount = 0
vmCount = 1
num_nodes = nx.number_of_nodes(graph)
number_mec = int(round(num_nodes * 0.05))
number_Base_Station = int(round(num_nodes * 0.3))
number_forwarding = num_nodes - number_Base_Station - number_mec
graph_nodes = list(graph.nodes)
for i in range(number_mec):
choice = random.choice(graph_nodes)
graph_nodes.remove(choice)
node = (Node(nodeCount, 'MECHost'))
nodeCount += 1
numberVMs = random.randint(1, 10)
for j in range (numberVMs):
profile = random.choice(vms_profile)
node.add_vm(VM(vmCount, profile['cpu'], profile['ram']))
vmCount += 1
node_list.append(node)
for i in range(number_Base_Station):
choice = random.choice(graph_nodes)
graph_nodes.remove(choice)
node = (Node(nodeCount, 'BaseStation'))
node.add_vm(VM(vmCount, 1, 1024))
node_list.append(node)
nodeCount += 1
vmCount += 1
for i in range(number_forwarding):
choice = random.choice(graph_nodes)
graph_nodes.remove(choice)
node_list.append(Node(nodeCount, 'Forwarding'))
nodeCount += 1
return node_list
def closest_dist(links_profile, distance):
aux = []
min_diff = 1000000
closest_dist = -1
for link in links_profile:
if abs(link['dist'] - distance) < min_diff:
min_diff = abs(link['dist'] - distance)
closest_dist = link['dist']
return closest_dist
def draw_links (graph, links_profile, L):
links_list = []
nodeCount = 0
pos = nx.get_node_attributes(graph, 'pos')
for edge in graph.edges:
x1, y1 = pos[edge[0]]
x2, y2 = pos[edge[1]]
distance = math.sqrt((x2 - x1)**2 + (y2 - y1)**2) * L
closest = closest_dist(links_profile, distance)
profile = random.choice([link for link in links_profile if link['dist'] == closest])
links_list.append(Link(nodeCount + 1, edge[0], edge[1], profile['bw'], profile['delay']))
nodeCount += 1
return links_list
def gen_topology(num_nodes, alpha, beta, L):
graph = nx.waxman_graph(num_nodes, beta, alpha, L)
while not nx.is_connected(graph):
graph = nx.waxman_graph(num_nodes, beta, alpha, L)
return graph
def save_json(node_list, links_list, user):
topology = {"user": user, "nodes": node_list, "links": links_list}
json_out_file = open('topology.json', 'w')
json.dump(topology, json_out_file, default=serialize)
def generate_topology(num_nodes, alpha, beta, L, user):
graph = gen_topology(num_nodes, alpha, beta, L)
links_profile = gen_link_profile()
links_list = draw_links(graph, links_profile, L)
vms_profile = gen_vms_profile()
node_list = draw_nodes(graph, vms_profile)
save_json(node_list, links_list, user)
def main ():
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--username", help="Your username")
parser.add_argument("-n", "--nodes", help="Number of nodes", type=int)
parser.add_argument("-a", "--alpha", help="Alpha Model parameter", type=float)
parser.add_argument("-b", "--beta", help="Beta Model parameter", type=float)
parser.add_argument("-l", "--dist", help="Maximum distance between nodes", type=float)
args = parser.parse_args()
generate_topology(args.nodes, args.alpha, args.beta, args.dist, args.username)
if __name__ == '__main__':
main() | 23.743191 | 91 | 0.67568 | 531 | 0.087021 | 0 | 0 | 0 | 0 | 0 | 0 | 549 | 0.089971 |
df8d73ce0b9ecd6d1a67506333d218eaea46a306 | 9,698 | py | Python | gather/weighted_corr.py | mrJeppard/clusterdb-ingest | f52f3ee03a1071ef15a63412e1e2085fdf74e584 | [
"MIT"
] | null | null | null | gather/weighted_corr.py | mrJeppard/clusterdb-ingest | f52f3ee03a1071ef15a63412e1e2085fdf74e584 | [
"MIT"
] | null | null | null | gather/weighted_corr.py | mrJeppard/clusterdb-ingest | f52f3ee03a1071ef15a63412e1e2085fdf74e584 | [
"MIT"
] | null | null | null | """
Weighted correlation of NES vectors. The idea is to weight positive values on either vector. Positive values
"""
import pandas as pd
import numpy as np
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def m(x, w):
"""Weighted Mean"""
return np.sum(x * w) / np.sum(w)
def cov(x, y, w):
"""Weighted Covariance"""
return np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
def corr(x, y, w):
"""Weighted Correlation"""
return cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
def weighted_corr(x, y):
w = weights(x,y)
return corr(x,y,w)
def all_by_all(df, weights=None):
if weights is None:
weights = df.std(axis=1).values.transpose()
sim_func = lambda x, y: corr(x, y, weights)
wd = pdist(df.transpose().values, metric=sim_func)
wd = squareform(wd)
wd = pd.DataFrame(wd, columns=df.columns, index=df.columns)
return wd
def std_weights(invivo_nes, invitro_nes):
std1 = invivo_nes.std(axis=1)
std2 = invitro_nes.std(axis=1)
return np.sqrt(std1.multiply(std2))
invivo_nes = pd.read_csv("./tmp/invivo.celltype.nes.csv", index_col=0)
#invitro_nes = pd.read_csv("./friedmanC.celltype.nesscores.csv", index_col=0)
invitro_nes = pd.read_csv("./tmp/invitro.res_0_4.nes.csv", index_col=0)
#invitro_nes = pd.read_csv("./tmp/test2.tab", index_col=0)
invivo_nes.head()
invivo_nes.shape
invivo_nes.isna().sum()
invitro_nes.shape
invitro_nes.isna().sum()
invivo_nes = invivo_nes.dropna(axis=0, how="any")
invivo_nes.shape
invitro_nes = invitro_nes.dropna(axis=0, how="any")
invitro_nes.shape
pathway_intersect = list(set(invivo_nes.index).intersection(invitro_nes.index))
len(pathway_intersect)
invivo_nes = invivo_nes.loc[pathway_intersect]
invitro_nes = invitro_nes.loc[pathway_intersect]
nes = pd.concat([invivo_nes, invitro_nes], axis=1)
nes.head()
corrs = all_by_all(nes, std_weights(invivo_nes, invitro_nes))
corrs.to_csv("./corrs.invivo-celltype.invitro-res-04.csv")
corrs.columns
#corrs["aCM"][invitro_nes.columns].sort_values()
corrs["pluripotent"][invivo_nes.columns].sort_values(ascending=False)
corrs["definitive_cardiomyocyte"][invivo_nes.columns].sort_values(ascending=False)
corrs["non_contractile"][invivo_nes.columns].sort_values(ascending=False)
#corrs["aCM"].sort_values()
################################################
dataset_name = "fetal combined heart of cells"
cluster_solution_name = "heart cell types"
size = "similarity"
color = "MYL7"
color_centroids = pd.read_csv("invitroCombined.res_0_4.centroids.csv", index_col=0).loc[color]
#color_centroids = pd.read_csv("fetalCombined.celltype.centroids.csv", index_col=0).loc[color]
cluster_cell_counts = pd.read_csv("invitroCombined.cluster.cellcounts.csv", index_col=0)
other_dataset = "in vitro combined heart of cells"
other_species = "human"
other_study = "in vitro"
other_organ = "heart"
other_cluster_solution_name = "louvain resolution 0.4"
big_dict = {
"dataset_name": dataset_name,
"cluster_solution_name": cluster_solution_name,
"size_by": size,
"color_by": color,
"cluster_similarities": []
}
for celltype in invivo_nes:
cs_dict = {
"dataset": {
"name": other_dataset,
"species": other_species,
"organ": other_organ,
"study": other_study
},
"compared_to_cluster": celltype,
"cluster_solution_name": other_cluster_solution_name,
"clusters": []
}
for cluster in invitro_nes:
cluster_dict = {
"name": cluster,
"size": corrs.loc[celltype, cluster].item(),
"color": color_centroids[cluster].item(),
"cell_count": cluster_cell_counts.loc[int(cluster)].item()
}
cs_dict["clusters"].append(cluster_dict)
big_dict["cluster_similarities"].append(cs_dict)
###################
import json
with open('example-similarities.json', 'w') as outfile:
json.dump(big_dict, outfile, indent=4)
print(celltype)
#################
import scanpy as sc
filenames_and_days = [
("./clusterd-friedman/day0-rep12.h5ad","Day0"),
("./clusterd-friedman/day15-rep12.h5ad", "Day15"),
("./clusterd-friedman/day2-rep12.h5ad", "Day2"),
("./clusterd-friedman/day30-rep12.h5ad", "Day30"),
("./clusterd-friedman/day5-rep12.h5ad", "Day5"),
]
dfs = []
for adfile, day in filenames_and_days:
ad = sc.read(adfile)
df = ad.obs[["cell_type"]]
df = df.dropna(axis=1)
df.index = ["%s-%s" % (name, day) for name in df.index]
dfs+=[df]
ctypes = pd.concat(dfs, axis=0)
ctypes.head()
ctypes.to_csv("friedman-celltype-assignments.csv", header=True)
ctypes.head()
len(ctypes.index) == len(ctypes.index.unique())
"""
{
dataset_name: "fetal combined heart of cells",
cluster_solution_name: “heart cell types”
size_by: 'similarity',
color_by: ‘MYL7',
cluster_similarities: [
{
dataset: {
name: 'in vitro heart combined',
species: 'Homo sapiens',
organ: 'heart',
study: 'in vitro',,
},
compared_to_cluster: “cell type 1”,
cluster_solution_name: 'louvain resolution .25',
clusters: [
{
name: 'A',
size: 15,
color: -0.75,
cell_count: 34,
},
{
name: 'B',
size: 30,
color: 0.5,
cell_count: 88
},
...
],
},
{
(repeated...)
},
...
]
}
#############################
# Dont go below here....
##############################
invitro_nes2.head()
n1 = invivo_nes1[invivo_nes1.columns[0]]
n2 = invitro_nes2[invitro_nes2.columns[2]]
c = invivo_nes1.corr()
c[c.columns[0]].sort_values()
c['2']
c.head()
len(set(n1.index).intersection(n2.index))
n1 = n1[list(set(n1.index).intersection(n2.index))]
n2 = n2[list(set(n1.index).intersection(n2.index))]
##############################
import numpy as np
# Without weight.
np.corrcoef(n1,n2)
x, y = n1, n2
(x>0).sum()
(y>0).sum()
positive_ind = np.logical_or(x>0, y>0)
positive_ind.sum()
weights = np.zeros((len(positive_ind),1))
weights[positive_ind] = 10 / positive_ind.sum()
weights[~positive_ind] = 1 / (len(positive_ind) - positive_ind.sum())
# alternative paying attention to stdard devaition across clusters
weights = nes.std(axis=1).values.transpose()
# Gives you way smaller
#
(100 / 5)
weights.sum()
1 - corr(n1.values, n2.values, weights.transpose())
sim_func = lambda x, y: 1 - corr(x,y, weights)
wd = pdist(nes.transpose().values, metric=sim_func)
wd = squareform(wd)
wd.shape
wd
wd = pd.DataFrame(wd, columns=invivo_nes.columns, index=invivo_nes.columns)
wd.head()
wd["aCM"].sort_values()
nes.head()
aa = all_by_all(nes)
aa.head()
aa["aCM"].sort_values()
ab["aCM"][invitro_nes.columns].sort_values()
closest = ab["aCM"][invitro_nes.columns].sort_values().index[0:2]
ad.obs["is"] = np.logical_or(ad.obs["louvain"])
aCMMarkers = ["res.25", "NPPA", "PAM", "MYL7"]
import scanpy as sc
ad = sc.read("in_vitro_clustered.h5ad")
sc.tl.louvain(ad, resolution=.25, key_added="res.25")
sc.pl.umap(ad, color= aCMMarkers)
dataset_name = "in vivo heart of cells"
cluster_solution_name = "heart cell types"
size = "similarity"
color = "NPPA"
other_dataset_name = "in vitro heart of cells"
other_cluster_solution_name = "louvain resolution .25"
import pandas as pd
centroids_invivo = pd.read_csv("fetalCombined.celltype.centroids.csv", index_col=0)
centroids_invitro = pd.read_csv("invitroCombined.res_0_4.centroids.csv", index_col=0)
possible_markers = ["MYL7", "PAM"]
import scanpy as sc
import numpy as np
#ad = sc.read("tm")
len(ad.obs["celltype"].unique())
np.sum(ad[:, possible_markers[0]].X > 0)
def fc_vs_all(centroids):
fc = pd.DataFrame(index=centroids.index)
for cname in centroids:
other_names = [cn for cn in centroids if cn != cname]
#print(other_names)
centroid = centroids[cname]
others_centroid = centroids[other_names]
#print(others_centroid.head())
others_centroid = others_centroid.sum(axis=1)
#print(others_centroid)
others_centroid = np.log2(others_centroid + 2)
log_centroid = np.log2(centroid + 2)
fc[cname] = log_centroid.sub(others_centroid, axis=possible_markers)
#print(fc.head())
return fc
centroids_invitro.loc[possible_markers]
centroids_invivo.loc[color]
centroids_invivo.index
invivo_fc = fc_vs_all(centroids_invivo)
invitro_fc = fc_vs_all(centroids_invitro)
invitro_fc.loc[possible_markers].max(axis=1)
invivo_fc.loc[color]
"""
{
cluster_solution_name: “heart cell types”
size_by: 'similarity',
color_by: ‘MYL7',
cluster_similarities: [
{
dataset: {
name: 'in vitro heart combined',
species: 'Homo sapiens',
organ: 'heart',
study: 'in vitro',,
},
compared_to_cluster: “cell type 1”,
cluster_solution_name: 'louvain resolution .25',
clusters: [
{
name: 'A',
size: 15,
color: -0.75,
cell_count: 34,
},
{
name: 'B',
size: 30,
color: 0.5,
cell_count: 88
},
...
],
},
{
(repeated...)
},
...
]
}
"""
"""
{
gene: 'ALK', / cluster: 'userCluster1',
size_by: 'sensitivity',
color_by: 'z_stat',
cluster_solutions: [
{
dataset: {
name: 'dataset name 2',
species: 'Homo sapiens',
organ: 'heart',
study: 'in vivo',,
},
cluster_name: 'solution 2',
clusters: [
{
name: 'A',
size: 15,
color: -0.75,
cell_count: 34,
},
{
name: 'B',
size: 30,
color: 0.5,
cell_count: 88
},
...
],
},
{
(another dataset/cluster-solution)
},
...
]
}
"""
| 24.739796 | 108 | 0.642813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,494 | 0.565343 |
df8e47912b6edd2b934d37e8bca40a26e9f732d8 | 1,138 | py | Python | time_wizard/mixins.py | wfehr/django-time-wizard | d192f4aa9c9a15249e69fa47f3f41e9c048dc718 | [
"BSD-2-Clause"
] | 5 | 2019-10-08T08:23:25.000Z | 2020-07-08T10:41:00.000Z | time_wizard/mixins.py | wfehr/django-time-wizard | d192f4aa9c9a15249e69fa47f3f41e9c048dc718 | [
"BSD-2-Clause"
] | 8 | 2019-07-06T13:14:58.000Z | 2020-03-09T10:49:03.000Z | time_wizard/mixins.py | wfehr/django-time-wizard | d192f4aa9c9a15249e69fa47f3f41e9c048dc718 | [
"BSD-2-Clause"
] | 2 | 2020-01-28T14:14:09.000Z | 2020-03-09T10:16:27.000Z | from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.utils.timezone import now
class TimeWizardInlineMixin(models.Model):
"""
Mixin for the inline support of PeriodModel.
"""
periods = GenericRelation(
'time_wizard.PeriodModel',
)
class Meta:
abstract = True
@property
def is_published(self):
dt = now()
for p in self.periods.all():
if p.contains(dt):
return True
return False
class TimeWizardMixin(models.Model):
"""
Mixin to let models have a foreign-key-relation to the TimeWizard. Property
`is_published` is used to indicate if a TimeWizard is set wether or not
to show the contents/children.
"""
time_wizard = models.ForeignKey(
'time_wizard.TimeWizardModel',
on_delete=models.SET_NULL,
blank=True,
null=True,
)
class Meta:
abstract = True
@property
def is_published(self):
if self.time_wizard_id:
return self.time_wizard.is_published
else:
return False
| 23.708333 | 79 | 0.633568 | 1,002 | 0.880492 | 0 | 0 | 330 | 0.289982 | 0 | 0 | 316 | 0.27768 |
df8eda08eeceb3a4e4b4d62b56b2aacf6a21c336 | 16,807 | py | Python | highearthorbit.py | orithena/highearthorbit | a444dba54f6fdb5b84cf5bd4c3e24f4bd1796fb1 | [
"MIT"
] | 3 | 2017-06-17T10:13:38.000Z | 2019-01-18T18:40:59.000Z | highearthorbit.py | orithena/highearthorbit | a444dba54f6fdb5b84cf5bd4c3e24f4bd1796fb1 | [
"MIT"
] | null | null | null | highearthorbit.py | orithena/highearthorbit | a444dba54f6fdb5b84cf5bd4c3e24f4bd1796fb1 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import twython, time, pprint, traceback, os, sys
import config
from twython import TwythonStreamer, TwythonError, TwythonRateLimitError
import urllib, json, glob, re
import thread
queuelock = thread.allocate_lock()
watchlock = thread.allocate_lock()
import logging
logging.basicConfig(filename='highearthorbit.log', format='%(asctime)s %(levelname)s %(message)s')
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
"""
Config: create a file named config.py with these lines:
app_key="<your app key>"
app_secret="<your app secret>"
oauth_token="<your oauth token>"
oauth_token_secret="<your oauth token secret"
owner="<twitter name of owner>"
All keys, secrets and tokens are creatable on http://apps.twitter.com
Run: until I implement a better solution, run it in a screen session
"""
# --- Module-level shared state (mutated by the helpers below and by run_queue) ---
lasttry=0                # timestamp of a "last try" — not referenced in this portion; TODO confirm usage
rts = []                 # (item, timestamp) tuples; entries older than one hour are pruned by cleanrts()
_queue = []              # pending actions scheduled via queue(...); drained by run_queue() under queuelock
_fail = {}               # timestamp -> failed action; run_queue() keeps only the last hour
_done = {}               # timestamp -> completed action; run_queue() keeps only the last 24 hours
friends = []             # current friend list; replaced wholesale by update_friends()
listmembers = []         # user ids of the approved list's members (see update_approved_list)
blocked = []             # user ids blocked by this account (see update_block_list)
twitter = None           # Twython API client, initialized elsewhere; run_queue() checks for None
user_screenname = ''     # this bot account's own screen name (used to read its own timeline)
def update_friends(friendlist):
    """Replace the module-level friends list with the given list.

    :param friendlist: new list of friends to store in the module global.
    """
    global friends
    friends = friendlist
    # Original passed `friends` without a %s placeholder, which makes logging's
    # lazy formatting fail ("not all arguments converted") and drops the message.
    log.info("Updated friends: %s", friends)
def update_approved_list():
    """Refresh the module-level listmembers with the member ids of the approved list.

    Reads config.approved_list (a dict with 'name' and 'owner') and fetches the
    list's members through the Twython client; on any API failure the previous
    listmembers value is kept and a warning is logged.
    """
    global listmembers
    try:
        listmembers = [ user["id"] for user in twitter.get_list_members(slug=config.approved_list['name'], owner_screen_name=config.approved_list['owner'])["users"] ]
        # Pass the three values as separate lazy args: the original passed one
        # tuple against three %s placeholders, so the message never formatted.
        log.info("Updated members of list %s by @%s: %s", config.approved_list['name'], config.approved_list['owner'], listmembers)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt still propagate.
        log.warning("Error updating list %s by @%s.", config.approved_list['name'], config.approved_list['owner'])
def find_and_delete_blocked_retweets():
    """Scan the bot's last 100 tweets and destroy retweets of blocked users.

    For every retweet of a blocked account, a destroy_status action is queued
    and any archive files previously written for that tweet are removed.
    """
    global blocked
    log.info("Checking the last 100 tweets in my timeline for retweets of blocked users. Just to destroy them.")
    try:
        tl = twitter.get_user_timeline(screen_name=user_screenname, count=100)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are not swallowed; keep the traceback for diagnostics.
        log.error('Exception caught while checking for tweets of blocked users.', exc_info=True)
    else:
        for t in tl:
            if 'retweeted_status' in t and t['retweeted_status']['user']['id'] in blocked:
                queue(twitter.destroy_status, id=t['id'])
                log.info('Will destroy status %s, because @%s is blocked.' % (t['id'], t['retweeted_status']['user']['screen_name']))
                # Also drop every archived copy of the blocked user's tweet.
                for archived_file in glob.glob(os.path.join(config.archive_dir, '*', t['retweeted_status']['id_str']) + '-*.*'):
                    try:
                        os.remove(archived_file)
                    except OSError:
                        # Narrowed from a bare ``except:``; os.remove raises
                        # OSError on missing files / permission problems.
                        log.warning("Unable to delete archive file %s" % archived_file)
                    else:
                        log.info("Removed archive file %s" % archived_file)
def update_block_list():
    """Fetch the current block list from Twitter into the global ``blocked``.

    If any previously unseen ids show up, trigger a cleanup pass that
    destroys retweets of those newly blocked users.
    """
    global blocked
    previous = blocked
    try:
        blocked = twitter.list_block_ids()['ids']
        log.info("Updated block list.")
    except Exception as e:
        log.warning("Could not update block list: %s", str(e))
    newly_blocked = set(blocked) - set(previous)
    if newly_blocked:
        find_and_delete_blocked_retweets()
def cleanrts():
    """Drop retweet records older than one hour from the global ``rts`` list.

    ``rts`` holds (tweet_id, timestamp) tuples.
    """
    # Bug fix: the original removed items from ``rts`` while iterating over
    # it, which makes the iterator skip the element following each removal.
    # Rebuild the list in place instead (slice assignment keeps the same
    # list object, so other references to ``rts`` stay valid).
    cutoff = time.time() - 3600
    rts[:] = [(m, t) for (m, t) in rts if t >= cutoff]
def _fmt(func, args, kwargs):
return "%s(%s)" % (func.__name__, ', '.join([ a for a in args ] + [ "%s=%s" % (k, repr(v)) for k,v in kwargs.iteritems() ]))
def run_queue():
    """Work off the global action queue, honouring Twitter's rate limits.

    Prunes the bookkeeping dicts (_done: 24 h, _fail: 60 min), then pops
    queued (tries, (func, args, kwargs)) actions one by one while holding
    ``queuelock`` so only one drainer runs at a time.  Failed actions are
    re-queued up to 3 times; rate-limited actions are pushed back to the
    front of the queue.
    """
    global _done, _queue, _fail, twitter
    with queuelock:
        t = int(time.time())
        # Keep only recent history so the 15-minute window count below stays
        # meaningful and the dicts do not grow forever.
        _done = dict([ (k,v) for k,v in _done.iteritems() if k > t - 86400 ])
        _fail = dict([ (k,v) for k,v in _fail.iteritems() if k > t - 3600 ])
        log.info('Running queue (%s actions). Actions during last 24h: %s/last 15m: %s, Fails in the last 60m: %s.' % (len(_queue), len(_done), len([ k for k,v in _done.iteritems() if k > t - 900 ]), len(_fail)))
    with queuelock:
        while len(_queue) > 0 and twitter is not None:
            t = int(time.time())
            # Self-imposed rate limit: actions + failures within the last
            # 15 minutes must stay below config.rate_limit_per_15min.
            if len([ k for k,v in _done.iteritems() if k > t - 900 ]) + len([ k for k,v in _fail.iteritems() if k > t - 900 ]) >= config.rate_limit_per_15min:
                log.warn("Rate Limit reached. Currently not working on the %s items in the queue." % len(_queue))
                break
            if len(_fail) > 15:
                # Too many recent failures: abort the whole process by
                # raising KeyboardInterrupt in the main thread.
                log.error("Fail Limit reached. Killing everything.")
                thread.interrupt_main()
            (tries, (func, args, kwargs)) = _queue.pop(0)
            # Skip actions that already succeeded within the last 24 hours.
            if not (func, args, kwargs) in _done.values():
                try:
                    log.debug("Trying %s from queue" % _fmt(func, args, kwargs))
                    if not config.twitter_is_read_only:
                        func(*args, **kwargs)
                except Exception as e:
                    if isinstance(e, TwythonError) and e.error_code == 403:
                        # 403 here means the action was already performed
                        # (e.g. already retweeted) -- treat as harmless.
                        log.warn("Twitter says I did %s already." % _fmt(func, args, kwargs))
                        #_fail[t] = (func, args, kwargs)
                    elif isinstance(e, TwythonRateLimitError):
                        # Put the action back at the front and sleep while
                        # still holding the lock so nothing else runs.
                        log.warn("Twitter says I hit the Rate Limit with %s. Re-queuing." % _fmt(func, args, kwargs))
                        _queue.insert(0, (tries, (func, args, kwargs)) )
                        if e.retry_after is not None:
                            log.warn("Keeping queue lock until I slept for %s seconds." % e.retry_after)
                            time.sleep(int(e.retry_after))
                    elif tries < 3:
                        # Unknown error: record the failure, re-queue with an
                        # incremented retry counter and rebuild the REST
                        # client in case the connection went stale.
                        log.error("Error while running queue item %s: %s" % (_fmt(func, args, kwargs), str(e)), exc_info=True)
                        _queue.append( (tries+1, (func, args, kwargs)) )
                        _fail[t] = (func, args, kwargs)
                        log.warn("Try #%s of %s from queue failed, re-queuing." % (tries+1, _fmt(func, args, kwargs)))
                        log.warn("Reinitializing twitter connection except streaming.")
                        twitter = twython.Twython(app_key=config.app_key, app_secret=config.app_secret, oauth_token=config.oauth_token, oauth_token_secret=config.oauth_token_secret)
                    else:
                        log.error("Tried 3 times, but always got an exception... giving up on %s" % _fmt(func, args, kwargs))
                else:
                    _done[t] = (func, args, kwargs)
                    log.info("%s is done." % _fmt(func, args, kwargs))
            else:
                log.warn("I already had that one in my queue: %s" % _fmt(func, args, kwargs))
            # Throttle between successive queued API calls.
            # NOTE(review): source indentation is ambiguous here; this sleep
            # is read as part of the while-body -- confirm against upstream.
            time.sleep(5)
def queuewatch(check_time=900):
    """Watchdog loop: periodically refresh the block list and run the queue.

    check_time -- seconds between checks (default 900 = 15 minutes).

    The non-blocking acquire of ``watchlock`` guarantees that at most one
    watchdog loop is running, even if the main loop restarts and spawns this
    function again.
    """
    if watchlock.acquire(0):
        while True:
            time.sleep(check_time)
            log.debug("It's time to check the queue for any forgotten actions. (interval=%ss)" % check_time)
            try:
                update_block_list()
                run_queue()
            except Exception as e:
                # Bug fix: the exception was passed as a positional logging
                # argument without a %s placeholder, so it was never rendered
                # into the message (logging raised a formatting error instead).
                log.warning("Exception in watchdog thread: %s", e)
        watchlock.release()  # NOTE(review): unreachable after the infinite loop above
def queue(func, *args, **kwargs):
    """Append a Twitter API call to the work queue with a retry count of 0."""
    global _queue
    entry = (0, (func, args, kwargs))
    _queue.append(entry)
    log.debug("Queued %s." % _fmt(func, args, kwargs))
def rt(tweetid):
    """Queue a retweet of ``tweetid`` unless it was retweeted within the hour.

    Relies on the global ``rts`` list of (tweet_id, timestamp) tuples, which
    is pruned by cleanrts() before the check.
    """
    cleanrts()
    if twitter is None:
        return
    if any(seen_id == tweetid for seen_id, _ts in rts):
        log.info("Tweet id %s has been retweeted already." % tweetid)
    else:
        log.info("Will retweet id %s" % tweetid)
        queue(twitter.retweet, id=tweetid)
        rts.append((tweetid, time.time()))
def tweet(tweettext):
    """Queue a status update, truncated to Twitter's 140-character limit."""
    if twitter is None:
        return
    queue(twitter.update_status, status=tweettext[:140])
def download_media(data, filename):
    """Download all photo attachments of a tweet next to its archive file.

    data     -- the tweet's JSON payload
    filename -- archive base path; media is saved as <filename>.<idx>.<ext>
    """
    # Pick the richest media container available: extended_tweet entities
    # take precedence over extended_entities, which take precedence over the
    # plain (possibly truncated) entities.
    datadict = data['entities']
    if 'extended_entities' in data and 'media' in data['extended_entities']:
        datadict = data['extended_entities']
    if 'extended_tweet' in data:
        if 'entities' in data['extended_tweet'] and 'media' in data['extended_tweet']['entities']:
            datadict = data['extended_tweet']['entities']
        if 'extended_entities' in data['extended_tweet'] and 'media' in data['extended_tweet']['extended_entities']:
            datadict = data['extended_tweet']['extended_entities']
    for index,mediadata in enumerate(datadict['media']):
        # Target name: <filename>.<index>.<original file extension>.
        mediafile = '.'.join((filename, str(index), mediadata['media_url_https'].split('.')[-1]))
        # The ':orig' suffix requests the original full-resolution image.
        mediaurl = ':'.join((mediadata['media_url_https'], 'orig'))
        if 'type' in mediadata and mediadata['type'] == 'photo':
            try:
                urllib.URLopener().retrieve(mediaurl, mediafile)
                log.info("Archived media: %s -> %s from tweet %s." % (mediaurl, mediafile, data['id']))
            except Exception as e:
                log.error("Archive image cannot be downloaded from %s or created in %s: %s" % (mediaurl, mediafile, str(e)))
def save(data):
    """Archive a tweet's JSON (and optionally its photos) to disk.

    Layout: <config.archive_dir>/<bucket>/<tweet_id>-<author>.json, where the
    bucket is the tweet id with its last 15 digits dropped, zero-padded to 6.
    For retweets the author of the original tweet is used.
    """
    if 'retweeted_status' in data:
        author = data['retweeted_status']['user']
    else:
        author = data['user']
    bucket = os.path.join(config.archive_dir, data['id_str'][:-15].zfill(6))
    target = os.path.join(bucket, '-'.join((data['id_str'], author['screen_name'])))
    json_path = target + '.json'
    if os.path.isfile(json_path):
        log.warn("Archive file %s.json for tweet %s already exists." % (target, data['id']))
        return
    try:
        if not os.path.isdir(bucket):
            os.makedirs(bucket)
    except Exception as e:
        log.error("Archive directory %s cannot be created or is not writable: %s" % (bucket, str(e)))
        return
    try:
        with open(json_path, 'w') as f:
            json.dump(data, f, indent=4)
        log.info("Archived tweet %s to %s.json" % (data['id'], target))
    except Exception as e:
        log.error("Archive file %s cannot be created or is not writable: %s" % (json_path, str(e)))
        return
    if config.archive_photos and 'media' in data['entities']:
        download_media(data, target)
def is_spam(data):
    """Heuristic spam / tweet-stealing filter.

    Logs the tweet and, on rejection, the reason.  Returns True when the
    tweet should NOT be retweeted.
    """
    log.info("%s @%s: %s" % (data['id'], data['user']['screen_name'], data['text'].replace('\n', ' ')))
    body = data['text'].strip()
    lowered = body.lower()
    tracked = config.track.lower().split(" or ")
    if not any(tag in lowered for tag in tracked):
        log.info("Retweet did not contain our search, assuming spam.")
        return True
    if body.startswith('"') and body.endswith('"'):
        log.info("Looks like a quoted Tweet, assuming tweet stealing.")
        return True
    if re.search(r'rt\W+@\w+\W*[:"]', body, re.IGNORECASE):
        log.info("Looks like a manual retweet, assuming tweet stealing.")
        return True
    if len(data['entities']['hashtags']) > config.spamfilter_max_hashtags:  # Too many hashtags?
        log.info("Munched some Spam: Too many Hashtags. Not retweeting %s." % data['id'])
        return True
    if any(word.lower() in lowered for word in config.spamfilter_word_blacklist):  # Blacklisted words?
        hits = [word for word in config.spamfilter_word_blacklist if word in data['text']]
        log.info("This list of words is black. It contains %s, which is why I won't retweet %s." % (str(hits), data['id']))
        return True
    return False
def decide(data):
    """Dispatch a single message from the Twitter stream.

    Depending on its shape, the message is archived, retweeted, treated as a
    direct message command, or ignored.  ``data`` is the decoded JSON payload.
    """
    # Prefer the untruncated text when Twitter delivers an extended payload.
    if 'extended_tweet' in data and 'full_text' in data['extended_tweet']:
        data['text'] = data['extended_tweet']['full_text']
    if 'full_text' in data:
        data['text'] = data['full_text']
    if config.archive_own_retweets_only and 'retweeted_status' in data and data['user']['screen_name'] == user_screenname:
        log.info("%s @%s: %s" % (data['id'], data['user']['screen_name'], data['text'].replace('\n', ' ')))
        # I only save my own retweets for the archive. This allows the webviewer to "dumb-detect" that
        # a Retweet by the Bot has been destroyed manually from the Botaccount.
        save(data)
    elif 'retweeted_status' in data:
        # Normal retweets are only logged in debug level, else dropped silently.
        log.debug("Retweet received: %s @%s: %s" % (data['id'], data['user']['screen_name'], data['text'].replace('\n', ' ')))
        return
    elif is_spam(data):
        # If it's spam, the function is_spam() has already logged a message. We're just walking away.
        return
    elif 'text' in data:
        # Hey, something came in! Maybe it's interesting?
        #log.info("%s @%s: %s" % (data['id'], data['user']['screen_name'], data['text'].replace('\n', ' ')))
        if data['user']['id'] in blocked:
            # If we blocked someone, we don't want to read him. Twitter, why do I keep getting that blockhead in my search results?
            log.info('Not retweeting id %s because user @%s is blocked.' % (data['id'], data['user']['screen_name']))
        #elif data['text'].lower().find(config.track.lower()) > -1 and not 'retweeted_status' in data:
        elif not 'retweeted_status' in data:
            # Whohoo! A tweet that actually contains our Hashtag and is not a retweet!
            rt(data['id'])
            if not config.archive_own_retweets_only:
                # I save everything I can get. Okay, archiving photos are configured elsewhere.
                save(data)
    elif 'direct_message' in data:
        # Currently dead code because this type of message is not available in a "site stream".
        # To enable this, this Bot also needs to follow the "user stream" in another thread.
        update_approved_list()
        log.info("DM from @%s: %s" % (data['direct_message']['sender']['screen_name'], data['direct_message']['text']))
        # Only members of the approved list may tweet through the bot via DM.
        if data['direct_message']['sender']['id'] in listmembers:
            tweet(data['direct_message']['text'])
    elif 'friends' in data:
        # Currently dead code because this type of message is not available in a "site stream".
        # To enable this, this Bot also needs to follow the "user stream" in another thread.
        update_friends(data['friends'])
    else:
        # Currently dead code because this type of message is not available in a "site stream".
        # To enable this, this Bot also needs to follow another stream in another thread.
        log.warning("Unknown notification received:")
        log.warning(data)
class MyStreamer(TwythonStreamer):
    """Twython streaming client: routes each stream message to decide() and
    kicks off a queue-draining thread for every received message."""
    def on_success(self, data):
        # Called by twython for every successfully parsed stream message.
        decide(data)
        thread.start_new_thread(run_queue, ())
    def on_error(self, status_code, data):
        # Called by twython on stream/HTTP errors.
        log.error("Error Code %s received in data package:" % status_code)
        log.error(data)
        if status_code == 503:
            # Service unavailable: back off, then disconnect so the __main__
            # loop reconnects and catches up on missed tweets.
            log.error("Waiting 10 minutes, then this bot restarts internally... hopefully finding all missed tweets then.")
            time.sleep(600)
            self.disconnect()
if __name__ == "__main__":
    # How many past tweets to read on startup (overridable via --quick).
    readback = config.read_back
    if '--quick' in sys.argv:
        readback = 10
        log.info("------ Quick start, reading only 10 tweets back")
    # Outer supervision loop: any exception restarts the bot after 15 min.
    while True:
        try:
            log.info("====== Entering High Earth Orbit in the Twitterverse... ehm. Okay, okay, I'm initializing. ======")
            twitter = twython.Twython(app_key=config.app_key, app_secret=config.app_secret, oauth_token=config.oauth_token, oauth_token_secret=config.oauth_token_secret)
            creds = twitter.verify_credentials()
            #userstream = MyStreamer(config.app_key, config.app_secret, config.oauth_token, config.oauth_token_secret)
            #userstream.creds = creds
            filterstream = MyStreamer(config.app_key, config.app_secret, config.oauth_token, config.oauth_token_secret)
            filterstream.creds = creds
            userid = creds['id_str']
            user_screenname = creds['screen_name']
            update_approved_list()
            update_block_list()
            log.info('Reading last retweets.')
            # Seed the duplicate-protection list with recent retweets so a
            # restart does not retweet the same tweets again.
            rts += [ (t['retweeted_status']['id'], time.time(),) for t in twitter.get_user_timeline(screen_name=user_screenname, count=readback) if 'retweeted_status' in t ]
            # Two catch-up passes over the search API (the second, 10 s
            # later, picks up tweets indexed in the meantime).
            for a in (1,2):
                time.sleep((a-1)*10)
                log.info("Catching up on missed tweets, take %s." % a)
                old_tweets = twitter.search(q=config.track + " -filter:retweets", count=readback-5, tweet_mode='extended')['statuses']
                # Process oldest first so retweet order matches tweet order.
                for t in sorted(old_tweets, key=lambda t: t['id']):
                    decide(t)
            log.info('Caught up on missed tweets, running queue.')
            thread.start_new_thread(run_queue, ())
            thread.start_new_thread(queuewatch, (900,))
            log.info('Going into streaming mode')
            # Blocks here; stream callbacks are handled by MyStreamer.
            filterstream.statuses.filter(track=[t.strip() for t in config.track.lower().split(' or ')])
        except Exception, e:
            log.warning('==Exception caught, restarting in 15 minutes==')
            log.warning(str(e), exc_info=True)
            time.sleep(900)
            readback = config.read_back
| 49.143275 | 208 | 0.606057 | 499 | 0.02969 | 0 | 0 | 0 | 0 | 0 | 0 | 5,819 | 0.346225 |
df8f367ee66d37324f8f4c0d32e288f6c1b4fb39 | 1,833 | py | Python | migrations/versions/8b9f5b081137_.py | Rdbaker/betly | 92c7ae41bd221bbd21997fcd13e0f38b48f66d7d | [
"BSD-3-Clause"
] | null | null | null | migrations/versions/8b9f5b081137_.py | Rdbaker/betly | 92c7ae41bd221bbd21997fcd13e0f38b48f66d7d | [
"BSD-3-Clause"
] | null | null | null | migrations/versions/8b9f5b081137_.py | Rdbaker/betly | 92c7ae41bd221bbd21997fcd13e0f38b48f66d7d | [
"BSD-3-Clause"
] | null | null | null | """Creates the bet table and associates it with users.
Revision ID: 8b9f5b081137
Revises: 19cabd29fb71
Create Date: 2016-08-27 17:29:41.137566
"""
# revision identifiers, used by Alembic.
revision = '8b9f5b081137'
down_revision = '19cabd29fb71'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the ``bet`` table and the ``user_bet`` association table.

    ``bet.organizer`` references ``user.id``; ``user_bet`` links users to
    bets with a composite primary key.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('bet',
        sa.Column('guid', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('organizer', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('name', sa.Text(), nullable=False),
        sa.Column('url_name', sa.String(length=100), nullable=False),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('bet_type', sa.Text(), nullable=True),
        sa.Column('options', sa.Text(), nullable=True),
        sa.Column('amount', sa.Float(precision=2), nullable=True),
        sa.Column('bet_status', sa.Text(), nullable=True),
        sa.Column('outcome_option_value', sa.Text(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('deleted_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['organizer'], ['user.id'], ),
        sa.PrimaryKeyConstraint('guid')
    )
    op.create_table('user_bet',
        sa.Column('user_guid', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('bet_guid', postgresql.UUID(as_uuid=True), nullable=False),
        sa.ForeignKeyConstraint(['bet_guid'], ['bet.guid'], ),
        sa.ForeignKeyConstraint(['user_guid'], ['user.id'], ),
        sa.PrimaryKeyConstraint('user_guid', 'bet_guid')
    )
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop ``user_bet`` first (it references ``bet``),
    then drop ``bet``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user_bet')
    op.drop_table('bet')
    ### end Alembic commands ###
| 36.66 | 74 | 0.687398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 659 | 0.35952 |
df92c2b640273d1fde6e35ef399b77e27ac3c838 | 6,847 | py | Python | script.py | Relex12/mywebsite_jekyll | 58a4536c3049f0f575b60dbcfb33789ea1a909ea | [
"MIT"
] | null | null | null | script.py | Relex12/mywebsite_jekyll | 58a4536c3049f0f575b60dbcfb33789ea1a909ea | [
"MIT"
] | null | null | null | script.py | Relex12/mywebsite_jekyll | 58a4536c3049f0f575b60dbcfb33789ea1a909ea | [
"MIT"
] | null | null | null | from os import path
from os import system
# Abort early when the sibling checkouts this script depends on are missing.
if not path.exists("../Relex12.github.io/"):
    raise FileNotFoundError("Relex12.github.io directory not found")
if not path.exists("../Markdown-Table-of-Contents/"):
    raise FileNotFoundError("Markdown-Table-of-Contents directory not found")
#####################
# FILES DECLARATION #
#####################

# One entry per page to publish.  Keys:
#   folder -- sibling repository checkout the source file lives in
#   file   -- path of the source file inside that repository
#   layout -- Jekyll layout ("default" for markdown pages, "null" for raw html)
#   title  -- page title written into the Jekyll front matter
#   link   -- permalink on the site ("fr/..." for French pages, "null" for the index)
#   output -- file name created inside ../Relex12.github.io/
files = [
    {"folder": "Decentralized-Password-Manager", "file": "README.md", "layout": "default",
     "title": "Decentralized-Password-Manager", "link": "fr/Decentralized-Password-Manager", "output": "Decentralized-Password-Manager.md"},
    {"folder": "Dictionaries", "file": "README.md", "layout": "default",
     "title": "Dictionaries", "link": "Dictionaries", "output": "Dictionaries.md"},
    {"folder": "Dictionaries", "file": "README-fr.md", "layout": "default",
     "title": "Dictionaries", "link": "fr/Dictionaries", "output": "Dictionaries-fr.md"},
    {"folder": "Genex", "file": "README.md", "layout": "default",
     "title": "Genex", "link": "fr/Genex", "output": "Genex.md"},
    {"folder": "Introduction-to-Computer-Science", "file": "README.md", "layout": "default",
     "title": "Introduction to Computer Science", "link": "fr/Introduction-to-Computer-Science", "output": "Introduction-to-Computer-Science.md"},
    {"folder": "Languages", "file": "README.md", "layout": "default",
     "title": "Languages", "link": "fr/Languages", "output": "Languages.md"},
    {"folder": "Languages", "file": "Sheets/Bash-Unix.md", "layout": "default",
     "title": "Bash Unix", "link": "fr/Languages/Bash-Unix", "output": "Languages.Bash-Unix.md"},
    {"folder": "Languages", "file": "Sheets/DOT.md", "layout": "default",
     "title": "DOT", "link": "fr/Languages/DOT", "output": "Languages.DOT.md"},
    {"folder": "Languages", "file": "Sheets/Git.md", "layout": "default",
     "title": "Git", "link": "fr/Languages/Git", "output": "Languages.Git.md"},
    {"folder": "Languages", "file": "Sheets/GDB.md", "layout": "default",
     "title": "GDB", "link": "fr/Languages/GDB", "output": "Languages.GDB.md"},
    {"folder": "Languages", "file": "Examples/Markdown.md", "layout": "default",
     "title": "Markdown", "link": "fr/Languages/Markdown", "output": "Languages.Markdown.md"},
    {"folder": "Languages", "file": "Sheets/JavaScript.md", "layout": "default",
     "title": "JavaScript", "link": "fr/Languages/JavaScript", "output": "Languages.JavaScript.md"},
    {"folder": "Lining-draw", "file": "README.md", "layout": "default",
     "title": "Lining draw", "link": "Lining-draw", "output": "Lining-draw.md"},
    {"folder": "Lining-draw", "file": "README-fr.md", "layout": "default",
     "title": "Lining draw", "link": "fr/Lining-draw", "output": "Lining-draw-fr.md"},
    {"folder": "Loup-garou", "file": "README.md", "layout": "default",
     "title": "Loup-garou", "link": "fr/Loup-garou", "output": "Loup-garou.md"},
    {"folder": "Markdown-Table-of-Contents", "file": "README.md", "layout": "default",
     "title": "Markdown Table of Contents", "link": "Markdown-Table-of-Contents", "output": "Markdown-Table-of-Contents.md"},
    {"folder": "Markdown-Table-of-Contents", "file": "README-fr.md", "layout": "default",
     "title": "Markdown Table of Contents", "link": "fr/Markdown-Table-of-Contents", "output": "Markdown-Table-of-Contents-fr.md"},
    {"folder": "Maths-for-IT", "file": "README.md", "layout": "default",
     "title": "Maths for IT", "link": "fr/Maths-for-IT", "output": "Maths-for-IT.md"},
    {"folder": "Relex12", "file": "README.md", "layout": "default",
     "title": "Relex12 - Adrian Bonnet", "link": "null", "output": "index.md"},
    {"folder": "Secret-Santa", "file": "README.md", "layout": "default",
     "title": "Secret Santa", "link": "fr/Secret-Santa", "output": "Secret-Santa.md"},
    {"folder": "Simple-Progress-Bar", "file": "README.md", "layout": "default",
     "title": "Simple Progress Bar", "link": "Simple-Progress-Bar", "output": "Simple-Progress-Bar.md"},
    {"folder": "Simple-Progress-Bar", "file": "README-fr.md", "layout": "default",
     "title": "Simple Progress Bar", "link": "fr/Simple-Progress-Bar", "output": "Simple-Progress-Bar-fr.md"},
    {"folder": "Voting-Systems-Comparison", "file": "README.md", "layout": "default",
     "title": "Voting Systems Comparison", "link": "fr/Voting-Systems-Comparison", "output": "Voting-Systems-Comparison.md"},
    {"folder": "Voting-Systems-Simulation", "file": "README.md", "layout": "default",
     "title": "Voting Systems Simulation", "link": "Voting-Systems-Simulation", "output": "Voting-Systems-Simulation.md"},
    {"folder": "Voting-Systems-Simulation", "file": "README-fr.md", "layout": "default",
     "title": "Voting Systems Simulation", "link": "fr/Voting-Systems-Simulation", "output": "Voting-Systems-Simulation-fr.md"},
    {"folder": "Voting-Systems-Simulation", "file": "doc/simulation.html", "layout": "null",
     "title": "Voting Systems Simulation Doc", "link": "Voting-Systems-Simulation/doc/simulation", "output": "Voting-Systems-Simulation.doc.simulation.html"},
    {"folder": "Voting-Systems-Simulation", "file": "doc/voting.html", "layout": "null",
     "title": "Voting Systems Simulation Doc", "link": "Voting-Systems-Simulation/doc/voting", "output": "Voting-Systems-Simulation.doc.voting.html"},
    {"folder": "Website-manager", "file": "README.md", "layout": "default",
     "title": "Website manager", "link": "Website-manager", "output": "Website-manager.md"},
    {"folder": "Word-machine", "file": "README.md", "layout": "default",
     "title": "Word Machine", "link": "Word-machine", "output": "Word-machine.md"},
    {"folder": "Word-machine", "file": "doc/dictionary.html", "layout": "null",
     "title": "Word Machine Doc", "link": "Word-machine/doc/dictionary", "output": "Word-machine.doc.dictionary.html"},
    {"folder": "Word-machine", "file": "doc/generation.html", "layout": "null",
     "title": "Word Machine Doc", "link": "Word-machine/doc/generation", "output": "Word-machine.doc.generation.html"},
    {"folder": "Word-machine", "file": "doc/word-machine.html", "layout": "null",
     "title": "Word Machine Doc", "link": "Word-machine/doc/word-machine", "output": "Word-machine.doc.word-machine.html"},
]
####################
# FILES GENERATION #
####################

# For each declared page: refresh its table of contents, then write the
# Jekyll front matter followed by the source file's content into the site
# repository.  Entries whose source checkout is missing are reported.
for entry in files:
    if path.exists("../{}/".format(entry["folder"])):
        # Regenerate the table of contents in the source file first.
        system("python3 ../Markdown-Table-of-Contents/toc.py ../{}/{}".format(entry["folder"], entry["file"]))
        front_matter = """---
layout: {}
title: "{}"
permalink: {}
---
""".format(entry["layout"], entry["title"], entry["link"])
        # Bug fix: use context managers so the file handles are always
        # closed (the original leaked both descriptors on every iteration).
        with open("../{}/{}".format(entry["folder"], entry["file"]), 'r') as input_file:
            content = input_file.read()
        print(entry["output"])
        with open("../Relex12.github.io/{}".format(entry["output"]), 'w') as output_file:
            output_file.write(front_matter + content)
    else:
        print("Cannot create {}, {}/{} is missing.".format(entry["output"], entry["folder"], entry["file"]))
| 67.792079 | 154 | 0.637505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,330 | 0.778443 |
df938ddabcdb9ae4b9d826fa87e296dc1b34c69d | 11,286 | py | Python | scripts/clang_git_format/clang_git_format/repo.py | zarmomin/mrpt | 1baff7cf8ec9fd23e1a72714553bcbd88c201966 | [
"BSD-3-Clause"
] | 9 | 2017-11-19T16:18:09.000Z | 2020-07-16T02:13:43.000Z | scripts/clang_git_format/clang_git_format/repo.py | gao-ouyang/mrpt | 4af5fdf7e45b00be4a64c3d4f009acb9ef415ec7 | [
"BSD-3-Clause"
] | null | null | null | scripts/clang_git_format/clang_git_format/repo.py | gao-ouyang/mrpt | 4af5fdf7e45b00be4a64c3d4f009acb9ef415ec7 | [
"BSD-3-Clause"
] | 4 | 2018-06-08T07:55:51.000Z | 2022-02-22T08:56:28.000Z | from utils import callo
import os
import subprocess
import re
import logging
logger = logging.getLogger("clang-format")
class Repo(object):
    """Class encapsulates all knowledge about a git repository, and its
    metadata to run clang-format.
    """

    def __init__(self, path, custom_regex="", dirs_in=None, dirs_out=None):
        """Initialization method.

        :path str Relative path to the Root of the repository
        :custom_regex str Extra regular expression the file names must match
        :dirs_in list Only files under these directories are considered
        :dirs_out list Files under these directories are excluded

        WARNING: After initialization of the Repo, users should set the
        languages the repo contains (langs_used). This is used to run
        clang-format only on files designated by the language.
        """
        logger.info("Initializing repo...")

        self.path = path
        self.custom_regex = "(" + custom_regex + ")"

        # Programming languages that the files in this repo are written in.
        # Variable is used to decide what files clang-format ultimately is
        # going to operate on.
        self.langs_to_file_endings = {
            "cpp": ["h", "hxx", "cpp", "cc", "cxx"],
            "c": ["h", "c"],
            "objc": ["h", "mm"],
            "java": ["class", "java"],
            "javascript": ["js"],
            None: [],
        }

        # Bug fix: avoid mutable default arguments; a shared list default
        # would be reused across every Repo instance.
        if dirs_in is None:
            dirs_in = []
        if dirs_out is None:
            dirs_out = []

        # Bug fix: the original used ``assert cond and "msg"`` which never
        # attaches the message; ``assert cond, "msg"`` is the intended form.
        assert isinstance(dirs_in, list), \
            "dirs_in should be a list of directories. Instead got {}" \
            .format(dirs_in)
        self.dirs_in = dirs_in

        assert isinstance(dirs_out, list), \
            "dirs_out should be a list of directories. Instead got {}" \
            .format(dirs_out)
        self.dirs_out = dirs_out

        # default language is cpp
        self._langs_used = ["cpp"]
        self.root = self._get_root()

    @property
    def langs_used(self):
        """Programming languages whose files this repo contains."""
        return self._langs_used

    @langs_used.setter
    def langs_used(self, langs_in):
        """Set the programming languages that the repo contains files of."""
        if not langs_in:
            return

        # Bug fix: same ``assert cond, "msg"`` correction as in __init__.
        assert isinstance(langs_in, list), \
            ("The languages of the repo should be provided in a list of "
             "strings.\nExiting...")

        if set([i for i in langs_in
                if i in self.langs_to_file_endings.keys()]) != set(langs_in):
            logger.fatal("The following languages are available to use: %s",
                         self.langs_to_file_endings.keys())
            exit(1)

        self._langs_used = langs_in

    def _callgito(self, args):
        """Call git for this repository, and return the captured output
        """
        # These two flags are the equivalent of -C in newer versions of Git
        # but we use these to support versions pre 1.8.5 but it depends on
        # the command and what the current directory is
        return callo([
            'git', '--git-dir',
            os.path.join(self.path, ".git"), '--work-tree', self.path
        ] + args)

    def _callgit(self, args):
        """Call git for this repository without capturing output

        This is designed to be used when git returns non-zero exit codes.
        """
        # These two flags are the equivalent of -C in newer versions of Git
        # but we use these to support versions pre 1.8.5 but it depends on
        # the command and what the current directory is
        return subprocess.call([
            'git', '--git-dir',
            os.path.join(self.path, ".git"), '--work-tree', self.path
        ] + args)

    def _get_local_dir(self, path):
        """Get a directory path relative to the git root directory
        """
        if os.path.isabs(path):
            return os.path.relpath(path, self.root)
        return path

    def get_candidates(self, candidates):
        """Get the set of candidate files to check by querying the repository

        Returns the full path to the file for clang-format to consume.
        """
        if candidates is not None and len(candidates) > 0:
            candidates = [self._get_local_dir(f) for f in candidates]
            valid_files = list(
                set(candidates).intersection(self.get_candidate_files()))
        else:
            valid_files = list(self.get_candidate_files())

        # Get the full file name here
        valid_files = [
            os.path.normpath(os.path.join(self.root, f)) for f in valid_files
        ]

        return valid_files

    def get_root(self):
        """Get the root directory for this repository
        """
        return self.root

    def _get_root(self):
        """Gets the root directory for this repository from git
        """
        gito = self._callgito(['rev-parse', '--show-toplevel'])
        return gito.rstrip()

    def _git_ls_files(self, cmd):
        """Run git-ls-files and filter the list of files to a valid candidate
        list

        This constitutes a backbone method for fetching the list of files on
        which clang-format operates on.
        """
        gito = self._callgito(cmd)
        # This allows us to pick all the interesting files
        # in the mongo and mongo-enterprise repos
        file_list = [line.rstrip() for line in gito.splitlines()]

        final_list = self.filter_files_by_dir(file_list)
        files_regexp = self.get_files_regexp()
        final_list = [l for l in final_list if files_regexp.search(l)]

        logger.warning("Executing clang-format on %d files", len(final_list))
        return final_list

    def filter_files_by_dir(self, file_list):
        """Filter the given list of files based on the configured
        include (dirs_in) and exclude (dirs_out) directory lists.
        """
        # If dirs_in is given, keep only files whose path contains at least
        # one of those directories.
        if self.dirs_in:
            valid_files_in = [
                line for line in file_list
                if any(self._dir_filter(d, line, do_include=True)
                       for d in self.dirs_in)
            ]
        else:
            valid_files_in = file_list

        # If dirs_out is given, drop files whose path contains any of those
        # directories.
        if self.dirs_out:
            valid_files_out = [
                line for line in valid_files_in
                if all(self._dir_filter(d, line, do_include=False)
                       for d in self.dirs_out)
            ]
        else:
            valid_files_out = valid_files_in

        return valid_files_out

    def get_files_regexp(self):
        """Return the compiled regular expression that selects the files on
        which clang-format is actually going to run.

        This takes into account:
        - Language suffixes that are to be considered
        - User-provided custom regexp
        """
        extensions = []
        for lang in self.langs_used:
            lang_exts = self.langs_to_file_endings[lang]
            # Match both the lower- and upper-case variant of each suffix.
            extensions.extend(lang_exts + [ext.upper() for ext in lang_exts])
        # Bug fix: join with '|' instead of appending a trailing '|', which
        # created an empty alternative that also matched any file name that
        # merely ends with a dot.
        files_match_str = "(" + "|".join(extensions) + ")"

        files_regexp = re.compile(
            '{}\\.{}$'.format(self.custom_regex, files_match_str))
        logger.warning("Regexp to find source files: %s", files_regexp.pattern)
        return files_regexp

    def get_candidate_files(self):
        """Query git to get a list of all files in the repo to consider for
        analysis
        """
        return self._git_ls_files(["ls-files", "--cached"])

    def get_working_tree_candidate_files(self):
        """Query git to get a list of all files in the working tree to
        consider for analysis. Files may not be managed by Git
        """
        files = self._git_ls_files(["ls-files", "--cached", "--others"])
        return files

    def get_working_tree_candidates(self):
        """Get the set of candidate files to check by querying the repository

        Returns the full path to the file for clang-format to consume.
        """
        valid_files = list(self.get_working_tree_candidate_files())
        # Get the full file name here
        valid_files = [
            os.path.normpath(os.path.join(self.root, f)) for f in valid_files
        ]
        return valid_files

    def is_detached(self):
        """Is the current working tree in a detached HEAD state?
        """
        # symbolic-ref returns 1 if the repo is in a detached HEAD state
        return self._callgit(["symbolic-ref", "--quiet", "HEAD"])

    def is_ancestor(self, parent, child):
        """Is the specified parent hash an ancestor of child hash?
        """
        # merge base returns 0 if parent is an ancestor of child
        return not self._callgit(["merge-base", "--is-ancestor", parent, child])

    def is_commit(self, sha1):
        """Is the specified hash a valid git commit?
        """
        # cat-file -e returns 0 if it is a valid hash
        return not self._callgit(["cat-file", "-e", "%s^{commit}" % sha1])

    def is_working_tree_dirty(self):
        """Does the current working tree have changes?
        """
        # diff returns 1 if the working tree has local changes
        return self._callgit(["diff", "--quiet"])

    def does_branch_exist(self, branch):
        """Does the branch exist?
        """
        # rev-parse returns 0 if the branch exists
        return not self._callgit(["rev-parse", "--verify", branch])

    def get_merge_base(self, commit):
        """Get the merge base between 'commit' and HEAD"""
        return self._callgito(["merge-base", "HEAD", commit]).rstrip()

    def get_branch_name(self):
        """Get the current branch name, short form

        This returns "master", not "refs/head/master"
        Will not work if the current branch is detached
        """
        branch = self.rev_parse(["--abbrev-ref", "HEAD"])
        if branch == "HEAD":
            raise ValueError("Branch is currently detached")
        return branch

    def add(self, command):
        """git add wrapper
        """
        return self._callgito(["add"] + command)

    def checkout(self, command):
        """git checkout wrapper
        """
        return self._callgito(["checkout"] + command)

    def commit(self, command):
        """git commit wrapper
        """
        return self._callgito(["commit"] + command)

    def diff(self, command):
        """git diff wrapper
        """
        return self._callgito(["diff"] + command)

    def log(self, command):
        """git log wrapper
        """
        return self._callgito(["log"] + command)

    def rev_parse(self, command):
        """git rev-parse wrapper
        """
        return self._callgito(["rev-parse"] + command).rstrip()

    def rm(self, command):
        """git rm wrapper
        """
        return self._callgito(["rm"] + command)

    def show(self, command):
        """git show wrapper
        """
        return self._callgito(["show"] + command)

    @staticmethod
    def _dir_filter(d, line, do_include=True):
        """Return True if ``line`` contains (do_include=True) or does not
        contain (do_include=False) the given directory ``d``.
        """
        if do_include:
            ret = d in line
        else:
            ret = d not in line
        return ret
| 32.618497 | 80 | 0.589048 | 11,160 | 0.988836 | 0 | 0 | 1,084 | 0.096048 | 0 | 0 | 4,828 | 0.427787 |
df93ed3728e8d20bbc77012e16ddc613f4f0f3e2 | 5,909 | py | Python | bot.py | jeonghunn/EverytimeBot | 99b411adb11eeaec5c254805c759b63fa9934d3a | [
"MIT"
] | 2 | 2019-07-23T03:10:58.000Z | 2020-10-06T13:47:04.000Z | bot.py | jeonghunn/EverytimeBot | 99b411adb11eeaec5c254805c759b63fa9934d3a | [
"MIT"
] | null | null | null | bot.py | jeonghunn/EverytimeBot | 99b411adb11eeaec5c254805c759b63fa9934d3a | [
"MIT"
] | null | null | null | """Simple Bot to reply to Telegram messages.
This is built on the API wrapper, see echobot2.py to see the same example built
on the telegram.ext bot framework.
This program is dedicated to the public domain under the CC0 license.
"""
import logging
import telegram
import requests, json
import traceback
from time import sleep
import model as ml
import tensorflow as tf
from earlystop import EarlyStopping
import random
import math
import os, sys
import data
import datetime
from configs import DEFINES
update_id = 0
def __del__(self):
bot = telegram.Bot('auth')
bot.send_message(chat_id = -116418298, text="Penta 서비스가 종료되었습니다.")
def main():
"""Run the bot."""
global update_id
# Telegram Bot Authorization Token
bot = telegram.Bot('auth')
URL = "https://unopenedbox.com/develop/square/api.php"
last_message = ""
bootcount = 0
lcount = 0
readingold = False
readingold_lastcount = 0
now = datetime.datetime.now()
# get the first pending update_id, this is so we can skip over it in case
# we get an "Unauthorized" exception.
# 데이터를 통한 사전 구성 한다.
char2idx, idx2char, vocabulary_length = data.load_vocabulary()
# 에스티메이터 구성한다.
classifier = tf.estimator.Estimator(
model_fn=ml.Model, # 모델 등록한다.
model_dir=DEFINES.check_point_path, # 체크포인트 위치 등록한다.
params={ # 모델 쪽으로 파라메터 전달한다.
'hidden_size': DEFINES.hidden_size, # 가중치 크기 설정한다.
'layer_size': DEFINES.layer_size, # 멀티 레이어 층 개수를 설정한다.
'learning_rate': DEFINES.learning_rate, # 학습율 설정한다.
'teacher_forcing_rate': DEFINES.teacher_forcing_rate, # 학습시 디코더 인풋 정답 지원율 설정
'vocabulary_length': vocabulary_length, # 딕셔너리 크기를 설정한다.
'embedding_size': DEFINES.embedding_size, # 임베딩 크기를 설정한다.
'embedding': DEFINES.embedding, # 임베딩 사용 유무를 설정한다.
'multilayer': DEFINES.multilayer, # 멀티 레이어 사용 유무를 설정한다.
'attention': DEFINES.attention, # 어텐션 지원 유무를 설정한다.
'teacher_forcing': DEFINES.teacher_forcing, # 학습시 디코더 인풋 정답 지원 유무 설정한다.
'loss_mask': DEFINES.loss_mask, # PAD에 대한 마스크를 통한 loss를 제한 한다.
'serving': DEFINES.serving # 모델 저장 및 serving 유무를 설정한다.
})
while 1:
sleep(3)
now = datetime.datetime.now()
bootcount = bootcount + 1
lcount = lcount + 1
try:
#data = {'a': 'penta_check', 'auth': 'a1s2d3f4g5h6j7k8l9', 'start_num' : '0', 'number' : '15'}
#res = requests.post(URL, data=data)
#answer = "[보고]" + res.json()[0]['description'];
answer = ""
if bootcount == 1 :
#answer = "다시 시작했습니다. Penta 버전 1.0.625 밀린 채팅을 읽는 중 입니다..."
readingold = True
readingold_lastcount = bootcount
if readingold_lastcount < bootcount and readingold is True :
readingold = False
#bot.send_message(chat_id = -116418298, text="이전글 읽기 완료.")
if last_message != answer and answer != "" :
bot.send_message(chat_id = -116418298, text=answer)
last_message = answer
if last_message == answer :
tlm = ""
last_user = 0
last_talk = ""
updates = bot.get_updates(offset=update_id)
for i in updates:
if i.message:
if last_user != i.message.from_user.id :
last_talk = tlm
tlm = ""
last_user = i.message.from_user.id
# with open("./data_in/ChatBotData.csv", "a") as myfile:
# myfile.write("\n")
if i.message.text is not None and tlm != "" :
tlm = tlm + " " + i.message.text
# with open("./data_in/ChatBotData.csv", "a") as myfile:
# myfile.write(" " + i.message.text)
if i.message.text is not None and tlm == "" :
tlm = i.message.text
# with open("./data_in/ChatBotData.csv", "a") as myfile:
# myfile.write(i.message.text)
update_id = i.update_id + 1
now_last_id = updates[-1].update_id
if tlm != "" and tlm is not None and now_last_id + 1 <= update_id:
readingold_lastcount = readingold_lastcount +1
lcount = 0
if not readingold :
predic_input_enc, predic_input_enc_length = data.enc_processing([tlm], char2idx)
predic_target_dec, _ = data.dec_target_processing([""], char2idx)
# 예측을 하는 부분이다.
predictions = classifier.predict(
input_fn=lambda:data.eval_input_fn(predic_input_enc, predic_target_dec, DEFINES.batch_size))
# 예측한 값을 인지 할 수 있도록
# 텍스트로 변경하는 부분이다.
aimessage = data.pred2string(predictions, idx2char)
if aimessage != "" :
bot.send_message(chat_id = -116418298, text=aimessage)
except IndexError:
update_id = None
except:
if last_message != traceback.format_exc() and "Message text is empty" not in traceback.format_exc():
bot.send_message(chat_id = -11641828, text="[오류] 오류가 발생했습니다. 점검이 필요합니다. \n"+ traceback.format_exc())
last_message = traceback.format_exc()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def echo(bot):
"""Echo the message the user sent."""
global update_id
# Request updates after the last update_id
for update in bot.get_updates(offset=update_id, timeout=10):
update_id = update.update_id + 1
if update.message: # your bot can receive updates without messages
# Reply to the message
update.message.reply_text("HI")
if __name__ == '__main__':
main()
| 35.596386 | 110 | 0.588255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,446 | 0.376713 |
10c083bc2b320d261a43560fa1e3b71bb609880e | 3,509 | py | Python | apertools/log.py | scottstanie/apertools | f959d03038e77444204c1ff224ddd8357db3fc04 | [
"MIT"
] | 8 | 2020-02-22T15:44:20.000Z | 2022-02-16T02:49:57.000Z | apertools/log.py | scottstanie/apertools | f959d03038e77444204c1ff224ddd8357db3fc04 | [
"MIT"
] | null | null | null | apertools/log.py | scottstanie/apertools | f959d03038e77444204c1ff224ddd8357db3fc04 | [
"MIT"
] | 2 | 2020-11-02T15:04:15.000Z | 2022-01-08T20:20:58.000Z | """
This module exports a Log class that wraps the logging python package
Uses the standard python logging utilities, just provides
nice formatting out of the box.
Usage:
from apertools.log import get_log
logger = get_log()
logger.info("Something happened")
logger.warning("Something concerning happened")
logger.error("Something bad happened")
logger.critical("Something just awful happened")
logger.debug("Extra printing we often don't need to see.")
# Custom output for this module:
logger.success("Something great happened: highlight this success")
"""
import logging
import time
from functools import wraps
try:
from colorlog import ColoredFormatter
COLORS = True
except ImportError:
from logging import Formatter
COLORS = False
def get_log(debug=False, name=__file__, verbose=False):
"""Creates a nice log format for use across multiple files.
Default logging level is INFO
Args:
name (Optional[str]): The name the logger will use when printing statements
debug (Optional[bool]): If true, sets logging level to DEBUG
"""
logger = logging.getLogger(name)
return format_log(logger, debug=debug, verbose=verbose)
def format_log(logger, debug=False, verbose=False):
"""Makes the logging output pretty and colored with times"""
log_level = logging.DEBUG if debug else logging.INFO
log_colors = {
"DEBUG": "blue",
"INFO": "cyan",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "black,bg_red",
"SUCCESS": "white,bg_blue",
}
if COLORS:
format_ = "[%(asctime)s] [%(log_color)s%(levelname)s %(filename)s%(reset)s] %(message)s%(reset)s"
formatter = ColoredFormatter(
format_, datefmt="%m/%d %H:%M:%S", log_colors=log_colors
)
else:
format_ = "[%(asctime)s] [%(levelname)s %(filename)s] %(message)s"
formatter = Formatter(format_, datefmt="%m/%d %H:%M:%S")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.SUCCESS = 25 # between WARNING and INFO
logging.addLevelName(logging.SUCCESS, "SUCCESS")
setattr(
logger,
"success",
lambda message, *args: logger._log(logging.SUCCESS, message, args),
)
if not logger.handlers:
logger.addHandler(handler)
logger.setLevel(log_level)
if verbose:
logger.info("Logger initialized: %s" % (logger.name,))
if debug:
logger.setLevel(debug)
return logger
logger = get_log()
def log_runtime(f):
"""
Logs how long a decorated function takes to run
Args:
f (function): The function to wrap
Returns:
function: The wrapped function
Example:
>>> @log_runtime
... def test_func():
... return 2 + 4
>>> test_func()
6
This prints out to stderr the following in addition to the answer:
[05/26 10:05:51] [INFO log.py] Total elapsed time for test_func (minutes): 0.00
"""
@wraps(f)
def wrapper(*args, **kwargs):
t1 = time.time()
result = f(*args, **kwargs)
t2 = time.time()
elapsed_time = t2 - t1
time_string = "Total elapsed time for {} : {} minutes ({} seconds)".format(
f.__name__,
"{0:.2f}".format(elapsed_time / 60.0),
"{0:.2f}".format(elapsed_time),
)
logger.info(time_string)
return result
return wrapper
| 25.992593 | 105 | 0.624964 | 0 | 0 | 0 | 0 | 431 | 0.122827 | 0 | 0 | 1,796 | 0.511827 |
10c1051c785132345aeb5a605fae2ff4baa5c6ec | 2,387 | py | Python | monitoring/notifications/tests.py | okozachenko1203/monasca-ui | 9014bce3de6fed79f85b79ecf30031e6af7b7356 | [
"Apache-2.0"
] | 17 | 2015-10-18T02:54:57.000Z | 2018-08-05T21:58:20.000Z | monitoring/notifications/tests.py | okozachenko1203/monasca-ui | 9014bce3de6fed79f85b79ecf30031e6af7b7356 | [
"Apache-2.0"
] | null | null | null | monitoring/notifications/tests.py | okozachenko1203/monasca-ui | 9014bce3de6fed79f85b79ecf30031e6af7b7356 | [
"Apache-2.0"
] | 29 | 2016-07-07T01:27:01.000Z | 2021-12-03T07:24:17.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from unittest.mock import patch
from monitoring.notifications import constants
from monitoring.test import helpers
INDEX_URL = reverse(
constants.URL_PREFIX + 'index')
CREATE_URL = reverse(
constants.URL_PREFIX + 'notification_create')
EDIT_URL = reverse(
constants.URL_PREFIX + 'notification_edit', args=('12345',))
class AlarmsTest(helpers.TestCase):
def test_index(self):
with patch('monitoring.api.monitor', **{
'spec_set': ['notification_list'],
'notification_list.return_value': [],
}) as mock:
res = self.client.get(INDEX_URL)
self.assertEqual(mock.notification_list.call_count, 2)
self.assertTemplateUsed(
res, 'monitoring/notifications/index.html')
def test_notifications_create(self):
with patch('monitoring.api.monitor', **{
'spec_set': ['notification_type_list'],
'notification_type_list.return_value': [],
}) as mock:
res = self.client.get(CREATE_URL)
self.assertEqual(mock. notification_type_list.call_count, 1)
self.assertTemplateUsed(
res, 'monitoring/notifications/_create.html')
def test_notifications_edit(self):
with patch('monitoring.api.monitor', **{
'spec_set': ['notification_get', 'notification_type_list'],
'notification_get.return_value': {
'alarm_actions': []
},
'notification_type_list.return_value': [],
}) as mock:
res = self.client.get(EDIT_URL)
self.assertEqual(mock.notification_get.call_count, 1)
self.assertEqual(mock.notification_type_list.call_count, 1)
self.assertTemplateUsed(
res, 'monitoring/notifications/_edit.html')
| 36.166667 | 76 | 0.668622 | 1,465 | 0.613741 | 0 | 0 | 0 | 0 | 0 | 0 | 1,049 | 0.439464 |
10c1fc508104087ec1126cf7552b5571efddbe5f | 383 | py | Python | basic/sort/03_insert.py | weihuchao/algorithm | 5a44eff323e09fd57d3f2f2f38b4762de0dc8dda | [
"MIT"
] | null | null | null | basic/sort/03_insert.py | weihuchao/algorithm | 5a44eff323e09fd57d3f2f2f38b4762de0dc8dda | [
"MIT"
] | null | null | null | basic/sort/03_insert.py | weihuchao/algorithm | 5a44eff323e09fd57d3f2f2f38b4762de0dc8dda | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/12/4 22:00
# @Author : weihuchao
def insert_sort(data):
"""
从前往后遍历, 假定前面部分已经有序, 查看当前元素应该在前面部分的哪个位置
"""
for idx in range(1, len(data)):
tmp = data[idx]
j = idx - 1
while j >= 0 and tmp < data[j]:
data[j + 1] = data[j]
j = j - 1
data[j + 1] = tmp
| 22.529412 | 42 | 0.480418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.478936 |
10c2936ea0c2d4efb7c7cbaed1c8e284e4da6785 | 884 | py | Python | GraphDefs/create_GraphDefs.py | AnotherGroupChat/Machine-Learning | 73731fd67a90782940736e47d99165adcc1469ed | [
"MIT"
] | null | null | null | GraphDefs/create_GraphDefs.py | AnotherGroupChat/Machine-Learning | 73731fd67a90782940736e47d99165adcc1469ed | [
"MIT"
] | null | null | null | GraphDefs/create_GraphDefs.py | AnotherGroupChat/Machine-Learning | 73731fd67a90782940736e47d99165adcc1469ed | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import tensorflow as tf
import numpy as np
BASE = 'GraphDefs'
def writeg(name):
session = tf.Session()
session.run(tf.initialize_all_variables())
tf.train.write_graph(session.graph_def, BASE, name + '.txt', True)
tf.train.write_graph(session.graph_def, BASE, name, False)
tf.reset_default_graph()
del session
print (name)
writeg('empty.pb')
a = tf.constant(42, name='a')
writeg('a=42.pb')
a = tf.constant([[-1.1, -2.1],[-1.2,-2.2]], name='a')
writeg('a=2x2.pb')
a = tf.constant(0x4242424242424242, name='a')
writeg('a=4242424242424242.pb')
a = tf.constant(0.42, name='a')
writeg('a=0.42.pb')
a = tf.Variable(6.0, name='a')
b = tf.Variable(7.0, name='b')
c = tf.mul(a, b, name="c")
writeg('c=(a=6.0)+(b=7.0).pb')
a = tf.Variable(6, name='a')
b = tf.Variable(7, name='b')
c = tf.mul(a, b, name="c")
writeg('c=(a=6)+(b=7).pb')
| 20.55814 | 70 | 0.626697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.190045 |
10c36b8fc1840cfb5a40958316805cad9712d3bb | 8,254 | py | Python | snipmyvideo.py | rascoro1/SnipMyVideo | 0728684fa271ec140f90c7878e41d7e55eec1989 | [
"MIT"
] | 3 | 2016-07-22T00:34:50.000Z | 2016-09-02T16:52:23.000Z | snipmyvideo.py | rascoro1/SnipMyVideo | 0728684fa271ec140f90c7878e41d7e55eec1989 | [
"MIT"
] | 2 | 2016-09-06T04:11:44.000Z | 2018-01-29T21:57:51.000Z | snipmyvideo.py | rascoro1/SnipMyVideo | 0728684fa271ec140f90c7878e41d7e55eec1989 | [
"MIT"
] | 1 | 2016-09-02T20:39:25.000Z | 2016-09-02T20:39:25.000Z | from moviepy.editor import *
from moviepy import Clip
import sys
import os
"""
This is a simple script that can only snip your code and return it back in one video
This script requires moviepy.py
Is can be obtained through pip:
pip install moviepy
Output file can be '.mp4', 'ogv', 'webm'
You can modify script to output other files based off the codec.
Further reasearch into moviepy.VideoFileClip.write_videofile
Usage: SnipMyVideo.py video.mp4 output.mp4 30-60 90-120 20:20-20:40 1:20:15-1:30:15
E.g: This would create two snipets (unlimited snipets can be created),
the first snipet resembles the time 30seconds to 60 seconds from video.m94
These snipets are then concatenated and written to output.mp4
Only a couple lines actually using moviepy.
"""
###################### Declaring Globals ##################################
SCRIPT_NAME = "" # Name of the script
FNAME = "" # Input filename
OUT_FNAME = "" # Output filename
SNIPPETS = [] # Raw Snippet times given as arguments by the user
SNIPPET_TIMES = [] # Snippet times in seconds
VERBOSE = True # Turn to false if you would not like verbose information
IS_AUDIO_FILE = False
def check_num_of_arguments():
"""
Quick check to make sure the correct number of argumets are portrayed.
First function that is called
:return:
"""
if len(sys.argv) < 4:
print "You have not given enough Arguments"
print "usage: " + sys.argv[0] + " inputfile.mp4 outputfile.mp4 20-30"
print "Time is in seconds"
sys.exit(6)
def trim_arguments():
"""
This trims all of the input arguments from the user
and assagins information to the according global variable
:return:
"""
global SCRIPT_NAME, FNAME, OUT_FNAME, SNIPPETS
SCRIPT_NAME = sys.argv[0]
args = sys.argv[1:] # Trimming of SCRIPT_NAME
FNAME = args[0]
args = args[1:] # Timming off fname
OUT_FNAME = args[0]
SNIPPETS = args[1:] # Trimming off the output filename
del args # Delete old args (REPLACED BY GLOBAL)
def check_args():
"""
Make sure arguments are in the write format
"""
if not os.path.isfile(FNAME):
print "The Input file does not exist."
sys.exit(1)
if os.path.isfile(OUT_FNAME):
print "Output file already exists"
sys.exit(2)
if not os.path.isdir(os.path.abspath(OUT_FNAME).rstrip(os.path.basename(OUT_FNAME))):
print "Output file directory does not exists"
sys.exit(3)
def check_time(time, min, max):
"""
Check time submitted by user to make sure time is correct and follows a correct format
:param time: time requested by user
:param min: min possible value
:param max: max possible value
:return:
"""
if int(time) >= min and int(time) <= max:
return True
else:
return False
def convert_human_readable_time(time):
"""
Convert human readable format of time into seconds
:param time: human readable string format of 1:20:30 or 20:38 or 38
:return: int time in seconds
"""
if ':' in time: # The time follows the min:sec format and must be converted
time = time.split(':')
if len(time) == 2: # Min:Sec format
min, sec = time
verbose("convert_human_readable_time("+str(time)+")", "Min:Sec Format")
if check_time(min, 0, 59) and check_time(sec, 0, 59): # Making sure the times are between possible amounts
verbose("convert_human_readable_time(" + str(time) + ")", "time = " + str(min) + " * 60 + " + sec)
time = int(min) * 60 + int(sec)
else:
print("Incorrect Time has been submitted: " + str(time) + " min:sec 0:0-59:60")
sys.exit(10)
elif len(time) == 3: # Hour:Min:Sec format
hour, min, sec = time
verbose("convert_human_readable_time("+str(time)+")", "Hour:Min:Sec Format")
if check_time(hour, 0, 23) and check_time(min, 0, 59) and check_time(sec, 0, 59): # Making sure the times are between possible amounts
verbose("convert_human_readable_time(" + str(time) + ")", "time = " + str(hour) + " * 3600 + " + str(min) + " * 60 + " + sec)
time = int(hour) * 3600 + int(min) * 60 + int(sec)
else:
print("Incorrect Time has been submitted: " + str(time) + " hour:min:sec 0:0:0-23:59:59")
sys.exit(10)
try:
time = int(time)
except ValueError as e:
print "Value Error: Given time is not a digit" + e.message
sys.exit(8)
verbose("convert_human_readable_time(" + str(time) + ")", "Returned time is -> " + str(time))
return time # If the time does not need to be converted (does not contain ':') it will still be appened
def get_snippet_time(snippet):
"""
This allows for easier use of snipping longer videos
conversion of
hour:min:sec & min:sec & sec e.g 1:20:15 & 20:48 & secs
to seconds
:param args: One snippet of time start and end time
:return: Dict of snippet e.g. {'start':20, 'stop': 40}
"""
if "-" not in snippet: # Checking to see if snippet time was inputted correctly
print("The arguments for the snippet time is not in the correct format: " + snippet)
print("Correct usage is: 20-30 or 20:30-20:35 or 1:20:30-1:20:35 ")
sys.exit(7)
start, stop = snippet.split('-', 1) # start and stop times of snippet
start = convert_human_readable_time(start)
stop = convert_human_readable_time(stop)
snippet = {'start': start, 'stop': stop}
return snippet
def get_all_snippet_times():
"""
Get all the snippet times in seconds
:return:
"""
for snippet in SNIPPETS:
snippet = get_snippet_time(snippet)
for key in snippet:
if snippet[key] < 0:
print "Input must be a positive number"
sys.exit(5)
if snippet['start'] > snippet['stop']:
print('Start needs to be smaller than stop for snipet, exiting.')
verbose("check_args()", "start=" + str(snippet['start']) + ", stop="+str(snippet['stop']))
sys.exit(6)
SNIPPET_TIMES.append(snippet)
def determine_if_audio_file():
"""
"""
global IS_AUDIO_FILE
audio_file_extensions = (".mp3", ".m4a")
IS_AUDIO_FILE = True if FNAME.endswith(audio_file_extensions) else False
def get_snippets():
"""
:return: List of moviepy.subclip objects
"""
snippets = []
clip = AudioFileClip(FNAME) if IS_AUDIO_FILE else VideoFileClip(FNAME)
for snippet in SNIPPET_TIMES:
snippets.append(clip.subclip(snippet['start'], snippet['stop']))
print "Created Snippet:\n\tStarting: " + str(snippet['start']) + " STOPPING: " + str(snippet['stop'])
return snippets
def create_video(snippets):
"""
Concatinate all the snippets together into one movie and write
:param snippets: THis is a list of moviepy.subclip
:return: write out one file
"""
if not IS_AUDIO_FILE:
video = concatenate(snippets)
print "Combined Snipets into one Video"
print "Writing Video to " + OUT_FNAME
video.write_videofile(OUT_FNAME)
else:
audio = concatenate_audioclips(snippets)
print "Combined Snipets into one Audio File"
print "Writing Video to " + OUT_FNAME
audio.write_audiofile(OUT_FNAME)
def verbose(title, info):
"""
Loggin verbose notes when VERBOSE is True
Used for debuging and is very helpful
:param title:
:param info:
:return:
"""
if VERBOSE:
print SCRIPT_NAME + " -> " + str(title) + ": " + info
if __name__ == "__main__":
check_num_of_arguments() # Make sure we have the correct number of arguments
trim_arguments() # Trim the args to just have the snippets at the end
check_args() # check the args for correctness
get_all_snippet_times() # All snippet times in seconds and organized Also checked for correctness
determine_if_audio_file()
snippets = get_snippets() # Get all the moviepy.subclip objects for each snippet
create_video(snippets) # Concatinate the Snippets together
| 34.974576 | 146 | 0.63448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,406 | 0.533802 |
10c3bc22e15792439f59ee32dc4a6907024f22a7 | 20,551 | py | Python | nutils/matrix.py | JochenHinz/nutils | ac18dd6825b107e2e4c186ebb1598dbf0fff0f77 | [
"MIT"
] | null | null | null | nutils/matrix.py | JochenHinz/nutils | ac18dd6825b107e2e4c186ebb1598dbf0fff0f77 | [
"MIT"
] | null | null | null | nutils/matrix.py | JochenHinz/nutils | ac18dd6825b107e2e4c186ebb1598dbf0fff0f77 | [
"MIT"
] | null | null | null | # Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
The matrix module defines an abstract :class:`Matrix` object and several
implementations. Matrix objects support basic addition and subtraction
operations and provide a consistent interface for solving linear systems.
Matrices can be converted into other forms suitable for external processing via
the ``export`` method.
"""
from . import numpy, numeric, warnings, cache, types, config, util
import abc, sys, ctypes, treelog as log
class MatrixError(Exception):
  'raised when a matrix operation or linear solve cannot be completed'
class Backend(metaclass=abc.ABCMeta):
  'backend base class; activate by using an instance as a context manager'

  def __enter__(self):
    # A second __enter__ without a matching __exit__ would overwrite the saved
    # previous backend, so refuse reentrant activation outright.
    if hasattr(self, '_old_backend'):
      raise RuntimeError('This context manager is not reentrant.')
    global _current_backend
    # Save the currently active backend and install this one in its place.
    self._old_backend, _current_backend = _current_backend, self
    return self

  def __exit__(self, etype, value, tb):
    if not hasattr(self, '_old_backend'):
      raise RuntimeError('This context manager is not yet entered.')
    global _current_backend
    # Restore the backend that was active before __enter__ and drop the
    # bookkeeping attribute so the instance can be reused.
    _current_backend = self._old_backend
    del self._old_backend

  @abc.abstractmethod
  def assemble(self, data, index, shape):
    '''Assemble a (sparse) tensor of the given ``shape`` from value/index pairs.

    .. Note:: This function is abstract.
    '''
class Matrix(metaclass=types.CacheMeta):
  'matrix base class'

  def __init__(self, shape):
    # Matrices are strictly two-dimensional; shape is (nrows, ncols).
    assert len(shape) == 2
    self.shape = shape

  @abc.abstractmethod
  def __add__(self, other):
    'add two matrices'

  @abc.abstractmethod
  def __mul__(self, other):
    'multiply matrix with a scalar'

  @abc.abstractmethod
  def __neg__(self):
    # BUGFIX: __neg__ is a unary operation and receives only self; the
    # spurious 'other' parameter of the original abstract signature matched
    # neither the data model nor any concrete implementation in this module.
    'negate matrix'

  def __sub__(self, other):
    'subtract two matrices, implemented as addition of the negation'
    return self.__add__(-other)

  def __rmul__(self, other):
    'right-multiply matrix with a scalar (scalar * matrix)'
    return self.__mul__(other)

  def __truediv__(self, other):
    'divide matrix by a scalar'
    return self.__mul__(1/other)

  @property
  @abc.abstractmethod
  def T(self):
    'transpose matrix'

  @property
  def size(self):
    'total number of entries: nrows * ncols'
    return numpy.prod(self.shape)

  def rowsupp(self, tol=0):
    'return row indices with nonzero/non-small entries'
    data, (row, col) = self.export('coo')
    supp = numpy.zeros(self.shape[0], dtype=bool)
    # A row is supported if any of its entries exceeds tol in absolute value.
    supp[row[abs(data) > tol]] = True
    return supp

  @abc.abstractmethod
  def solve(self, rhs=None, *, lhs0=None, constrain=None, rconstrain=None, **solverargs):
    '''Solve system given right hand side vector and/or constraints.

    Args
    ----
    rhs : :class:`float` vector or :any:`None`
        Right hand side vector. `None` implies all zeros.
    lhs0 : :class:`float` vector or :any:`None`
        Initial values. `None` implies all zeros.
    constrain : :class:`float` or :class:`bool` array, or :any:`None`
        Column constraints. For float values, a number signifies a constraint,
        NaN signifies a free dof. For boolean, a True value signifies a
        constraint to the value in `lhs0`, a False value signifies a free dof.
        `None` implies no constraints.
    rconstrain : :class:`bool` array or :any:`None`
        Row constraints. A True value signifies a constraint, a False value a
        free dof. `None` implies that the constraints follow those defined in
        `constrain` (by implication the matrix must be square).

    Returns
    -------
    :class:`numpy.ndarray`
        Left hand side vector.
    '''

  @abc.abstractmethod
  def submatrix(self, rows, cols):
    '''Create submatrix from selected rows, columns.

    Args
    ----
    rows : :class:`bool`/:class:`int` array selecting rows for keeping
    cols : :class:`bool`/:class:`int` array selecting columns for keeping

    Returns
    -------
    :class:`Matrix`
        Matrix instance of reduced dimensions
    '''

  def export(self, form):
    '''Export matrix data to any of supported forms.

    Args
    ----
    form : :class:`str`
      - "dense" : return matrix as a single dense array
      - "csr" : return matrix as 3-tuple of (data, indices, indptr)
      - "coo" : return matrix as 2-tuple of (data, (row, col))
    '''

    # Base implementation: concrete subclasses override this with their own
    # conversions; anything that reaches here is unsupported.
    raise NotImplementedError('cannot export {} to {!r}'.format(self.__class__.__name__, form))

  def __repr__(self):
    return '{}<{}x{}>'.format(type(self).__qualname__, *self.shape)
def preparesolvearguments(wrapped):
  '''Make rhs optional, add lhs0, constrain, rconstrain arguments.

  Decorator for backend ``solve`` implementations: handles initial values and
  row/column constraints, reducing the system to its free degrees of freedom
  before delegating to ``wrapped``. See Matrix.solve for argument semantics.'''

  def solve(self, rhs=None, *, lhs0=None, constrain=None, rconstrain=None, **solverargs):
    nrows, ncols = self.shape
    # Initial vector: all zeros unless an explicit initial guess is supplied.
    if lhs0 is None:
      x = numpy.zeros(ncols)
    else:
      x = numpy.array(lhs0, dtype=float)
      assert x.shape == (ncols,)
    # J masks the free (unconstrained) columns.
    if constrain is None:
      J = numpy.ones(ncols, dtype=bool)
    else:
      assert constrain.shape == (ncols,)
      if constrain.dtype == bool:
        J = ~constrain
      else:
        # Float-valued constraints: NaN marks a free dof, any other value is
        # imposed directly on the solution vector.
        J = numpy.isnan(constrain)
        x[~J] = constrain[~J]
    # I masks the retained rows; defaults to J, which requires a square matrix.
    if rconstrain is None:
      assert nrows == ncols
      I = J
    else:
      # BUGFIX: validate the dtype of rconstrain itself; the original checked
      # constrain.dtype, which raises AttributeError when constrain is None
      # and otherwise inspects the wrong array.
      assert rconstrain.shape == (nrows,) and rconstrain.dtype == bool
      I = ~rconstrain
    assert I.sum() == J.sum(), 'constrained matrix is not square: {}x{}'.format(I.sum(), J.sum())
    if rhs is None:
      rhs = 0.
    # Residual restricted to the free rows; if it vanishes the initial vector
    # already solves the constrained system and the solver can be skipped.
    b = (rhs - self.matvec(x))[J]
    if b.any():
      # Extract the free submatrix only when constraints are actually present.
      x[J] += wrapped(self if I.all() and J.all() else self.submatrix(I, J), b, **solverargs)
      if not numpy.isfinite(x).all():
        raise MatrixError('solver returned non-finite left hand side')
      log.info('solver returned with residual {:.0e}'.format(numpy.linalg.norm((rhs - self.matvec(x))[J])))
    else:
      log.info('skipping solver because initial vector is exact')
    return x

  return log.withcontext(solve)
## NUMPY BACKEND
class Numpy(Backend):
  '''matrix backend based on numpy array'''

  def assemble(self, data, index, shape):
    # Accumulate the value/index pairs into a dense ndarray; wrap the result
    # in a NumpyMatrix only when it is an actual two-dimensional matrix.
    array = numeric.accumulate(data, index, shape)
    if len(shape) == 2:
      return NumpyMatrix(array)
    return array
class NumpyMatrix(Matrix):
  '''matrix based on numpy array'''

  def __init__(self, core):
    assert numeric.isarray(core)
    self.core = core
    super().__init__(core.shape)

  def __add__(self, other):
    'add two dense matrices of identical shape'
    if isinstance(other, NumpyMatrix) and self.shape == other.shape:
      return NumpyMatrix(self.core + other.core)
    return NotImplemented

  def __mul__(self, other):
    'multiply matrix with a scalar'
    if numeric.isnumber(other):
      return NumpyMatrix(self.core * other)
    return NotImplemented

  def __neg__(self):
    'negate matrix'
    return NumpyMatrix(-self.core)

  @property
  def T(self):
    'transposed matrix'
    return NumpyMatrix(self.core.T)

  def matvec(self, vec):
    'matrix-vector product'
    return numpy.dot(self.core, vec)

  def export(self, form):
    'export data as dense array, csr triplet or coo pair'
    if form == 'dense':
      return self.core
    nzrow, nzcol = self.core.nonzero()
    if form == 'coo':
      return self.core[nzrow, nzcol], (nzrow, nzcol)
    if form == 'csr':
      # Row pointer: for each row, the offset of its first nonzero entry.
      indptr = nzrow.searchsorted(numpy.arange(self.shape[0]+1))
      return self.core[nzrow, nzcol], nzcol, indptr
    raise NotImplementedError('cannot export NumpyMatrix to {!r}'.format(form))

  def rowsupp(self, tol=0):
    'return row indices with nonzero/non-small entries'
    return (abs(self.core) > tol).any(axis=1)

  @preparesolvearguments
  def solve(self, rhs):
    'direct dense solve via numpy.linalg'
    try:
      return numpy.linalg.solve(self.core, rhs)
    except numpy.linalg.LinAlgError as e:
      raise MatrixError(e) from e

  def submatrix(self, rows, cols):
    'dense submatrix of the selected rows and columns'
    return NumpyMatrix(self.core[numpy.ix_(rows, cols)])
## SCIPY BACKEND
try:
import scipy.sparse.linalg
except ImportError:
pass
else:
class Scipy(Backend):
'''matrix backend based on scipy's sparse matrices'''
def assemble(self, data, index, shape):
if len(shape) < 2:
return numeric.accumulate(data, index, shape)
if len(shape) == 2:
csr = scipy.sparse.csr_matrix((data, index), shape)
return ScipyMatrix(csr)
raise MatrixError('{}d data not supported by scipy backend'.format(len(shape)))
class ScipyMatrix(Matrix):
'''matrix based on any of scipy's sparse matrices'''
def __init__(self, core):
self.core = core
super().__init__(core.shape)
def __add__(self, other):
if not isinstance(other, ScipyMatrix) or self.shape != other.shape:
return NotImplemented
return ScipyMatrix(self.core + other.core)
def __sub__(self, other):
if not isinstance(other, ScipyMatrix) or self.shape != other.shape:
return NotImplemented
return ScipyMatrix(self.core - other.core)
def __mul__(self, other):
if not numeric.isnumber(other):
return NotImplemented
return ScipyMatrix(self.core * other)
def __neg__(self):
return ScipyMatrix(-self.core)
def matvec(self, vec):
return self.core.dot(vec)
def export(self, form):
if form == 'dense':
return self.core.toarray()
if form == 'csr':
csr = self.core.tocsr()
return csr.data, csr.indices, csr.indptr
if form == 'coo':
coo = self.core.tocoo()
return coo.data, (coo.row, coo.col)
raise NotImplementedError('cannot export NumpyMatrix to {!r}'.format(form))
@property
def T(self):
return ScipyMatrix(self.core.transpose())
@preparesolvearguments
def solve(self, rhs, atol=0, solver='spsolve', callback=None, precon=None, **solverargs):
if solver == 'spsolve':
log.info('solving system using sparse direct solver')
return scipy.sparse.linalg.spsolve(self.core, rhs)
assert atol, 'tolerance must be specified for iterative solver'
rhsnorm = numpy.linalg.norm(rhs)
if rhsnorm <= atol:
return numpy.zeros(self.shape[1])
log.info('solving system using {} iterative solver'.format(solver))
solverfun = getattr(scipy.sparse.linalg, solver)
myrhs = rhs / rhsnorm # normalize right hand side vector for best control over scipy's stopping criterion
mytol = atol / rhsnorm
niter = numpy.array(0)
def mycallback(arg):
niter[...] += 1
# some solvers provide the residual, others the left hand side vector
res = numpy.linalg.norm(myrhs - self.matvec(arg)) if numpy.ndim(arg) == 1 else float(arg)
if callback:
callback(res)
with log.context('residual {:.2e} ({:.0f}%)'.format(res, 100. * numpy.log10(res) / numpy.log10(mytol) if res > 0 else 0)):
pass
M = self.getprecon(precon) if isinstance(precon, str) else precon(self.core) if callable(precon) else precon
mylhs, status = solverfun(self.core, myrhs, M=M, tol=mytol, callback=mycallback, **solverargs)
if status != 0:
raise MatrixError('{} solver failed with status {}'.format(solver, status))
log.info('solver converged in {} iterations'.format(niter))
return mylhs * rhsnorm
def getprecon(self, name):
name = name.lower()
assert self.shape[0] == self.shape[1], 'constrained matrix must be square'
log.info('building {} preconditioner'.format(name))
if name == 'splu':
try:
precon = scipy.sparse.linalg.splu(self.core.tocsc()).solve
except RuntimeError as e:
raise MatrixError(e) from e
elif name == 'spilu':
try:
precon = scipy.sparse.linalg.spilu(self.core.tocsc(), drop_tol=1e-5, fill_factor=None, drop_rule=None, permc_spec=None, diag_pivot_thresh=None, relax=None, panel_size=None, options=None).solve
except RuntimeError as e:
raise MatrixError(e) from e
elif name == 'diag':
diag = self.core.diagonal()
if not diag.all():
raise MatrixError("building 'diag' preconditioner: diagonal has zero entries")
precon = numpy.reciprocal(diag).__mul__
else:
raise MatrixError('invalid preconditioner {!r}'.format(name))
return scipy.sparse.linalg.LinearOperator(self.shape, precon, dtype=float)
def submatrix(self, rows, cols):
  '''Return a new ScipyMatrix restricted to the selected rows and columns.'''
  # select rows first, then columns, on the underlying scipy sparse matrix
  row_slice = self.core[rows,:]
  return ScipyMatrix(row_slice[:,cols])
## INTEL MKL BACKEND

# Attempt to load the MKL runtime library; the whole MKL backend below is
# defined only when the shared library is present on this platform.
libmkl = util.loadlib(linux='libmkl_rt.so', darwin='libmkl_rt.dylib', win32='mkl_rt.dll')
if libmkl is not None:
  # typedefs: ctypes-compatible array annotations used by the Pardiso wrapper
  c_int = types.c_array[numpy.int32]
  c_long = types.c_array[numpy.int64]
  c_double = types.c_array[numpy.float64]
  # optional TBB threading library, used by the MKL backend when nprocs > 1
  libtbb = util.loadlib(linux='libtbb.so.2', darwin='libtbb.dylib', win32='tbb.dll')
class MKL(Backend):
  '''matrix backend based on Intel's Math Kernel Library'''
  def __enter__(self):
    super().__enter__()
    # choose MKL's threading layer: TBB when multiple processors are
    # configured, sequential otherwise
    usethreads = config.nprocs > 1
    libmkl.mkl_set_threading_layer(c_long(4 if usethreads else 1)) # 1:SEQUENTIAL, 4:TBB
    if usethreads and libtbb:
      # initialize a TBB task scheduler through the mangled C++ symbol for
      # tbb::task_scheduler_init::initialize; the handle is kept so that it
      # can be terminated again in __exit__
      self.tbbhandle = ctypes.c_void_p()
      libtbb._ZN3tbb19task_scheduler_init10initializeEim(ctypes.byref(self.tbbhandle), ctypes.c_int(config.nprocs), ctypes.c_int(2))
    else:
      self.tbbhandle = None
    return self
  def __exit__(self, etype, value, tb):
    if self.tbbhandle:
      # tbb::task_scheduler_init::terminate
      libtbb._ZN3tbb19task_scheduler_init9terminateEv(ctypes.byref(self.tbbhandle))
    super().__exit__(etype, value, tb)
  @staticmethod
  def assemble(data, index, shape):
    '''Assemble sparse data: dense accumulation for < 2 dimensions, an
    MKLMatrix for exactly 2 dimensions; higher dimensions are unsupported.'''
    if len(shape) < 2:
      return numeric.accumulate(data, index, shape)
    if len(shape) == 2:
      return MKLMatrix(data, index, shape)
    # bugfix: this error message previously blamed the 'scipy backend'
    raise MatrixError('{}d data not supported by mkl backend'.format(len(shape)))
class Pardiso:
  '''simple wrapper for libmkl.pardiso

  https://software.intel.com/en-us/mkl-developer-reference-c-pardiso
  '''
  _pardiso = libmkl.pardiso
  # error codes returned through pardiso's final 'error' out-parameter,
  # mapped to readable messages for MatrixError
  _errorcodes = {
    -1: 'input inconsistent',
    -2: 'not enough memory',
    -3: 'reordering problem',
    -4: 'zero pivot, numerical factorization or iterative refinement problem',
    -5: 'unclassified (internal) error',
    -6: 'reordering failed (matrix types 11 and 13 only)',
    -7: 'diagonal matrix is singular',
    -8: '32-bit integer overflow problem',
    -9: 'not enough memory for OOC',
    -10: 'error opening OOC files',
    -11: 'read/write error with OOC files',
    -12: 'pardiso_64 called from 32-bit library',
  }
  def __init__(self):
    self.pt = numpy.zeros(64, numpy.int64) # handle to data structure
  @types.apply_annotations
  def __call__(self, *, phase:c_int, iparm:c_int, maxfct:c_int=1, mnum:c_int=1, mtype:c_int=0, n:c_int=0, a:c_double=None, ia:c_int=None, ja:c_int=None, perm:c_int=None, nrhs:c_int=0, msglvl:c_int=0, b:c_double=None, x:c_double=None):
    # invoke the pardiso entry point; 'error' is an out-parameter that is
    # nonzero on failure and translated via _errorcodes below
    error = ctypes.c_int32(1)
    self._pardiso(self.pt.ctypes, maxfct, mnum, mtype, phase, n, a, ia, ja, perm, nrhs, iparm, msglvl, b, x, ctypes.byref(error))
    if error.value:
      raise MatrixError(self._errorcodes.get(error.value, 'unknown error {}'.format(error.value)))
  def __del__(self):
    # a nonzero handle means Pardiso still holds internal state; phase -1
    # releases all internal memory for all matrices
    if self.pt.any(): # release all internal memory for all matrices
      self(phase=-1, iparm=numpy.zeros(64, dtype=numpy.int32))
      assert not self.pt.any(), 'it appears that Pardiso failed to release its internal memory'
class MKLMatrix(Matrix):
  '''matrix implementation based on sorted coo data

  Entries are stored in coordinate format, sorted by row then column, with
  duplicates summed and explicit zeros removed, so that a CSR row pointer
  (indptr) can be derived on demand for the Pardiso solver.
  '''
  __cache__ = 'indptr',
  # cached (Pardiso, iparm, mtype) triple after the first solve; False until then
  _factors = False
  def __init__(self, data, index, shape):
    assert index.shape == (2, len(data))
    if len(data):
      # sort rows, columns
      reorder = numpy.lexsort(index[::-1])
      index = index[:,reorder]
      data = data[reorder]
      # sum duplicate entries
      keep = numpy.empty(len(reorder), dtype=bool)
      keep[0] = True
      numpy.not_equal(index[:,1:], index[:,:-1]).any(axis=0, out=keep[1:])
      if not keep.all():
        index = index[:,keep]
        data = numeric.accumulate(data, [keep.cumsum()-1], [index.shape[1]])
      # drop entries that are (or summed to) exactly zero
      if not data.all():
        nz = data.astype(bool)
        data = data[nz]
        index = index[:,nz]
    self.data = numpy.ascontiguousarray(data, dtype=numpy.float64)
    self.index = numpy.ascontiguousarray(index, dtype=numpy.int32)
    super().__init__(shape)
  @property
  def indptr(self):
    # CSR row pointer: offset of the first entry of every row in the
    # row-sorted data array
    return self.index[0].searchsorted(numpy.arange(self.shape[0]+1)).astype(numpy.int32, copy=False)
  def __add__(self, other):
    # plain concatenation suffices: __init__ re-sorts and sums duplicates
    if not isinstance(other, MKLMatrix) or self.shape != other.shape:
      return NotImplemented
    return MKLMatrix(numpy.concatenate([self.data, other.data]), numpy.concatenate([self.index, other.index], axis=1), self.shape)
  def __sub__(self, other):
    # as __add__, with the second operand's data negated
    if not isinstance(other, MKLMatrix) or self.shape != other.shape:
      return NotImplemented
    return MKLMatrix(numpy.concatenate([self.data, -other.data]), numpy.concatenate([self.index, other.index], axis=1), self.shape)
  def __mul__(self, other):
    # scalar multiplication only
    if not numeric.isnumber(other):
      return NotImplemented
    return MKLMatrix(self.data * other, self.index, self.shape)
  def __neg__(self):
    return MKLMatrix(-self.data, self.index, self.shape)
  @property
  def T(self):
    # transpose: swap the row/column index rows and reverse the shape
    return MKLMatrix(self.data, self.index[::-1], self.shape[::-1])
  def matvec(self, vec):
    # matrix-vector product: scatter-add data * vec[col] into the row positions
    rows, cols = self.index
    return numeric.accumulate(self.data * vec[cols], [rows], [self.shape[0]])
  def export(self, form):
    '''Export the matrix data as 'dense', 'csr' or 'coo'.'''
    if form == 'dense':
      return numeric.accumulate(self.data, self.index, self.shape)
    if form == 'csr':
      return self.data, self.index[1], self.indptr
    if form == 'coo':
      return self.data, self.index
    raise NotImplementedError('cannot export MKLMatrix to {!r}'.format(form))
  def submatrix(self, rows, cols):
    # rows/cols appear to be boolean selection masks: cumsum()-1 maps old
    # indices to the compacted new indices -- TODO confirm against callers
    I, J = self.index
    keep = numpy.logical_and(rows[I], cols[J])
    csI = rows.cumsum()
    csJ = cols.cumsum()
    return MKLMatrix(self.data[keep], numpy.array([csI[I[keep]]-1, csJ[J[keep]]-1]), shape=(csI[-1], csJ[-1]))
  @preparesolvearguments
  def solve(self, rhs):
    '''Solve the linear system via MKL Pardiso, reusing cached factors when available.'''
    log.info('solving {0}x{0} system using MKL Pardiso'.format(self.shape[0]))
    if self._factors:
      log.info('reusing existing factorization')
      pardiso, iparm, mtype = self._factors
      phase = 33 # solve, iterative refinement
    else:
      pardiso = Pardiso()
      iparm = numpy.zeros(64, dtype=numpy.int32) # https://software.intel.com/en-us/mkl-developer-reference-c-pardiso-iparm-parameter
      iparm[0] = 1 # supply all values in components iparm[1:64]
      iparm[1] = 2 # fill-in reducing ordering for the input matrix: nested dissection algorithm from the METIS package
      iparm[9] = 13 # pivoting perturbation threshold 1e-13 (default for nonsymmetric)
      iparm[10] = 1 # enable scaling vectors (default for nonsymmetric)
      iparm[12] = 1 # enable improved accuracy using (non-) symmetric weighted matching (default for nonsymmetric)
      iparm[34] = 1 # zero base indexing
      mtype = 11 # real and nonsymmetric
      phase = 13 # analysis, numerical factorization, solve, iterative refinement
      self._factors = pardiso, iparm, mtype
    lhs = numpy.empty(self.shape[1], dtype=numpy.float64)
    pardiso(phase=phase, mtype=mtype, iparm=iparm, n=self.shape[0], nrhs=1, b=rhs, x=lhs, a=self.data, ia=self.indptr, ja=self.index[1])
    return lhs
## MODULE METHODS

# Module-level backend instance used by assemble() below; Numpy is one of
# the Backend subclasses defined earlier in this file.
_current_backend = Numpy()
def backend(names):
  '''Instantiate the first available matrix backend.

  Accepts a comma-separated, case-insensitive list of backend names and
  returns an instance of the first Backend subclass whose name matches;
  raises RuntimeError when none of the names match.
  '''
  for name in names.lower().split(','):
    match = next((cls for cls in Backend.__subclasses__() if cls.__name__.lower() == name), None)
    if match is not None:
      return match()
  raise RuntimeError('matrix backend {!r} is not available'.format(names))
def assemble(data, index, shape):
  '''Assemble sparse (data, index) pairs into a matrix/array via the currently active backend.'''
  return _current_backend.assemble(data, index, shape)
def diag(d):
  '''Assemble a square diagonal matrix from the 1-d array d.'''
  assert d.ndim == 1
  positions = numpy.arange(len(d))
  # on the diagonal the row index equals the column index
  index = numpy.stack([positions, positions])
  return assemble(d, index=index, shape=d.shape*2)
def eye(n):
  '''Assemble an n-by-n identity matrix.'''
  ones = numpy.ones(n)
  return diag(ones)
# vim:sw=2:sts=2:et
| 35.310997 | 236 | 0.660844 | 16,545 | 0.80507 | 0 | 0 | 6,325 | 0.307771 | 0 | 0 | 6,365 | 0.309717 |
10c58e27a9810f57838afb1a0c1697fd854c3b9b | 239 | py | Python | pz/installer.py | pyramidzero/pzinstaller | 43058b0a681fbea6e2173f1192aea720483d861c | [
"MIT"
] | null | null | null | pz/installer.py | pyramidzero/pzinstaller | 43058b0a681fbea6e2173f1192aea720483d861c | [
"MIT"
] | null | null | null | pz/installer.py | pyramidzero/pzinstaller | 43058b0a681fbea6e2173f1192aea720483d861c | [
"MIT"
] | null | null | null | from subprocess import run
# configuration defaults tools
update = ['brew', 'update']
install = ['brew', 'install', 'git']
class Installer:
def func_update(self):
run(update)
def func_install(self):
run(install)
| 18.384615 | 36 | 0.65272 | 113 | 0.472803 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.267782 |
10c64417440fb35fb29c424a295af5fb46bac46b | 5,951 | py | Python | modules/widgets.py | ldvlpr-dev/AudioVisualizer-Release | 23e18b9b96a7bb54a134603b0e41f70b5d8569ad | [
"MIT"
] | 2 | 2021-04-01T16:22:52.000Z | 2021-05-21T18:01:12.000Z | modules/widgets.py | ldvlpr-dev/AudioVisualizer-Release | 23e18b9b96a7bb54a134603b0e41f70b5d8569ad | [
"MIT"
] | null | null | null | modules/widgets.py | ldvlpr-dev/AudioVisualizer-Release | 23e18b9b96a7bb54a134603b0e41f70b5d8569ad | [
"MIT"
] | 2 | 2021-03-31T09:00:58.000Z | 2021-04-28T18:36:31.000Z | import cv2
import numpy as np
def hex_to_bgr(hx):
hx = hx.lstrip('#')
return tuple(int(hx[i:i + 2], 16) for i in (0, 2, 4))[::-1]
class Rectangle:
def __init__(self, x, y, width, height, max_height, min_db, max_db, color, thickness, reverse):
self.rev = -1 if reverse else 1
self.x = x
self.y = y
self.width = width
self.x2 = self.x + self.width
self.color = color
self.thickness = thickness
self.min_height = height
self.max_height = max_height
self.max_db = max_db
self.min_db = min_db
self.height = height
self.ratio = (self.max_height - self.min_height)/(
self.max_db - self.min_db)
def draw(self, db, dt, frame):
desired_height = db * self.ratio + self.max_height
speed = (desired_height - self.height)/0.1
self.height += speed * dt
self.height = max(
min(self.height, self.max_height), self.min_height)
cv2.rectangle(
frame,
(int(self.x), int(self.y)),
(int(self.x2), int(self.y+self.height)),
color=self.color,
thickness=self.thickness
)
class Circle:
    """A circle whose radius eases toward a decibel-derived target."""

    def __init__(self, x, y, width, height, max_height, min_db, max_db, color, thickness):
        self.x = x
        self.y = y
        self.width = width
        self.color = color
        self.thickness = thickness
        self.min_height = height
        self.max_height = max_height
        self.max_db = max_db
        self.min_db = min_db
        self.height = height
        # pixels of radius per decibel
        self.ratio = (self.max_height - self.min_height)/(
            self.max_db - self.min_db)

    def draw(self, db, dt, frame):
        """Ease the radius toward the level implied by db, then draw the circle."""
        target = db * self.ratio + self.max_height
        # first-order easing with a 0.1s time constant
        self.height += (target - self.height) / 0.1 * dt
        self.height = min(self.height, self.max_height)
        self.height = max(self.height, self.min_height)
        cv2.circle(frame, center=(int(self.x), int(self.y)),
                   radius=int(self.height), color=self.color,
                   thickness=self.thickness, lineType=cv2.LINE_AA)
class ColorWoofer:
    """A fixed-radius circle whose color is picked from a palette by loudness."""

    def __init__(self, **kwargs):
        self.name = kwargs["name"]
        self.x = int(kwargs["x"])
        self.y = int(kwargs["y"])
        self.freqs = kwargs["freqs"]
        # two-entry BGR palette: colors[0] is used at/below min_decibel
        self.colors = [(255, 0, 0), (0, 0, 255)]
        self.thickness = int(kwargs["thickness"])
        self.height = int(kwargs["height"])
        self.min_decibel = int(kwargs["min_decibel"])
        self.max_decibel = int(kwargs["max_decibel"])
        self.colorsLen = len(self.colors)
        # decibel span per palette step
        self.ratio = (self.max_decibel - self.min_decibel) / (len(self.colors) - 1)

    def draw(self, db, frame):
        """Draw the circle, colored according to the summed band levels in db."""
        loudness = min(-sum(db), self.max_decibel)
        if loudness <= self.min_decibel:
            color = self.colors[0]
        else:
            palette_index = min(int(self.ratio * (self.max_decibel - loudness)),
                                self.colorsLen - 1)
            color = self.colors[palette_index]
        cv2.circle(frame, center=(int(self.x), int(self.y)),
                   radius=int(self.height), color=color,
                   thickness=self.thickness, lineType=cv2.LINE_AA)
class FreqVisualizerGroup:
    """A horizontal row of per-frequency-band shapes (rectangles or circles)."""

    def __init__(self, **kwargs):
        self.direction = kwargs['direction']
        self.type = kwargs["freqvolumetype"]
        self.name = kwargs["name"]
        self.freqs = kwargs["freqs"]
        self.x = 0
        # anchor at the bottom of the screen for upward groups, at the top otherwise
        self.y = int(kwargs["s_height"]) if self.direction == "up" else 0
        self.color = hex_to_bgr(kwargs["color"])
        self.thickness = int(kwargs["thickness"])
        self.width = int(kwargs["width"])
        self.min_height = int(kwargs["min_height"])
        self.max_height = int(kwargs["max_height"])
        self.min_decibel = int(kwargs["min_decibel"])
        self.max_decibel = int(kwargs["max_decibel"])
        self.shapes = []
        if self.type == "rectangle":
            reverse = self.direction == "up"
            self.shapes = [
                Rectangle(self.x + band * self.width, self.y, self.width,
                          self.min_height, self.max_height,
                          self.min_decibel, self.max_decibel,
                          self.color, self.thickness, reverse)
                for band in range(len(self.freqs))]
        elif self.type == "circle":
            # shift the row vertically so the circles clear the screen edge
            offset = int(kwargs["circle_y_gap"]) + self.max_height
            self.y = self.y - offset if self.direction == "up" else self.y + offset
            self.shapes = [
                Circle(self.x + band * self.width, self.y, self.width,
                       self.min_height, self.max_height,
                       self.min_decibel, self.max_decibel,
                       self.color, self.thickness)
                for band in range(len(self.freqs))]

    def draw(self, dt, db, frame):
        """Draw every shape using the matching per-band decibel value."""
        for band, shape in enumerate(self.shapes):
            shape.draw(db[band], dt, frame)
class BeatVisualizer:
    """A circle that flashes on a fixed BPM grid, with a decaying pulse effect."""

    def __init__(self, **kwargs):
        self.name = kwargs["name"]
        self.x, self.y, self.min_height, self.height, self.color = int(kwargs["x"]), int(kwargs[
            "y"]), int(kwargs["min_height"]), int(kwargs["min_height"]), hex_to_bgr(kwargs["color"])
        # NOTE(review): bpm/60 yields beats-per-second, while the name
        # suggests seconds-per-beat (60/bpm) -- confirm the intended units.
        self.beat_every_x_sec = int(kwargs["bpm"])/60
        self.effect_strenght = 0
        self.max_effect_strenght = int(kwargs["max_effect_strenght"])
        self.delay_tolerance = kwargs["delay_tolerance"]
        self.thickness = int(kwargs["thickness"])
        self.first_time = float(kwargs["first_time"])
        # decay rate of the pulse effect, in strength units per second
        self.speed = 200

    def draw(self, **kwargs):
        """Re-trigger or decay the pulse effect and draw the circle.

        Expects keyword arguments: time, dt and frame.
        """
        t = kwargs["time"]-self.first_time
        if t < 0:
            # before the first beat: nothing to trigger yet
            pass
        elif abs(t % self.beat_every_x_sec) < self.delay_tolerance:
            # close enough to a beat instant: restart the pulse at full strength
            self.effect_strenght = self.max_effect_strenght
        # clamp happens before the decay step, so the value may dip slightly
        # below zero until the next call clamps it again
        if self.effect_strenght < 0:
            self.effect_strenght = 0
        self.effect_strenght -= kwargs["dt"] * self.speed
        cv2.circle(kwargs["frame"], center=(int(self.x), int(self.y)), radius=int(
            self.min_height + self.effect_strenght), color=self.color, thickness=self.thickness, lineType=cv2.LINE_AA)
| 40.209459 | 214 | 0.59469 | 5,796 | 0.973954 | 0 | 0 | 0 | 0 | 0 | 0 | 377 | 0.063351 |
10c68c99215b27d36f277cd7086d010222783579 | 2,772 | py | Python | auto_LiRPA/examples/vision/simple_verification.py | KaidiXu/LiRPA_Verify | 71f5327a8abf136bcfb3e1ec07604628abf8126e | [
"BSD-2-Clause"
] | 14 | 2021-01-27T04:14:41.000Z | 2022-01-20T11:54:35.000Z | auto_LiRPA/examples/vision/simple_verification.py | KaidiXu/LiRPA_Verify | 71f5327a8abf136bcfb3e1ec07604628abf8126e | [
"BSD-2-Clause"
] | null | null | null | auto_LiRPA/examples/vision/simple_verification.py | KaidiXu/LiRPA_Verify | 71f5327a8abf136bcfb3e1ec07604628abf8126e | [
"BSD-2-Clause"
] | 2 | 2021-01-29T10:12:50.000Z | 2021-05-24T21:55:20.000Z | import torch.nn as nn
import torch.nn.functional as F
import torchvision
from auto_LiRPA import BoundedModule, BoundedTensor
from auto_LiRPA.perturbations import *
import models
## Step 1: Define computational graph by implementing forward()
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
# This simple model comes from https://github.com/locuslab/convex_adversarial
def mnist_model():
model = nn.Sequential(
nn.Conv2d(1, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32*7*7,100),
nn.ReLU(),
nn.Linear(100, 10)
)
return model
# NOTE(review): os, np and torch are not imported directly in this file; they
# are assumed to be re-exported by 'from auto_LiRPA.perturbations import *'
# above -- confirm against that module.
model = mnist_model()
# Load the pretrained weights
checkpoint = torch.load(os.path.join(os.path.dirname(__file__),"pretrain/kw_mnist.pth"))
model.load_state_dict(checkpoint)

## Step 2: Prepare dataset as usual
test_data = torchvision.datasets.MNIST("./data", train=False, download=True, transform=torchvision.transforms.ToTensor())
# For illustration we only use 2 image from dataset
N = 2
n_classes = 10
image = test_data.data[:N].view(N,1,28,28)
true_label = test_data.targets[:N]
# Convert to float
image = image.to(torch.float32) / 255.0

## Step 3: wrap model with auto_LiRPA
# The second parameter is for constructing the trace of the computational graph, and its content is not important.
model = BoundedModule(model, torch.empty_like(image), bound_opts={'optimize_bound_args': {'ob_init': True, 'ob_upper': True, 'ob_decision_thresh': 1e8}})
# For larger convolutional models, setting bound_opts={"conv_mode": "patches"} is more efficient.
# model = BoundedModule(model, torch.empty_like(image), bound_opts={"conv_mode": "patches"})

## Step 4: Compute bounds using LiRPA given a perturbation
# L-infinity ball of radius eps around each input pixel
eps = 0.1
norm = np.inf
# ptb = PerturbationL0Norm(eps=eps)
ptb = PerturbationLpNorm(norm = norm, eps = eps)
image = BoundedTensor(image, ptb)
# Get model prediction as usual
pred = model(image)
label = torch.argmax(pred, dim=1).cpu().detach().numpy()

## Step 5: Compute bounds for final output
# Compare bounding methods; only the token before the first space is the
# method name passed to compute_bounds.
for method in ['IBP', 'IBP+backward (CROWN-IBP)', 'backward (CROWN)', 'CROWN-Optimized']:
    lb, ub = model.compute_bounds(x=(image,), method=method.split()[0])
    lb = lb.detach().cpu().numpy()
    ub = ub.detach().cpu().numpy()
    print("Bounding method:", method)
    for i in range(N):
        print("Image {} top-1 prediction {} ground-truth {}".format(i, label[i], true_label[i]))
        for j in range(n_classes):
            indicator = '(ground-truth)' if j == true_label[i] else ''
            print("f_{j}(x_0): {l:8.3f} <= f_{j}(x_0+delta) <= {u:8.3f} {ind}".format(
                j=j, l=lb[i][j], u=ub[i][j], ind=indicator))
    print()
10c7a36a4680a6d2f0e301e67846d4b75e77d776 | 15,796 | py | Python | pysnmp/FUNI-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/FUNI-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/FUNI-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module FUNI-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FUNI-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:03:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
TimeTicks, enterprises, MibIdentifier, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Gauge32, ObjectIdentity, NotificationType, ModuleIdentity, Bits, Integer32, Unsigned32, Counter64, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "enterprises", "MibIdentifier", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Gauge32", "ObjectIdentity", "NotificationType", "ModuleIdentity", "Bits", "Integer32", "Unsigned32", "Counter64", "IpAddress")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
atmfFuniMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 353, 5, 6, 1))
if mibBuilder.loadTexts: atmfFuniMIB.setLastUpdated('9705080000Z')
if mibBuilder.loadTexts: atmfFuniMIB.setOrganization('The ATM Forum')
atmForum = MibIdentifier((1, 3, 6, 1, 4, 1, 353))
atmForumNetworkManagement = MibIdentifier((1, 3, 6, 1, 4, 1, 353, 5))
atmfFuni = MibIdentifier((1, 3, 6, 1, 4, 1, 353, 5, 6))
funiMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1))
class FuniValidVpi(TextualConvention, Integer32):
    """Textual convention restricting a FUNI VPI value to the range 0..255."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 255)
class FuniValidVci(TextualConvention, Integer32):
    """Textual convention restricting a FUNI VCI value to the range 0..65535."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535)
funiIfConfTable = MibTable((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1), )
if mibBuilder.loadTexts: funiIfConfTable.setStatus('current')
funiIfConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: funiIfConfEntry.setStatus('current')
funiIfConfMode = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("mode1a", 1), ("mode1b", 2), ("mode3", 3), ("mode4", 4))).clone('mode1a')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfConfMode.setStatus('current')
funiIfConfFcsBits = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("fcsBits16", 1), ("fcsBits32", 2))).clone('fcsBits16')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfConfFcsBits.setStatus('current')
funiIfConfSigSupport = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfConfSigSupport.setStatus('current')
funiIfConfSigVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1, 4), FuniValidVpi()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfConfSigVpi.setStatus('current')
funiIfConfSigVci = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1, 5), FuniValidVci().clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfConfSigVci.setStatus('current')
funiIfConfIlmiSupport = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfConfIlmiSupport.setStatus('current')
funiIfConfIlmiVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1, 7), FuniValidVpi()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfConfIlmiVpi.setStatus('current')
funiIfConfIlmiVci = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1, 8), FuniValidVci().clone(16)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfConfIlmiVci.setStatus('current')
funiIfConfOamSupport = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfConfOamSupport.setStatus('current')
funiIfStatsTable = MibTable((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2), )
if mibBuilder.loadTexts: funiIfStatsTable.setStatus('current')
funiIfStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: funiIfStatsEntry.setStatus('current')
funiIfEstablishedPvccs = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfEstablishedPvccs.setStatus('current')
funiIfEstablishedSvccs = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfEstablishedSvccs.setStatus('current')
funiIfRxAbortedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfRxAbortedFrames.setStatus('current')
funiIfRxTooShortFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfRxTooShortFrames.setStatus('current')
funiIfRxTooLongFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfRxTooLongFrames.setStatus('current')
funiIfRxFcsErrFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfRxFcsErrFrames.setStatus('current')
funiIfRxUnknownFaFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 7), Counter32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: funiIfRxUnknownFaFrames.setStatus('current')
funiIfRxDiscardedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfRxDiscardedFrames.setStatus('current')
funiIfTxTooLongFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfTxTooLongFrames.setStatus('current')
funiIfTxLenErrFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfTxLenErrFrames.setStatus('current')
funiIfTxCrcErrFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfTxCrcErrFrames.setStatus('current')
funiIfTxPartialFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfTxPartialFrames.setStatus('current')
funiIfTxTimeOutFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfTxTimeOutFrames.setStatus('current')
funiIfTxDiscardedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiIfTxDiscardedFrames.setStatus('current')
funiVclStatsTable = MibTable((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3), )
if mibBuilder.loadTexts: funiVclStatsTable.setStatus('current')
funiVclStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FUNI-MIB", "funiVclFaVpi"), (0, "FUNI-MIB", "funiVclFaVci"))
if mibBuilder.loadTexts: funiVclStatsEntry.setStatus('current')
funiVclFaVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 1), FuniValidVpi())
if mibBuilder.loadTexts: funiVclFaVpi.setStatus('current')
funiVclFaVci = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 2), FuniValidVci())
if mibBuilder.loadTexts: funiVclFaVci.setStatus('current')
funiVclRxClp0Frames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclRxClp0Frames.setStatus('current')
funiVclRxTotalFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclRxTotalFrames.setStatus('current')
funiVclTxClp0Frames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclTxClp0Frames.setStatus('current')
funiVclTxTotalFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclTxTotalFrames.setStatus('current')
funiVclRxClp0Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclRxClp0Octets.setStatus('current')
funiVclRxTotalOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclRxTotalOctets.setStatus('current')
funiVclTxClp0Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclTxClp0Octets.setStatus('current')
funiVclTxTotalOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclTxTotalOctets.setStatus('current')
funiVclRxErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclRxErrors.setStatus('current')
funiVclTxErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclTxErrors.setStatus('current')
funiVclRxOamFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclRxOamFrames.setStatus('current')
funiVclTxOamFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 1, 3, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: funiVclTxOamFrames.setStatus('current')
funiMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 2))
funiMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 2, 1))
funiMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 2, 2))
funiMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 2, 1, 1)).setObjects(("FUNI-MIB", "funiIfConfMinGroup"), ("FUNI-MIB", "funiIfStatsMinGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
funiMIBCompliance = funiMIBCompliance.setStatus('current')
funiIfConfMinGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 2, 2, 1)).setObjects(("FUNI-MIB", "funiIfConfMode"), ("FUNI-MIB", "funiIfConfFcsBits"), ("FUNI-MIB", "funiIfConfSigSupport"), ("FUNI-MIB", "funiIfConfSigVpi"), ("FUNI-MIB", "funiIfConfSigVci"), ("FUNI-MIB", "funiIfConfIlmiSupport"), ("FUNI-MIB", "funiIfConfIlmiVpi"), ("FUNI-MIB", "funiIfConfIlmiVci"), ("FUNI-MIB", "funiIfConfOamSupport"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
funiIfConfMinGroup = funiIfConfMinGroup.setStatus('current')
funiIfStatsMinGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 2, 2, 2)).setObjects(("FUNI-MIB", "funiIfEstablishedPvccs"), ("FUNI-MIB", "funiIfEstablishedSvccs"), ("FUNI-MIB", "funiIfRxAbortedFrames"), ("FUNI-MIB", "funiIfRxTooShortFrames"), ("FUNI-MIB", "funiIfRxTooLongFrames"), ("FUNI-MIB", "funiIfRxFcsErrFrames"), ("FUNI-MIB", "funiIfRxUnknownFaFrames"), ("FUNI-MIB", "funiIfRxDiscardedFrames"), ("FUNI-MIB", "funiIfTxTooLongFrames"), ("FUNI-MIB", "funiIfTxLenErrFrames"), ("FUNI-MIB", "funiIfTxCrcErrFrames"), ("FUNI-MIB", "funiIfTxPartialFrames"), ("FUNI-MIB", "funiIfTxTimeOutFrames"), ("FUNI-MIB", "funiIfTxDiscardedFrames"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
funiIfStatsMinGroup = funiIfStatsMinGroup.setStatus('current')
funiVclStatsOptionalGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 353, 5, 6, 1, 2, 2, 3)).setObjects(("FUNI-MIB", "funiVclRxClp0Frames"), ("FUNI-MIB", "funiVclRxTotalFrames"), ("FUNI-MIB", "funiVclTxClp0Frames"), ("FUNI-MIB", "funiVclTxTotalFrames"), ("FUNI-MIB", "funiVclRxClp0Octets"), ("FUNI-MIB", "funiVclRxTotalOctets"), ("FUNI-MIB", "funiVclTxClp0Octets"), ("FUNI-MIB", "funiVclTxTotalOctets"), ("FUNI-MIB", "funiVclRxErrors"), ("FUNI-MIB", "funiVclTxErrors"), ("FUNI-MIB", "funiVclRxOamFrames"), ("FUNI-MIB", "funiVclTxOamFrames"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
funiVclStatsOptionalGroup = funiVclStatsOptionalGroup.setStatus('current')
mibBuilder.exportSymbols("FUNI-MIB", funiVclRxErrors=funiVclRxErrors, funiVclTxClp0Octets=funiVclTxClp0Octets, funiVclTxClp0Frames=funiVclTxClp0Frames, funiIfConfIlmiSupport=funiIfConfIlmiSupport, funiIfConfMode=funiIfConfMode, FuniValidVpi=FuniValidVpi, funiIfEstablishedPvccs=funiIfEstablishedPvccs, funiIfTxCrcErrFrames=funiIfTxCrcErrFrames, funiIfTxTimeOutFrames=funiIfTxTimeOutFrames, funiIfTxTooLongFrames=funiIfTxTooLongFrames, funiVclRxTotalOctets=funiVclRxTotalOctets, funiIfStatsMinGroup=funiIfStatsMinGroup, funiIfConfTable=funiIfConfTable, funiIfStatsEntry=funiIfStatsEntry, funiVclFaVpi=funiVclFaVpi, funiIfConfSigVpi=funiIfConfSigVpi, funiIfConfFcsBits=funiIfConfFcsBits, funiIfRxTooLongFrames=funiIfRxTooLongFrames, funiIfRxDiscardedFrames=funiIfRxDiscardedFrames, atmfFuniMIB=atmfFuniMIB, funiVclTxErrors=funiVclTxErrors, atmfFuni=atmfFuni, funiIfRxUnknownFaFrames=funiIfRxUnknownFaFrames, funiIfTxPartialFrames=funiIfTxPartialFrames, funiIfConfIlmiVci=funiIfConfIlmiVci, funiIfTxLenErrFrames=funiIfTxLenErrFrames, funiVclRxTotalFrames=funiVclRxTotalFrames, funiIfConfMinGroup=funiIfConfMinGroup, funiVclStatsTable=funiVclStatsTable, FuniValidVci=FuniValidVci, funiVclRxOamFrames=funiVclRxOamFrames, funiIfConfIlmiVpi=funiIfConfIlmiVpi, funiVclStatsEntry=funiVclStatsEntry, funiIfConfSigSupport=funiIfConfSigSupport, funiIfRxFcsErrFrames=funiIfRxFcsErrFrames, funiVclTxTotalOctets=funiVclTxTotalOctets, funiIfStatsTable=funiIfStatsTable, funiVclStatsOptionalGroup=funiVclStatsOptionalGroup, funiVclRxClp0Frames=funiVclRxClp0Frames, funiVclTxOamFrames=funiVclTxOamFrames, funiMIBGroups=funiMIBGroups, atmForum=atmForum, funiMIBCompliance=funiMIBCompliance, funiIfConfSigVci=funiIfConfSigVci, PYSNMP_MODULE_ID=atmfFuniMIB, funiIfConfEntry=funiIfConfEntry, funiIfRxTooShortFrames=funiIfRxTooShortFrames, funiIfEstablishedSvccs=funiIfEstablishedSvccs, funiMIBCompliances=funiMIBCompliances, atmForumNetworkManagement=atmForumNetworkManagement, funiVclTxTotalFrames=funiVclTxTotalFrames, 
funiIfTxDiscardedFrames=funiIfTxDiscardedFrames, funiVclFaVci=funiVclFaVci, funiMIBConformance=funiMIBConformance, funiIfConfOamSupport=funiIfConfOamSupport, funiVclRxClp0Octets=funiVclRxClp0Octets, funiIfRxAbortedFrames=funiIfRxAbortedFrames, funiMIBObjects=funiMIBObjects)
| 118.766917 | 2,273 | 0.746961 | 288 | 0.018232 | 0 | 0 | 0 | 0 | 0 | 0 | 3,167 | 0.200494 |
10c86dfcee3c5238af99d41d7bb192132b1b5b75 | 10,337 | py | Python | fiction/plaza.py | cloudRoutine/curveship | d46def2cbc5b1de6692c410e018d8e23cb477286 | [
"0BSD"
] | 1 | 2020-05-16T12:03:34.000Z | 2020-05-16T12:03:34.000Z | fiction/plaza.py | ziz/curveship | 17479312174d0ee09abce730e927a58c3967eda7 | [
"0BSD"
] | null | null | null | fiction/plaza.py | ziz/curveship | 17479312174d0ee09abce730e927a58c3967eda7 | [
"0BSD"
] | 1 | 2022-02-26T16:01:26.000Z | 2022-02-26T16:01:26.000Z | """Plaza
Items (in particular, Things and Rooms) representing the setting of Lost One."""
__author__ = 'Nick Montfort'
__copyright__ = 'Copyright 2011 Nick Montfort'
__license__ = 'ISC'
__version__ = '0.5.0.0'
__status__ = 'Development'
from item_model import Room, Thing
items = [
Room('@plaza_center',
article='the',
called='center of the plaza',
referring='center broad plaza of | plaza americas center middle',
sight="""
[*'s] senses [hum/ing/2/v] as [*/s] [view/v] [@plaza_center/o]
the morning [conclude/1/ed/v]
it [is/1/v] midday [now]
""",
exits={
'north':'@plaza_n',
'northeast':'@plaza_ne',
'east':'@plaza_e',
'southeast':'@plaza_se',
'south':'@plaza_s',
'southwest':'@plaza_sw',
'west':'@plaza_w',
'northwest':'@plaza_nw'},
view={
'@plaza_n': (.5, 'to the north'),
'@plaza_ne': (.5, 'to the northeast'),
'@plaza_e': (.5, 'to the east'),
'@plaza_se': (.5, 'to the southeast'),
'@plaza_s': (.5, 'to the south'),
'@plaza_sw': (.5, 'to the southwest'),
'@plaza_w': (.5, 'to the west'),
'@plaza_nw': (.5, 'to the northwest')}),
Room('@plaza_n',
article='the',
called='northern area',
referring='broad plaza of northern | plaza americas part expanse space',
sight="""
the space north of the plaza's center, which [is/1/v] particularly
barren of vegetation and ornament""",
exits={
'east':'@plaza_ne',
'southeast':'@plaza_e',
'south':'@plaza_center',
'west':'@plaza_nw',
'southwest':'@plaza_w',},
view={
'@plaza_ne': (.5, 'to the east'),
'@plaza_e': (.5, 'to the southeast'),
'@plaza_center': (.5, 'to the south'),
'@plaza_nw': (.5, 'to the west'),
'@plaza_w': (.5, 'to the southwest'),
'@plaza_se': (.25, 'off toward the southeast'),
'@plaza_s': (.25, 'across the plaza'),
'@plaza_sw': (.25, 'off toward the southwest')}),
Thing('@rock in @plaza_n',
article='a',
called=' rock',
referring='fist-sized fist sized | rock stone',
sight='a fist-sized rock',
prominence=0.3),
Thing('@statue part_of @plaza_n',
article='a',
called='statue',
referring='marble | likeness Einstein',
sight="""
[*/s] [see/v] a marble likeness of Einstein
there [is/1/v] almost no hint [here] of the playful, disheveled
scientist so often seen in the photographs that were popular in the
early twenty-first century""",
qualities=['stone'],
prominence=0.8),
Room('@plaza_ne',
article='the',
called='northeastern area',
referring=('broad of northeastern | plaza americas part side ' +
'expanse space'),
sight="the space northeast of the plaza's center",
exits={
'south':'@plaza_e',
'southwest':'@plaza_center',
'west':'@plaza_n'},
view={
'@plaza_e': (.5, 'to the south'),
'@plaza_center': (.5, 'to the southwest'),
'@plaza_n': (.5, 'to the west'),
'@plaza_nw': (.25, 'to the far west'),
'@plaza_w': (.25, 'off toward the west'),
'@plaza_sw': (.25, 'across the plaza'),
'@plaza_s': (.25, 'off toward the south'),
'@plaza_se': (.25, 'to the far south')}),
Room('@plaza_e',
article='the',
called='eastern area',
referring='broad of eastern | plaza americas part side expanse space',
sight="the space east of the plaza's center",
exits={
'north':'@plaza_ne',
'south':'@plaza_se',
'southwest':'@plaza_s',
'west':'@plaza_center',
'northwest':'@plaza_n'},
view={
'@plaza_ne': (.5, 'to the north'),
'@plaza_center': (.5, 'to the west'),
'@plaza_se': (.5, 'to the south'),
'@plaza_n': (.5, 'to the northwest'),
'@plaza_s': (.5, 'to the southwest'),
'@plaza_nw': (.25, 'off toward the northwest'),
'@plaza_w': (.25, 'across the plaza'),
'@plaza_sw': (.25, 'off toward the southwest')}),
Thing('@shredded_shirt in @plaza_e',
article='a',
called='shredded shirt',
referring=('shredded torn flesh-colored flesh colored useless of | ' +
'cloth shirt mess'),
sight='a useless mess of flesh-colored cloth',
qualities=['clothing', 'trash'],
prominence=0.3),
Thing('@newspaper_sheet in @plaza_e',
article='a',
called=' newspaper (sheet)',
referring='news newspaper | sheet page paper newspaper',
sight="""
there [are/2/v] summary texts LEADER WORKING THROUGH NIGHT FOR COUNTRY,
MONUMENT NEARS COMPLETION, and PURITY ACCOMPLISHED
""",
qualities=['trash'],
prominence=0.3),
Thing('@fountain part_of @plaza_e',
article='a',
called='fountain',
referring='rectangular plain | fountain basin jet',
sight='a single jet [fan/1/v] out, feeding a basin',
prominence=0.8),
Room('@plaza_se',
article='the',
called='southeastern area',
referring=('broad plaza of southeastern | plaza americas part ' +
'expanse space'),
sight="the space southeast of the plaza's center",
exits={
'north':'@plaza_e',
'west':'@plaza_s',
'northwest':'@plaza_center'},
view={
'@plaza_e': (.5, 'to the north'),
'@plaza_s': (.5, 'to the west'),
'@plaza_center': (.5, 'to the northwest'),
'@plaza_sw': (.25, 'to the far west'),
'@plaza_w': (.25, 'off to the west'),
'@plaza_ne': (.25, 'to the far north'),
'@plaza_n': (.25, 'off to the north'),
'@plaza_nw': (.25, 'across the plaza')}),
Thing('@scrap in @plaza_se',
article='a',
called='plastic scrap',
referring='plastic black | scrap',
sight='something that was perhaps once part of a black plastic bag',
qualities=['trash'],
prominence=0.3),
Room('@plaza_s',
article='the',
called='southern area',
referring=('broad plaza of southern | plaza americas part ' +
'expanse space'),
sight="the space south of the plaza's center",
exits={
'north':'@plaza_center',
'northeast':'@plaza_e',
'northwest':'@plaza_w',
'east':'@plaza_se',
'west':'@plaza_sw'},
view={
'@plaza_se': (.5, 'to the east'),
'@plaza_e': (.5, 'to the northeast'),
'@plaza_center': (.5, 'to the north'),
'@plaza_sw': (.5, 'to the west'),
'@plaza_w': (.5, 'to the northwest'),
'@plaza_ne': (.25, 'off toward the northeast'),
'@plaza_n': (.25, 'across the plaza'),
'@plaza_nw': (.25, 'off toward the northwest')}),
Thing('@obelisk part_of @plaza_s',
article='an',
called='obelisk',
referring='| obelisk',
sight='the stone pointing the way it has for centuries',
qualities=['stone'],
prominence=1.0),
Room('@plaza_sw',
article='the',
called='southwestern area',
referring=('broad plaza of southwestern | plaza americas part ' +
'expanse space'),
sight="the space southwest of the plaza's center",
exits={
'north':'@plaza_w',
'northeast':'@plaza_center',
'east':'@plaza_s'},
view={
'@plaza_w': (.5, 'to the north'),
'@plaza_s': (.5, 'to the east'),
'@plaza_center': (.5, 'to the northeast'),
'@plaza_se': (.25, 'to the far east'),
'@plaza_e': (.25, 'off to the east'),
'@plaza_nw': (.25, 'to the far north'),
'@plaza_n': (.25, 'off to the north'),
'@plaza_ne': (.25, 'across the plaza')}),
Thing('@candy_wrapper in @plaza_sw',
article='a',
called='candy wrapper',
referring="candy commodity's | wrapper husk",
sight="a commodity's husk",
qualities=['trash'],
prominence=0.3),
Room('@plaza_w',
article='the',
called='western area',
referring='broad plaza of western | plaza americas part expanse space',
sight="the space west of the plaza's center",
exits={
'north':'@plaza_nw',
'east':'@plaza_center',
'south':'@plaza_sw',
'northeast':'@plaza_n',
'southeast':'@plaza_s'},
view={
'@plaza_nw': (.5, 'to the north'),
'@plaza_center': (.5, 'to the east'),
'@plaza_sw': (.5, 'to the south'),
'@plaza_n': (.5, 'to the northeast'),
'@plaza_s': (.5, 'to the southeast'),
'@plaza_ne': (.25, 'off toward the northeast'),
'@plaza_e': (.25, 'across the plaza'),
'@plaza_se': (.25, 'off toward the southeast')}),
Thing('@smashed_cup in @plaza_w',
article='a',
called='smashed cup',
referring='smashed paper drinking | cup vessel',
sight='what was once a paper drinking vessel',
qualities=['trash'],
prominence=0.3),
Thing('@tree part_of @plaza_w',
article='a',
called='tree',
referring='large immense sprawling |',
sight='a tree sprawling by itself on the west side of the plaza',
prominence=1.0),
Room('@plaza_nw',
article='the',
called='northwestern area',
referring=('broad plaza of northwestern | plaza americas part ' +
'expanse space'),
sight="the space northwest of the plaza's center",
exits={
'east':'@plaza_n',
'southeast':'@plaza_center',
'south':'@plaza_w'},
view={
'@plaza_w': (.5, 'to the south'),
'@plaza_n': (.5, 'to the east'),
'@plaza_center': (.5, 'to the southeast'),
'@plaza_ne': (.25, 'to the far east'),
'@plaza_e': (.25, 'off to the east'),
'@plaza_sw': (.25, 'to the far south'),
'@plaza_s': (.25, 'off to the south'),
'@plaza_se': (.25, 'across the plaza')})]
| 34.228477 | 80 | 0.528006 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,011 | 0.581503 |
10c9be2f6f73cff7dec3ae3bf47fff1f91431efb | 508 | py | Python | tests/r/test_us_pop.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | tests/r/test_us_pop.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | tests/r/test_us_pop.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.us_pop import us_pop
def test_us_pop():
  """Test module us_pop.py by downloading
   us_pop.csv and testing shape of
   extracted data has 22 rows and 2 columns
  """
  test_path = tempfile.mkdtemp()
  try:
    x_train, metadata = us_pop(test_path)
    assert x_train.shape == (22, 2)
  finally:
    # Always remove the download directory — on success as well as failure.
    # (The original cleaned up only on failure and re-raised via the invalid
    # expression ``raise()``, which raises a TypeError that masks the real
    # assertion error.)
    shutil.rmtree(test_path)
| 21.166667 | 43 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.244094 |
10ca9cc7c816c201420d7b6610797d2bfb959cd4 | 2,407 | py | Python | 2021/python/dec04/solution.py | razzlestorm/advent-of-code | 288488539ed64e078368ac012b7f794faa4776ba | [
"MIT"
] | null | null | null | 2021/python/dec04/solution.py | razzlestorm/advent-of-code | 288488539ed64e078368ac012b7f794faa4776ba | [
"MIT"
] | null | null | null | 2021/python/dec04/solution.py | razzlestorm/advent-of-code | 288488539ed64e078368ac012b7f794faa4776ba | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import List
FILE_DIR = Path(__file__).parent
def create_boards(boards: List[str]) -> List[str]:
    """Parse each newline-separated board string into a grid.

    Every raw board becomes a list of rows, each row a list of the
    whitespace-separated number tokens on that line.
    """
    return [[row.split() for row in raw.split('\n')] for raw in boards]
def get_bingo_rankings(boards: List[str], numbers: List[str]):
    """Rank bingo boards by how many drawn numbers they need to win.

    For each board, the pool of drawn numbers grows one draw at a time; the
    first time any row or column of the board is fully contained in the pool,
    the board is recorded under the key ``len(pool)``.  Returns a dict mapping
    that draw count to the board's index (a later board winning at the same
    count overwrites an earlier one, matching the original behavior).
    """
    ranking = {}
    for idx, board in enumerate(boards):
        # Transposing once per board gives the columns; the original rebuilt
        # the transpose on every pool size.
        columns = [list(col) for col in zip(*board)]
        # The pool must eventually include *all* numbers: the original loop
        # condition ``nj < len(numbers)`` stopped one short, so a board that
        # completes only on the final draw was never ranked (off-by-one).
        for nj in range(5, len(numbers) + 1):
            pool = set(numbers[:nj])
            if any(set(line) <= pool for line in board + columns):
                ranking[nj] = idx
                break  # board is ranked; no need to keep drawing for it
    return ranking
def get_unmarked_nums(checklist, board) -> List:
    """Collect every number on the board that was never marked (i.e. is
    absent from ``checklist``), in board reading order."""
    marked = set(checklist)
    return [num for row in board for num in row if num not in marked]
if __name__ == "__main__":
    # Puzzle input: first blank-line-separated chunk is the comma-separated
    # draw order, the remaining chunks are the 5x5 boards.
    DATA = (FILE_DIR / "input.txt").read_text().strip()
    data = [x for x in DATA.split('\n\n')]
    numbers = data[0].split(",")
    boards = data[1:]
    d_boards = create_boards(boards)
    # ranks maps "number of draws until win" -> board index.
    ranks = get_bingo_rankings(d_boards, numbers)
    fastest = d_boards[ranks[min(ranks.keys())]]
    slowest = d_boards[ranks[max(ranks.keys())]]
    # Pools of drawn numbers at the moment each board won.
    marked1 = numbers[:min(ranks.keys())]
    marked2 = numbers[:max(ranks.keys())]
    # Part 1: sum of unmarked numbers on the first winning board times the
    # last number drawn for it.
    sol_one = sum([int(x) for x in get_unmarked_nums(marked1, fastest)]) * int(marked1[-1])
    print(sol_one) #72770
    # Part 2: same score for the last board to win.
    sol_two = sum([int(x) for x in get_unmarked_nums(marked2, slowest)]) * int(marked2[-1])
    print(sol_two) # 13912
10cb9838854d193a4ba4ad92bc63d09d7e022be4 | 1,804 | py | Python | app.py | serbathome/words | 052a50168d1ce6a5c6df3b18403f75042b0272c9 | [
"Apache-2.0"
] | null | null | null | app.py | serbathome/words | 052a50168d1ce6a5c6df3b18403f75042b0272c9 | [
"Apache-2.0"
] | null | null | null | app.py | serbathome/words | 052a50168d1ce6a5c6df3b18403f75042b0272c9 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template, jsonify
import random as r
import csv
app = Flask(__name__)

# Global dictionary of (russian, english) word pairs, bucketed by difficulty.
words = {}
words["easy"] = []
words["medium"] = []
words["hard"] = []
# Load the word list at import time from a semicolon-delimited CSV
# (column 0: russian word, column 1: english translation).  Difficulty is
# decided by the length of the russian word: <=4 easy, 5-8 medium, >8 hard.
with open('words.csv', newline='\n', encoding="UTF8") as csvfile:
    wordreader = csv.reader(csvfile, delimiter=';')
    for row in wordreader:
        rus = row[0]
        eng = row[1]
        if len(rus) <= 4:
            words["easy"].append((rus, eng))
        if len(rus) >= 5 and len(rus) <= 8:
            words["medium"].append((rus, eng))
        if len(rus) > 8:
            words["hard"].append((rus, eng))
def scramble_word(word):
    """Return a random permutation of ``word`` that differs from the original.

    When no different permutation exists (empty word, single letter, or all
    letters identical) the word is returned unchanged — the original
    implementation looped forever in those cases, since shuffling could never
    produce a different string.
    """
    if len(set(word)) < 2:
        return word
    letters = list(word)
    scramble = word
    while scramble == word:
        r.shuffle(letters)
        scramble = "".join(letters)
    return scramble
@app.route('/')
def index():
    """Serve the start page with a random medium-difficulty word."""
    # random.randint's upper bound is inclusive, so it must be len - 1: the
    # original used len(words["medium"]) and could raise IndexError.
    rand = r.randint(0, len(words["medium"]) - 1)
    word, eng = words["medium"][rand]
    scrable = scramble_word(word)
    result = {"word": word, "scramble": scrable, "eng": eng}
    return render_template("index.html", result=result)
@app.route('/next/<level>')
def next(level):
    """Return a JSON payload with a fresh word for the requested level.

    ``level`` is "1" (easy), "2" (medium) or "3" (hard); any other numeric
    value yields empty strings, and a non-numeric value raises ValueError —
    both as in the original.  (The name shadows the builtin ``next`` but is
    kept for route-handler compatibility.)
    """
    level_names = {1: "easy", 2: "medium", 3: "hard"}
    word = ""
    eng = ""
    name = level_names.get(int(level))
    if name is not None:
        pool = words[name]
        # random.randint's upper bound is inclusive; the original used
        # len(pool) in all three branches and could index past the end.
        word, eng = pool[r.randint(0, len(pool) - 1)]
    scramble = scramble_word(word)
    result = {"word": word, "scramble": scramble, "eng": eng}
    return jsonify(result)
| 26.529412 | 65 | 0.553769 | 0 | 0 | 0 | 0 | 930 | 0.515521 | 0 | 0 | 294 | 0.162971 |
10cbc3f174660b6796631abe31b2d238854acb6c | 1,046 | py | Python | setup.py | cexiolabs/cexpay.support-telegram-bot | 84a3d0a5c95f2b43dcf276593d0a5f719f78f67d | [
"Apache-2.0"
] | null | null | null | setup.py | cexiolabs/cexpay.support-telegram-bot | 84a3d0a5c95f2b43dcf276593d0a5f719f78f67d | [
"Apache-2.0"
] | null | null | null | setup.py | cexiolabs/cexpay.support-telegram-bot | 84a3d0a5c95f2b43dcf276593d0a5f719f78f67d | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# The README doubles as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name='cexpay',
    version='2.0.4',
    description="A support bot for CEX Pay's products. See more https://developers.cexpay.io/.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/cexiolabs/cexpay.support-bot',
    author='Maksym Anurin',
    author_email='m.anurin@cexiolabs.com',
    license='Apache-2.0',
    # Ship every package except test packages.
    packages=find_packages(exclude=["*test*"]),
    install_requires=['requests'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.10',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries',
    ],
)
| 34.866667 | 96 | 0.647228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.557361 |
10cd63e9b5a20d471655425cd06fc64cdb6da4e4 | 2,471 | py | Python | newsthreads/sentences.py | microsoft/News-Threads | 77e8db64f71ad7f109f6a62898f521f346b28be2 | [
"MIT"
] | 5 | 2020-06-27T01:25:34.000Z | 2021-04-16T05:08:07.000Z | newsthreads/sentences.py | microsoft/News-Threads | 77e8db64f71ad7f109f6a62898f521f346b28be2 | [
"MIT"
] | null | null | null | newsthreads/sentences.py | microsoft/News-Threads | 77e8db64f71ad7f109f6a62898f521f346b28be2 | [
"MIT"
] | 5 | 2020-07-31T10:21:27.000Z | 2021-11-10T08:32:26.000Z | # File #1: Fragment, FragmentId
# File #2: fragmentid, docid, date, domain, fragmentposition
import configparser
from typing import Dict, List, Tuple
import csv
import nltk.data
import string
def process_sentences(config: configparser.SectionProxy):
    """Tokenize document texts into de-duplicated sentence fragments.

    Reads the column-filtered CSV named in ``config`` (columns: docid, date,
    title, domain, text), splits each document into lowercased,
    punctuation-free sentences, keeps sentences longer than eight words, and
    writes three CSVs: per-document sentence ids with positions, per-document
    metadata, and the global sentence-id -> text lookup.

    All files are now opened via context managers; the original leaked the
    handles of any file where an exception occurred before ``close()``.
    """
    csv.field_size_limit(2**23)
    nltk.download('punkt')
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    # Strips all ASCII punctuation in one pass.
    translator = str.maketrans('', '', string.punctuation)

    doc_meta: Dict[str, Tuple[str, str]] = {}    # docid -> (date, domain)
    hash_sent: Dict[str, Dict[str, int]] = {}    # docid -> {sentence: position}
    dict_sent: Dict[str, int] = {}               # sentence -> global id

    with open(config['IntermediateColumnFilteredFileName'], 'r', encoding='utf-8') as fin:
        line_ctr = 0
        for (document_id, first_scraped_date, title, domain, text) in csv.reader(fin):
            line_ctr += 1
            if line_ctr % 10000 == 0:
                print(line_ctr)  # progress indicator for large corpora
            sentences = sent_detector.tokenize(text.lower())
            stripped: List[str] = [s.translate(translator) for s in sentences]
            doc_meta[document_id] = (first_scraped_date, domain)
            hash_sent[document_id] = {}
            sent_num = 0
            for sent in stripped:
                # Only keep substantial sentences (more than 8 words).
                if len(sent.split()) > 8:
                    hash_sent[document_id][sent] = sent_num
                    sent_num += 1
                    if sent not in dict_sent:
                        dict_sent[sent] = len(dict_sent)

    print('writing out')
    # From the perspective of each doc, emit all of its retained sentences.
    with open(config['OutputSentenceFragmentSummariesFileName'], 'w', encoding='utf-8') as fout, \
            open(config['OutputSentenceFragmentMetaFileName'], 'w') as fout2:
        fout.write('docid,sent,sentnum\n')
        fout2.write('docid,date,domain\n')
        for doc_id, sent_map in hash_sent.items():
            for sent, pos in sent_map.items():
                # Guard kept from the original; every retained sentence was
                # also registered in dict_sent, so it is always true.
                if sent in dict_sent:
                    fout.write(doc_id + ',' + str(dict_sent[sent]) + ',' + str(pos) + '\n')
                    fout2.write(doc_id + ',' + doc_meta[doc_id][0] + ',' + doc_meta[doc_id][1] + '\n')

    with open(config['OutputSentenceIdLookupFileName'], 'w', encoding='utf-8') as fout:
        fout.write("_sentence_id,_text\n")
        for sent, sent_id in dict_sent.items():
            fout.write(str(sent_id) + ',' + str(sent) + '\n')
# sentence and ngram
| 35.811594 | 111 | 0.61554 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 570 | 0.230676 |
10ceb5b8c333d0a852efe89da7acab8411d380e4 | 1,394 | py | Python | reconcile/test/test_utils_ocm_versions.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_utils_ocm_versions.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_utils_ocm_versions.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | import pytest
from reconcile.utils.ocm import OCM
@pytest.fixture
def ocm(mocker):
    """Build an OCM client with all network-touching initializers mocked out."""
    mocker.patch("reconcile.utils.ocm.OCM._init_access_token")
    mocker.patch("reconcile.utils.ocm.OCM._init_request_headers")
    mocker.patch("reconcile.utils.ocm.OCM._init_clusters")
    mocker.patch("reconcile.utils.ocm.OCM._init_version_gates")
    return OCM("name", "url", "tid", "turl", "ot")
def test_no_blocked_versions(ocm):
    """With no blocked versions configured, nothing is blocked."""
    result = ocm.version_blocked("1.2.3")
    assert result is False
def test_version_blocked(ocm):
    """An exact match against the blocked list is blocked."""
    ocm.blocked_versions = ["1.2.3"]
    result = ocm.version_blocked("1.2.3")
    assert result is True
def test_version_not_blocked(ocm):
    """A version absent from the blocked list is not blocked."""
    ocm.blocked_versions = ["1.2.3"]
    result = ocm.version_blocked("1.2.4")
    assert result is False
def test_version_blocked_multiple(ocm):
    """Any member of a multi-entry blocked list blocks its version."""
    ocm.blocked_versions = ["1.2.3", "1.2.4"]
    result = ocm.version_blocked("1.2.3")
    assert result is True
def test_version_blocked_regex(ocm):
    """Blocked entries are regexes: a matching pre-release version is blocked."""
    ocm.blocked_versions = [r"^.*-fc\..*$"]
    result = ocm.version_blocked("1.2.3-fc.1")
    assert result is True
def test_version_not_blocked_regex(ocm):
    """A version not matching the blocked regex stays unblocked."""
    ocm.blocked_versions = [r"^.*-fc\..*$"]
    result = ocm.version_blocked("1.2.3-rc.1")
    assert result is False
def test_version_invalid_regex(ocm):
    """Constructing OCM with an invalid blocked-version regex raises.

    The ``ocm`` fixture is requested only for its mocker patches.
    """
    with pytest.raises(TypeError):
        OCM("name", "url", "tid", "turl", "ot", blocked_versions=["["])
| 26.807692 | 71 | 0.689383 | 0 | 0 | 0 | 0 | 335 | 0.240316 | 0 | 0 | 339 | 0.243185 |
10cef77bbd20a3864270cb1eae1c4d0659257eb3 | 97,317 | py | Python | test_agent/jianyang_ai.py | bbpp222006/maj_ai | 48cf458f621ef7d688757efc98747d20d9f60f3e | [
"MIT"
] | null | null | null | test_agent/jianyang_ai.py | bbpp222006/maj_ai | 48cf458f621ef7d688757efc98747d20d9f60f3e | [
"MIT"
] | null | null | null | test_agent/jianyang_ai.py | bbpp222006/maj_ai | 48cf458f621ef7d688757efc98747d20d9f60f3e | [
"MIT"
] | 1 | 2021-03-17T16:26:32.000Z | 2021-03-17T16:26:32.000Z | # -*- coding: utf-8 -*-
import datetime
import os
import pickle
import random
from copy import deepcopy
from time import sleep
import numpy as np
from test_agent.ai_interface import AIInterface
from test_agent.utils.wait_calc import WaitCalc
from test_agent.utils.win_calc import WinCalc
from client.mahjong_meld import Meld
from client.mahjong_player import OpponentPlayer
from client.mahjong_tile import Tile
__author__ = "Jianyang Tang"
__email__ = "jian4yang2.tang1@gmail.com"
class OppPlayer(OpponentPlayer):
    """Opponent model used by the AI.

    Accumulates what is known about one opponent (safe tiles, discard
    history, melds) and turns the per-discard classifier outputs into a
    concrete list of predicted waiting tiles plus danger heuristics.
    """

    # Tenhou rank -> confidence threshold used in ``waiting_prediction``:
    # higher-ranked opponents' discards are treated as more informative.
    level_dict = {'新人': 91, '9級': 91, '8級': 91, '7級': 91, '6級': 91, '5級': 91, '4級': 91, '3級': 91, '2級': 92,
                  '1級': 94, '初段': 96, '二段': 97, '三段': 98, '四段': 99,
                  '五段': 100, '六段': 101, '七段': 102, '八段': 103, '九段': 104, '十段': 105, '天鳳位': 106}

    def __init__(self, seat, dealer_seat):
        super().__init__(seat, dealer_seat)
        self.safe_tiles = []      # 34-format tiles known safe against this opponent
        self.prios_history = []   # classifier output per discard, newest last
        self.discard_types = []

    def init_state(self):
        """Reset per-round bookkeeping on top of the base player reset."""
        super().init_state()
        self.safe_tiles = []
        self.prios_history = []
        self.discard_types = []

    def add_prediction(self, prios):
        """Record one classifier output (appended after each discard)."""
        self.prios_history.append(prios)

    def add_safe_tile(self, tile34):
        """Mark a 34-format tile as safe against this opponent."""
        self.safe_tiles.append(tile34)

    def waiting_feature_212(self, self_index):
        """Assemble the 212-dim feature vector for the non-riichi waiting
        classifier: 89 open-state + 34 revealed + 34 opponent-discard counts
        + 21 chow-meld flags + 34 pon/kan-meld flags."""
        res = self.open_state_f_lst  # 89
        res += self.game_table.revealed_feature  # 34
        opponents_discard = [t for i in range(1, 4) for t in self.game_table.get_player((i + self_index) % 4).discard34]
        res += [min(opponents_discard.count(tile) / 4, 1) for tile in range(34)]  # 34
        opponents_melds = []
        for i in range(1, 4):
            opp = self.game_table.get_player((i + self_index) % 4)
            opponents_melds += opp.meld34 + opp.minkan34 + opp.ankan34
        res += [Tile.index_to_chow[chow] in opponents_melds for chow in range(21)]  # 21
        res += [[pon] * 3 in opponents_melds or [pon] * 4 in opponents_melds for pon in range(34)]  # 34
        return np.array(res)

    def richii_feature_225(self, self_index):
        """Assemble the 225-dim feature vector for the riichi waiting
        classifier: 68 open-state + 34 safe-tile flags + 34 revealed + 34
        opponent-discard counts + 21 chow-meld flags + 34 pon/kan flags."""
        res = self.open_state_f_richii  # 68
        res += [tile in self.safe_tiles for tile in range(34)]  # 34
        res += self.game_table.revealed_feature  # 34
        opponents_discard = [t for i in range(1, 4) for t in self.game_table.get_player((i + self_index) % 4).discard34]
        res += [min(opponents_discard.count(tile) / 4, 1) for tile in range(34)]  # 34
        opponents_melds = []
        for i in range(1, 4):
            opp = self.game_table.get_player((i + self_index) % 4)
            opponents_melds += opp.meld34 + opp.minkan34 + opp.ankan34
        res += [Tile.index_to_chow[chow] in opponents_melds for chow in range(21)]  # 21
        res += [([pon] * 3) in opponents_melds or ([pon] * 4) in opponents_melds for pon in range(34)]  # 34
        return np.array(res)

    @property
    def is_valid(self):
        return 0 in self.discard_types

    @property
    def waiting_prediction(self):
        """Combine recent classifier outputs into a short list of tiles this
        opponent is most plausibly waiting on (empty if judged harmless)."""
        prios = {tile: 0 for tile in range(34)}
        safe_tiles = self.abs_safe_tiles
        if len(self.prios_history) == 0:
            return []
        # Weight recent discards more: the factor decays linearly per step.
        factor = 1
        for p in self.prios_history[-1::-1]:
            v = p[0]
            if factor <= 0:
                break
            for tile in range(34):
                if tile in safe_tiles:
                    continue
                prios[tile] += v[tile] * factor
            factor -= 0.280
        prios = sorted(prios.items(), key=lambda x: -x[1])
        cnt, res = 0, []
        # How many candidates to consider depends on how threatening the
        # opponent looks (riichi/open bonus melds > many melds > rank).
        if self.dangerous:
            prios = prios[0:7]
        elif self.meld_len >= 2:
            prios = prios[0:6 if self.turn_num <= 11 else 5]
        elif self.meld_len == 1:
            prios = prios[0:5 if self.turn_num <= 11 else 4]
        elif self.prios_history[-1][1] <= self.level_dict[self.level]:
            prios = prios[0:4 if self.turn_num <= 11 else 3]
        else:
            return []
        for r in prios:
            tile = r[0]
            if tile in safe_tiles or tile in res or r[1] == 0 or self.game_table.revealed_tiles[tile] >= 4:
                continue
            if cnt == 0:
                res.append(tile)
                cnt += 1
            elif cnt == 1:
                # If the new tile is suji (distance 3, same suit) of an
                # already-picked danger, it joins for free; otherwise it uses
                # the second candidate slot.
                for danger in res:
                    if tile < 27 and danger // 9 == tile // 9 and abs(tile - danger) == 3:
                        res.append(tile)
                        break
                else:
                    res.append(tile)
                    cnt += 1
            else:
                for danger in res:
                    if tile < 27 and danger // 9 == tile // 9 and abs(tile - danger) == 3:
                        res.append(tile)
        return res

    @property
    def is_freezing(self):
        # Opponent declared riichi very recently and the round is young.
        return self.reach_status and abs(self.reach_time - len(self.discard34)) <= 2 and self.turn_num < 13

    @property
    def dangerous(self):
        # Riichi, or many exposed bonus tiles in the mid/late game.
        return self.reach_status or (self.cnt_open_bonus_tiles > 2 and self.turn_num > 6)

    @property
    def cnt_open_bonus_tiles(self):
        """Count bonus (dora) tiles visible in this opponent's melds,
        including red fives and value-honor triplets."""
        cnt = 0
        bts = self.game_table.bonus_tiles
        for meld in self.total_melds34:
            for tile in meld:
                if tile in bts:
                    cnt += 1
        for meld in self.meld136:
            if any(rb in meld.tiles for rb in Tile.RED_BONUS):
                cnt += 1
        for meld in self.total_melds34:
            if meld[0] == meld[1] and meld[0] > 26:
                cnt += (meld[0] in Tile.THREES) + (meld[0] == self.round_wind) + (meld[0] == self.player_wind)
        return cnt

    @property
    def enough_fan_to_win(self):
        # A value-honor triplet alone guarantees a yaku for an open hand.
        for meld in self.total_melds34:
            if meld[0] == meld[1] and meld[0] in self.bonus_honors:
                return True
        return False

    @property
    def abs_safe_tiles(self):
        """All tiles absolutely safe against this opponent (declared safe
        tiles plus their own discards), de-duplicated."""
        return list(set(self.safe_tiles + self.discard34))

    @property
    def is_reach_dealer(self):
        return self.reach_status and self.is_dealer

    @property
    def gin_safe_tiles(self):
        """Suji-safe tiles derived from the opponent's own discards.

        Iterates the three number-suit bases (man 0, pin 9, sou 18).  The
        original used ``range(0, 9, 18)``, which only ever yields ``i == 0``
        and thus silently ignored the pin and sou suits.
        """
        res = []
        for i in range(0, 27, 9):
            if i in self.discard34 and i + 6 in self.discard34:
                res.append(i + 3)
            if i + 1 in self.discard34 and i + 7 in self.discard34:
                res.append(i + 4)
            if i + 2 in self.discard34 and i + 8 in self.discard34:
                res.append(i + 5)
            if i + 3 in self.discard34:
                res.append(i)
                res.append(i + 6)
            if i + 4 in self.discard34:
                res.append(i + 1)
                res.append(i + 7)
            if i + 5 in self.discard34:
                res.append(i + 2)
                res.append(i + 8)
        return res

    @property
    def relaxed_gin_safe_tiles(self):
        """Like ``gin_safe_tiles`` but based on all absolutely safe tiles,
        not just this opponent's own discards.  Same ``range(0, 9, 18)``
        suit-iteration bug fixed here as well."""
        res = []
        abs_safe = self.abs_safe_tiles
        for i in range(0, 27, 9):
            if i in abs_safe and i + 6 in abs_safe:
                res.append(i + 3)
            if i + 1 in abs_safe and i + 7 in abs_safe:
                res.append(i + 4)
            if i + 2 in abs_safe and i + 8 in abs_safe:
                res.append(i + 5)
            if i + 3 in abs_safe:
                res.append(i)
                res.append(i + 6)
            if i + 4 in abs_safe:
                res.append(i + 1)
                res.append(i + 7)
            if i + 5 in abs_safe:
                res.append(i + 2)
                res.append(i + 8)
        return res

    @property
    def meld_len(self):
        return len(self.meld136)

    @property
    def dangerous_type(self):
        """Guess which number suit (0=man, 1=pin, 2=sou) the opponent is
        collecting, or -1 when no single suit stands out."""
        if self.meld_len >= 2:
            meld_types = []
            for m in self.total_melds34:
                if m[0] // 9 < 3 and m[0] // 9 not in meld_types:
                    meld_types.append(m[0] // 9)
            if len(meld_types) == 1:
                return meld_types[0]
        if self.meld_len == 1 and len(self.discard34) > 8:
            meld = self.total_melds34[0]
            discard_geos = [0] * 3
            for d in self.discard34:
                if d < 27:
                    discard_geos[d // 9] += 1
            min_num = min(discard_geos)
            if min_num == 0:
                # A suit never discarded, matching (or honored by) the meld,
                # suggests a flush attempt.
                min_type = discard_geos.index(min_num)
                if meld[0] // 9 == min_type or meld[0] // 9 == 3:
                    return min_type
        if len(self.discard34) > 12:
            discard_geos = [0] * 3
            for d in self.discard34:
                if d < 27:
                    discard_geos[d // 9] += 1
            min_num = min(discard_geos)
            if min_num <= 1:
                return discard_geos.index(min_num)
        return -1
class EnsembleCLF:
    """Ensemble of pickled classifiers predicting an opponent's waiting
    tiles, with separate ensembles for riichi and non-riichi hands."""

    root_dir = os.path.dirname(os.path.abspath(__file__)) + "/utils/clfs/"
    RICHII = True
    NORMAL = True

    def __init__(self):
        # NOTE(security): pickle.load executes arbitrary code from the model
        # files; only trusted files must be shipped in utils/clfs/.
        # Files are now opened via context managers — the original
        # ``pickle.load(open(...))`` pattern leaked every file handle.
        if self.RICHII:
            self.clfs_richii = []
            for name in [f for f in os.listdir(self.root_dir) if 'R_(' in f]:
                with open(self.root_dir + name, 'rb') as fh:
                    self.clfs_richii.append(pickle.load(fh))
            print("{} richii classifiers loaded".format(len(self.clfs_richii)))
        if self.NORMAL:
            self.clfs_normal = []
            for name in [f for f in os.listdir(self.root_dir) if 'N_(' in f]:
                with open(self.root_dir + name, 'rb') as fh:
                    self.clfs_normal.append(pickle.load(fh))
            print("{} normal waiting classifiers loaded".format(len(self.clfs_normal)))

    def predict_normal_single_prio(self, input_f):
        """Vote over the normal-hand ensemble for a 212-dim feature vector.

        Returns ``[prios, none_count]`` where ``prios[tile]`` is the vote
        count normalized by 120 (presumably the shipped ensemble size — TODO
        confirm) and ``none_count`` is the number of classifiers that
        predicted no tile at all.
        """
        f = np.zeros((1, 212))
        f[0, ] = input_f
        times = [0] * 35
        for clf in self.clfs_normal:
            prd = clf.predict(f)
            predicted = [tile for tile in range(34) if prd[0, tile] and not f[0, tile]]
            if len(predicted) == 0:
                times[34] += 1
            for tile in predicted:
                times[tile] += 1
        return [[time / 120 for time in times[:34]], times[34]]

    def predict_richii_single_prio(self, input_f):
        """Vote over the riichi ensemble for a 225-dim feature vector.

        Normalized by 60 (presumably the riichi ensemble size — TODO
        confirm); second element is always 0 for interface symmetry with
        ``predict_normal_single_prio``.
        """
        f = np.zeros((1, 225))
        f[0, ] = input_f
        times = [0] * 34
        for clf in self.clfs_richii:
            prd = clf.predict(f)
            predicted = [tile for tile in range(34) if prd[0, tile] and not f[0, tile]]
            for tile in predicted:
                times[tile] += 1
        return [[time / 60 for time in times], 0]
class HandParti:
NORMAL, PINHU, NO19, PPH, SP, QH = 0, 1, 2, 3, 4, 5
names = ['NM', 'PH', 'NO19', 'PPH', '7P', 'QH']
total_forms = 6
prios = [QH, PPH, NO19, PINHU, SP]
second_prios = [NO19, PINHU]
def __init__(self, hand34, melds, forms, bonus_winds, revealed, bonus_tiles):
self.h34 = sorted(hand34)
self.m34 = melds
self.bonus_winds = bonus_winds
self.revealed = revealed
self.bonus_tiles = bonus_tiles
self.partitions = []
self.partitions_geo = []
self.hand_partition()
self.shantins = [10] * self.total_forms
self.best_partitions = [[]] * self.total_forms
funcs = [
self.cal_normal_shantin, self.cal_pinhu_shantin, self.cal_no19_shantin,
self.cal_pph_shantin, self.cal_sp_shantin, self.cal_qh_shantin
]
for i in range(6):
forms[i] and funcs[i]()
def __str__(self):
res = ""
for i in range(self.total_forms):
if self.shantins[i] < 10:
res += "{}: {} ".format(self.names[i], self.shantins[i])
return res
def __repr__(self):
return self.__str__()
@property
def current_shantin(self):
if self.shantins[self.SP] == 1:
return 1
else:
return self.shantins[self.NORMAL]
@property
def all_melds(self):
res = []
for partition in self.best_partitions[self.NORMAL]:
for meld in partition:
meld not in res and res.append(meld)
return res
@staticmethod
def partition(tiles):
len_t = len(tiles)
# no tiles of this type
if len_t == 0:
return [[]]
# one tile, or two tile which can be parsed into an open set
if len_t == 1 or (len_t == 2 and abs(tiles[0] - tiles[1]) < 3):
return [[tiles]]
# two separate tiles
if len_t == 2:
return [[tiles[0:1], tiles[1:2]]]
res = []
if tiles[0] == tiles[1] == tiles[2]:
tmp_res = HandParti.partition(tiles[3:])
if len(tmp_res) > 0:
for tile_set in tmp_res:
res.append([tiles[0:3]] + tile_set)
if tiles[0] + 1 in tiles and tiles[0] + 2 in tiles:
rec_tiles = deepcopy(tiles)
rec_tiles.remove(tiles[0])
rec_tiles.remove(tiles[0] + 1)
rec_tiles.remove(tiles[0] + 2)
tmp_res = HandParti.partition(rec_tiles)
if len(tmp_res) > 0:
for tile_set in tmp_res:
res.append([[tiles[0], tiles[0] + 1, tiles[0] + 2]] + tile_set)
if tiles[0] + 1 in tiles:
rec_tiles = deepcopy(tiles)
rec_tiles.remove(tiles[0])
rec_tiles.remove(tiles[0] + 1)
tmp_res = HandParti.partition(rec_tiles)
if len(tmp_res) > 0:
for tile_set in tmp_res:
res.append([[tiles[0], tiles[0] + 1]] + tile_set)
if tiles[0] + 2 in tiles:
rec_tiles = deepcopy(tiles)
rec_tiles.remove(tiles[0])
rec_tiles.remove(tiles[0] + 2)
tmp_res = HandParti.partition(rec_tiles)
if len(tmp_res) > 0:
for tile_set in tmp_res:
res.append([[tiles[0], tiles[0] + 2]] + tile_set)
if tiles[0] == tiles[1]:
tmp_res = HandParti.partition(tiles[2:])
if len(tmp_res) > 0:
for tile_set in tmp_res:
res.append([tiles[0:2]] + tile_set)
tmp_res = HandParti.partition(tiles[1:])
if len(tmp_res) > 0:
for tile_set in tmp_res:
res.append([tiles[0:1]] + tile_set)
tuned_res = []
min_len = min([len(p) for p in res])
for p in res:
if len(p) <= min_len and p not in tuned_res:
tuned_res.append(p)
return tuned_res
def dy_decided(self, tile136):
danyao_st = self.shantins[self.NO19]
return self.current_shantin == danyao_st == 2 and tile136 // 4 not in Tile.ONENINE
def qh_decided(self, num_turn):
if self.shantins[self.QH] <= 3 and self.shantins[self.QH] * 4 + num_turn <= 16:
return True
chr_pair = len([tile for tile in set(self.h34) if tile > 26 and self.h34.count(tile) >= 2])
chr_pair += len([m for m in self.m34 if m[0] > 26])
if chr_pair >= 2 and self.shantins[self.QH] <= self.shantins[self.NORMAL] + 1:
return True
return False
    @property
    def pp_decided(self):
        """Heuristic: should the hand commit to all-triplets (toitoi)?

        Based on the toitoi shanten and the quality of the pair structure.
        NOTE(review): relies on Tile.GOOD_PAIR semantics defined elsewhere.
        """
        # A pair is "dead" when the other two copies are already revealed.
        has_dead_pair = any(self.revealed[tile] == 2 for tile in self.pairs)
        has_bonus_pair = any(tile in self.bonus_tiles for tile in self.pairs)
        good_cnt = len([tile for tile in self.pairs if tile in Tile.GOOD_PAIR])
        if len(self.meld_kezi) > 0:
            # Already melded a triplet: commit earlier (2-shanten suffices).
            if self.shantins[self.PPH] <= 2:
                return True
        else:
            if self.shantins[self.PPH] <= 1:
                return True
            else:
                # Many pairs: commit unless one of them is dead.
                if len(self.pairs) == 5 or (len(self.pairs) == 4 and len(self.hand_kezi) == 1):
                    if has_dead_pair:
                        return False
                    if has_bonus_pair:
                        return True
                    if good_cnt >= len(self.pairs) - 2:
                        return True
                # Four pairs, no concealed triplet: stricter requirements.
                if len(self.pairs) == 4 and len(self.hand_kezi) == 0:
                    if self.shantins[self.PPH] > self.shantins[self.NORMAL] + 1:
                        return False
                    if has_dead_pair:
                        return False
                    if has_bonus_pair and good_cnt >= 2:
                        return True
                    if good_cnt >= 3:
                        return True
        return False
    @property
    def sp_decided(self):
        """Heuristic: should the hand commit to seven pairs (chiitoitsu)?

        Requires a fully concealed hand; with 5-6 pairs it commits, and
        with slightly fewer it commits only if toitoi was not chosen.
        """
        # Any melded triplet rules out the (closed-only) seven-pairs hand.
        if len(self.meld_kezi) > 0:
            return False
        if len(self.pairs) == 6 or (len(self.pairs) == 5 and len(self.hand_kezi) == 1):
            return True
        if len(self.pairs) == 5 or (len(self.pairs) == 4 and len(self.hand_kezi) == 1):
            # Borderline pair count: prefer toitoi when it is viable.
            if not self.pp_decided:
                return True
        return False
@property
def pairs(self):
return [tile for tile in set(self.h34) if self.h34.count(tile) == 2]
@property
def meld_kezi(self):
return [m[0] for m in self.m34 if m[0] == m[1]]
@property
def hand_kezi(self):
return [tile for tile in set(self.h34) if self.h34.count(tile) >= 3]
    def hand_partition(self):
        """Enumerate all partitions of the concealed hand into candidate sets.

        Each suit (man 0-8, pin 9-17, suo 18-26) is partitioned independently
        via ``partition``; honors (27-33) only group by identical tile since
        they form no sequences.  The cartesian product of the per-suit results
        is appended to ``self.partitions`` with matching geometry vectors in
        ``self.partitions_geo``.
        """
        p_man = self.partition([t for t in self.h34 if 0 <= t < 9])
        p_pin = self.partition([t for t in self.h34 if 9 <= t < 18])
        p_suo = self.partition([t for t in self.h34 if 18 <= t < 27])
        h_chr = [t for t in self.h34 if 27 <= t < 34]
        # Exactly one honor grouping exists: each distinct honor with its count.
        p_chr = [[[chr_tile] * h_chr.count(chr_tile) for chr_tile in set(h_chr)]]
        for pm in p_man:
            for pp in p_pin:
                for ps in p_suo:
                    for pc in p_chr:
                        self.partitions.append(pm + pp + ps + pc)
        for p in self.partitions:
            self.partitions_geo.append(self.geo_vec_normal(p))
    def cal_normal_shantin(self):
        """Standard-form shanten: minimum over all stored hand partitions.

        geo_vec layout (see geo_vec_normal): [singles, weak partials,
        good partials, pairs, chows, triplets].
        """
        def shantin_n(geo_index):
            geo_vec = self.partitions_geo[geo_index]
            # Sets still needed beyond melds and completed chows/triplets.
            needed_set = (4 - len(self.m34)) - geo_vec[4] - geo_vec[5]
            if geo_vec[3] > 0:
                # A pair is available to serve as the head.
                if geo_vec[1] + geo_vec[2] + geo_vec[3] - 1 >= needed_set:
                    return needed_set - 1
                else:
                    return 2 * needed_set - (geo_vec[1] + geo_vec[2] + geo_vec[3] - 1) - 1
            else:
                if geo_vec[1] + geo_vec[2] >= needed_set:
                    return needed_set
                else:
                    return 2 * needed_set - (geo_vec[1] + geo_vec[2])
        shantin_geo = [[index, shantin_n(index)] for index in range(len(self.partitions))]
        min_shantin = min(shantin_geo, key=lambda x: x[1])[1]
        self.shantins[self.NORMAL] = min_shantin
        # Keep every partition achieving the minimum for later analysis.
        self.best_partitions[self.NORMAL] = [self.partitions[g[0]] for g in shantin_geo if g[1] == min_shantin]
    def cal_pinhu_shantin(self):
        """Pinfu shanten; impossible (10) once any meld has been claimed."""
        if len(self.m34) > 0:
            self.shantins[self.PINHU] = 10
            return
        def shantin_p(geo_index):
            partition = self.partitions[geo_index]
            geo = self.geo_vec_pinhuh(partition)
            # Pinfu needs four chows; pairs of value honors are excluded
            # by geo_vec_pinhuh itself.
            need_chow = 4 - geo[4]
            if geo[1] + geo[2] >= need_chow:
                # Penalties: no pair head yet, and no two-sided partial.
                return (geo[3] == 0) + need_chow - 1 + (geo[2] == 0)
            else:
                return (geo[3] == 0) + need_chow - 1 + need_chow - geo[1] - geo[2]
        shantin_geo = [[index, shantin_p(index)] for index in range(len(self.partitions))]
        min_shantin = min(shantin_geo, key=lambda x: x[1])[1]
        self.shantins[self.PINHU] = min_shantin
        self.best_partitions[self.PINHU] = [self.partitions[g[0]] for g in shantin_geo if g[1] == min_shantin]
    def cal_no19_shantin(self):
        """Tanyao (no terminals/honors) shanten over all partitions.

        Unlike the normal form, partial sets touching terminals may need to
        be rebuilt from interior singles, so the count of usable singles
        (geo_vec[0]) enters the formula.
        """
        # A meld containing a terminal or honor rules tanyao out entirely.
        for m in self.m34:
            if any(tile in Tile.ONENINE for tile in m):
                self.shantins[self.NO19] = 10
                return
        def shantin_no19(geo_index):
            partition = self.partitions[geo_index]
            geo_vec = self.geo_vec_no19(partition)
            needed_set = (4 - len(self.m34)) - geo_vec[4] - geo_vec[5]
            if geo_vec[3] > 0:
                # A tanyao-legal pair can be the head.
                if geo_vec[1] + geo_vec[2] + geo_vec[3] - 1 >= needed_set:
                    return needed_set - 1
                else:
                    need_single = needed_set - (geo_vec[1] + geo_vec[2] + geo_vec[3] - 1)
                    if geo_vec[0] >= need_single:
                        return 2 * needed_set - (geo_vec[1] + geo_vec[2] + geo_vec[3] - 1) - 1
                    else:
                        # Not enough interior singles: extra draws required.
                        return 2 * needed_set - (geo_vec[1] + geo_vec[2] + geo_vec[3] - 1) - 1 + need_single - geo_vec[
                            0]
            else:
                if geo_vec[1] + geo_vec[2] >= needed_set:
                    return needed_set + (geo_vec[0] == 0)
                else:
                    need_single = needed_set - (geo_vec[1] + geo_vec[2]) + 1
                    if geo_vec[0] >= need_single:
                        return 2 * needed_set - (geo_vec[1] + geo_vec[2])
                    else:
                        return 2 * needed_set - (geo_vec[1] + geo_vec[2]) + need_single - geo_vec[0]
        shantin_geo = [[index, shantin_no19(index)] for index in range(len(self.partitions))]
        min_shantin = min(shantin_geo, key=lambda x: x[1])[1]
        self.shantins[self.NO19] = min_shantin
        self.best_partitions[self.NO19] = [self.partitions[g[0]] for g in shantin_geo if g[1] == min_shantin]
    def cal_pph_shantin(self):
        """Toitoi (all triplets) shanten from pair/triplet counts alone.

        NOTE(review): returns early (leaving the previous shanten value in
        place) if any meld is a sequence — presumably the field is
        pre-initialised elsewhere; confirm against the constructor.
        """
        for m in self.m34:
            if m[0] != m[1]:
                return
        num_kezi = len([tile for tile in set(self.h34) if self.h34.count(tile) == 3])
        num_pair = len([tile for tile in set(self.h34) if self.h34.count(tile) == 2])
        need_kezi = 4 - len(self.m34) - num_kezi
        if num_pair >= need_kezi + 1:
            # Enough pairs to upgrade into triplets plus keep one as the head.
            self.shantins[self.PPH] = need_kezi - 1
        else:
            self.shantins[self.PPH] = 2 * need_kezi - num_pair
        # The only meaningful partition for toitoi groups identical tiles.
        self.best_partitions[self.PPH] = [[[tile] * self.h34.count(tile) for tile in set(self.h34)]]
def cal_sp_shantin(self):
if len(self.m34) > 0:
self.shantins[self.SP] = 10
return
num_pair = len([tile for tile in set(self.h34) if self.h34.count(tile) >= 2])
self.shantins[self.SP] = 6 - num_pair
self.best_partitions[self.SP] = [[[tile] * self.h34.count(tile) for tile in set(self.h34)]]
    def cal_qh_shantin(self):
        """Half-flush shanten, minimised over candidate suits from qh_type().

        Tiles outside the candidate suit (and honors) are ignored by
        geo_vec_qh; singles of the right suit (geo_vec[0]) can be grown
        into new partial sets, hence the extra terms below.
        """
        q_type = self.qh_type()
        if len(q_type) == 0:
            self.shantins[self.QH] = 10
            return
        def shantin_n(par_index, tp):
            partition = self.partitions[par_index]
            geo_vec = self.geo_vec_qh(partition, tp)
            needed_set = (4 - len(self.m34)) - geo_vec[4] - geo_vec[5]
            if geo_vec[3] > 0:
                if geo_vec[1] + geo_vec[2] + geo_vec[3] - 1 >= needed_set:
                    return needed_set - 1
                else:
                    needed_open = needed_set - (geo_vec[1] + geo_vec[2] + geo_vec[3] - 1)
                    if needed_open > geo_vec[0]:
                        # Must draw fresh tiles of the suit before pairing them up.
                        return needed_set - 1 + needed_open + needed_open - geo_vec[0]
                    else:
                        return needed_set - 1 + needed_open
            else:
                if geo_vec[1] + geo_vec[2] >= needed_set:
                    return needed_set + (geo_vec[0] == 0)
                else:
                    needed_open = (needed_set - (geo_vec[1] + geo_vec[2]))
                    if geo_vec[0] > needed_open:
                        return needed_set + needed_open
                    else:
                        return needed_set + needed_open + needed_open - geo_vec[0]
        def shantin_qh(par_index):
            # Best candidate suit for this particular partition.
            return min([shantin_n(par_index, tp) for tp in q_type])
        shantin_geo = [[index, shantin_qh(index)] for index in range(len(self.partitions))]
        min_shantin = min(shantin_geo, key=lambda x: x[1])[1]
        self.shantins[self.QH] = min_shantin
        self.best_partitions[self.QH] = [self.partitions[g[0]] for g in shantin_geo if g[1] == min_shantin]
def geo_vec_pinhuh(self, p):
geo_vec = [0] * 6
def incre(set_type):
geo_vec[set_type] += 1
for m in p:
len(m) == 1 and incre(0)
len(m) == 2 and abs(m[0] - m[1]) == 0 and m[0] not in self.bonus_winds and incre(3)
len(m) == 2 and abs(m[0] - m[1]) == 1 and incre(2 if m[0] % 9 > 0 and m[1] % 9 < 8 else 1)
len(m) == 2 and abs(m[0] - m[1]) == 2 and incre(1)
len(m) == 3 and incre(5 if m[0] == m[1] else 4)
return geo_vec
def qh_type(self):
qh_type = []
if len(self.m34) > 0:
meld_types = []
for m in self.m34:
if m[0] // 9 == 3:
continue
if m[0] // 9 not in meld_types:
meld_types.append(m[0] // 9)
if len(meld_types) > 1:
self.shantins[self.QH] = 10
return []
else:
qh_type = meld_types
if (len(qh_type) == 0 and len(self.m34) > 0) or len(self.m34) == 0:
type_geo = [
len([t for t in self.h34 if 0 <= t < 9]),
len([t for t in self.h34 if 9 <= t < 18]),
len([t for t in self.h34 if 18 <= t < 27])
]
max_num = max(type_geo)
qh_type = [i for i in range(3) if type_geo[i] == max_num]
return qh_type
@staticmethod
def geo_vec_normal(p):
geo_vec = [0] * 6
def incre(set_type):
geo_vec[set_type] += 1
for m in p:
len(m) == 1 and incre(0)
len(m) == 2 and abs(m[0] - m[1]) == 0 and incre(3)
len(m) == 2 and abs(m[0] - m[1]) == 1 and incre(2 if m[0] % 9 > 0 and m[1] % 9 < 8 else 1)
len(m) == 2 and abs(m[0] - m[1]) == 2 and incre(1)
len(m) == 3 and incre(5 if m[0] == m[1] else 4)
return geo_vec
@staticmethod
def geo_vec_no19(p):
geo_vec = [0] * 6
def incre(set_type):
geo_vec[set_type] += 1
for m in p:
if m[0] > 26:
continue
len(m) == 1 and 0 < m[0] % 9 < 8 and incre(0)
len(m) == 2 and abs(m[0] - m[1]) == 0 and 0 < m[0] % 9 < 8 and incre(3)
len(m) == 2 and abs(m[0] - m[1]) == 1 and m[0] % 9 > 1 and m[1] % 9 < 7 and incre(2)
len(m) == 2 and abs(m[0] - m[1]) == 1 and (m[0] % 9 == 1 or m[1] % 9 == 7) and incre(1)
len(m) == 2 and abs(m[0] - m[1]) == 2 and m[0] % 9 > 0 and m[1] % 9 < 8 and incre(1)
len(m) == 3 and m[0] == m[1] and 0 < m[0] % 9 < 8 and incre(5)
len(m) == 3 and m[0] != m[1] and incre(4 if m[0] % 9 > 0 and m[2] % 9 < 8 else 1)
return geo_vec
@staticmethod
def geo_vec_qh(p, tp):
allowed_types = [tp, 3]
geo_vec = [0] * 6
def incre(set_type):
geo_vec[set_type] += 1
for m in p:
if m[0] // 9 in allowed_types:
len(m) == 1 and incre(0)
len(m) == 2 and abs(m[0] - m[1]) == 0 and incre(3)
len(m) == 2 and abs(m[0] - m[1]) == 1 and incre(2 if m[0] % 9 > 0 and m[1] % 9 < 8 else 1)
len(m) == 2 and abs(m[0] - m[1]) == 2 and incre(1)
len(m) == 3 and incre(5 if m[0] == m[1] else 4)
return geo_vec
class HandAnalyser(HandParti):
    """Discard-efficiency analysis layered on the HandParti shanten engine.

    "Efficiency" of a discard is the weighted count of unseen tiles whose
    draw would reduce shanten afterwards.  The multi-key orderings rely on
    Python's stable sort: a later sort on the primary key preserves the
    earlier (secondary) ordering among ties.
    """
    def norm_eff_vec(self, bot):
        """Rank every possible discard by normal/seven-pairs efficiency.

        Returns a list of [tile34, eff] pairs, best first; terminal/honor
        discards are nudged ahead among near-ties by set_19_prior.
        """
        # Count our own hand as "revealed" so remaining copies are accurate.
        total_revealed = deepcopy(self.revealed)
        for tile in self.h34:
            total_revealed[tile] += 1
        current_shantin = self.current_shantin
        res = []
        for to_discard in set(self.h34):
            tmp_h34 = deepcopy(self.h34)
            tmp_h34.remove(to_discard)
            eff = self._eff_nm_p7p(tmp_h34, total_revealed, current_shantin)
            # Penalise throwing away dora / red-five tiles.
            if to_discard in self.bonus_tiles:
                eff *= 0.9
            if to_discard < 27 and to_discard % 9 == 4 and self.h34.count(to_discard) == 1:
                if bot.tile_34_to_136(to_discard) in Tile.RED_BONUS:
                    eff *= 0.9
            res.append([to_discard, eff])
        # Secondary key first (centrality), then stable sort by efficiency.
        res = sorted(res, key=lambda x: 0 if x[0] in Tile.ONENINE else 4 - abs(x[0] % 9 - 4))
        res = sorted(res, key=lambda x: -x[1])
        self.set_19_prior(res)
        return res
    def enforce_eff_vec(self, num_turn, bot):
        """Efficiency vector for a committed special form, if any.

        NOTE(review): returns None implicitly when no form is committed —
        callers are expected to handle that case.
        """
        decided_pph, decided_dy, decided_qh = bot.decided_pph, bot.decided_dy, bot.decided_qh
        def enforce_eff(index):
            # bot.thclient.drawer and bot.thclient.drawer.set_enforce_form(self.names[index])
            return self.spec_eff_vec(index, bot)
        qh_decided = self.qh_decided(num_turn) or decided_qh or self.shantins[self.QH] == self.current_shantin
        pp_decided = self.pp_decided or decided_pph or self.shantins[self.PPH] == self.current_shantin
        if qh_decided and pp_decided:
            # Both forms viable: chase whichever is closer to tenpai.
            if self.shantins[self.PPH] < self.shantins[self.QH]:
                return enforce_eff(self.PPH)
            else:
                return enforce_eff(self.QH)
        elif qh_decided:
            return enforce_eff(self.QH)
        elif pp_decided:
            return enforce_eff(self.PPH)
        if self.sp_decided:
            return enforce_eff(self.SP)
        if decided_dy:
            return enforce_eff(self.NO19)
    def deep_eff_vec(self, bot):
        """Two-level (look-ahead) efficiency ranking of discards.

        For each discard, sums over useful draws the efficiency of the
        resulting hand one shanten lower; near-ties on first-level
        efficiency (within 2) are broken by this deeper score.
        """
        deep_eff = {}
        normal_eff = {}
        total_revealed = deepcopy(self.revealed)
        for tile in self.h34:
            total_revealed[tile] += 1
        current_shantin = self.current_shantin
        for to_discard in set(self.h34):
            tmp_h34 = deepcopy(self.h34)
            tmp_h34.remove(to_discard)
            drawn_sum = 0
            total_eff = 0
            hand_ana = HandAnalyser(tmp_h34, self.m34, [1, 0, 0, 0, 1, 0], self.bonus_winds, self.revealed, self.bonus_tiles)
            # Only look ahead for discards that keep the current shanten.
            if hand_ana.shantins[self.NORMAL] == current_shantin or hand_ana.shantins[self.SP] == current_shantin:
                for drawn in range(34):
                    if total_revealed[drawn] < 4:
                        tiles_after_drawn = tmp_h34 + [drawn]
                        hand_ana = HandAnalyser(tiles_after_drawn, self.m34, [1, 0, 0, 0, 1, 0], self.bonus_winds, self.revealed, self.bonus_tiles)
                        if hand_ana.shantins[self.NORMAL] < current_shantin or hand_ana.shantins[self.SP] < current_shantin:
                            remain = 4 - total_revealed[drawn]
                            drawn_sum += remain
                            tmp_revealed = deepcopy(total_revealed)
                            tmp_revealed[drawn] += 1
                            eff = hand_ana._eff_nm_p7p(tiles_after_drawn, tmp_revealed, current_shantin - 1)
                            # Weight second-level efficiency by copies left.
                            total_eff += eff * remain
            if drawn_sum > 0:
                factor = 1
                if to_discard in self.bonus_tiles:
                    factor *= 0.9
                if to_discard < 27 and to_discard % 9 == 4 and self.h34.count(to_discard) == 1:
                    if bot.tile_34_to_136(to_discard) in Tile.RED_BONUS:
                        factor *= 0.9
                # NOTE(review): the dora factor is applied to the first-level
                # score only, not to deep_eff — confirm this is intentional.
                deep_eff[to_discard] = total_eff / drawn_sum
                normal_eff[to_discard] = drawn_sum * factor
            else:
                deep_eff[to_discard] = 0
                normal_eff[to_discard] = 0
        normal_eff = sorted(normal_eff.items(), key=lambda x: 0 if x[0] in Tile.ONENINE else 4 - abs(x[0] % 9 - 4))
        normal_eff = sorted(normal_eff, key=lambda x: - x[1])
        index = 0
        res = []
        # Group first-level near-ties (difference < 2) and reorder each
        # group by the deeper look-ahead score.
        while True:
            current_index = index + 1
            while current_index < len(normal_eff) and abs(normal_eff[index][1] - normal_eff[current_index][1]) < 2:
                current_index += 1
            tmp_eff = sorted(normal_eff[index:current_index], key=lambda x: - deep_eff[x[0]])
            for pr in tmp_eff:
                res.append(pr)
            if current_index == len(normal_eff):
                break
            else:
                index = current_index
        return res
    def _eff_nm_p7p(self, tiles, total_revealed, current_shantin):
        """Weighted count of unseen draws that lower normal or 7-pairs shanten."""
        eff = 0
        for drawn in range(34):
            # Skip exhausted tiles and tiles unrelated to the hand.
            if total_revealed[drawn] >= 4 or not self._has_adj(drawn):
                continue
            tiles_after = tiles + [drawn]
            forms = [1, 0, 0, 0, 1, 0]
            hand_analiser = HandAnalyser(tiles_after, self.m34, forms, self.bonus_winds, self.revealed, self.bonus_tiles)
            if hand_analiser.shantins[self.NORMAL] < current_shantin or \
                    hand_analiser.shantins[self.SP] < current_shantin:
                eff += (4 - total_revealed[drawn]) * self._get_factor(drawn)
        return eff
    def spec_eff_vec(self, goal_form, bot):
        """Rank discards by efficiency toward a specific form (goal_form)."""
        total_revealed = deepcopy(self.revealed)
        for tile in self.h34:
            total_revealed[tile] += 1
        current_shantin = self.shantins[goal_form]
        res = []
        for to_discard in set(self.h34):
            tmp_h34 = deepcopy(self.h34)
            tmp_h34.remove(to_discard)
            eff = self._eff_spec(tmp_h34, total_revealed, current_shantin, goal_form)
            if to_discard in self.bonus_tiles:
                eff *= 0.9
            res.append([to_discard, eff])
        # Tie-break by general-purpose efficiency, then by form efficiency.
        norm_res = self.norm_eff_vec(bot)
        norm_eff = {x[0]: x[1] for x in norm_res}
        res = sorted(res, key=lambda x: 0 if x[0] in Tile.ONENINE else 4 - abs(x[0] % 9 - 4))
        res = sorted(res, key=lambda x: - norm_eff[x[0]])
        res = sorted(res, key=lambda x: -x[1])
        self.set_19_prior(res)
        return res
    def _eff_spec(self, tiles, total_revealed, current_shantin, form):
        """Weighted count of unseen draws lowering the shanten of *form*."""
        eff = 0
        for drawn in range(34):
            # For half flush even unrelated tiles of the suit matter,
            # so the adjacency filter is skipped for QH.
            if total_revealed[drawn] >= 4 or (form != self.QH and not self._has_adj(drawn)):
                continue
            forms = [0] * 6
            forms[form] = 1
            tiles_after = tiles + [drawn]
            hand_analiser = HandAnalyser(tiles_after, self.m34, forms, self.bonus_winds, self.revealed, self.bonus_tiles)
            if hand_analiser.shantins[form] < current_shantin:
                eff += (4 - total_revealed[drawn]) * self._get_factor(drawn)
        return eff
    def _get_factor(self, tile):
        """Weight for drawing *tile*: boosted near or on dora tiles."""
        factor = 1
        if tile < 27:
            if (tile - 2) // 9 == tile // 9 and (tile - 2) in self.bonus_tiles or \
                    (tile + 2) // 9 == tile // 9 and (tile + 2) in self.bonus_tiles:
                factor += 0.2
            if (tile - 1) // 9 == tile // 9 and (tile - 1) in self.bonus_tiles or \
                    (tile + 1) // 9 == tile // 9 and (tile + 1) in self.bonus_tiles:
                factor += 0.4
        if tile in self.bonus_tiles:
            factor += 0.7
        return factor
    def _has_adj(self, tile):
        """True when *tile* sits within 2 of a hand tile in the same suit
        (honors only match exactly)."""
        if tile > 26:
            if tile in self.h34:
                return True
        else:
            for diff in range(-2, 3):
                if (tile + diff) // 9 == tile // 9 and (tile + diff) in self.h34:
                    return True
        return False
    def set_19_prior(self, res_lst):
        """Bubble the first terminal/honor discard ahead of near-equal
        alternatives (in place) — safer tiles are preferred when close."""
        f19 = -1
        for r in res_lst:
            if r[0] in Tile.ONENINE:
                f19 = res_lst.index(r)
                break
        while f19 > 0 and abs(res_lst[f19 - 1][1] - res_lst[f19][1]) < self.current_shantin and res_lst[f19][1] > 20:
            res_lst[f19 - 1], res_lst[f19] = res_lst[f19], res_lst[f19 - 1]
            f19 -= 1
class WaitingAnalyser:
    """Static helpers that evaluate tenpai (waiting) hands and riichi calls.

    Waiting dictionaries map each candidate discard to its winning tiles,
    expected scores and remaining copies.  Two fixes versus the original:

    * ``check_waiting_long`` no longer mutates its ``cnt_bonus`` parameter
      inside the per-discard loop (the deduction compounded across
      iterations);
    * the merge loop in ``not_all_last_richii`` indexed ``good_indices``
      with the bad-list counter ``j`` (wrong element, possible IndexError).
    """
    @staticmethod
    def check_waiting_m1(bot, richii=False):
        """Waiting info for a 13-tile hand (no discard choice).

        Returns {} when not tenpai or nothing scores; otherwise a dict with
        per-tile scores, total remaining copies and a weighted ave_score.
        """
        w_dict = {}
        bonus_tiles = bot.game_table.bonus_tiles
        finished_hand, win_tile = WaitCalc.waiting_calc(bot.hand34)
        if not finished_hand or len(finished_hand) == 0:
            return w_dict
        bns = bot.cnt_total_bonus_tiles
        total_win_tiles = list(set([t for wt in win_tile for t in list(wt)]))
        w_dict = {'waitings': {}, 'remain_num': 0}
        ave_score_sum = 0
        for w in total_win_tiles:
            score, _, _, _ = WinCalc.score_calc_long_para(
                bot.hand34, w, bot.meld34, bot.minkan34, bot.ankan34, False,
                bot.player_wind, bot.round_wind, richii, bns + bonus_tiles.count(w),
                bot.game_table.honba_sticks, bot.game_table.reach_sticks, bot.is_dealer
            )
            if score > 0:
                w_dict['waitings'][w] = score
                w_dict['remain_num'] += 4 - bot.game_table.revealed_tiles[w] - bot.hand34.count(w)
                # Weight each winning tile's score by its remaining copies.
                ave_score_sum += score * (4 - bot.game_table.revealed_tiles[w] - bot.hand34.count(w))
        if w_dict['remain_num'] > 0:
            w_dict['ave_score'] = ave_score_sum // w_dict['remain_num']
            return w_dict
        else:
            return {}
    @staticmethod
    def check_waiting(bot, ricchi=False):
        """Waiting info for every discard of a 14-tile hand.

        Returns a list of (discard, info) pairs sorted by remaining winning
        copies (descending), or {} / [] when no discard leaves tenpai.
        """
        waiting_dict = {}
        bonus_tiles = bot.game_table.bonus_tiles
        for tile in set(bot.hand34):
            hand_after_discard = deepcopy(bot.hand34)
            hand_after_discard.remove(tile)
            finished_hand, win_tile = WaitCalc.waiting_calc(hand_after_discard)
            if not finished_hand or len(finished_hand) == 0:
                continue
            # Bonus count drops with the discarded dora / red five.
            bns = bot.cnt_total_bonus_tiles - bonus_tiles.count(tile)
            if tile % 9 == 4 and tile < 27 and bot.hand34.count(tile) == 1 and tile * 4 in bot.tiles136:
                bns -= 1
            tmp_dict = {'waitings': {}, 'remain_num': 0, 's_remain': {}}
            total_win_tiles = list(set([t for wt in win_tile for t in list(wt)]))
            for w in total_win_tiles:
                score, _, _, _ = WinCalc.score_calc_long_para(
                    hand_after_discard, w, bot.meld34, bot.minkan34, bot.ankan34, False,
                    bot.player_wind, bot.round_wind, ricchi, bns + bonus_tiles.count(w),
                    bot.game_table.honba_sticks, bot.game_table.reach_sticks, bot.is_dealer
                )
                if score > 0 and (4 - bot.game_table.revealed_tiles[w] - bot.hand34.count(w)) > 0:
                    tmp_dict['waitings'][w] = score
                    tmp_dict['s_remain'][w] = 4 - bot.game_table.revealed_tiles[w] - bot.hand34.count(w)
                    tmp_dict['remain_num'] += 4 - bot.game_table.revealed_tiles[w] - bot.hand34.count(w)
            if tmp_dict['remain_num'] > 0:
                tmp_dict['ave_score'] = sum([v for k, v in tmp_dict['waitings'].items()]) // len(tmp_dict['waitings'])
                tmp_dict['w_tiles'] = [k for k, v in tmp_dict['waitings'].items()]
                waiting_dict[tile] = tmp_dict
        if len(waiting_dict) > 0:
            waiting_dict = sorted(waiting_dict.items(), key=lambda x: -x[1]['remain_num'])
        return waiting_dict
    @staticmethod
    def check_waiting_long(bonus_tiles, hand_34, hand_136, cnt_bonus, meld_34, minkan_34, ankan_34,
                           player_wind, round_wind, honba_sticks, reach_sticks, is_dealer, revealed):
        """Parameterised variant of check_waiting (no bot object needed)."""
        waiting_dict = {}
        for tile in set(hand_34):
            hand_after_discard = deepcopy(hand_34)
            hand_after_discard.remove(tile)
            finished_hand, win_tile = WaitCalc.waiting_calc(hand_after_discard)
            if not finished_hand or len(finished_hand) == 0:
                continue
            # BUG FIX: compute the per-discard bonus count in a local; the
            # original reassigned ``cnt_bonus`` itself, compounding the
            # deduction over successive loop iterations.
            bns = cnt_bonus - bonus_tiles.count(tile)
            bns -= (tile % 9 == 4 and tile < 27 and hand_34.count(tile) == 1 and tile * 4 in hand_136)
            tmp_dict = {'waitings': {}, 'remain_num': 0}
            total_win_tiles = list(set([t for wt in win_tile for t in list(wt)]))
            for w in total_win_tiles:
                score, _, _, _ = WinCalc.score_calc_long_para(
                    hand_after_discard, w, meld_34, minkan_34, ankan_34, False,
                    player_wind, round_wind, False, bns + bonus_tiles.count(w),
                    honba_sticks, reach_sticks, is_dealer
                )
                if score > 0:
                    tmp_dict['waitings'][w] = score
                    tmp_dict['remain_num'] += 4 - revealed[w] - hand_34.count(w)
            if tmp_dict['remain_num'] > 0:
                tmp_dict['ave_score'] = sum([v for k, v in tmp_dict['waitings'].items()]) // len(tmp_dict['waitings'])
                if tmp_dict['ave_score'] > 0:
                    waiting_dict[tile] = tmp_dict
        if len(waiting_dict) > 0:
            waiting_dict = sorted(waiting_dict.items(), key=lambda x: -x[1]['remain_num'])
        return waiting_dict
    @staticmethod
    def can_win(bot, final_tile):
        """Whether *final_tile* completes the hand with at least one yaku.

        Furiten rule: a tile already in our own discards cannot be ronned.
        """
        if final_tile in bot.discard34:
            return False
        finished_hand, win_tile = WaitCalc.waiting_calc(deepcopy(bot.hand34))
        if not finished_hand or len(finished_hand) == 0:
            return False
        for j in range(len(win_tile)):
            win_tiles = list(win_tile[j])
            if final_tile in win_tiles:
                hand_partition = finished_hand[j]
                f, _, m, _ = WinCalc.fan_calc_long_para(hand_partition, final_tile, bot.meld34, bot.minkan34,
                                                        bot.ankan34,
                                                        False, bot.player_wind, bot.round_wind, False)
                # Needs a fan or yakuman to be a legal win.
                return f > 0 or m > 0
        return False
    @staticmethod
    def check_waiting_after_pon(bot, tile136):
        """Waiting info assuming we pon *tile136* (two copies leave the hand)."""
        hand34 = deepcopy(bot.hand34)
        hand34.remove(tile136 // 4)
        hand34.remove(tile136 // 4)
        meld34 = deepcopy(bot.meld34)
        meld34.append([tile136 // 4] * 3)
        return WaitingAnalyser.check_waiting_long(
            bot.game_table.bonus_tiles, hand34, bot.tiles136, bot.cnt_total_bonus_tiles, meld34, bot.minkan34,
            bot.ankan34, bot.player_wind, bot.round_wind, bot.game_table.honba_sticks, bot.game_table.reach_sticks,
            bot.is_dealer, bot.game_table.revealed_tiles
        )
    @staticmethod
    def check_waiting_after_chow(bot, hand_open, tile136):
        """Waiting info assuming we chow *tile136* using *hand_open* tiles."""
        hand34 = deepcopy(bot.hand34)
        hand34.remove(hand_open[0])
        hand34.remove(hand_open[1])
        meld34 = deepcopy(bot.meld34)
        meld34.append(sorted(hand_open + [tile136 // 4]))
        return WaitingAnalyser.check_waiting_long(
            bot.game_table.bonus_tiles, hand34, bot.tiles136, bot.cnt_total_bonus_tiles, meld34, bot.minkan34,
            bot.ankan34, bot.player_wind, bot.round_wind, bot.game_table.honba_sticks, bot.game_table.reach_sticks,
            bot.is_dealer, bot.game_table.revealed_tiles
        )
    @staticmethod
    def should_richii(bot, hand_ana):
        """Dispatch riichi decision: all-last strategies depend on rank."""
        waiting_dict_not_richii = WaitingAnalyser.check_waiting(bot, False)
        waiting_dict = WaitingAnalyser.check_waiting(bot, True)
        if bot.is_all_last:
            if bot.current_rank == 0:
                return WaitingAnalyser.all_last_richii_rk0(waiting_dict, bot, hand_ana)
            elif bot.current_rank == 1:
                return WaitingAnalyser.all_last_richii_rk1(waiting_dict, bot, hand_ana, waiting_dict_not_richii)
            elif bot.current_rank == 2:
                return WaitingAnalyser.all_last_richii_rk2(waiting_dict, bot, hand_ana, waiting_dict_not_richii)
            else:
                return WaitingAnalyser.all_last_richii_rk3(waiting_dict, bot, hand_ana, waiting_dict_not_richii)
        else:
            return WaitingAnalyser.not_all_last_richii(waiting_dict, bot, hand_ana)
    @staticmethod
    def not_all_last_richii(waiting_riichi, bot, hand_ana):
        """Riichi decision outside the all-last round.

        Splits waits into good (many live tiles or terminal waits) and bad,
        merges them preferring good waits unless a bad wait scores over
        twice as much, then applies turn/score thresholds.
        Returns (declare?, discard, remain, waitings).
        """
        good_indices, bad_indices = [], []
        for index in range(len(waiting_riichi)):
            tmp_dict = waiting_riichi[index][1]
            remain = tmp_dict['remain_num']
            wtiles = tmp_dict['w_tiles']
            if remain > 4 or (remain > 2 and any(t in Tile.ONENINE for t in wtiles)):
                good_indices.append(index)
            else:
                bad_indices.append(index)
        good_indices = sorted(good_indices, key=lambda x: - waiting_riichi[x][1]['ave_score'])
        bad_indices = sorted(bad_indices, key=lambda x: - waiting_riichi[x][1]['ave_score'])
        sorted_indices = []
        i, j = 0, 0
        while i < len(good_indices) or j < len(bad_indices):
            if i == len(good_indices) and j < len(bad_indices):
                sorted_indices.append(bad_indices[j])
                j += 1
            if i < len(good_indices) and j == len(bad_indices):
                sorted_indices.append(good_indices[i])
                i += 1
            if i < len(good_indices) and j < len(bad_indices):
                score1 = waiting_riichi[good_indices[i]][1]['ave_score']
                score2 = waiting_riichi[bad_indices[j]][1]['ave_score']
                if score2 > 2 * score1:
                    sorted_indices.append(bad_indices[j])
                    j += 1
                else:
                    # BUG FIX: the original appended good_indices[j] here,
                    # indexing the good list with the bad-list counter.
                    sorted_indices.append(good_indices[i])
                    i += 1
        for index in sorted_indices:
            to_discard = waiting_riichi[index][0]
            tmp_dict = waiting_riichi[index][1]
            remain = tmp_dict['remain_num']
            score = tmp_dict['ave_score']
            if bot.turn_num <= 6:
                if score >= 4000 and remain > 2:
                    return True, to_discard, remain, tmp_dict['waitings']
                if score >= 2000 and remain > 4:
                    return True, to_discard, remain, tmp_dict['waitings']
            elif bot.turn_num < 15:
                # Mid game: require safety (can_discard) or a bigger hand
                # when someone has already declared riichi.
                if bot.game_table.has_reach:
                    if score >= 8000 and remain > 2:
                        return True, to_discard, remain, tmp_dict['waitings']
                    if score >= 5000 and remain > 2 and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, remain, tmp_dict['waitings']
                    if score >= 2600 and remain > 4 and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, remain, tmp_dict['waitings']
                else:
                    if score >= 6000 and remain > 2:
                        return True, to_discard, remain, tmp_dict['waitings']
                    if score >= 4000 and remain > 2 and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, remain, tmp_dict['waitings']
                    if score >= 2000 and remain > 4 and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, remain, tmp_dict['waitings']
            elif bot.turn_num > 16:
                if not bot.game_table.has_reach:
                    if bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, remain, tmp_dict['waitings']
        return False, None, None, None
    @staticmethod
    def all_last_richii_rk0(waiting_riichi, bot, hand_ana):
        """All-last riichi decision when currently ranked first: only take
        waits whose score covers the lead requirement (bot.need_scores)."""
        good_indices, bad_indices = [], []
        for index in range(len(waiting_riichi)):
            tmp_dict = waiting_riichi[index][1]
            remain = tmp_dict['remain_num']
            wtiles = tmp_dict['w_tiles']
            if remain > 4 or (remain > 2 and any(t in Tile.ONENINE for t in wtiles)):
                good_indices.append(index)
            else:
                bad_indices.append(index)
        def expect_score(x_x):
            # Score weighted by how many copies of each winning tile remain.
            w_dict = waiting_riichi[x_x][1]['waitings']
            return sum([waiting_riichi[x_x][1]['s_remain'][k] * v for k, v in w_dict.items()])
        good_indices = sorted(good_indices, key=lambda x: - expect_score(x))
        bad_indices = sorted(bad_indices, key=lambda x: -expect_score(x))
        for need_score in bot.need_scores:
            for g_index in good_indices:
                to_discard = waiting_riichi[g_index][0]
                tmp_dict = waiting_riichi[g_index][1]
                total_remain = tmp_dict['remain_num']
                score = tmp_dict['ave_score']
                if score >= need_score:
                    return True, to_discard, total_remain, tmp_dict['waitings']
                if score * 2 >= need_score and bot.can_discard(to_discard, hand_ana):
                    return True, to_discard, total_remain, tmp_dict['waitings']
            for b_index in bad_indices:
                to_discard = waiting_riichi[b_index][0]
                tmp_dict = waiting_riichi[b_index][1]
                total_remain = tmp_dict['remain_num']
                score = tmp_dict['ave_score']
                if score >= need_score:
                    return True, to_discard, total_remain, tmp_dict['waitings']
                if score * 1.5 >= need_score and bot.can_discard(to_discard, hand_ana):
                    return True, to_discard, total_remain, tmp_dict['waitings']
        return False, None, None, None
    @staticmethod
    def all_last_richii_rk1(waiting_riichi, bot, hand_ana, waiting_not_riichi):
        """All-last riichi decision at rank 2: thresholds relative to
        need_scores, stricter when an opponent has declared riichi; riichi
        is forced when damaten is impossible or the gap is comfortable."""
        good_indices, bad_indices = [], []
        for index in range(len(waiting_riichi)):
            tmp_dict = waiting_riichi[index][1]
            remain = tmp_dict['remain_num']
            wtiles = tmp_dict['w_tiles']
            if remain > 4 or (remain > 2 and any(t in Tile.ONENINE for t in wtiles)):
                good_indices.append(index)
            else:
                bad_indices.append(index)
        def expect_score(x_x):
            w_dict = waiting_riichi[x_x][1]['waitings']
            return sum([waiting_riichi[x_x][1]['s_remain'][k] * v for k, v in w_dict.items()])
        good_indices = sorted(good_indices, key=lambda x: - expect_score(x))
        bad_indices = sorted(bad_indices, key=lambda x: -expect_score(x))
        for need_score in bot.need_scores:
            for g_index in good_indices:
                to_discard = waiting_riichi[g_index][0]
                tmp_dict = waiting_riichi[g_index][1]
                total_remain = tmp_dict['remain_num']
                score = tmp_dict['ave_score']
                if bot.game_table.has_reach:
                    if score >= 1.5 * need_score:
                        return True, to_discard, total_remain, tmp_dict['waitings']
                    if score >= need_score and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, total_remain, tmp_dict['waitings']
                else:
                    if score >= need_score:
                        return True, to_discard, total_remain, tmp_dict['waitings']
                    if score >= 0.8 * need_score and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, total_remain, tmp_dict['waitings']
            for b_index in bad_indices:
                to_discard = waiting_riichi[b_index][0]
                tmp_dict = waiting_riichi[b_index][1]
                total_remain = tmp_dict['remain_num']
                score = tmp_dict['ave_score']
                if bot.game_table.has_reach:
                    if score >= 2 * need_score:
                        return True, to_discard, total_remain, tmp_dict['waitings']
                    if score >= 1.5 * need_score and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, total_remain, tmp_dict['waitings']
                else:
                    if score >= 1.5 * need_score:
                        return True, to_discard, total_remain, tmp_dict['waitings']
                    if score >= need_score and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, total_remain, tmp_dict['waitings']
        diff = abs(bot.score - min(bot.game_table.scores))
        if len(waiting_not_riichi) == 0 or diff > 1000:
            for index in good_indices + bad_indices:
                to_discard = waiting_riichi[index][0]
                tmp_dict = waiting_riichi[index][1]
                total_remain = tmp_dict['remain_num']
                if bot.can_discard(to_discard, hand_ana):
                    return True, to_discard, total_remain, tmp_dict['waitings']
        return False, None, None, None
    @staticmethod
    def all_last_richii_rk2(waiting_riichi, bot, hand_ana, waiting_not_riichi):
        """All-last riichi decision at rank 3: looser thresholds than rk1
        since climbing is more urgent."""
        good_indices, bad_indices = [], []
        for index in range(len(waiting_riichi)):
            tmp_dict = waiting_riichi[index][1]
            remain = tmp_dict['remain_num']
            wtiles = tmp_dict['w_tiles']
            if remain > 4 or (remain > 2 and any(t in Tile.ONENINE for t in wtiles)):
                good_indices.append(index)
            else:
                bad_indices.append(index)
        def expect_score(x_x):
            w_dict = waiting_riichi[x_x][1]['waitings']
            return sum([waiting_riichi[x_x][1]['s_remain'][k] * v for k, v in w_dict.items()])
        good_indices = sorted(good_indices, key=lambda x: - expect_score(x))
        bad_indices = sorted(bad_indices, key=lambda x: -expect_score(x))
        for need_score in bot.need_scores:
            for g_index in good_indices:
                to_discard = waiting_riichi[g_index][0]
                tmp_dict = waiting_riichi[g_index][1]
                total_remain = tmp_dict['remain_num']
                score = tmp_dict['ave_score']
                if bot.game_table.has_reach:
                    if score >= need_score:
                        return True, to_discard, total_remain, tmp_dict['waitings']
                    if score >= 0.6 * need_score and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, total_remain, tmp_dict['waitings']
                else:
                    if score >= 0.7 * need_score:
                        return True, to_discard, total_remain, tmp_dict['waitings']
                    if score >= 0.5 * need_score and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, total_remain, tmp_dict['waitings']
            for b_index in bad_indices:
                to_discard = waiting_riichi[b_index][0]
                tmp_dict = waiting_riichi[b_index][1]
                total_remain = tmp_dict['remain_num']
                score = tmp_dict['ave_score']
                if bot.game_table.has_reach:
                    if score >= 1.2 * need_score:
                        return True, to_discard, total_remain, tmp_dict['waitings']
                    if score >= need_score and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, total_remain, tmp_dict['waitings']
                else:
                    if score >= 0.9 * need_score:
                        return True, to_discard, total_remain, tmp_dict['waitings']
                    if score >= 0.7 * need_score and bot.can_discard(to_discard, hand_ana):
                        return True, to_discard, total_remain, tmp_dict['waitings']
        diff = abs(bot.score - sorted(bot.game_table.scores)[1])
        if len(waiting_not_riichi) == 0 or diff > 1000:
            for index in good_indices + bad_indices:
                to_discard = waiting_riichi[index][0]
                tmp_dict = waiting_riichi[index][1]
                total_remain = tmp_dict['remain_num']
                if bot.can_discard(to_discard, hand_ana):
                    return True, to_discard, total_remain, tmp_dict['waitings']
        return False, None, None, None
    @staticmethod
    def all_last_richii_rk3(waiting_riichi, bot, hand_ana, waiting_not_riichi):
        """All-last riichi decision at last place: declare aggressively on
        good waits, respecting turn count, safety and opponents' riichi."""
        good_indices, bad_indices = [], []
        for index in range(len(waiting_riichi)):
            tmp_dict = waiting_riichi[index][1]
            remain = tmp_dict['remain_num']
            wtiles = tmp_dict['w_tiles']
            if remain > 4 or (remain > 2 and any(t in Tile.ONENINE for t in wtiles)):
                good_indices.append(index)
            else:
                bad_indices.append(index)
        def expect_score(x_x):
            w_dict = waiting_riichi[x_x][1]['waitings']
            return sum([waiting_riichi[x_x][1]['s_remain'][k] * v for k, v in w_dict.items()])
        good_indices = sorted(good_indices, key=lambda x: - expect_score(x))
        bad_indices = sorted(bad_indices, key=lambda x: -expect_score(x))
        diff = abs(bot.score - sorted(bot.game_table.scores)[2])
        if len(waiting_not_riichi) == 0:
            for g_index in good_indices:
                to_discard = waiting_riichi[g_index][0]
                tmp_dict = waiting_riichi[g_index][1]
                remain = tmp_dict['remain_num']
                w_dict = tmp_dict['waitings']
                score = tmp_dict['ave_score']
                if bot.can_discard(to_discard, hand_ana) and score >= (4000 if bot.is_dealer else 2000):
                    return True, to_discard, remain, w_dict
        else:
            if diff < 1000:
                # Tiny gap to 3rd place: a cheap hand suffices early on.
                if bot.turn_num <= 6:
                    for g_index in good_indices:
                        to_discard = waiting_riichi[g_index][0]
                        tmp_dict = waiting_riichi[g_index][1]
                        remain = tmp_dict['remain_num']
                        w_dict = tmp_dict['waitings']
                        score = tmp_dict['ave_score']
                        if score >= (2000 if bot.is_dealer else 1000):
                            return True, to_discard, remain, w_dict
                else:
                    return False, None, None, None
            else:
                for g_index in good_indices:
                    to_discard = waiting_riichi[g_index][0]
                    tmp_dict = waiting_riichi[g_index][1]
                    remain = tmp_dict['remain_num']
                    w_dict = tmp_dict['waitings']
                    score = tmp_dict['ave_score']
                    if bot.turn_num <= 6:
                        return True, to_discard, remain, w_dict
                    else:
                        if bot.can_discard(to_discard, hand_ana):
                            if bot.game_table.has_reach:
                                if score >= (5000 if bot.is_dealer else 3000):
                                    return True, to_discard, remain, w_dict
                            else:
                                if score >= (3000 if bot.is_dealer else 1300):
                                    return True, to_discard, remain, w_dict
        return False, None, None, None
class MLAI(AIInterface):
    """Machine-learning-backed mahjong AI: combines hand/shantin heuristics
    with classifier-based opponent waiting prediction (``ensemble_clfs``)."""

    def __init__(self, ensemble_clfs):
        """
        :param ensemble_clfs: ensemble of classifiers used to predict which
            tiles opponents are waiting on (riichi and non-riichi models).
        """
        super().__init__()
        self.ensemble_clfs = ensemble_clfs
        # True once riichi is declared; the pre-chosen discard is then
        # replayed by to_discard_tile().
        self.called_reach = False
        self.to_discard_after_reach = -1
        # decided_* flags lock the hand onto a special form:
        # pph = all-triplets, dy = tanyao (no terminals), qh = half/full flush.
        self.decided_pph = False
        self.decided_dy = False
        self.decided_qh = False
        # Last tile (136-format) the bot declined to kan; also blocks pon on it.
        self.not_kan = None
        # Discard strategy dispatch, keyed by the current shantin number.
        self.dis_funcs = {3: self._dis_3_st, 2: self._dis_2_st, 1: self._dis_1_st, 0: self._dis_1_st}
        self.riichi_waiting = None
def init_state(self):
    """Reset base-class state plus this AI's per-hand decision flags."""
    super().init_state()
    self.erase_states()
def to_discard_tile(self):
    """Choose the tile (136-format) to discard this turn.

    Pipeline: replay the riichi discard if riichi was already declared;
    otherwise try, in order, the tenpai (waiting) analysis, the enforced
    special-form discard, the shantin-specific efficiency discard, and
    finally a defensive fallback.
    """
    if self.called_reach:
        # After riichi the discard was fixed at declaration time.
        return self.tile_34_to_136(self.to_discard_after_reach)
    # for opp in range(1, 4):
    #     if self.game_table.get_player(opp).reach_status:
    #         self.handle_opponent_discard(opp)
    self.log_opponents_prediction()
    s = datetime.datetime.now()

    def wait_3():
        # Pad the decision to at least ~1s of wall time plus a small random
        # delay, so the bot does not act suspiciously fast.
        e = datetime.datetime.now()
        diff = (e - s).seconds
        diff < 1 and sleep(1 + random.uniform(0, 2))

    # Analyse the hand with all six target forms enabled.
    hand_ana = HandAnalyser(self.hand34, self.total_melds34, [1] * 6, self.bonus_honors, self.game_table.revealed_tiles, self.game_table.bonus_tiles)
    to_discard = self._dis_check_waiting(hand_ana)
    if to_discard >= 0:
        wait_3()
        return to_discard
    self._show_shantins(hand_ana)
    # A near-complete flush hand is treated as if the flush form were enforced.
    almost_qh = False
    if self.decided_qh:
        if hand_ana.shantins[hand_ana.QH] == 1:
            almost_qh = True
        if hand_ana.shantins[hand_ana.QH] == 2 and self.turn_num < 10:
            almost_qh = True
    if self._dis_should_enforce(hand_ana) or self.decided_dy or almost_qh:
        to_discard = self._dis_spec_form(hand_ana)
        if to_discard >= 0:
            wait_3()
            return to_discard
    shantin = hand_ana.current_shantin
    # Dispatch on shantin count; >= 4 shantin falls back to the 3-shantin rule.
    to_discard = self.dis_funcs.get(shantin, self._dis_3_st)(hand_ana)
    # self.thclient.drawer and self.thclient.drawer.set_enforce_form('')
    if to_discard >= 0:
        wait_3()
        return to_discard
    # Defensive fallback: honors first, then number tiles sorted so that the
    # tiles closest to the edges (smallest 4 - |x%9 - 4| key) come first.
    hand_def = [tile for tile in self.hand34 if tile > 26] + sorted(self.hand34, key=lambda x: 4 - abs(x % 9 - 4))
    # self.thclient.drawer and self.thclient.drawer.set_enforce_form('')
    for tile in hand_def:
        if self.can_discard(tile, hand_ana):
            wait_3()
            return self.tile_34_to_136(tile)
    # self.thclient.drawer and self.thclient.drawer.set_enforce_form('')
    return self.tile_34_to_136(hand_def[0])
def _dis_check_waiting(self, hand_ana):
    """If some discard makes the hand tenpai, pick that discard.

    Options from WaitingAnalyser.check_waiting are ranked (via successive
    stable sorts, lowest priority applied first) by tile shape, number of
    distinct waits, live-tile count, and average score, then filtered by
    safety and score thresholds.

    :param hand_ana: HandAnalyser of the current hand (for safety checks).
    :return: tile in 136-format to discard, or -1 when nothing qualifies.
    """
    num_turn = len(self.discard34)
    waiting_dict = WaitingAnalyser.check_waiting(self, False)
    indices = list(range(len(waiting_dict)))

    def prio19(t):
        # Terminals are cheapest to give up; middle tiles rank last.
        return 0 if t in Tile.ONENINE else 4 - abs(t % 9 - 4)

    indices.sort(key=lambda x: prio19(waiting_dict[x][0]))
    indices.sort(key=lambda x: -len(waiting_dict[x][1]['w_tiles']))
    indices.sort(key=lambda x: -waiting_dict[x][1]['remain_num'])
    indices.sort(key=lambda x: -waiting_dict[x][1]['ave_score'])
    # can discard: prefer the best-ranked option that is also safe to throw.
    if len(indices) > 1:
        best_safe_index = None
        for index in indices:
            to_discard = waiting_dict[index][0]
            if self.can_discard(to_discard, hand_ana):
                best_safe_index = index
                break
        # BUGFIX: the original tested the index truthily (`if any_can_discard:`),
        # so index 0 -- a valid result, and the top-ranked option -- was
        # silently ignored.
        if best_safe_index is not None:
            to_discard = waiting_dict[best_safe_index][0]
            tmp_dict = waiting_dict[best_safe_index][1]
            self._show_waiting(tmp_dict['waitings'])
            return self.tile_34_to_136(to_discard)
    # high score: no safe option found -- accept some risk for value.
    for index in indices:
        to_discard = waiting_dict[index][0]
        tmp_dict = waiting_dict[index][1]
        score = tmp_dict['ave_score']
        remain = tmp_dict['remain_num']
        self._show_waiting(tmp_dict['waitings'])
        if num_turn <= 13:
            if (score >= 5800 and remain > 1) or self.can_discard(to_discard, hand_ana):
                self._show_waiting(tmp_dict['waitings'])
                return self.tile_34_to_136(to_discard)
        else:
            # Late game: demand a bigger payoff before a risky push.
            if (score >= 8000 and remain > 2) or self.can_discard(to_discard, hand_ana):
                self._show_waiting(tmp_dict['waitings'])
                return self.tile_34_to_136(to_discard)
    return -1
def _dis_should_enforce(self, hand_ana):
    """Return True if the discard strategy should commit to a special form
    (flush / all-triplets) instead of the plain fastest hand.

    Enforce when a special form is already as close as the normal form, or
    when the all-last standings call for a bigger hand; decline when the
    hand already carries enough value as-is.
    """
    if hand_ana.shantins[hand_ana.NORMAL] == hand_ana.shantins[hand_ana.QH]:
        return True
    if hand_ana.shantins[hand_ana.NORMAL] == hand_ana.shantins[hand_ana.PPH]:
        return True
    if self.game_table.round_number >= 3:
        # All-last rounds: decide from the current standings.
        scores = sorted(self.game_table.scores)
        self_score = self.score
        rk = scores.index(self.score)  # 0 = lowest score, 3 = leader
        if rk == 3:
            return False
        if rk == 2 and abs(self_score - scores[3]) < 1500:
            # Close behind the leader: a quick cheap hand is enough.
            return False
        if rk < 3 and abs(self_score - scores[3]) <= 6000:
            # A medium-sized hand could still overtake the leader.
            return True
    if self.is_open_hand:
        if (self.game_table.has_reach or self.potential_fan >= 3) and self.has_dori:
            return False
    else:
        if self.potential_fan >= 3:
            # Closed and already valuable: no need to force a special form.
            return False
    return True
def _dis_spec_form(self, hand_ana: HandAnalyser):
    """Discard following the enforced special-form efficiency ranking.

    :return: the first safe-to-discard entry in 136-format, or -1 when the
        ranking is empty or no entry passes the safety check.
    """
    ranking = hand_ana.enforce_eff_vec(self.turn_num, self)
    if not ranking:
        return -1
    # self.thclient.drawer and self.thclient.drawer.set_tile_eff(ranking)
    for entry in ranking:
        if self.can_discard(entry[0], hand_ana):
            return self.tile_34_to_136(entry[0])
    return -1
def _dis_3_st(self, hand_ana: HandAnalyser):
    """Discard at 3+ shantin: dump an obviously useless honor/terminal first,
    otherwise fall back to the generic efficiency ranking."""
    junk = self._dis_3_st_chr()
    if junk >= 0:
        return junk
    fallback = self._dis_1_st(hand_ana)
    return fallback if fallback >= 0 else -1
def _dis_3_st_chr(self):
    """At 3+ shantin, pick an obviously useless tile: a lone honor first,
    then an isolated terminal (1/9 with no nearby support).

    :return: tile in 136-format, or -1 when nothing qualifies.
    """
    # self.thclient.drawer and self.thclient.drawer.set_tile_eff([])
    # Lone non-yakuhai honors are worthless -- discard them first.
    for chr_tile in range(27, 34):
        if chr_tile not in self.bonus_honors:
            if self.hand34.count(chr_tile) == 1:
                return self.tile_34_to_136(chr_tile)
    # A lone yakuhai goes early only if at least one copy is already dead.
    for chr_tile in range(27, 34):
        if chr_tile in self.bonus_honors:
            if self.hand34.count(chr_tile) == 1:
                if self.game_table.revealed_tiles[chr_tile] > 0 and self.turn_num < 7:
                    return self.tile_34_to_136(chr_tile)
    # TODO: is this needed?
    # Isolated 1s/9s: a single copy with no neighbor within distance 2.
    # The second element flags whether a distance-3 tile exists (0 = yes).
    dict19 = []
    for one in [0, 9, 18]:
        if self.hand34.count(one) == 1 and (one + 1) not in self.hand34 and (one + 2) not in self.hand34:
            dict19.append([one, 0 if one + 3 in self.hand34 else 1])
    for nine in [8, 17, 26]:
        if self.hand34.count(nine) == 1 and (nine - 1) not in self.hand34 and (nine - 2) not in self.hand34:
            dict19.append([nine, 0 if (nine - 3) in self.hand34 else 1])
    if len(dict19) > 0:
        # NOTE(review): ascending sort ranks flag 0 (has a distance-3
        # connection) first -- confirm this ordering is intended.
        dict19 = sorted(dict19, key=lambda x: x[1])
        return self.tile_34_to_136(dict19[0][0])
    return -1
def _dis_2_st(self, hand_ana: HandAnalyser):
    """Discard at 2-shantin: follow the deep efficiency ranking, preferring
    the first entry that is also safe; fall back to 1-shantin logic when the
    ranking is empty, else push the top entry regardless of safety."""
    ranking = hand_ana.deep_eff_vec(self)
    # self.thclient.drawer and self.thclient.drawer.set_tile_eff(ranking)
    readable = ','.join("{}-{:2.1f}".format(Tile.t34_to_g(entry[0]), entry[1]) for entry in ranking)
    self._post_msg(" 🤖[2 shantin eff]: {}".format(readable))
    for entry in ranking:
        if self.can_discard(entry[0], hand_ana):
            return self.tile_34_to_136(entry[0])
    if not ranking:
        return self._dis_1_st(hand_ana)
    return self.tile_34_to_136(ranking[0][0])
def _dis_1_st(self, hand_ana: HandAnalyser):
    """Discard at 0/1 shantin by normal-form tile efficiency: the highest
    ranked entry that is also safe, else the top entry outright."""
    ranking = hand_ana.norm_eff_vec(self)
    # self.thclient.drawer and self.thclient.drawer.set_tile_eff(ranking)
    readable = ','.join("{}-{:2.1f}".format(Tile.t34_to_g(entry[0]), entry[1]) for entry in ranking)
    self._post_msg(" 🤖[1 shantin eff]: {}".format(readable))
    for entry in ranking:
        if self.can_discard(entry[0], hand_ana):
            return self.tile_34_to_136(entry[0])
    return self.tile_34_to_136(ranking[0][0])
def _can_discard_chr(self, candidate, hand_ana):
    """Safety/value check for discarding an honor tile (34-format > 26).

    Keeps honor pairs/triplets that may still turn into yakuhai or dora
    value; returns False when the tile should be held back.
    """
    is_bonus_chr = (candidate in self.game_table.bonus_tiles)
    num_reaveal = self.game_table.revealed_tiles[candidate]
    hand_num = self.hand34.count(candidate)
    round_num = len(self.discard34)
    if hand_num == 3:
        # Never break up a completed triplet.
        return False
    if candidate in self.bonus_honors:
        if hand_num == 2:
            if is_bonus_chr:
                # Yakuhai pair that is also dora: always keep.
                return False
            # Break the pair only when most copies are dead, or when the hand
            # is committed to pinfu (which cannot use a yakuhai pair).
            want_to_make_pinhu = (hand_ana.current_shantin == hand_ana.shantins[hand_ana.PINHU] <= 2)
            if num_reaveal < 2 and not want_to_make_pinhu:
                return False
        if hand_num:
            if is_bonus_chr:
                # Late-game dora yakuhai with live copies: too valuable.
                if num_reaveal < 2 and round_num > 12:
                    return False
    elif is_bonus_chr:
        # Non-yakuhai dora honor: keep pairs, and keep a late-game single
        # while copies are still live.
        if hand_num == 2 or (hand_num == 1 and round_num > 12 and num_reaveal < 2):
            return False
    return True
def can_discard(self, t34, hand_ana):
    """Return True if tile ``t34`` (34-format) is judged safe enough to discard.

    Honors delegate to :meth:`_can_discard_chr`.  Number tiles are compared
    against every opponent's predicted waits and safe-tile lists; pushing a
    semi-safe tile through is allowed with a probability that shrinks as
    the opponent looks more dangerous.
    """
    if t34 > 26:
        return self._can_discard_chr(t34, hand_ana)
    if t34 == self.game_table.last_discard:
        # Same tile as the one just discarded: treated as safe.
        return True
    if (t34 in self.game_table.last_round_discard or self.turn_num < 7) and not self.game_table.has_reach:
        return True
    wall_safe_tiles = self.game_table.barrier_safe_tiles
    # near_bonus = self._is_near_bonus(t34)
    can_discard = True
    for i in range(1, 4):
        opp_obj = self.game_table.get_player(i)
        if not opp_obj.is_valid:
            continue
        total_waiting = opp_obj.waiting_prediction
        safe_tiles = opp_obj.abs_safe_tiles
        gin_safes = opp_obj.gin_safe_tiles
        if can_discard:
            if t34 in safe_tiles:
                continue
            elif t34 in total_waiting:
                # Tile matches a predicted wait: only push with a probability
                # of 0.3 / 0.5 / 0.7 depending on how scary the opponent is,
                # and only when the tile is at least "gin/wall" semi-safe.
                if opp_obj.dangerous:
                    if opp_obj.is_freezing:
                        if t34 in gin_safes + wall_safe_tiles and random.uniform(0, 1) <= 0.3:
                            continue
                        else:
                            can_discard = False
                    else:
                        if t34 in gin_safes + wall_safe_tiles and random.uniform(0, 1) <= 0.5:
                            continue
                        else:
                            can_discard = False
                else:
                    if t34 in gin_safes + wall_safe_tiles and random.uniform(0, 1) <= 0.7:
                        continue
                    else:
                        can_discard = False
            elif opp_obj.is_freezing:
                if t34 in gin_safes + wall_safe_tiles and random.uniform(0, 1) <= 0.5:
                    continue
                else:
                    can_discard = False
            elif opp_obj.dangerous:
                # Suji-style check: a tile three away from a predicted wait
                # is risky too (could hit the same two-sided wait).
                if (t34 % 9 < 3 and t34 + 3 in total_waiting) or (6 <= t34 % 9 and t34 - 3 in total_waiting):
                    if t34 in wall_safe_tiles + gin_safes and random.uniform(0, 1) <= 0.6:
                        continue
                    else:
                        can_discard = False
    return can_discard
def can_call_reach(self):
    """Decide whether to declare riichi this turn.

    :return: (False, 0) if riichi cannot/should not be called, else
        (True, discard_tile136) where the tile accompanies the declaration.
        On success the decision is latched in ``called_reach`` /
        ``to_discard_after_reach`` so subsequent turns replay it.
    """
    if self.is_open_hand:
        return False, 0
    hand_ana = self._get_hand_ana()
    richii, to_discard_34, remain, win_dict = WaitingAnalyser.should_richii(self, hand_ana)
    if richii:
        self.called_reach = True
        self.to_discard_after_reach = to_discard_34
        # self.thclient.drawer and self.thclient.drawer.set_shantins(hand_ana)
        self._show_waiting(win_dict)
        self.riichi_waiting = win_dict
        return True, self.tile_34_to_136(to_discard_34)
    return False, 0
def should_call_kan(self, tile136, from_opponent):
    """Decide whether to call kan on ``tile136``.

    :param tile136: the triggering tile in 136-format.
    :param from_opponent: True for a potential open kan (minkan) on an
        opponent discard; False for the self-draw checks (ankan/chakan).
    :return: (Meld.KAN/Meld.CHANKAN, tile136_to_kan) when kan should be
        called, otherwise (False, False).  A declined minkan is remembered
        in ``self.not_kan`` so the same tile is not ponned later either.
    """
    tile34 = tile136 // 4
    hand_ana = self._get_hand_ana()
    shantin = hand_ana.current_shantin
    if shantin > 3:
        # Hand too far from tenpai for a kan to pay off.
        return False, False

    def can_kan():
        # A number tile that is part of an existing chow cannot be kanned away.
        if tile34 > 26:
            return True
        opens = hand_ana.all_melds
        for m in opens:
            if len(m) == 3 and m[0] != m[1]:
                if tile34 in m:
                    msg = ' 🤖 Tile is in a chow, do not KAN'
                    self._post_msg(msg)
                    return False
        return True

    if from_opponent:
        # minkan
        if not can_kan() or tile34 in self.game_table.bonus_tiles:
            self.not_kan = tile136
            return False, False
        if self.game_table.has_reach:
            self.not_kan = tile136
            self._post_msg(' 🤖 Someone has reach, do not KAN')
            return False, False
        if self.game_table.kan_num >= 2:
            # Every kan flips a new dora indicator -- too risky after two.
            self.not_kan = tile136
            self._post_msg(' 🤖 Too many kans, do not KAN')
            return False, False
        if hand_ana.sp_decided:
            self.not_kan = tile136
            self._post_msg(' 🤖 Decide to form Seven pairs, do not KAN')
            return False, False
        should_kan = False
        if self.hand34.count(tile34) == 3:
            # Kan only when it supports the hand's value or chosen form.
            if tile34 in self.bonus_honors:
                self._post_msg(' 🤖 Bonus honors, KAN!')
                should_kan = True
            if hand_ana.pp_decided or self.decided_pph:
                self._post_msg(' 🤖 Decide to form ponpon, KAN!')
                self.decided_pph, should_kan = True, True
            if hand_ana.qh_decided(len(self.discard34)) and (tile34 // 9 in hand_ana.qh_type() or tile34 // 9 == 3):
                self._post_msg(' 🤖 Decide to form qing/hun, KAN!')
                self.decided_qh, should_kan = True, True
            if self.decided_dy:
                self._post_msg(' 🤖 Can make danyao, KAN!')
                self.decided_dy, should_kan = True, True
            if self.has_dori:
                self._post_msg(' 🤖 Has dori, KAN!')
                should_kan = True
        # handle kan
        if should_kan:
            # Remove our three copies from the concealed tiles; the caller
            # registers the meld itself.
            self_tiles = [t for t in self.tiles136 if t // 4 == tile34]
            for t in self_tiles:
                self.tiles136.remove(t)
            msg = " 😊[Call minkan]: {}".format(Tile.t34_to_g([tile136 // 4] * 4))
            self._post_msg(msg)
            return Meld.KAN, tile136
        else:
            self.not_kan = tile136
    else:
        # ankan
        ankan_tile = None
        if self.hand34.count(tile34) == 4:
            ankan_tile = tile34
        else:
            # The drawn tile may complete a quad elsewhere in the hand.
            own_tile = [tile for tile in set(self.hand34) if self.hand34.count(tile) == 4]
            if own_tile and len(own_tile) > 0:
                ankan_tile = own_tile[0]
        # BUGFIX: compare against None explicitly -- tile 0 (1-man) is falsy,
        # so a concealed quad of 1-man was previously never kanned.
        if ankan_tile is not None:
            if self.reach_status and not self.game_table.has_reach and can_kan():
                self._post_msg(' 🤖 Only bot riichis, KAN!')
                msg = " 🤖[Bot calls ankan]: {}".format(Tile.t34_to_g([ankan_tile] * 4))
                self._post_msg(msg)
                return Meld.KAN, self.tile_34_to_136(ankan_tile)
            if not can_kan() or self.game_table.has_reach:
                self._post_msg(' 🤖 Someone called riichi, NOT KAN!')
                return False, False
            msg = " 🤖[Bot calls ankan]: {}".format(Tile.t34_to_g([ankan_tile] * 4))
            self._post_msg(msg)
            return Meld.KAN, self.tile_34_to_136(ankan_tile)
        # chakan: upgrade an existing pon of this tile to a kan.
        for meld in self.meld136:
            if meld.tiles[0] // 4 == meld.tiles[1] // 4 == tile34:
                if not can_kan() or self.game_table.has_reach:
                    self._post_msg(' 🤖 Someone called riichi, NOT KAN!')
                    return False, False
                msg = " 🤖[Bot calls chakan]: {}".format(Tile.t34_to_g([tile136 // 4] * 4))
                self._post_msg(msg)
                return Meld.CHANKAN, tile136
    return False, False
def try_to_call_meld(self, tile136, might_call_chi):
    """Decide whether to call pon/chow on an opponent's discard.

    :param tile136: the discarded tile in 136-format.
    :param might_call_chi: True when the discarder is to the bot's left,
        i.e. a chow is legal.
    :return: (Meld, 0) when a call should be made, else (None, None).
    """
    # check if bot can win this tile
    if self.reach_status:
        # After riichi the hand is locked; never call melds.
        return None, None
    if WaitingAnalyser.can_win(self, tile136 // 4):
        # A winning tile is handled by the win path, not by melding.
        self._post_msg(' 🤖 Can win this tile')
        return None, None
    hand_ana = self._get_hand_ana()
    shantin = hand_ana.current_shantin
    can_make_danyao = self._can_make_danyao(hand_ana)
    tile34 = tile136 // 4
    all_melds = hand_ana.all_melds
    # check if better to stay close hand
    if self._better_not_to_call_meld(hand_ana, tile34):
        return None, None
    # check if calling meld might improve waiting
    meld, tag = self._call_meld_check_waiting_improve(tile136, might_call_chi, hand_ana)
    if meld and tag == 0:
        return meld, 0
    if meld == 1 and tag == 1:
        # Sentinel: already waiting and no call improves it -- stand pat.
        return None, None
    # check if calling meld could make it waiting
    meld, tag = self._call_meld_check_waiting(tile136, might_call_chi, hand_ana)
    if meld:
        return meld, 0
    # if tile34 was not kanned, then also not pon
    # BUGFIX: compare against None explicitly -- tile136 == 0 (the first copy
    # of 1-man) is falsy and previously slipped through this guard.
    if self.not_kan is not None and self.not_kan == tile136:
        if tile34 > 26 or all(tile34 not in m for m in all_melds if len(m) == 3 and m[0] != m[1]):
            return None, None
    # always pon dori honors
    if self.hand34.count(tile34) == 2 and tile34 in self.bonus_honors:
        if shantin > 1 or self.game_table.revealed_tiles[tile34] > 0 or self.turn_num >= 6:
            self._post_msg(' 🤖 Always call dori honors!')
            self_tiles = [t136 for t136 in self.tiles136 if t136 // 4 == tile136 // 4]
            self._post_msg(" 🤖[Bot calls pon]: {}".format(Tile.t34_to_g([tile34] * 3)))
            return Meld(Meld.PON, sorted(self_tiles[0:2] + [tile136]), True, tile136), 0
    # hand tiles too bad, better not call meld
    if shantin >= 4:
        self._post_msg(' 🤖 Terrible hand tiles, do not call meld!')
        return None, None
    # check pon for special form
    if self.hand34.count(tile34) >= 2:
        should_pon = False
        # Case: decided to form "ponpon"
        if hand_ana.pp_decided or self.decided_pph:
            self._post_msg(' 🤖 Decide to form PPH, PON!!!')
            self.decided_pph, should_pon = True, True
        # Case: decided to form "qing/hun"
        if (hand_ana.qh_decided(self.turn_num) or self.decided_qh) and tile34 // 9 in (hand_ana.qh_type() + [3]):
            reduce_shantin = self._call_pon_reduce_shantin(
                self.hand34, self.total_melds34, hand_ana.shantins[hand_ana.QH], hand_ana.QH, tile34
            )
            if reduce_shantin:
                self._post_msg(' 🤖 Decide to form QH, PON!!!')
                self.decided_qh, should_pon = True, True
        # Case: neither "qing/hun" nor "seven pairs", check "danyao" and "dori"
        if not hand_ana.sp_decided:
            if self.has_dori:
                reduce_shantin = self._call_pon_reduce_shantin(
                    self.hand34, self.total_melds34, hand_ana.shantins[hand_ana.NORMAL], hand_ana.NORMAL, tile34
                )
                if reduce_shantin:
                    self._post_msg(' 🤖 Already have dori, PON!!!')
                    should_pon = True
            if (can_make_danyao or self.decided_dy) and tile34 not in Tile.ONENINE:
                reduce_shantin = self._call_pon_reduce_shantin(
                    self.hand34, self.total_melds34, hand_ana.shantins[hand_ana.NO19], hand_ana.NO19, tile34
                )
                if reduce_shantin:
                    self._post_msg(' 🤖 Can make Danyao, PON!!!')
                    self.decided_dy, should_pon = True, True
        if should_pon:
            self_tiles = [t136 for t136 in self.tiles136 if t136 // 4 == tile136 // 4]
            self._post_msg(" 🤖[Bot calls pon]: {}".format(Tile.t34_to_g([tile136 // 4] * 3)))
            return Meld(Meld.PON, self_tiles[0:2] + [tile136], True, tile136), 0
    # check chow for special form
    if might_call_chi and tile34 < 27:
        if hand_ana.pp_decided or self.decided_pph:
            self._post_msg(' 🤖 Is making PPH, not CHOW!!!')
            return None, None
        if hand_ana.sp_decided:
            self._post_msg(' 🤖 Is making 7P, not CHOW!!!')
            return None, None
        candidates = self._get_chow_candidates(tile34)
        if len(candidates) == 0:
            return None, None
        for candidate in candidates:
            should_chow = False
            if hand_ana.qh_decided(self.turn_num) or self.decided_qh:
                if tile34 // 9 in hand_ana.qh_type():
                    reduce_shantin = self._call_chow_reduce_shantin(
                        self.hand34, self.total_melds34, hand_ana.shantins[hand_ana.QH], hand_ana.QH,
                        candidate[0], candidate[1], tile34
                    )
                    if reduce_shantin:
                        self._post_msg(' 🤖 Decide to form qing/hun, CHOW!!!')
                        self.decided_qh, should_chow = True, True
            if can_make_danyao or self.decided_dy:
                # All three chow tiles must be simples (2-8) for tanyao.
                if all(0 < tile % 9 < 8 for tile in (candidate + [tile34])):
                    reduce_shantin = self._call_chow_reduce_shantin(
                        self.hand34, self.total_melds34, hand_ana.shantins[hand_ana.NO19], hand_ana.NO19,
                        candidate[0], candidate[1], tile34
                    )
                    if reduce_shantin:
                        self._post_msg(' 🤖 Decide to form Danyao, CHOW!!!')
                        self.decided_dy, should_chow = True, True
            if self.has_dori:
                reduce_shantin = self._call_chow_reduce_shantin(
                    self.hand34, self.total_melds34, hand_ana.shantins[hand_ana.NORMAL], hand_ana.NORMAL,
                    candidate[0], candidate[1], tile34
                )
                if reduce_shantin:
                    self._post_msg(' 🤖 Already has dori, CHOW!!!')
                    should_chow = True
            if should_chow:
                opt1, opt2 = self.tile_34_to_136(candidate[0]), self.tile_34_to_136(candidate[1])
                msg = " 😊[Bot calls chow]: {}".format(Tile.t34_to_g(candidate + [tile34]))
                self._post_msg(msg)
                return Meld(Meld.CHI, sorted([opt1, opt2, tile136]), True, tile136), 0
    return None, None
def _call_pon_reduce_shantin(self, handtiles, melds, currentshantin, form, pontile):
    """Return True if ponning ``pontile`` lowers the shantin count of ``form``.

    Simulates the pon on copies of the hand/melds and re-analyses the hand
    with only the given form enabled.
    """
    if handtiles.count(pontile) < 2:
        return False
    remaining = deepcopy(handtiles)
    remaining.remove(pontile)
    remaining.remove(pontile)
    new_melds = deepcopy(melds)
    new_melds.append([pontile] * 3)
    form_mask = [1 if i == form else 0 for i in range(6)]
    analyser = HandAnalyser(remaining, new_melds, form_mask, self.bonus_honors,
                            self.game_table.revealed_tiles, self.game_table.bonus_tiles)
    return analyser.shantins[form] < currentshantin
def _call_chow_reduce_shantin(self, handtiles, melds, currentshantin, form, op1, op2, chowtile):
    """Return True if chowing ``chowtile`` with hand tiles ``op1``/``op2``
    lowers the shantin count of ``form``.

    Simulates the chow on copies of the hand/melds and re-analyses the hand
    with only the given form enabled.
    """
    if op1 not in handtiles or op2 not in handtiles:
        return False
    remaining = deepcopy(handtiles)
    remaining.remove(op1)
    remaining.remove(op2)
    new_melds = deepcopy(melds)
    new_melds.append(sorted([op1, op2, chowtile]))
    form_mask = [1 if i == form else 0 for i in range(6)]
    analyser = HandAnalyser(remaining, new_melds, form_mask, self.bonus_honors,
                            self.game_table.revealed_tiles, self.game_table.bonus_tiles)
    return analyser.shantins[form] < currentshantin
def _better_not_to_call_meld(self, hand_ana, tile34):
    """Return True when keeping the hand closed beats calling a meld
    (mainly to preserve riichi value on an already-close hand)."""
    # almost riichi, do not call meld
    if not self.is_open_hand:
        if self.game_table.kan_num >= 1 and hand_ana.current_shantin < 2 and self.turn_num <= 12:
            self._post_msg(' 🤖 There was kan and hand tiles are not bad, better to profit from richii')
            return True
        # The block below applies to non-yakuhai tiles and to yakuhai with no
        # copies revealed yet; a yakuhai with a copy already gone skips it
        # (it is urgent, so the call stays allowed).
        if (tile34 in self.bonus_honors and self.game_table.revealed_tiles[tile34] == 0) \
                or tile34 not in self.bonus_honors:
            if hand_ana.current_shantin < 2 and self.turn_num <= 6:
                self._post_msg(' 🤖 Almost riichi, do not call meld!!!')
                return True
            if hand_ana.shantins[hand_ana.PINHU] < 2 and self.turn_num <= 9:
                self._post_msg(' 🤖 Almost riichi and pinhu, do not call meld!!!')
                return True
    return False
def _call_meld_check_waiting_improve(self, tile136, might_call_chi, hand_ana):
    """When the hand is already waiting, check whether a pon/chow would
    improve the wait (more live tiles or better score).

    :return: (Meld, 0) to call, (1, 1) as a sentinel meaning "already
        waiting, no call helps", or (None, None) when not waiting at all.
    """
    tile34 = tile136 // 4
    # check if calling pon might improve waiting
    waiting_dict = WaitingAnalyser.check_waiting_m1(self)
    if len(waiting_dict) > 0:
        if self.hand34.count(tile34) >= 2:
            waiting_dict_after_pon = WaitingAnalyser.check_waiting_after_pon(self, tile136)
            if len(waiting_dict_after_pon) > 0 and self.is_open_hand:
                for w_dict in waiting_dict_after_pon:
                    # NOTE(review): ``waiting_dict['remain_num']`` uses string
                    # keys while similar structures elsewhere in this file are
                    # indexed positionally ([i][1][...]); confirm that
                    # check_waiting_m1 really returns a dict here.
                    remain_b, remain_a = waiting_dict['remain_num'], w_dict[1]['remain_num']
                    score_b, score_a = waiting_dict['ave_score'], w_dict[1]['ave_score']
                    improve_remain_num = remain_a > remain_b and abs(remain_a - remain_b) >= 2
                    # NOTE(review): this reads "before > after + 500", i.e. the
                    # score drops after the pon; verify the intended direction.
                    improve_score = score_b > score_a + 500
                    if (improve_remain_num or improve_score) and self.can_discard(w_dict[0], hand_ana):
                        self_tiles = [t136 for t136 in self.tiles136 if t136 // 4 == tile136 // 4]
                        m_pon = Meld(Meld.PON, sorted(self_tiles[0:2] + [tile136]), True, tile136)
                        msg = " 🤖[Bot calls pon]: {}".format(Tile.t34_to_g([tile136 // 4] * 3))
                        self._post_msg(msg)
                        return m_pon, 0
            msg = " 🤖 Is waiting now, calling pon does not improve score"
            self._post_msg(msg)
    # check if calling chow might improve waiting
    if len(waiting_dict) > 0 and might_call_chi and tile34 < 27:
        chow_candidates = self._get_chow_candidates(tile34)
        for candidate in chow_candidates:
            waiting_dict_after_chow = WaitingAnalyser.check_waiting_after_chow(self, candidate, tile136)
            if len(waiting_dict_after_chow) > 0 and self.is_open_hand:
                for w_dict in waiting_dict_after_chow:
                    remain_b, remain_a = waiting_dict['remain_num'], w_dict[1]['remain_num']
                    score_b, score_a = waiting_dict['ave_score'], w_dict[1]['ave_score']
                    improve_remain_num = remain_a > remain_b and abs(remain_a - remain_b) >= 2
                    improve_score = score_b > score_a + 500
                    if (improve_remain_num or improve_score) and self.can_discard(w_dict[0], hand_ana):
                        t1, t2 = self.tile_34_to_136(candidate[0]), self.tile_34_to_136(candidate[1])
                        m_chi = Meld(Meld.CHI, sorted([t1, t2, tile136]), True, tile136)
                        msg = " 🤖[Bot calls chow]: {}".format(Tile.t34_to_g(candidate + [tile34]))
                        self._post_msg(msg)
                        return m_chi, 0
    if len(waiting_dict) > 0:
        return 1, 1
    else:
        return None, None
def _call_meld_check_waiting(self, tile136, might_call_chi, hand_ana):
    """Call pon/chow if doing so puts the hand in tenpai with enough value.

    :param tile136: the discarded tile in 136-format.
    :param might_call_chi: True when a chow from this discarder is legal.
    :param hand_ana: HandAnalyser of the current hand (for safety checks).
    :return: (Meld, 0) when a call should be made, otherwise (None, None).
    """
    # check if waiting after pon
    if self.hand34.count(tile136 // 4) >= 2:
        waiting_dict_after_pon = WaitingAnalyser.check_waiting_after_pon(self, tile136)
        if len(waiting_dict_after_pon) > 0:
            for w_dict in waiting_dict_after_pon:
                remain = w_dict[1]['remain_num']
                score = w_dict[1]['ave_score']
                # A cheap hand is not worth opening a closed hand for.
                if score < 2000 and not self.is_open_hand:
                    continue
                if (self.can_discard(w_dict[0], hand_ana) and remain > 3) or (score >= 4000 and remain > 3):
                    self._post_msg(' 🤖 Waiting after pon, PON!!!')
                    self_tiles = [t136 for t136 in self.tiles136 if t136 // 4 == tile136 // 4]
                    self._post_msg(" 🤖[Bot calls pon]: {}".format(Tile.t34_to_g([tile136 // 4] * 3)))
                    return Meld(Meld.PON, sorted(self_tiles[0:2] + [tile136]), True, tile136), 0
    if not might_call_chi or tile136 // 4 > 26:
        return None, None
    # check if waiting after chow
    chow_candidates = self._get_chow_candidates(tile136 // 4)
    if len(chow_candidates) == 0:
        return None, None
    for candidate in chow_candidates:
        waiting_dict_after_chow = WaitingAnalyser.check_waiting_after_chow(self, candidate, tile136)
        if len(waiting_dict_after_chow) > 0:
            for w_dict in waiting_dict_after_chow:
                remain = w_dict[1]['remain_num']
                score = w_dict[1]['ave_score']
                if score < 2000 and not self.is_open_hand:
                    continue
                if (self.can_discard(w_dict[0], hand_ana) and remain > 3) or (score >= 4000 and remain > 3):
                    self._post_msg(' 🤖 Waiting after chow, CHOW!!!')
                    t1, t2 = self.tile_34_to_136(candidate[0]), self.tile_34_to_136(candidate[1])
                    # BUGFIX: the log label said "pon" while a chow is called;
                    # aligned with the chow messages used elsewhere.
                    self._post_msg(" 🤖[Bot calls chow]: {}".format(Tile.t34_to_g(candidate + [tile136 // 4])))
                    return Meld(Meld.CHI, sorted([t1, t2, tile136]), True, tile136), 0
    return None, None
def _get_chow_candidates(self, tile34):
    """List the pairs of hand tiles (34-format, ascending) that could form
    a chow with ``tile34``, ranked by the original priority scheme."""
    hand = self.hand34
    col = tile34 % 9
    options = []
    if col > 1 and (tile34 - 2) in hand and (tile34 - 1) in hand:
        options.append([tile34 - 2, tile34 - 1])
    if 8 > col > 0 and (tile34 - 1) in hand and (tile34 + 1) in hand:
        options.append([tile34 - 1, tile34 + 1])
    if col < 7 and (tile34 + 1) in hand and (tile34 + 2) in hand:
        options.append([tile34 + 1, tile34 + 2])

    def _rank(pair):
        # Gap-of-two pairs rank 1, edge pairs 0, everything else 2
        # (ascending sort, so edge pairs come first).
        if pair[1] - pair[0] == 2:
            return 1
        if pair[0] % 9 == 0 or pair[1] % 9 == 8:
            return 0
        return 2

    options.sort(key=_rank)
    return options
def _can_make_danyao(self, hand_ana: HandAnalyser):
    """Return True if committing to a tanyao (all-simples) hand looks
    worthwhile given the dora count and turn number."""
    if self.decided_dy:
        return True
    # NOTE(review): the first operand tests the NORMAL shantin for truthiness
    # (i.e. != 0) rather than comparing it to NO19; confirm this is intended.
    if hand_ana.shantins[hand_ana.NORMAL] and hand_ana.shantins[hand_ana.NO19] <= 2:
        for tile in set(self.hand34):
            if tile in Tile.ONENINE and tile in self.game_table.bonus_tiles:
                # Tanyao would force discarding a terminal dora -- not worth it.
                return False
        if self.cnt_total_bonus_tiles >= 3:
            return True
        if self.cnt_total_bonus_tiles >= 2 and self.turn_num >= 10:
            return True
    return False
def handle_opponent_discard(self, opp_index):
    """Update the waiting prediction for opponent ``opp_index`` (1-3) after
    their discard, using the riichi or non-riichi classifier as appropriate."""
    opponent_obj = self.game_table.get_player(opp_index)
    if opponent_obj.reach_status:
        richii_f = opponent_obj.richii_feature_225(opp_index)
        opponent_obj.add_prediction(self.ensemble_clfs.predict_richii_single_prio(richii_f))
    else:
        normal_f = opponent_obj.waiting_feature_212(opp_index)
        opponent_obj.add_prediction(self.ensemble_clfs.predict_normal_single_prio(normal_f))
    # NOTE(review): ``self.thclient`` is referenced live here but only appears
    # in commented-out code elsewhere -- confirm the attribute always exists.
    self.thclient.drawer and self.thclient.drawer.set_prediction_history(opp_index, opponent_obj.waiting_prediction)
def _get_hand_ana(self):
    """Build a fresh HandAnalyser over the current hand with every target
    form enabled."""
    table = self.game_table
    return HandAnalyser(
        self.hand34,
        self.total_melds34,
        [1] * 6,
        self.bonus_honors,
        table.revealed_tiles,
        table.bonus_tiles,
    )
def log_opponents_prediction(self):
    """Assemble a one-line summary of the predicted waits of all opponents
    (the log call itself is currently disabled)."""
    header = " " * 8 + "🤖[Waiting prediction] "
    parts = []
    for opp_index in range(1, 4):
        waitings = self.game_table.get_player(opp_index).waiting_prediction
        if waitings:
            parts.append("P{}:{}".format(opp_index, Tile.t34_to_g(waitings)))
    prd_str = header + "".join(parts)
    # self.thclient.both_log(prd_str)
def _is_near_bonus(self, t34):
    """Return True when tile ``t34`` sits close enough to the first dora
    indicator that holding it is worthwhile (mid-game heuristic).

    The tile is deemed "near" when it shares the suit of the indicator and
    is at most 3 steps away, unless the connecting tiles between them are
    mostly dead (more than two copies visible).
    """
    if self.turn_num > 12:
        return False
    first_bonus_tile = self.game_table.bonus_tiles[0]
    near_bonus = self.turn_num >= 9
    if near_bonus and t34 < 27:
        gap = abs(first_bonus_tile - t34)
        if first_bonus_tile // 9 == t34 // 9 and gap <= 3:
            # Tiles that would bridge t34 with the indicator.
            to_be_considered = []
            if gap == 2:
                to_be_considered.append((first_bonus_tile + t34) // 2)
            if gap == 1:
                left = min(first_bonus_tile, t34) - 1
                # BUGFIX: the original wrote `(left // 9 == first_bonus_tile) // 9`,
                # which is bool//9 == 0 (always falsy), so `left` was never added.
                if left // 9 == first_bonus_tile // 9:
                    to_be_considered.append(left)
                right = max(first_bonus_tile, t34) + 1
                if right // 9 == first_bonus_tile // 9:
                    to_be_considered.append(right)
            if gap == 3:
                to_be_considered.append(min(first_bonus_tile, t34) + 1)
                to_be_considered.append(max(first_bonus_tile, t34) - 1)
            if len(to_be_considered) > 0:
                # Count copies visible on the table plus in our own hand.
                total_revealed = deepcopy(self.game_table.revealed_tiles)
                for t in self.hand34:
                    total_revealed[t] += 1
                if any(total_revealed[tile] > 2 for tile in to_be_considered):
                    near_bonus = False
        else:
            near_bonus = False
    if t34 > 26 and t34 == first_bonus_tile and self.turn_num >= 9:
        # Honor indicator: only the exact same honor counts as near.
        near_bonus = True
    return near_bonus
def _show_shantins(self, hand_ana):
    """Log the hand analyser's shantin summary."""
    # self.thclient.drawer and self.thclient.drawer.set_shantins(hand_ana)
    self._post_msg(' 🤖[Shantins]: {}'.format(hand_ana))
def _show_waiting(self, waiting_dict):
    """Log the current waits with their live-tile counts and scores.

    :param waiting_dict: mapping of waiting tile (34-format) -> score/value.
    """
    revealed = deepcopy(self.game_table.revealed_tiles)
    for tile in self.hand34:
        revealed[tile] += 1
    # Live copies of each waiting tile (4 minus everything visible to us).
    remaining = {k: 4 - revealed[k] for k in waiting_dict}
    waiting_str = ','.join(
        "{} {} {}".format(remaining[k], Tile.t34_to_g(k), v)
        for k, v in waiting_dict.items()
    )
    total_remain = sum(remaining.values())
    self._post_msg(' 😊 [Waiting]: {}, total remain: {}'.format(waiting_str, total_remain))
    waiting_lst = [[k, v, remaining[k]] for k, v in waiting_dict.items()]
    # self.thclient.drawer and self.thclient.drawer.set_waiting(waiting_lst)
def show_riichi_waiting(self):
    """Re-print this bot's riichi waits, if riichi has been declared."""
    if not (self.reach_status and self.riichi_waiting):
        return
    self._show_waiting(self.riichi_waiting)
def _post_msg(self, msg):
    """Emit a bot decision/status log line (currently plain stdout)."""
    print(msg)
def erase_states(self):
    """Clear all per-hand decision state back to its initial values."""
    self.called_reach = False
    self.to_discard_after_reach = -1
    self.riichi_waiting = None
    self.not_kan = None
    self.decided_pph = False
    self.decided_dy = False
    self.decided_qh = False
@property
def has_dori(self):
    """True if the bot holds a yakuhai triplet, melded or concealed."""
    if any(meld[0] > 26 and meld[0] in self.bonus_honors for meld in self.total_melds34):
        return True
    return any(self.hand34.count(tile) >= 3 for tile in self.bonus_honors)
@property
def opponents(self):
    """The three opponent player objects (seats 1-3)."""
    return [self.game_table.get_player(seat) for seat in (1, 2, 3)]
@property
def total_tiles34(self):
    """All of the bot's tiles (hand plus melds) converted to 34-format."""
    return [t136 // 4 for t136 in self.total_tiles136]
@property
def total_tiles136(self):
    """All of the bot's tiles in 136-format: concealed hand plus meld tiles."""
    meld_tiles = [t136 for meld in self.meld136 for t136 in meld.tiles]
    return self.tiles136 + meld_tiles
@property
def potential_fan(self):
    """Rough hand-value estimate: dora count plus open and concealed
    yakuhai fan."""
    dora = self.cnt_total_bonus_tiles
    return dora + self.cnt_open_fan + self.cnt_hand_fan
@property
def cnt_total_bonus_tiles(self):
    """Total bonus-tile count: red fives (matched in 136-format) plus
    regular dora (matched in 34-format)."""
    red_cnt = sum(1 for t136 in self.total_tiles136 if t136 in Tile.RED_BONUS)
    dora_cnt = sum(self.game_table.bonus_tiles.count(t34) for t34 in self.total_tiles34)
    return red_cnt + dora_cnt
@property
def cnt_open_fan(self):
    """Fan contributed by melded yakuhai triplets (round wind, seat wind
    and dragon tiles each count once)."""
    total = 0
    for meld in self.total_melds34:
        head = meld[0]
        if head <= 26 or head not in self.bonus_honors:
            continue
        total += int(head == self.round_wind)
        total += int(head == self.player_wind)
        total += int(head in Tile.THREES)
    return total
@property
def cnt_hand_fan(self):
    """Number of concealed yakuhai triplets in the hand."""
    return sum(1 for tile in self.bonus_honors if self.hand34.count(tile) >= 3)
@property
def total_revealed(self):
    """Per-tile visible counts: tiles revealed on the table plus this
    bot's own hand."""
    counts = deepcopy(self.game_table.revealed_tiles)
    for tile in self.hand34:
        counts[tile] += 1
    return counts
@property
def is_all_last(self):
    """True in the final (all-last) phase: round_number >= 3."""
    return self.game_table.round_number >= 3
@property
def current_rank(self):
    """This bot's rank by score: 0 = lowest ... 3 = leader.

    NOTE(review): ties resolve to the first matching index in the sorted list.
    """
    return sorted(self.game_table.scores).index(self.score)
@property
def need_scores(self):
    """Point gaps the bot must close: the deficit to every player currently
    ahead, plus the gap to the 30000-point line when nobody has reached it."""
    gaps = [score - self.score for score in self.game_table.scores if score > self.score]
    if max(self.game_table.scores) < 30000:
        gaps.append(30000 - self.score)
    return gaps
| 44.640826 | 153 | 0.548979 | 97,021 | 0.994852 | 0 | 0 | 36,921 | 0.378588 | 0 | 0 | 5,639 | 0.057822 |
10cf83b6989722a699079e2f639db75597032ff7 | 5,418 | py | Python | jupyterlab_templates/extension.py | ElectroDevTeam/jupyterlab_templates | b09d8ac52b583bdc8fdcce11f6da14341d282b54 | [
"Apache-2.0"
] | null | null | null | jupyterlab_templates/extension.py | ElectroDevTeam/jupyterlab_templates | b09d8ac52b583bdc8fdcce11f6da14341d282b54 | [
"Apache-2.0"
] | null | null | null | jupyterlab_templates/extension.py | ElectroDevTeam/jupyterlab_templates | b09d8ac52b583bdc8fdcce11f6da14341d282b54 | [
"Apache-2.0"
] | null | null | null | import fnmatch
import json
import os
import os.path
import jupyter_core.paths
from io import open
from datetime import datetime
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
DEFAULT_USERNAME = "anonymous"
USERNAME_TEMPLATE = "##username##"
TEMPLATES_TO_FUNCTION = {
"##datetime##": lambda: datetime.now().strftime(DATE_FORMAT + " " + TIME_FORMAT),
"##date##": lambda: datetime.now().strftime(DATE_FORMAT),
"##time##": lambda: datetime.now().strftime(TIME_FORMAT),
}
class TemplatesLoader():
def __init__(self, template_dirs):
self.template_dirs = template_dirs
def get_templates(self, username=DEFAULT_USERNAME):
templates = {}
for path in self.template_dirs:
# in order to produce correct filenames, abspath should point to the parent directory of path
abspath = os.path.abspath(os.path.join(os.path.realpath(path), os.pardir))
files = []
# get all files in subdirectories
for dirname, dirnames, filenames in os.walk(path):
if dirname.startswith("."):
# Skip hidden paths
continue
for filename in fnmatch.filter(filenames, '*.ipynb'):
if '.ipynb_checkpoints' not in dirname:
files.append((os.path.join(dirname, filename), dirname.replace(path, ''), filename))
# pull contents and push into templates list
for f, dirname, filename in files:
with open(os.path.join(abspath, f), 'r', encoding='utf8') as fp:
content = fp.read()
templates[os.path.join(dirname, filename)] = {'path': f,
'dirname': dirname,
'filename': filename,
'content': format_content(content, username),
'username': username}
return templates
def format_content(content, username):
formatted_content = content.replace(USERNAME_TEMPLATE, username)
for pattern, func in TEMPLATES_TO_FUNCTION.items():
formatted_content = formatted_content.replace(pattern, func())
return formatted_content
class TemplatesHandler(IPythonHandler):
def initialize(self, loader):
self.loader = loader
def get(self):
temp = self.get_argument('template', '')
if temp:
self.finish(self.loader.get_templates(get_username(self))[temp])
else:
self.set_status(404)
class TemplateNamesHandler(IPythonHandler):
def initialize(self, loader):
self.loader = loader
def get(self):
template_names = self.loader.get_templates(get_username(self)).keys()
self.finish(json.dumps(sorted(template_names)))
class TemplateTotorialPathHandler(IPythonHandler):
def initialize(self, totorial_path):
self.totorial_path = totorial_path
def get(self):
self.finish(json.dumps(self.totorial_path))
def get_username(web_handler):
    """Return the login name for the current user of *web_handler*."""
    user = web_handler.get_current_user()
    if user == DEFAULT_USERNAME:
        # Anonymous/default sessions yield the plain default string.
        return user
    # Authenticated sessions yield a dict; presumably keyed by 'name'.
    return user['name']
def convert_template_to_relative_path(absolute_path, root_dirs):
    """Strip the matching root directory prefix from *absolute_path*.

    Returns the path relative to the first directory in *root_dirs* that
    contains it, or None when no root matches.
    """
    for root_dir in root_dirs:
        try:
            common = os.path.commonpath([absolute_path, root_dir])
        except ValueError:
            # commonpath raises when the paths mix absolute/relative forms
            # or live on different drives; such a root cannot contain the
            # path, so skip it instead of crashing (the original crashed).
            continue
        if common == root_dir:
            # +1 drops the separator that follows the root prefix.
            return absolute_path[len(root_dir) + 1:]
    return None
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.
    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.
    """
    web_app = nb_server_app.web_app
    config = nb_server_app.config.get('JupyterLabTemplates', {})
    template_dirs = config.get('template_dirs', [])
    totorial_path = config.get('totorial_path')
    if config.get('include_default', True):
        # Ship the package's bundled templates alongside configured dirs.
        template_dirs.append(os.path.join(os.path.dirname(__file__), 'templates'))
    base_url = web_app.settings['base_url']
    host_pattern = '.*$'
    print('Installing jupyterlab_templates handler on path %s' % url_path_join(base_url, 'templates'))
    if config.get('include_core_paths', True):
        template_dirs.extend([os.path.join(x, 'notebook_templates') for x in jupyter_core.paths.jupyter_path()])
    print('Search paths:\n\t%s' % '\n\t'.join(template_dirs))
    loader = TemplatesLoader(template_dirs)
    print('Available templates:\n\t%s' % '\n\t'.join(t for t in loader.get_templates()))
    relative_totorial = (convert_template_to_relative_path(totorial_path, template_dirs)
                         if totorial_path else None)
    # Register all three endpoints in a single call.
    web_app.add_handlers(host_pattern, [
        (url_path_join(base_url, 'templates/names'), TemplateNamesHandler, {'loader': loader}),
        (url_path_join(base_url, 'templates/get'), TemplatesHandler, {'loader': loader}),
        (url_path_join(base_url, 'templates/get_totorial_path'), TemplateTotorialPathHandler,
         {'totorial_path': relative_totorial}),
    ])
| 39.26087 | 114 | 0.634736 | 2,364 | 0.436323 | 0 | 0 | 0 | 0 | 0 | 0 | 943 | 0.174049 |
10cf86523f7b53b3cbe34ca1abb35ccfd600860e | 3,633 | py | Python | src/automotive/application/actions/serial_actions.py | philosophy912/automotive | de918611652b789a83545f346c1569c2c2c955a6 | [
"Apache-2.0"
] | null | null | null | src/automotive/application/actions/serial_actions.py | philosophy912/automotive | de918611652b789a83545f346c1569c2c2c955a6 | [
"Apache-2.0"
] | null | null | null | src/automotive/application/actions/serial_actions.py | philosophy912/automotive | de918611652b789a83545f346c1569c2c2c955a6 | [
"Apache-2.0"
] | 1 | 2022-02-28T07:23:28.000Z | 2022-02-28T07:23:28.000Z | # -*- coding:utf-8 -*-
# --------------------------------------------------------
# Copyright (C), 2016-2020, lizhe, All rights reserved
# --------------------------------------------------------
# @Name: serial_actions.py
# @Author: lizhe
# @Created: 2021/5/2 - 0:02
# --------------------------------------------------------
from typing import List
from automotive.utils.serial_utils import SerialUtils
from automotive.logger.logger import logger
from automotive.utils.common.enums import SystemTypeEnum
from ..common.interfaces import BaseDevice
class SerialActions(BaseDevice):
    """
    Serial-port action wrapper.

    Thin convenience layer over SerialUtils: opens/closes a serial
    connection and exposes read/write helpers plus a few higher-level
    operations (file-existence checks, console login, file copy, output
    matching).
    """
    def __init__(self, port: str, baud_rate: int):
        super().__init__()
        self.__serial = SerialUtils()
        # Port names are normalized to upper case (e.g. "com3" -> "COM3").
        self.__port = port.upper()
        self.__baud_rate = baud_rate
    @property
    def serial_utils(self):
        # Underlying SerialUtils instance, exposed for direct access.
        return self.__serial
    def open(self):
        """
        Open the serial port and enlarge its I/O buffers.
        """
        logger.info("初始化串口")
        logger.info("打开串口")
        buffer = 32768
        self.__serial.connect(port=self.__port, baud_rate=self.__baud_rate)
        logger.info(f"*************串口初始化成功*************")
        self.__serial.serial_port.set_buffer(buffer, buffer)
        logger.info(f"串口缓存为[{buffer}]")
    def close(self):
        """
        Close the serial port.
        """
        logger.info("关闭串口")
        self.__serial.disconnect()
    def write(self, command: str):
        """
        Write data to the serial port.
        :param command: command string to send
        """
        self.__serial.write(command)
    def read(self) -> str:
        """
        Read the currently buffered data from the serial port.
        :return: buffered output as a single string
        """
        return self.__serial.read()
    def read_lines(self) -> List[str]:
        """
        Read data from the serial port, split into lines.
        :return: list of output lines
        """
        return self.__serial.read_lines()
    def clear_buffer(self):
        """
        Clear the serial buffer (by draining it with a read).
        """
        self.read()
    def file_exist(self, file: str, check_times: int = None, interval: float = 0.5, timeout: int = 10) -> bool:
        """
        Check whether a file exists on the device.
        :param file: file name (absolute path)
        :param check_times: number of checks to perform
        :param interval: pause between checks, in seconds
        :param timeout: overall timeout, in seconds
        :return: True if the file exists, False otherwise
        """
        logger.info(f"检查文件{file}是否存在")
        return self.__serial.file_exist(file, check_times, interval, timeout)
    def login(self, username: str, password: str, double_check: bool = False, login_locator: str = "login"):
        """
        Log into the system over the serial console.
        :param username: user name
        :param password: password
        :param double_check: re-verify after logging in
        :param login_locator: prompt text that marks the login stage
        """
        logger.info(f"登陆系统,用户名{username}, 密码{password}")
        self.__serial.login(username, password, double_check, login_locator)
    def copy_file(self, remote_folder: str, target_folder: str, system_type: SystemTypeEnum, timeout: float = 300):
        """
        Copy all files from one folder to another on the device.
        :param remote_folder: source folder
        :param target_folder: destination folder
        :param system_type: operating system type; currently QNX and Linux
        :param timeout: timeout in seconds
        """
        logger.info(f"复制{remote_folder}下面所有的文件到{target_folder}")
        self.__serial.copy_file(remote_folder, target_folder, system_type, timeout)
    def check_text(self, contents: str) -> bool:
        """
        Check whether the expected marker content appears in the serial output.
        :param contents: marker content(s) to look for
        :return:
            True: every checked item was found in the serial output
            False: at least one item was missing
        """
        # NOTE(review): iterating a plain str here yields single characters;
        # callers most likely pass a sequence of strings - confirm intent.
        logger.warning("使用前请调用clear_buffer方法清除缓存")
        data = self.read()
        result = True
        for content in contents:
            logger.debug(f"现在检查{content}是否在串口数据中存在")
            result = result and content in data
        return result
| 25.95 | 115 | 0.548582 | 3,562 | 0.862261 | 0 | 0 | 66 | 0.015977 | 0 | 0 | 2,001 | 0.484386 |
10d058dbcac1f25191aa2aab916d1db3bd200dbf | 1,446 | py | Python | colours.py | F0lha/UJunior-Projects | c46be35de2266a6bf72b22db5d982ebda1334b30 | [
"MIT"
] | null | null | null | colours.py | F0lha/UJunior-Projects | c46be35de2266a6bf72b22db5d982ebda1334b30 | [
"MIT"
] | null | null | null | colours.py | F0lha/UJunior-Projects | c46be35de2266a6bf72b22db5d982ebda1334b30 | [
"MIT"
] | null | null | null | # Permite usar o modulo do Sense HAT
from sense_hat import SenseHat
from mcpi.minecraft import Minecraft
from time import sleep
sense = SenseHat()
mc = Minecraft.create()
# Minecraft block ids ("blocos")
grass = 2
water = 9
sand = 12
air = 0
# RGB colour tuples ("cores")
white = (255, 255, 255)
green = (0, 255, 0)
blue = (0, 0, 255)
yellow = (255, 255, 0)
black = (0, 0, 0)
# block id -> display colour ("bloco: cor")
colours = {
    air: white,
    grass: green,
    water: blue,
    sand: yellow,
}
# Cache of world lookups: (x, y, z) -> block id, to limit mc.getBlock calls.
known_blocks = {}
def get_blocks():
    """Sample an 8x8 patch of terrain blocks around the player."""
    blocks = []
    x, y, z = mc.player.getTilePos()
    y -= 1  # start one layer below the player's feet
    for dz in range(z-3, z+5):
        for dx in range(x-3, x+5):
            b = (dx, y, dz)
            if b in known_blocks:
                block = known_blocks[b]
            else:
                block = mc.getBlock(dx, y, dz)
                known_blocks[b] = block
            # If the sampled cell is air, dig downwards until something
            # solid is found, caching the result under the same key.
            # NOTE(review): the cache key ignores the depth reached, so a
            # cached column can go stale if the terrain changes - confirm.
            altura = y  # "altura" is Portuguese for "height"
            while block == air:
                altura = altura - 1
                block = mc.getBlock(dx, altura, dz)
                known_blocks[b] = block
            blocks.append(block)
    return blocks
def lookup_colour(block):
    """Return the display colour for *block*; unknown block ids render white."""
    # dict.get collapses the manual membership test + lookup into one step.
    return colours.get(block, white)
def map_blocks_to_colours(blocks):
    """Translate a list of block ids into the matching colour tuples."""
    return list(map(lookup_colour, blocks))
# LED index that marks the player's position on the 8x8 matrix.
player_pos = 27
while True:
    # Continuously mirror the terrain around the player onto the Sense HAT.
    blocks = get_blocks()
    pixels = map_blocks_to_colours(blocks)
    pixels[player_pos] = black
    sense.set_pixels(pixels)
| 20.956522 | 59 | 0.553942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.043568 |
10d1e06975d52bb7b5f8e0f3c90a9460e0d76260 | 12,674 | py | Python | year_2/os_sem2/checkLab14/app.py | honchardev/KPI | f8425681857c02a67127ffb05c0af0563a8473e1 | [
"MIT"
] | null | null | null | year_2/os_sem2/checkLab14/app.py | honchardev/KPI | f8425681857c02a67127ffb05c0af0563a8473e1 | [
"MIT"
] | 21 | 2020-03-24T16:26:04.000Z | 2022-02-18T15:56:16.000Z | year_2/os_sem2/checkLab14/app.py | honchardev/KPI | f8425681857c02a67127ffb05c0af0563a8473e1 | [
"MIT"
] | null | null | null | import sys
import os
import re
def main():
    """Validate CLI arguments and dispatch to the matching processing mode."""
    checker = ArgsChecker()
    if not checker.checkInputArgsAreCorrect():
        return 1
    if checker.processMode == checker.PROCESS_FROM_DIR:
        # Whole-directory mode: re-check every unprocessed variant.
        FromDirProcesser().process(checker.csvFilePath)
    elif checker.processMode == checker.PROCESS_CERTAIN_FILE:
        # Single-variant mode: check the explicitly named result/ls pair.
        processer = FromCertainFileProcesser(
            checker.csvFilePath, checker.resultNFilePath, checker.lsNFilePath
        )
        processer.process(checker.varNum)
    print('End of the program.')
    return 0
class ArgsChecker:
    """Validates command-line arguments and file naming for the checker.

    Two modes:
      * PROCESS_CERTAIN_FILE - three arguments: group .csv plus a
        resultN/lsN pair for one variant;
      * PROCESS_FROM_DIR - one argument: the group .csv only.
    """

    PROCESS_CERTAIN_FILE = 1
    PROCESS_FROM_DIR = 2

    def __init__(self):
        # Filled in by the check methods below.
        self.varNum = 0
        self.processMode = 0

    def checkInputArgsAreCorrect(self):
        """Run all validations; short-circuits on the first failure."""
        return self._checkArgv() and self._checkFilesExist() and self._checkFilesNamingFormat()

    def _checkArgv(self):
        """Validate the argument count and remember the processing mode."""
        # Check command line arguments for amount of input files.
        argv = sys.argv[1:]
        if len(argv) != 3 and len(argv) != 1:
            print('Usage: python app.py [group(.csv)] ([resultN(.txt)] [lsN(.txt)])')
            return False
        # Save mode of processing.
        if len(argv) == 3:
            self.processMode = self.PROCESS_CERTAIN_FILE
        elif len(argv) == 1:
            self.processMode = self.PROCESS_FROM_DIR
        return True

    def _checkFilesExist(self):
        """Return True when every argument names an existing file."""
        # Check if files from sys.argv really exist.
        for arg in sys.argv[1:]:
            fullFileName = os.path.splitext(arg)
            # This program works with files without extensions.
            if not os.path.exists(arg) and not os.path.exists(fullFileName[0]):
                print('File %s does not exist.' % arg)
                return False
        return True

    def _checkFilesNamingFormat(self):
        """Validate the .csv/resultN/lsN file names and store paths/variant."""
        # Pass if FROM_DIR mode.
        if self.processMode == self.PROCESS_FROM_DIR:
            # Save .csv filepath.
            self.csvFilePath = sys.argv[1]
            return True
        # Check if files from sys.argv have a proper naming format.
        fileNames = sys.argv[1:]
        # Check [.csv] file: [XX.csv] or [XX], where X is a digit.
        csvMatch = re.match(r'^(\d){2}(\.csv)?$', fileNames[0], re.I)
        if not csvMatch:
            print('Your [XX.csv] file has incorrect name.')
            print('Please use [NN.csv] or [NN] format, where N is a digit.')
            return False
        # Check [resultN.txt]: [resultN.txt] or [resultN], N = 1-2 digits.
        # The extra group captures the whole variant number for reuse below.
        resultNFileMatch = re.match(r'^result((\d){1,2})(\.txt)?$', fileNames[1], re.I)
        if not resultNFileMatch:
            print('Your [resultN.txt] file has incorrect name.')
            print('Please use [resultN.txt] or [resultN] format, where N is a 1- or 2- digit variant number.')
            return False
        # Check [lsN.txt]: [lsN.txt] or [lsN], N = 1-2 digits.
        lsNFileMatch = re.match(r'^ls((\d){1,2})(\.txt)?$', fileNames[2], re.I)
        if not lsNFileMatch:
            print('Your [lsN.txt] file has incorrect name.')
            print('Please use [lsN.txt] or [lsN] format, where N is 1- or 2- digit variant number.')
            return False
        # Check that [lsN] and [resultN] refer to the same variant.  Reuse
        # the matches above: the original re-matched lsN with a mandatory
        # ".txt" suffix, which crashed with AttributeError for an
        # extensionless lsN argument the earlier check had accepted.
        firstNum = resultNFileMatch.group(1)
        secondNum = lsNFileMatch.group(1)
        if firstNum != secondNum:
            print('Files [resultN.txt] and [lsN.txt] are not for the same variant.')
            return False
        # Save the variant number and the validated file paths.
        self.varNum = int(firstNum)
        self.csvFilePath = fileNames[0]
        self.resultNFilePath = fileNames[1]
        self.lsNFilePath = fileNames[2]
        return True
class FromDirProcesser:
    """Scans the whole group .csv and (re)checks every variant whose result
    cell is still unfilled ("N"), a bare comma ("N,"), or an error ("N,Err")."""

    # One pattern replaces the original trio of per-shape regexes.
    _UNPROCESSED_RE = re.compile(r'^(\d{1,2})(,(Err)?)?$', re.I)

    def process(self, csvFilePath):
        """Check every unprocessed variant listed in *csvFilePath*."""
        if not self._processCsvFile(csvFilePath):
            print('Error occured while reading %s file' % csvFilePath)
            return False
        # Go through all lines and check each variant.
        for variant in self.csvFileContent:
            match = self._UNPROCESSED_RE.match(variant)
            if not match:
                # Line already carries results - nothing to redo.
                continue
            curVar = int(match.group(1))
            print("%r" % curVar)
            self.CertainValProcessor = FromCertainFileProcesser(
                csvFilePath,
                str.format("result%i.txt" % curVar),
                str.format("ls%i.txt" % curVar)
            )
            self.CertainValProcessor.process(curVar)

    def _processCsvFile(self, csvFilePath):
        """Read *csvFilePath* into self.csvFileContent; return True on success."""
        self.csvFileContent = []
        try:
            # "with" guarantees the handle is closed (the original leaked it).
            with open(csvFilePath, "r", encoding="utf-8-sig") as f:
                self.csvFileContent = [line.rstrip('\n') for line in f]
        except (OSError, UnicodeError):
            print('Error occured while reading file %s.' % csvFilePath)
            return False
        return True
class FromCertainFileProcesser:
    """Checks one student variant: parses resultN.txt against lsN.txt and
    writes the per-task marks back into the group .csv file.

    The parsing below is strictly positional: task blocks are located by
    fixed line offsets inside resultN.txt, so the file format must match
    the lab template exactly.
    """
    def __init__(self, csvFilePath, resultNFilePath, lsNFilePath):
        self.resultNFilePath = resultNFilePath
        self.lsNFilePath = lsNFilePath
        self.csvFilePath = csvFilePath
    def process(self, varNum):
        """Grade variant *varNum* and persist the marks to the .csv file."""
        self._processResultNFile(varNum)
        self._writeResult(self.csvFilePath, varNum)
    def _processResultNFile(self, varNum):
        """Parse resultN.txt and fill self._marks with one entry per task.

        Marks are '1' (pass), '<task>-' (fail) or 'Err' (unreadable/invalid
        input, which aborts further grading).
        """
        resultNContent = []
        self._marks = []
        # Read content from lsN.txt file.
        if not self._processLsNFile():
            print('Error occured while reading file %s.' % self.lsNFilePath)
            self._marks.append('Err')
            return False
        # Read content from resultN.txt file (blank lines are dropped).
        try:
            f = open(self.resultNFilePath, "r", encoding="utf-8-sig")
            resultNContent = [line.rstrip('\n') for line in f if line.rstrip('\n')]
        except:
            print('Error occured while reading file %s.' % self.resultNFilePath)
            self._marks.append('Err')
            return False
        # Save username (line 0 of the report).
        self.userName = resultNContent[0]
        # Check variant (line 1 must repeat the expected variant number).
        if varNum != int(resultNContent[1]):
            print('Error: variant missmatch. In file:', int(resultNContent[1]))
            self._marks.append('Err')
            return False
        # Testing.
        print('Checking @%s var%i' % (self.userName, varNum))
        # "labs" task: lines 2-3 must name labN and labN+1.
        if resultNContent[2] == str.format("lab%i" % varNum) \
                and resultNContent[3] == str.format("lab%i" % (varNum + 1)):
            self._marks.append('1')
        else:
            self._marks.append('labs-')
        # Testing:
        print('labs', self._marks[0])
        # "hard1" task: lines 4-5 are ls entries; same inode (first token),
        # expected treeN_h / .treeN file names (last token).
        try:
            first1, *middle1, last1 = resultNContent[4].split()
            first2, *middle2, last2 = resultNContent[5].split()
        except:
            print('Error in "hard1" task: incorrect data format.')
            self._marks.append('Err')
            return False
        if resultNContent[4] and resultNContent[5] \
                and first1 == first2 \
                and str.format("tree%i_h" % varNum) == last1 \
                and str.format(".tree%i" % varNum) == last2:
            self._marks.append('1')
        else:
            self._marks.append('hard1-')
        # Testing
        print('hard1', self._marks[1])
        # "mount" task: the mount listing runs from line 6 until the first
        # line containing '-rw' (which starts the next task's ls output).
        startingLine = 6
        endingLine = startingLine
        for i, line in enumerate(resultNContent[startingLine:]):
            if '-rw' in line:
                endingLine = startingLine + i
                break
        strToFind = str.format('/home/%s/mount/NTFS' % self.userName)
        strToFind2 = str.format('/home/%s/mount/EXT4' % self.userName)
        firstStrFound = False
        secondStrFound = False
        for line in resultNContent[startingLine:endingLine]:
            if strToFind in line:
                firstStrFound = True
            if strToFind2 in line:
                secondStrFound = True
        if firstStrFound and secondStrFound:
            self._marks.append('1')
        else:
            self._marks.append('mount-')
        # Testing
        print('mount', self._marks[2])
        # "hard2" task: like hard1 but the inodes must DIFFER, and both
        # entries must also appear in the lsN.txt listing.
        try:
            first1, *middle1, last1 = resultNContent[endingLine].split()
            first2, *middle2, last2 = resultNContent[endingLine + 1].split()
        except:
            print('Error in "hard2" task: incorrect data format.')
            self._marks.append('Err')
            return False
        if resultNContent[endingLine] and resultNContent[endingLine + 1] \
                and first1 != first2 \
                and str.format("tree%i_h" % varNum) == last1 \
                and str.format(".tree%i" % varNum) == last2 \
                and self._argsInText([first1, last1], self.lsNContent) \
                and self._argsInText([first2, last2], self.lsNContent):
            self._marks.append('1')
        else:
            self._marks.append('hard2-')
        # Testing
        print('hard2', self._marks[3])
        # "diff" task: a `tree` dump located by fixed offsets after the
        # hard2 block must mention the expected lab/lect/result entries.
        if self.userName in resultNContent[endingLine + 3] \
                and str.format('lab%i' % varNum) in resultNContent[endingLine + 6] \
                and str.format('lab%i' % (varNum + 1)) in resultNContent[endingLine + 7] \
                and str.format('lect%i' % (varNum - 1)) in resultNContent[endingLine + 9] \
                and str.format('lect%i' % varNum) in resultNContent[endingLine + 10] \
                and str.format('result%i' % varNum) in resultNContent[endingLine + 13] \
                and '3 directories, 7 files' in resultNContent[endingLine + 16]:
            self._marks.append('1')
        else:
            self._marks.append('diff-')
        # Testing
        print('diff', self._marks[4])
        return True
    def _processLsNFile(self):
        """Read lsN.txt into self.lsNContent; return True on success."""
        # Read content from lsN.txt file (blank lines are dropped).
        try:
            f = open(self.lsNFilePath, "r", encoding="utf-8-sig")
            self.lsNContent = [line.rstrip('\n') for line in f if line.rstrip('\n')]
        except:
            return False
        return True
    def _argsInText(self, argsLst, textAsLst):
        """Return True when some line of *textAsLst* contains both tokens."""
        # Means that all items from argsLst should be in one line.
        for line in textAsLst:
            # I am sorry.
            splittedLine = line.split()
            if (argsLst[0] in splittedLine) and (argsLst[1] in splittedLine):
                return True
        return False
    def _writeResult(self, csvFilePath, varNum):
        """Write the collected marks into row *varNum* of the .csv file."""
        # Write result to .csv file.
        # Note, that result will be on certain line in .csv file.
        csvFileContent = []
        try:
            f = open(csvFilePath, "r", encoding="utf-8-sig")
            csvFileContent = [line.rstrip('\n') for line in f]
        except:
            print('Error occured while reading file %s.' % csvFilePath)
            return False
        # Format data to write to .csv file.
        # NOTE: toWrite aliases self._marks, so insert/append mutate it too.
        toWrite = self._marks
        toWrite.insert(0, str(varNum))
        if 'Err' not in self._marks:
            toWrite.append(str(self._getMarksSum()))
        # Row index is the variant number (1-based in the file).
        csvFileContent[int(varNum) - 1] = ','.join(toWrite)
        csvFileContentWithNewLines = [(line+'\n') for line in csvFileContent]
        # Write data to .csv file.
        try:
            f = open(csvFilePath, "w", encoding="utf-8-sig")
            f.writelines(csvFileContentWithNewLines)
        except:
            print('Error occured while writing data to file %s.' % csvFilePath)
            return False
        return True
    def _getMarksSum(self):
        """Count how many tasks scored '1'."""
        toRet = 0
        for mark in self._marks:
            if mark == '1':
                toRet += 1
        return toRet
# Script entry point.
# NOTE(review): main()'s return code is discarded (no sys.exit) - confirm intended.
if __name__ == '__main__':
    main()
| 36.524496 | 110 | 0.566435 | 11,972 | 0.944611 | 0 | 0 | 0 | 0 | 0 | 0 | 3,010 | 0.237494 |
10d2a94ebd88d32ad26b964e29b12c2b154a5761 | 7,858 | py | Python | HSTB/drivers/hips_log.py | noaa-ocs-hydrography/drivers | d798a851b7b06c986c811a84242529038fd0b2b3 | [
"CC0-1.0"
] | 2 | 2021-04-28T17:37:30.000Z | 2022-01-28T21:56:17.000Z | HSTB/drivers/hips_log.py | noaa-ocs-hydrography/drivers | d798a851b7b06c986c811a84242529038fd0b2b3 | [
"CC0-1.0"
] | 1 | 2020-11-05T13:57:34.000Z | 2020-11-05T14:00:26.000Z | HSTB/drivers/hips_log.py | noaa-ocs-hydrography/drivers | d798a851b7b06c986c811a84242529038fd0b2b3 | [
"CC0-1.0"
] | 1 | 2021-04-09T08:29:54.000Z | 2021-04-09T08:29:54.000Z | import xml.etree.ElementTree as ElementTree
import os
class CARISObject:
    """A generic CARIS object with a name."""

    def __init__(self, name=''):
        """Initialize with the given name (empty by default).

        The original class declared two ``__init__`` overloads; Python keeps
        only the last definition, so the no-argument form was dead code.  A
        default argument restores both call styles.
        """
        self.name = name

    def set_name(self, name):
        """Set the name (ignored when *name* is falsy)."""
        if name:
            self.name = name

    def get_name(self):
        """Return the name."""
        return self.name
class CARISSource(CARISObject):
    """A CARIS source object under Process Model 1.0."""

    def __init__(self):
        """Create a source with empty name, type and data."""
        self.name = ''
        self.stype = ''
        self.sdata = ''

    def set_type(self, dtype):
        """Assign the source type, ignoring falsy values."""
        if not dtype:
            return
        self.stype = dtype

    def set_data(self, data):
        """Assign the source data, ignoring falsy values."""
        if not data:
            return
        self.sdata = data

    def get_type(self):
        """Return the source type."""
        return self.stype

    def get_data(self):
        """Return the source data."""
        return self.sdata
class CARISLog():
    """A CARIS log record under Process Model 1.0."""

    def __init__(self):
        """Create a log record with blank user, software and timestamps."""
        self.user = ''
        self.software = ''
        self.startTime = ''
        self.endTime = ''

    def set_user(self, user):
        """Record the user, ignoring falsy values."""
        if not user:
            return
        self.user = user

    def set_software(self, software):
        """Record the software identifier, ignoring falsy values."""
        if not software:
            return
        self.software = software

    def set_start_time(self, startTime):
        """Record the start time, ignoring falsy values."""
        if not startTime:
            return
        self.startTime = startTime

    def set_end_time(self, endTime):
        """Record the end time, ignoring falsy values."""
        if not endTime:
            return
        self.endTime = endTime

    def get_user(self):
        """Return the user."""
        return self.user

    def get_software(self):
        """Return the software identifier."""
        return self.software

    def get_start_time(self):
        """Return the start time."""
        return self.startTime

    def get_end_time(self):
        """Return the end time."""
        return self.endTime
class CARISPort(CARISObject):
    """A CARIS port under Process Model 1.0."""

    def __init__(self):
        """Create a port with no name and an empty sources list."""
        self.name = ''
        self.sources = []

    def set_sources(self, sources):
        """Replace the sources list, ignoring falsy values."""
        if not sources:
            return
        self.sources = sources

    def get_sources(self):
        """Return the list of sources."""
        return self.sources

    def get_source(self, index):
        """Return the source at position *index*."""
        return self.sources[index]
class CARISProcess(CARISObject):
    """A CARIS process under Process Model 1.0."""

    def __init__(self):
        """Create a process with empty name/version, no ports and no log."""
        self.name = ''
        self.version = ''
        self.ports = {}
        self.log = []

    def set_version(self, version):
        """Record the version, ignoring falsy values."""
        if not version:
            return
        self.version = version

    def set_ports(self, ports):
        """Replace the ports dictionary, ignoring falsy values."""
        if not ports:
            return
        self.ports = ports

    def add_port(self, name, port):
        """Register *port* under *name*, ignoring falsy ports."""
        if not port:
            return
        self.ports[name] = port

    def set_log(self, log):
        """Attach the log record, ignoring falsy values."""
        if not log:
            return
        self.log = log

    def get_version(self):
        """Return the version."""
        return self.version

    def get_ports(self):
        """Return the ports dictionary."""
        return self.ports

    def get_port(self, name):
        """Return the port registered under *name*."""
        return self.ports[name]

    def get_log(self):
        """Return the attached log record."""
        return self.log
class HIPSLog:
    """
    A class representing a HIPS line log.
    Applicable only for logs created in HIPS v10.0.0 and above (Process.log)
    """

    def __init__(self, log_path=None):
        """Initialize, optionally parsing an existing log at *log_path*.

        The original class declared two ``__init__`` overloads; Python keeps
        only the last one, so the empty-constructor form was unreachable.
        Making *log_path* optional restores both call styles.
        """
        self.source_path = log_path if log_path else ''
        self.processes = []
        self.version = ''
        if not log_path:
            return
        tree = ElementTree.parse(log_path)
        root = tree.getroot()
        self.version = root.find('version').text
        for process in root.findall('process'):
            self.processes.append(self.__parse_process(process))

    def set_source_path(self, path):
        """Set the source path (ignored when falsy)."""
        if path:
            self.source_path = path

    def set_version(self, version):
        """Set the version (ignored when falsy)."""
        if version:
            self.version = version

    def set_processes(self, process):
        """Set the processes list (ignored when falsy)."""
        if process:
            self.processes = process

    def get_source_path(self):
        """Return the source path."""
        return self.source_path

    def get_version(self):
        """Return the version."""
        return self.version

    def get_processes(self):
        """Return the list of processes."""
        return self.processes

    def get_process(self, index):
        """Return a specific process object by index."""
        return self.processes[index]

    def get_last_process(self, process_name):
        """Return the last log entry whose name equals *process_name*.

        Raises StopIteration when no entry matches, preserving the behavior
        of the original generator-based implementation.
        """
        for i in range(len(self.processes) - 1, -1, -1):
            if self.processes[i].get_name() == process_name:
                return self.get_process(i)
        raise StopIteration(process_name)

    def has_process(self, process_name):
        """Check if the process exists in the log.

        NOTE(review): this is a substring match on the name, unlike
        get_last_process which compares for equality - confirm intended.
        """
        return any(process_name in s.get_name() for s in self.processes)

    def __parse_process(self, process):
        """Build a CARISProcess (metadata, log, ports) from a <process> node."""
        proc_obj = CARISProcess()
        # set metadata
        proc_obj.set_name(process.find('id').text)
        proc_obj.set_version(process.find('version').text)
        log = process.find('log')
        log_obj = CARISLog()
        log_obj.set_user(log.find('user').text)
        log_obj.set_start_time(log.find('start').text)
        log_obj.set_end_time(log.find('end').text)
        soft = log.find('software')
        log_obj.set_software(
            soft.find('id').text +
            ' ' +
            soft.find('version').text)
        proc_obj.set_log(log_obj)
        # add ports
        for option in process.findall('port'):
            opt_obj = self.__parse_port(option)
            proc_obj.add_port(opt_obj.get_name(), opt_obj)
        return proc_obj

    def __parse_port(self, option):
        """Build a CARISPort and its sources from a <port> node."""
        opt_obj = CARISPort()
        opt_obj.set_name(option.find('id').text)
        for source in option.findall('source'):
            opt_obj.sources.append(self.__parse_source(source))
        return opt_obj

    def __parse_source(self, source):
        """Build a CARISSource from a <source> node (simple or complex data)."""
        src_obj = CARISSource()
        data = source.find('data')
        simple = data.find('simple')
        # Compare against None explicitly: ElementTree Elements with no
        # children are falsy, so a bare ``if simple:`` could wrongly skip a
        # present node.
        if simple is not None:
            src_obj.set_name('simple')
            src_obj.set_type(simple.find('type').text)
            src_obj.set_data(simple.find('value').text)
        else:
            complex_v = data.find('complex')
            if complex_v is not None:
                src_obj.set_name('complex')
                src_obj.set_type('complex')
                # simply store this part of the ETree
                src_obj.set_data(complex_v)
        return src_obj
| 27.865248 | 102 | 0.573556 | 7,786 | 0.990837 | 0 | 0 | 0 | 0 | 0 | 0 | 2,135 | 0.271698 |
10d394809031c831a797106d7da931ca1931a5d8 | 89 | py | Python | Contest/ABC017/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/ABC017/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/ABC017/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
import re
# ABC017 B: the answer is YES iff the input is a concatenation of
# "ch", "o", "k", "u" (re.sub deletes every occurrence; any remainder
# means the string is invalid, hence NO).
print(re.sub("ch|o|k|u", "", input()) and "NO" or "YES") | 29.666667 | 56 | 0.606742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.483146 |
10d3a064f0cfe210246996fe757aa5f3e0edc11d | 8,393 | py | Python | plugins/salesforce_utils.py | brennoflavio/airflow-salesforce-operators | 30019402b0ae4e2c8d3fde43b1273781316358a7 | [
"MIT"
] | null | null | null | plugins/salesforce_utils.py | brennoflavio/airflow-salesforce-operators | 30019402b0ae4e2c8d3fde43b1273781316358a7 | [
"MIT"
] | null | null | null | plugins/salesforce_utils.py | brennoflavio/airflow-salesforce-operators | 30019402b0ae4e2c8d3fde43b1273781316358a7 | [
"MIT"
] | null | null | null | from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
import json
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
import csv
from tempfile import NamedTemporaryFile
from airflow.hooks.S3_hook import S3Hook
class SalesforceHook(BaseHook, LoggingMixin):
    """
    Forked from Airflow Contrib
    """
    def __init__(self, conn_id, *args, **kwargs):
        self.conn_id = conn_id
        self._args = args
        self._kwargs = kwargs
        self.connection = self.get_connection(conn_id)
        self.extras = self.connection.extra_dejson

    def sign_in(self):
        """Authenticate against Salesforce once and cache the client."""
        if not hasattr(self, "_sf"):
            self._sf = Salesforce(
                username=self.connection.login,
                password=self.connection.password,
                security_token=self.extras["security_token"],
                instance_url=self.connection.host,
                sandbox=self.extras.get("sandbox", False),
            )
        return self._sf

    def make_query(self, query):
        """Run *query* via query_all and return a plain-JSON result dict."""
        self.sign_in()
        self.log.info("Querying for all objects")
        result = self._sf.query_all(query)
        self.log.info(
            "Received results: Total size: %s; Done: %s",
            result["totalSize"],
            result["done"],
        )
        # Round-trip through JSON to downgrade OrderedDicts to plain types.
        return json.loads(json.dumps(result))
class SalesforceToS3Operator(BaseOperator):
    """Runs a SOQL query and stores the paged results as a CSV file on S3.

    The constructor arguments and templated fields are unchanged; the CSV
    writing now tolerates an empty result set and records without an
    "attributes" envelope instead of crashing.
    """
    template_fields = ("sql", "dest_key")
    template_ext = (".sql",)

    @apply_defaults
    def __init__(
        self,
        sql,
        dest_key,
        dest_bucket,
        salesforce_conn_id,
        aws_conn_id="aws_default",
        include_deleted=False,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.salesforce_conn_id = salesforce_conn_id
        self.aws_conn_id = aws_conn_id
        self.sql = sql
        self.dest_key = dest_key
        self.dest_bucket = dest_bucket
        self.include_deleted = include_deleted

    def execute(self, context):
        """Page through the query results, write them to a temp CSV, upload to S3."""
        self.log.info("Start executing SalesforceToS3Operator")
        salesforce_hook = SalesforceHook(conn_id=self.salesforce_conn_id)
        s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
        salesforce_conn = salesforce_hook.sign_in()
        self.log.info(f"Going to execute query {self.sql}")
        result = salesforce_conn.query(
            query=self.sql, include_deleted=self.include_deleted
        )
        with NamedTemporaryFile("w", encoding="utf-8") as target_file:
            csv_writer = None  # created lazily from the first record's keys
            while True:
                rows = []
                for record in result["records"]:
                    row = json.loads(json.dumps(record))
                    # Salesforce adds a non-data "attributes" envelope per
                    # record; pop() tolerates its absence (del raised KeyError).
                    row.pop("attributes", None)
                    rows.append({k.lower(): v for k, v in row.items()})
                if rows:
                    if csv_writer is None:
                        # The header comes from the first record; the original
                        # indexed list_result[0] and crashed with IndexError
                        # when the query returned no rows at all.
                        csv_writer = csv.DictWriter(target_file, fieldnames=list(rows[0]))
                        csv_writer.writeheader()
                    csv_writer.writerows(rows)
                    target_file.flush()
                if result["done"]:
                    break
                result = salesforce_conn.query_more(
                    next_records_identifier=result["nextRecordsUrl"],
                    identifier_is_url=True,
                    include_deleted=self.include_deleted,
                )
            s3_hook.load_file(
                filename=target_file.name,
                key=self.dest_key,
                bucket_name=self.dest_bucket,
                replace=True,
            )
class S3ToSalesforceOperator(BaseOperator):
    """Downloads a CSV from S3 and pushes its rows to Salesforce in batches
    via the Bulk API (update / insert / delete)."""
    template_fields = ("source_key",)

    @apply_defaults
    def __init__(
        self,
        source_key,
        source_bucket,
        salesforce_object,
        salesforce_conn_id,
        api_action,
        aws_conn_id="aws_default",
        batch_size=9000,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.source_key = source_key
        self.source_bucket = source_bucket
        self.salesforce_object = salesforce_object
        self.aws_conn_id = aws_conn_id
        self.salesforce_conn_id = salesforce_conn_id
        self.api_action = api_action
        self.batch_size = batch_size

    def execute(self, context):
        """Download, sanitize and parse the CSV, then run the bulk action."""
        if self.api_action not in ("update", "insert", "delete"):
            raise Exception(
                "api_action is not update, insert or delete. Check class definition in Dag and try again"
            )
        self.log.info("Getting Connections")
        s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
        salesforce_hook = SalesforceHook(conn_id=self.salesforce_conn_id)
        salesforce_conn = salesforce_hook.sign_in()
        self.log.info("Downloading S3 File")
        with NamedTemporaryFile() as source_csv:
            source_obj = s3_hook.get_key(self.source_key, self.source_bucket)
            with open(source_csv.name, "wb") as opened_source_csv:
                source_obj.download_fileobj(opened_source_csv)
            self.log.info("Replacing special chars")
            with open(source_csv.name, "r+") as opened_source_csv, NamedTemporaryFile(
                "r+"
            ) as sanitized_csv:
                # Backslashes break the Bulk API payload; strip them.
                for row in opened_source_csv:
                    sanitized_csv.write(row.replace("\\", ""))
                sanitized_csv.flush()
                self.log.info("Converting CSV to Dict")
                # "with" guarantees the reader handle is closed even when
                # parsing raises (the original leaked it on error).
                with open(sanitized_csv.name) as reader_file:
                    upload_list = [dict(row) for row in csv.DictReader(reader_file)]
                salesforce_obj = getattr(salesforce_conn.bulk, self.salesforce_object)
                # Split the rows into Bulk-API-sized batches.
                batches = [
                    upload_list[i : i + self.batch_size]
                    for i in range(0, len(upload_list), self.batch_size)
                ]
                if not batches:
                    self.log.info("Dict doesn't have any records. Skipping.")
                    return
                # One dispatch path replaces three copy-pasted branches.
                verbs = {"update": "Updating", "insert": "Inserting", "delete": "Deleting"}
                self.log.info("%s salesforce records" % verbs[self.api_action])
                bulk_call = getattr(salesforce_obj, self.api_action)
                for batch in batches:
                    response = bulk_call(batch)
                    for row in response:
                        if row["success"] is False:
                            raise Exception(
                                "Salesforce returned error: " + str(row)
                            )
                    self.log.info("Success!")  # fixed "Sucess!" typo
class SalesforceUtils(AirflowPlugin):
    """Airflow plugin that registers the Salesforce hook and operators
    under the plugin name "salesforce_utils"."""
    name = "salesforce_utils"
    operators = [SalesforceToS3Operator, S3ToSalesforceOperator]
    hooks = [SalesforceHook]
    executors = []
    macros = []
    admin_views = []
| 35.563559 | 105 | 0.556535 | 7,992 | 0.952222 | 0 | 0 | 1,113 | 0.132611 | 0 | 0 | 883 | 0.105207 |
10d3ca3bf530a8c109cbd94308e316fd9795c6d0 | 1,928 | py | Python | src/django_website/blog/views.py | jdheinz/project-ordo_ab_chao | 4063f93b297bab43cff6ca64fa5ba103f0c75158 | [
"MIT"
] | 2 | 2019-09-23T18:42:32.000Z | 2019-09-27T00:33:38.000Z | src/django_website/blog/views.py | jdheinz/project-ordo_ab_chao | 4063f93b297bab43cff6ca64fa5ba103f0c75158 | [
"MIT"
] | 6 | 2021-03-19T03:25:33.000Z | 2022-02-10T08:48:14.000Z | src/django_website/blog/views.py | jdheinz/project-ordo_ab_chao | 4063f93b297bab43cff6ca64fa5ba103f0c75158 | [
"MIT"
] | 6 | 2019-09-23T18:53:41.000Z | 2020-02-06T00:20:06.000Z | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404
from .models import BlogPost
from .forms import BlogPostModelForm
# render intial html page of list of published blogs
def blog_post_list_view(request):
    """Render the list of published blog posts.

    An authenticated user additionally sees their own posts (published or
    not), merged with the published set without duplicates.
    """
    posts = BlogPost.objects.all().published()
    if request.user.is_authenticated:
        own_posts = BlogPost.objects.filter(user=request.user)
        posts = (posts | own_posts).distinct()
    return render(request, 'blog/list.html', {'object_list': posts})
# create new blog post
@login_required
def blog_post_create_view(request):
    """Create a new blog post owned by the logged-in user.

    On a valid submission the post is saved with ``user`` set to the
    requester and a fresh empty form is rendered; otherwise the bound form
    (carrying validation errors) is shown again.
    """
    form = BlogPostModelForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        post = form.save(commit=False)
        post.user = request.user
        post.save()
        # Reset to an empty form once the post has been stored.
        form = BlogPostModelForm()
    return render(request, 'blog/form.html', {'form': form})
# click on blog 'view' on blog list page to see details of a single blog
def blog_post_detail_view(request, slug):
    """Render the detail page for the post identified by ``slug`` (404 if absent)."""
    post = get_object_or_404(BlogPost, slug=slug)
    return render(request, 'blog/detail.html', {'object': post})
# blog author can update/edit the blog post that user created
@login_required
def blog_post_update_view(request, slug):
    """Edit an existing blog post identified by ``slug``.

    Saves the form when the submission validates; in every case re-renders
    the update template with the (possibly bound) form.
    """
    post = get_object_or_404(BlogPost, slug=slug)
    form = BlogPostModelForm(request.POST or None, instance=post)
    if form.is_valid():
        form.save()
    return render(
        request,
        'blog/update.html',
        {"form": form, "title": f"Update {post.title}"},
    )
# blog author can delete the blog post that user created
@login_required
def blog_post_delete_view(request, slug):
    """Confirm (GET) and perform (POST) deletion of the post with ``slug``."""
    post = get_object_or_404(BlogPost, slug=slug)
    if request.method == "POST":
        post.delete()
    return render(request, 'blog/delete.html', {'object': post})
| 35.703704 | 80 | 0.702801 | 0 | 0 | 0 | 0 | 961 | 0.498444 | 0 | 0 | 460 | 0.238589 |
10d567f52597f63ae873c81fa0c69444cf588834 | 6,863 | py | Python | uiConfig.py | g-ulrich/ScreenShotToDiscord | 7228d01637d426a2767a9531576880180a49c92f | [
"MIT"
] | null | null | null | uiConfig.py | g-ulrich/ScreenShotToDiscord | 7228d01637d426a2767a9531576880180a49c92f | [
"MIT"
] | null | null | null | uiConfig.py | g-ulrich/ScreenShotToDiscord | 7228d01637d426a2767a9531576880180a49c92f | [
"MIT"
] | null | null | null | from PyQt5.QtCore import QTimer, QTime
from PyQt5 import QtGui, QtWidgets
import time
import random
import os
import datetime
import requests
import sqlite3 as sql
import database_impl as db
from date_tools import dt
def datetime_diff(old_datetime, new_datetime, dates_are_strings=True):
    """Return ``(minutes, seconds)`` for ``old_datetime - new_datetime``.

    String inputs must use the format ``"%Y-%m-%d %H:%M:%S"``; pass
    ``dates_are_strings=False`` to supply ``datetime`` objects directly.
    Sub-second components are ignored.  Note that despite the parameter
    names, callers in this file pass the *newer* timestamp first so the
    difference comes out positive.
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    if dates_are_strings:
        start = datetime.datetime.strptime(old_datetime, fmt)
        end = datetime.datetime.strptime(new_datetime, fmt)
    else:
        start, end = old_datetime, new_datetime
    delta = start - end
    # timedelta normalizes to days + seconds (0 <= seconds < 86400), so this
    # reconstructs the signed whole-second difference exactly.
    total_seconds = delta.days * 24 * 60 * 60 + delta.seconds
    return divmod(total_seconds, 60)
def current_time():
    """Return the current wall-clock time as ``"HH:MM:SS am"`` / ``"HH:MM:SS pm"``.

    ``QTime.toString()`` with no arguments yields a 24-hour ``HH:MM:SS``
    string, so the first two characters are the hour.
    """
    t = QTime.currentTime().toString()
    # Hours 12..23 are "pm" (noon and 11 pm included), 0..11 are "am".
    # The previous check `12 < hour < 23` wrongly labelled both 12:xx
    # (noon) and 23:xx (11 pm) as "am".
    am_pm = "pm" if int(t[:2]) >= 12 else "am"
    return t + " " + am_pm
def message_discord_server(message, user_data={}):
    """Best-effort post of ``message`` to the Discord webhook in ``user_data``.

    ``user_data`` must contain a ``'discordwebhook'`` URL.  Any failure
    (missing key, network error, ...) is caught and printed rather than
    raised, so callers are never interrupted by notification problems.
    """
    try:
        webhook_url = user_data['discordwebhook']
        payload = {"content": str(message)}
        requests.post(webhook_url, data=payload)
    except Exception as e:
        print("ERROR discord", e)
class Presets:
    """UI controller helpers for the watchdog window.

    Methods are written as plain functions and invoked as
    ``Presets.method(self)``, where ``self`` is the main window object and
    ``self.ui`` holds its widgets plus a few dynamically-attached
    attributes (``SECONDS``, ``start_timer``, ``progress_timer``,
    ``user_data``).
    """
    def event_log(self, message):
        """Append a timestamped ``message`` to the on-screen event list.

        The list is cleared (with a "CLEARED" marker) once it exceeds 100
        entries, and the "last update" label is refreshed.
        """
        t, c = current_time(), self.ui.mouseList.count()
        self.ui.mouseList.setCurrentRow(c-1)
        self.ui.mouseLastUpdate.setText(' {}'.format(t))
        if c > 100:
            self.ui.mouseList.clear()
            self.ui.mouseList.addItem("CLEARED --> {}".format(t))
            self.ui.mouseList.takeItem(c-1)
        self.ui.mouseList.addItem("[{}] {}".format(t, message))
        self.ui.mouseList.addItem("")
    def init_ui(self):
        """Wire up widgets: password masking, window icon, and button handlers."""
        self.ui.password.setEchoMode(QtWidgets.QLineEdit.Password)
        self.ui.bar.setMaximum(100)
        self.ui.bar.setValue(100)
        self.setWindowIcon(QtGui.QIcon('images/discord.png'))
        # Presets.mouse_loop(self)
        self.ui.close.clicked.connect(lambda: self.close())
        self.ui.minimize.clicked.connect(lambda: self.showMinimized())
        self.ui.startBtn.clicked.connect(lambda: Presets.start(self))
        self.ui.stopBtn.clicked.connect(lambda: Presets.stop(self))
        self.ui.password.returnPressed.connect(lambda: Presets.start(self))
        self.ui.stopBtn.hide()
    def progress_bar_count(self):
        """Tick handler (1 Hz): count down the remaining seconds on the bar."""
        self.ui.SECONDS -= 1
        self.ui.bar.setValue(self.ui.SECONDS)
    def start(self):
        """Validate the password and start the periodic checkup timers.

        Requires a valid application password, no stop-session flag, and a
        non-zero hours/minutes interval.  Starts ``awake_loop`` on the
        chosen interval and a 1-second progress-bar countdown.
        """
        CON = sql.connect('userData/user.db')
        if db.valid_login_password(CON, self.ui.password.text(), commit=False) and not db.select_stop_session_status(CON, commit=False):
            self.ui.user_data = db.user_data_by_password(CON, self.ui.password.text(), commit=False)
            if self.ui.mins.value() != 0.0 or self.ui.hrs.value() != 0.0:
                self.ui.start_timer = QTimer()
                self.ui.start_timer.timeout.connect(lambda: Presets.awake_loop(self))
                # QTimer.start takes milliseconds: hours/minutes -> ms.
                hrs_to_secs, mins_to_secs = (self.ui.hrs.value() * 60) * 60000, self.ui.mins.value() * 60000
                self.ui.start_timer.start(hrs_to_secs + mins_to_secs)
                self.ui.SECONDS = (hrs_to_secs + mins_to_secs) / 1000
                Presets.event_log(self, "Start")
                Presets.event_log(self, "Interval set to {} minute(s).".format(self.ui.SECONDS / 60))
                self.ui.bar.setMaximum(self.ui.SECONDS)
                self.ui.bar.setValue(self.ui.SECONDS)
                self.ui.progress_timer = QTimer()
                self.ui.progress_timer.timeout.connect(lambda: Presets.progress_bar_count(self))
                self.ui.progress_timer.start(1000)
                self.ui.stopBtn.show()
                self.ui.startBtn.hide()
            else:
                Presets.event_log(self, "Set an interval! :)")
        else:
            if not db.valid_login_password(CON, self.ui.password.text(), commit=False):
                Presets.event_log(self, "Enter an application password. :)")
            if db.select_stop_session_status(CON, commit=False):
                Presets.event_log(self, "Start live trading session. :)")
    def stop(self):
        """Stop both timers and reset the progress bar / buttons."""
        self.ui.start_timer.stop()
        self.ui.progress_timer.stop()
        self.ui.bar.setMaximum(100)
        self.ui.bar.setValue(100)
        Presets.event_log(self, "Stop")
        self.ui.stopBtn.hide()
        self.ui.startBtn.show()
    def awake_loop(self):
        """Periodic checkup: compare live-trade timestamps and notify Discord.

        Reads the three most recent live-trade timestamps, classifies the
        application's health (stopped / slowed / healthy), and posts the
        verdict to the configured Discord webhook.  A sentinel timestamp of
        ``"2000-01-01 00:00:00"`` marks a closed market, in which case the
        timestamp table is reset and the checkup is bypassed.
        """
        CON = sql.connect('userData/user.db')
        data = db.get_timestamps_from_livetrade(CON, commit=False)
        # current set
        current_min_diff, sec1 = datetime_diff(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), data[-1])
        # last set
        previous_min_diff, sec2 = datetime_diff(data[-1], data[-2])
        #second to last set
        min3, sec3 = datetime_diff(data[-2], data[-3])
        # NOTE(review): initialized to "_" but guarded below with
        # `if message != ""` — so "_" itself can be sent to Discord when the
        # market-closed branch takes no action; confirm this is intended.
        message = "_"
        """
        if interval check is greater than min diff then success
        """
        checkup_interval = self.ui.mins.value() + (self.ui.hrs.value() * 60)
        livetrade_loop_rate = min3 if previous_min_diff == min3 else "unknown"
        if data[-1] != "2000-01-01 00:00:00":
            if checkup_interval < current_min_diff:
                message = "```diff\n-Error! Application stopped live trading!\n-Stoppage occurred after: {}\n``` ".format(data[-1])
            elif current_min_diff > previous_min_diff:
                message = "```ini\n[Warning! Application either slowed down or stopped live trading.]\n[Last loop occurrence: {}]\n``` ".format(data[-1])
            else:
                message = "```diff\n+Success! Application is live trading.\n+Last loop occurrence: {}\n+Live Trade Loop Rate: {} minute(s)``` ".format(data[-1], livetrade_loop_rate)
        else:
            lastItem = self.ui.mouseList.currentItem().text()
            if "bypassing" not in lastItem or "_" not in lastItem:
                # Reset the timestamp table to three sentinel rows.
                db.drop_table(CON, "live_trade_timestamps", commit=True)
                db.insert_timestamp_livetrade(CON, data=("2000-01-01 00:00:00", ""), commit=False)
                db.insert_timestamp_livetrade(CON, data=("2000-01-01 00:00:00", ""), commit=False)
                db.insert_timestamp_livetrade(CON, data=("2000-01-01 00:00:00", ""), commit=True)
                message = "Market Closed: bypassing checkup..."
                Presets.event_log(self, message)
        if message != "":
            message_discord_server(message, self.ui.user_data)
            Presets.event_log(self, "\n"+message.replace("```", "").replace("diff", "").replace("ini", ""))
        # Re-arm the progress bar for the next interval.
        hrs_to_secs, mins_to_secs = (self.ui.hrs.value() * 60) * 60000, self.ui.mins.value() * 60000
        self.ui.SECONDS = (hrs_to_secs + mins_to_secs)/1000
        self.ui.bar.setMaximum(self.ui.SECONDS)
        self.ui.bar.setValue(self.ui.SECONDS)
| 43.713376 | 181 | 0.616203 | 5,622 | 0.819175 | 0 | 0 | 0 | 0 | 0 | 0 | 1,078 | 0.157074 |
10d7d0aaafbffeb183977058c120740205d009d7 | 38,243 | py | Python | src/glum/_distribution.py | Quantco/glum | 880dc6ff2599864679d86c3a8b1809aa24e2a839 | [
"BSD-3-Clause"
] | 68 | 2021-10-08T09:05:29.000Z | 2022-03-28T14:40:58.000Z | src/glum/_distribution.py | Quantco/glum | 880dc6ff2599864679d86c3a8b1809aa24e2a839 | [
"BSD-3-Clause"
] | 49 | 2021-10-08T01:44:18.000Z | 2022-03-08T08:53:00.000Z | src/glum/_distribution.py | Quantco/glum | 880dc6ff2599864679d86c3a8b1809aa24e2a839 | [
"BSD-3-Clause"
] | 11 | 2021-10-14T10:34:53.000Z | 2022-03-09T11:38:29.000Z | from abc import ABCMeta, abstractmethod
from functools import partial
from typing import Tuple, Union
import numexpr
import numpy as np
from scipy import sparse, special
from tabmat import MatrixBase, StandardizedMatrix
from ._functions import (
binomial_logit_eta_mu_deviance,
binomial_logit_rowwise_gradient_hessian,
gamma_deviance,
gamma_log_eta_mu_deviance,
gamma_log_likelihood,
gamma_log_rowwise_gradient_hessian,
normal_deviance,
normal_identity_eta_mu_deviance,
normal_identity_rowwise_gradient_hessian,
normal_log_likelihood,
poisson_deviance,
poisson_log_eta_mu_deviance,
poisson_log_likelihood,
poisson_log_rowwise_gradient_hessian,
tweedie_deviance,
tweedie_log_eta_mu_deviance,
tweedie_log_likelihood,
tweedie_log_rowwise_gradient_hessian,
)
from ._link import IdentityLink, Link, LogitLink, LogLink
from ._util import _safe_lin_pred, _safe_sandwich_dot
class ExponentialDispersionModel(metaclass=ABCMeta):
    r"""Base class for reproductive Exponential Dispersion Models (EDM).
    The PDF of :math:`Y \sim \mathrm{EDM}(\mu, \phi)` is given by
    .. math::
        p(y \mid \theta, \phi)
        &= c(y, \phi) \exp((\theta y - A(\theta)) / \phi) \\
        &= \tilde{c}(y, \phi) \exp(-d(y, \mu) / (2\phi))
    with mean :math:`\mathrm{E}(Y) = A'(\theta) = \mu`, variance
    :math:`\mathrm{var}(Y) = \phi \cdot v(\mu)`, unit variance
    :math:`v(\mu)` and unit deviance :math:`d(y, \mu)`.
    Properties
    ----------
    lower_bound
    upper_bound
    include_lower_bound
    include_upper_bound
    Methods
    -------
    in_y_range
    unit_variance
    unit_variance_derivative
    variance
    variance_derivative
    unit_deviance
    unit_deviance_derivative
    deviance
    deviance_derivative
    starting_mu
    _mu_deviance_derivative
    eta_mu_deviance
    gradient_hessian
    References
    ----------
    https://en.wikipedia.org/wiki/Exponential_dispersion_model.
    """
    @property
    @abstractmethod
    def lower_bound(self) -> float:
        """Get the lower bound of values for the EDM."""
        pass
    @property
    @abstractmethod
    def upper_bound(self) -> float:
        """Get the upper bound of values for the EDM."""
        pass
    @property
    def include_lower_bound(self) -> bool:
        """Return whether ``lower_bound`` is allowed as a value of ``y``."""
        # NOTE(review): body is ``pass``, so this returns None unless a
        # subclass overrides it — presumably intended to be abstract; confirm.
        pass
    @property
    def include_upper_bound(self) -> bool:
        """Return whether ``upper_bound`` is allowed as a value of ``y``."""
        # NOTE(review): same as ``include_lower_bound`` — returns None
        # unless overridden by a subclass.
        pass
    def in_y_range(self, x) -> np.ndarray:
        """Return ``True`` if ``x`` is in the valid range of the EDM.
        Parameters
        ----------
        x : array-like, shape (n_samples,)
            Target values.
        Returns
        -------
        np.ndarray
        """
        # Pick strict vs. non-strict comparisons on each side depending on
        # whether the corresponding bound is itself an allowed value.
        if self.include_lower_bound:
            if self.include_upper_bound:
                return np.logical_and(
                    np.greater_equal(x, self.lower_bound),
                    np.less_equal(x, self.upper_bound),
                )
            else:
                return np.logical_and(
                    np.greater_equal(x, self.lower_bound), np.less(x, self.upper_bound)
                )
        else:
            if self.include_upper_bound:
                return np.logical_and(
                    np.greater(x, self.lower_bound), np.less_equal(x, self.upper_bound)
                )
            else:
                return np.logical_and(
                    np.greater(x, self.lower_bound), np.less(x, self.upper_bound)
                )
    @abstractmethod
    def unit_variance(self, mu):
        r"""Compute the unit variance function.
        The unit variance :math:`v(\mu)` determines the variance as a function
        of the mean :math:`\mu` by
        :math:`\mathrm{var}(y_i) = (\phi / s_i) \times v(\mu_i)`. It can
        also be derived from the unit deviance :math:`d(y, \mu)` as
        .. math::
            v(\mu) = \frac{2}{\frac{\partial^2 d(y, \mu)}{\partial\mu^2}}\big|_{y=\mu}.
        See also :func:`variance`.
        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        """
        pass
    @abstractmethod
    def unit_variance_derivative(self, mu):
        r"""Compute the derivative of the unit variance with respect to ``mu``.
        Return :math:`v'(\mu)`.
        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        """
        pass
    def variance(self, mu: np.ndarray, dispersion=1, sample_weight=1) -> np.ndarray:
        r"""Compute the variance function.
        The variance of :math:`Y_i \sim \mathrm{EDM}(\mu_i, \phi / s_i)` is
        :math:`\mathrm{var}(Y_i) = (\phi / s_i) * v(\mu_i)`, with unit variance
        :math:`v(\mu)` and weights :math:`s_i`.
        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        dispersion : float, optional (default=1)
            Dispersion parameter :math:`\phi`.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inverse proportional.
        Returns
        -------
        array-like, shape (n_samples,)
        """
        return self.unit_variance(mu) * dispersion / sample_weight
    def variance_derivative(self, mu, dispersion=1, sample_weight=1):
        r"""Compute the derivative of the variance with respect to ``mu``.
        The derivative of the variance is equal to
        :math:`(\phi / s_i) * v'(\mu_i)`, where :math:`v(\mu)` is the unit
        variance and :math:`s_i` are weights.
        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        dispersion : float, optional (default=1)
            Dispersion parameter :math:`\phi`.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inverse proportional.
        Returns
        -------
        array-like, shape (n_samples,)
        """
        return self.unit_variance_derivative(mu) * dispersion / sample_weight
    @abstractmethod
    def unit_deviance(self, y, mu):
        r"""Compute the unit deviance.
        In terms of the log likelihood :math:`L`, the unit deviance is
        :math:`-2\phi\times [L(y, \mu, \phi) - L(y, y, \phi)].`
        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        """
        pass
    def unit_deviance_derivative(self, y, mu):
        r"""Compute the derivative of the unit deviance with respect to ``mu``.
        The derivative of the unit deviance is given by
        :math:`-2 \times (y - \mu) / v(\mu)`, where :math:`v(\mu)` is the unit
        variance.
        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        Returns
        -------
        array-like, shape (n_samples,)
        """
        return -2 * (y - mu) / self.unit_variance(mu)
    def deviance(self, y, mu, sample_weight=1):
        r"""Compute the deviance.
        The deviance is a weighted sum of the unit deviances,
        :math:`\sum_i s_i \times d(y_i, \mu_i)`, where :math:`d(y, \mu)` is the
        unit deviance and :math:`s` are weights. In terms of the log likelihood,
        it is :math:`-2\phi \times [L(y, \mu, \phi / s) - L(y, y, \phi / s)]`.
        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inversely proportional.
        Returns
        -------
        float
        """
        if sample_weight is None:
            return np.sum(self.unit_deviance(y, mu))
        else:
            return np.sum(self.unit_deviance(y, mu) * sample_weight)
    def deviance_derivative(self, y, mu, sample_weight=1):
        r"""Compute the derivative of the deviance with respect to ``mu``.
        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,) (default=1)
            Weights or exposure to which variance is inverse proportional.
        Returns
        -------
        array-like, shape (n_samples,)
        """
        return sample_weight * self.unit_deviance_derivative(y, mu)
    def _mu_deviance_derivative(
        self,
        coef: np.ndarray,
        X,
        y: np.ndarray,
        sample_weight: np.ndarray,
        link: Link,
        offset: np.ndarray = None,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Compute ``mu`` and the derivative of the deviance \
            with respect to coefficients."""
        lin_pred = _safe_lin_pred(X, coef, offset)
        mu = link.inverse(lin_pred)
        d1 = link.inverse_derivative(lin_pred)
        temp = d1 * self.deviance_derivative(y, mu, sample_weight)
        # coef has one extra leading entry (the intercept) when its length
        # exceeds the number of columns of X.
        if coef.size == X.shape[1] + 1:
            devp = np.concatenate(([temp.sum()], temp @ X))
        else:
            devp = temp @ X  # same as X.T @ temp
        return mu, devp
    def eta_mu_deviance(
        self,
        link: Link,
        factor: float,
        cur_eta: np.ndarray,
        X_dot_d: np.ndarray,
        y: np.ndarray,
        sample_weight: np.ndarray,
    ):
        """
        Compute ``eta``, ``mu`` and the deviance.
        Compute:
        * the linear predictor, ``eta``, as ``cur_eta + factor * X_dot_d``;
        * the link-function-transformed prediction, ``mu``;
        * the deviance.
        Returns
        -------
        numpy.ndarray, shape (X.shape[0],)
            The linear predictor, ``eta``.
        numpy.ndarray, shape (X.shape[0],)
            The link-function-transformed prediction, ``mu``.
        float
            The deviance.
        """
        # eta_out and mu_out are filled inside self._eta_mu_deviance,
        # avoiding allocating new arrays for every line search loop
        eta_out = np.empty_like(cur_eta)
        mu_out = np.empty_like(cur_eta)
        deviance = self._eta_mu_deviance(
            link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
        )
        return eta_out, mu_out, deviance
    def _eta_mu_deviance(
        self,
        link: Link,
        factor: float,
        cur_eta: np.ndarray,
        X_dot_d: np.ndarray,
        y: np.ndarray,
        sample_weight: np.ndarray,
        eta_out: np.ndarray,
        mu_out: np.ndarray,
    ):
        """
        Update ``eta`` and ``mu`` and compute the deviance.
        This is a default implementation that should work for all valid
        distributions and link functions. To implement a custom optimized
        version for a specific distribution and link function, please override
        this function in the subclass.
        Returns
        -------
        float
        """
        # In-place writes into the caller-provided output buffers.
        eta_out[:] = cur_eta + factor * X_dot_d
        mu_out[:] = link.inverse(eta_out)
        return self.deviance(y, mu_out, sample_weight=sample_weight)
    def rowwise_gradient_hessian(
        self,
        link: Link,
        coef: np.ndarray,
        dispersion,
        X: Union[MatrixBase, StandardizedMatrix],
        y: np.ndarray,
        sample_weight: np.ndarray,
        eta: np.ndarray,
        mu: np.ndarray,
        offset: np.ndarray = None,
    ):
        """
        Compute the gradient and negative Hessian of the log likelihood row-wise.
        Returns
        -------
        numpy.ndarray, shape (X.shape[0],)
            The gradient of the log likelihood, row-wise.
        numpy.ndarray, shape (X.shape[0],)
            The negative Hessian of the log likelihood, row-wise.
        """
        gradient_rows = np.empty_like(mu)
        hessian_rows = np.empty_like(mu)
        self._rowwise_gradient_hessian(
            link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
        )
        # To form the full Hessian matrix from the IRLS sample_weight:
        # hessian_matrix = _safe_sandwich_dot(X, hessian_rows, intercept=intercept)
        return gradient_rows, hessian_rows
    def _rowwise_gradient_hessian(
        self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
    ):
        """
        Update ``gradient_rows`` and ``hessian_rows`` in place.
        This is a default implementation that should work for all valid
        distributions and link functions. To implement a custom optimized
        version for a specific distribution and link function, please override
        this function in the subclass.
        """
        # FOR TWEEDIE: sigma_inv = weights / (mu ** p) during optimization bc phi = 1
        sigma_inv = get_one_over_variance(self, link, mu, eta, 1.0, sample_weight)
        d1 = link.inverse_derivative(eta)  # = h'(eta)
        # Alternatively:
        # h'(eta) = h'(g(mu)) = 1/g'(mu), note that h is inverse of g
        # d1 = 1./link.derivative(mu)
        d1_sigma_inv = d1 * sigma_inv
        gradient_rows[:] = d1_sigma_inv * (y - mu)
        hessian_rows[:] = d1 * d1_sigma_inv
    def _fisher_information(
        self, link, X, y, mu, sample_weight, dispersion, fit_intercept
    ):
        """Compute the expected information matrix.
        Parameters
        ----------
        link : Link
            A link function (i.e. an instance of :class:`~glum._link.Link`).
        X : array-like
            Training data.
        y : array-like
            Target values.
        mu : array-like
            Predicted mean.
        sample_weight : array-like
            Weights or exposure to which variance is inversely proportional.
        dispersion : float
            The dispersion parameter.
        fit_intercept : bool
            Whether the model has an intercept.
        """
        # W_i = h'(eta_i)^2 / var(Y_i); the expected information is X^T W X.
        W = (link.inverse_derivative(link.link(mu)) ** 2) * get_one_over_variance(
            self, link, mu, link.inverse(mu), dispersion, sample_weight
        )
        return _safe_sandwich_dot(X, W, intercept=fit_intercept)
    def _observed_information(
        self, link, X, y, mu, sample_weight, dispersion, fit_intercept
    ):
        """Compute the observed information matrix.
        Parameters
        ----------
        X : array-like
            Training data.
        y : array-like
            Target values.
        mu : array-like
            Predicted mean.
        sample_weight : array-like
            Weights or exposure to which variance is inversely proportional.
        dispersion : float
            The dispersion parameter.
        fit_intercept : bool
            Whether the model has an intercept.
        """
        linpred = link.link(mu)
        # Unlike the expected information, this keeps the (y - mu) terms
        # instead of taking their expectation (zero).
        W = (
            -link.inverse_derivative2(linpred) * (y - mu)
            + (link.inverse_derivative(linpred) ** 2)
            * (
                1
                + (y - mu) * self.unit_variance_derivative(mu) / self.unit_variance(mu)
            )
        ) * get_one_over_variance(self, link, mu, linpred, dispersion, sample_weight)
        return _safe_sandwich_dot(X, W, intercept=fit_intercept)
    def _score_matrix(self, link, X, y, mu, sample_weight, dispersion, fit_intercept):
        """Compute the score.
        Parameters
        ----------
        X : array-like
            Training data.
        y : array-like
            Target values.
        mu : array-like
            Predicted mean.
        sample_weight : array-like
            Weights or exposure to which variance is inversely proportional.
        dispersion : float
            The dispersion parameter.
        fit_intercept : bool
            Whether the model has an intercept.
        """
        linpred = link.link(mu)
        W = (
            get_one_over_variance(self, link, mu, linpred, dispersion, sample_weight)
            * link.inverse_derivative(linpred)
            * (y - mu)
        ).reshape(-1, 1)
        # Prepend the intercept column (W itself) when the model has one,
        # handling sparse and dense X separately.
        if fit_intercept:
            if sparse.issparse(X):
                return sparse.hstack((W, X.multiply(W)))
            else:
                return np.hstack((W, np.multiply(X, W)))
        else:
            if sparse.issparse(X):
                return X.multiply(W)
            else:
                return np.multiply(X, W)
    def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
        r"""Estimate the dispersion parameter :math:`\phi`.
        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Weights or exposure to which variance is inversely proportional.
        ddof : int, optional (default=1)
            Degrees of freedom consumed by the model for ``mu``.
        method = {'pearson', 'deviance'}, optional (default='pearson')
            Whether to base the estimate on the Pearson residuals or the deviance.
        Returns
        -------
        float
        """
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        if method == "pearson":
            pearson_residuals = ((y - mu) ** 2) / self.unit_variance(mu)
            if sample_weight is None:
                numerator = pearson_residuals.sum()
            else:
                numerator = np.dot(pearson_residuals, sample_weight)
        elif method == "deviance":
            numerator = self.deviance(y, mu, sample_weight)
        else:
            raise NotImplementedError(f"Method {method} hasn't been implemented.")
        if sample_weight is None:
            return numerator / (len(y) - ddof)
        else:
            return numerator / (sample_weight.sum() - ddof)
class TweedieDistribution(ExponentialDispersionModel):
    r"""A class for the Tweedie distribution.
    A Tweedie distribution with mean :math:`\mu = \mathrm{E}(Y)` is uniquely
    defined by its mean-variance relationship
    :math:`\mathrm{var}(Y) \propto \mu^{\mathrm{power}}`.
    Special cases are:
    ====== ================
    Power  Distribution
    ====== ================
    0      Normal
    1      Poisson
    (1, 2) Compound Poisson
    2      Gamma
    3      Inverse Gaussian
    ====== ================
    Parameters
    ----------
    power : float, optional (default=0)
        The variance power of the `unit_variance`
        :math:`v(\mu) = \mu^{\mathrm{power}}`. For
        :math:`0 < \mathrm{power} < 1`, no distribution exists.
    """
    upper_bound = np.Inf
    include_upper_bound = False
    def __init__(self, power=0):
        # validate power and set _upper_bound, _include_upper_bound attrs
        self.power = power
    @property
    def lower_bound(self) -> Union[float, int]:
        """Return the lowest value of ``y`` allowed."""
        if self.power <= 0:
            return -np.Inf
        if self.power >= 1:
            return 0
        # Unreachable for a validly-constructed instance: the power setter
        # rejects 0 < power < 1.
        raise ValueError
    @property
    def include_lower_bound(self) -> bool:
        """Return whether ``lower_bound`` is allowed as a value of ``y``."""
        if self.power <= 0:
            return False
        if (self.power >= 1) and (self.power < 2):
            return True
        if self.power >= 2:
            return False
        # Unreachable for a validly-constructed instance (see power setter).
        raise ValueError
    @property
    def power(self) -> float:
        """Return the Tweedie power parameter."""
        return self._power
    @power.setter
    def power(self, power):
        if not isinstance(power, (int, float)):
            raise TypeError(f"power must be an int or float, input was {power}")
        if (power > 0) and (power < 1):
            raise ValueError("For 0<power<1, no distribution exists.")
        # Prevents upcasting when working with 32-bit data
        self._power = power if isinstance(power, int) else np.float32(power)
    def unit_variance(self, mu: np.ndarray) -> np.ndarray:
        """Compute the unit variance of a Tweedie distribution ``v(mu) = mu^power``.
        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        Returns
        -------
        numpy.ndarray, shape (n_samples,)
        """
        p = self.power  # noqa: F841
        return numexpr.evaluate("mu ** p")
    def unit_variance_derivative(self, mu: np.ndarray) -> np.ndarray:
        r"""Compute the derivative of the unit variance of a Tweedie distribution.
        Equation: :math:`v(\mu) = p \times \mu^{(p-1)}`.
        Parameters
        ----------
        mu : array-like, shape (n_samples,)
            Predicted mean.
        Returns
        -------
        numpy.ndarray, shape (n_samples,)
        """
        p = self.power  # noqa: F841
        return numexpr.evaluate("p * mu ** (p - 1)")
    def deviance(self, y, mu, sample_weight=None) -> float:
        """Compute the deviance.
        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Sample weights.
        """
        p = self.power
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        sample_weight = np.ones_like(y) if sample_weight is None else sample_weight
        # NOTE: the dispersion parameter is only necessary to convey
        # type information on account of a bug in Cython
        # Dispatch to the specialized Cython kernel for each special case.
        if p == 0:
            return normal_deviance(y, sample_weight, mu, dispersion=1.0)
        if p == 1:
            return poisson_deviance(y, sample_weight, mu, dispersion=1.0)
        elif p == 2:
            return gamma_deviance(y, sample_weight, mu, dispersion=1.0)
        else:
            return tweedie_deviance(y, sample_weight, mu, p=float(p))
    def unit_deviance(self, y, mu):
        """Get the deviance of each observation."""
        p = self.power
        if p == 0:  # Normal distribution
            return (y - mu) ** 2
        if p == 1:  # Poisson distribution
            return 2 * (special.xlogy(y, y / mu) - y + mu)
        elif p == 2:  # Gamma distribution
            return 2 * (np.log(mu / y) + y / mu - 1)
        else:
            # General Tweedie case; mu1mp hoists the shared mu^(1-p) factor.
            mu1mp = mu ** (1 - p)
            return 2 * (
                (np.maximum(y, 0) ** (2 - p)) / ((1 - p) * (2 - p))
                - y * mu1mp / (1 - p)
                + mu * mu1mp / (2 - p)
            )
    def _rowwise_gradient_hessian(
        self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
    ):
        # Use the fused Cython kernel for the (power, link) pairs that have
        # one; otherwise fall back to the generic base-class implementation.
        f = None
        if self.power == 0 and isinstance(link, IdentityLink):
            f = normal_identity_rowwise_gradient_hessian
        elif self.power == 1 and isinstance(link, LogLink):
            f = poisson_log_rowwise_gradient_hessian
        elif self.power == 2 and isinstance(link, LogLink):
            f = gamma_log_rowwise_gradient_hessian
        elif 1 < self.power < 2 and isinstance(link, LogLink):
            f = partial(tweedie_log_rowwise_gradient_hessian, p=self.power)
        if f is not None:
            return f(y, sample_weight, eta, mu, gradient_rows, hessian_rows)
        return super()._rowwise_gradient_hessian(
            link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
        )
    def _eta_mu_deviance(
        self,
        link: Link,
        factor: float,
        cur_eta: np.ndarray,
        X_dot_d: np.ndarray,
        y: np.ndarray,
        sample_weight: np.ndarray,
        eta_out: np.ndarray,
        mu_out: np.ndarray,
    ):
        # Same dispatch pattern as _rowwise_gradient_hessian: specialized
        # kernel where available, generic base-class implementation otherwise.
        f = None
        if self.power == 0 and isinstance(link, IdentityLink):
            f = normal_identity_eta_mu_deviance
        elif self.power == 1 and isinstance(link, LogLink):
            f = poisson_log_eta_mu_deviance
        elif self.power == 2 and isinstance(link, LogLink):
            f = gamma_log_eta_mu_deviance
        elif 1 < self.power < 2 and isinstance(link, LogLink):
            f = partial(tweedie_log_eta_mu_deviance, p=self.power)
        if f is not None:
            return f(cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out, factor)
        return super()._eta_mu_deviance(
            link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
        )
    def log_likelihood(self, y, mu, sample_weight=None, dispersion=None) -> float:
        r"""Compute the log likelihood.
        For ``1 < power < 2``, we use the series approximation by Dunn and Smyth
        (2005) to compute the normalization term.
        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Sample weights.
        dispersion : float, optional (default=None)
            Dispersion parameter :math:`\phi`. Estimated if ``None``.
        """
        p = self.power
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        sample_weight = np.ones_like(y) if sample_weight is None else sample_weight
        # The Poisson likelihood does not depend on the dispersion, so it is
        # only estimated for the other powers.
        if (p != 1) and (dispersion is None):
            dispersion = self.dispersion(y, mu, sample_weight)
        if p == 0:
            return normal_log_likelihood(y, sample_weight, mu, float(dispersion))
        if p == 1:
            # NOTE: the dispersion parameter is only necessary to convey
            # type information on account of a bug in Cython
            return poisson_log_likelihood(y, sample_weight, mu, 1.0)
        elif p == 2:
            return gamma_log_likelihood(y, sample_weight, mu, float(dispersion))
        elif p < 2:
            return tweedie_log_likelihood(
                y, sample_weight, mu, float(p), float(dispersion)
            )
        else:
            # power > 2 (e.g. inverse Gaussian) has no implementation yet.
            raise NotImplementedError
    def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
        r"""Estimate the dispersion parameter :math:`\phi`.
        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights or exposure to which variance is inversely proportional.
        ddof : int, optional (default=1)
            Degrees of freedom consumed by the model for ``mu``.
        method = {'pearson', 'deviance'}, optional (default='pearson')
            Whether to base the estimate on the Pearson residuals or the deviance.
        Returns
        -------
        float
        """
        p = self.power  # noqa: F841
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        if method == "pearson":
            # Fast numexpr path specialized for the Tweedie unit variance.
            formula = "((y - mu) ** 2) / (mu ** p)"
            if sample_weight is None:
                return numexpr.evaluate(formula).sum() / (len(y) - ddof)
            else:
                formula = f"sample_weight * {formula}"
                return numexpr.evaluate(formula).sum() / (sample_weight.sum() - ddof)
        return super().dispersion(
            y, mu, sample_weight=sample_weight, ddof=ddof, method=method
        )
class NormalDistribution(TweedieDistribution):
    """Class for the Normal (a.k.a. Gaussian) distribution.

    Equivalent to ``TweedieDistribution(power=0)``: unit variance
    ``v(mu) = 1``.
    """
    def __init__(self):
        super().__init__(power=0)
class PoissonDistribution(TweedieDistribution):
    """Class for the scaled Poisson distribution.

    Equivalent to ``TweedieDistribution(power=1)``: unit variance
    ``v(mu) = mu``.
    """
    def __init__(self):
        super().__init__(power=1)
class GammaDistribution(TweedieDistribution):
    """Class for the Gamma distribution.

    Equivalent to ``TweedieDistribution(power=2)``: unit variance
    ``v(mu) = mu**2``.
    """
    def __init__(self):
        super().__init__(power=2)
class InverseGaussianDistribution(TweedieDistribution):
    """Class for the scaled Inverse Gaussian distribution.

    Equivalent to ``TweedieDistribution(power=3)``: unit variance
    ``v(mu) = mu**3``.
    """
    def __init__(self):
        super().__init__(power=3)
class GeneralizedHyperbolicSecant(ExponentialDispersionModel):
    """A class for the Generalized Hyperbolic Secant (GHS) distribution.
    The GHS distribution is for targets ``y`` in ``(-∞, +∞)``, with unit
    variance ``v(mu) = 1 + mu**2``.
    """
    # Unbounded support on both sides; the bounds themselves are excluded.
    lower_bound = -np.Inf
    upper_bound = np.Inf
    include_lower_bound = False
    include_upper_bound = False
    def unit_variance(self, mu: np.ndarray) -> np.ndarray:
        """Get the unit-level expected variance, ``1 + mu**2``.
        See superclass documentation.
        Parameters
        ----------
        mu : array-like or float
        Returns
        -------
        array-like
        """
        return 1 + mu**2
    def unit_variance_derivative(self, mu: np.ndarray) -> np.ndarray:
        """Get the derivative of the unit variance, ``2 * mu``.
        See superclass documentation.
        Parameters
        ----------
        mu : array-like or float
        Returns
        -------
        array-like
        """
        return 2 * mu
    def unit_deviance(self, y: np.ndarray, mu: np.ndarray) -> np.ndarray:
        """Get the unit-level deviance.
        See superclass documentation.
        Parameters
        ----------
        y : array-like
        mu : array-like
        Returns
        -------
        array-like
        """
        return 2 * y * (np.arctan(y) - np.arctan(mu)) + np.log(
            (1 + mu**2) / (1 + y**2)
        )
class BinomialDistribution(ExponentialDispersionModel):
    """A class for the Binomial distribution.

    The Binomial distribution is for targets ``y`` in ``[0, 1]``; fractional
    targets are accepted alongside 0/1 labels (see ``unit_deviance``).
    """

    # Closed support on [0, 1].
    lower_bound = 0
    upper_bound = 1
    include_lower_bound = True
    include_upper_bound = True

    def __init__(self):
        # No parameters to configure.
        return

    def unit_variance(self, mu: np.ndarray) -> np.ndarray:
        """Get the unit-level expected variance ``V(mu) = mu * (1 - mu)``.

        See superclass documentation.

        Parameters
        ----------
        mu : array-like

        Returns
        -------
        array-like
        """
        return mu * (1 - mu)

    def unit_variance_derivative(self, mu):
        """Get the derivative ``V'(mu) = 1 - 2 * mu`` of the unit variance.

        See superclass documentation.

        Parameters
        ----------
        mu : array-like or float

        Returns
        -------
        array-like
        """
        return 1 - 2 * mu

    def unit_deviance(self, y: np.ndarray, mu: np.ndarray) -> np.ndarray:
        """Get the unit-level deviance.

        See superclass documentation.

        Parameters
        ----------
        y : array-like
        mu : array-like

        Returns
        -------
        array-like
        """
        # see Wooldridge and Papke (1996) for the fractional case
        return -2 * (special.xlogy(y, mu) + special.xlogy(1 - y, 1 - mu))

    def _rowwise_gradient_hessian(
        self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
    ):
        # Fast path: a specialized helper fills the gradient/Hessian rows in
        # place when the link is logit; any other link defers to the generic
        # superclass implementation.
        if isinstance(link, LogitLink):
            return binomial_logit_rowwise_gradient_hessian(
                y, sample_weight, eta, mu, gradient_rows, hessian_rows
            )
        return super()._rowwise_gradient_hessian(
            link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
        )

    def _eta_mu_deviance(
        self,
        link: Link,
        factor: float,
        cur_eta: np.ndarray,
        X_dot_d: np.ndarray,
        y: np.ndarray,
        sample_weight: np.ndarray,
        eta_out: np.ndarray,
        mu_out: np.ndarray,
    ):
        # Same fast-path pattern as _rowwise_gradient_hessian: a specialized
        # logit-link routine, otherwise the generic superclass version.
        if isinstance(link, LogitLink):
            return binomial_logit_eta_mu_deviance(
                cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out, factor
            )
        return super()._eta_mu_deviance(
            link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
        )

    def log_likelihood(self, y, mu, sample_weight=None, dispersion=1) -> float:
        """Compute the log likelihood.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=1)
            Sample weights.
        dispersion : float, optional (default=1)
            Ignored.
        """
        ll = special.xlogy(y, mu) + special.xlogy(1 - y, 1 - mu)
        # Unweighted: plain sum; weighted: inner product with the weights.
        return np.sum(ll) if sample_weight is None else np.dot(ll, sample_weight)

    def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
        r"""Estimate the dispersion parameter :math:`\phi`.

        Parameters
        ----------
        y : array-like, shape (n_samples,)
            Target values.
        mu : array-like, shape (n_samples,)
            Predicted mean.
        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights or exposure to which variance is inversely proportional.
        ddof : int, optional (default=1)
            Degrees of freedom consumed by the model for ``mu``.
        method = {'pearson', 'deviance'}, optional (default='pearson')
            Whether to base the estimate on the Pearson residuals or the deviance.

        Returns
        -------
        float
        """
        y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
        if method == "pearson":
            # numexpr evaluates the formula string against this local frame,
            # so the names ``y``, ``mu`` and ``sample_weight`` are load-bearing
            # — do not rename these locals.
            formula = "((y - mu) ** 2) / (mu * (1 - mu))"
            if sample_weight is None:
                return numexpr.evaluate(formula).sum() / (len(y) - ddof)
            else:
                formula = f"sample_weight * {formula}"
                return numexpr.evaluate(formula).sum() / (sample_weight.sum() - ddof)
        # Non-Pearson methods (e.g. deviance-based) are handled generically.
        return super().dispersion(
            y, mu, sample_weight=sample_weight, ddof=ddof, method=method
        )
def guess_intercept(
    y: np.ndarray,
    sample_weight: np.ndarray,
    link: Link,
    distribution: ExponentialDispersionModel,
    eta: Union[np.ndarray, float] = None,
):
    """Guess the scalar ``b`` that (approximately) minimizes ``LL(eta + b)``
    with ``eta`` held fixed.

    The solution is exact for Tweedie distributions under a log link, for the
    normal distribution under the identity link, and for logit with no
    offset.  Any other distribution/link pair reuses the Tweedie or normal
    formula, chosen by the link function.
    """

    def _wmean(values):
        # Weighted mean over observations.
        return np.average(values, weights=sample_weight)

    def _eta_mean():
        # ``eta`` may be a scalar or one value per observation.
        return eta if np.isscalar(eta) else _wmean(eta)

    mean_y = _wmean(y)

    if isinstance(link, IdentityLink):
        # Exact only for the normal distribution.  For other distributions the
        # answer is unknown; this enforces sum(y) == sum(mu).
        return mean_y if eta is None else mean_y - _eta_mean()

    if isinstance(link, LogLink):
        # Exact only for Tweedie distributions.
        log_mean_y = np.log(mean_y)
        assert np.isfinite(log_mean_y).all()
        if eta is None:
            return log_mean_y
        mu = np.exp(eta)
        if isinstance(distribution, TweedieDistribution):
            power = distribution.power
        else:
            power = 1  # treat like Poisson
        if np.isscalar(mu):
            numerator = np.log(y.dot(sample_weight) * mu ** (1 - power))
            denominator = np.log(sample_weight.sum() * mu ** (2 - power))
        else:
            numerator = np.log((y * mu ** (1 - power)).dot(sample_weight))
            denominator = np.log((mu ** (2 - power)).dot(sample_weight))
        return numerator - denominator

    if isinstance(link, LogitLink):
        # Exact when there is no offset.
        log_odds = np.log(mean_y) - np.log(_wmean(1 - y))
        return log_odds if eta is None else log_odds - _eta_mean()

    # Unknown link: fall back to mapping the weighted target total.
    return link.link(y.dot(sample_weight))
def get_one_over_variance(
    distribution: ExponentialDispersionModel,
    link: Link,
    mu: np.ndarray,
    eta: np.ndarray,
    dispersion,
    sample_weight: np.ndarray,
):
    """Return ``1 / Var(y)`` for the given distribution and link.

    Generic case: invert ``distribution.variance`` directly.  For Tweedie
    during optimization this amounts to ``sample_weight / mu ** p`` because
    ``phi = 1``.  For Binomial with a logit link the reciprocal simplifies to
    ``sample_weight * (exp(eta) + 2 + exp(-eta)) / phi``, which is more
    numerically accurate, so that form is used instead.
    """
    logistic_case = isinstance(distribution, BinomialDistribution) and isinstance(
        link, LogitLink
    )
    if not logistic_case:
        return 1.0 / distribution.variance(
            mu, dispersion=dispersion, sample_weight=sample_weight
        )
    # Clip eta so that neither exp(eta) nor exp(-eta) can overflow.
    exp_ceiling = np.log(np.finfo(eta.dtype).max / 10)
    if np.any(np.abs(eta) > exp_ceiling):
        eta = np.clip(eta, -exp_ceiling, exp_ceiling)  # type: ignore
    return sample_weight * (np.exp(eta) + 2 + np.exp(-eta)) / dispersion
def _as_float_arrays(*args):
"""Convert to a float array, passing ``None`` through, and broadcast."""
never_broadcast = {} # type: ignore
maybe_broadcast = {}
always_broadcast = {}
for ix, arg in enumerate(args):
if isinstance(arg, (int, float)):
maybe_broadcast[ix] = np.array([arg], dtype="float")
elif arg is None:
never_broadcast[ix] = None
else:
always_broadcast[ix] = np.asanyarray(arg, dtype="float")
if always_broadcast and maybe_broadcast:
to_broadcast = {**always_broadcast, **maybe_broadcast}
_broadcast = np.broadcast_arrays(*to_broadcast.values())
broadcast = dict(zip(to_broadcast.keys(), _broadcast))
elif always_broadcast:
_broadcast = np.broadcast_arrays(*always_broadcast.values())
broadcast = dict(zip(always_broadcast.keys(), _broadcast))
else:
broadcast = maybe_broadcast # possibly `{}`
out = {**never_broadcast, **broadcast}
return [out[ix] for ix in range(len(args))]
| 31.710614 | 87 | 0.577256 | 33,181 | 0.867545 | 0 | 0 | 3,039 | 0.079457 | 0 | 0 | 17,986 | 0.470259 |
10d958d2eda3c687a5c43498b484847dfdf3bb4d | 2,733 | py | Python | apps/medicalcontrol/models.py | claudioDcv/Vetadmin | 2b5cf78bc7ad58000cedaf2474191d921e61745c | [
"MIT"
] | 1 | 2017-03-18T15:07:00.000Z | 2017-03-18T15:07:00.000Z | apps/medicalcontrol/models.py | claudioDcv/Vetadmin | 2b5cf78bc7ad58000cedaf2474191d921e61745c | [
"MIT"
] | null | null | null | apps/medicalcontrol/models.py | claudioDcv/Vetadmin | 2b5cf78bc7ad58000cedaf2474191d921e61745c | [
"MIT"
] | null | null | null | from django.db import models
from apps.core.models import Patient, Person
from django_extensions.db.fields import AutoSlugField
from apps.core.utils import STYLE_IMG_MIN
from apps.pharmacy.models import SupplyAssignControl
class Medicalcontrol(models.Model):
    """A medical-control (check-up) record for a patient, holding up to three
    photos plus the supplies consumed during the control."""

    name = models.CharField(max_length=100)
    # Slug derived from ``name``; used as a mnemonic identifier.
    natural_key = AutoSlugField(populate_from='name', verbose_name='nemotécnico')
    make = models.DateTimeField()
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    # Only persons with rol id 1 can be selected as the medic.
    medic = models.ForeignKey(Person, null=True, blank=True, verbose_name='medic', limit_choices_to={'rol__id': 1})  # noqa
    supplies = models.ManyToManyField(SupplyAssignControl, blank=True)
    photo_first = models.ImageField(upload_to='medicalcontrol/%Y/%m/%d/', verbose_name='foto 1', null=True, blank=True)  # noqa
    photo_second = models.ImageField(upload_to='medicalcontrol/%Y/%m/%d/', verbose_name='foto 2', null=True, blank=True)  # noqa
    photo_third = models.ImageField(upload_to='medicalcontrol/%Y/%m/%d/', verbose_name='foto 3', null=True, blank=True)  # noqa

    def photo_first_tag(self):
        """Return an HTML snippet with thumbnails of the three photos,
        substituting a placeholder image for any missing one."""
        if not self.photo_first:
            p1 = '/media/none.jpg'
        else:
            p1 = self.photo_first.url
        if not self.photo_second:
            p2 = '/media/none.jpg'
        else:
            p2 = self.photo_second.url
        if not self.photo_third:
            p3 = '/media/none.jpg'
        else:
            p3 = self.photo_third.url
        return '<img style="{}" src="{}" />\
            <img style="{}" src="{}" />\
            <img style="{}" src="{}" />'.format(
            STYLE_IMG_MIN, p1,
            STYLE_IMG_MIN, p2,
            STYLE_IMG_MIN, p3,
        )
    photo_first_tag.short_description = 'miniatura'
    photo_first_tag.allow_tags = True

    def name_tag(self):
        """Return an HTML admin label: "<id> - <name>" plus the slug."""
        return '<strong>{} - {}</strong></br>{}'.format(self.id, self.name, self.natural_key)  # noqa
    name_tag.admin_order_field = 'name'
    name_tag.short_description = 'Nombre'
    name_tag.allow_tags = True

    def save(self, *args, **kwargs):
        """Delete replaced photo files from storage before saving.

        Fix: the original used a bare ``except:`` which also swallowed
        ``KeyboardInterrupt``/``SystemExit`` and hid genuine storage errors.
        The only expected failure is the record not existing yet, so catch
        exactly that.
        """
        # delete old file when replacing by updating the file
        try:
            this = Medicalcontrol.objects.get(id=self.id)
            if this.photo_first != self.photo_first:
                this.photo_first.delete()  # save=False
            if this.photo_second != self.photo_second and this.photo_second is not None:
                this.photo_second.delete()  # save=False
            if this.photo_third != self.photo_third and this.photo_third is not None:
                this.photo_third.delete()  # save=False
        except Medicalcontrol.DoesNotExist:
            pass  # new record: there is no old photo to delete, normal case
        super(Medicalcontrol, self).save(*args, **kwargs)
| 40.191176 | 127 | 0.643249 | 2,508 | 0.917337 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.1891 |
10db0a1461dd4901dea43c8088f2d331ca5bbb5e | 908 | py | Python | icetray/resources/test/shoulddo.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 1 | 2020-12-24T22:00:01.000Z | 2020-12-24T22:00:01.000Z | icetray/resources/test/shoulddo.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | null | null | null | icetray/resources/test/shoulddo.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 3 | 2020-07-17T09:20:29.000Z | 2021-03-30T16:44:18.000Z | #!/usr/bin/env python
#
# Sample i3module in python
#
from icecube.icetray import *
from I3Tray import *
# Build the test tray; the bottomless source presumably keeps emitting
# frames until tray.Execute(n) below limits the count — verify against
# the icetray module docs.
tray = I3Tray()
# generate empty frames
tray.AddModule("BottomlessSource","bottomless")
def make_counter(base):
    """Wrap *base* in a subclass that checks ShouldDoPhysics/Physics pairing.

    The returned class counts ShouldDoPhysics calls in ``self.sdp`` and, in
    Physics, asserts that exactly one such call preceded it before resetting
    the counter and pushing the frame downstream.
    """

    class ShouldCounter(base):
        def __init__(self, context):
            base.__init__(self, context)
            self.sdp = 0  # ShouldDoPhysics calls seen since the last Physics

        def ShouldDoPhysics(self, frame):
            self.sdp = self.sdp + 1
            print(base.__name__ + " *** ShouldDoPhysics")
            return True

        def Physics(self, frame):
            # Report the count before verifying it, so a failure is visible.
            print("%s *** sdp == %d" % (base.__name__, self.sdp))
            assert self.sdp == 1
            self.sdp = 0
            self.PushFrame(frame)

    return ShouldCounter
# Register the counting wrapper around both module base classes so the
# ShouldDoPhysics bookkeeping is exercised for each kind of module.
tray.AddModule(make_counter(I3Module), "modulecounter")
tray.AddModule(make_counter(I3ConditionalModule), "conditionalmodulecounter")
# do it 5 times.
tray.Execute(5)
| 23.282051 | 77 | 0.615639 | 473 | 0.520925 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.221366 |
10db498d974995cdddc73b0eb663f988c2c1a75e | 3,405 | py | Python | Scripts/Biochemistry/Merge_Compounds.py | nkkchem/ModelSEEDDatabase | a117e433f540e931ee60a961f0d23cfc23cc387d | [
"MIT"
] | null | null | null | Scripts/Biochemistry/Merge_Compounds.py | nkkchem/ModelSEEDDatabase | a117e433f540e931ee60a961f0d23cfc23cc387d | [
"MIT"
] | null | null | null | Scripts/Biochemistry/Merge_Compounds.py | nkkchem/ModelSEEDDatabase | a117e433f540e931ee60a961f0d23cfc23cc387d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os, sys

# NOTE: this is a Python 2 script (it uses `print` statements below).
temp=list();
header=1;
sys.path.append('../../Libs/Python')
from BiochemPy import Reactions, Compounds

# Load the full biochemistry: every reaction and every compound.
ReactionsHelper = Reactions()
Reactions_Dict = ReactionsHelper.loadReactions()
CompoundsHelper = Compounds()
Compounds_Dict = CompoundsHelper.loadCompounds()

# The compound being merged away and the compound that absorbs it.
Compound_To_Merge_From="cpd00013"
Compound_To_Merge_To="cpd19013"

# Build two-way lookup tables from the stoichiometry strings
# ("coeff:cpd:cpt:index:name" reagents joined by ';'):
#   compound -> reactions it appears in, reaction -> compounds it contains.
# Reactions whose status is EMPTY are skipped.
Cpds_Rxns_Dict=dict()
Rxns_Cpds_Dict=dict()
for rxn in Reactions_Dict.keys():
    if(Reactions_Dict[rxn]["status"] == "EMPTY"):
        continue
    for rgt in Reactions_Dict[rxn]["stoichiometry"].split(";"):
        (coeff,cpd,cpt,index,name) = rgt.split(":",4)
        if(cpd not in Cpds_Rxns_Dict):
            Cpds_Rxns_Dict[cpd]=dict()
        Cpds_Rxns_Dict[cpd][rxn]=1
        if(rxn not in Rxns_Cpds_Dict):
            Rxns_Cpds_Dict[rxn]=dict()
        Rxns_Cpds_Dict[rxn][cpd]=1

#Merging two compounds means:
#1) You take all the reactions for the second compound, and replace the compound id in the second reaction with the first compound
#Change stoichiometry only, first
#2) You check to see if all the reactions are balanced following the change
#3) You need to check to see if new reactions are now merged/linked to other reactions in database
#If truly still new reaction, change definition, code, compound_ids, equation
#If merged, change to obsolete and store linked reaction(s)
#4) You need to update Aliases
#5) You need to update media
#6) You need to report possible updates in templates (and, following modifications, re-build any public models?)

# Step 1: rewrite the stoichiometry of every reaction that contains the
# source compound; print and stop at the first one that did NOT change.
for rxn in Cpds_Rxns_Dict[Compound_To_Merge_From].keys():
    old_stoichiometry = Reactions_Dict[rxn]["stoichiometry"]
    new_stoichiometry_array = list()
    for rgt in old_stoichiometry.split(";"):
        (coeff,cpd,cpt,index,name) = rgt.split(":",4)
        #Replace cpd
        if(cpd == Compound_To_Merge_From):
            cpd = Compound_To_Merge_To
        new_stoichiometry_array.append(":".join([coeff,cpd,cpt,index,name]))
    new_stoichiometry = ";".join(new_stoichiometry_array)
    if(new_stoichiometry == old_stoichiometry):
        print rxn, old_stoichiometry, new_stoichiometry
        break

# NOTE(review): everything below this call is currently unreachable — the
# script always stops here (work-in-progress / debugging guard?).
sys.exit()

# Step 2 (dead code, see above): re-balance every non-EMPTY reaction and
# record status changes in Reactions_Dict.
Update_Reactions=0
for rxn in sorted(Reactions_Dict.keys()):
    if(Reactions_Dict[rxn]["status"] == "EMPTY"):
        continue
    Rxn_Cpds_Array=list()
    for rgt in Reactions_Dict[rxn]["stoichiometry"].split(";"):
        (coeff,cpd,cpt,index,name) = rgt.split(":",4)
        rgt_id = cpd+"_"+cpt+index
        Rxn_Cpds_Array.append({"reagent":rgt_id,"coefficient":coeff,
                              "formula":Compounds_Dict[cpd]["formula"],
                              "charge":Compounds_Dict[cpd]["charge"]})
    Status = ReactionsHelper.balanceReaction(Rxn_Cpds_Array)
    if("ERROR" in Status):
        # print rxn,Status
        continue
    #Remove old HB message
    old_status = ""
    for item in Reactions_Dict[rxn]["status"].split("|"):
        if(item != "HB"):
            old_status += item+"|"
    old_status = old_status[0:-1]
    if(Status != old_status and ("OK" not in old_status and "OK" not in Status)):
        print "Changing Status for "+rxn+" from "+Reactions_Dict[rxn]["status"]+" to "+Status
        Reactions_Dict[rxn]["status"]=Status
        Update_Reactions=1

#if(Update_Reactions==1):
#    print "Saving reactions";
#    ReactionsHelper.saveReactions(Reactions_Dict)
| 34.744898 | 130 | 0.676065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,140 | 0.334802 |
10db75502d4a745e4f587f18e42925c85597af44 | 1,110 | py | Python | Homework.py | Quericee/da | 1207bae632fc0a1a755a5bd57d0236a43d31efbf | [
"MIT"
] | null | null | null | Homework.py | Quericee/da | 1207bae632fc0a1a755a5bd57d0236a43d31efbf | [
"MIT"
] | null | null | null | Homework.py | Quericee/da | 1207bae632fc0a1a755a5bd57d0236a43d31efbf | [
"MIT"
] | null | null | null | import math
import pandas as pd

# Load the dataset and drop every row with a missing value in any column.
data = pd.read_csv("works.csv").dropna()

# Count people whose job title differs from their qualification.
count_people = 0
for (jobTitle, qualification) in zip(data['jobTitle'], data['qualification']):
    if jobTitle != qualification:
        count_people += 1
print(f"У {count_people} человек профессия и должность не совпадают")

# Top-5 qualifications among people whose job title contains "менеджер"
# ("manager", case-insensitive match).
menegers = data[data['jobTitle'].str.lower().str.contains("менеджер")]['qualification'].value_counts().head()
print("Топ 5 образований менеджеров: ")
print(menegers)

# Top-5 job titles among people whose qualification contains "инженер"
# ("engineer", case-insensitive match).
injineers = data[data['qualification'].str.lower().str.contains("инженер")]['jobTitle'].value_counts().head()
print("Топ 5 специальностей инженеров: ")
print(injineers)

# Sample output (in Russian), kept verbatim:
# У 1052 человек профессия и должность не совпадают
# Топ 5 образований менеджеров:
# Бакалавр               9
# менеджер               5
# Специалист             5
# Менеджер               5
# Экономист              4
# Топ 5 специальностей инженеров:
# заместитель директора                     2
# Инженер лесопользования                   2
# главный инженер                           2
# Директор                                  2
# заместитель директора по производству     1
| 30.833333 | 110 | 0.640541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 998 | 0.692094 |
10dc00d7522da15de0e33f479a057809367bc817 | 3,439 | py | Python | examtool/examtool/cli/compile_all.py | ThaumicMekanism/cs61a-apps | 2dfd1402a87569020fe28b5bd1b9282703e3eebd | [
"MIT"
] | null | null | null | examtool/examtool/cli/compile_all.py | ThaumicMekanism/cs61a-apps | 2dfd1402a87569020fe28b5bd1b9282703e3eebd | [
"MIT"
] | null | null | null | examtool/examtool/cli/compile_all.py | ThaumicMekanism/cs61a-apps | 2dfd1402a87569020fe28b5bd1b9282703e3eebd | [
"MIT"
] | null | null | null | import json
import os
import pathlib
from datetime import datetime
from io import BytesIO
from pikepdf import Pdf, Encryption
import click
import pytz
from examtool.api.database import get_exam, get_roster
from examtool.api.utils import sanitize_email
from examtool.api.scramble import scramble
from examtool.api.gen_latex import render_latex
from examtool.cli.utils import (
determine_semester,
exam_name_option,
hidden_output_folder_option,
prettify,
)
@click.command()
@exam_name_option
@hidden_output_folder_option
@click.option(
    "--subtitle",
    prompt=True,
    default="Structure and Interpretation of Computer Programs",
)
@click.option(
    "--do-twice",
    is_flag=True,
    help="Run the compile twice for each student to fix weird rendering bugs.",
)
@click.option("--email", help="The email address of a particular student.")
@click.option(
    "--exam-type",
    default="Final Exam",
    help="The type of exam you are given. For example 'Final Exam' (default).",
)
@click.option(
    "--semester",
    default=determine_semester(),
    help=f"The semester of the exam. '{determine_semester()}' (default).",
)
@click.option(
    "--deadline",
    default=None,
    help="Generates exam regardless of if student is in roster with the set deadline.",
)
def compile_all(
    exam,
    out,
    subtitle,
    do_twice,
    email,
    exam_type,
    semester,
    deadline,
):
    """
    Compile individualized PDFs for the specified exam.
    Exam must have been deployed first.

    For every (email, deadline) pair in the roster, renders a scrambled copy
    of the exam to LaTeX, compiles it, and writes a password-protected PDF
    named after the student's email into ``out``.
    """
    if not out:
        out = "out/latex/" + exam
    pathlib.Path(out).mkdir(parents=True, exist_ok=True)
    try:
        exam_data = get_exam(exam=exam)
    except Exception as e:
        print(
            f"Exception: Unable to pull the exam {exam}. Received: {e}\nDid you deploy the exam first?"
        )
        return
    # The stored secret's final character is dropped to form the PDF
    # password — presumably a sentinel appended at deploy time; confirm.
    password = exam_data.pop("secret")[:-1]
    # NOTE(review): this prints the exam password to stdout — confirm intended.
    print(password)
    # Serialize once so each student below gets a pristine deep copy.
    exam_str = json.dumps(exam_data)
    roster = get_roster(exam=exam)
    if email:
        # Restrict the roster to the requested student; if absent, fall back
        # to the explicit --deadline or fail loudly.
        roster = [line_info for line_info in roster if line_info[0] == email]
        if len(roster) == 0:
            if deadline:
                roster = [(email, deadline)]
            else:
                raise ValueError("Email does not exist in the roster!")
    for email, deadline in roster:
        # Students without a deadline are skipped entirely.
        if not deadline:
            continue
        exam_data = json.loads(exam_str)
        # Presumably personalizes (shuffles) the copy per student email — see
        # examtool.api.scramble for the exact semantics.
        scramble(email, exam_data)
        # Deadlines are UNIX timestamps; render as a Pacific-time wall-clock
        # string.  NOTE(review): datetime.utcfromtimestamp is deprecated
        # since Python 3.12.
        deadline_utc = datetime.utcfromtimestamp(int(deadline))
        deadline_pst = pytz.utc.localize(deadline_utc).astimezone(
            pytz.timezone("America/Los_Angeles")
        )
        deadline_string = deadline_pst.strftime("%I:%M%p")
        with render_latex(
            exam_data,
            {
                "emailaddress": sanitize_email(email),
                "deadline": deadline_string,
                "coursecode": prettify(exam.split("-")[0]),
                "description": subtitle,
                "examtype": exam_type,
                "semester": semester,
            },
            do_twice=do_twice,
        ) as pdf:
            # Encrypt the compiled PDF with the exam password for both the
            # owner and user roles before writing it out.
            pdf = Pdf.open(BytesIO(pdf))
            pdf.save(
                os.path.join(
                    out, "exam_" + email.replace("@", "_").replace(".", "_") + ".pdf"
                ),
                encryption=Encryption(owner=password, user=password),
            )
            pdf.close()
# Allow running this module directly as a script.
if __name__ == "__main__":
    compile_all()
| 27.293651 | 103 | 0.610352 | 0 | 0 | 0 | 0 | 2,916 | 0.847921 | 0 | 0 | 848 | 0.246583 |
10dc049135b7cdd6cae504711512f545f568bc84 | 5,020 | py | Python | main/tests/test_views.py | Lumexralph/book-store | 990ad187f0babd1a08e83a90c381117ce5d4f686 | [
"Apache-2.0"
] | null | null | null | main/tests/test_views.py | Lumexralph/book-store | 990ad187f0babd1a08e83a90c381117ce5d4f686 | [
"Apache-2.0"
] | 8 | 2020-06-05T23:45:34.000Z | 2022-01-13T01:47:23.000Z | main/tests/test_views.py | Lumexralph/book-store | 990ad187f0babd1a08e83a90c381117ce5d4f686 | [
"Apache-2.0"
] | null | null | null | from decimal import Decimal
from unittest.mock import patch
from django.test import TestCase
from django.urls import reverse
from django.contrib import auth
from main.models import Product, User, Address
from main.forms import UserCreationForm
class TestPage(TestCase):
    """View tests: static pages, product listing/filtering, signup, and
    address list/create ownership."""

    def test_home_page_works(self):
        """Home page renders with the expected template and branding."""
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'home.html')
        self.assertContains(response, 'BookStore')

    def test_about_us_page_works(self):
        """About page renders with the expected template and branding."""
        response = self.client.get(reverse('about_us'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'about_us.html')
        self.assertContains(response, 'BookStore')

    def test_products_page_returns_active(self):
        """Product listing with tag "all" shows only active products."""
        Product.objects.create(
            name='The cathedral and the bazaar',
            slug='cathedral-bazaar',
            price=Decimal('10.00'),
        )
        Product.objects.create(
            name='A Tale of Two Cities',
            slug='tale-two-cities',
            price=Decimal('2.00'),
            active=False,
        )
        product_list = Product.objects.active().order_by(
            'name'
        )
        response = self.client.get(
            reverse('products', kwargs={'tag': "all"})
        )
        self.assertEqual(
            list(response.context['object_list']),
            list(product_list),
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'BookStore')

    def test_products_page_filters_by_tag_and_active(self):
        """Product listing filters by tag slug and still excludes inactive."""
        cb = Product.objects.create(
            name='The cathedral and the bazaar',
            slug='cathedral-bazaar',
            price=Decimal('10.00'),
        )
        cb.tags.create(name='Open Source', slug='open-source')
        Product.objects.create(
            name='A Tale of Two Cities',
            slug='tale-two-cities',
            price=Decimal('2.00'),
            active=False,
        )
        response = self.client.get(
            reverse('products', kwargs={'tag': 'open-source'})
        )
        product_list = (
            Product.objects.active()
            .filter(tags__slug='open-source')
            .order_by('name')
        )
        self.assertEqual(
            list(response.context['object_list']),
            list(product_list),
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'BookStore')

    def test_user_signup_page_loads_correctly(self):
        """Signup page renders with the user-creation form in context."""
        response = self.client.get(reverse('signup'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'signup.html')
        self.assertContains(response, "BookStore")
        self.assertIsInstance(response.context['form'], UserCreationForm)

    def test_user_signup_page_submission_works(self):
        """Signing up redirects, logs the user in, and sends a welcome mail."""
        post_data = {
            'email': 'user@domain.com',
            'password1': 'abcabcabc',
            'password2': 'abcabcabc',
        }
        # Patch out mail delivery; only verify that it was requested once.
        with patch.object(UserCreationForm, 'send_mail') as mock_send:
            response = self.client.post(reverse('signup'), post_data)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(auth.get_user(self.client).is_authenticated)
        self.assertTrue(User.objects.filter(email='user@domain.com').exists())
        mock_send.assert_called_once()

    def test_address_list_page_returns_owned_by_user(self):
        """Address list shows only the logged-in user's addresses."""
        user1 = User.objects.create_user("user1", "12345pw")
        user2 = User.objects.create_user("user2", "12345pw")
        Address.objects.create(
            user=user1,
            name="lumex ralph",
            address1="1 mende",
            address2="24 church street",
            city="kano",
            country="Nigeria",
        )
        Address.objects.create(
            user=user2,
            name="Ian ralph",
            address1="4 mendez",
            address2="24 boulevard street",
            city="Abuja",
            country="Nigeria",
        )
        self.client.force_login(user2)
        response = self.client.get(reverse("address_list"))
        self.assertEqual(response.status_code, 200)
        address_list = Address.objects.filter(user=user2)
        self.assertEqual(
            list(response.context["object_list"]),
            list(address_list),
        )

    def test_address_create_stores_user(self):
        """Creating an address associates it with the logged-in user."""
        user1 = User.objects.create_user("user1", "12345pw")
        post_data = {
            "name": "dedah walker",
            "address1": "20 broadstreet",
            "address2": "",
            "zip_code": "IKJ20",
            "city": "Ibadan",
            "country": "brazil",
        }
        self.client.force_login(user1)
        self.client.post(
            reverse("address_create"), post_data,
        )
        # Fix: assertEqual was called with a single argument (TypeError at
        # runtime); assertTrue is the intended assertion here.
        self.assertTrue(Address.objects.filter(user=user1).exists())
10dc80116f563ea0a2e9886f4435631d1b02b4b4 | 299 | py | Python | 021_loguru/multifile01.py | fkubota/rkkubotay-gmail.com | 03abffd520b6ba241d102184bba28507c8aa4d61 | [
"MIT"
] | null | null | null | 021_loguru/multifile01.py | fkubota/rkkubotay-gmail.com | 03abffd520b6ba241d102184bba28507c8aa4d61 | [
"MIT"
] | 49 | 2021-01-12T07:25:17.000Z | 2022-03-12T00:53:24.000Z | 021_loguru/multifile01.py | fkubota/rkkubotay-gmail.com | 03abffd520b6ba241d102184bba28507c8aa4d61 | [
"MIT"
] | null | null | null | from loguru import logger
from multifile02 import sum_ab
def run():
    """Add a file sink and emit a delimited run of log records."""
    # Records now also go to log_multifile.log (in addition to any
    # previously configured sinks).
    logger.add("log_multifile.log")
    logger.info("="*30)  # visual separator between runs in the log
    logger.info("start run")
    # sum_ab lives in multifile02 — presumably it logs too, demonstrating
    # that sinks added here capture records from other modules; verify.
    sum_ab(5, 7)
    logger.info("end run")
def main():
    """Entry point: perform the demo run, then log completion."""
    run()
    logger.success('Complete.')
if __name__ == "__main__":
main()
| 14.95 | 35 | 0.625418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.210702 |
10dcd63f6971079940c1323786fecd18f6fad2b3 | 162 | py | Python | pythonperlin/__init__.py | timpyrkov/pyperlin | c79080657aa79df1abc83e481d2b09cac5edbff7 | [
"MIT"
] | null | null | null | pythonperlin/__init__.py | timpyrkov/pyperlin | c79080657aa79df1abc83e481d2b09cac5edbff7 | [
"MIT"
] | null | null | null | pythonperlin/__init__.py | timpyrkov/pyperlin | c79080657aa79df1abc83e481d2b09cac5edbff7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from pythonperlin.perlin import perlin
from pkg_resources import get_distribution
__version__ = get_distribution('pythonperlin').version | 27 | 54 | 0.796296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.228395 |
10dd33d3f2c88f6ed9447b48563fc0a3cb09360a | 4,903 | py | Python | reservas/settings/base.py | fedegallar/reservas | 75fc06b9dedf53eca76b61ea0ccc914d5e084b2d | [
"MIT"
] | 1 | 2018-11-10T14:57:54.000Z | 2018-11-10T14:57:54.000Z | reservas/settings/base.py | fedegallar/reservas | 75fc06b9dedf53eca76b61ea0ccc914d5e084b2d | [
"MIT"
] | 6 | 2020-06-05T17:11:56.000Z | 2021-09-07T23:38:00.000Z | reservas/settings/base.py | fedegallar/reservas | 75fc06b9dedf53eca76b61ea0ccc914d5e084b2d | [
"MIT"
] | 1 | 2019-04-16T20:00:05.000Z | 2019-04-16T20:00:05.000Z | # coding=utf-8
"""
Django settings for reservas project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import random
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Establece la clave secreta a partir de la variable de entorno 'DJANGO_SECRET_KEY', o genera una
# clave aleatoria si ésta no se encuentra seteada.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY',
''.join([random.SystemRandom()
.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)')
for i in range(50)]))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Administradores del proyecto.
ADMINS = []
MANAGERS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djangobower',
'app_facturacion',
'app_reservas.apps.ReservasConfig',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'reservas.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'reservas.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Establece el prefijo para el proyecto Django, según la configuración
# del servidor web.
DJANGO_URL_PREFIX = os.environ.get('DJANGO_URL_PREFIX', '')
# Da formato al prefijo URL, para que sea de la forma '<prefijo>/'.
# 1. Quita las barras iniciales y finales, por si el prefijo cuenta con más de una.
DJANGO_URL_PREFIX = DJANGO_URL_PREFIX.strip('/')
# 2. Añade una única barra final, en caso de que el prefijo no haya quedado vacío luego de la
# operación anterior.
if DJANGO_URL_PREFIX:
DJANGO_URL_PREFIX += '/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/' + DJANGO_URL_PREFIX + 'static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/' + DJANGO_URL_PREFIX + 'media/'
EVENTOS_URL = 'media/app_reservas/eventos_recursos/'
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'components')
BOWER_INSTALLED_APPS = (
'bootstrap-datepicker#1.6.0',
'bootswatch-dist#3.3.6-flatly',
'font-awesome#4.7.0',
'fullcalendar-scheduler',
'handsontable#0.31.2',
'jquery#1.9.1',
'pace#1.0.2',
'qtip2#2.2.1',
'slick-carousel#1.6.0'
)
# Token de Google Calendar, utilizado para consultar la información de eventos
# de los calendarios de Google Calendar.
GOOGLE_CALENDAR_TOKEN = os.environ.get('GOOGLE_CALENDAR_TOKEN', '')
BROKER_URL = os.environ.get('BROKER_URL', 'amqp://guest:guest@rabbit//')
| 29.011834 | 98 | 0.698756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,282 | 0.66816 |
10ddaf79aa926db060bebc8c2157d158b2eb7df1 | 27,684 | py | Python | python/learning2.py | dblalock/sprintz | a056cdb67d049669875ab5487359aca99ae873ea | [
"Apache-2.0"
] | 45 | 2019-02-02T15:50:25.000Z | 2022-03-08T05:42:06.000Z | python/learning2.py | memetb/sprintz | a056cdb67d049669875ab5487359aca99ae873ea | [
"Apache-2.0"
] | 2 | 2020-03-27T23:29:15.000Z | 2020-11-09T01:37:08.000Z | python/learning2.py | memetb/sprintz | a056cdb67d049669875ab5487359aca99ae873ea | [
"Apache-2.0"
] | 10 | 2019-03-08T09:04:29.000Z | 2022-02-18T22:28:23.000Z | #!/usr/bin/env python
import collections
# import itertools
import numpy as np
# from sklearn import linear_model as linear # for VAR
# from .utils import sliding_window as window
# from .utils.distance import kmeans, dists_sq
# from .utils import distance as dist
# from python import compress
# ================================================================ shifts lut
# Hand-picked (pos_shift, neg_shift) pairs; each pair encodes the coefficient
# (256 >> pos_shift) - (256 >> neg_shift), i.e. a signed fraction of 256.
# Inline comments show the fractional value of each pair.
SHIFT_PAIRS_16 = [
    (7, 1), # ~0 - .5 = ~-.5
    (3, 1), # .125 - .5 = -.375
    (2, 1), # .25 - .5 = -.25
    # (4, 2), # .0625 - .25 = -.1875
    (3, 2), # .125 - .25 = -.125
    (4, 3), # .0625 - .125 = -.0625
    (0, 0), # 1 - 1 = 0
    (3, 4), # .125 - .0625 = .0625
    (2, 3), # .25 - .125 = .125
    (2, 4), # .25 - .0625 = .1875
    (1, 2), # .5 - .25 = .25
    (1, 3), # .5 - .125 = .375
    (0, 1), # 1 - .5 = .5
    (0, 2), # 1 - .25 = .75
    (0, 3), # 1 - .125 = .875
    (0, 4), # 1 - .0625 = .9375
    (0, 7), # 1 - ~0 = ~1
]
# should be equivalent to `all_shifts(max_shift=5, omit_duplicates=True)`
# EDIT: wait, no, not true because we have shifts of 7 at the ends
# Denser variant of SHIFT_PAIRS_16; "# added" marks entries not in the 16-table.
SHIFT_PAIRS_26 = [
    (7, 1), # ~0 - .5 = ~-.5
    (5, 1), # .03125 - .5 = -.46875 # added
    (4, 1), # .0625 - .5 = -.4375 # added, max 4
    (3, 1), # .125 - .5 = -.375
    (2, 1), # .25 - .5 = -.25
    (5, 2), # .03125- .25 = -.21875
    (4, 2), # .0625 - .25 = -.1875 # added, max 4
    (3, 2), # .125 - .25 = -.125
    (5, 3), # .03125- .125 = -.09375 # added
    (4, 3), # .0625 - .125 = -.0625
    (5, 4), # .03125- .0625 = -.03125 # added
    (0, 0), # 1 - 1 = 0
    (4, 5), # .0625 - .03125= .03125
    (3, 4), # .125 - .0625 = .0625
    (3, 5), # .125 - .03125= .09375 # added
    (2, 3), # .25 - .125 = .125
    (2, 4), # .25 - .0625 = .1875
    (2, 5), # .25 - .03125= .21875 # added
    (1, 2), # .5 - .25 = .25
    (1, 3), # .5 - .125 = .375
    (1, 4), # .5 - .0625 = .4375 # added, max 4
    (1, 5), # .5 - .03125= .46875 # added
    (0, 1), # 1 - .5 = .5
    (0, 2), # 1 - .25 = .75
    (0, 3), # 1 - .125 = .875
    (0, 4), # 1 - .0625 = .9375
    (0, 5), # 1 - .03125= .96875 # added
    (0, 7), # 1 - ~0 = ~1
]
def all_shifts(max_shift=-1, omit_duplicates=True):
    """Enumerate every (pos_shift, neg_shift) pair and its coefficient.

    Each pair (a, b) has coefficient (256 >> a) - (256 >> b). When
    ``omit_duplicates`` is True, pairs with a == b (all of which yield 0)
    are collapsed into the single entry (0, 0).

    Returns a tuple ``(pairs, coeffs)`` of numpy arrays, sorted ascending
    by coefficient, with ``pairs[i]`` corresponding to ``coeffs[i]``.
    """
    nbits = 8
    base = 1 << nbits  # 256, the reference value the shifts act on
    if max_shift < 0:
        max_shift = nbits - 1
    pair_to_coeff = {}
    if omit_duplicates:
        pair_to_coeff[(0, 0)] = 0
    for a in range(max_shift + 1):
        for b in range(max_shift + 1):
            if omit_duplicates and a == b:
                continue
            pair_to_coeff[(a, b)] = (base >> a) - (base >> b)
    pairs = np.array(list(pair_to_coeff.keys()))
    coeffs = np.array(list(pair_to_coeff.values()))
    order = np.argsort(coeffs)
    return pairs[order], coeffs[order]
# okay, looks like (according to test immediately below) these values are
# identical to what's in our existing LUT; this makes sense given that impls
# are basically identical
def _i16_for_shifts(pos_shift, neg_shift, nbits=8):
start_val = 1 << nbits # 256 for nbits = 8
return (start_val >> pos_shift) - (start_val >> neg_shift)
# TODO actual unit test
def _test_shift_coeffs(nbits=8):
    """Sanity-check the shift tables (raises AssertionError on failure).

    Verifies that every (pos_shift, neg_shift) pair from all_shifts() matches
    its tabulated coefficient, and that for each int8 value the two-shift
    form equals the per-term multiply-then-rshift form. (A single multiply
    by the combined coefficient followed by one rshift is NOT equivalent,
    which is why two separate multiplies are checked here.)
    """
    shifts, shift_coeffs = all_shifts()
    for (pos_shift, neg_shift), coeff in zip(shifts, shift_coeffs):
        assert _i16_for_shifts(pos_shift, neg_shift) == coeff
        for val in range(-128, 128):
            expected = (val >> pos_shift) - (val >> neg_shift)
            # multiply by 2^(nbits - shift), then shift back down by nbits
            pos = (val * (1 << (nbits - pos_shift))) >> nbits
            neg = (val * (1 << (nbits - neg_shift))) >> nbits
            assert pos - neg == expected
# def coeff_lut():
# """create lookup table `T` such that `T[coeff]` yields the two indices
# whose associated coefficients are immediately above and below `coeff`"""
# shifts, shift_coeffs = all_shifts()
# Module-level cache of all shift pairs and their coefficients, sorted
# ascending by coefficient (see all_shifts()).
SHIFTS, SHIFT_COEFFS = all_shifts()
# ================================================================ funcs
def binary_search(array, val):
    """Search sorted ``array`` for ``val``.

    Returns the exact index when ``val`` is present; otherwise returns the
    last probed midpoint, which is an index adjacent to where ``val`` would
    be inserted (callers use it as a nearest-neighbor starting point).
    """
    lo = 0
    hi = len(array) - 1
    mid = int(len(array) / 2)
    while lo <= hi:
        probe = array[mid]
        if probe == val:
            return mid
        if probe < val:
            lo = mid + 1
        else:
            hi = mid - 1
        # int(... / 2) truncates toward zero (unlike //), which keeps the
        # returned index at 0 instead of -1 when val < array[0]
        mid = int((lo + hi) / 2)
    return mid
class OnlineRegressor(object):
    """Online learner for a shift-friendly linear delta predictor.

    Maintains integer counter state so that the learned coefficient can be
    realized with shifts/multiplies in a C implementation. The update rule
    used per group is selected by ``method`` ('linreg', 'gradient', or
    'exact'); see feed_group() for details.
    """
    def __init__(self, block_sz=8, verbose=0, method='linreg',
                 shifts=SHIFTS, shift_coeffs=SHIFT_COEFFS, numbits=8, ntaps=1):
        # self.prev0 = 0
        # self.prev1 = 0
        # self.mod = 1 << nbits
        # self.shift0 = 0
        # self.shift1 = 1
        self.block_sz = block_sz
        self.verbose = verbose
        self.method = method
        self.shifts = shifts
        self.shift_coeffs = shift_coeffs
        self.numbits = numbits
        self.ntaps = ntaps
        # streaming state carried across groups
        self.last_val = 0
        self.last_delta = 0
        # NOTE: the second assignment wins; the coefficient starts at 256
        # (i.e. 1.0 in fixed point), not 0
        self.coef = 0
        self.coef = 256
        self.counter = 0
        # self.counter = 256 << (1 + self.numbits - 8) # TODO indirect to learning rate, not just 1 # noqa
        # self.counter = 8 << 1 # equivalent to adding 8 to round to nearest?
        # self.counter = self.coef
        self.t = 0
        self.grad_counter = 0
        self.offset = 0
        self.offset_counter = 0
        shift_by = (1 + self.numbits - 8)
        # per-tap coefficients/counters (only used when ntaps > 1)
        self.coeffs = np.zeros(self.ntaps, dtype=np.int32) + 256
        self.counters = np.zeros(self.ntaps, dtype=np.int32) + (256 << shift_by)
        # self.approx_256_over_x = 1
        # running sums for the 'exact' (least-squares) method
        self.Sxy = 0
        self.Sxx = 0
        self.errs = []
        # print "using shifts, coeffs:"
        # print shifts
        # print shift_coeffs
        # for logging
        # self.best_idx_offset_counts = np.zeros(3, dtype=np.int64)
        self.best_idx_counts = np.zeros(len(self.shifts), dtype=np.int64)
        # counts_len = len(self.shifts) if method == 'linreg' else 512
        # self.best_idx_counts = np.zeros(counts_len, dtype=np.int64)
        self.best_coef_counts = collections.Counter()
        self.best_offset_counts = collections.Counter()
    def feed_group(self, group):
        """Consume one flat group of samples; return per-sample prediction errors.

        Builds the target ``y`` (successive deltas of ``group``) and the
        predictor ``x`` (the previous-step deltas), then updates the learned
        coefficient state according to ``self.method``. Mutates
        ``self.last_val``, ``self.last_delta``, the counters, and the
        logging tallies.
        """
        pass # TODO determine optimal filter here
        # errhat = a*x0 - b*x0 - a*x1 + b*x1
        # = a(x0 - x1) + b(x1 - x0)
        # = c(x0 - x1), where c = (a - b)
        #
        # we should compute c, and find shifts (which correspond to a, b) that
        # approximate it well; also note that errhat is prediction of the delta
        #
        # this is just linear regression between (x0 - x1) and new val, with
        # some extra logic at the end to get shifts based on regression coeff
        # deltas; these are our target variable
        deltas = np.zeros(group.size, dtype=group.dtype)
        deltas[1:] = group[1:] - group[:-1]
        deltas[0] = group[0] - self.last_val
        self.last_val = group[-1]
        # deltas from previous time step; these are our indep variable
        diffs = np.zeros(group.size, dtype=group.dtype)
        diffs[1:] = deltas[:-1]
        diffs[0] = self.last_delta
        self.last_delta = deltas[-1]
        x = diffs
        y = deltas
        # linear regression
        if self.method == 'linreg':
            # least-squares fit y ~= c*x, then snap c to the nearest
            # representable two-shift coefficient via binary search
            Sxy = np.sum(x * y)
            Sxx = np.sum(x * x)
            # print "x, y dtypes: ", x.dtype, y.dtype
            # print "Sxx, Sxy dtypes: ", Sxx.dtype, Sxy.dtype
            # NOTE: raises ZeroDivisionError if this group's diffs are all zero
            coeff = (Sxy << 8) / Sxx # shift to mirror what we'll need to do in C
            idx = binary_search(self.shift_coeffs, coeff)
            def compute_errs(x, y, shifts):
                predictions = (x >> shifts[0]) - (x >> shifts[1])
                return y - predictions
            # These are commented out because, empirically, they're
            # *never* chosen
            #
            # best_idx_offset = 0
            #
            # def compute_total_cost(errs, block_sz=self.block_sz):
            #     raw_costs = compress.nbits_cost(errs)
            #     block_costs_rows = raw_costs.reshape(-1, block_sz)
            #     block_costs = np.max(block_costs_rows, axis=1)
            #     return np.sum(block_costs)
            #
            # cost = compute_total_cost(errs)
            # if idx > 0:
            #     errs2 = compute_errs(x, y, SHIFTS[idx - 1])
            #     cost2 = compute_total_cost(errs)
            #     if cost2 < cost:
            #         ret = errs2
            #         best_idx_offset = -1
            # if idx < (len(SHIFTS) - 1):
            #     errs3 = compute_errs(x, y, SHIFTS[idx + 1])
            #     cost3 = compute_total_cost(errs)
            #     if cost3 < cost:
            #         ret = errs3
            #         best_idx_offset = 1
            # self.best_idx_offset_counts[best_idx_offset] += 1
            errs = compute_errs(x, y, self.shifts[idx])
            self.best_idx_counts[idx] += 1 # for logging
        elif self.method == 'gradient':
            # update coeffs using last entry in each block
            # learning_rate_shift = 7 # learning rate of 2^(-learning_rate_shift)
            # learning_rate_shift = 8 # learning rate of 2^(-learning_rate_shift)
            # learning_rate_shift = 12 # learning rate of 2^(-learning_rate_shift)
            # learning_rate_shift = 4 # learning rate of 2^(-learning_rate_shift)
            # learning_rate_shift = 2 # learning rate of 2^(-learning_rate_shift)
            predictions = (x * self.coef) >> int(min(self.numbits, 8))
            for tap_idx in range(1, self.ntaps):
                predictions[tap_idx:] += (x[:-tap_idx] * self.coeffs[tap_idx])
            predictions += self.offset
            errs = y - predictions
            # NOTE: which_idxs below reaches element 8*7 + 7 = 63, so this
            # branch assumes len(group) >= 64
            for b in range(8): # for each block
                # only update based on a few values for efficiency
                which_idxs = 8 * b + np.array([3, 7]) # downsample by 4
                # which_idxs = 8 * b + np.array([1, 3, 5, 7]) # downsample by 2
                grads = 0
                # grads = np.zeros(self.ntaps)
                # offsets = 0
                for idx in which_idxs:
                    xval = x[idx]
                    # xval = x[idx] >> (self.numbits - 8)
                    # grad = int(-errs[idx] * x[idx]) >> 8
                    # grad = int(-errs[idx] * x[idx]) // 256
                    # y0 = np.abs(self.approx_256_over_x) * np.sign(xval)
                    # y0 = 1 + (256 - xval) >> 8
                    # y0 = 3 - ((3 * xval) >> 8)
                    # grad = int(-(errs[idx] << 8) / xval) if xval != 0 else 0 # works great
                    # self.counter -= grad # equivalent to above two lines
                    # if self.t % 100 == 0:
                    # print "grad:", grad
                    # continue
                    # # xabs = self.t # TODO rm
                    # xabs = np.abs(xval)
                    # if xabs == 0:
                    # lzcnt = self.numbits
                    # else:
                    # lzcnt = self.numbits - 1 - int(np.log2(xabs))
                    # lzcnt = max(0, lzcnt - 1) # round up to nearest power of 2
                    # # lzcnt = min(15, lzcnt + 1) # round up to nearest power of 2
                    # # numerator = 1 << self.numbits
                    # # recip = 1 << (lzcnt - 8) if lzcnt >= 8 else
                    # # recip = np.sign(xval) << (8 + lzcnt)
                    # shift_amt = max(0, lzcnt - (self.numbits - 8)) # usually 0, maybe 1 sometimes
                    # recip = (1 << shift_amt) * np.sign(xval)
                    # grad = int(-errs[idx] * recip)
                    # # grad = int(grad / len(which_idxs))
                    # normal grad descent
                    # grad = int(-errs[idx] * np.sign(xval)) # div by sqrt(hessian)
                    # grad = int(-errs[idx] * xval) >> self.numbits # true gradient
                    # approx newton step for log(nbits)
                    err = errs[idx]
                    # if False: # TODO rm
                    # if self.numbits > 8:
                    # grad = int(-(1 + err)) if err > 0 else int(-(err - 1))
                    # else:
                    # grad = int(-err) # don't add 1
                    # self.grad_counter += (grad - (self.grad_counter >> 8))
                    # wtf this works so well for 16b, despite ignoring sign of x...
                    # (when also only shifting counter by learning rate, not
                    # an additional 8)
                    # grad = -err
                    # grad = -(err + np.sign(err)) * np.sign(xval)
                    # grad = -err * np.sign(xval)
                    # these both seem to work pretty well; prolly need to directly
                    # compare them
                    # grad = -err * np.sign(xval)
                    # grad = -np.sign(err) * xval # significantly better than prev line
                    grad = np.sign(err) * xval # significantly better than prev line
                    # ^ duh; above is minimizer for L1 loss
                    # grad = -np.sign(err) * np.sign(xval) << (self.numbits - 8)
                    # sub_from = ((1 << self.numbits) - 1) * np.sign(xval)
                    # approx_recip_x = sub_from - xval
                    # grad = -np.sign(err) * approx_recip_x
                    grads += int(grad)
                    # grads += grad >> 1 # does this help with overflow?
                    # simulate int8 overflow, adjusted for fact that we do 8 blocks
                    # per group (so 1024, 2048 instead of 128, 256)
                    mod = int(1 << self.numbits)
                    offset = mod // 2
                    grads = ((grads + offset) % mod) - offset
                    # grads = ((grads + 1024) % 2048) - 1024 # wrecks accuracy
                    # grads = ((grads + 8192) % 16384) - 8192 # no effect
                    self.errs.append(err)
                    # offsets += np.sign(err) # optimize bias for l1 loss
                    # this is the other one we should actually consider doing
                    #
                    # grad = int(-errs[idx] * np.sign(xval))
                    # # approximation of what we'd end up doing with a LUT
                    # shift_to_just_4b = self.numbits - 4
                    # # y0 = ((xval >> shift_to_just_4b) + 1) << shift_to_just_4b
                    # shifted_xval = xval >> shift_to_just_4b
                    # if shifted_xval != 0:
                    # y0 = int(256. / shifted_xval) << shift_to_just_4b
                    # else:
                    # y0 = 16*np.sign(xval) << shift_to_just_4b
                    # # y0 = y0 * int(2 - (xval * y0 / 256)) # diverges
                    # y0 = int(256. / xval) if xval else 0
                    # y0 = (1 << int(8 - np.floor(np.log2(xval)))) * np.sign(xval)
                    # y0 = 4 * np.sign(xval)
                    # self.approx_256_over_x = int( y0*(2 - (int(xval*y0) >> 8)) ) # noqa # doesn't work
                    # grad = int(-errs[idx] * self.approx_256_over_x)
                    # grad = int(-errs[idx] * y0)
                    # grad = int(-errs[idx] * xval) # works
                    # grad = int(-errs[idx] * 2*np.sign(xval))
                    # this_best_coef = self.coef - grad
                    # self.counter += this_best_coef - self.coef
                    # self.counter -= grad # equivalent to above two lines
                    # self.counter -= grad >> learning_rate_shift
                # if self.t < 8:
                # if self.t % 50 == 0:
                # if (self.t < 5 == 0) and (b == 0):
                # if (self.t % 50 == 0) and (b == 0):
                # # print "errs: ", errs[-7], errs[-5], errs[-3], errs[-1]
                # print "t, b = ", self.t, b
                # print "errs: ", errs[-10:]
                # print "xs: ", x[-10:]
                # # print "sum(|xs|)", np.sum(np.abs(x))
                # print "grads: ", grads
                # print "counter:", self.counter
                # # print "grad counter:", self.grad_counter
                # # # print "recip, grad: ", recip, grad
                # self.coef = self.counter >> min(self.t, learning_rate_shift)
                # self.coef = self.counter >> learning_rate_shift
                learning_rate_shift = 1
                # learning_rate_shift = 4
                # grad_learning_shift = 1
                # grad_learning_shift = 4
                # offset_learning_shift = 4
                # compute average gradient for batch
                # grad = int(4 * grads / len(which_idxs)) # div by 16
                grad = int(grads / len(which_idxs)) # div by 64
                # grad = grads
                # self.grad_counter += grad - (self.grad_counter >> grad_learning_shift)
                # self.grad_counter += grad
                #
                # this is the pair of lines that we know works well for UCR
                #
                # self.counter -= grad
                self.counter += grad
                self.coef = self.counter >> (learning_rate_shift + (self.numbits - 8))
                # self.coef = self.counter >> learning_rate_shift
                # self.coef -= (self.grad_counter >> grad_learning_shift) >> learning_rate_shift
                # learn_shift = int(min(learning_rate_shift, np.log2(self.t + 1)))
                # self.coef = self.counter >> (learn_shift + (self.numbits - 8))
                # self.coef = self.counter >> learn_shift # for use with l1 loss
                # self.coef -= (self.grad_counter >> grad_learning_shift) >> learn_shift
                # self.coef -= (self.grad_counter >> grad_learning_shift) >> learning_rate_shift
                # self.coef = 192 # global soln for olive oil
                # quantize coeff by rounding to nearest 16; this seems to help
                # quite a bit, at least for stuff that really should be double
                # delta coded (starlight curves, presumably timestamps)
                # self.coef = ((self.coef + 8) >> 4) << 4
                self.coef = (self.coef >> 4) << 4 # just round towards 0
                # self.coef = (self.coef >> 5) << 5 # just round towards 0
                # like above, but use sign since shift and unshift round towards 0
                # EDIT: no apparent difference, though perhaps cuz almost nothing
                # actually wants a negative coef
                # self.coef = ((self.coef + 8 * np.sign(self.coef)) >> 4) << 4
                # offset = int(offsets / len(which_idxs)) # div by 64
                # self.offset_counter += offset
                # # self.offset = self.offset_counter >> offset_learning_shift
                # self.offset = 0 # offset doesn't seem to help at all
                # self.coef = 0 # why are estimates biased? TODO rm
                # self.coef = 256
                # self.coef = self.counter
                # self.coef = np.clip(self.coef, -256, 256) # apparently important
                # self.coef = np.clip(self.coef, -128, 256) # apparently important
                # if self.t < 8:
                # if self.t % 100 == 0:
                # print "----- t = {}".format(self.t)
                # print "offset, offset counter: ", self.offset, self.offset_counter
                # # print "grad, grads sum: ", grad, grads
                # # print "learn shift: ", learn_shift
                # # print "errs[:10]: ", errs[:16]
                # # print "-grads[:10]: ", errs[:16] * x[:16]
                # # print "signed errs[:10]: ", errs[:16] * np.sign(x[:16])
                # print "new coeff, grad_counter, counter = ", self.coef, self.grad_counter, self.counter
                # # print "new coeff, grad counter = ", self.coef, self.grad_counter
                # self.best_idx_counts[self.coef] += 1 # for logging
                self.best_coef_counts[self.coef] += 1
                self.best_offset_counts[self.offset] += 1
            # errs -= self.offset # do this at the end to not mess up training
        elif self.method == 'exact':
            # online least squares via exponentially-weighted running sums
            # print "using exact method"
            if self.numbits <= 8:
                predictions = (x * self.coef) >> self.numbits
            else:
                predictions = ((x >> 8) * self.coef)
            errs = y - predictions
            learn_shift = 6
            # shift = learn_shift + 2*self.numbits - 8
            shift = learn_shift
            # only update based on a few values for efficiency
            start_idx = 0 if self.t > 0 else 8
            for idx in np.arange(start_idx, len(x), 8):
                # xval = x[idx] # >> (self.numbits - 8)
                # yval = y[idx] # >> (self.numbits - 8)
                xval = x[idx] >> (self.numbits - 8)
                yval = y[idx] >> (self.numbits - 8)
                # # this way works just like global one, or maybe better
                # self.Sxx += xval * xval
                # self.Sxy += xval * yval
                # moving average way; seemingly works just as well
                # Exx = self.Sxx >> learn_shift
                # Exy = self.Sxy >> learn_shift
                Exy = self.Sxy >> shift
                Exx = self.Sxx >> shift
                # adjust_shift = 2 *
                diff_xx = (xval * xval) - Exx
                diff_xy = (xval * yval) - Exy
                self.Sxx += diff_xx
                self.Sxy += diff_xy
                # if min(self.Sxy, self.Sxx) >= 1024:
                # self.Sxx /= 2
                # self.Sxy /= 2
                Exy = self.Sxy >> shift
                Exx = self.Sxx >> shift
                # NOTE: raises ZeroDivisionError while Exx is still 0
                self.coef = int((Exy << 8) / Exx) # works really well
                # none of this really works
                # # print "Exy, Exx = ", Exy, Exx
                # print "xval, yval: ", xval, yval
                # print "diff_xx, diff_xy, Exy, Exx = ", diff_xx, diff_xy, Exy, Exx
                # # numerator = 1 << (2 * self.numbits)
                # numerator = 256
                # nbits = int(min(4, np.log2(Exx))) if Exx > 1 else 1
                # assert numerator >= np.abs(Exx)
                # # print "nbits: ", nbits
                # recip = int((numerator >> nbits) / (Exx >> nbits)) << nbits
                # # recip = recip >> (2 * self.numbits - 8)
                # print "numerator, recip: ", numerator, recip
                # self.coef = int(Exy * recip)
                self.best_coef_counts[self.coef] += 1
        # NOTE: if self.method matched none of the branches above, `errs`
        # is unbound here and this raises NameError
        self.t += 1
        return errs
# while (first <= last) {
# if (array[middle] < search)
# first = middle + 1;
# else if (array[middle] == search) {
# printf("%d found at location %d.\n", search, middle+1);
# break;
# }
# else
# last = middle - 1;
# middle = (first + last)/2;
# }
def sub_online_regress(blocks, verbose=0, group_sz_blocks=8, max_shift=4,
                       only_16_shifts=True, method='linreg', numbits=8,
                       drop_first_half=False, **sink):
    """Run an OnlineRegressor over `blocks` group-by-group; return the
    per-sample prediction errors with the same shape as `blocks`.

    `blocks` is a 2D array of (block, sample-within-block); groups of
    `group_sz_blocks` consecutive blocks are flattened and fed to the
    encoder. Trailing blocks that don't fill a whole group are passed
    through unmodified. `**sink` swallows unused kwargs from callers.
    """
    # drop_first_half=True, **sink):
    blocks = blocks.astype(np.int32)
    if only_16_shifts:
        shifts = SHIFT_PAIRS_16
        shift_coeffs = [_i16_for_shifts(*pair) for pair in shifts]
    else:
        shifts, shift_coeffs = all_shifts(max_shift=max_shift)
    encoder = OnlineRegressor(block_sz=blocks.shape[1], verbose=verbose,
                              shifts=shifts, shift_coeffs=shift_coeffs,
                              method=method, numbits=numbits)
    # print "using group_sz_blocks: ", group_sz_blocks
    # print "using method: ", method
    # print "using nbits: ", numbits
    out = np.empty(blocks.shape, dtype=np.int32)
    if group_sz_blocks < 1:
        group_sz_blocks = len(blocks) # global model
    ngroups = int(len(blocks) / group_sz_blocks)
    for g in range(ngroups):
        # if verbose and (g > 0) and (g % 100 == 0):
        # print "running on block ", g
        start_idx = g * group_sz_blocks
        end_idx = start_idx + group_sz_blocks
        group = blocks[start_idx:end_idx]
        errs = encoder.feed_group(group.ravel())
        out[start_idx:end_idx] = errs.reshape(group.shape)
    # NOTE(review): if ngroups == 0 (fewer blocks than group_sz_blocks),
    # end_idx is unbound here and this raises NameError -- callers appear
    # to assume len(blocks) >= group_sz_blocks
    out[end_idx:] = blocks[end_idx:]
    if verbose > 1:
        if method == 'linreg':
            if group_sz_blocks != len(blocks):
                import hipsterplot as hp # pip install hipsterplot
                # hp.plot(x_vals=encoder.shift_coeffs, y_vals=encoder.best_idx_counts,
                hp.plot(encoder.best_idx_counts,
                        num_x_chars=len(encoder.shift_coeffs), num_y_chars=12)
            else:
                coef_idx = np.argmax(encoder.best_idx_counts)
                coef = encoder.shift_coeffs[coef_idx]
                print("global linreg coeff: ", coef)
        else:
            coeffs_counts = np.array(encoder.best_coef_counts.most_common())
            print("min, max coeff: {}, {}".format(
                coeffs_counts[:, 0].min(), coeffs_counts[:, 0].max()))
            # print("most common (coeff, counts):\n", coeffs_counts[:16])
            # bias_counts = np.array(encoder.best_offset_counts.most_common())
            # print "most common (bias, counts):\n", bias_counts[:16]
            errs = np.array(encoder.errs)
            print("raw err mean, median, std, >0 frac: {}, {}, {}, {}".format(
                errs.mean(), np.median(errs), errs.std(), np.mean(errs > 0)))
    if drop_first_half and method == 'gradient':
        keep_idx = len(out) // 2
        out[:keep_idx] = out[keep_idx:(2*keep_idx)]
        print("NOTE: duplicating second half of data into first half!!" \
              " (blocks {}:)".format(keep_idx))
    return out
def _test_moving_avg(x0=0):
# vals = np.zeros(5, dtype=np.int32) + 100
vals = np.zeros(5, dtype=np.int32) - 100
shft = 3
counter = x0 << shft
xhats = []
for v in vals:
xhat = counter >> shft
xhats.append(xhat)
counter += (v - xhat)
print("vals: ", vals)
print("xhats: ", xhats)
# ================================================================ main
def main():
    """Ad-hoc driver: configure numpy float formatting, run the EMA demo."""
    # Render any floats in printed arrays with three decimal places.
    float_fmt = {'float': lambda v: '{:.3f}'.format(v)}
    np.set_printoptions(formatter=float_fmt)
    _test_moving_avg()
# Script entry point.
if __name__ == '__main__':
    main()
| 40.592375 | 109 | 0.489091 | 18,348 | 0.662765 | 0 | 0 | 0 | 0 | 0 | 0 | 14,253 | 0.514846 |
10de22358037cf8ccf5fee7e45edea840e4276ac | 159 | py | Python | run.py | radish2012/flask-restful-example | 972c720cee9819d030f9889a8535a444277b874e | [
"MIT"
] | 650 | 2019-07-08T09:09:25.000Z | 2022-03-31T08:01:43.000Z | run.py | radish2012/flask-restful-example | 972c720cee9819d030f9889a8535a444277b874e | [
"MIT"
] | 5 | 2020-01-14T05:35:37.000Z | 2022-03-11T23:46:39.000Z | run.py | radish2012/flask-restful-example | 972c720cee9819d030f9889a8535a444277b874e | [
"MIT"
] | 222 | 2019-07-15T01:52:03.000Z | 2022-03-28T05:32:21.000Z | from app.factory import create_app, celery_app
# Build the Flask app from the application factory with the DEVELOPMENT profile.
app = create_app(config_name="DEVELOPMENT")
# Push an application context so app-bound setup works at import time
# (presumably needed by the celery_app import above -- confirm).
app.app_context().push()
if __name__ == "__main__":
    # Run the built-in development server.
    app.run()
| 19.875 | 46 | 0.735849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.144654 |
10de725aa99b82e35e8d69fc01aaec8b46a671ed | 4,405 | py | Python | __init__.py | KAndreyK/gachicmd | a4f55276363c49389bba95ab7d4ed28faecc5681 | [
"MIT"
] | null | null | null | __init__.py | KAndreyK/gachicmd | a4f55276363c49389bba95ab7d4ed28faecc5681 | [
"MIT"
] | null | null | null | __init__.py | KAndreyK/gachicmd | a4f55276363c49389bba95ab7d4ed28faecc5681 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- encoding: utf-8 -*-
"""Command line interpreter right version."""
import sys
import cmd
import random
import os
import pyglet.media
import webbrowser
import time
# Phrase dictionary: one phrase per line of the bundled text file.
gachidict = []
with open('gachicmd' + os.sep + 'gachidict.txt') as f:
    gachidict = f.read().splitlines()
# Hero roster: one hero name per line.
heroes = []
with open('gachicmd' + os.sep + 'gachiheroes.txt') as f:
    heroes = f.read().splitlines()
# Path fragment (with leading separator) to the bundled mp3 clips;
# joined with os.getcwd() at playback time.
soundsdir = os.sep + 'gachicmd' + os.sep + 'sounds'
class GachiShell(cmd.Cmd):
    # Interactive Gachimuchi-themed shell built on cmd.Cmd. The do_* method
    # docstrings double as the text shown by the built-in `help` command, so
    # they are kept verbatim.
    # NOTE: `intro` is an f-string evaluated once at class-definition time,
    # so the greeting always uses the default username ('slave') even after
    # `username` is reassigned on an instance.
    username = 'slave'
    intro = f'Nice to see you in the gym, {username}. Type help or ? to list commands.\n'
    prompt = '(gym)> '
    file = None
    def _play_sound(self, filename, wait=False):
        # Play one mp3 from the sounds directory (non-blocking by default).
        # When `wait` is True, sleep until the clip has finished playing.
        path = os.getcwd() + soundsdir
        mp3 = pyglet.media.load(os.path.join(path, filename))
        mp3.play()
        if wait:
            time.sleep(mp3.duration)
    def do_fisting(self, arg):
        'fisting is 300$'
        self.username = 'dungeon master'
        self._play_sound("fisting-is-300-bucks.mp3")
        print("Fisting is 300$")
    def do_choose_hero(self, arg):
        """become gachi hero
        options: -l, --list: show list of hero names"""
        if arg == '--list' or arg == '-l':
            print(heroes)
            return
        if arg.isspace() or arg == '':
            print("You don't print hero name, slave")
            return
        # Substring match so partial names select a hero.
        for hero in heroes:
            if arg in hero:
                self.username = hero
                print(f"Nice to fuck you, {self.username}")
                return
        print("Hero don`t found, slave")
    def do_mansingym(self, arg):
        'Who is in gym'
        # Show a random-length prefix of the hero list in shuffled order.
        l = list(range(0, random.randint(0, len(heroes) - 1)))
        random.shuffle(l)
        for i in l:
            print(heroes[i])
    def do_do_anal(self, arg):
        'AAAAAAAAAA'
        self.username = 'dungeon master'
        self._play_sound("AAAAAAA.mp3")
        print("AAAAAAAAAAAA")
    def do_slave(self, arg):
        'for slaves'
        print('Gotcha! You`ve been gachirickrolled :)')
        webbrowser.open('https://youtu.be/_rW9a9Z4YbI')
    def do_im_master(self, arg):
        'become dungeon master'
        self.username = 'dungeon master'
        self._play_sound("oh-shit-iam-sorry.mp3")
        print(f"Of course you are {self.username}.")
    def do_sounds(self, arg):
        """
        Usage: sounds [options]
        Default: turn on all sounds
        Options:
        [words]: turn sounds, which has this word(s) in name.
        -l, --list: show list available names of sounds.
        """
        files = os.listdir(os.getcwd() + soundsdir)
        files = [f for f in files if f[-3:] == 'mp3']
        good = 0
        if arg == '--list' or arg == '-l':
            print(files)
            return
        for f in files:
            # Filenames use '-' as a word separator; match against spaces.
            tmp = f.replace('-', ' ')
            if arg in tmp:
                # Block until each matching clip finishes so clips play in
                # sequence rather than all at once.
                self._play_sound(f, wait=True)
                good = 1
        if not good:
            print(f'This sound not in gym, {self.username}')
    def do_random_sounds(self, arg):
        'gachi sounds'
        files = os.listdir(os.getcwd() + soundsdir)
        files = [f for f in files if f[-3:] == 'mp3']
        if not files:
            # Guard: random.randint(0, -1) would raise on an empty directory.
            print(f'This sound not in gym, {self.username}')
            return
        self._play_sound(files[random.randint(0, len(files) - 1)])
    def do_phrase(self, arg):
        'random gachi phrase'
        # The windows branch strips 3 chars from each end -- presumably a
        # quirk of the dictionary file's encoding; confirm the file format.
        if sys.platform.startswith('linux'):
            print(gachidict[random.randint(0, len(gachidict) - 1)])
        elif sys.platform.startswith('win'):
            print(gachidict[random.randint(0, len(gachidict) - 1)][3:-3])
    def do_fuckyou(self, arg):
        'Go out from gym'
        print(f'Fisting you soon, {self.username}\n')
        self.close()
        # Returning True tells cmd.Cmd to stop the command loop.
        return True
    def do_exit(self, arg):
        'Go out from gym'
        return self.do_fuckyou(arg)
    def close(self):
        # Release the transcript file if one was ever opened.
        if self.file:
            self.file.close()
            self.file = None
def main():
    # Start the interactive command loop; returns None when the loop exits.
    GachiShell().cmdloop()
# sys.exit(None) exits with status 0, since main() returns None.
if __name__ == '__main__':
    sys.exit(main())
| 30.171233 | 94 | 0.528944 | 3,826 | 0.868558 | 0 | 0 | 0 | 0 | 0 | 0 | 1,209 | 0.274461 |
10de9939df6d76398eb74dc52c2cb120b2d2c88f | 1,571 | py | Python | scripts/check_schemas.py | chasleslr/dagster | 88907f9473fb8e7a9b1af9a0a8b349d42f4b8153 | [
"Apache-2.0"
] | null | null | null | scripts/check_schemas.py | chasleslr/dagster | 88907f9473fb8e7a9b1af9a0a8b349d42f4b8153 | [
"Apache-2.0"
] | null | null | null | scripts/check_schemas.py | chasleslr/dagster | 88907f9473fb8e7a9b1af9a0a8b349d42f4b8153 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=print-call
import importlib
import pkgutil
import dagster
import sqlalchemy as sa
def check_schema_compat(schema):
    """We run this check to ensure that we don't have any schema columns that are incompatible with
    MySQL.
    """
    for attr_name in dir(schema):
        table = getattr(schema, attr_name)
        if not isinstance(table, sa.Table):
            continue
        print(attr_name, table)
        for col in table.columns:
            print(f"  -{col}: {col.type}")
            if isinstance(col.type, sa.types.VARCHAR):
                raise Exception(
                    f"Column {col} is type VARCHAR; cannot use bare db.String type as "
                    "it is incompatible with certain databases (MySQL). Use either a "
                    "fixed-length db.String(123) or db.Text instead."
                )
        print()
if __name__ == "__main__":
schema_modules = set()
def list_submodules(package_name):
for _, module_name, is_pkg in pkgutil.walk_packages(
package_name.__path__, package_name.__name__ + "."
):
# Collect all of the dagster.core.storage.*.schema modules
if module_name.endswith("schema"):
schema_modules.add(module_name)
module_name = __import__(module_name, fromlist="dummylist")
if is_pkg:
list_submodules(module_name)
list_submodules(dagster.core.storage)
for schema_module in schema_modules:
check_schema_compat(importlib.import_module(schema_module))
| 33.425532 | 99 | 0.60662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 446 | 0.283896 |
10df133a433492185e9da42e3e25c81ed4744f41 | 2,206 | py | Python | src/main/resources/alm/CreateDefect.py | xebialabs-community/xlr-hpalm-plugin | e374f9082595a34afe7bef8e644715b2b0462c33 | [
"MIT"
] | null | null | null | src/main/resources/alm/CreateDefect.py | xebialabs-community/xlr-hpalm-plugin | e374f9082595a34afe7bef8e644715b2b0462c33 | [
"MIT"
] | 1 | 2019-05-13T17:36:45.000Z | 2019-05-13T17:36:45.000Z | src/main/resources/alm/CreateDefect.py | xebialabs-community/xlr-hpalm-plugin | e374f9082595a34afe7bef8e644715b2b0462c33 | [
"MIT"
] | 3 | 2019-05-10T17:48:20.000Z | 2020-07-08T14:22:20.000Z | #
# Copyright 2021 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from alm.almClientUtil import almClientUtil
from java.util import Calendar
import json
# NOTE(review): this is a Jython/Python 2 script (print statement, java.util
# import). Names such as server, username, password, title, description,
# severity, detectedBy, additionalFields, domain and project are not defined
# in this file -- presumably injected into the execution context by the
# XL Release task runner; confirm against the plugin's task definition.
alm_client = almClientUtil.create_alm_client(server, username, password)
# Authenticate once to obtain session cookies, then rebuild the client so
# subsequent calls reuse the authenticated session.
cookies = alm_client.login()
alm_client = almClientUtil.create_alm_client(server, cookies=cookies.get_dict())
# Payload for the ALM REST defect-creation endpoint.
content = {
    "data": [
        {
            "type": "defect",
            "name": title,
            "description": description,
            "severity": severity,
            "detected-by": detectedBy,
            "creation-time": "%s-%s-%s"
            % (
                str(Calendar.getInstance().get(Calendar.YEAR)),
                str(Calendar.getInstance().get(Calendar.MONTH) + 1), # zero indexed
                str(Calendar.getInstance().get(Calendar.DAY_OF_MONTH)),
            ),
        }
    ]
}
# Merge any caller-supplied extra fields into the defect record.
for additionalField in additionalFields.keys():
    content["data"][0][additionalField] = additionalFields[additionalField]
result = alm_client.create_defect(domain, project, json.dumps(content))
defectId = result["data"][0]["id"]
# Serialized response -- presumably exposed as a task output property; confirm.
output = json.dumps(result)
print "Successfully created a defect with id [ %s ]" % defectId
logout = alm_client.logout()
| 50.136364 | 462 | 0.714415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,218 | 0.552131 |
10df8daedd4922d6712d342fc7421a530de31aea | 527 | py | Python | extra_apps/xadmin/plugins/utils.py | Fors3cDream/DjX | cac207b84923a558da83b0cad15f736f5e6faad2 | [
"Apache-2.0"
] | 3 | 2017-05-30T14:29:21.000Z | 2018-08-06T08:29:25.000Z | extra_apps/xadmin/plugins/utils.py | Fors3cDream/DjX | cac207b84923a558da83b0cad15f736f5e6faad2 | [
"Apache-2.0"
] | 1 | 2021-02-08T20:51:30.000Z | 2021-02-08T20:51:30.000Z | extra_apps/xadmin/plugins/utils.py | Fors3cDream/DjX | cac207b84923a558da83b0cad15f736f5e6faad2 | [
"Apache-2.0"
] | 1 | 2017-08-23T06:49:46.000Z | 2017-08-23T06:49:46.000Z | from django.template.context import RequestContext
def get_context_dict(context):
    """
    Contexts in django version 1.9+ must be dictionaries. As xadmin has a legacy with older versions of django,
    the function helps the transition by converting the [RequestContext] object to the dictionary when necessary.

    :param context: RequestContext instance or plain dict
    :return: dict (the merged contents of all of the RequestContext's layers,
        or ``context`` itself when it is already a dict)
    """
    if isinstance(context, RequestContext):
        ctx = {}
        # BUG FIX: ``map(ctx.update, context.dicts)`` is lazy in Python 3 --
        # it builds an iterator that is never consumed, so ``update`` was
        # never called and an empty dict was returned. Iterate explicitly.
        for layer in context.dicts:
            ctx.update(layer)
    else:
        ctx = context
    return ctx
| 31 | 113 | 0.698292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.552182 |
10e08d796a74b4eb31783e866cf66948364093d9 | 2,144 | py | Python | tests/wallet_api/test_mining.py | beatsbears/pyburstlib | 27722f7f3bb0bc5739110f6c99435d13fa54a1e0 | [
"MIT"
] | 7 | 2018-03-24T17:26:27.000Z | 2020-06-09T10:38:44.000Z | tests/wallet_api/test_mining.py | MrPilotMan/pyburstlib | 27722f7f3bb0bc5739110f6c99435d13fa54a1e0 | [
"MIT"
] | 2 | 2019-09-09T17:06:43.000Z | 2021-06-01T21:20:08.000Z | tests/wallet_api/test_mining.py | MrPilotMan/pyburstlib | 27722f7f3bb0bc5739110f6c99435d13fa54a1e0 | [
"MIT"
] | 4 | 2018-04-10T12:50:55.000Z | 2022-03-14T20:30:01.000Z | '''
pyburstlib
:author: drownedcoast
:date: 4-26-2018
'''
import pytest
from pyburstlib.wallet_api.models.mining import *
from tests.base import BaseTest
from tests.config import PyBurstLibConfig
@pytest.mark.api
class TestMiningApi(BaseTest):
    """Integration tests for the wallet mining API endpoints.

    ``client`` is presumably a pytest fixture providing a configured wallet
    API client (see the project's conftest -- confirm). These tests require
    a reachable wallet configured via PyBurstLibConfig.
    """
    def setup(self):
        # Test account identifiers pulled from config before each test.
        self.TEST_ACCOUNT_NUMERIC = PyBurstLibConfig.get('account_id')
        self.TEST_ACCOUNT_ADDRESS = PyBurstLibConfig.get('account_address')
    def test_mining_get_accounts_with_reward_recipient(self, client):
        # The queried account should appear among its own reward recipients.
        accts = client.wallet_mining_api.get_accounts_with_reward_recipient(account_id=self.TEST_ACCOUNT_NUMERIC)
        assert isinstance(accts, Accounts)
        assert self.TEST_ACCOUNT_NUMERIC in accts.accounts
    def test_mining_get_mining_info(self, client):
        # Smoke test: response deserializes into the MiningInfo model.
        info = client.wallet_mining_api.get_mining_info()
        assert isinstance(info, MiningInfo)
    def test_mining_get_reward_recipient(self, client):
        # The test account is its own reward recipient.
        reward = client.wallet_mining_api.get_reward_recipient(account_id=self.TEST_ACCOUNT_NUMERIC)
        assert isinstance(reward, RewardRecipient)
        assert self.TEST_ACCOUNT_NUMERIC == reward.rewardRecipient
    def test_mining_set_reward_recipient(self, client):
        # Setting the reward recipient returns a transaction charged at the
        # request model's default fee.
        reward_req = SetRewardRecipientRequest(
            recipient=PyBurstLibConfig.get('account_address'),
            secretPhrase=PyBurstLibConfig.get('account_pw')
        )
        set_reward = client.wallet_mining_api.set_reward_recipient(req=reward_req.as_dict())
        assert isinstance(set_reward, SetRewardRecipientResponse)
        assert isinstance(set_reward.transactionJSON, TransactionJSON)
        assert set_reward.transactionJSON.feeNQT == SetRewardRecipientRequest.DEFAULT_REWARD_RECIPIENT_FEE
    def test_mining_submit_nonce(self, client):
        # Submitting an arbitrary nonce should be accepted by the wallet.
        nonce = client.wallet_mining_api.submit_nonce(secret_pass=PyBurstLibConfig.get('account_pw'),
                                                      nonce="100000",
                                                      account_id=PyBurstLibConfig.get('account_id'))
        assert isinstance(nonce, SubmitNonceResponse)
        assert nonce.result == SubmitNonceResponse.SUCCESS
10e1be91406866f0d1fa96365de13d361b7ae9cd | 291 | py | Python | nltktest.py | HeikkiKeskustalo/Fuzzy-String-Matching | 21215d58473f2af72c039d6d3c186e18f04dd893 | [
"MIT"
] | null | null | null | nltktest.py | HeikkiKeskustalo/Fuzzy-String-Matching | 21215d58473f2af72c039d6d3c186e18f04dd893 | [
"MIT"
] | 7 | 2018-06-28T03:57:04.000Z | 2018-08-28T12:37:19.000Z | nltktest.py | EvoluzTampere/Fuzzy-String-Matching | 9137f23f9713e2778a8894cbfd3ef1dc34913599 | [
"MIT"
] | 1 | 2018-08-07T07:41:00.000Z | 2018-08-07T07:41:00.000Z | from nltk.stem.snowball import SnowballStemmer
# Demo: run NLTK's Finnish Snowball stemmer over a sample word list.
# NOTE(review): Python 2 ``print`` statement below -- this script will not
# parse under Python 3 without converting it to ``print(...)``.
letter = [u'kissassa']
for word in letter:
    # NOTE(review): a new SnowballStemmer is constructed on every iteration;
    # hoisting it out of the loop would avoid redundant work.
    print word, SnowballStemmer("finnish").stem(word)
# Planned pipeline (not implemented yet):
# read file (get a string)
# detect charset (UTF-8)
# decode (get a Unicode code point presentation)
# tokenize
# try out the Snowball stemmer
| 20.785714 | 53 | 0.738832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.542955 |
10e2118348f1c2b14089d5cf7bc1ddd8e43cd869 | 5,979 | py | Python | Linked-in Job scrapper.py | MMallah/Linked-in-Job-scrapper | 9a537849a8f62e278f091ca71d8041bb3b4541b0 | [
"MIT"
] | null | null | null | Linked-in Job scrapper.py | MMallah/Linked-in-Job-scrapper | 9a537849a8f62e278f091ca71d8041bb3b4541b0 | [
"MIT"
] | null | null | null | Linked-in Job scrapper.py | MMallah/Linked-in-Job-scrapper | 9a537849a8f62e278f091ca71d8041bb3b4541b0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Project Data Science Desired Skills
#
# this project will Answer the question of what are the most desired skills for Data Science in Egypt and UAE.
# * Extract data from Linked in Via webscrapping or Manually.
# * Perform data wrangling ( image recognition or NLP).
# * Segment the the job market based on skills.
# * Analyze the skill growth over time.
# Import libraries
# In[134]:
from selenium import webdriver
from selenium.webdriver.common.by import By
import pandas as pd
from scrapy import selector
import time
import operator
from functools import reduce
# ## step 1 Importing Data
#
# #### Selenium driver for chrome
# In[78]:
# instantiate a webdriver
# NOTE(review): hard-coded local chromedriver path -- only works on the
# original author's machine; consider a CLI argument or PATH lookup.
driver = webdriver.Chrome('C:\\Users\\mosta\\Downloads\\chromedriver_win32\\chromedriver.exe')
# use chrome to get the desired url (Data Scientist search, Cairo, Egypt)
driver.get('https://www.linkedin.com/jobs/search/?geoId=102007122&keywords=data%20scientist&location=Cairo%2C%20Egypt')
# In[61]:
# the job card container(link for job): one anchor element per search result
full_card_link= driver.find_elements_by_class_name("result-card__full-card-link")
# In[62]:
# view the Selenium elment count (notebook cell echo)
len(full_card_link)
# In[66]:
# collect the href of every result card into a plain list of URL strings
link_list=[]
for e in full_card_link:
    link_list.append(e.get_attribute('href'))
# In[67]:
len(link_list)
# In[166]:
# In[187]:
# echo the collected links for a visual sanity check
for l in range(len(link_list)):
    print(link_list[l])
# In[122]:
# create list place holder for data
# Module-level accumulators: get_job() appends exactly one entry per scraped
# posting to each list, keeping all ten lists index-aligned so they can later
# be zipped together into a DataFrame.
job_title=[]
company=[]
location=[]
posting_date=[]
applicants_count=[]
job_description=[]
job_seniority_level=[]
employment_type=[]
industry=[]
job_function=[]
# In[237]:
# create a function to iterate over job links
def get_job(links):
    """Scrape ten fields from each job-posting URL into the module-level lists.

    For every link the page is loaded in the shared Selenium ``driver`` and one
    value is appended to each accumulator list (job_title, company, ...), so the
    lists stay index-aligned even when an optional field is missing.

    :param links: iterable of job posting URLs
    """
    # iterate the URLs directly instead of indexing via range(len(...))
    for url in links:
        driver.get(url)
        job_title.append(driver.find_element_by_class_name("topcard__title").text)
        company.append(driver.find_element_by_class_name('topcard__flavor').text)
        location.append(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[2]/div[1]/div[1]/h3[1]/span[2]').text)
        posting_date.append(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[2]/div[1]/div[1]/h3[2]/span[1]').text)
        # applicant count is not present on every posting; append an empty
        # string so the lists keep the same length (narrowed from a bare except)
        try:
            applicants_count.append(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[2]/div[1]/div[1]/h3[2]/span[2]').text)
        except Exception:
            applicants_count.append('')
        job_description.append(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[3]/div/section/div').text)
        # seniority level is likewise optional
        try:
            job_seniority_level.append(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[3]/ul/li[1]/span').text)
        except Exception:
            job_seniority_level.append('')
        employment_type.append(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[3]/ul/li[2]/span').text)
        industry.append(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[3]/ul/li[4]/span').text)
        # join every job-function span into one comma-separated string;
        # ''.join produces the same result as reduce(operator.concat, ...)
        # for non-empty input and, unlike reduce, handles the empty case
        job_function.append(''.join(e.text + ", " for e in driver.find_elements_by_xpath('//*[@id="main-content"]/section[1]/section[3]/ul/li[3]/span')))
        time.sleep(5)  # throttle requests to avoid hammering the site
# In[241]:
#company
# In[239]:
#use the function to import the data
# NOTE(review): issues one page load plus a 5s sleep per link, so this cell
# takes roughly 5 * len(link_list) seconds to run.
get_job(link_list)
# In[243]:
# convert lists acqquired to a dataframe
# Assemble the ten index-aligned accumulator lists into one tidy DataFrame,
# one row per scraped job posting.
_columns = ['job_title', 'company', 'location', 'posting_date', 'applicants_count',
            'job_description', 'job_seniority_level', 'employment_type', 'industry',
            'job_function']
data = pd.DataFrame(
    list(zip(job_title, company, location, posting_date, applicants_count,
             job_description, job_seniority_level, employment_type, industry,
             job_function)),
    columns=_columns)
# In[245]:
# print the schema / non-null counts of the assembled DataFrame
data.info()
# In[200]:
# job title
#print(driver.find_element_by_class_name("topcard__title").text)
# In[98]:
# company
#print(driver.find_element_by_class_name('topcard__flavor').text)
# In[107]:
# job location
#print(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[2]/div[1]/div[1]/h3[1]/span[2]').text)
# In[110]:
# posting date
#print(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[2]/div[1]/div[1]/h3[2]/span[1]').text)
# In[112]:
# number of applicants
#rint(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[2]/div[1]/div[1]/h3[2]/span[2]').text)
# In[113]:
# description
#print(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[3]/div/section/div').text)
# In[206]:
# job_seniority_level
#print(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[3]/ul/li[1]/span').text)
# In[114]:
# employment_type
#print(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[3]/ul/li[2]/span').text)
# In[119]:
# job_function
# print each job-function span of the currently loaded posting
for e in(driver.find_elements_by_xpath('//*[@id="main-content"]/section[1]/section[3]/ul/li[3]/span')):
    print(e.text)
# In[145]:
#test=[]
#test.append(reduce(operator.concat,
#             [(e.text+ ", ") for e in driver.find_elements_by_xpath('//*[@id="main-content"]/section[1]/section[3]/ul/li[3]/span')]))
# In[146]:
# NOTE(review): ``test`` is only defined by the commented-out cell above, so
# this bare expression raises NameError when the script runs top-to-bottom.
test
# In[120]:
# industry
#print(driver.find_element_by_xpath('//*[@id="main-content"]/section[1]/section[3]/ul/li[4]/span').text)
# In[10]:
#job_card = driver.find_elements_by_class_name("results__detail-view")
# In[147]:
#for e in job_card:
# print(e.text)
# In[ ]:
# the right rail, jobs-search job details container
#body > div.application-outlet > div.authentication-outlet > div.job-search-ext > div > div > section.jobs-search__right-rail > div
# In[ ]:
# the path for the header
#/html/body/main/div/section[2]/ul/li[1]/a
# In[ ]:
# the show more tab where body is located
#/html/body/main/section/div[2]/section[2]/div/section/div
| 21.430108 | 147 | 0.680716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,808 | 0.636896 |
10e230dff4f183cac0ecd093228e7522ab70f334 | 26 | py | Python | 25_Assigment_Operator/main.py | jmmedel/Python-Tutorials- | 243ae9a6b51a4fce03dd90c02da13b859cbfbe5f | [
"MIT"
] | null | null | null | 25_Assigment_Operator/main.py | jmmedel/Python-Tutorials- | 243ae9a6b51a4fce03dd90c02da13b859cbfbe5f | [
"MIT"
] | null | null | null | 25_Assigment_Operator/main.py | jmmedel/Python-Tutorials- | 243ae9a6b51a4fce03dd90c02da13b859cbfbe5f | [
"MIT"
] | null | null | null |
# Demonstrate the bitwise-OR assignment: 0b101 | 0b011 == 0b111, i.e. 7.
x = 5
x = x | 3
print(x)
| 3.25 | 8 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
10e277afdb91cd3d6c4969cdf3e9d08b1012d482 | 3,424 | py | Python | email_extractor.py | lukeeckley/outlook-email-recovery | 94f6ce67d12e761eb9049d56f6a3950203d5d472 | [
"MIT"
] | null | null | null | email_extractor.py | lukeeckley/outlook-email-recovery | 94f6ce67d12e761eb9049d56f6a3950203d5d472 | [
"MIT"
] | null | null | null | email_extractor.py | lukeeckley/outlook-email-recovery | 94f6ce67d12e761eb9049d56f6a3950203d5d472 | [
"MIT"
] | null | null | null | import argparse
import os
import re
import sys
try:
import pypff
except ImportError:
print("[+] Install the libpff Python bindings to use this script")
sys.exit(1)
message_list = []
messages = 0
def main(pst_file, output_dir):
    """Open the PST file and walk its folder tree, counting processed messages.

    :param pst_file: path to the input PST file
    :param output_dir: output directory -- NOTE(review): currently unused here;
        the __main__ block creates it but nothing is ever written to it.
    """
    print("[+] Accessing {} PST file".format(pst_file))
    pst = pypff.open(pst_file)
    root = pst.get_root_folder()
    print("[+] Traversing PST folder structure")
    recursePST(root)
    # ``messages`` is the module-level counter incremented by processMessages()
    print("[+] Identified {} messages..".format(messages))
def recursePST(base):
for folder in base.sub_folders:
if folder.number_of_sub_folders:
recursePST(folder)
processMessages(folder)
def emailExtractor(item):
    """Pull the email address (and its domain) out of a header line.

    :param item: lower-cased header line, e.g. ``from: Name <user@host>``
        or ``return-path: user@host``
    :return: tuple ``(email, domain)``; ``domain`` is ``False`` when the
        extracted value contains no ``@``
    """
    if "<" in item:
        # angle-bracket form: take the text between '<' and '>'
        start = item.find("<") + 1
        stop = item.find(">")
        email = item[start:stop]
    else:
        # bare form: everything after the header name.  BUG FIX: split only on
        # the first ':' so values that themselves contain a colon are no
        # longer truncated (plain split(":")[1] stopped at the second colon).
        email = item.split(":", 1)[1].strip().replace('"', "")
    if "@" not in email:
        domain = False
    else:
        domain = email.split("@")[1].replace('"', "")
    return email, domain
def processMessages(folder):
    """Scan every message in *folder* for From / Reply-To / Return-Path headers.

    Messages carrying a From header plus at least one of Reply-To or
    Return-Path are forwarded to dumpMessage(); the module-level ``messages``
    counter is incremented once for every message seen.

    :param folder: pypff folder object
    """
    global messages
    print("[+] Processing {} Folder with {} messages".format(folder.name, folder.number_of_sub_messages))
    if folder.number_of_sub_messages == 0:
        return
    for message in folder.sub_messages:
        eml_from, replyto, returnpath = ("", "", "")
        messages += 1
        try:
            headers = message.get_transport_headers().splitlines()
        except AttributeError:
            # No email header
            continue
        # pick out the three headers of interest (case-insensitive prefix
        # match; a later occurrence of the same header overwrites an earlier)
        for header in headers:
            if header.strip().lower().startswith("from:"):
                eml_from = header.strip().lower()
            elif header.strip().lower().startswith("reply-to:"):
                replyto = header.strip().lower()
            elif header.strip().lower().startswith("return-path:"):
                returnpath = header.strip().lower()
        if eml_from == "" or (replyto == "" and returnpath == ""):
            # No FROM value or no Reply-To / Return-Path value
            continue
        dumpMessage(folder, message, eml_from, replyto, returnpath)
def dumpMessage(folder, msg, eml_from, reply, return_path):
    """Print the body of *msg*, preferring HTML, then plain text, then RTF.

    The sender / reply-to / return-path header lines are parsed with
    emailExtractor(); the parsed values are currently only extracted, not
    printed (kept so malformed headers still surface as errors, as before).

    :param folder: pypff folder containing the message (currently unused)
    :param msg: pypff message object
    :param eml_from: lower-cased ``from:`` header line (non-empty by contract)
    :param reply: lower-cased ``reply-to:`` line, or "" when absent
    :param return_path: lower-cased ``return-path:`` line, or "" when absent
    """
    # (dead ``reply_bool``/``return_bool`` flags removed -- they were assigned
    # but never read)
    from_email, from_domain = emailExtractor(eml_from)
    if reply != "":
        reply_email, reply_domain = emailExtractor(reply)
    if return_path != "":
        return_email, return_domain = emailExtractor(return_path)
    try:
        # fall through the body representations in order of preference
        if msg.html_body is not None:
            print("BODY: {}".format(msg.html_body))
        elif msg.plain_text_body is not None:
            print("BODY: {}".format(msg.plain_text_body))
        elif msg.rtf_body is not None:
            print("BODY: {}".format(msg.rtf_body))
        else:
            print("BODY:")
    except Exception:
        # BUG FIX: was a bare ``except:`` that also swallowed SystemExit and
        # KeyboardInterrupt; also fixed the "Coudn't" typo in the message.
        print("Couldn't process body")
if __name__ == '__main__':
    # Command-line Argument Parser
    parser = argparse.ArgumentParser(description="Outlook Email Dumper")
    parser.add_argument("PST_FILE", help="File path to input PST file")
    parser.add_argument("OUTPUT_DIR", help="Output Dir")
    args = parser.parse_args()
    # create the output directory up front (main() itself never writes to it yet)
    if not os.path.exists(args.OUTPUT_DIR):
        os.makedirs(args.OUTPUT_DIR)
    # only proceed when the PST path exists and is a regular file
    if os.path.exists(args.PST_FILE) and os.path.isfile(args.PST_FILE):
        main(args.PST_FILE, args.OUTPUT_DIR)
    else:
        print("[-] Input PST {} does not exist or is not a file".format(args.PST_FILE))
        sys.exit(4)
sys.exit(4)
| 31.703704 | 105 | 0.622079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 573 | 0.167348 |