blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
96a369b5364a86c64fea69948449dae6ce12c2ee | 0dd881b86146eff46a99e3100a12addcb5b1bde9 | /No1047 Remove All Adjacent Duplicates In String.py | fa03ed30c38d34973f97d2798ccca0b6b0a2699f | [] | no_license | BaijingML/leetcode | 8b04599ba6f1f9cf12fbb2726f6a1463a42f0a70 | 0ba37ea32ad71d9467f73da6f9e71971911f1d4c | refs/heads/master | 2020-03-22T05:07:17.884441 | 2020-01-10T12:13:54 | 2020-01-10T12:13:54 | 138,399,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: python3.6
@Author : Zhangfusheng
@Time : 2019/10/3 23:07
@File : No1047 Remove All Adjacent Duplicates In String
@Software: PyCharm
"""
class Solution:
    """LeetCode 1047 -- remove all adjacent duplicate character pairs."""

    def removeDuplicates(self, S: str) -> str:
        """Repeatedly delete pairs of equal adjacent characters from S.

        A stack keeps the characters that survive so far; an incoming
        character cancels the most recently kept one when they match.
        """
        kept = []
        for ch in S:
            if kept and kept[-1] == ch:
                kept.pop()
            else:
                kept.append(ch)
        return "".join(kept)
if __name__ == '__main__':
solu = Solution()
print(solu.removeDuplicates("aaaaaaaa")) | [
"2670871693@qq.com"
] | 2670871693@qq.com |
c30d318cfddb193f968bec78863696786a1a7129 | 89a3dad201597d270d4e8be16d9f70fab8f85cd6 | /backend/theme_20114/settings.py | fcdc6849ad455a2d8cc6e9cc97db66ec25efbd04 | [] | no_license | crowdbotics-apps/theme-20114 | f76a4edeb86a9edd7fb5bd10717f272ee32b3aa5 | 792eeea493fec39e700bf6495896ea0b487f6c34 | refs/heads/master | 2022-12-11T04:38:11.187545 | 2020-09-09T06:33:54 | 2020-09-09T06:33:54 | 293,898,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,889 | py | """
Django settings for theme_20114 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# All deployment-specific values are read from environment variables.
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
# Apps that live in this repository.
LOCAL_APPS = [
    'home',
    'users.apps.UsersConfig',
]
# Reusable third-party apps: DRF + rest_auth for the API, allauth for
# account handling, fcm_django for push, drf_yasg for API docs.
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    # start fcm_django push notifications
    'fcm_django',
    # end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'theme_20114.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'theme_20114.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# A DATABASE_URL environment variable (if set) overrides the SQLite default.
if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# WhiteNoise serves the collected static files directly from the app server.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
# Outgoing e-mail is sent through SendGrid's SMTP relay.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
    "FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
145f4c97909c9fc237cf9c83434ba2761d63d0b9 | 071cb4ec0686d1bc57d6f8cde98c60f25cf31134 | /aws_learn/tests/test_aws_learn.py | f1e35568ec4b33e2e095b05e542f287052e1c5be | [] | no_license | nmaswood/aws-learn | 9aaef4f80e1088c33ae39038700bfb80f0c9d313 | 8db9451e2315f2196b40b138d5fe6758d27cba7f | refs/heads/master | 2022-11-28T01:38:02.897138 | 2020-08-10T15:22:53 | 2020-08-10T15:22:53 | 285,590,649 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | from aws_learn import __version__
def test_version():
    # Guard against accidental version bumps: the package version must stay
    # in sync with the value declared in pyproject.toml.
    assert __version__ == '0.1.0'
| [
"nasrmaswood@gmail.com"
] | nasrmaswood@gmail.com |
8d821a2f6270c7142c70114355a8fa7e0c718aa9 | c7c1e268fa641ab97bb4f48d8e419ea918ed8b87 | /sdf_tools.py | 0f36f5655b4c7e38d579cc1a4055651849269416 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mnowotka/sdf_viewer | 19fb7423af078af4e877641f2f5c864da13bb369 | a0758930c3a19418d47e2f04cd9e41af287d837b | refs/heads/master | 2021-01-15T14:01:48.633151 | 2014-11-03T09:34:21 | 2014-11-03T09:34:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,139 | py | #!/usr/bin/env python2
#-*- coding: utf-8 -*-
# sdf_tools.py
# version: 2014-10-24
# author: Axel Pahl (APL)
# contact: firstnamelastname at gmx dot de
# license: BSD, see license.txt in this folder
#==============================================================================
# TODO: implement class-based version
#==============================================================================
# TODO: implement progressbar
# progressbar code:
# import progressbar as pb
# # downloaded from https://github.com/fnoble/python-progressbar/tree/ipython_support
# # location: /home/apl/dev/python/libs/
# # encapsulate in try: ... except:
# widgets = ['Processed: ', pb.Counter("%5d"), ' molecules [', pb.Timer(), ']']
# pbar = pb.ProgressBar(widgets=widgets).start()
# for i in range(80):
# pbar.update(i+1)
# time.sleep(0.2)
#==============================================================================
from __future__ import absolute_import, division, print_function # , unicode_literals
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import rdkit.Chem.Descriptors as Desc
# imports for similarity search
from rdkit.Chem.Fingerprints import FingerprintMols
# imports for clustering
from rdkit import DataStructs
from rdkit.ML.Cluster import Butina
import os
import sys
import random
import math
import csv
import os.path as op
from time import strftime
from collections import Counter
# Feature detection at import time: IPYTHON enables inline structure display,
# PYLAB enables plotting.  Both flags are read by functions further down.
if hasattr(sys, "ps1"): # <True> when called from an interactive session
    print(" > interactive session, trying to import display_png from IPython...", end="")
    try:
        from IPython.core.display import display_png
        IPYTHON = True
        print(" success!")
    except:
        IPYTHON = False
        print()
        print(" * no inline display of molecule structures supported.")
else:
    # BUG FIX: IPYTHON used to stay undefined in non-interactive runs,
    # so every function that checks it raised a NameError.
    IPYTHON = False
try:
    import pylab
    PYLAB = True
except:
    PYLAB = False
    print(" * failed to import pylab,")
    print(" plotting will not work.")
def set_sdf_report_folder():
    """Determine (and create on demand) the base folder for sdf reports.

    Resolution order: the SDF_VIEWER_REPORTS environment variable, then
    "sdf_reports" under HOME (HOMEPATH on Windows).  The folder and its
    standard subfolders (sdf, reports, html, session) are created if missing.

    Returns the folder path, or None when no home directory can be found.
    """
    if "SDF_VIEWER_REPORTS" in os.environ:
        folder = os.environ["SDF_VIEWER_REPORTS"]
        print(" > found environment var", folder)
    else:
        if "HOME" in os.environ:
            folder = op.join(os.environ["HOME"], "sdf_reports")
        elif "HOMEPATH" in os.environ: # Windows
            folder = op.join(os.environ["HOMEPATH"], "sdf_reports")
        else:
            # BUG FIX: previously folder stayed None here and op.exists(None)
            # below raised a TypeError; bail out explicitly instead.
            print(" * no home directory found, no report folder set.")
            return None
        print(" > setting default folder", folder)
    if not op.exists(folder):
        print(" * folder does not exist, creating...")
        os.mkdir(folder)
    for directory in ["sdf", "reports", "html", "session"]:
        subfolder = op.join(folder, directory)
        if not op.exists(subfolder):
            print(" - creating subfolder", subfolder)
            os.mkdir(subfolder)
    return folder
MISSING_VAL = -999  # placeholder for absent or non-numerical property values
POINTSIZE = 40  # default marker size for scatter plots
# base folder for all generated reports; resolved once at import time
REPORT_FOLDER = set_sdf_report_folder()
def create_dir_if_not_exist(dir_name):
    """Create directory dir_name (including parents) unless it already exists."""
    if op.exists(dir_name):
        return
    print(" * target folder does not exist, creating {}...".format(dir_name))
    os.makedirs(dir_name)
def load_sdf(file_name_or_obj="testset.sdf", large_sdf=False):
    """load small sdf completely in memory as list; return large sdf as file object
    function accepts a string filename or a file object"""
    from_name = type(file_name_or_obj) == str
    file_obj = open(file_name_or_obj) if from_name else file_name_or_obj
    reader = Chem.ForwardSDMolSupplier(file_obj)
    if large_sdf:
        # hand back the streaming supplier without materializing anything
        if from_name:
            print(" > large sdf {} loaded as file object.".format(file_name_or_obj.split(".")[0]))
        else:
            print(" > large sdf loaded as file object.")
        return reader
    # small sdf: collect every record that RDKit could parse
    sdf_list = [mol for mol in reader if mol]
    if from_name:
        print(" > sdf {} loaded with {} records.".format(file_name_or_obj.split(".")[0], len(sdf_list)))
    else:
        print(" > sdf loaded with {} records.".format(len(sdf_list)))
    return sdf_list
def write_sdf(sdf_list, fn, conf_id=-1):
    """Write a molecule, a list of molecules or a supplier to the SD file fn."""
    if type(sdf_list) not in (list, file):
        # a single molecule was passed in; wrap it so it can be iterated
        sdf_list = [sdf_list]
    writer = Chem.SDWriter(fn)
    for entry in sdf_list:
        writer.write(entry, confId=conf_id)
    writer.close()
def write_pdb(sdf_list, fn):
    """sdf_list can be a single molecule or a list of molecules"""
    if type(sdf_list) != list:
        # BUG FIX: was list(sdf_list), which tries to *iterate* the single
        # molecule (and fails); wrap it in a list instead, as write_sdf does.
        sdf_list = [sdf_list]
    writer = Chem.PDBWriter(fn)
    for mol in sdf_list:
        writer.write(mol)
    writer.close()
def prepare_for_viewer(sdf_list):
    """Prefix every property with "n_" (numerical) or "s_" (string) in-place,
    depending on whether its value parses as a float."""
    if type(sdf_list) != list:
        print(" * function prepare_for_viewer currently only handles lists.")
        return
    print(" > assigning types to fields...", end="")
    for mol in sdf_list:
        for field in mol.GetPropNames():
            try:
                float(mol.GetProp(field))
            except ValueError:
                prefix = "s_"
            else:
                prefix = "n_"
            rename_prop_in_mol(mol, field, prefix + field)
    print("done.")
def iterate_over_reagents_file(fn="testset.sdf", supplier="__guess__",
                               max_num_recs=1000000, mw_low=100, mw_high=350, dryrun=False):
    """Filter a supplier reagents SD file down to usable building blocks.

    Normalizes supplier-specific fields (Aldrich or ChemSpider; auto-detected
    from the first record when supplier="__guess__"), drops organometallics
    and molecules outside the [mw_low, mw_high] weight window, and writes the
    survivors to <REPORT_FOLDER>/sdf/output.sdf unless dryrun=True.
    Prints a summary of removal reasons at the end.
    """
    if not supplier in ["aldrich", "chemspider", "__guess__"]:
        print(" * unknown supplier.")
        print(" aborting.")
        return
    reader = Chem.SDMolSupplier(fn)
    if not dryrun:
        writer = Chem.SDWriter(op.join(REPORT_FOLDER, "sdf", "output.sdf"))
    mol_counter_in = 0
    mol_counter_out = 0
    removals = Counter()  # tally of why records were dropped
    for mol in reader:
        mol_counter_in += 1
        # detect the supplier from the fields of the very first record
        if mol_counter_in == 1 and supplier == "__guess__":
            if mol.HasProp("CSID"):
                supplier = "chemspider"
            elif mol.HasProp("CAS_NUMBER"):
                supplier = "aldrich"
            else:
                print(" * supplier could not be guessed.")
                print(" aborting.")
                return
            print(" > guessed supplier:", supplier)
        if not mol:
            removals["rejected_by_rdkit"] += 1
            continue
        # normalize supplier-specific property names to the n_/s_/k_ scheme
        if supplier == "aldrich":
            remove_props_from_mol(mol, ["ASSAY_NAME", "COMMON_NAME", "MOLECULAR_FORMULA", "MOLECULAR_WEIGHT",
                                        "MOLECULAR_WEIGHT", "BOILING_POINT", "FLASH_POINT", "PRODUCTS", "DENSITY"])
            rename_prop_in_mol(mol, "Similarity", "n_sim")
            rename_prop_in_mol(mol, "IUPAC_NAME", "s_name")
            rename_prop_in_mol(mol, "MDL_NUMBER", "s_mdl")
            rename_prop_in_mol(mol, "CAS_NUMBER", "s_cas")
        elif supplier == "chemspider":
            remove_props_from_mol(mol, ["MF", "MW", "SMILES", "InChI", "InChIKey", "Data Sources", "References", "PubMed",
                                        "RSC", "CSURL"])
            rename_prop_in_mol(mol, "CSID", "k_csid")
            # rename_prop_in_mol(mol, "CSURL", "s_url")
        # remove organometallics
        WRITE_TO_OUTPUT = True
        calc_props_in_mol(mol, include_date = False)
        formula = mol.GetProp("s_formula").lower()
        for element in ["hg", "pd", "pt", "os", "mg", "mn", "ti", "zn"]:
            if element in formula:
                WRITE_TO_OUTPUT = False
                removals["organometallic"] += 1
                break
        # remove low or high molwt
        if WRITE_TO_OUTPUT:
            molwt = float(mol.GetProp("n_molwt"))
            if molwt > mw_high:
                WRITE_TO_OUTPUT = False
                removals["molwt_high"] += 1
            elif molwt < mw_low:
                WRITE_TO_OUTPUT = False
                removals["molwt_low"] += 1
        if WRITE_TO_OUTPUT:
            mol_counter_out += 1
            if not dryrun:
                writer.write(mol)
        if mol_counter_in >= max_num_recs:
            break
        # in a dryrun, show the first few records on screen for inspection
        if dryrun and mol_counter_in < 10:
            if not WRITE_TO_OUTPUT:
                print(" *** REMOVED ***")
            show_record(mol)
        if mol_counter_in % 500 == 0:
            print(" > processed: {:7d} found: {:6d}\r".format(mol_counter_in, mol_counter_out), end="")
            sys.stdout.flush()
    print(" > processed: {:7d} found: {:6d}".format(mol_counter_in, mol_counter_out))
    print(" done.")
    if not dryrun:
        writer.close()
    print("Molecules removed for the following reasons:")
    for reason in removals:
        print("{:20s}: {:4d}".format(reason, removals[reason]))
def iterate_over_sdf_file(fn="testset.sdf", max_num_recs=1000000, actives_only=False, dryrun=False):
    """Clean up a ClpP screening SD file and write the result to output.sdf.

    Converts "% activity@ClpP" to "n_clpp_percinh" (percent inhibition =
    100 - activity), normalizes id/supplier/pIC50 fields, and removes
    records without activity data, organometallics and molecules outside
    the 200-600 molwt window.  With actives_only=True, compounds with
    negative inhibition are dropped as well.  dryrun=True skips writing
    and displays the first few records instead.
    """
    reader = Chem.SDMolSupplier(fn)
    if not dryrun:
        writer = Chem.SDWriter("output.sdf")
    mol_counter_in = 0
    mol_counter_out = 0
    removals = Counter()  # tally of why records were dropped
    for mol in reader:
        mol_counter_in += 1
        if not mol:
            removals["rejected_by_rdkit"] += 1
            continue
        remove_props_from_mol(mol, ["ASSAY_NAME"])
        if mol.HasProp("% activity@ClpP"):
            rename_prop_in_mol(mol, "% activity@ClpP", "n_clpp_percact")
            try:
                old_value = float(mol.GetProp("n_clpp_percact"))
            except ValueError:
                removals["no_activity"] += 1
                mol.ClearProp("n_clpp_percact")
                continue
            # convert remaining activity into percent inhibition
            new_value = 100 - old_value
            if actives_only and new_value < 0:
                removals["activity_low"] += 1
                continue
            mol.SetProp("n_clpp_percinh", str(new_value))
        else:
            removals["no_activity"] += 1
            continue
        if mol.HasProp("pIC50@ClpP"):
            rename_prop_in_mol(mol, "pIC50@ClpP", "n_clpp_pic50")
            try:
                old_value = float(mol.GetProp("n_clpp_pic50"))
            except ValueError: # not a number
                mol.SetProp("n_clpp_pic50", "n.d.")
        rename_prop_in_mol(mol, "COMPOUND_ID", "k_molid")
        rename_prop_in_mol(mol, "SUPPLIER", "s_supplier")
        rename_prop_in_mol(mol, "BATCH_ID", "k_batchid")
        # remove organometallics
        WRITE_TO_OUTPUT = True
        calc_props_in_mol(mol, include_date = False)
        formula = mol.GetProp("s_formula").lower()
        for element in ["hg", "pd", "pt", "os", "mn", "ti"]:
            if element in formula:
                WRITE_TO_OUTPUT = False
                removals["organometallic"] += 1
                break
        # remove low or high molwt
        if WRITE_TO_OUTPUT:
            molwt = float(mol.GetProp("n_molwt"))
            if molwt > 600:
                WRITE_TO_OUTPUT = False
                removals["molwt_high"] += 1
            if molwt < 200:
                WRITE_TO_OUTPUT = False
                removals["molwt_low"] += 1
        if WRITE_TO_OUTPUT:
            mol_counter_out += 1
            if not dryrun:
                writer.write(mol)
        if mol_counter_in >= max_num_recs:
            break
        # in a dryrun, show the first few records on screen for inspection
        if dryrun and mol_counter_in < 10:
            if not WRITE_TO_OUTPUT:
                print(" *** REMOVED ***")
            show_record(mol)
        if mol_counter_in % 500 == 0:
            print(" > processed: {:7d} found: {:6d}\r".format(mol_counter_in, mol_counter_out), end="")
            sys.stdout.flush()
    print(" > processed: {:7d} found: {:6d}".format(mol_counter_in, mol_counter_out))
    print(" done.")
    if not dryrun:
        writer.close()
    print("Molecules removed for the following reasons:")
    for reason in removals:
        print("{:20s}: {:4d}".format(reason, removals[reason]))
def enum_racemates(sdf_list_or_file, find_only=True):
    """returns: result_sdf::list<mol>, racemic_molids::list<int>
    find_only==True: return new sdf as list which contains all the racemates of the input sdf.
    find_only==False: return new sdf as list with ALL input structures, where the racemates are
    replaced by their two enantiomers. The returned sdf is always
    equal in size or larger as the input sdf.
    Multiple stereo centers are not yet handled.
    In the new sdf the molids are no longer unique and should be reassigned
    (remove k_molid and run calc_props(sdf))."""
    result_sdf = []
    racemic_molids = []
    if type(sdf_list_or_file) != list and sdf_list_or_file.atEnd(): # sdf is file
        print(" * file object is at end, please reload.")
        return None
    for mol in sdf_list_or_file:
        first_undefined = False
        # every stereocenter, including those without an assigned configuration
        chiral_centers = Chem.FindMolChiralCenters(mol, includeUnassigned=True)
        if chiral_centers:
            first_center = chiral_centers[0][0]
            # "?" means the configuration of the first center is undefined
            first_undefined = chiral_centers[0][1] == "?"
        if first_undefined:
            racemic_molids.append(int(mol.GetProp("k_molid")))
            if find_only:
                result_sdf.append(mol)
                continue
            else:
                # enumerate both configurations of the first stereocenter
                mol1 = Chem.Mol(mol)
                mol2 = Chem.Mol(mol)
                mol1.GetAtomWithIdx(first_center).SetChiralTag(Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW)
                mol2.GetAtomWithIdx(first_center).SetChiralTag(Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW)
                result_sdf.append(mol1)
                result_sdf.append(mol2)
        else:
            if not find_only: # return ALL mols
                result_sdf.append(mol)
    return result_sdf, racemic_molids
def list_fields(sdf_list_or_file):
    """Return the distinct property names found in an sdf.

    Lists with more than 100 records are sampled (half of the records);
    file-like suppliers are probed on the first 500 records only.
    Returns None when a file object is already exhausted.
    """
    field_list = []
    if type(sdf_list_or_file) == list:
        if len(sdf_list_or_file) > 100:
            # sample half of the records to speed things up on big lists
            sdf_sample = random.sample(sdf_list_or_file, len(sdf_list_or_file)//2)
        else:
            sdf_sample = sdf_list_or_file
        for mol in sdf_sample:
            field_list.extend(mol.GetPropNames())
    else: # sdf is file
        if sdf_list_or_file.atEnd():
            print(" * file object is at end, please reload.")
            return None
        index = 0
        while sdf_list_or_file and index < 500:
            try:
                mol = sdf_list_or_file.next()
                field_list.extend(mol.GetPropNames())
            except StopIteration:
                break
            # BUG FIX: the counter was never incremented, so the intended
            # 500-record cap never took effect
            index += 1
    return list(set(field_list))
def logp_from_smiles(smiles):
    """Calculate cLogP for a SMILES string (structure shown inline in IPython)."""
    mol = Chem.MolFromSmiles(smiles)
    if IPYTHON:
        display_png(mol)
    return Desc.MolLogP(mol)
def show_record(mol):
    """Display one molecule (structure inline under IPython) plus all of its properties."""
    if IPYTHON:
        display_png(mol)
    for prop_name in mol.GetPropNames():
        line = " {:13s}: {}".format(prop_name, mol.GetProp(prop_name))
        print(line)
    print("_" * 75)
def show_sdf(sdf_list, force=False):
    """Display every record of a small sdf; refuses lists > 20 unless force=True."""
    if len(sdf_list) > 20 and not force:
        print(" * will not show more than 20 records.")
        print(" to force, use force=True.")
        return
    for mol in sdf_list:
        show_record(mol)
def merge_prop_from_file(sdf_list, fn, prop):
    """Merge columns from a tab-separated file into the molecules of sdf_list.

    The first line of <fn> holds the column headers; records are matched on
    the column named <prop> against the molecules' <prop> property.  All
    other columns of a matching row are copied onto the molecule.
    """
    lines_in_file = 0
    counter_sdf = 0
    firstline = True
    # FIX: use a context manager so the file is always closed, and open in
    # text mode (the csv module requires it on Python 3)
    with open(fn) as f_in:
        dr = csv.reader(f_in, delimiter="\t")
        for row in dr:
            if firstline:
                headers = row
                firstline = False
                index_of = headers.index(prop)
            else:
                lines_in_file += 1
                for mol in sdf_list:
                    if mol.HasProp(prop) and mol.GetProp(prop) == row[index_of]:
                        counter_sdf += 1
                        for index, new_prop in enumerate(row):
                            if index != index_of:
                                mol.SetProp(headers[index], row[index])
    # FIX: lines_in_file already excludes the header line; the previous
    # "lines_in_file-1" under-reported the number of parsed lines by one
    print(" > {} lines from {} parsed. {} records modified in sdf.".format(lines_in_file, fn, counter_sdf))
def remove_props_from_mol(mol, prop_or_propslist):
    """Delete one property (string) or several (list of strings) from a molecule."""
    if type(prop_or_propslist) != list:
        prop_or_propslist = [prop_or_propslist]
    for prop_name in prop_or_propslist:
        # re-query the names each time; only clear props that actually exist
        if prop_name in mol.GetPropNames():
            mol.ClearProp(prop_name)
def remove_props(mol_or_sdf_list, props):
    """Remove the given properties from one molecule or from every molecule of a list."""
    if type(mol_or_sdf_list) == file:
        print(" * operation not supported for file objects.")
        return
    if type(mol_or_sdf_list) != list:
        remove_props_from_mol(mol_or_sdf_list, props)
        return
    for mol in mol_or_sdf_list:
        # skip records that RDKit could not parse (None entries)
        if mol:
            remove_props_from_mol(mol, props)
def rename_prop_in_mol(mol, old_prop, new_prop):
    """Move the value of old_prop to new_prop (no-op when old_prop is absent)."""
    if old_prop not in mol.GetPropNames():
        return
    mol.SetProp(new_prop, mol.GetProp(old_prop))
    mol.ClearProp(old_prop)
def rename_prop(mol_or_sdf_list, old_prop, new_prop):
    """Rename a property on one molecule or on each molecule of a list."""
    if type(mol_or_sdf_list) != list:
        rename_prop_in_mol(mol_or_sdf_list, old_prop, new_prop)
        return
    for mol in mol_or_sdf_list:
        rename_prop_in_mol(mol, old_prop, new_prop)
def calc_props_in_mol(mol, dateprop="k_date", include_date=True, force2d=False):
    """Calculate standard descriptors (molwt, formula, logp, hba, hbd, rotb,
    tpsa) and store them as string properties on the molecule in-place.
    With include_date=True a k_date stamp is added once (never overwritten);
    force2d=True recomputes the 2D coordinates unconditionally."""
    if force2d:
        mol.Compute2DCoords()
    else:
        try:
            mol.GetConformer()
        except ValueError: # no 2D coords... calculate them
            mol.Compute2DCoords()
    mol.SetProp("n_molwt", "{:.2f}".format(Desc.MolWt(mol)))
    mol.SetProp("s_formula", Chem.CalcMolFormula(mol))
    mol.SetProp("n_logp", "{:.2f}".format(Desc.MolLogP(mol)))
    mol.SetProp("n_hba", str(Desc.NOCount(mol)))
    mol.SetProp("n_hbd", str(Desc.NHOHCount(mol)))
    mol.SetProp("n_rotb", str(Desc.NumRotatableBonds(mol)))
    mol.SetProp("n_tpsa", str(int(Desc.TPSA(mol))))
    # stamp today's date only when the record has no date yet
    if include_date and not dateprop in mol.GetPropNames():
        mol.SetProp(dateprop, strftime("%Y%m%d"))
def get_highest_counter(mol_or_sdf, counterprop="k_molid"):
    """Return the largest integer value of counterprop over all records (0 if none)."""
    highest = 0
    for mol in mol_or_sdf:
        if counterprop not in mol.GetPropNames():
            continue
        highest = max(highest, int(mol.GetProp(counterprop)))
    return highest
def calc_props(mol_or_sdf, counterprop="k_molid", dateprop="k_date",
               include_date=True, force2d=False):
    """Calculate standard descriptors for one molecule or every molecule of a
    list, and (for lists) assign a unique counterprop id to records that do
    not have one yet, continuing from the highest id already present."""
    if type(mol_or_sdf) != list:
        calc_props_in_mol(mol_or_sdf, dateprop, include_date, force2d)
        return
    molid_counter = get_highest_counter(mol_or_sdf, counterprop=counterprop) + 1
    for mol in mol_or_sdf:
        # continue counting if counterprop not present
        if not counterprop in mol.GetPropNames():
            mol.SetProp(counterprop, str(molid_counter))
            molid_counter += 1
        calc_props_in_mol(mol, dateprop, include_date, force2d)
def sort_sdf(sdf_list, field, reverse=True):
    """Sort sdf_list in place by a numerical field ("n_..." or "k_...").

    Non-numerical ("s_...") fields are rejected with a message.
    reverse=True (the default) sorts in descending order.
    """
    if field[:2] in "n_ k_":
        # FIX: use a key function instead of the Python-2-only cmp= argument;
        # the resulting order is identical
        sdf_list.sort(key=lambda mol: float(mol.GetProp(field)), reverse=reverse)
    else:
        print(" * only sorting of numbers is currently supported.")
def activity_hist(sdf_list_or_file, activityprop):
    """Print a histogram of activity classes for the field activityprop.

    Bins: high (>=50), medium (20 - <50), low (0 - <20), OOR (<0) and NaN
    (missing or unparseable values).  Percentages are reported relative to
    the total of the three non-negative classes.  Returns None.
    """
    hist = Counter()
    act_oor = "OOR (<0)"
    act_high = "high ( >=50)"
    act_med = "medium (20 - <50)"
    act_low = "low ( 0 - <20)"
    if type(sdf_list_or_file) != list and sdf_list_or_file.atEnd(): # sdf is file
        print(" * file object is at end, please reload.")
        return
    for mol_counter_in, mol in enumerate(sdf_list_or_file):
        try:
            value = float(mol.GetProp(activityprop))
        except:
            hist["NaN"] += 1
            continue
        if value >= 50:
            hist[act_high] += 1
        elif value >= 20:
            hist[act_med] += 1
        elif value >= 0:
            hist[act_low] += 1
        else:
            hist[act_oor] += 1
        if mol_counter_in > 0 and mol_counter_in % 500 == 0:
            # BUG FIX: end="" was passed to str.format() instead of print(),
            # so the progress line printed a newline every time
            print(" > processed: {:7d}\r".format(mol_counter_in), end="")
            sys.stdout.flush()
    mol_counter_in += 1
    print(" > processed: {:7d}".format(mol_counter_in))
    print(" done.")
    act_counter = 0
    # FIX: dict.has_key() is Python-2 only; "in" behaves identically
    for key in [act_high, act_med, act_low]:
        if key in hist:
            act_counter += hist[key]
    for key in [act_high, act_med, act_low]:
        if key in hist:
            perc_act = 100.0 * hist[key] / act_counter
            print(" {:<18s}: {:6d} ({:4.1f}%)".format(key, hist[key], perc_act))
    for key in [act_oor, "NaN"]:
        if key in hist:
            print(" {:<18s}: {:6d}".format(key, hist[key]))
def factsearch(sdf_list_or_file, query, invert=False, max_hits=2000, count_only=False, sorted=True, reverse=True):
    """Search records by a property query, e.g. "n_logp < 3.5".

    The first token prefixed n_/s_/k_ is taken as the field; the query is
    evaluated per record with the field value bound to "val".
    NOTE(security): the query string is passed to eval() verbatim -- only
    use with trusted input.
    invert=True returns the non-matching records; count_only=True only
    reports the number of hits.  Numerical hits are sorted by the field
    unless sorted=False.  Returns the list of matching molecules.
    """
    result_list = []
    result_indexes_list = []
    mol_counter_out = 0
    # extract the database field from the query string
    for el in query.split():
        if el[:2] in "n_ s_ k_":
            field = el
            break
    print(" > field {} extracted from query: {}".format(field, query))
    # rewrite the query so it refers to the local variable "val"
    query_mod = query.replace(field, "val")
    if type(sdf_list_or_file) != list and sdf_list_or_file.atEnd(): # sdf is file
        print(" * file object is at end, please reload.")
        return None
    print(" > searching...")
    for mol_counter_in, mol in enumerate(sdf_list_or_file):
        if not mol:
            continue
        hit = False
        if field in mol.GetPropNames():
            val = mol.GetProp(field).lower()
            if field[:2] in "n_ k_":
                try:
                    val_float = float(val)
                except ValueError:
                    continue
                # use an int when the value is integral, else the float
                val_int = int(val_float)
                if val_int == val_float:
                    val = val_int
                else:
                    val = val_float
            if eval(query_mod):
                hit = True
        if invert:
            hit = not hit
        if hit:
            mol_counter_out += 1
            if not count_only:
                result_list.append(mol)
                result_indexes_list.append(mol_counter_in)
        if mol_counter_in > 0 and mol_counter_in % 500 == 0:
            print("\r processed: {:7d} found: {:6d}".format(mol_counter_in, mol_counter_out), end="")
            sys.stdout.flush()
        if not count_only and mol_counter_out >= max_hits:
            print()
            print(" * maximum number of hits reached.")
            break
    print("\r processed: {:7d} found: {:6d}".format(mol_counter_in+1, mol_counter_out))
    print(" done.")
    if count_only:
        print(" > option <count_only> was selected.")
        print(" no results were returned.")
    if not count_only and sorted and field[:2] in "n_ k_":
        sort_sdf(result_list, field, reverse=reverse)
    if 0 < mol_counter_out < 6:
        print(" > found {} matching records at {}.".format(mol_counter_out, str(result_indexes_list)))
    return result_list
def substruct_search(sdf_list_or_file, smarts, invert=False, max_hits=5000, count_only=False, add_h=False):
    """Substructure search with a SMARTS query.

    invert=True returns the records that do NOT match; add_h=True adds
    explicit hydrogens before matching (turned on automatically when the
    SMARTS contains "[H]").  count_only=True only reports the number of
    hits.  Returns the list of matching molecules (or None on bad input).
    """
    result_list = []
    result_indexes_list = []
    mol_counter_out = 0
    query = Chem.MolFromSmarts(smarts)
    if not query:
        print(" * ERROR: could not generate query from SMARTS.")
        return None
    if type(sdf_list_or_file) != list and sdf_list_or_file.atEnd(): # sdf is file
        print(" * file object is at end, please reload.")
        return None
    if not add_h and "[H]" in smarts:
        add_h = True
        print(" > explicit hydrogens turned on (add_h = True)")
    print(" > searching...")
    for mol_counter_in, mol in enumerate(sdf_list_or_file):
        hit = False
        if not mol: continue
        if add_h:
            # match against a copy with explicit hydrogens
            mol_with_h = Chem.AddHs(mol)
            if mol_with_h.HasSubstructMatch(query):
                hit = True
        else:
            if mol.HasSubstructMatch(query):
                hit = True
        if invert:
            # reverse logic
            hit = not hit
        if hit:
            mol_counter_out += 1
            if not count_only:
                result_list.append(mol)
                result_indexes_list.append(mol_counter_in)
        if mol_counter_in > 0 and mol_counter_in % 500 == 0:
            print("\r processed: {:7d} found: {:6d}".format(mol_counter_in, mol_counter_out), end="")
            sys.stdout.flush()
        if not count_only and mol_counter_out >= max_hits:
            print()
            print(" * maximum number of hits reached.")
            break
    print("\r processed: {:7d} found: {:6d}".format(mol_counter_in+1, mol_counter_out))
    print(" done.")
    if count_only:
        print(" > option <count_only> was selected.")
        print(" no results were returned.")
    if 0 < mol_counter_out < 6:
        print(" > found {} matching records at {}.".format(mol_counter_out, str(result_indexes_list)))
    return result_list
def similarity_search(sdf_list_or_file, smarts, similarity=0.8, max_hits=2000, count_only=False):
    """Fingerprint similarity search against a SMARTS query.

    Records with Tanimoto similarity >= <similarity> are returned (the
    value is also stored on each hit as "n_sim").  count_only=True only
    reports the number of hits.  Returns the hit list (None on bad input).
    """
    result_list = []
    result_indexes_list = []
    mol_counter_out = 0
    query_mol = Chem.MolFromSmarts(smarts)
    if not query_mol:
        print(" * ERROR: could not generate query from SMARTS.")
        return None
    query_fp = FingerprintMols.FingerprintMol(query_mol)
    if type(sdf_list_or_file) != list and sdf_list_or_file.atEnd(): # sdf is file
        print(" * file object is at end, please reload.")
        return None
    print(" > searching...")
    for mol_counter_in, mol in enumerate(sdf_list_or_file):
        mol_fp = FingerprintMols.FingerprintMol(mol)
        sim = DataStructs.FingerprintSimilarity(query_fp, mol_fp)
        if sim >= similarity:
            mol_counter_out += 1
            if not count_only:
                # store the similarity on the hit for later sorting/inspection
                mol.SetProp("n_sim", "{:4.3f}".format(sim))
                result_list.append(mol)
                result_indexes_list.append(mol_counter_in)
        if mol_counter_in > 0 and mol_counter_in % 500 == 0:
            print( "\r processed: {:7d} found: {:6d}".format(mol_counter_in, mol_counter_out), end="")
            sys.stdout.flush()
        if not count_only and mol_counter_out >= max_hits:
            print()
            print(" * maximum number of hits reached.")
            break
    print("\r processed: {:7d} found: {:6d}".format(mol_counter_in+1, mol_counter_out))
    print(" done.")
    if count_only:
        print(" > option <count_only> was selected.")
        print(" no results were returned.")
    print
    if 0 < mol_counter_out < 6:
        print(" > found {} matching records at {}.".format(mol_counter_out, str(result_indexes_list)))
    return result_list
def similarity_hist(sdf_list_or_file, smarts, min_similarity=0.5):
    """use this to get a quick overview of the similarity distribution for a given compound (smarts)"""
    # collects the raw similarity values (>= min_similarity) for histogramming
    result_list = []
    mol_counter_out = 0
    query_mol = Chem.MolFromSmarts(smarts)
    if not query_mol:
        print(" * ERROR: could not generate query from SMARTS.")
        return None
    query_fp = FingerprintMols.FingerprintMol(query_mol)
    if type(sdf_list_or_file) != list and sdf_list_or_file.atEnd(): # sdf is file
        print(" * file object is at end, please reload.")
        return None
    print(" > searching...")
    for mol_counter_in, mol in enumerate(sdf_list_or_file):
        mol_fp = FingerprintMols.FingerprintMol(mol)
        sim = DataStructs.FingerprintSimilarity(query_fp, mol_fp)
        if sim >= min_similarity:
            mol_counter_out += 1
            result_list.append(sim)
        if mol_counter_in > 0 and mol_counter_in % 500 == 0:
            print("\r processed: {:7d} found: {:6d}".format(mol_counter_in, mol_counter_out), end="")
            sys.stdout.flush()
    print("\r processed: {:7d} found: {:6d}".format(mol_counter_in+1, mol_counter_out))
    print(" done.")
    return result_list
def similarity_list(sdf_list_or_file, smarts):
    """return the molids and their similarity to the given compound (smarts)
    returns list([molid, similarity])"""
    result_list = []
    query_mol = Chem.MolFromSmarts(smarts)
    if not query_mol:
        print(" * ERROR: could not generate query from SMARTS.")
        return None
    query_fp = FingerprintMols.FingerprintMol(query_mol)
    if type(sdf_list_or_file) != list and sdf_list_or_file.atEnd(): # sdf is file
        print(" * file object is at end, please reload.")
        return None
    print(" > processing...")
    for mol_counter_in, mol in enumerate(sdf_list_or_file):
        mol_fp = FingerprintMols.FingerprintMol(mol)
        sim = DataStructs.FingerprintSimilarity(query_fp, mol_fp)
        result_list.append([mol.GetProp("k_molid"), sim])
        if mol_counter_in > 0 and mol_counter_in % 500 == 0:
            print("\r processed: {:7d}".format(mol_counter_in), end="")
            sys.stdout.flush()
    # BUG FIX: the final status line had two placeholders ({:7d} and {:6d})
    # but only one format argument, which raised an IndexError at runtime
    print("\r processed: {:7d}".format(mol_counter_in + 1))
    print(" done.")
    return result_list
def get_num_props_dict(sdf_list, fields=None):
DEBUG = False
props_dict = {}
molid_list = [] # list of molid for picking and structure display
all_props_list = list_fields(sdf_list)
# only return the numerical database fields:
num_props_list = [prop for prop in all_props_list if prop[:2] == "n_"]
if fields == None or type(fields) != list:
props_list = num_props_list
else:
# make sure the <fields> are valid fields in the sdf
props_list = list(set(fields).intersection(set(num_props_list)))
# initialize dict
for prop in props_list:
props_dict[prop] = []
if DEBUG:
print(" ", props_list)
DEBUG = False
for mol in sdf_list:
molid = int(mol.GetProp("k_molid"))
molid_list.append(molid)
for prop in props_list:
if mol.HasProp(prop):
val = mol.GetProp(prop)
try:
val = float(val)
props_dict[prop].append(val)
except ValueError:
# prop is present in this record, but it is not a number
# insert MISSING_VAL
props_dict[prop].append(MISSING_VAL)
else:
# property is defined in sdf, but not in this record:
# insert a MSSING_VAL to ensure identical length of the data lists
props_dict[prop].append(MISSING_VAL)
return props_dict, molid_list
def get_str_list(sdf_list, field):
str_list = []
for mol in sdf_list:
if field in mol.GetPropNames():
str_list.append(mol.GetProp(field))
else:
str_list.append("n.d.")
return str_list
def remove_missing_values(l1, l2, l3, l4=None):
# remove the missing values in one list and the corresponding data
# at the same pos from the other list:
while MISSING_VAL in l1:
pos = l1.index(MISSING_VAL)
l1.pop(pos)
l2.pop(pos)
l3.pop(pos)
if l4:
l4.pop(pos)
# rinse and repeat for the other list
while MISSING_VAL in l2:
pos = l2.index(MISSING_VAL)
l2.pop(pos)
l1.pop(pos)
l3.pop(pos)
if l4:
l4.pop(pos)
if len(l1) != len(l2):
print(" * ERROR: different length of value lists after removal of MISSING_VAL !")
if len(l1) != len(l3):
print(" * ERROR: different length of value list and molid list after removal of MISSING_VAL !")
if l4 and len(l1) != len(l4):
print(" * ERROR: different length of value list and color list after removal of MISSING_VAL !")
def show_hist(sdf_list, fields=None, show=True, savefig=True):
"""if fields==None, show histograms of all available fields in the sdf,
otherwise use the supplied list of fields, e.g.
fields=["n_pic50, "n_hba"]"""
if not PYLAB:
print(" * show_hist does not work because pylab could not be imported.")
return
if type(sdf_list) != list:
print(" * ERROR: plots are currently only implemented for sdf lists (no files).")
return
props_dict, molid_list = get_num_props_dict(sdf_list, fields)
num_of_plots = len(props_dict)
num_rows = int(math.sqrt(num_of_plots))
num_cols = num_of_plots // num_rows
if num_of_plots % num_rows > 0:
num_cols += 1
if not savefig:
pylab.figure(figsize=(3, 2))
else:
pylab.figure()
pylab.subplots_adjust(hspace=0.3, wspace=0.2)
for counter, prop in enumerate(props_dict):
value_list = props_dict[prop]
# remove the missing values:
while MISSING_VAL in value_list:
value_list.remove(MISSING_VAL)
pylab.subplot(num_rows, num_cols, counter+1)
pylab.hist(value_list)
pylab.title(prop[2:])
if savefig:
pylab.savefig("histogram.png")
if show:
pylab.show()
def show_scattermatrix(sdf_list, fields=None, colorby=None, mode="show"):
"""if fields==None, show a scattermatrix of all available fields in the sdf,
otherwise use the supplied list of fields, e.g.
fields=["n_pic50, "n_hba"]"""
if not PYLAB:
print(" * show_scattermatrix does not work because pylab could not be imported.")
return
if type(sdf_list) != list:
print(" * ERROR: plots are currently only implemented for sdf lists (no files).")
return
props_dict, molid_list = get_num_props_dict(sdf_list, fields)
props_dict_len = len(props_dict)
num_of_plots = props_dict_len ** 2
num_rows = props_dict_len
num_cols = props_dict_len
if not colorby:
COLOR_IS_LIST = False
else:
avail_colors = ["blue", "green", "red", "cyan", "magenta", "yellow", "black"]
str_list = get_str_list(sdf_list, colorby)
str_set = list(set(str_list))
if len(str_set) > len(avail_colors):
print(" * not enough colors!")
COLOR_IS_LIST = False
else:
# convert the string values to colors
COLOR_IS_LIST = True
color_list_all = [avail_colors[str_set.index(i)] for i in str_list]
color_coding = ""
for ind, item in enumerate(str_set):
color_coding = color_coding + "{}: {} ".format(item, avail_colors[ind])
print(" > color coding:", color_coding)
props_list = props_dict.keys() # to have a defined order
num_of_props = len(props_list)
fig = pylab.figure()
pylab.subplots_adjust(hspace=0, wspace=0)
axes = []
axes_molid_dict = {} # track the displayed molides for each plot (axe)
for y_count, prop_y in enumerate(props_list):
for x_count, prop_x in enumerate(props_list):
if y_count == 0: # first row
if x_count == 0 or x_count == 1: # top left, don't let the top left hist share the y axis
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1)
else:
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1, sharey=axes[1])
else:
if x_count == 0:
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1, sharex=axes[x_count])
elif x_count == y_count:
# don't let the histograms share the y axis
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1, sharex=axes[x_count])
else:
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1, sharex=axes[x_count], sharey=axes[y_count*num_of_props])
if prop_x == prop_y:
value_list = props_dict[prop_x][:]
# remove the missing values:
while MISSING_VAL in value_list:
value_list.remove(MISSING_VAL)
ax.hist(value_list)
else:
value_x_list = props_dict[prop_x][:]
value_y_list = props_dict[prop_y][:]
ax_molid_list = molid_list[:]
if len(value_x_list) != len(value_y_list):
print(" * ERROR: different length of value lists !")
return
if COLOR_IS_LIST:
color_list = color_list_all[:]
remove_missing_values(value_x_list, value_y_list, ax_molid_list, color_list)
else:
color_list = "r"
remove_missing_values(value_x_list, value_y_list, ax_molid_list)
if len(value_x_list) != len(value_y_list):
print(" * ERROR: different length of value lists after removal of MISSING_VAL !")
return
if COLOR_IS_LIST and len(value_x_list) != len(color_list):
print(" * ERROR: different length of value list and color list after removal of MISSING_VAL !")
return
axes_molid_dict[ax] = ax_molid_list
ax.scatter(value_x_list, value_y_list, s=POINTSIZE, color=color_list, picker=5)
if x_count % 2 == 0:
if y_count == 0:
ax.xaxis.set_label_position("top")
pylab.xlabel(prop_x)
ax.xaxis.set_ticks_position("bottom")
pylab.xticks(fontsize="small", visible=(y_count==num_of_props-1))
else:
if y_count == num_of_props-1:
ax.xaxis.set_label_position("bottom")
pylab.xlabel(prop_x)
ax.xaxis.set_ticks_position("top")
pylab.xticks(fontsize="small", visible=(y_count==0))
if y_count % 2 == 0:
if x_count == 0:
ax.yaxis.set_label_position("left")
pylab.ylabel(prop_y)
ax.yaxis.set_ticks_position("right")
pylab.yticks(fontsize="small", visible=(x_count==num_of_props-1 and y_count != num_of_props-1))
else:
if x_count == num_of_props-1:
ax.yaxis.set_label_position("right")
pylab.ylabel(prop_y)
ax.yaxis.set_ticks_position("left")
pylab.yticks(fontsize="small", visible=(x_count==0))
axes.append(ax)
if COLOR_IS_LIST:
pylab.suptitle(color_coding)
# pylab.text(.5, 0, color_coding, horizontalalignment='center')
pylab.savefig("scatter.png")
if mode == "show":
fig.show()
elif mode == "gui":
return fig, axes_molid_dict
def show_scattermatrix2(sdf_list, sdf_base, fields=None, mode="show"):
"""if fields==None, show a scattermatrix of all available fields in the sdf,
otherwise use the supplied list of fields, e.g.
fields=["n_pic50, "n_hba"]"""
if not PYLAB:
print(" * show_scattermatrix does not work because pylab could not be imported.")
return
if type(sdf_list) != list:
print(" * ERROR: plots are currently only implemented for sdf lists (no files).")
return
props_dict, molid_list = get_num_props_dict(sdf_list, fields)
props_dict_base, molid_list_base = get_num_props_dict(sdf_base, fields)
props_dict_len = len(props_dict)
num_rows = props_dict_len
num_cols = props_dict_len
fig = pylab.figure()
pylab.subplots_adjust(hspace=0, wspace=0)
props_list = props_dict.keys() # to have a defined order
num_of_props = len(props_list)
axes = []
axes_molid_dict = {} # track the displayed molides for each plot (axe)
for y_count, prop_y in enumerate(props_list):
for x_count, prop_x in enumerate(props_list):
if y_count == 0: # first row
if x_count == 0 or x_count == 1: # don't let the top left hist share the y axis
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1)
else:
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1, sharey=axes[1])
else:
if x_count == 0:
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1, sharex=axes[x_count])
elif x_count == y_count:
# don't let the histograms share the y axis
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1, sharex=axes[x_count])
else:
ax = fig.add_subplot(num_rows, num_cols, y_count*num_of_props+x_count+1, sharex=axes[x_count], sharey=axes[y_count*num_of_props])
if prop_x == prop_y:
value_list = props_dict[prop_x][:]
value_list_base = props_dict_base[prop_x][:]
# remove the missing values:
while MISSING_VAL in value_list:
value_list.remove(MISSING_VAL)
while MISSING_VAL in value_list_base:
value_list_base.remove(MISSING_VAL)
ax.hist(value_list_base)
ax.hist(value_list, color="r")
# pylab.yticks(visible=False)
else:
value_x_list = props_dict[prop_x][:]
value_y_list = props_dict[prop_y][:]
ax_molid_list = molid_list[:]
value_x_list_base = props_dict_base[prop_x][:]
value_y_list_base = props_dict_base[prop_y][:]
ax_molid_list_base = molid_list_base[:]
remove_missing_values(value_x_list, value_y_list, ax_molid_list)
remove_missing_values(value_x_list_base, value_y_list_base, ax_molid_list_base)
if len(value_x_list) != len(value_y_list):
print(" * ERROR: different length of value lists !")
if len(value_x_list_base) != len(value_y_list_base):
print(" * ERROR: different length of base value lists !")
color_list = ["b"] * len(value_x_list_base) + ["r"] * len(value_x_list)
axes_molid_dict[ax] = ax_molid_list_base + ax_molid_list
ax.scatter(value_x_list_base+value_x_list, value_y_list_base+value_y_list, s=POINTSIZE, color=color_list, picker=5)
# ax.scatter(value_x_list, value_y_list, s=POINTSIZE, color="r", picker=5)
if x_count % 2 == 0:
if y_count == 0:
ax.xaxis.set_label_position("top")
pylab.xlabel(prop_x)
ax.xaxis.set_ticks_position("bottom")
pylab.xticks(fontsize="small", visible=(y_count==num_of_props-1))
else:
if y_count == num_of_props-1:
ax.xaxis.set_label_position("bottom")
pylab.xlabel(prop_x)
ax.xaxis.set_ticks_position("top")
pylab.xticks(fontsize="small", visible=(y_count==0))
if y_count % 2 == 0:
if x_count == 0:
ax.yaxis.set_label_position("left")
pylab.ylabel(prop_y)
ax.yaxis.set_ticks_position("right")
pylab.yticks(fontsize="small", visible=(x_count==num_of_props-1 and y_count != num_of_props-1))
else:
if x_count == num_of_props-1:
ax.yaxis.set_label_position("right")
pylab.ylabel(prop_y)
ax.yaxis.set_ticks_position("left")
pylab.yticks(fontsize="small", visible=(x_count==0))
axes.append(ax)
# pylab.title(prop[2:])
pylab.savefig("scatter.png")
if mode == "show":
fig.show()
elif mode == "gui":
return fig, axes_molid_dict
def cluster_from_sdf_list(sdf_list, cutoff=0.2):
counter = Counter()
# generate the fingerprints
fp_list = [Chem.GetMorganFingerprintAsBitVect(mol, 3, 1024) for mol in sdf_list]
# second generate the distance matrix:
dists = []
num_of_fps = len(fp_list)
for i in range(1, num_of_fps):
sims = DataStructs.BulkTanimotoSimilarity(fp_list[i],fp_list[:i])
dists.extend([1-x for x in sims])
# now cluster the data:
cluster_idx_list = Butina.ClusterData(dists, num_of_fps, cutoff, isDistData=True)
for cluster in cluster_idx_list:
counter[len(cluster)] += 1
print(" clustersize num_of_clusters")
print(" =========== ===============")
for length in sorted(counter.keys(), reverse=True):
print(" {:4d} {:3d}".format(length, counter[length]))
# return sorted by cluster length
return sorted(cluster_idx_list, cmp=lambda x,y: cmp(len(x), len(y)), reverse=True)
def get_sdf_from_index_list(orig_sdf, index_list):
"""generate sdf_lists after clustering"""
cluster_sdf = [orig_sdf[x] for x in index_list]
return cluster_sdf
def get_sdf_from_id_list(sdf_list_or_file, id_dict_or_list, calc_ex_mw=True):
result_sdf = []
result_id_list = []
if type(sdf_list_or_file) != list and sdf_list_or_file.atEnd(): # sdf is file
print(" * file object is at end, please reload.")
return None
if type(id_dict_or_list) == dict:
add_props = True
id_list = id_dict_or_list.keys()
else:
add_props = False
id_list = id_dict_or_list
print(" > searching...")
for mol_counter_in, mol in enumerate(sdf_list_or_file):
molid = int(mol.GetProp("k_molid"))
if molid in id_dict_or_list:
if calc_ex_mw:
exmw = Desc.ExactMolWt(mol)
mol.SetProp("n_exmolwt", "{:.2f}".format(exmw))
if add_props:
mol.SetProp("s_pos", id_dict_or_list[molid])
result_sdf.append(mol)
result_id_list.append(molid)
if mol_counter_in > 0 and mol_counter_in % 500 == 0:
print("\r processed: {:7d} found: {:6d}".format(mol_counter_in, len(result_id_list)), end="")
sys.stdout.flush()
not_found_list = list(set(id_list).difference(result_id_list))
print("\r > result sdf generated with {} out of {} input records.".format(len(id_list), len(result_id_list)))
return result_sdf, not_found_list
def get_max_act_in_cluster(orig_sdf, cluster, act_prop):
max_act = -1000
for x in cluster:
mol = orig_sdf[x]
try:
value = float(mol.GetProp(act_prop))
except:
print(" * molecule at index {:6d} has not activity prop {}".format(x, act_prop))
continue
if value > max_act:
max_act = value
return max_act
def get_med_act_in_cluster(orig_sdf, cluster, act_prop):
med_act = 0.0
num_of_members = 0
for x in cluster:
mol = orig_sdf[x]
try:
value = float(mol.GetProp(act_prop))
except:
print(" * molecule at index {:6d} has not activity prop {}".format(x, act_prop))
continue
med_act += value
num_of_members += 1
if num_of_members == 0:
num_of_members = 1
return med_act / num_of_members
def analyze_cluster_idx_list(orig_sdf, cluster_idx_list, mode="remove_singletons", act_prop=None):
"""available modes:
remove_singletons: remove clusters with only one member
ind_activity: sort the clusters by the member with the highest activity
med_activity: sort the cluster by their medium activity
a NEW cluster_idx_list is returned, use get_sdf_from_index_list to generate a sdf from it"""
if act_prop:
print(" > sorting cluster members on {}...".format(act_prop), end="")
cluster_idx_list_sorted = []
for cluster in cluster_idx_list:
cluster_dict = {}
# map the position in the orig_sdf to the molid
sdf_list = get_sdf_from_index_list(orig_sdf, cluster)
for pos, idx in enumerate(cluster):
mol = sdf_list[pos]
cluster_dict[int(mol.GetProp("k_molid"))] = idx
sort_sdf(sdf_list, act_prop, reverse=True)
cluster_sorted = [cluster_dict[int(mol.GetProp("k_molid"))] for mol in sdf_list]
cluster_idx_list_sorted.append(cluster_sorted)
cluster_idx_list = cluster_idx_list_sorted
print(" done.")
if mode == "remove_singletons":
new_idx_list = [x for x in cluster_idx_list if len(x) > 1]
print(" > removed {} singletons from cluster list".format(len(cluster_idx_list) - len(new_idx_list)))
print(" the resulting list has {} clusters".format(len(new_idx_list)))
return new_idx_list
if mode == "ind_activity":
new_idx_list = sorted(cluster_idx_list, cmp=lambda x,y: cmp(get_max_act_in_cluster(orig_sdf, x, act_prop), get_max_act_in_cluster(orig_sdf, y, act_prop)), reverse=True)
print(" > max ind_activity and number of members in the first ten clusters:")
print(" index ind_act #members")
print(" ===== ======= ========")
for cl_counter, cluster in enumerate(new_idx_list):
print(" {:2d} {:6.1f} {:3d}".format(cl_counter, get_max_act_in_cluster(orig_sdf, cluster, act_prop), len(cluster)))
if cl_counter >= 9:
break
return new_idx_list
if mode == "med_activity":
new_idx_list = sorted(cluster_idx_list, cmp=lambda x,y: cmp(get_med_act_in_cluster(orig_sdf, x, act_prop), get_med_act_in_cluster(orig_sdf, y, act_prop)), reverse=True)
print(" > max med_activity in the first ten clusters:")
for cl_counter, cluster in enumerate(new_idx_list):
print("{}: {} ".format(cl_counter, get_med_act_in_cluster(orig_sdf, cluster, act_prop)), end="")
if cl_counter >= 9:
break
print()
return new_idx_list
print(" * unsupported mode.")
def write_clusters_as_sdf(orig_sdf, cluster_idx_list, basename="cluster"):
"""write every cluster in the index list as individual sdf file"""
basename = basename.split(".")[0]
for counter, cluster in enumerate(cluster_idx_list):
num_of_clusters = len(cluster_idx_list)
sdf_list = get_sdf_from_index_list(orig_sdf, cluster)
write_sdf(sdf_list, "{}_{:03d}_{:03d}.sdf".format(basename, counter, num_of_clusters-1))
def write_cluster_report(orig_sdf, cluster_idx_list, captionprop, reportname="cluster"):
intro = "<html>\n<head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"css/style.css\" />\n <title>ClusterViewer</title>\n</head>\n<body>\n<table width=\"\" cellspacing=\"1\" cellpadding=\"1\" border=\"1\" align=\"\" height=\"60\" summary=\"\">\n<tbody>"
extro = "</tbody>\n</table>\n</body>\n</html>"
filename = op.join(REPORT_FOLDER, "html", reportname + ".htm")
f = open(filename, "w")
f.write(intro)
for counter, cluster in enumerate(cluster_idx_list):
num_of_clusters = len(cluster_idx_list)
sdf_list = get_sdf_from_index_list(orig_sdf, cluster)
img = Draw.MolsToGridImage(sdf_list, molsPerRow=4, legends=[m.GetProp(captionprop) for m in sdf_list])
img_filename = "img/{}_{:03d}.png".format(reportname, counter)
img.save("html/"+img_filename)
hist_filename = "img/{}_{:03d}_hist.png".format(reportname, counter)
show_hist(sdf_list, fields=[captionprop], show=False, savefig=False)
pylab.savefig("html/"+hist_filename)
f.write(" <tr><td>\n<br><b>Cluster {:03d}:</b></td><td></td></tr>\n<tr><td><img src=\"{}\" alt=\"icon\" /></td><td><img src=\"{}\" alt=\"icon\" /></td></tr>\n".format(counter, img_filename, hist_filename))
f.write(extro)
f.close()
def neutralize_mol(mol, pattern=None, idprop="k_molid", show=False):
if not pattern:
pattern = (
# Imidazoles
('[n+;H]','n'),
# Amines
('[N+;!H0]','N'),
# Carboxylic acids and alcohols
('[$([O-]);!$([O-][#7])]','O'),
# Thiols
('[S-;X1]','S'),
# Sulfonamides
('[$([N-;X2]S(=O)=O)]','N'),
# Enamines
('[$([N-;X2][C,N]=C)]','N'),
# Tetrazoles
('[n-]','[nH]'),
# Sulfoxides
('[$([S-]=O)]','S'),
# Amides
('[$([N-]C=O)]','N'),
)
reactions = [(Chem.MolFromSmarts(x), Chem.MolFromSmiles(y, False)) for x, y in pattern]
old_smiles = Chem.MolToSmiles(mol)
replaced = False
for reactant, product in reactions:
while mol.HasSubstructMatch(reactant):
replaced = True
rms = Chem.ReplaceSubstructs(mol, reactant, product)
mol = rms[0]
if replaced:
new_smiles = Chem.MolToSmiles(mol)
mol_props = list(mol.GetPropNames())
props_dict = {prop: mol.GetProp(prop) for prop in mol_props}
mol = Chem.MolFromSmiles(new_smiles)
for prop in props_dict:
mol.SetProp(prop, props_dict[prop])
calc_props_in_mol(mol, include_date=False)
if show:
if mol.HasProp(idprop):
molid = mol.GetProp(idprop)
else:
molid = ""
print(" {:5d}: {:15s} --> {:15s}".format(molid, old_smiles, new_smiles))
if IPYTHON:
display_png(mol)
return mol, replaced
def neutralize_sdf(sdf_list_or_file, idprop="k_molid", show=False):
"""returns: neutral_sdf::list<mol>, neutralized_molids::list<int>
neutral_sdf: new sdf with all input mols, where the salts have been neutralized
neutralized_molids: list with the neutralized molids"""
neutral_sdf = []
neutralized_molids = []
counter_in = 0
counter_out = 0
if type(sdf_list_or_file) != list and sdf_list_or_file.atEnd(): # sdf is file
print(" * file object is at end, please reload.")
return None
print(" > neutralizing...")
for mol in sdf_list_or_file:
counter_in += 1
new_mol, replaced = neutralize_mol(mol, idprop=idprop, show=show)
if replaced:
counter_out += 1
neutralized_molids.append(int(mol.GetProp("k_molid")))
neutral_sdf.append(new_mol)
print(" neutralizing finished:")
print(" in: {:5d}".format(counter_in))
print(" neutralized: {:5d}".format(counter_out))
return neutral_sdf, neutralized_molids
def mol_grid(sdf_list, props, fn="img/grid.png"):
"""Draw a molecule grid from the input <sdf_list>. On IPython, an inline graphics will be returned
in addition to writing the image to <fn>.
The given sdf <props> (as a list) will be concatenated to the molecules' legends."""
if type(props) != list:
props = [props]
legends = []
for mol in sdf_list:
leg = [mol.GetProp(prop) for prop in props]
leg_str = "_".join(leg)
legends.append(leg_str)
img = Draw.MolsToGridImage(sdf_list, molsPerRow=5, subImgSize=(200, 200), legends=legends)
img.save(fn)
if IPYTHON:
return img
| [
"axelpahl@gmx.de"
] | axelpahl@gmx.de |
80c084fbb4031808a343ceeb3a1008829761814d | cbab0c479f3e62c52a9176ea7a77905194955c29 | /opself.py | 7749246045b76f7f879130c3e91d5c73b0478a9e | [] | no_license | BNBLACK/TEAM | 5594022d54ea1f1299ef68fb784cccdc902f237a | 6b31e156b30cc65feebcc836b2499d78dc937ecd | refs/heads/master | 2021-09-03T20:15:37.037942 | 2018-01-11T17:37:01 | 2018-01-11T17:37:01 | 117,129,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128,363 | py | # -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re
# ----------------------------------------------------------------------
# Account bootstrap: one primary client ("cl") plus nine helper bots,
# each logged in with a hard-coded LINE auth token.
# NOTE(review): embedding long-lived auth tokens in source is a serious
# security risk -- anyone with this file controls all ten accounts.
# Move the tokens to environment variables or an untracked config file.
# ----------------------------------------------------------------------
cl = LINETCR.LINE() # Main account
cl.login(token="EoFtpQvJto4RyahKbGo2.LLIy/AtXKNKKpuyeC/gJKG.sSKrWha5PiSso9Sza3ZbS2nDvU4LY8+NHMxgftuSI1Q=")
cl.loginResult()
ki = LINETCR.LINE() #Bot 1
ki.login(token="Eoy9olRQDLxhsfxOg2O3.juggmkkvlw6zSUXT/gRP0W.LzcEOstPR7llI5GUY7gxOj3K3b4AxXmIXoWYB9472LA=")
ki.loginResult()
kk = LINETCR.LINE() #Bot 2
kk.login(token="EoIo6e1l8mFvPebTSDyf.fXeD3+rOHjA3psoytyYx7W.+g0VdrtrUSc1xTJ/PKmDMyLYRepzIIkmHtyJS9jMFIE=")
kk.loginResult()
kc = LINETCR.LINE() #Bot 3
kc.login(token="EocDuLPWiOcoGSeulHcd.+YJQegCzmlp/DpdIuBUA7q.oJA5gz6VQ1mH9Uq38lzbJv6rYYFUQ4HY5vjVUpykOOc=")
kc.loginResult()
ks = LINETCR.LINE() #Bot 4
ks.login(token="EolbkklOPzoMojcnxnU2.8ZPbh5EZsGVtQaVVMZTemG.Y8/yrQauoMP6eoi8seGflxSTBeaDyuxbqwGbBhVTlRM=")
ks.loginResult()
ka = LINETCR.LINE() #Bot 5
ka.login(token="Eou5pkqtH7x1ajXzgIfb.T+PTxkW781eTVFAaiqIDgW.o/TNJ36QwTpWP/IHWiQ8DpWgNQTHGMxnwsMWEnllqFA=")
ka.loginResult()
kb = LINETCR.LINE() #Bot 6
kb.login(token="EouFgC5AsSBkGai2vl7f.5c3HczbPOjO8q9e33MwtRW.Nx10VJZHl/fjQoxn39AeSy77YxzKcznO0AKO/27Vauk=")
kb.loginResult()
ko = LINETCR.LINE() #Bot 7
ko.login(token="EoqbKZ2imMLZ5Kf5OTG2.LoKC+gOX+KZYr18Tp2m1KG.3jSSEZFmJnlRnEOgQHqOKhW4BOH7gLUuJPhPCl2hq8E=")
ko.loginResult()
ke = LINETCR.LINE() #Bot 8
ke.login(token="Eo8njXA9Ul8ZUMbZeCGc.nzI7ft5uCiGAf8fFerTwpa.sokDs5a6BcwWzy3P11g2TdOVx113lTb1+edaHXsO2O0=")
ke.loginResult()
ku = LINETCR.LINE() #Bot 9
ku.login(token="EoNczGezSaxz4HZyWaQ6.qdoJ0H7AQUeDXUofPn+ybG.xlSeu6s4weMoojhxsFzoknTOyNenSccJ/Mo9VdUYYXc=")
ku.loginResult()
print "login success plak"
# Python 2 only: force the process-wide default string encoding to UTF-8
# so implicit str<->unicode conversions of LINE display names don't raise.
# NOTE(review): reload(sys)/setdefaultencoding is a known hack with
# process-wide side effects; it does not exist in Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage =""" ^[Bot One Piece]^
OWNER ™Koplaxs™"""
Setgroup ="""
[Admin Menu]
===================
||[Protect QR]
||- Qr on/off
||[Protect Join]
||- Join on/off
||[Mid Via Contact]
||- Contact on/off
||-[Cancel Invited]
||- Cancel all
===================
BOT PROTECT
==================="""
# All ten logged-in clients; the handler picks one at random
# (random.choice(KAC)) when kicking/inviting so actions are spread
# across accounts.
KAC=[cl,ki,kk,kc,ks,ka,kb,ko,ke,ku]
# Subset of helper bots (5-9); presumably a reserve "defense" roster --
# not referenced in the visible portion of the handler, TODO confirm.
DEF=[ka,kb,ko,ke,ku]
# Cache each account's own mid (user id) for self-recognition checks.
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = ks.getProfile().mid
Emid = ka.getProfile().mid
Fmid = kb.getProfile().mid
Gmid = ko.getProfile().mid
Hmid = ke.getProfile().mid
Imid = ku.getProfile().mid
# Every bot mid in one list -- used to whitelist the bots' own actions
# in the protection handlers.
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid]
# Hard-coded admin/owner mids exempt from protection and invited back
# when a bot is kicked.
admin=["ued156c86ffa56024c0acba16f7889e6d","u457cf410aa13c3d8b6cf2f9ddf5cdb20","u5ddba0b0366f32d148979ef879edf1f0"]
owner=["ued156c86ffa56024c0acba16f7889e6d"]
# Mutable runtime configuration shared with the operation handler bot().
# Grounded uses (from the handler below): autoAdd/message (op.type 5,
# add-back + greeting), Protectgr (op.type 11, QR-open kick), autoJoin/
# autoCancel/Protectjoin/Protectcancl (op.type 13, invite handling),
# blacklist (op.type 19, kick retaliation).  The remaining keys (contact,
# timeline, comment*, cName*, clock, ...) are not referenced in the
# visible code -- presumably consumed by chat-command handlers further
# down; TODO confirm.
# NOTE(review): the handler also reads wait["whitelist"], which is never
# seeded here -- that lookup will raise KeyError; confirm and add the key.
wait = {
    'contact':False,
    'autoJoin':True,              # auto-accept group invitations
    'autoCancel':{"on":True,"members":1},  # reject invites to groups with <= this many members
    'leaveRoom':True,
    'timeline':True,
    'autoAdd':True,               # add back anyone who adds the primary account
    'message':"Thanks for add me",   # greeting sent after an auto add-back
    "lang":"JP",
    "comment":"Thanks for add me",
    "commentOn":False,
    "commentBlack":{},
    "wblack":False,
    "dblack":False,
    "clock":False,
    "cName":" ",
    "cName2":"Luffy ",
    "cName3":"Zorro ",
    "cName4":"Sanji ",
    "cName5":"Ussop ",
    "cName6":"Chooper ",
    "cName7":"Franky ",
    "cName8":"Brook ",
    "cName9":"Nami ",
    "cName10":"Robin ",
    "blacklist":{},               # mids auto-kicked / re-blacklisted by the op.type 19 handler
    "wblacklist":True,
    "dblacklist":True,
    "Protectgr":True,             # kick non-bot/non-admin users who open the group QR (op.type 11)
    "Protectjoin":True,           # kick users who join via ticket (op.type 13 path)
    "Protectcancl":True,          # auto-cancel pending invitations made by outsiders
    "protectionOn":True,
    "atjointicket":True
    }
# Read-receipt tracking state (set-read-point feature); keyed by group id.
wait2 = {
    'readPoint':{},
    'readMember':{},
    'setTime':{},
    'ROM':{}
    }
# Alias so setTime and wait2['setTime'] refer to the same dict
# (the first assignment is immediately overwritten).
setTime = {}
setTime = wait2['setTime']
# Per-recipient outgoing-message sequence counters.  The original code
# incremented messageReq without ever defining it (NameError).
messageReq = {}

def sendMessage(to, text, contentMetadata=None, contentType=0):
    """Build a LINE Message addressed to ``to`` and bump its sequence counter.

    Fixes over the original: the sender mid is taken from the module-level
    ``mid`` (the original read the undefined global ``profile``), the
    mutable-default ``contentMetadata={}`` is replaced with a ``None``
    sentinel, and the built message is returned instead of being silently
    discarded (the original neither sent nor returned it).

    Args:
        to: mid of the target user, room, or group.
        text: message body.
        contentMetadata: optional dict of extra content metadata.
        contentType: LINE content-type code (0 = plain text).

    Returns:
        The populated Message object; the caller is responsible for
        actually dispatching it through a client.
    """
    mes = Message()
    mes.to, mes.from_ = to, mid
    mes.text = text
    mes.contentType = contentType
    mes.contentMetadata = contentMetadata if contentMetadata is not None else {}
    # Sequence starts at 0 for the first message to each recipient.
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
    return mes
#def NOTIFIED_READ_MESSAGE(op):
#try:
#if op.param1 in wait2['readPoint']:
#Name = cl.getContact(op.param2).displayName
#if Name in wait2['readMember'][op.param1]:
#pass
#else:
#wait2['readMember'][op.param1] += "\n・" + Name
#wait2['ROM'][op.param1][op.param2] = "・" + Name
#else:
#pass
#except:
#pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if op.param2 not in Bots and admin:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).updateGroup(G)
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Njiiir")
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
if op.param2 not in Bots and admin:
group = ka.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
#------Cancel Invite User Finish------#
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = Amid.getGroup(op.param1)
G.preventJoinByTicket = False
Amid.updateGroup(G)
Ticket = Amid.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
Amid.updateGroup(G)
Ticket = Amid.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Amid:
X = ki.getGroup(op.param1)
X.preventJoinByTicket = False
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in Dmid:
X = ks.getGroup(op.param1)
X.preventJoinByTicket = False
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ka.updateGroup(X)
Ti = ka.reissueGroupTicket(op.param1)
if op.param3 in Fmid:
if op.param2 in Emid:
X = ka.getGroup(op.param1)
X.preventJoinByTicket = False
ka.updateGroup(X)
Ti = ka.reissueGroupTicket(op.param1)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
if op.param3 in Gmid:
if op.param2 in Fmid:
X = kb.getGroup(op.param1)
X.preventJoinByTicket = False
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ko.updateGroup(X)
Ti = ko.reissueGroupTicket(op.param1)
if op.param3 in Hmid:
if op.param2 in Gmid:
X = ko.getGroup(op.param1)
X.preventJoinByTicket = False
ko.updateGroup(X)
Ti = ko.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
if op.param3 in Imid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ku.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.type == 13:
print op.param1
print op.param2
print op.param3
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
#------Joined User Kick start------#
if op.type == 13: #awal 17 ubah 13
if wait["Protectjoin"] == True:
if op.param2 not in admin and Bots: # Awalnya admin doang
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------Joined User Kick start------#
if op.type == 19:
if op.param2 not in Bots and admin: #AWAL DEF BUKAN KAC dan Bots
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
else:
pass
if op.type == 19:
if op.param3 in Bots and admin: #awal nya admin aja
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,admin)
else:
pass
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots and admin:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots and admin:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = ks.getGroup(op.param1)
G.preventJoinByTicket = True
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots and admin:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = ka.getGroup(op.param1)
G.preventJoinByTicket = True
ka.updateGroup(G)
Ticket = ka.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Fmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = kb.getGroup(op.param1)
G.preventJoinByTicket = True
kb.updateGroup(G)
Ticket = kb.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = ko.getGroup(op.param1)
G.preventJoinByTicket = True
ko.updateGroup(G)
Ticket = ko.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because it is not in the group or Because it does not exist in the group \n["+op.param1+"]\nOf\n["+op.param2+"]\n I could not kick \n Add it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
ka.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
G = ke.getGroup(op.param1)
G.preventJoinByTicket = True
ke.updateGroup(G)
Ticket = ke.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Key","help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Admin menu"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Luffy gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("Zorro gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv2 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("Sanji gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
elif "Luffy kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_second kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "Zorro kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_third kick ","")
kk.kickoutFromGroup(msg.to,[midd])
elif "Sanji kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("_fourth kick ","")
kc.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "Luffy invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("sinvite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "Zorro invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("tinvite ","")
kk.findAndAddContactsByMid(midd)
kk.inviteIntoGroup(msg.to,[midd])
elif "Zorro invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("finvite ","")
kc.findAndAddContactsByMid(midd)
kc.inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
elif "Admin add @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = ka.getGroup(msg.to)
gs = kb.getGroup(msg.to)
gs = ku.getGroup(msg.to)
gs = ke.getGroup(msg.to)
gs = ko.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Admin remove @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = ka.getGroup(msg.to)
gs = kb.getGroup(msg.to)
gs = ku.getGroup(msg.to)
gs = ke.getGroup(msg.to)
gs = ko.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "||Admin One Piece Bot||\n=====================\n"
for mi_d in admin:
mc += "••>" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#--------------------------------------
elif msg.text in ["Bot?"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
ka.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Fmid}
kb.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Gmid}
ko.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Hmid}
ke.sendMessage(msg)
elif msg.text in ["Me"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
random.choice(KAC).sendMessage(msg)
elif msg.text in ["Cv2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
elif msg.text in ["愛�プレゼント","Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["愛�プレゼント","All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Op cancel","Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = k3.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
k3.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"No one is inviting")
else:
k3.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"Can not be used outside the group")
else:
k3.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Buka qr","Open qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"QR Sudah Dibuka")
else:
random.choice(KAC).sendText(msg.to,"Sudah Terbuka Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
elif msg.text in ["Luffy buka qr","Luffy open qr"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro buka qr","Zorro open qr"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Plak")
else:
kk.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji open qr","Sanji buka qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Tutup qr","Close qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
elif msg.text in ["Luffy close qr","Luffy tutup qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro tutup qr","Zorro close qr"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Plak")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji tutup qr","Sanji close qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "Id Group" == msg.text:
if msg.from_ in admin:
kk.sendText(msg.to,msg.to)
elif "My mid" == msg.text:
if msg.from_ in admin:
random.choice(KAC).sendText(msg.to, msg.from_)
elif "Mid Bot" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
ka.sendText(msg.to,Emid)
kb.sendText(msg.to,Fmid)
ko.sendText(msg.to,Gmid)
ke.sendText(msg.to,Hmid)
elif "Koplaxs" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
elif "Luffy" == msg.text:
if msg.from_ in admin:
ki.sendText(msg.to,Amid)
elif "Zorro" == msg.text:
if msg.from_ in admin:
kk.sendText(msg.to,Bmid)
elif "Sanji" == msg.text:
if msg.from_ in admin:
kc.sendText(msg.to,Cmid)
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Cn "]:
if msg.from_ in admin:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Cv1 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Cv2 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Mc "]:
if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["Joinn on","joinn on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Joinn off","joinn off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel All Invited Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Send Contact Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オン","Join on","Auto join:on","自動å�ƒåŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オフ","Join off","Auto join:off","自動å�ƒåŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒�。�时开请指定人数��")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的�组用自动邀请拒�")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["共有:オフ","Share off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Status","Sbot"]:
if msg.from_ in admin:
md = ""
if wait["Protectjoin"] == True: md+="lock Block Join\n"
else: md+=" Block Join Off\n"
if wait["Protectgr"] == True: md+="lock Block Group\n"
else: md+=" Block Group Off\n"
if wait["Protectcancl"] == True: md+="lock Cancel All Invited\n"
else: md+=" Cancel All Invited Off\n"
if wait["contact"] == True: md+=" Contact : on\n"
else: md+=" Contact : off\n"
if wait["autoJoin"] == True: md+=" Auto join : on\n"
else: md +=" Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel : off\n"
if wait["leaveRoom"] == True: md+=" Auto leave : on\n"
else: md+=" Auto leave : off\n"
if wait["timeline"] == True: md+=" Share : on\n"
else:md+=" Share : off\n"
if wait["autoAdd"] == True: md+=" Auto add : on\n"
else:md+=" Auto add : off\n"
if wait["commentOn"] == True: md+=" Comment : on\n"
else:md+=" Comment : off\n"
cl.sendText(msg.to,md)
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "album removeat’" in msg.text:
gid = msg.text.replace("album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•�候語確èª�"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é �留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","Comment off","comment off","自動首é �留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Comment","留言確�"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Jam on/off Start-------------------#
elif msg.text in ["Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
kc.sendText(msg.to,"Bot 4 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
kc.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
kc.sendText(msg.to,"Jam Sedang Off")
#-------------Fungsi Jam on/off Finish-------------------#
#-------------Fungsi Change Clock Start------------------#
elif msg.text in ["Change clock"]:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-------------Fungsi Change Clock Finish-----------------#
#-------------Fungsi Jam Update Start---------------------#
elif msg.text in ["Jam Update"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Sukses update")
else:
kc.sendText(msg.to,"Aktifkan jam terlebih dulu")
#-------------Fungsi Jam Update Finish-------------------#
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Cek CCTV")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "Ciduk":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "||Di Read Oleh||%s\n||By : Koplaxs BOT||\n\n>Pelaku CCTV<\n%s-=CCTV=-\n•Bintitan\n•Panuan\n•Kurapan\n•Kudisan\n\nAmiin Ya Allah\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Cctv dulu Koplak\nBaru Ketil Ciduk\nDASAR PIKUN ♪")
#-----------------------------------------------
#-----------------------------------------------
#----------------Fungsi Join Group Start-----------------------#
elif msg.text in ["Join all","Bot join"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ka.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kb.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ko.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ke.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ku.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "Semua Sudah Lengkap"
G.preventJoinByTicket(G)
cl.updateGroup(G)
elif msg.text in ["Kampret join"]:
if msg.form_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["Luffy join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Zorro join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Sanji Join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#----------------------Fungsi Join Group Finish---------------#
#-------------Fungsi Leave Group Start---------------#
elif msg.text in ["Bye op","Kabur all","Kaboor all"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
ka.leaveGroup(msg.to)
kb.leaveGroup(msg.to)
ko.leaveGroup(msg.to)
ke.leaveGroup(msg.to)
ku.leaveGroup(msg.to)
####cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye zorro"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye sanji"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Ussop"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe3"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
#-------------Fungsi Leave Group Finish---------------#
#-------------Fungsi Tag All Start---------------#
elif msg.text in ["Tag all","Tagall"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
#-------------Fungsi Tag All Finish---------------#
#----------------Fungsi Banned Kick Target Start-----------------------#
elif msg.text in ["Kill "]:
if msg.from_ in admin:
if msg.toType == 2:
group = random.choice(KAC).getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Selamat tinggal")
random.choice(KAC).sendText(msg.to,"Jangan masuk lagidevil smile")
return
for jj in matched_list:
try:
klist=[cl,ki,kk,kc,ks,ka,kb,ku,ke,ko]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#----------------Fungsi Banned Kick Target Finish----------------------#
elif "Salam" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Salam","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
random.choice(KAC).sendText(msg.to,"maaf kalo gak sopan")
random.choice(KAC).sendText(msg.to,"makasih semuanya..")
random.choice(KAC).sendText(msg.to,"hehehhehe")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
random.choice(KAC).sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
if target not in admin and Bots:
try:
klist=[cl,ki,kk,kc,ks,ka,kb,ku,ke,ko]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).sendText(msg.to,"Sorry Brader")
random.choice(KAC).sendText(msg.to,"Sorry Sister")
random.choice(KAC).sendText(msg.to,"No Baper")
#----------------Fungsi Kick User Target Start----------------------#
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
klist=[cl,ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).sendText(msg.to,"Kasian Di Kick....")
random.choice(KAC).sendText(msg.to,"Hehehe")
#----------------Fungsi Kick User Target Finish----------------------#
elif "Blacklist @ " in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = random.choice(KAC).getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Succes Plak")
except:
random.choice(KAC).sendText(msg.to,"error")
#----------------Fungsi Banned User Target Start-----------------------#
elif "Banned @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
ki.sendText(msg.to,"Dilarang Banned Bot")
kk.sendText(msg.to,"Dilarang Banned Bot")
kc.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Akun telah sukses di banned")
except:
random.choice(KAC).sendText(msg.to,"Error")
#----------------Fungsi Banned User Target Finish-----------------------#
#----------------Mid via Tag--------------
elif "Mid @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
#----------------Fungsi Unbanned User Target Start-----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
kk.sendText(msg.to,"Tidak Ditemukan.....")
kc.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
#----------------Fungsi Unbanned User Target Finish-----------------------#
#-------------Fungsi Spam Start---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
#-------------Fungsi Spam Finish---------------------#
#-------------Fungsi Broadcast Start------------#
elif "Bc " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bc ","")
a = cl.getGroupIdsJoined()
a = ki.getGroupIdsJoined()
a = kk.getGroupIdsJoined()
a = kc.getGroupIdsJoined()
a = ks.getGroupIdsJoined()
a = ka.getGroupIdsJoined()
a = ku.getGroupIdsJoined()
a = ke.getGroupIdsJoined()
a = ko.getGroupIdsJoined()
a = kb.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
kk.sendText(taf, (bctxt))
kc.sendText(taf, (bctxt))
ks.sendText(taf, (bctxt))
ka.sendText(taf, (bctxt))
kb.sendText(taf, (bctxt))
ke.sendText(taf, (bctxt))
ku.sendText(taf, (bctxt))
ko.sendText(taf, (bctxt))
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["LG"]:
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
#--------------List Group------------
#------------ Keluar Dari Semua Group------
elif msg.text in ["Bot out","Op bye"]:
if msg.from_ in owner:
#gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = kk.getGroupIdsJoined()
gid = kc.getGroupIdsJoined()
gid = ks.getGroupIdsJoined()
gid = ka.getGroupIdsJoined()
gid = kb.getGroupIdsJoined()
gid = ko.getGroupIdsJoined()
gid = ke.getGroupIdsJoined()
gid = ku.getGroupIdsJoined()
for i in gid:
ku.leaveGroup(i)
ke.leaveGroup(i)
ko.leaveGroup(i)
kb.leaveGroup(i)
ka.leaveGroup(i)
ks.leaveGroup(i)
kc.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
#cl.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sayonara")
else:
cl.sendText(msg.to,"He declined all invitations")
#------------------------End---------------------
#-----------------End-----------
elif msg.text in ["Op katakan hi"]:
ki.sendText(msg.to,"Hi buddy Har Har")
kk.sendText(msg.to,"Hi buddy Har Har")
kc.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["Cv say hinata pekok"]:
ki.sendText(msg.to,"Hinata pekok Har Har")
kk.sendText(msg.to,"Hinata pekok Har Har")
kc.sendText(msg.to,"Hinata pekok Har Har")
elif msg.text in ["Cv say didik pekok"]:
ki.sendText(msg.to,"Didik pekok Har Har")
kk.sendText(msg.to,"Didik pekok Har Har")
kc.sendText(msg.to,"Didik pekok Har Har")
elif msg.text in ["Cv say bobo ah","Bobo dulu ah"]:
ki.sendText(msg.to,"Have a nice dream Cv Har Har")
kk.sendText(msg.to,"Have a nice dream Cv Har Har")
kc.sendText(msg.to,"Have a nice dream Cv Har Har")
elif msg.text in ["Cv say chomel pekok"]:
ki.sendText(msg.to,"Chomel pekok Har Har")
kk.sendText(msg.to,"Chomel pekok Har Har")
kc.sendText(msg.to,"Chomel pekok Har Har")
elif msg.text in ["#welcome"]:
ki.sendText(msg.to,"Selamat datang di Group Kami")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
#-------------Fungsi Respon Start---------------------#
elif msg.text in ["Respon","respon","Respon Dong","respon dong"]:
if msg.from_ in admin:
#cl.sendText(msg.to,"Luffy On")
ki.sendText(msg.to,"•••")
kk.sendText(msg.to,"••••")
kc.sendText(msg.to,"•••••")
ks.sendText(msg.to,"••••••")
ka.sendText(msg.to,"•••••••")
kb.sendText(msg.to,"••••••••")
ko.sendText(msg.to,"•••••••••")
ke.sendText(msg.to,"••••••••••")
ku.sendText(msg.to,"•••••••••••")
ku.sendText(msg.to,"Respon Complete")
#-------------Fungsi Respon Finish---------------------#
#-------------Fungsi Balesan Respon Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah questioning")
#-------------Fungsi Balesan Respon Finish---------------------#
#-------------Fungsi Speedbot Start---------------------#
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Wait...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sDetik" % (elapsed_time))
#-------------Fungsi Speedbot Finish---------------------#
#-------------Fungsi Banned Send Contact Start------------------#
elif msg.text in ["Ban"]:
if msg.from_ in owner:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
elif msg.text in ["Unban"]:
if msg.from_ in owner:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
#-------------Fungsi Banned Send Contact Finish------------------#
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u457cf410aa13c3d8b6cf2f9ddf5cdb20'}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu Creator Kami")
#-------------Fungsi Chat ----------------
elif msg.text in ["Woy","woy","Woi","woi","bot","Bot"]:
quote = ['Istri yang baik itu Istri yang Mengizinkan Suaminya untuk Poligami 😂😂😂.','Kunci Untuk Bikin Suami Bahagia itu cuma satu..\nIzinkan Suamimu Untuk Selingkuh Coyyy ','Ah Kupret Lu','Muka Lu Kaya Jamban','Ada Orang kah disini?','Sange Euy','Ada Perawan Nganggur ga Coy?']
psn = random.choice(quote)
cl.sendText(msg.to,psn)
#-------------Fungsi Bannlist Start------------------#
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada Akun Terbanned")
else:
random.choice(KAC).sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Bannlist Finish------------------#
elif msg.text in ["Cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
random.choice(KAC).sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random: " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "albumat'" in msg.text:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecat'" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecat'","")
cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#---------CCTV-----------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n[•]" + Name
wait2['ROM'][op.param1][op.param2] = "[•]" + Name
else:
cl.sendText
except:
pass
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
random.choice(KAC).sendText(op.param1, "Selamat Datang Di Grup " + str(ginfo.name))
random.choice(KAC).sendText(op.param1, "Founder Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
random.choice(KAC).sendText(op.param1,"Budayakan Baca Note !!! yah Ka 😊\nSemoga Betah Kk 😘")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if op.param2 in Bots:
return
random.choice(KAC).sendText(op.param1, "Good Bye Kaka")
print "MEMBER HAS LEFT THE GROUP"
#------------------------
if op.type == 59:
print op
except Exception as error:
print error
def a2():
    """Return True except when the current minute is a multiple of 10.

    Used as a spin-wait predicate: callers loop while a2() is True, i.e.
    until the wall clock reaches a :00/:10/:20/... minute boundary.
    """
    now2 = datetime.now()
    nowT = datetime.strftime(now2, "%M")
    # BUG FIX: "%M" formats to a 2-character string, so the original
    # nowT[14:] was always "" -- never in the list -- and the function
    # unconditionally returned True. Test the minute string directly.
    if nowT in ["10", "20", "30", "40", "50", "00"]:
        return False
    else:
        return True
def nameUpdate():
    """Background worker: periodically push the configured display names
    to every bot account while wait["clock"] is enabled.

    Runs forever; intended to be started as a daemon thread.
    """
    while True:
        try:
            if wait["clock"] == True:
                # (client, wait-dict key) pairs; order matches the original
                # cl..ku chain of hand-unrolled updates.
                # BUG FIX: the original passed the undefined name `profile5a`
                # to ks.updateProfile, raising NameError every cycle -- which
                # the bare except swallowed, so accounts 5-10 were silently
                # never renamed.
                targets = [(cl, "cName"), (ki, "cName2"), (kk, "cName3"),
                           (kc, "cName4"), (ks, "cName5"), (ka, "cName6"),
                           (kb, "cName7"), (ko, "cName8"), (ke, "cName9"),
                           (ku, "cName10")]
                for client, key in targets:
                    profile = client.getProfile()
                    profile.displayName = wait[key]
                    client.updateProfile(profile)
            # BUG FIX: the original only slept inside the if-block, so with
            # wait["clock"] False the loop busy-waited at 100% CPU.
            time.sleep(600)
        except:
            # best-effort: network/API hiccups must not kill the thread
            pass
# Launch the display-name refresher in the background; daemon so it dies
# with the main process.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()

# Main polling loop: fetch up to 5 pending operations per round and hand
# each one to the bot() dispatcher, advancing the poll revision as we go.
while True:
    try:
        pending = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for operation in pending:
        if operation.type == OpType.END_OF_OPERATION:
            continue
        cl.Poll.rev = max(cl.Poll.rev, operation.revision)
        bot(operation)
| [
"noreply@github.com"
] | BNBLACK.noreply@github.com |
b10fe46da3a50affbd28dd5f9b3c06391bce5a7a | 4670031febb4d97d76212f0cba0e6a6c55730ffe | /tests/plugins/filter/test_ipaddrs_in_ranges.py | 6357d93b7f9bbef855769dae2673a1d2b091452e | [
"Apache-2.0"
] | permissive | moussdia/ceph-ansible | 8039a050d93f073a93a137fd84e362a172351fee | 6dcfdf17d43635fcd0dc658c199702945a1228dd | refs/heads/master | 2022-12-18T19:57:30.390070 | 2020-09-18T14:03:13 | 2020-09-18T15:14:00 | 298,010,982 | 1 | 0 | Apache-2.0 | 2020-09-23T15:18:35 | 2020-09-23T15:18:34 | null | UTF-8 | Python | false | false | 1,525 | py | import sys
sys.path.append('./plugins/filter')
import ipaddrs_in_ranges
filter_plugin = ipaddrs_in_ranges.FilterModule()
class TestIpaddrsInRanges(object):
    """Unit tests for the ips_in_ranges filter plugin."""

    def test_one_ip_one_range(self):
        """An address inside the only range is returned."""
        ips = ['10.10.10.1']
        ranges = ['10.10.10.1/24']
        kept = filter_plugin.ips_in_ranges(ips, ranges)
        assert len(kept) == 1
        assert ips[0] in kept

    def test_two_ip_one_range(self):
        """Only the address matching the single range is kept."""
        ips = ['192.168.1.1', '10.10.10.1']
        ranges = ['10.10.10.1/24']
        kept = filter_plugin.ips_in_ranges(ips, ranges)
        assert len(kept) == 1
        assert ips[1] in kept
        assert ips[0] not in kept

    def test_one_ip_two_ranges(self):
        """A single address is matched against every range."""
        ips = ['10.10.10.1']
        ranges = ['192.168.1.0/24', '10.10.10.1/24']
        kept = filter_plugin.ips_in_ranges(ips, ranges)
        assert len(kept) == 1
        assert ips[0] in kept

    def test_multiple_ips_multiple_ranges(self):
        """Every address covered by some range is kept, the rest dropped."""
        ips = ['10.10.10.1', '192.168.1.1', '172.16.10.1']
        ranges = ['192.168.1.0/24', '10.10.10.1/24', '172.16.17.0/24']
        kept = filter_plugin.ips_in_ranges(ips, ranges)
        assert len(kept) == 2
        assert ips[0] in kept
        assert ips[1] in kept
        assert ips[2] not in kept

    def test_no_ips_in_ranges(self):
        """No address in any range yields an empty list."""
        ips = ['10.10.20.1', '192.168.2.1', '172.16.10.1']
        ranges = ['192.168.1.0/24', '10.10.10.1/24', '172.16.17.0/24']
        kept = filter_plugin.ips_in_ranges(ips, ranges)
        assert kept == []
| [
"gabrioux@redhat.com"
] | gabrioux@redhat.com |
027bd1b740da5376e54fca89e08fdec63b25fe77 | d07deff7e448cd38e763e939c27d2bbbf884283c | /Actionable code/2018-07-30/Where_act_code_VAE_classif_alt.py | f14c4f997a59acf170cf1083232fc7622463b91b | [] | no_license | gdbmanu/git-aae-vae-gan | 40175fd8586c6be439f9ccf10d20e5d579e9870d | 5b3cd28c5c355307d02c31c19700703c4f770d9f | refs/heads/master | 2021-05-11T12:16:33.180221 | 2018-08-03T15:27:06 | 2018-08-03T15:27:06 | 117,655,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,493 | py | batch_size = 50
# Global hyper-parameters / configuration flags, consumed throughout the
# file (Data, Net, ML, couples_gen, init).
test_batch_size = 1
valid_size = .2        # validation split fraction (unused in this chunk?)
epochs = 100
lr = 0.001             # Adam learning rate
momentum = 0.48
no_cuda = True         # True -> force CPU
num_processes = 1
seed = 42              # RNG seed for reproducibility
log_interval = 10      # batches between log lines (0 disables logging)
size = 64              # side of the square canvas the MNIST digit is pasted on
LATENT_DIM = 64        # number of per-feature transcoding decoders in Net
mean = 0.
std = 1.               # normalization applied to MNIST inputs
dimension = 25
verbose = False
NB_LABEL = 10          # number of MNIST classes
#HIDDEN_SIZE = 1024 #256
# Bias switches for the individual layer groups of Net.
BIAS_CONV = True
BIAS_POS = False
BIAS_LABEL = False
BIAS_TRANSCODE = True
BIAS_DECONV = True
import os
import numpy as np
import torch
#torch.set_default_tensor_type('torch.FloatTensor')
torch.set_default_tensor_type('torch.DoubleTensor')
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision.datasets import ImageFolder
import torch.multiprocessing as mp
import torchvision.models as models
import torchvision
from torch.utils.data.sampler import SubsetRandomSampler
from torch.autograd import Variable
from scipy.stats import multivariate_normal
import torch.optim as optim
class Data:
    """Wraps the MNIST training DataLoader plus an optional precomputed
    accuracy matrix loaded from disk."""
    def __init__(self, args):
        ## Load the certainty (accuracy) matrix if it has been precomputed.
        path = "MNIST_accuracy.npy"
        if os.path.isfile(path):
            self.accuracy = np.load(path)
            if verbose:
                print('Loading accuracy... min, max=', self.accuracy.min(), self.accuracy.max())
        else:
            print('No accuracy data found.')
        # NOTE(review): when CUDA is off the loader shuffles; when CUDA is on
        # it pins memory but does NOT shuffle -- confirm this asymmetry is
        # intentional.
        kwargs = {'num_workers': 1, 'pin_memory': True} if not args.no_cuda else {'num_workers': 1, 'shuffle': True}
        self.data_loader = torch.utils.data.DataLoader(
            datasets.MNIST('/tmp/data',
                           train=True, # def the dataset as training data
                           download=True, # download if dataset not present on disk
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize(mean=(args.mean,), std=(args.std,))])),
            batch_size=batch_size,
            **kwargs)
        self.args = args
        # GPU boilerplate
        self.args.cuda = not self.args.no_cuda and torch.cuda.is_available()
        # if self.args.verbose:
        #     print('cuda?', self.args.cuda)
    #
    def show(self, gamma=.5, noise_level=.4, transpose=True):
        """Display one batch of images as a grid; returns (fig, ax).

        gamma and noise_level are accepted but not used by this method.
        """
        images, foo = next(iter(self.data_loader))
        from torchvision.utils import make_grid
        npimg = make_grid(images, normalize=True).numpy()
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(figsize=((13, 5)))
        import numpy as np
        if transpose:
            # make_grid returns (C, H, W); imshow wants (H, W, C)
            ax.imshow(np.transpose(npimg, (1, 2, 0)))
        else:
            ax.imshow(npimg)
        plt.setp(ax, xticks=[], yticks=[])
        return fig, ax
class Net(nn.Module):
    """Variational encoder/decoder with a small classifier head.

    forward() returns (x_hat_logit, mu, logvar, u_out, z_hat_logit,
    transfo_d1, transfo_d2).  The ``z`` and ``u_in`` arguments of forward()
    are accepted for interface compatibility but unused by the computation.
    """

    def __init__(self, args):
        super(Net, self).__init__()
        self.args = args
        # Visual encoder: each conv reduces the spatial size by 4, so a
        # 64x64x1 input ends up as a 256x1x1 feature vector.
        self.conv1 = nn.Conv2d(1, 16, 4, bias=BIAS_CONV, stride=4, padding=0)
        self.conv2 = nn.Conv2d(16, 64, 4, bias=BIAS_CONV, stride=4, padding=0)  # dummy
        self.conv3 = nn.Conv2d(64, 256, 4, bias=BIAS_CONV, stride=4, padding=0)
        # Mu / logvar heads producing the 6-dim pose code (see forward():
        # [0:2] translation, [2] zoom (disabled), [3] rotation, [4:6] class).
        self.fc_x = nn.Linear(256, 32, bias=False)
        self.fc_mu = nn.Linear(32, 6, bias=False)
        self.fc_logvar = nn.Linear(32, 6, bias=False)
        # Classifier path on the last two pose coordinates.
        self.fc_classif_1 = nn.Linear(2, 256, bias=True)
        self.fc_classif_2 = nn.Linear(256, NB_LABEL, bias=False)
        # Map the (detached) class code to per-feature 2-D anchor points.
        self.fc_z_d1 = nn.Linear(256, LATENT_DIM, bias=True)
        self.fc_z_d2 = nn.Linear(256, LATENT_DIM, bias=True)
        # BUG FIX: these per-feature decoders were stored in a plain Python
        # list (`[0] * LATENT_DIM` filled in a loop), so nn.Module never
        # registered them -- their parameters were invisible to the optimizer
        # and to .cuda()/.to().  nn.ModuleList registers them while keeping
        # the same indexed access.
        self.fc_transcode = nn.ModuleList(
            [nn.Linear(2, 256) for _ in range(LATENT_DIM)])
        # Visual decoder, mirroring the encoder.
        self.deconv3 = nn.ConvTranspose2d(256, 64, 4, bias=BIAS_DECONV, stride=4, padding=0)
        self.deconv2 = nn.ConvTranspose2d(64, 16, 4, bias=BIAS_DECONV, stride=4, padding=0)  # dummy
        self.deconv1 = nn.ConvTranspose2d(16, 1, 4, bias=True, stride=4, padding=0)
        self.deconv1.bias.data.fill_(0)

    def forward(self, x, z, u_in):
        # --- encoder ---
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        n = self.num_flat_features(x)
        x = x.view(-1, n)
        # accumulator for the reconstruction, shape (batch, 256)
        x_hat = torch.zeros_like(x)
        # --- pose code via the reparameterization trick ---
        x = F.relu(self.fc_x(x))
        mu = self.fc_mu(x)
        logvar = self.fc_logvar(x)
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        u_out = eps.mul(std).add_(mu)
        # --- classification from the last two pose coordinates ---
        z_latent = u_out[:, 4:].view(-1, 2)
        z_latent = F.relu(self.fc_classif_1(z_latent))
        z_hat_logit = self.fc_classif_2(z_latent)
        # Anchors are computed from a detached copy so the reconstruction
        # loss does not backpropagate through the classifier features.
        z_d1 = self.fc_z_d1(z_latent.detach())
        z_d2 = self.fc_z_d2(z_latent.detach())
        # The rigid transform is shared by all LATENT_DIM features, so build
        # the rotation matrix and translation once (hoisted out of the loop;
        # the original rebuilt them on every iteration).
        rot_11 = torch.cos(u_out[:, 3]).view(-1, 1, 1)
        rot_12 = -torch.sin(u_out[:, 3]).view(-1, 1, 1)
        rot_21 = torch.sin(u_out[:, 3]).view(-1, 1, 1)
        rot_22 = torch.cos(u_out[:, 3]).view(-1, 1, 1)
        rot = torch.cat((torch.cat((rot_11, rot_12), 2),
                         torch.cat((rot_21, rot_22), 2)), 1)
        zoom = 1  # zoom disabled; was (1 + u_out[:, 2]).view(-1, 1)
        trans = u_out[:, :2].view(-1, 2)
        cols_d1 = []
        cols_d2 = []
        for i in range(LATENT_DIM):
            # rotate, (zoom) and translate the i-th 2-D anchor
            z_2D = torch.cat((z_d1[:, i].view(-1, 1),
                              z_d2[:, i].view(-1, 1)), 1).view(-1, 2, 1)
            transformed_z = torch.matmul(rot, z_2D).view(-1, 2)
            transformed_z *= zoom
            transformed_z += trans
            x_hat += F.relu(self.fc_transcode[i](transformed_z))
            cols_d1.append(transformed_z[:, 0].view(-1, 1))
            cols_d2.append(transformed_z[:, 1].view(-1, 1))
        # (batch, LATENT_DIM) matrices of the transformed anchor coordinates
        transfo_d1 = torch.cat(cols_d1, 1)
        transfo_d2 = torch.cat(cols_d2, 1)
        # --- decoder ---
        x_hat = x_hat.view(-1, 256, 1, 1)
        x_hat = F.relu(self.deconv3(x_hat))
        x_hat = F.relu(self.deconv2(x_hat))
        x_hat_logit = self.deconv1(x_hat)
        return x_hat_logit, mu, logvar, u_out, z_hat_logit, transfo_d1, transfo_d2

    def num_flat_features(self, x):
        """Number of features per sample (all dims except the batch dim)."""
        dims = x.size()[1:]
        num_features = 1
        for s in dims:
            num_features *= s
        return num_features
class ML():
    """Training/evaluation harness tying together Net, the MNIST Data
    loader, the losses and the Adam optimizer."""
    def __init__(self, args):
        self.args = args
        # GPU boilerplate
        self.args.cuda = not self.args.no_cuda and torch.cuda.is_available()
        if self.args.verbose:
            print('cuda?', self.args.cuda)
        # NOTE(review): self.model is only created a few lines below, so this
        # call raises AttributeError whenever CUDA is actually available.
        if self.args.cuda:
            self.model.cuda()
        self.device = torch.device("cuda" if self.args.cuda else "cpu")
        torch.manual_seed(self.args.seed)
        if self.args.cuda:
            torch.cuda.manual_seed(self.args.seed)
        # self.args.classes = self.dataset.classes
        # MODEL
        self.model = Net(self.args).to(self.device)
        # DATA
        self.dataset = Data(self.args)
        # LOSS
        # per-element losses (reduce=False); reductions are applied manually
        # in train_epoch()
        self.loss_func_x = torch.nn.BCEWithLogitsLoss(reduce=False)
        self.loss_func_z = torch.nn.CrossEntropyLoss(reduce = False) #size_average=False)
        #self.loss_func = torch.nn.MSELoss()
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.args.lr) #, weight_decay=0.0001*self.args.momentum)
        #self.optimizer = optim.SGD(self.model.parameters(),
        # lr=self.args.lr, momentum=self.args.momentum)
    # def forward(self, img):
    # # normalize img
    # return (img - self.mean) / self.std
    def train(self, path=None):
        """Train for args.epochs epochs.

        If `path` is given it is used as a weight cache: existing weights are
        loaded instead of training; otherwise the model is trained and saved
        there afterwards.
        """
        # cosmetics
        try:
            from tqdm import tqdm_notebook as tqdm
            verbose = 1
        except ImportError:
            verbose = 0
        if self.args.verbose == 0 or verbose == 0:
            # fall back to a no-op progress wrapper
            def tqdm(x, desc=None):
                if desc is not None: print(desc)
                return x
        # setting up training
        self.model.train()
        if path is not None:
            # using a data_cache
            import os
            import torch
            if os.path.isfile(path):
                self.model.load_state_dict(torch.load(path))
                print('Loading file', path)
            else:
                print('Training model...')
                for epoch in tqdm(range(1, self.args.epochs + 1), desc='Train Epoch' if self.args.verbose else None):
                    self.train_epoch(epoch, rank=0)
                torch.save(self.model.state_dict(), path) #save the neural network state
                print('Model saved at', path)
        else:
            for epoch in tqdm(range(1, self.args.epochs + 1), desc='Train Epoch' if self.args.verbose else None):
                self.train_epoch(epoch, rank=0)
    def train_epoch(self, epoch, rank=0):
        """Run one training epoch: paste each digit on a size x size canvas
        at a Gaussian-random offset, then optimize classification +
        reconstruction + KL losses."""
        # per-epoch deterministic seeding
        torch.manual_seed(self.args.seed + epoch + rank*self.args.epochs)
        for batch_idx, (data, target) in enumerate(self.dataset.data_loader):
            # computes the couple
            data, target = data.to(self.device), target.to(self.device)
            data_full = np.zeros((batch_size, 1, self.args.size, self.args.size))
            label_full = np.zeros((batch_size, NB_LABEL))
            pos_full = np.zeros((batch_size, 2))
            for idx in range(batch_size):
                # random offset drawn from N(0, (size/6)^2), clipped to +-size/2
                mid = np.int(self.args.size / 2)
                draw = np.random.multivariate_normal((0,0),((1,0),(0,1)))
                i_offset = min(max(-mid, np.int(draw[0] * mid / 3)), mid)
                j_offset = min(max(-mid, np.int(draw[1] * mid / 3)), mid)
                #print(draw, i_offset, j_offset)
                data_full[idx, 0, :, :], label_full[idx, :], pos_full[idx,:] = couples_gen(data[idx, 0, :, :],
                                                                                           target[idx],
                                                                                           i_offset, j_offset, size=self.args.size, contrast=1.)
            data_full = Variable(torch.DoubleTensor(data_full))
            label_full = target #Variable(torch.LongTensor(label_full))
            #label_full = Variable(torch.DoubleTensor(label_full))
            pos_full = Variable(torch.DoubleTensor(pos_full))
            # print(data.shape, data_full.shape)
            # Clear all accumulated gradients
            self.optimizer.zero_grad()
            # Predict classes using images from the train set
            x_hat_logit_output, mu_output, logvar_output, u_output, z_hat_logit_output = self.model(data_full, label_full, pos_full)[0:5]
            # print(output.shape, acc_full.shape)
            # Compute the loss based on the predictions and actual labels
            # NOTE: `size` here is the module-level canvas side (64), used as
            # a scaling factor on the classification loss.
            loss_z_hat = self.loss_func_z(z_hat_logit_output, target).mul(size).mean()
            z_hat = torch.max(z_hat_logit_output, 1)[1]
            correct = (z_hat.view(-1) == target).double()
            #correct_d = Variable(torch.DoubleTensor(correct))
            accuracy = correct.mean()
            loss_x_hat = self.loss_func_x(x_hat_logit_output, data_full).mul(size * size).mean()
            # KL divergence weight
            KL_FLAG = .1
            #KL_loss = -0.5 * (torch.sum(1 + logvar_output - mu_output.pow(2) - logvar_output.exp(), dim = 1)).mean() * KL_FLAG
            '''KL_loss_d1 = -0.5 * (torch.sum(1 + logvar_d1_output - mu_d1_output.pow(2) - logvar_d1_output.exp())) * KL_FLAG
            KL_loss_d2 = -0.5 * (torch.sum(1 + logvar_d2_output - mu_d2_output.pow(2) - logvar_d2_output.exp())) * KL_FLAG'''
            KL_loss = -0.5 * (torch.sum(1 + logvar_output - mu_output.pow(2) - logvar_output.exp())).mean() * KL_FLAG
            # Backpropagate the loss
            loss = loss_z_hat + loss_x_hat + KL_loss
            loss.backward()
            # Adjust parameters according to the computed gradients
            self.optimizer.step()
            if self.args.verbose and self.args.log_interval>0:
                if batch_idx % self.args.log_interval == 0:
                    print('\tTrain Epoch: {} [{}/{} ({:.0f}%)]\tclassif Loss: {:.2f}\tBCE_loss: {:.2f}\tKL: {:.2f}\tAccuracy: {:.2f}'.format(
                        epoch, batch_idx * len(data), len(self.dataset.data_loader.dataset),
                        100. * batch_idx / len(self.dataset.data_loader), loss_z_hat.item(),
                        loss_x_hat.item(), KL_loss.item(), accuracy.item()))
    def test(self, dataloader=None):
        """Evaluate on `dataloader` (defaults to the training loader).

        NOTE(review): this method is unfinished and would crash if executed:
        `pos` and `acc_full` are undefined, and couples_gen is called without
        its `target` argument. Marked TODO below in the original.
        """
        if dataloader is None:
            dataloader = self.dataset.data_loader
        self.model.eval()
        test_loss = 0
        correct = 0
        for data, target in dataloader:
            data, target = data.to(self.device), target.to(self.device)
            data_full = np.zeros((batch_size, 1, self.args.size, self.args.size))
            for idx in range(batch_size):
                i_offset = np.random.randint(self.args.size)
                j_offset = np.random.randint(self.args.size)
                data_full[idx, 0, :, :], pos[idx, :] = couples_gen(data[idx, 0, :, :],
                                                                   i_offset, j_offset, size=self.args.size, contrast=1.)
            data_full, acc_full = Variable(torch.DoubleTensor(data_full)), Variable(torch.DoubleTensor(acc_full))
            output = self.model(data_full)
            # TODO FINISH ...
        if self.args.log_interval>0:
            print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
                test_loss, correct, len(self.dataset.data_loader.dataset),
                100. * correct / len(self.dataset.data_loader.dataset)))
        return correct.numpy() / len(self.dataset.data_loader.dataset)
    def show(self, gamma=.5, noise_level=.4, transpose=True, only_wrong=False):
        """Display one batch with predictions; returns (fig, ax) or
        (None, None) when only_wrong filters the batch out.

        NOTE(review): `not pred == target` compares tensors, whose truth
        value is ambiguous for batches of more than one element -- confirm
        this is only ever used with batch_size 1 predictions.
        """
        data, target = next(iter(self.dataset.data_loader))
        data, target = data.to(self.device), target.to(self.device)
        output = self.model(data)
        pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
        if only_wrong and not pred == target:
            print('target:' + ' '.join('%5s' % self.dataset.dataset.classes[j] for j in target))
            print('pred :' + ' '.join('%5s' % self.dataset.dataset.classes[j] for j in pred))
            from torchvision.utils import make_grid
            npimg = make_grid(data, normalize=True).numpy()
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots(figsize=((13, 5)))
            import numpy as np
            if transpose:
                ax.imshow(np.transpose(npimg, (1, 2, 0)))
            else:
                ax.imshow(npimg)
            plt.setp(ax, xticks=[], yticks=[])
            return fig, ax
        else:
            return None, None
    def protocol(self, path=None):
        """Full train + test run; returns the final accuracy."""
        # TODO: make a loop for the cross-validation of results
        self.train(path=path)
        Accuracy = self.test()
        print('Test set: Final Accuracy: {:.3f}%'.format(Accuracy*100)) # print only the final success percentage
        return Accuracy
def couples_gen(data, target, i_offset, j_offset, size, contrast=1., nb_label=None):
    """Paste the digit patch `data` onto a size x size canvas.

    The patch is centred at (size/2 + i_offset, size/2 + j_offset) and
    clipped at the canvas borders.

    Parameters:
        data: 2-D array-like (s_x, s_y) digit patch
        target: integer class label
        i_offset, j_offset: vertical/horizontal offset from the canvas centre
        size: side of the square output canvas
        contrast: accepted for interface compatibility; currently unused
        nb_label: length of the one-hot label vector; defaults to the
            module-level NB_LABEL (backward compatible with existing callers)

    Returns:
        (data_full, target_full, pos_full):
        the canvas, a one-hot label of length nb_label, and the
        float array [i_offset, j_offset].
    """
    if nb_label is None:
        nb_label = NB_LABEL  # module-level default (10 MNIST classes)
    data_full = np.zeros((size, size))
    s_x, s_y = data.shape
    # destination window on the canvas, plus the amount cut off at each edge
    i_inf = size // 2 + i_offset - s_x // 2
    i_inf_cut = -min(i_inf, 0)
    i_sup = size // 2 + i_offset + s_x // 2
    i_sup_cut = max(i_sup - size, 0)
    j_inf = size // 2 + j_offset - s_y // 2
    j_inf_cut = -min(j_inf, 0)
    j_sup = size // 2 + j_offset + s_y // 2
    j_sup_cut = max(j_sup - size, 0)
    data_full[i_inf + i_inf_cut:i_sup - i_sup_cut,
              j_inf + j_inf_cut:j_sup - j_sup_cut] = \
        data[i_inf_cut:s_x - i_sup_cut, j_inf_cut:s_y - j_sup_cut]
    # one-hot label
    target_full = np.zeros(nb_label, dtype=int)
    target_full[target] = 1
    pos_full = np.array([i_offset, j_offset], dtype=float)
    return data_full, target_full, pos_full
def init(batch_size=batch_size, test_batch_size=test_batch_size, valid_size=valid_size, epochs=epochs,
         lr=lr, momentum=momentum, no_cuda=no_cuda, num_processes=num_processes, seed=seed,
         log_interval=log_interval, size=size, mean=mean, std=std,
         dimension=dimension, verbose=verbose):
    """Collect the training settings into an attribute-accessible dict.

    Every argument defaults to the module-level constant of the same name.
    """
    settings = dict(batch_size=batch_size, test_batch_size=test_batch_size,
                    valid_size=valid_size, epochs=epochs, lr=lr,
                    momentum=momentum, no_cuda=no_cuda,
                    num_processes=num_processes, seed=seed,
                    log_interval=log_interval, size=size, mean=mean, std=std,
                    dimension=dimension, verbose=verbose)
    import easydict
    return easydict.EasyDict(settings)
if __name__ == '__main__':
    # BUG FIX: the original called init_cdl() and ml.main(), neither of which
    # exists in this file; init() and ML.protocol() are the entry points
    # actually defined above.
    args = init()
    ml = ML(args)
    ml.protocol()
"dauce@port-dauce.ec-m.fr"
] | dauce@port-dauce.ec-m.fr |
57f943f09cb2f07405fdecf41fc8fb91788e7d5d | 69b505c7c0bef3a12d03a84f6aa09c8bb04249bb | /9019.py | 355cda97e290b396da40dbfc8f0fa7c533c62555 | [] | no_license | cscim918/PS | 68819dc8ad3d18427bbf2cb432ef2699243eb2c7 | 55a3cbf226b4885d25609c0e4ab877d4a4a00ffa | refs/heads/master | 2023-03-10T09:14:00.576257 | 2021-02-24T03:30:56 | 2021-02-24T03:30:56 | 331,794,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | from collections import deque
import sys
input = sys.stdin.readline
def bfs(start=None, target=None):
    """Shortest D/S/L/R command string turning `start` into `target`.

    Registers hold values 0..9999. Operations:
        D: double modulo 10000
        S: subtract 1 (0 wraps to 9999)
        L: rotate digits left
        R: rotate digits right

    Generalized to take explicit arguments; when called without them it
    falls back to the module globals `a` and `b` (the original call style),
    so the existing driver keeps working. Uses a local visited table, so it
    no longer depends on the global `arr` being reset by the caller.
    """
    if start is None:
        start = a
    if target is None:
        target = b
    visited = [False] * 10000
    visited[start] = True
    q = deque([(start, "")])
    while q:
        number, path = q.popleft()
        dn = (number * 2) % 10000
        sn = number - 1 if number != 0 else 9999
        # pure integer arithmetic (the original's int(... + number / 1000)
        # relied on float truncation for the same result)
        ln = number % 1000 * 10 + number // 1000
        rn = number % 10 * 1000 + number // 10
        # generation order D, S, L, R preserved so the same shortest string
        # is found as in the original
        for nxt, op in ((dn, "D"), (sn, "S"), (ln, "L"), (rn, "R")):
            if nxt == target:
                return path + op
            if not visited[nxt]:
                visited[nxt] = True
                q.append((nxt, path + op))
# Read the number of test cases, then for each case read the start and
# target register values, reset the visited table used by bfs() and print
# the shortest command string.
t = int(input())
for _ in range(t):
    a, b = map(int, input().split())
    arr = [0] * 10000
    print(bfs())
"cscim918@gmail.com"
] | cscim918@gmail.com |
efc25952ccf6616c059a4a035042cd8c36a4d1de | b0e67fbd4c42aba24f7d4bccb99e9aa037c0b7d5 | /googlenet_regression/train.py | 37225c5e78976f43cf1a58087d07336e983a3735 | [] | no_license | gombru/SocialMediaWeakLabeling | f979aea8218be115758ff8e1e9a945a701ac99b9 | 518437903ba7370a4098303a41196a08f1d6a58e | refs/heads/master | 2022-02-26T17:49:08.997335 | 2022-02-10T12:54:57 | 2022-02-10T12:54:57 | 84,461,511 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | caffe_root = '../../caffe-master/' # this file should be run from {caffe_root}/examples (otherwise change this line)
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
from create_solver import create_solver
from do_solve import do_solve
from pylab import *
import os
# Run on GPU 1.
caffe.set_device(1)
caffe.set_mode_gpu()
# ImageNet-pretrained GoogLeNet weights used for initialization.
weights = '../../../hd/datasets/SocialMedia/models/pretrained/bvlc_googlenet.caffemodel'
assert os.path.exists(weights)
# Training schedule.
niter = 5000000
base_lr = 0.001 #Starting from 0.01 (from quick solver) -- Working 0.001
display_interval = 1000 #400
#number of validating images is test_iters * batchSize
test_interval = 1000 #10000
test_iters = 100 #100
#Name for training plot and snapshots
training_id = 'instaMiro_GoogleNet_regression_frozen'
#Set solver configuration
# NOTE(review): the same prototxt is passed as both the train and the test
# net -- confirm that validating on the training definition is intended.
solver_filename = create_solver('prototxt/trainval_frozen_insta.prototxt', 'prototxt/trainval_frozen_insta.prototxt', training_id, base_lr=base_lr)
#Load solver
solver = caffe.get_solver(solver_filename)
#Copy init weights
print("Initializing with ImageNet")
solver.net.copy_from(weights)
#Restore solverstate (uncomment to resume from a snapshot instead)
#solver.restore('../../../datasets/SocialMedia/models/CNNRegression/instagram_cities_1M_Inception_frozen_500_chunck_iter_280000.solverstate')
print 'Running solver for %d iterations...' % niter
do_solve(niter, solver, display_interval, test_interval, test_iters, training_id)
print 'Done.'
| [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
684cde1f1985bc66f952c4ba692fa8e8e49eaef2 | 526fffaa6f8ae0e006002e83dee80c918185f896 | /controllers/main.py | 23d95aa2dcbd76f75ebfee83bda0ce038bf8fe1f | [] | no_license | mwiesman/WFSnapNSearch | 162a26f5ac5a1d3af08c1a5d46fe81d1506b4bd9 | 0633503a7aa8275b3eb656176fc4c0cf0fe5b64e | refs/heads/master | 2021-01-09T20:39:52.298911 | 2016-07-12T20:55:08 | 2016-07-12T20:55:08 | 62,180,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | from flask import *
import os
main = Blueprint('main', __name__, template_folder='templates')
@main.route('/', methods=['GET', 'POST'])
def main_route():
    """Serve the landing page.

    The reverse-image-search POST handler (upload a picture, run it through
    the trained model, redirect to a Wayfair keyword search built from the
    recognized attributes) is still disabled below; for now every request
    simply renders the index template.
    """
    # BUG FIX: the original built a validFormats set on every request even
    # though only the commented-out handler below uses it. Restore this line
    # when re-enabling that handler:
    # validFormats = set(['png', 'jpg', 'bmp', 'gif'])
    #
    # if (request.form.get("op") == "img_search"):
    #     picFile = request.files.get("pic")
    #     if picFile:
    #         filename = picFile.filename
    #         extension = filename[-3:]
    #         # make sure the file is of a valid picture format
    #         if extension.lower() not in validFormats and filename[-4:].lower() not in ['tiff', 'jpeg']:
    #             abort(404)
    #         # save the uploaded image so we can send it to the model
    #         curPath = os.path.dirname(__file__)
    #         relPath = "static/images"
    #         imagesFolder = os.path.abspath(os.path.join(curPath, os.pardir, relPath))
    #         picFile.save(os.path.join(imagesFolder, filename))
    #         # send the file to the trained model: model.recognize(filename)
    #         # returns the recognized attributes, joined into a keyword URL:
    #         # redirectURL = "http://www.wayfair.com/keyword.php?keyword=" + "+".join(attrs)
    #         redirectURL = "http://www.wayfair.com/keyword.php?keyword=blue+lamp"
    #         return redirect(redirectURL)
    return render_template("index.html")
"mfwiesman@gmail.com"
] | mfwiesman@gmail.com |
04f1c6092d4b788eddd48501bf369547c8671d2e | e606a78fef5ef4aec3765a0a615223a82b5cec37 | /wrf.py | fcee070c46891b22f7d97a319f72e452b7caaf6b | [] | no_license | everdaniel/pycode | aaeccfe690d3dd6c71365186496dd0bd14f7e3fa | 35bd471bc82828685dd814cb690a43d42d0ffc42 | refs/heads/master | 2020-12-24T11:52:28.076786 | 2014-10-24T07:16:32 | 2014-10-24T07:16:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,247 | py | #!/usr/bin/env python
#encoding: utf-8
'''
module wrf
V. Noel 2010
'''
import netCDF4
import numpy as np
# private functions
def _tk (p, tpot):
f = 8.314472/29.19
tk = (tpot + 300)/((1000/(p/100))**f)
return tk
def _remap (var, lon, lat, lonorb, latorb):
'''Extrait des profils verticaux WRF sur une liste de coordonnees lon/lat.'''
n = lonorb.size # nombre de coordonnees
var2 = np.zeros([var.shape[0], n])
for i in np.arange(n):
# pour chaque profil, on calcule une grille de distance avec les coords WRF
d = np.sqrt((lon-lonorb[i])**2 + (lat-latorb[i])**2)
#im = argmin(d)
#imin, jmin = unravel_index(im, lon.shape)
# the code below is much faster
imin, jmin = divmod(d.argmin(), d.shape[1])
var2[:,i] = var[:,imin,jmin]
return var2.T
class wrf(object):
    """Thin reader around a wrfout netCDF file."""
    def __init__(self, wrfout):
        # wrfout: path to the netCDF file produced by WRF
        self.nc = netCDF4.Dataset(wrfout)
    def close(self):
        # Close the underlying dataset; the object is unusable afterwards.
        self.nc.close()
        self.nc = None
    def coords(self, it=0):
        # Longitude/latitude grids for time index `it`, or (None, None)
        # when the requested index is out of range.
        if it >= self.nc.variables['XLONG'].shape[0]:
            print('*ERROR* : Requested timestamp not in WRF file')
            print('Requested timestamp index : ', it)
            print('Number of time indexes in WRF file', self.nc.variables['XLONG'].shape[0])
            return None, None
        lon = self.nc.variables['XLONG'][it,...]
        lat = self.nc.variables['XLAT'][it,...]
        lon = np.squeeze(lon)
        lat = np.squeeze(lat)
        return lon, lat
    def ntime(self):
        # Number of time records in the file.
        return self.nc.variables['Times'].shape[0]
    def times(self):
        # All timestamps as strings.
        # NOTE(review): ndarray.tostring() is deprecated (tobytes() in
        # recent numpy) and returns bytes on Python 3 -- confirm callers.
        t = self.nc.variables['Times']
        ts = []
        ts = [ttxt.tostring() for ttxt in t[:,:]]
        return ts
    def time(self, it=0):
        # Timestamp string for time index `it`.
        t = self.nc.variables['Times'][it,:]
        t = t.tostring()
        return t
    def p_top(self, it=0):
        # Model-top pressure for time index `it`.
        p = self.nc.variables['P_TOP'][it]
        return p
    def pressure(self, it=0, pascals=False, on_orbit=None):
        '''
        Read the pressure field from the WRF file.
        Parameters:
            it: time index of the field to extract [default: 0]
            pascals: pressure returned in Pa if True, hPa if False.
            on_orbit: optional (lon_orbit, lat_orbit) pair of coordinates
                on which to extract profiles
        Returns:
            3D pressure field [X, Y, Z] ([PROFILE, Z] if on_orbit)
        '''
        # full pressure = perturbation + base state
        p = self.nc.variables['P'][it,...] + self.nc.variables['PB'][it,...]
        if not pascals:
            p /= 100.
        if on_orbit:
            wrflon, wrflat = self.coords(it=it)
            p = _remap(p, wrflon, wrflat, on_orbit[0], on_orbit[1])
        p = np.squeeze(p)
        return p
    def temperature(self, it=0, kelvins=False, on_orbit=None):
        '''
        Read the temperature field from the WRF file.
        Parameters:
            it: time index of the field to extract [default: 0]
            kelvins: temperature returned in K if True, Celsius if False.
            on_orbit: optional (lon_orbit, lat_orbit) pair of coordinates
                on which to extract profiles
        Returns:
            3D temperature field [X, Y, Z] ([PROFILE, Z] if on_orbit)
        '''
        p = self.nc.variables['P'][it,...] + self.nc.variables['PB'][it,...]
        tpot = self.nc.variables['T'][it,...]
        # convert potential temperature to actual temperature
        t = _tk (p, tpot)
        if not kelvins:
            t -= 273.
        if on_orbit:
            wrflon, wrflat = self.coords(it=it)
            t = _remap(t, wrflon, wrflat, on_orbit[0], on_orbit[1])
        t = np.squeeze(t)
        return t
    def u(self, it=0):
        # Zonal wind component at time index `it`.
        u = self.nc.variables['U'][it,...]
        u = np.squeeze(u)
        return u
    def v(self, it=0):
        # Meridional wind component at time index `it`.
        v = self.nc.variables['V'][it,...]
        v = np.squeeze(v)
        return v
    def w(self, it=0, on_orbit=None):
        '''
        Read the vertical wind speed field from the WRF file.
        Parameters:
            it: time index of the field to extract [default: 0]
            on_orbit: optional (lon_orbit, lat_orbit) pair of coordinates
                on which to extract profiles
        Returns:
            3D vertical wind speed field [X, Y, Z] ([PROFILE, Z] if on_orbit)
        '''
        w = self.nc.variables['W'][it,...]
        if on_orbit:
            wrflon, wrflat = self.coords(it=it)
            w = _remap(w, wrflon, wrflat, on_orbit[0], on_orbit[1])
        return w
def map_peninsula (lon, lat, xvar, centerlon=-60, w=2000,h=2000, cb=False, cl=None, cbtitle=None, ec='None', ax=None, fp=None):
    """Plot `xvar` on a Lambert conformal map centred on the Antarctic
    Peninsula region (lat_0=-70).

    Parameters:
        lon, lat: 2-D coordinate grids
        xvar: 2-D field to plot with pcolormesh
        centerlon: central longitude of the projection
        w, h: map width/height in km
        cb: draw a colorbar if True
        cl: optional (vmin, vmax) color limits
        cbtitle: colorbar label
        ec: pcolormesh edge color
        ax: optional existing axes to draw into
        fp: optional FontProperties applied to labels (uses the project
            `niceplots` helpers when a colorbar is drawn)
    Returns:
        (m, pc): the Basemap instance and the pcolormesh handle.
    """
    from mpl_toolkits import basemap
    from matplotlib.pyplot import colorbar
    import niceplots
    m = basemap.Basemap(projection='lcc',lat_0=-70,lon_0=centerlon,width=w*1000,height=h*1000, resolution='i', ax=ax)
    x, y = m(lon, lat)
    if cl is None:
        pc = m.pcolormesh(x, y, xvar, edgecolors=ec)
    else:
        pc = m.pcolormesh(x, y, xvar, vmin=cl[0], vmax=cl[1], edgecolors=ec)
    m.drawmapboundary(color='grey')
    if fp:
        m.drawmeridians(np.arange(-105,-15,15), labels=[0,0,0,1], fontproperties=fp) # left, right, top, bottom
        m.drawparallels(np.arange(-90,90,10), labels=[1,0,0,0], fontproperties=fp) # left, right, top, bottom
    else:
        m.drawmeridians(np.arange(-105,-15,15), labels=[0,0,0,1]) # left, right, top, bottom
        m.drawparallels(np.arange(-90,90,10), labels=[1,0,0,0]) # left, right, top, bottom
    m.drawcoastlines(color='grey')
    if cb:
        cbar=colorbar(pc)
        if fp:
            niceplots.beautify_colorbar(cbar, title=cbtitle)
        else:
            if cbtitle is not None:
                cbar.set_label(cbtitle)
    return m, pc
def antarctica_map(maxlat=-50, lon_0=180):
    """Return a south-polar Lambert azimuthal equal-area Basemap bounded at
    latitude `maxlat`, with intermediate-resolution coastlines."""
    from mpl_toolkits.basemap import Basemap
    return Basemap(projection='splaea', boundinglat=maxlat, lon_0=lon_0, resolution='i')
# def map_antarctica_temp(wrfout, maxlat=-50, it=0):
# from mpl_toolkits.basemap import Basemap
#
# m = Basemap(projection='splaea', boundinglat=maxlat, lon_0=180)
#
# lon, lat = coords(wrfout)
# temp = temperature(wrfout)
# plt.figure()
# x, y = m(lon, lat)
# m.pcolormesh(x, y, temp[it,:,:])
# m.drawcoastlines()
# plt.colorbar()
# def geo_show(f='geo_em.d01.nc'):
# print 'Reading file ' + f
# try:
# nc = netCDF4.Dataset(f)
# except:
# raise NameError(f + ' is not a netCDF file')
#
# try:
# lat = nc.variables['XLAT_M'][0,:,:]
# lon = nc.variables['XLONG_M'][0,:,:]
# hgt = nc.variables['HGT_M'][0,:,:]
# except:
# raise NameError('Cannot find variables XLAT_M, XLONG_M, HGT_M in ' + f)
#
# #m = Basemap(projection='lcc', lat_0=np.min(lat.ravel()), lon_0=np.min(lon.ravel()), width=10000*1000, height=6000*1000)
# m = Basemap(projection='robin', lon_0=0)
# x, y = m(lon, lat)
#
# print 'Plotting the damn thing'
# plt.figure()
#
# m.pcolor(x, y, hgt)
#
# m.drawmapboundary()
# m.drawmeridians(np.r_[-180:180:30])
# m.drawparallels(np.r_[-90:90:15])
#
# plt.show()
| [
"vincent.noel@gmail.com"
] | vincent.noel@gmail.com |
1671d4ad9ebc853197422bf927268c0e5d000640 | 33e5c43b1473129aa9d21979486c169586843a3e | /TFM/Machine Translation/preprocesoPalabras.py | a70b95625ac1a0fad4eddbb09dfea5f8bf50be13 | [] | no_license | jualora/MIARFID | 503813f2311bd11f1824759dd0047ae014419fc6 | 1222c3dddc07e987101da8987f650c33c7e4bb80 | refs/heads/master | 2022-12-15T07:26:02.719420 | 2020-09-15T11:00:57 | 2020-09-15T11:00:57 | 271,645,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | import os
import openpyxl
import matplotlib.pyplot as plt
from translate.storage.tmx import tmxfile
# Histogram of sentence lengths for a parallel es/ca corpus: count the Catalan
# sentences per 10-word bucket (1-10, 11-20, ..., 81-90) and write the nine
# counts into row 44 of an existing Excel sheet.

# NOTE(review): only the file list of the *last* directory visited by os.walk
# is kept, and os.walk yields names in arbitrary order -- this assumes
# './ParaProcesar' is flat and that index 0 is the Catalan file and index 1
# the Spanish one (e.g. '*.ca' / '*.es'). Confirm against the input layout.
for base, dirs, files in os.walk("./ParaProcesar"):
    datasets = files

datasetCa = datasets[0]
datasetEs = datasets[1]
data_path_Ca = "./ParaProcesar/" + datasetCa
data_path_Es = "./ParaProcesar/" + datasetEs

# Pair the corpora line by line as (spanish, catalan), dropping the trailing '\n'.
frasesCa = open(data_path_Ca).readlines()
frasesEs = open(data_path_Es).readlines()
frases = [(frasesEs[i][:-1], frasesCa[i][:-1]) for i in range(len(frasesCa))]

# x[k] counts Catalan sentences whose word count lies in (10*k, 10*(k+1)];
# sentences longer than 90 words are ignored, exactly as the original
# menor10..menor90 elif chain did.
x = [0] * 9
for _, frase_ca in frases:
    num_palabras = len(frase_ca.split(' '))
    bucket = (num_palabras - 1) // 10
    if 0 <= bucket < len(x):
        x[bucket] += 1

excel = openpyxl.load_workbook('Palabras por Frase.xlsx')
# Worksheet indexing replaces the deprecated get_sheet_by_name().
sheet = excel['Hoja1']
# Counts go into row 44, columns B..J (column 2 onwards), one bucket each.
for offset, count in enumerate(x):
    sheet.cell(row=44, column=2 + offset).value = count
excel.save('Palabras por Frase.xlsx')
"jalopez@JALOPEZ.localdomain"
] | jalopez@JALOPEZ.localdomain |
38812ef52de2dc2ad2207187a97207a4bfb5c913 | c7644f32855785c8c1cb4b8ba00ab1732991f691 | /4 - Conditional Operators/Input Validation/test/test_validation_with_if.py | 2600a8c36a48d509f2ad32214d129a05692865de | [] | no_license | Babkock/python | 6036212b52e989bb10eccd998b7e099cc58521a6 | 73272e448aabf2171c38c5adce1fa19d42009ea9 | refs/heads/master | 2020-07-09T12:53:42.588387 | 2019-12-06T22:55:59 | 2019-12-06T22:55:59 | 203,972,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #!/usr/bin/python3
"""
Tanner Babcock
September 17, 2019
Module 4, Topic 4: Input Validation
"""
import unittest
from validation_with_if import average
class MyTestCase(unittest.TestCase):
    """Checks input validation in average(): out-of-range scores yield -1."""

    def test_input_validation_if(self):
        # Each call contains exactly one invalid (negative) score, in a
        # different argument position; average() must signal the error as -1.
        self.assertEqual(-1, average(-90, 90, 95))
        self.assertEqual(-1, average(91, -95, 97))
        self.assertEqual(-1, average(90, 80, -12))
if __name__ == "__main__":  # run the suite when this file is executed directly
    unittest.main()
| [
"babkock@gmail.com"
] | babkock@gmail.com |
505b43b7f22b92821ac90e953a3cbc2653b2ea87 | ed939f4855e0086a5cb7a635df6276e7d555ce78 | /bin/merge_bed.py | 5ec612fe7aa87fe766586746f1e5d84cb1e1c7be | [
"MIT"
] | permissive | TreesLab/ICARES | c3fc8e48f56d67bf550c8b6e872692ed7e50f206 | 7c9844b37c770f2e6630b5899d473a0a4189fc64 | refs/heads/master | 2021-01-21T16:27:15.561681 | 2019-10-14T08:36:04 | 2019-10-14T08:36:04 | 54,468,163 | 0 | 1 | MIT | 2019-10-02T04:22:31 | 2016-03-22T11:00:47 | Shell | UTF-8 | Python | false | false | 1,397 | py | #! /usr/bin/env python2
import argparse
def merge_bed(main_file, second_files):
    """Merge annotation columns from *second_files* into the rows of *main_file*.

    Every file is a tab-separated BED-like file whose first three columns
    (chrom, start, end) identify an interval and whose fourth column is a
    name/annotation. For each secondary file, one extra column is appended to
    every row of the main file: the secondary file's column 4 for the matching
    interval, or "" when the interval is absent.

    Returns the merged rows as a list of lists of strings.
    """
    with open(main_file) as data_reader:
        main_data = [line.rstrip('\n').split('\t') for line in data_reader]
    for second_file in second_files:
        with open(second_file) as data_reader:
            second_data = [line.rstrip('\n').split('\t') for line in data_reader]
        # Index annotations by (chrom, start, end) for O(1) lookup.
        lookup = dict((tuple(fields[:3]), fields[3]) for fields in second_data)
        for row in main_data:
            # dict.get replaces has_key (removed in Python 3) and does a
            # single lookup instead of a test-then-read pair.
            row.append(lookup.get(tuple(row[:3]), ""))
    return main_data
def write_tsv(result, out_file):
    """Write *result* (rows of string fields) as tab-separated lines.

    Rows go to the file at *out_file*, or to stdout when out_file is None.
    """
    if out_file is None:  # 'is None' instead of '== None'
        for row in result:
            # print with a single parenthesized argument is valid in both
            # Python 2 and 3, unlike the original 'print >> fh' statement
            # which is a syntax error under Python 3.
            print('\t'.join(row))
    else:
        with open(out_file, 'w') as data_writer:
            for row in result:
                data_writer.write('\t'.join(row) + '\n')
def main():
    """Parse command-line arguments, merge the BED files, emit the result."""
    parser = argparse.ArgumentParser()
    parser.add_argument("main_file", help="Main file.")
    parser.add_argument("second_file", nargs="+", help="Second file.")
    parser.add_argument("-o", "--output", help="Output.")
    opts = parser.parse_args()

    merged = merge_bed(opts.main_file, opts.second_file)
    write_tsv(merged, opts.output)
if __name__ == "__main__":  # CLI entry point
    main()
| [
"chiangtw112358@gmail.com"
] | chiangtw112358@gmail.com |
92d458c9db4e12901388ea18e6213c115f1134bc | 98948945b539491c75652850dc7e08e07c715840 | /dm-templates/sap_db2-win/sap_db2-win.py | a48df956413e54da5311f8de081d17be45b02dc2 | [] | no_license | Myouss/NW_HA | 0f3ff7c58b082902b41093b951a395a4b9ea31e3 | d278148476ac7990288273ea000c257c2fbc51f8 | refs/heads/master | 2020-04-29T17:54:58.973009 | 2019-03-18T15:42:53 | 2019-03-18T15:42:53 | 176,309,391 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,843 | py | # ------------------------------------------------------------------------
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description: Google Cloud Platform - SAP Deployment Functions
# Build Date: Fri Mar 15 13:25:46 GMT 2019
# ------------------------------------------------------------------------
"""Creates a Compute Instance with the provided metadata."""
# Root of the Compute Engine v1 REST API; all resource URLs are built on this.
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
def GlobalComputeUrl(project, collection, name):
  """Return the URL of a global compute resource in *project*."""
  return COMPUTE_URL_BASE + 'projects/' + project + '/global/' + collection + '/' + name
def ZonalComputeUrl(project, zone, collection, name):
  """Return the URL of a zonal compute resource in *project*/*zone*."""
  return COMPUTE_URL_BASE + 'projects/' + project + '/zones/' + zone + '/' + collection + '/' + name
def RegionalComputeUrl(project, region, collection, name):
  """Return the URL of a regional compute resource in *project*/*region*."""
  return COMPUTE_URL_BASE + 'projects/' + project + '/regions/' + region + '/' + collection + '/' + name
def _append_persistent_disk(resources, attachments, project, zone, disk_name, size_gb, disk_type):
  """Declare one persistent-disk resource plus its VM attachment entry.

  Appends a compute.v1.disk to *resources* (the deployment resource list)
  and the matching attachment descriptor to *attachments* (the instance's
  'disks' list), exactly mirroring the repeated inline blocks this replaces.
  """
  resources.append({
      'name': disk_name,
      'type': 'compute.v1.disk',
      'properties': {
          'zone': zone,
          'sizeGb': size_gb,
          'type': ZonalComputeUrl(project, zone, 'diskTypes', disk_type)
      }
  })
  attachments.append({
      'deviceName': disk_name,
      'type': 'PERSISTENT',
      'source': ''.join(['$(ref.', disk_name, '.selfLink)']),
      'autoDelete': True
  })


def GenerateConfig(context):
  """Generate the Deployment Manager config for a DB2-on-Windows SAP host.

  Returns {'resources': [...]}: the persistent disks followed by the
  compute.v1.instance that attaches them. Behaviour is unchanged from the
  original inline version; the per-disk boilerplate is factored into
  _append_persistent_disk and the unused locals (networkTag, db2_sid) were
  dropped.
  """
  # Core instance parameters.
  zone = context.properties['zone']
  project = context.env['project']
  instance_name = context.properties['instanceName']
  instance_type = ZonalComputeUrl(project, zone, 'machineTypes', context.properties['instanceType'])
  region = context.properties['zone'][:context.properties['zone'].rfind('-')]
  windows_image_project = context.properties['windowsImageProject']
  windows_image = GlobalComputeUrl(windows_image_project, 'images', context.properties['windowsImage'])
  primary_startup_url = "https://storage.googleapis.com/sapdeploy/dm-templates/sap_db2-win/startup.ps1"
  network_tags = { "items": str(context.properties.get('networkTag', '')).split(',') if len(str(context.properties.get('networkTag', ''))) else [] }
  service_account = str(context.properties.get('serviceAccount', context.env['project_number'] + '-compute@developer.gserviceaccount.com'))

  # Disk sizes (GB) and pd-ssd switches for the DB2 volumes.
  db2sid_size = context.properties['db2sidSize']
  db2saptmp_size = context.properties['db2saptmpSize']
  db2log_size = context.properties['db2logSize']
  db2sapdata_size = context.properties['db2sapdataSize']
  db2backup_size = context.properties['db2backupSize']
  usrsap_size = context.properties['usrsapSize']
  swap_size = context.properties['swapSize']
  db2log_type = 'pd-ssd' if str(context.properties['db2logSSD']) == 'True' else 'pd-standard'
  db2sapdata_type = 'pd-ssd' if str(context.properties['db2sapdataSSD']) == 'True' else 'pd-standard'

  # Subnetwork, with Shared-VPC support ('<host-project>/<subnetwork>').
  if "/" in context.properties['subnetwork']:
    sharedvpc = context.properties['subnetwork'].split("/")
    subnetwork = RegionalComputeUrl(sharedvpc[0], region, 'subnetworks', sharedvpc[1])
  else:
    subnetwork = RegionalComputeUrl(project, region, 'subnetworks', context.properties['subnetwork'])

  # Optional public IP (one-to-one NAT access config).
  if str(context.properties['publicIP']) == "False":
    networking = [ ]
  else:
    networking = [{
        'name': 'external-nat',
        'type': 'ONE_TO_ONE_NAT'
    }]

  sap_node = []
  # Boot disk is created inline from the Windows image; data disks follow.
  disks = [{
      'deviceName': 'boot',
      'type': 'PERSISTENT',
      'boot': True,
      'autoDelete': True,
      'initializeParams': {
          'diskName': instance_name + '-boot',
          'sourceImage': windows_image,
          'diskSizeGb': '64'
      }
  }]
  # D:\ (DB2), T:\ (Temp), L:\ (Log), E:\ (Data), X:\ (Backup).
  _append_persistent_disk(sap_node, disks, project, zone, instance_name + '-db2sid', db2sid_size, 'pd-standard')
  _append_persistent_disk(sap_node, disks, project, zone, instance_name + '-db2saptmp', db2saptmp_size, 'pd-standard')
  _append_persistent_disk(sap_node, disks, project, zone, instance_name + '-db2log', db2log_size, db2log_type)
  _append_persistent_disk(sap_node, disks, project, zone, instance_name + '-db2sapdata', db2sapdata_size, db2sapdata_type)
  _append_persistent_disk(sap_node, disks, project, zone, instance_name + '-db2backup', db2backup_size, 'pd-standard')
  # Optional S:\ (SAP) and P:\ (Pagefile) volumes.
  if usrsap_size > 0:
    _append_persistent_disk(sap_node, disks, project, zone, instance_name + '-usrsap', usrsap_size, 'pd-standard')
  if swap_size > 0:
    _append_persistent_disk(sap_node, disks, project, zone, instance_name + '-swap', swap_size, 'pd-standard')

  # The VM itself, attaching every disk declared above.
  sap_node.append({
      'name': instance_name,
      'type': 'compute.v1.instance',
      'properties': {
          'zone': zone,
          'minCpuPlatform': 'Automatic',
          'machineType': instance_type,
          'metadata': {
              'items': [{
                  'key': 'windows-startup-script-url',
                  'value': primary_startup_url
              }]
          },
          'canIpForward': True,
          'serviceAccounts': [{
              'email': service_account,
              'scopes': [
                  'https://www.googleapis.com/auth/compute',
                  'https://www.googleapis.com/auth/servicecontrol',
                  'https://www.googleapis.com/auth/service.management.readonly',
                  'https://www.googleapis.com/auth/logging.write',
                  'https://www.googleapis.com/auth/monitoring.write',
                  'https://www.googleapis.com/auth/trace.append',
                  'https://www.googleapis.com/auth/devstorage.read_write'
              ]
          }],
          'networkInterfaces': [{
              'accessConfigs': networking,
              'subnetwork': subnetwork
          }],
          "tags": network_tags,
          'disks': disks
      }
  })
  return {'resources': sap_node}
| [
"mohamed.youssef.dba@gmail.com"
] | mohamed.youssef.dba@gmail.com |
87d1d60d5d57d73ae310dd1aeea6458ff6f348d8 | b8fc8bb578fe381295363b8ee233adc413f81070 | /tests/integration_tests.py | 6500c3a2011fa0c8b1d2f5f9716d2cea1e43a26f | [] | no_license | Python3pkg/CssCoco | dbcf96b203e5c66b37ef0befb576a7b3b3b4407f | e652decacfccc7271112f02745558a86ed5349ac | refs/heads/master | 2021-01-21T17:28:06.600227 | 2017-05-21T11:44:36 | 2017-05-21T11:44:36 | 91,952,956 | 0 | 0 | null | 2017-05-21T11:44:26 | 2017-05-21T11:44:26 | null | UTF-8 | Python | false | false | 50,523 | py | from unittest import TestCase
import tests.helpers as helpers
import csscoco.lang.analysis.violations as violations
class TypeCheckerTests(TestCase):
    """Integration tests: evaluate .coco convention snippets against CSS strings."""

    # Scratch path each test writes its convention source to before parsing.
    filename = 'test.coco'
def setupFile(self, content):
fo = open(self.filename, 'w')
fo.write(content)
fo.close()
return fo
    def get_coco_ast(self, data):
        """Write the convention source *data* to the scratch file and parse it
        into a coco AST via the project's ParseHelper."""
        file = self.setupFile(data)
        return helpers.ParseHelper.parse_coco_string(file.name)
    # --- Semantic conventions: forbidden units/properties and casing rules.
    # Each test builds a convention AST, parses a CSS fixture, and asserts the
    # exact number of violations the finder reports.
    def test_em_instead_of_pt_px_cm_valid(self):
        """
        Use em instead of pt, px, cm
        """
        coco_ast = self.get_coco_ast("Semantic { forbid unit{string in ['px', 'pt', 'cm']} message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('#a { margin: 5px; padding: 10cm; margin: 0pt; padding: 15em;}')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 3

    def test_forbid_z_index(self):
        """
        Avoid using z-index property
        """
        coco_ast = self.get_coco_ast("Semantic { forbid property{name=='z-index'} message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('#a { z-index: 100; } .class { z-index: 200; }')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_forbid_important(self):
        """
        Do not use !important
        """
        coco_ast = self.get_coco_ast("Semantic { forbid important message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('#a { z-index: 100; margin: 5px !important;}'
                                                        '.class { padding: 10px !important; color: red; }')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_forbid_ids(self):
        """
        Avoid using ids
        """
        coco_ast = self.get_coco_ast("Semantic { forbid id message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('#a {} .class#id {} h1 #id {} h1 h2 {} .class {}')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 3

    def test_lowercase_ids_and_classes(self):
        """
        All id and class names should be lowercase
        """
        coco_ast = self.get_coco_ast("Semantic { find p=(id or class) require p.name match lowercase message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('#a {} .class#id {} h1 #ID {} H1#id {} .clAss {}')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_lowercase_properties(self):
        """
        Properties should be lowercase (vendor-specific properties are exception)
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find p=property{is_vendor_specific == false} "
                                     "require p.name match lowercase "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a{ z-index: 1; COLOR: 2; Margin: 5px;'
                                                        ' -o-color: red; -O-COLOR: pink; }')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_lowercase_values(self):
        """
        All values except the contents of strings should be lowercase
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find value=(not string) in value "
                                     "require value.string match lowercase "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a{ border: 5px solid red;'
                                                        'font-family: Consolas, Monaco, "Andale Mono", monospace; }')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_lowercase_tags(self):
        """
        Html tags should be lowercase
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find t=tag "
                                     "require t.name match lowercase "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a{ } P{} PRE.class{}')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_semicolon_after_declarations(self):
        """
        Put a ; at the end of declarations
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find d=declaration "
                                     "require d.child(-1).string == ';' "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a{ b:red; m:pink } P{ a:blue } ')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_no_strings_in_uri(self):
        """
        Do not put quotes in url declarations
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid string in uri "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a{ a: url(\'test\'); b: url("another") } ')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2
    # --- Semantic conventions: value shorthand, quoting and selector hygiene.
    def test_use_short_hex(self):
        """
        Use short hex values
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid hex{is_long and string match shorten} "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a{ color: #112233; color: #E6E6E6 } ')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_use_short_margin(self):
        """
        Use the shorthand margin property instead
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid ruleset{contains_all([ "
                                     "property{name=='margin-right'},"
                                     "property{name=='margin-left'},"
                                     "property{name=='margin-top'},"
                                     "property{name=='margin-bottom'}"
                                     "])} message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a{margin-left:0;margin-top:0;margin-bottom:0;margin-right:0}'
                                                        'b{margin-top:0;margin-bottom:0;margin-right:0}')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_unitless_zero(self):
        """
        Do not use units after 0 values
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid number{num_value == 0} in (dimension or percentage) message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a { margin: 0; padding: 0px; offset: 0cm; top: 0%; }')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 3

    def test_use_leading_zero(self):
        """
        Use a leading zero for decimal values
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find n=number{num_value < 1 and num_value > -1} "
                                     "require n.string match '^0.*' "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a { left: .6em; right: -.6em; top: 0.6em; }')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_use_single_quotes_in_attribute_selectors(self):
        """
        Use single quotes in attribute selectors
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find v=attribute-value "
                                     "require v is string and v.has_single_quotes "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('[attr=test] {} [attr=\'test\'] {} [attr="test"] {}')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_use_single_quotes_in_charsets(self):
        """
        Use single quotes in charsets
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find s=string in charset "
                                     "require s.has_single_quotes "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('@charset \'UFT-8\'; @charset "UFT-8";')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_use_single_quotes_in_values(self):
        """
        Use single quotes in values
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find s=string in value "
                                     "require s.has_single_quotes "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a { font: "Arial" "Black"; color: \'red\'; }')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_use_single_quotes_in_string(self):
        """
        Use single quotes in values
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find s=string "
                                     "require s.has_single_quotes "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string('a[b="test"] { font: "Black"; color: \'red\'; }')
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_forbid_charset(self):
        """
        Do not specify the encoding of style sheets as these assume UTF-8
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid charset "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string("@charset 'uft-8'")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_omit_protocol(self):
        """
        Omit the protocol http(s) in url
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "find u=uri "
                                     "require u.string not match '(?i)https?:.*' "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string("a { image: url('https://test'); "
                                                        "image: url(http://test) "
                                                        "image: url(\"http://test\") }")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 3

    def test_no_overqualified_tags(self):
        """
        Do not over-qualify classes and ids with html tags
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid tag (class or id) "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string("h1.class {} h2#id {} h1 h2 {} ")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_no_import(self):
        """
        Do not use @import
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid import "
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string("@import url;")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1
    # --- Semantic conventions over whole rulesets: box-model conflicts,
    # duplicate declarations, vendor prefixes and colour fallbacks.
    def test_no_width_and_border(self):
        """
        Warning if a rule contains width and border, border-left, border-right, padding, padding-left, or padding-right
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid ruleset{contains(property{name=='width'}) "
                                     "and contains(property{name in ['border',"
                                     "'border-left',"
                                     "'border-right',"
                                     "'padding',"
                                     "'padding-left',"
                                     "'padding-right']}) }"
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string("a {width: 5px; border: 10px}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_no_width_and_padding(self):
        """
        Warning if a rule contains width and border, border-left, border-right, padding, padding-left, or padding-right
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid ruleset{contains(property{name=='width'}) "
                                     "and contains(property{name in ['border',"
                                     "'border-left',"
                                     "'border-right',"
                                     "'padding',"
                                     "'padding-left',"
                                     "'padding-right']}) }"
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string("a {width: 5px; padding: 10px}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_no_height_and_border(self):
        """
        Warning if a rule contains height and border, border-top, border-bottom, padding, padding-top, or padding-bottom
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid ruleset{contains(property{name=='height'}) "
                                     "and contains(property{name in ['border',"
                                     "'border-top',"
                                     "'border-bottom',"
                                     "'padding',"
                                     "'padding-top',"
                                     "'padding-bottom']}) }"
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string("a {height: 5px; border: 10px}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_no_height_and_padding(self):
        """
        Warning if a rule contains height and border, border-top, border-bottom, padding, padding-top, or padding-bottom
        """
        coco_ast = self.get_coco_ast("Semantic { "
                                     "forbid ruleset{contains(property{name=='height'}) "
                                     "and contains(property{name in ['border',"
                                     "'border-top',"
                                     "'border-bottom',"
                                     "'padding',"
                                     "'padding-top',"
                                     "'padding-bottom']}) }"
                                     "message '' }")
        css_tree = helpers.ParseHelper.parse_css_string("a {height: 5px; padding: 10px}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_no_display_inline_block_and_float(self):
        """
        A rule that has display: inline-block should not use float
        """
        cos = "Semantic { " \
              "forbid ruleset{contains(declaration{property.name=='display' and value.string=='inline-block'}) " \
              "and contains(property{name=='float'})} message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { display: inline-block; float: 4;}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_no_display_block_and_vertical_align(self):
        """
        A rule that has display: block should not use vertical-align
        """
        cos = "Semantic { " \
              "forbid ruleset{contains(declaration{property.name=='display' and value.string=='block'}) " \
              "and contains(property{name=='vertical-align'})} message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { display: block; vertical-align: 4;}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_no_exact_duplicates(self):
        """
        Warning if a property is included in a rule twice and contains the same value.
        """
        cos = "Semantic { " \
              "find (d1=declaration, d2=declaration) in ruleset "\
              "forbid d1.property.name == d2.property.name and "\
              "d1.value.string == d2.value.string message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { color: red; margin: 0; color: red; }")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_no_crossed_duplicates(self):
        """
        A property is included twice and is separated by at least one other property.
        """
        cos = "Semantic { " \
              "find (d1=declaration, d2=declaration, d3=declaration) in ruleset "\
              "forbid d1.property.name == d3.property.name and "\
              "d2.property.name != d1.property.name and "\
              "d1.value.string != d3.value.string message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { color: red; margin: 0; color: blue; }")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_no_empty_rules(self):
        """
        Forbid empty rules.
        """
        cos = "Semantic { " \
              "forbid ruleset{count(declaration) == 0} "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { } b {}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_no_standard_property(self):
        """
        Do not use a vendor-prefixed property without a standard property after it.
        """
        cos = "Semantic ignore space, newline{ " \
              "find d=declaration{property.is_vendor_specific} " \
              "require d.next_sibling is declaration and " \
              "((d.next_sibling.is_vendor_specific and d.next_sibling.property.standard == d.property.standard) or " \
              "(not d.next_sibling.is_vendor_specific and d.next_sibling.property.standard == d.property.standard)) "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { -webkit-hyphens: none; -moz-hyphens: "
                                                        "none; -ms-hyphens: none; }")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_color_fallback_property(self):
        """
        No fallback color for properties with a rgba(), hsl(), or hsla() color
        """
        cos = "Semantic ignore space, newline{ " \
              "find (rgba or hsl or hsla) in d=declaration{property.name == 'color'} " \
              "require d.previous_sibling is declaration and " \
              "d.previous_sibling.property.name == 'color' and " \
              "(d.previous_sibling.contains(hex) or d.previous_sibling.contains(colorname)) "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { color: rgba(1,2,3,0); } "
                                                        "b {color: red; color:rgba(1,2,3,0); }")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1
    # --- Whitespace conventions: spacing, blank lines, indentation, newlines.
    def test_space_btw_colon_value(self):
        """
        Put one space between the colon and the value of a declaration
        """
        cos = "Whitespace { " \
              "find c=colon v=value " \
              "require space between c and v "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { color: red; } "
                                                        "b {color: red; color:red; }")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_blank_lines_btw_rules_valid(self):
        """
        Put one or two blank lines between rules
        """
        cos = "Whitespace { " \
              "find r1=ruleset r2=ruleset " \
              "require newline{2,3} between r1 and r2 "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { }\n\nb{}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 0

    def test_blank_lines_btw_rules(self):
        """
        Put one or two blank lines between rules
        """
        cos = "Whitespace { " \
              "find r1=ruleset r2=ruleset " \
              "require newline{2,3} between r1 and r2 "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { }\nb{}\n\n\n\nc{}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_blank_lines_btw_rules_ignore_comments(self):
        """
        Put one or two blank lines between rules
        """
        cos = "Whitespace { " \
              "find r1=ruleset r2=ruleset " \
              "require newline{2} between r1 and r2 "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a { }\n\n/*comment*/\nb{}\n\nc{}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 0

    def test_space_btw_selector_and_block_valid(self):
        """
        Put one space between the last selector and the block
        """
        cos = "Whitespace ignore newline comment newline { " \
              "find s=selector b=block " \
              "require space between s and b "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a, b {} c {}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 0

    def test_space_btw_selector_and_block(self):
        """
        Put one space between the last selector and the block
        """
        cos = "Whitespace ignore newline comment newline { " \
              "find s=selector b=block " \
              "require space between s and b "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a, b{} c {} d\n{}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 3

    def test_newline_btw_selectors(self):
        """
        One selector per line
        """
        cos = "Whitespace ignore newline comment newline { " \
              "find s1=delim s2=simple-selector " \
              "require newline between s1 and s2 "\
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a, b{} c,\nd {}")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 1

    def test_no_trailing_spaces(self):
        """
        No trailing spaces
        """
        cos = "Whitespace ignore newline comment newline { " \
              "forbid (space or indent) (newline or eof) " \
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a {} \n b{} \n")
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_indentation(self):
        """
        Use 4 spaces for indentation, no tabs
        """
        cos = "Whitespace ignore newline comment newline { " \
              "find i=indent " \
              "require i.string match '^ {4}$' " \
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("\n    a {}\n  b{}\n c{}")
        css_tree.pretty_print()
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_declaration_on_newline(self):
        """
        Put every declaration on a new line
        """
        cos = "Whitespace ignore newline comment newline { " \
              "find d=declaration " \
              "require newline before d " \
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a{ color:red; color:red\ncolor:red;}")
        css_tree.pretty_print()
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2

    def test_closing_brace_newline(self):
        """
        Place closing brace on a new line
        """
        cos = "Whitespace ignore newline comment newline { " \
              "find b=block " \
              "require newline before b.child(-1) " \
              "message '' }"
        coco_ast = self.get_coco_ast(cos)
        css_tree = helpers.ParseHelper.parse_css_string("a{ color:red;\n} a{ color:red;\n\n} a{ color:red; }")
        css_tree.pretty_print()
        _, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
        assert violation_log.number_of_violations() == 2
def test_newline_around_block_braces_valid(self):
"""
Put newline after "{" and before "}"
"""
cos = "Whitespace ignore newline comment newline { " \
"find b=block{count(declaration) != 1} " \
"require newline after b.child(0) and newline before b.child(-1) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{\ncolor:red;\ncolor:red;\n}")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert not violation_log.number_of_violations()
def test_newline_around_block_braces_spaces(self):
"""
Put newline after "{" and before "}"
"""
cos = "Whitespace ignore newline comment newline { " \
"find b=block{count(declaration) != 1} " \
"require newline after b.child(0) and newline before b.child(-1) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{\ncolor:red;\ncolor:red; }b{ color:red;\ncolor:red;\n}")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_newline_around_block_braces_newlines(self):
"""
Put newline after "{" and before "}"
"""
cos = "Whitespace ignore newline comment newline { " \
"find b=block{count(declaration) != 1} " \
"require newline after b.child(0) and newline before b.child(-1) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{\ncolor:red;\ncolor:red;\n\n}b{\n\n\ncolor:red;\ncolor:red;\n}")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_newline_around_block_braces_no_matches(self):
"""
Put newline after "{" and before "}"
"""
cos = "Whitespace ignore newline comment newline { " \
"find b=block{count(declaration) != 1} " \
"require newline after b.child(0) and newline before b.child(-1) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{ color:red; }b{\n\n\ncolor:red;\n\n\n}")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert not violation_log.number_of_violations()
def test_space_around_oneliner_braces_valid(self):
"""
You can put spaces in one line declarations
"""
cos = "Whitespace ignore newline comment newline { " \
"find b=block{count(declaration) == 1} " \
"require (newline after b.child(0) and newline before b.child(-1)) or "\
"(space after b.child(0) and space before b.child(-1)) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{ color:red; }")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert not violation_log.number_of_violations()
def test_space_around_oneliner_braces(self):
"""
You can put spaces in one line declarations
"""
cos = "Whitespace ignore newline comment newline { " \
"find b=block{count(declaration) == 1} " \
"require (newline after b.child(0) and newline before b.child(-1)) or "\
"(space after b.child(0) and space before b.child(-1)) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{\ncolor:red; } a{ color:red; }")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_space_after_commas_valid(self):
"""
Multiple csv values should be separated by either space of newline
"""
cos = "Whitespace ignore newline comment newline { " \
"find c=comma " \
"require space or newline after c "\
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{ color: rgb(1, 2,\n3); }")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert not violation_log.number_of_violations()
def test_space_after_commas(self):
"""
Multiple csv values should be separated by either space of newline
"""
cos = "Whitespace ignore newline comment newline { " \
"find c=comma " \
"require space or newline after c "\
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{ color: rgb(1, 2,\n\n3); }")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_comments_on_new_line(self):
"""
Place comments on a new line
"""
cos = "Whitespace ignore indent{ " \
"find c=comment " \
"require newline{1,} before c and newline{1,} after c "\
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{\n/*comment*/\n}\n b /*commend*/\n{}\n/*comment*/ c {}")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_no_js_prefixed_classes(self):
"""
Do not make js- prefixed classes. They are used exclusively from JS files. Use the is- prefix instead.
"""
cos = "Whitespace ignore indent{ " \
"forbid class{name match '.*js-.*'} " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string(".js-class{} .is-class{} .class{}")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_use_hex_for_colors(self):
"""
Use hex for colors
"""
cos = "Semantic ignore indent, space, newline, { " \
"forbid colorname " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{ color: red; color: #FFFFFF }")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_use_rgba_with_opacity(self):
"""
Use rgba only when opacity is needed
"""
cos = "Semantic ignore indent, space, newline, { " \
"forbid rgba{opacity == 1} " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{ color: rgba(1, 1, 2, 0.9); color: rgba(1, 1, 2, 1) }")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_use_px_for_font_size(self):
"""
Use px for font-size
"""
cos = "Semantic ignore indent, space, newline, { " \
"find d=declaration{property.name=='font-size'} " \
"require d.value.contains(dimension{unit=='px'}) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{ font-size: 10px; font-size: 10em; }")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_unitless_line_height(self):
"""
Line height should also be unit-less, unless necessary to be defined as a specific pixel value.
"""
cos = "Semantic ignore indent, space, newline, { " \
"find d=declaration{property.name=='line-height'} " \
"require not d.contains(dimension) or d.contains(dimension{unit=='px'}) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("a{ line-height: 10px; line-height: 10em; line-height: 10; }")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_single_id_selector(self):
"""
If you must use an id selector make sure that you have no more than one in your rule declaration.
"""
cos = "Semantic ignore indent, space, newline, { " \
"find id in s=simple-selector " \
"forbid s.count(selector-part) > 1 " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css_tree = helpers.ParseHelper.parse_css_string("h1#a { } #b {} #a, #b{}")
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_not_oneliner_space_btw_rules_valid(self):
"""
Add one blank line between rulesets
"""
cos = "Whitespace ignore indent { " \
"find r1=ruleset r2=ruleset " \
"where not r1.is_single_line or not r2.is_single_line " \
"require newline{2} between r1 and r2 " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """a {
}
b {}
c {
}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert not violation_log.number_of_violations()
def test_not_oneliner_space_btw_rules(self):
"""
Add one blank line between rulesets
"""
cos = "Whitespace ignore indent { " \
"find r1=ruleset r2=ruleset " \
"where not r1.is_single_line or not r2.is_single_line " \
"require newline{2} between r1 and r2 " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """a {
}
c {
}
d {
}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_oneliner_space_btw_rules_valid(self):
"""
Single-line rules may appear on adjacent lines
"""
cos = "Whitespace ignore indent { " \
"find r1=ruleset r2=ruleset " \
"where r1.is_single_line and r2.is_single_line " \
"require newline{1, 2} between r1 and r2 " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """a { }
c { }
d {}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert not violation_log.number_of_violations()
def test_oneliner_space_btw_rules(self):
"""
Single-line rules may appear on adjacent lines
"""
cos = "Whitespace ignore indent { " \
"find r1=ruleset r2=ruleset " \
"where r1.is_single_line and r2.is_single_line " \
"require newline{1, 2} between r1 and r2 " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """a { } c { }
d {}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_disallow_adjoining_classes(self):
"""
Disallow adjoining classes
"""
cos = "Whitespace ignore indent { " \
"forbid class class " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """.a.b {} .a .b{} h1.a{}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_disallow_star_hack(self):
"""
Disallow star hack
"""
cos = "Whitespace ignore indent { " \
"find p=property " \
"forbid p.name match '^\*.*'" \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """* {*color: red; color: red;}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_disallow_font_face(self):
"""
There are more than five font-faces in the stylesheet
"""
cos = "Whitespace ignore indent { " \
"find s=stylesheet " \
"forbid s.count(fontface) > 5" \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """@font-face{} @font-face{} @font-face{} @font-face{} @font-face{} @font-face{}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_disallow_attr_selectors(self):
"""
Disallow selectors that look like regular expressions
"""
cos = "Whitespace ignore indent { " \
"find a=attribute-selector-type " \
"forbid a.string in ['*=', '|=', '^=', '~=', '$=']" \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """[prop*='test'] {} [another='test']{}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_disallow_universal_selector(self):
"""
Disallow the universal selector
"""
cos = "Whitespace ignore indent { " \
"forbid universal " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """* {}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_over_qualify_headings(self):
"""
Do not over-qualify headings
"""
cos = "Semantic { " \
"find h=heading " \
"forbid h.next_sibling is class or h.next_sibling is id " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """h1#id, h1.class, h1 {}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_gradient_fallback_headings(self):
"""
Provide fallback property for gradient values
"""
cos = "Semantic ignore newline, indent, tab, comment, space { " \
"find d=declaration{property.name == 'background-image' and contains(function{name match 'gradient.*'})} " \
"require d.previous_sibling is declaration and (d.previous_sibling.property.name == 'background-image' or " \
"(d.previous_sibling.property.name == 'background-color' and d.previous_sibling.contains(color)))" \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """ a { background-color: #444;
background-image: -webkit-gradient(linear, left top, left bottom, from(#444), to(#999));
background-image: -webkit-linear-gradient(top, #444, #999);
color: red;
background-image: -webkit-gradient(linear, left top, left bottom, from(#444), to(#999));
background-image: -webkit-linear-gradient(top, #444, #999); }"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_over_qualify_attr(self):
"""
Disallow unqualified attribute selectors.
This is a problem only if the attr selector appears as key and is unqualified
"""
cos = "Semantic ignore newline, indent, tab, comment, space { " \
"find a=attribute-selector{is_key} " \
"require a.previous_sibling is selector-part " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """[class] h1, [class], h2[class], h3 [class] {}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_disallow_negative_text_indent(self):
"""
Disallow negative text indent
"""
cos = "Semantic ignore newline, indent, tab, comment, space { " \
"find d=declaration{property.name=='text-indent' and value.contains(unary)} in r=ruleset " \
"require r.contains(declaration{property.name=='direction' and value.string=='ltr'}) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """a {text-indent: -1; direction: rtl; } b {text-indent: -1;} c{text-indent: -1; direction: ltr; }"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_disallow_vendor_properties(self):
"""
Try to avoid vendor-specific properties
"""
cos = "Semantic ignore newline, indent, tab, comment, space { " \
"forbid declaration{is_vendor_specific} " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """a {-o-text-indent: -1; direction: rtl; } """
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_tag_child_selector_clear(self):
"""
Selectors that have tag as their key selector should never use the child selector
"""
cos = "Semantic ignore newline, indent, tab, comment, space { " \
"find tag{is_key} in s=simple-selector " \
"require not s.contains(child-selector) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """h1 > .c {} h2 {}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert not violation_log.number_of_violations()
def test_tag_child_selector(self):
"""
Selectors that have tag as their key selector should never use the child selector
"""
cos = "Semantic ignore newline, indent, tab, comment, space { " \
"find tag{is_key} in s=simple-selector " \
"require not s.contains(child-selector) " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """.c > h2 {} a > .c h1 {}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
def test_tag_class_selector(self):
"""
If a rule has a class as its key selector, don\'t add a tag name to the rule
"""
cos = "Semantic ignore newline, indent, tab, comment, space { " \
"find c=class{is_key} " \
"forbid c.previous_sibling is tag " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """h1.class {} h1 .class {}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 1
def test_tag_class_id_selector(self):
"""
If a rule has an ID selector as its key selector, don\'t add the tag name to the rule. Don\'t add a class name either
"""
cos = "Semantic ignore newline, indent, tab, comment, space { " \
"find i=id{is_key} " \
"forbid i.previous_sibling is tag or i.previous_sibling is class " \
"message '' }"
coco_ast = self.get_coco_ast(cos)
css = """h1#id {} .class#id {} #id .class {}"""
css_tree = helpers.ParseHelper.parse_css_string(css)
css_tree.pretty_print()
_, violation_log = violations.ViolationsFinder.find(coco_ast, css_tree)
assert violation_log.number_of_violations() == 2
| [
"boryana.goncharenko@gmail.com"
] | boryana.goncharenko@gmail.com |
9df8149ee9d3fb219789b8f81b51d5b528283550 | 0c75ed6188f6c4e37c51779996e0d1bb142e7569 | /example.py | 66c868a4039794ab3bdf2d6f51b40c1892e44646 | [
"MIT"
] | permissive | connorhsm/Aus-Bills | 2bc3d7094a24541ffacb68087ecd1b44cf32af6e | 2452a4596bddf86587acceada8198e82f65834af | refs/heads/master | 2022-12-18T18:00:11.025964 | 2020-04-13T01:58:30 | 2020-04-13T01:58:30 | 285,990,540 | 0 | 0 | MIT | 2020-08-08T06:52:13 | 2020-08-08T06:52:12 | null | UTF-8 | Python | false | false | 464 | py | from ausbills.federal_parliment import all_bills, Bill
import json
import random
outlist = []
for bill in all_bills:
b_data = Bill(bill["id"]).data
print(b_data["short_title"])
b_data["yes"] = 500 + int(random.random()*500)
b_data["no"] = 500 + int(random.random()*500)
b_data["ballotspec_hash"] = "blahblahblahblahblahblahblahblahblah"
outlist.append(b_data)
with open('bill_data.json', 'w') as outfile:
json.dump(outlist, outfile)
| [
"kip.crossing@gmail.com"
] | kip.crossing@gmail.com |
d5ff49f09ad70e362bb4bcbf3835cb9fb3aa9c6c | 86011f5700ae20117c5ac7f1e5defae324d955d9 | /Inbox.py | 6573c99da35c89347173e27cd6875069be22f6ff | [
"Apache-2.0"
] | permissive | maxtheaxe/pollen | bbc806f1ea0ec860b75c914f6aa381eb92a210a0 | 14588693a639ad38a35305d6c9ab92bd99b32355 | refs/heads/main | 2023-06-19T22:36:13.841719 | 2021-06-24T14:58:43 | 2021-06-24T14:58:43 | 304,699,444 | 7 | 0 | null | 2021-06-24T14:58:44 | 2020-10-16T17:48:09 | Python | UTF-8 | Python | false | false | 2,703 | py | import pgpy
from MessageBox import MessageBox
from Outbox import Outbox
from ConversationManager import ConversationManager
from TransitMessage import TransitMessage
from Pocket import Pocket
class Inbox(MessageBox):
'''handles all aspects of incoming messages'''
def __init__(self, messages = []):
super().__init__(messages)
# should sort messages
# either just try and catch decrypting everything or sort out own msgs on receipt
# own messages should be placed into appropriate conversations and deleted
def add_message(self, new_message):
'''de-jsonify messages and add new TransitMessage to list'''
transit_message = TransitMessage(jsoned_message = new_message)
super().add_message(transit_message) # add message to list
return
def plain_save(self, message, password):
'''decrypts TransitMessage and returns as LocalMessage'''
return message.detransit(password)
def sort_messages(self, own_outbox, convo_mgr, password):
'''sort own messages, store in either conversations or outbox'''
for i in range(len(self.messages)):
# check if message is intended for self
if (self.messages[i].for_self()):
# print("the message was for me")
# decrypt and convert to LocalMessage
local_message = self.plain_save(self.messages[i], password)
# add to conversation with appropriate peer (sender)
convo_mgr.add_message(local_message)
else: # message intended for re-transmission
own_outbox.add_transit_message(self.messages[i])
# could also pop messages out from front rather than wiping, idk which is better
self.messages = []
return
if __name__ == '__main__':
from LocalMessage import LocalMessage
new_box = Inbox()
new_outbox = Outbox()
new_convo_mgr = ConversationManager()
message = "hey, do messages work?"
password = "fake_password"
# same key is sender and receiver in this case; easier for testing
peer, _ = pgpy.PGPKey.from_file("pollen_key.asc")
peer = peer.pubkey # simplify to just pubkey
# peer, _ = pgpy.PGPKey.from_file("other_pub.asc") # pubkey from local dir
print("type: ", type(peer))
sent = False # issue here (might be flipped?)
new_message = LocalMessage(message, peer, sent)
new_transit = TransitMessage(new_message, password)
print("I am sender: ", bytes(new_transit.sender) == bytes(Pocket().public_key()))
jsonified_transit = new_transit.jsonify()
new_box.add_message(jsonified_transit)
print("num messages: ", new_box.count_messages())
# new_box.remove_message(0)
new_box.sort_messages(new_outbox, new_convo_mgr, password)
print("num messages: ", new_box.count_messages())
print("outbox messages: ", new_outbox.count_messages())
print("num conversations: ", new_convo_mgr.count_conversations()) | [
"max.perrello@gmail.com"
] | max.perrello@gmail.com |
5108a4792f75d0e3781150c471d7da2a6fd5e5f0 | 7dc4161819947091b1d15533e7d3fba4f4497885 | /First Semester/2014_Gender.py | 37cdb278ef398dfa51cd7a8981900d27ca343591 | [] | no_license | StanleyLin-TW/IM-Project | 40cf622cefa0cc7a02e98aa3f2035107b43da440 | c8ce2e9cb0ae9cbd46bfa0ec57e09a7ef562eefd | refs/heads/master | 2020-04-08T04:21:31.015276 | 2019-01-09T07:34:29 | 2019-01-09T07:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import csv
import time
from datetime import datetime
f1=open("/Users/StanleyLIn/Desktop/專題研究/2014.csv")
f2=open("2014F_data.csv",'w')
f3=open("2014M_data.csv",'w')
writer_F= csv.writer(f2, delimiter=',',dialect='excel')
writer_M= csv.writer(f3, delimiter=',',dialect='excel')
count=0
for row in csv.DictReader(f1):
count+=1
if(count==1):
writer_F.writerow(row)
writer_M.writerow(row)
print(count)
if(row["DEP_TIME"]!=""):
if(row["SEX"]=="F"):
writer_F.writerow(row.values())
if(row["SEX"]=="M"):
writer_M.writerow(row.values())
| [
"noreply@github.com"
] | StanleyLin-TW.noreply@github.com |
137172e8f2fd0c2b998ef40b2f14358dcc834869 | 1087386a01ad19c9d24154f1b6410fcf7a158c2c | /Old/garbageClock.py | 46043174793f4912a8be46e39c774cedf59aaacf | [
"MIT"
] | permissive | mkhouse/Weekday-Garbage-Clock | 0e1d8550dc013d9e11d95d56fa53bcf3d62fbdc7 | d4a0e49eb527364f4b45052e69099a264ade3800 | refs/heads/main | 2023-03-04T09:34:46.646832 | 2021-02-20T00:31:48 | 2021-02-20T00:31:48 | 317,043,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,193 | py | """
GARBAGE CLOCK for Adafruit Matrix Portal displays current time, date, day of
week, garbage alert, and countdown to garbage day.
Requires WiFi internet access.
GARBAGE CLOCK written by Melissa House is derived from:
MOON PHASE CLOCK for Adafruit Matrix Portal: displays current time, lunar
phase and time of next moonrise or moonset. Requires WiFi internet access.
Original MOON PHASE CLOCK Written by Phil 'PaintYourDragon' Burgess for
Adafruit Industries.
MIT license, all text above must be included in any redistribution.
BDF fonts from the X.Org project.
"""
# pylint: disable=import-error
import gc
import time
import math
import random
import json
import board
import busio
import displayio
import terminalio
from rtc import RTC
from adafruit_matrixportal.network import Network
from adafruit_matrixportal.matrix import Matrix
from digitalio import DigitalInOut, Pull
from adafruit_debouncer import Debouncer
#from adafruit_matrixportal.matrixportal import MatrixPortal
from adafruit_bitmap_font import bitmap_font
import adafruit_display_text.label
import adafruit_lis3dh
try:
from secrets import secrets
except ImportError:
print('WiFi secrets are kept in secrets.py, please add them there!')
raise
# CONFIGURABLE SETTINGS ----------------------------------------------------
TWELVE_HOUR = True # If set, use 12-hour time vs 24-hour (e.g. 3:00 vs 15:00)
BITPLANES = 6 # Ideally 6, but can set lower if RAM is tight
DEMO = True # Enable / Disable demo mode to scroll through each day
# SOME UTILITY FUNCTIONS AND CLASSES ---------------------------------------
def parse_time(timestring, is_dst=-1):
""" Given a string of the format YYYY-MM-DDTHH:MM:SS.SS-HH:MM (and
optionally a DST flag), convert to and return an equivalent
time.struct_time (strptime() isn't available here). Calling function
can use time.mktime() on result if epoch seconds is needed instead.
Time string is assumed local time; UTC offset is ignored. If seconds
value includes a decimal fraction it's ignored.
"""
date_time = timestring.split('T') # Separate into date and time
year_month_day = date_time[0].split('-') # Separate time into Y/M/D
hour_minute_second = date_time[1].split('+')[0].split('-')[0].split(':')
return time.struct_time(int(year_month_day[0]),
int(year_month_day[1]),
int(year_month_day[2]),
int(hour_minute_second[0]),
int(hour_minute_second[1]),
int(hour_minute_second[2].split('.')[0]),
-1, -1, is_dst)
def update_time(timezone=None, demo_num=0, demo_hour="7"):
""" Update system date/time from WorldTimeAPI public server;
no account required. Pass in time zone string
(http://worldtimeapi.org/api/timezone for list)
or None to use IP geolocation. Returns current local time as a
time.struct_time and UTC offset as string. This may throw an
exception on fetch_data() - it is NOT CAUGHT HERE, should be
handled in the calling code because different behaviors may be
needed in different situations (e.g. reschedule for later).
"""
if timezone: # Use timezone api
time_url = 'http://worldtimeapi.org/api/timezone/' + timezone
else: # Use IP geolocation
time_url = 'http://worldtimeapi.org/api/ip'
if DEMO == False:
time_data = NETWORK.fetch_data(time_url,
json_path=[['datetime'], ['dst'],
['utc_offset'], ['day_of_week']])
else:
month = str(random.randint(1,12))
day = str(random.randint(1,28))
if demo_hour:
hour = demo_hour
else:
hour = str(random.randint(6,20))
# hour = str(random.randint(0,23)) # will occasionally show night mode
minute = str(random.randint(10,59))
demoDateTime = '2020-' + month + '-' + day + 'T' + hour + ':' + minute + ':15.813019-08:00'
# time data JSON example: ['2020-11-28T20:45:15.813019-08:00', False, '-08:00', 6]
time_data = [demoDateTime, False, '-08:00', demo_num]
time_struct = parse_time(time_data[0], time_data[1])
if time_data[3] == 0: # Sunday
weekday = "SUN"
garbage = "3 days"
color = "green"
hcolor = 0x33CC33
elif time_data[3] == 1: # Monday
weekday = "MON"
garbage = "2 days"
color = "green"
hcolor = 0x33CC33
elif time_data[3] == 2 and time_struct.tm_hour < 19: # Tuesday before 7pm
weekday = "TUE"
garbage = "2nite"
color = "yellow"
hcolor = 0xFFFF00
elif time_data[3] == 2 and time_struct.tm_hour >= 19: # Tuesday after 7pm
weekday = "TUE"
garbage = "NOW"
color = "red"
hcolor = 0xFF0000
elif time_data[3] == 3 and time_struct.tm_hour <= 7 and time_struct.tm_min <= 59: # Wednesday 5am - 7:59am
weekday = "WED"
garbage = "NOW"
color = "red"
hcolor = 0xFF0000
elif time_data[3] == 3 and time_struct.tm_hour >= 8 : # Wednesday after 9am
weekday = "WED"
garbage = "done"
color = "green"
hcolor = 0x33CC33
elif time_data[3] == 4: # Thursday
weekday = "THU"
garbage = "6 days"
color = "green"
hcolor = 0x33CC33
elif time_data[3] == 5: # Friday
weekday = "FRI"
garbage = "5 days"
color = "green"
hcolor = 0x33CC33
elif time_data[3] == 6: # Saturday
weekday = "SAT"
garbage = "4 days"
color = "green"
hcolor = 0x33CC33
RTC().datetime = time_struct
return time_struct, time_data[2], weekday, garbage, color, hcolor
def hh_mm(time_struct):
""" Given a time.struct_time, return a string as H:MM or HH:MM, either
12- or 24-hour style depending on global TWELVE_HOUR setting.
"""
if TWELVE_HOUR:
if time_struct.tm_hour > 12:
hour_string = str(time_struct.tm_hour - 12) # 13-23 -> 1-11 (pm)
elif time_struct.tm_hour > 0:
hour_string = str(time_struct.tm_hour) # 1-12
else:
hour_string = '12' # 0 -> 12 (am)
else:
hour_string = '{0:0>2}'.format(time_struct.tm_hour)
return hour_string + ':' + '{0:0>2}'.format(time_struct.tm_min)
# ONE-TIME INITIALIZATION --------------------------------------------------
# set up the display
MATRIX = Matrix(bit_depth=BITPLANES)
DISPLAY = MATRIX.display
#set up the buttons - not currently working
pin_down = DigitalInOut(board.BUTTON_DOWN)
pin_down.switch_to_input(pull=Pull.UP)
button_down = Debouncer(pin_down)
pin_up = DigitalInOut(board.BUTTON_UP)
pin_up.switch_to_input(pull=Pull.UP)
button_up = Debouncer(pin_up)
ACCEL = adafruit_lis3dh.LIS3DH_I2C(busio.I2C(board.SCL, board.SDA),
address=0x19)
_ = ACCEL.acceleration # Dummy reading to blow out any startup residue
time.sleep(0.1)
DISPLAY.rotation = (int(((math.atan2(-ACCEL.acceleration.y,
-ACCEL.acceleration.x) + math.pi) /
(math.pi * 2) + 0.875) * 4) % 4) * 90
LARGE_FONT = bitmap_font.load_font('/fonts/helvB12.bdf')
SMALL_FONT = bitmap_font.load_font('/fonts/helvR10.bdf')
LARGE_FONT.load_glyphs('0123456789:')
SMALL_FONT.load_glyphs('0123456789:/.%')
# Display group is set up once, then we just shuffle items around later.
# Order of creation here determines their stacking order.
GROUP = displayio.Group(max_size=10)
# sets empty_group for night mode
empty_group = displayio.Group()
# Element 0 is a stand-in item, later replaced with the garbage can bitmap
# pylint: disable=bare-except
try:
FILENAME = 'bmps/garbage-start-' + str(DISPLAY.rotation) + '.bmp'
BITMAP = displayio.OnDiskBitmap(open(FILENAME, 'rb'))
TILE_GRID = displayio.TileGrid(BITMAP, pixel_shader=displayio.ColorConverter(),)
GROUP.append(TILE_GRID)
except:
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0xFF0000,
text='OOPS'))
GROUP[0].x = (DISPLAY.width - GROUP[0].bounding_box[2] + 1) // 2
GROUP[0].y = DISPLAY.height // 2 - 1
# Elements 1-4 are an outline around the moon percentage -- text labels
# offset by 1 pixel up/down/left/right. Initial position is off the matrix,
# updated on first refresh. Initial text value must be long enough for
# longest anticipated string later.
for i in range(4):
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0,
text='99.9%', y=-99))
# Element 5 is days until garbage out (on top of the outline labels)
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0xFFFF00,
text='99.9%', y=-99))
# Element 6 is the current time
GROUP.append(adafruit_display_text.label.Label(LARGE_FONT, color=0x808080,
text='12:00', y=-99))
# Element 7 is the current date
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0x808080,
text='12/31', y=-99))
# Element 8 is the time of (or time to) next rise/set event
GROUP.append(adafruit_display_text.label.Label(SMALL_FONT, color=0x00FF00,
text='12:00', y=-99))
DISPLAY.show(GROUP)
NETWORK = Network(status_neopixel=board.NEOPIXEL, debug=False)
NETWORK.connect()
# TIMEZONE is set up once, constant over app lifetime
# Load time zone string from secrets.py, else IP geolocation for this too
# (http://worldtimeapi.org/api/timezone for list).
try:
TIMEZONE = secrets['timezone'] # e.g. 'America/New_York'
except:
TIMEZONE = None # IP geolocation
# Set initial clock time, also fetch initial UTC offset while
# here (NOT stored in secrets.py as it may change with DST).
# pylint: disable=bare-except
demo_num = 0
LAST_SYNC = 0
demo_hour = str(random.randint(6,20))
repeatDayCount = 0
# MAIN LOOP ----------------------------------------------------------------
while True:
gc.collect()
NOW = time.time() # Current epoch time in seconds
LOCALNOW = time.localtime() # local time
#--DOES NOT WORK BELOW--
#button_down.update()
#button_up.update()
#if button_up.fell:
# print("button up pressed")
#--DOES NOT WORK ABOVE--
# Sync with time server every ~5 minutes - the clock drifts if left too long
if DEMO == False:
if LAST_SYNC == 0:
try:
DATETIME, UTC_OFFSET, WEEKDAY, GARBAGEDAY, COLOR, HCOLOR = update_time(TIMEZONE)
except:
DATETIME, UTC_OFFSET, WEEKDAY, GARBAGEDAY, COLOR, HCOLOR = time.localtime(), '+00:00', "???", "???", "grey", 0x66666
LAST_SYNC = time.mktime(DATETIME)
elif NOW - LAST_SYNC > 60*5:
try:
DATETIME, UTC_OFFSET, WEEKDAY, GARBAGEDAY, COLOR, HCOLOR = update_time(TIMEZONE)
LAST_SYNC = time.mktime(DATETIME)
continue # Time may have changed; refresh NOW value
except:
# update_time() can throw an exception if time server doesn't
# respond. That's OK, keep running with our current time, and
# push sync time ahead to retry in 30 minutes (don't overwhelm
# the server with repeated queries).
LAST_SYNC += 60 # 1 minute
continue
elif DEMO == True:
# normal demo mode start
if NOW - LAST_SYNC > 5 or LAST_SYNC == 0: #increment every 10 seconds
if demo_num == 2 or demo_num == 3: # on Tuesday and Wednesday
if repeatDayCount == 0:
demo_hour="7" #set demo hour to 7AM to show first half of the day
repeatDayCount += 1 #will repeat the day
elif repeatDayCount == 1:
demo_hour="19" #set demo hour to 7PM to show second half of the day
repeatDayCount = 0 #reset repeatDayCount to 0 to move to next day
# normal demo mode end
#
# special time demo mode start
# uncomment this and comment normal demo move (above)to test night mode
# or to test other states requiring specific times
# if NOW - LAST_SYNC > 5 or LAST_SYNC == 0: #increment every 10 seconds
# if demo_hour == "7":
# demo_hour = "22"
# elif demo_hour == "22":
# demo_hour = "7"
# special time demo mode end
# uncomment to here
#
DATETIME, UTC_OFFSET, WEEKDAY, GARBAGEDAY, COLOR, HCOLOR = update_time(TIMEZONE, demo_num, demo_hour)
if repeatDayCount == 0: # increment the day if it's not repeating
if demo_num < 6:
demo_num += 1
else:
demo_num = 0
# demo_hour = str(random.randint(6,20)) # will not show night mode
demo_hour = str(random.randint(0,23)) # will occasionally show night mode
LAST_SYNC = time.mktime(DATETIME)
continue # Time may have changed; refresh NOW value
# Don't draw anything from 10pm to 6am (this thing is BRIGHT)
if (DATETIME.tm_hour >= 22 and DATETIME.tm_min >= 0) or (DATETIME.tm_hour <= 6 and DATETIME.tm_min >= 0):
DISPLAY.show(empty_group)
# If it's not night, use normal daytime colors
else:
# Sets the display orientation based on whether the board is horizontal or vertical
if DISPLAY.rotation in (0, 180): # Horizontal 'landscape' orientation
CENTER_X = 48 # Text along right
TRASH_Y = 0 # Garbage at left
TIME_Y = 6 # Time at top right
EVENT_Y = 26 # Day of week at bottom right
else: # Vertical 'portrait' orientation
CENTER_X = 16 # Text down center
TIME_Y = 6 # Time/date at top
EVENT_Y = 26 # Day of week in middle
TRASH_Y = 32 # Garbage at bottom
DISPLAY.show(GROUP)
# Update trash can image (GROUP[0])
FILENAME = 'bmps/garbage_can_' + COLOR + '.bmp'
BITMAP = displayio.OnDiskBitmap(open(FILENAME, 'rb'))
TILE_GRID = displayio.TileGrid(BITMAP,
pixel_shader=displayio.ColorConverter(),)
TILE_GRID.x = 0
TILE_GRID.y = TRASH_Y
GROUP[0] = TILE_GRID
# Set element 5 first, use its size and position for setting others
#GROUP[5].text is the text over the image
GROUP[5].text = GARBAGEDAY
GROUP[5].color = HCOLOR
GROUP[5].x = 16 - GROUP[5].bounding_box[2] // 2
GROUP[5].y = TRASH_Y + 16
for _ in range(1, 5):
GROUP[_].text = GROUP[5].text
GROUP[1].x, GROUP[1].y = GROUP[5].x, GROUP[5].y - 1 # Up 1 pixel
GROUP[2].x, GROUP[2].y = GROUP[5].x - 1, GROUP[5].y # Left
GROUP[3].x, GROUP[3].y = GROUP[5].x + 1, GROUP[5].y # Right
GROUP[4].x, GROUP[4].y = GROUP[5].x, GROUP[5].y + 1 # Down
# GROUP[8] is day of week
GROUP[8].text = WEEKDAY + " "
XPOS = CENTER_X - (GROUP[8].bounding_box[2] + 6) // 2
GROUP[8].x = XPOS + 6
GROUP[8].y = EVENT_Y
# Show weekday in color matching trash color
GROUP[8].color = HCOLOR
# Update time (GROUP[6]) and date (GROUP[7])
# GROUP[6] is the time
GROUP[6].text = hh_mm(LOCALNOW)
# Show time in orange if AM, blue if PM
GROUP[6].color = (0xFF6600 if DATETIME.tm_hour < 12 else 0x3300CC)
GROUP[6].x = CENTER_X - GROUP[6].bounding_box[2] // 2
GROUP[6].y = TIME_Y
        # GROUP[7] is the date
GROUP[7].text = str(LOCALNOW.tm_mon) + '.' + str(LOCALNOW.tm_mday)
GROUP[7].x = CENTER_X - GROUP[7].bounding_box[2] // 2
GROUP[7].y = TIME_Y + 10
DISPLAY.refresh() # Force full repaint (splash screen sometimes sticks)
time.sleep(5)
| [
"mkhouse@mac.com"
] | mkhouse@mac.com |
d150eff8b16383dbb01dddcf72bb97a39b403dbe | 501fbb652c238001075fbeb572648fee8e8ee113 | /deploy/bootstrap/run/cega/conf.py | a343b7054e9a901315b1cce850c02e687abd54fd | [
"Apache-2.0"
] | permissive | jrambla/LocalEGA | 9842187c59c21a99ceb0f71dd7c3741f4a54b873 | 201efdd8bf78d49bfebd40d9d40e91e502c362a7 | refs/heads/master | 2022-10-23T15:15:24.795251 | 2020-06-17T17:35:07 | 2020-06-17T17:35:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from urllib.parse import urlencode
import configparser
from ..defs import generate_mq_hash
HOSTNAME_DOMAIN = os.getenv('HOSTNAME_DOMAIN','')
cega_connection_params=urlencode({ 'heartbeat': 60,
'connection_attempts': 30,
'retry_delay': 10,
'server_name_indication': f'cega-mq{HOSTNAME_DOMAIN}',
'verify': 'verify_peer',
'fail_if_no_peer_cert': 'true',
'cacertfile': '/etc/rabbitmq/CA.cert',
'certfile': '/etc/rabbitmq/ssl.cert',
'keyfile': '/etc/rabbitmq/ssl.key',
}, safe='/-_.')
config = configparser.RawConfigParser()
config['DEFAULT'] = {}
config['mq'] = {
'version': '3.7.8',
'connection': f"amqps://legatest:legatest@cega-mq{HOSTNAME_DOMAIN}:5671/lega",
'connection_params': cega_connection_params,
'user': 'legatest',
'password_hash': generate_mq_hash('legatest'),
'vhost': 'lega',
'exchange': 'localega.v1',
}
config['users'] = {
'endpoint': r'https://cega-users/lega/v1/legas/users',
'credentials': 'legatest:legatest',
}
# output
config.write(sys.stdout)
| [
"frederic.haziza@crg.eu"
] | frederic.haziza@crg.eu |
5744be3149b224d833b2493773ab92e6f4402abb | 8b2654f2a81c9f57e68a2e5663dc2add1d4c2b04 | /posts/admin.py | 25465ddc5c0d6e9032bf8a0f01b3628b54cc6caf | [] | no_license | karan-singare/zappit | c555187261557961bedecfbf427951a9a89ff648 | 4c23a158c241aeace5a1daebd86fa6a4197e8239 | refs/heads/master | 2023-01-07T05:03:01.354214 | 2020-10-30T02:02:07 | 2020-10-30T02:02:07 | 308,485,178 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.contrib import admin
from .models import Post, Vote
# Register your models here.
# Expose Post and Vote in the Django admin site with the default ModelAdmin.
admin.site.register(Post)
admin.site.register(Vote)
| [
"karansingare@gmail.com"
] | karansingare@gmail.com |
7773625d944e8d59829eec7f9ca851f2c5ca1fcf | a66f92f305956320b55c0a190506f57e3df45ac7 | /notlar/draft/ingilizce/views.py | ffadfa52c54515af2001a48d465cdf1668b88ccb | [] | no_license | kopuskopecik/django-projects | a12380c37130cb20974c785a76a6f4a8fb18d238 | 4aceafaf5ff29761c3867c9f62c93c258d9c16ec | refs/heads/master | 2022-12-05T11:41:53.904378 | 2020-08-24T18:35:51 | 2020-08-24T18:35:51 | 289,997,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,565 | py | from django.shortcuts import render, get_list_or_404, get_object_or_404, redirect
from django.views.generic import CreateView, UpdateView, DetailView, ListView, TemplateView
from django.contrib import messages
from braces.views import LoginRequiredMixin
from .models import Lesson
from .forms import LessonForm
from hakkimizda.forms import IletisimForm
from hakkimizda.models import Iletisim
class HeaderMixin():
    # Mixin shared by all the lesson views below: every view inheriting from
    # it flashes the same success message on valid form submission and gets
    # the shared header context (lesson category querysets) injected.
    @property
    def action(self):
        """Verb used in the flash message; concrete views must set this."""
        msg = "{0} is missing action.".format(self.__class__)
        raise NotImplementedError(msg)
    def form_valid(self, form):
        """Flash an informational message, then delegate to the parent view."""
        msg = "Lesson {0}!".format(self.action)
        messages.info(self.request, msg)
        return super(HeaderMixin, self).form_valid(form)
    def get_context_data(self, **kwargs):
        """Add the category querysets rendered by the shared page header."""
        context = super(HeaderMixin, self).get_context_data(**kwargs)
        genel1 = Lesson.objects.general1()
        genel2 = Lesson.objects.general2()
        moduller = Lesson.objects.modules()
        paketler1 = Lesson.objects.packet1()
        paketler2 = Lesson.objects.packet2()
        context["genel1"] = genel1
        context["genel2"] = genel2
        context["moduller"] = moduller
        context["paketler1"] = paketler1
        context["paketler2"] = paketler2
        return context
class LessonIndexView(HeaderMixin, ListView):
    # Landing page: lists only the lessons whose filtre2 tag contains "ana".
    template_name = 'ingilizce/index.html'
    context_object_name = 'lessons'
    def get_queryset(self):
        return Lesson.objects.filter(filtre2__contains = "ana")
class AnaIndexView(HeaderMixin, ListView):
    # Lists every lesson sharing the slug2 captured from the URL;
    # raises 404 when no lesson matches.
    template_name = 'ingilizce/ana_index.html'
    context_object_name = 'lessons'
    def get_queryset(self):
        return get_list_or_404(Lesson, slug2=self.kwargs['slug2'])
class LessonAllIndexView(HeaderMixin, ListView):
    # Lists every lesson, with an optional ?q= search that tries the
    # headline first and falls back to a full-content match.
    template_name = 'ingilizce/all_lesson.html'
    context_object_name = 'lessons'
    def get_queryset(self):
        query = self.request.GET.get("q")
        if not query:
            return Lesson.objects.all()
        by_headline = Lesson.objects.filter(headline__icontains=query)
        if by_headline:
            return by_headline
        by_content = Lesson.objects.filter(content__icontains=query).distinct()
        if by_content:
            return by_content
        return []
class LessonDetailView(HeaderMixin, DetailView):
    # Detail page for one lesson; also exposes the lessons sharing the same
    # slug2 group so the template can link related content.
    model = Lesson
    template_name = 'ingilizce/detail.html'
    def get_context_data(self, **kwargs):
        context = super(LessonDetailView, self).get_context_data(**kwargs)
        common = Lesson.objects.filter(slug2 = self.kwargs['slug2'])
        # other_lessons = Lesson.objects.exclude(slug = self.kwargs['slug']).filter(filtre2__contains = "ana").filter(number__gte = self.kwargs['number'])[0:6]
        context["common_lessons"] = common
        return context
class LessonCreateView(LoginRequiredMixin, HeaderMixin, CreateView):
    # Authenticated-only lesson creation; HeaderMixin flashes "Lesson created!".
    model = Lesson
    template_name = 'ingilizce/form.html'
    action = "created"
    # Explicitly attach the LessonForm class
    form_class = LessonForm
class LessonUpdateView(LoginRequiredMixin, HeaderMixin, UpdateView):
    # Authenticated-only lesson editing; HeaderMixin flashes "Lesson updated!".
    model = Lesson
    template_name = 'ingilizce/form.html'
    action = "updated"
    # Explicitly attach the LessonForm class
    form_class = LessonForm
class LessonTemplateView(HeaderMixin, CreateView):
    # "About" page backed by the contact (Iletisim) form; despite the name it
    # is a CreateView, so submitted contact messages are persisted.
    model = Iletisim
    action = "about"
    template_name = 'ingilizce/about.html'
    form_class = IletisimForm
def ing_delete(request, slug, slug2):
    """Delete the Lesson identified by *slug* and redirect to the index.

    Only authenticated users may delete; anonymous requests get a 404.
    *slug2* is unused but kept for URLconf compatibility.
    """
    from django.http import Http404  # local import: name was missing at module level
    if not request.user.is_authenticated:
        # Bug fix: the original *returned* Http404() — an exception instance,
        # not an HttpResponse — which Django cannot serve. It must be raised.
        raise Http404()
    lesson = get_object_or_404(Lesson, slug=slug)
    lesson.delete()
    lesson.delete_number()
    return redirect("ingilizce:index")
"kopuskopecik@gmail.com"
] | kopuskopecik@gmail.com |
7e7ffe412e8cbab13c6623a2ab6df0fad00f46da | fa5eb288a75b6aebc7c173008ce589e477f90d3e | /home/migrations/0019_gallery_gallerycategory.py | 73db4125a3a317fb42e4f0d413a2c0bd4a019e9d | [] | no_license | Vincodetech/bhimbakend | 4e5144afdf659be9245e3ab4cc8e383dbf161101 | 83bee6f8149eb61346b4a9acd15099fb8c14418d | refs/heads/master | 2023-07-01T13:50:24.050025 | 2021-08-07T04:31:32 | 2021-08-07T04:31:32 | 345,263,606 | 0 | 0 | null | 2021-04-27T09:05:23 | 2021-03-07T05:08:22 | CSS | UTF-8 | Python | false | false | 1,486 | py | # Generated by Django 3.1.5 on 2021-02-04 10:30
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: create GalleryCategory and Gallery (Gallery FK -> category)."""
    dependencies = [
        ('home', '0018_auto_20210204_1111'),
    ]
    operations = [
        migrations.CreateModel(
            name='GalleryCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category_name', models.CharField(default='', max_length=255)),
                ('active', models.BooleanField(default=True)),
                ('created_at', models.DateField(default=datetime.datetime.now)),
                ('updated_at', models.DateField(default=datetime.datetime.now)),
            ],
        ),
        migrations.CreateModel(
            name='Gallery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, null=True, upload_to='gallery/')),
                ('active', models.BooleanField(default=True)),
                ('created_at', models.DateField(default=datetime.datetime.now)),
                ('updated_at', models.DateField(default=datetime.datetime.now)),
                ('category', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='home.gallerycategory')),
            ],
        ),
    ]
| [
"71717023+Vincodetech@users.noreply.github.com"
] | 71717023+Vincodetech@users.noreply.github.com |
19a4a211d8ed80a52b10fd768f5a871ea0221494 | a5a5839e76f01b580c17fbaccbde8063731c99ee | /utils/mysql_module.py | 6297e4f61c680f402d3851e89d36314b2ca1cdf8 | [
"MIT"
] | permissive | digitalmoneybits2/DigitalMoneyBot | 6804764a87840c101c5da7bcb405cb9de41bda2c | 34808b18dfdccd8a2d91e2152fd3e92c9397010e | refs/heads/main | 2023-02-18T01:25:37.329475 | 2021-01-19T19:08:50 | 2021-01-19T19:08:50 | 330,454,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,455 | py | import pymysql.cursors
from utils import parsing
from utils import rpc_module
from utils import helpers
from decimal import Decimal
import datetime
rpc = rpc_module.Rpc()
class Mysql:
"""
Singleton helper for complex database methods
"""
instance = None
    def __init__(self):
        # Lazily create the single shared __Mysql backend on first construction.
        if not Mysql.instance:
            Mysql.instance = Mysql.__Mysql()
    def __getattr__(self, name):
        # Delegate every attribute/method access to the shared backend instance.
        return getattr(self.instance, name)
class __Mysql:
        def __init__(self):
            """Read mysql + tipbot settings from config.json and open the connection."""
            config = parsing.parse_json('config.json')["mysql"]
            self.__host = config["db_host"]
            self.__port = int(config.get("db_port", 3306))
            self.__db_user = config["db_user"]
            self.__db_pass = config["db_pass"]
            self.__db = config["db"]
            self.__connected = 1
            self.__setup_connection()
            # Bot economy settings: flat withdrawal fee plus the snowflake ids
            # of the special service accounts (examples of the ids below).
            self.txfee = parsing.parse_json('config.json')["txfee"]
            self.treasurer = parsing.parse_json('config.json')["treasurer"]
            self.stake_bal = parsing.parse_json('config.json')["stake_bal"]
            self.donation = parsing.parse_json('config.json')["donation"]
            self.game_bal = parsing.parse_json('config.json')["game_bal"]
            self.stake_pay = parsing.parse_json('config.json')["stake_pay"]
            self.MIN_CONFIRMATIONS_FOR_DEPOSIT = parsing.parse_json('config.json')["MIN_CONFIRMATIONS_FOR_DEPOSIT"]
            # "treasurer": 100000000000000010,
            # "stake_bal": 100000000000000011,
            # "donation": 100000000000000012,
            # "game_bal": 100000000000000013,
            # "stake_pay": 10000000000009999,
        def __setup_connection(self):
            # Open (or reopen) the pymysql connection with the stored credentials.
            self.__connection = pymysql.connect(
                host=self.__host,
                port=self.__port,
                user=self.__db_user,
                password=self.__db_pass,
                db=self.__db)
        def __setup_cursor(self, cur_type):
            """Return a fresh cursor of *cur_type*, reviving the connection if needed."""
            # ping the server and reset the connection if it is down
            self.__connection.ping(True)
            return self.__connection.cursor(cur_type)
return self.__connection.cursor(cur_type)
# region User
        def make_user(self, snowflake):
            """Insert a new users row for *snowflake* with all balances at zero."""
            cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
            to_exec = "INSERT INTO users (snowflake_pk, balance, balance_unconfirmed, staking_balance) VALUES(%s, %s, %s, %s)"
            cursor.execute(to_exec, (str(snowflake), '0', '0', '0'))
            cursor.close()
            self.__connection.commit()
def check_for_user(self, snowflake):
"""
Checks for a new user (NO LONGER CREATES A NEW USER - THAT IS HANDLED BY bot.py)
"""
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT snowflake_pk FROM users WHERE snowflake_pk LIKE %s"
cursor.execute(to_exec, (str(snowflake)))
result_set = cursor.fetchone()
cursor.close()
return result_set
def messages_user(self, snowflake):
"""
Checks for a new user (NO LONGER CREATES A NEW USER - THAT IS HANDLED BY bot.py)
"""
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT snowflake_pk, last_msg_time, rain_last_msg_time, rain_msg_count FROM users WHERE snowflake_pk LIKE %s"
cursor.execute(to_exec, (str(snowflake)))
result_set = cursor.fetchone()
cursor.close()
return result_set
def register_user(self, snowflake):
"""
Registers a new user
"""
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT snowflake_pk, address, balance, balance_unconfirmed, staking_balance, last_msg_time, rain_last_msg_time, rain_msg_count FROM users WHERE snowflake_pk LIKE %s"
cursor.execute(to_exec, (str(snowflake)))
result_set = cursor.fetchone()
cursor.close()
if result_set is None:
# address = rpc.getnewaddress(str(snowflake))
self.make_user(snowflake)
def new_address(self, snowflake):
address = rpc.getnewaddress(str(snowflake))
print('address:', address)
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = """
UPDATE users
SET address = "{:s}"
WHERE snowflake_pk = {:s}
"""
print('to_exec:', to_exec.format(str(address), str(snowflake)))
cursor.execute(to_exec.format(str(address), str(snowflake)))
print('execute:', )
print('execute done')
cursor.close()
self.__connection.commit()
return str(address)
def get_user(self, snowflake):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT balance, balance_unconfirmed, staking_balance, address FROM users WHERE snowflake_pk LIKE %s"
cursor.execute(to_exec, (str(snowflake)))
result_set = cursor.fetchone()
cursor.close()
return result_set
# TODO
def get_staking_user(self, snowflake):
# print('get_staking_user', snowflake, self.stake_bal)
if snowflake == self.stake_bal:
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT snowflake_pk, balance, balance_unconfirmed FROM users WHERE snowflake_pk LIKE %s"
cursor.execute(to_exec, (str(snowflake)))
result_set = cursor.fetchone()
cursor.close()
return result_set
else:
return None
def get_all_balance(self, snowflake, check_update=False):
if check_update:
self.check_for_updated_balance(snowflake)
result_set = self.get_user(snowflake)
return result_set
def get_user_balance(self, snowflake, check_update=False):
if check_update:
self.check_for_updated_balance(snowflake)
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT balance FROM users WHERE snowflake_pk LIKE %s"
cursor.execute(to_exec, (str(snowflake)))
result_set = cursor.fetchone()
cursor.close()
return result_set.get("balance")
def get_user_unconfirmed_balance(self, snowflake):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT balance_unconfirmed FROM users WHERE snowflake_pk LIKE %s"
cursor.execute(to_exec, (str(snowflake)))
result_set = cursor.fetchone()
cursor.close()
return result_set.get("balance_unconfirmed")
def get_user_staking_balance(self, snowflake):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT staking_balance FROM users WHERE snowflake_pk LIKE %s"
cursor.execute(to_exec, (str(snowflake)))
result_set = cursor.fetchone()
cursor.close()
return result_set.get("staking_balance")
def get_user_by_address(self, address):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT snowflake_pk FROM users WHERE address LIKE %s"
cursor.execute(to_exec, (str(address)))
result_set = cursor.fetchone()
cursor.close()
return result_set.get('snowflake_pk')
def get_address(self, snowflake):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT address FROM users WHERE snowflake_pk LIKE %s"
cursor.execute(to_exec, (str(snowflake)))
result_set = cursor.fetchone()
cursor.close()
return result_set.get("address")
# region Balance
def set_balance(self, snowflake, to, is_unconfirmed=False, is_staking=False):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
if is_unconfirmed:
to_exec = "UPDATE users SET balance_unconfirmed = %s WHERE snowflake_pk = %s"
elif is_staking:
to_exec = "UPDATE users SET staking_balance = %s WHERE snowflake_pk = %s"
else:
to_exec = "UPDATE users SET balance = %s WHERE snowflake_pk = %s"
cursor.execute(to_exec, (to, str(snowflake),))
cursor.close()
self.__connection.commit()
def add_to_balance(self, snowflake, amount):
self.set_balance(snowflake, self.get_user_balance(
snowflake) + Decimal(amount))
def add_to_staking_balance(self, snowflake, amount):
self.set_balance(snowflake, Decimal(self.get_user_staking_balance(
snowflake)) + Decimal(amount), is_staking=True)
def remove_from_staking_balance(self, snowflake, amount):
self.set_balance(snowflake, Decimal(self.get_user_staking_balance(
snowflake)) - Decimal(amount), is_staking=True)
def remove_from_balance(self, snowflake, amount):
self.set_balance(snowflake, self.get_user_balance(
snowflake) - Decimal(amount))
def add_to_balance_unconfirmed(self, snowflake, amount):
balance_unconfirmed = self.get_user_unconfirmed_balance(snowflake)
self.set_balance(
snowflake, balance_unconfirmed + Decimal(amount),
is_unconfirmed=True)
def remove_from_balance_unconfirmed(self, snowflake, amount):
balance_unconfirmed = self.get_user_unconfirmed_balance(snowflake)
self.set_balance(
snowflake, balance_unconfirmed - Decimal(amount),
is_unconfirmed=True)
        def check_for_updated_balance(self, snowflake):
            """
            Uses RPC to get the latest transactions and updates
            the user balances accordingly
            This code is based off of parse_incoming_transactions in
            https://github.com/tehranifar/ZTipBot/blob/master/src/wallet.py
            """
            transaction_list = rpc.listtransactions(str(snowflake), 100)
            for tx in transaction_list:
                # Only incoming transfers affect balances.
                if tx["category"] != "receive":
                    continue
                txid = tx["txid"]
                amount = tx["amount"]
                confirmations = tx["confirmations"]
                user = tx["account"]
                # Skip transactions credited to some other wallet account.
                if user != str(snowflake):
                    continue
                status = self.get_transaction_status_by_txid(txid)
                # New tx, already confirmed deep enough: credit immediately.
                if status == "DOESNT_EXIST" and confirmations >= self.MIN_CONFIRMATIONS_FOR_DEPOSIT:
                    print("NEW DEPOSIT {}".format(txid))
                    self.add_to_balance(user, amount)
                    self.add_deposit(user, amount, txid, 'CONFIRMED')
                # New tx, still shallow: park it in the unconfirmed balance.
                elif status == "DOESNT_EXIST" and confirmations < self.MIN_CONFIRMATIONS_FOR_DEPOSIT:
                    self.add_deposit(user, amount, txid, 'UNCONFIRMED')
                    self.add_to_balance_unconfirmed(user, amount)
                # Known-unconfirmed tx that has now matured: promote it.
                # (user == str(snowflake) here, so passing snowflake is equivalent)
                elif status == "UNCONFIRMED" and confirmations >= self.MIN_CONFIRMATIONS_FOR_DEPOSIT:
                    self.add_to_balance(user, amount)
                    self.remove_from_balance_unconfirmed(snowflake, amount)
                    self.confirm_deposit(txid)
def get_transaction_status_by_txid(self, txid):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT status FROM deposit WHERE txid = %s"
cursor.execute(to_exec, (txid,))
result_set = cursor.fetchone()
cursor.close()
if not result_set:
return "DOESNT_EXIST"
return result_set["status"]
# endregion
# region Deposit/Withdraw/Tip/Soak
def add_deposit(self, snowflake, amount, txid, status):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "INSERT INTO deposit(snowflake_fk, amount, txid, status) VALUES(%s, %s, %s, %s)"
cursor.execute(to_exec, (str(snowflake), '{:.8f}'.format(amount), str(txid), str(status)))
cursor.close()
self.__connection.commit()
def confirm_deposit(self, txid):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "UPDATE deposit SET status = %s WHERE txid = %s"
cursor.execute(to_exec, ('CONFIRMED', str(txid)))
cursor.close()
self.__connection.commit()
        def create_withdrawal(self, snowflake, address, amount):
            """Send *amount* (minus txfee) on-chain to *address* and log it.

            Returns the wallet txid on success, or None when setting the tx
            fee or the send itself fails. The user's balance is debited the
            full *amount* (fee included) only after a successful send.
            """
            txfee = self.txfee
            amount = float(amount)
            res = rpc.settxfee(txfee)
            print('res =', res)
            if res is False:
                return None
            # The fee is deducted from the requested amount, so the user pays it.
            txid = rpc.sendtoaddress(address, round(amount - txfee, 8))
            print('txid =', txid)
            if not txid:
                return None
            self.remove_from_balance(snowflake, amount)
            return self.add_withdrawal(snowflake, amount, txid)
def add_withdrawal(self, snowflake, amount, txid):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "INSERT INTO withdrawal(snowflake_fk, amount, txid) VALUES(%s, %s, %s)"
cursor.execute(to_exec, (str(snowflake), '{:.8f}'.format(amount), str(txid)))
cursor.close()
self.__connection.commit()
return txid
def add_tip(self, snowflake_from_fk, snowflake_to_fk, amount):
self.remove_from_balance(snowflake_from_fk, amount)
self.add_to_balance(snowflake_to_fk, amount)
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
tip_exec = "INSERT INTO tip(snowflake_from_fk, snowflake_to_fk, amount) VALUES(%s, %s, %s)"
cursor.execute(tip_exec, (str(snowflake_from_fk), str(snowflake_to_fk), '{:.8f}'.format(amount)))
cursor.close()
self.__connection.commit()
        def add_rain(self, snowflake_from_fk, snowflake_to_fk, amount):
            """Move *amount* from one user to another (rain payout, not logged)."""
            self.remove_from_balance(snowflake_from_fk, amount)
            self.add_to_balance(snowflake_to_fk, amount)
        def pay_rain(self, snowflake_from_fk, amount):
            """Debit the rain sponsor's balance by *amount*."""
            self.remove_from_balance(snowflake_from_fk, amount)
        def give_rain(self, snowflake_to_fk, amount):
            """Credit a rain recipient's balance by *amount*."""
            self.add_to_balance(snowflake_to_fk, amount)
# endregion
# region Last message
        def user_last_msg_check(self, user_id, content, is_private):
            """Rate-limit / activity bookkeeping for one incoming message.

            Returns False for unregistered users or when the previous message
            arrived under one second ago; otherwise records the message
            (public channels only) and returns True.
            """
            # if the user is not registered
            if self.get_user(user_id) is None:
                return False
            else:
                user = self.messages_user(user_id)
                # if user is missing return false
                if user is None:
                    return False
                # Get difference in seconds between now and last msg. If it is less than 1s, return False
                if user["last_msg_time"] is not None:
                    since_last_msg_s = (datetime.datetime.utcnow() - user["last_msg_time"]).total_seconds()
                    if since_last_msg_s < 1:
                        return False
                else:
                    since_last_msg_s = None
                # Do not process the messages made in DM
                if not is_private:
                    self.update_last_msg(user, since_last_msg_s, content)
                return True
        def update_last_msg(self, user, last_msg_time, content):
            """Update the user's activity counters for one message.

            A message counts toward rain eligibility when it contains at
            least min_num_words_required "real" words (:emoji: tokens are
            skipped; same-length consecutive words are filtered heuristically
            via the modulo test) AND arrived at least
            delay_between_messages_required_s after the previous qualifying
            message. Inactivity beyond user_activity_required_m resets the
            rain message counter. The row is then persisted.
            """
            rain_config = parsing.parse_json('config.json')['rain']
            min_num_words_required = rain_config["min_num_words_required"]
            delay_between_messages_required_s = rain_config["delay_between_messages_required_s"]
            user_activity_required_m = rain_config["user_activity_required_m"]
            content_adjusted = helpers.unicode_strip(content)
            words = content_adjusted.split(' ')
            adjusted_count = 0
            prev_len = 0
            for word in words:
                word = word.strip()
                cur_len = len(word)
                if cur_len > 0:
                    # skip :emoji: tokens entirely
                    if word.startswith(":") and word.endswith(":"):
                        continue
                    if prev_len == 0:
                        prev_len = cur_len
                        adjusted_count += 1
                    else:
                        res = prev_len % cur_len
                        prev_len = cur_len
                        if res != 0:
                            adjusted_count += 1
                    # enough qualifying words found — stop scanning early
                    if adjusted_count >= min_num_words_required:
                        break
            # Reset the rain counter on first message or after long inactivity.
            if last_msg_time is None:
                user["rain_msg_count"] = 0
            else:
                if last_msg_time >= (user_activity_required_m * 60):
                    user["rain_msg_count"] = 0
            is_accepted_delay_between_messages = False
            if user["rain_last_msg_time"] is None:
                is_accepted_delay_between_messages = True
            elif (datetime.datetime.utcnow() - user["rain_last_msg_time"]).total_seconds() > delay_between_messages_required_s:
                is_accepted_delay_between_messages = True
            if adjusted_count >= min_num_words_required and is_accepted_delay_between_messages:
                user["rain_msg_count"] += 1
                user["rain_last_msg_time"] = datetime.datetime.utcnow()
            # last_msg_time is refreshed unconditionally for every message.
            user["last_msg_time"] = datetime.datetime.utcnow()
            cursor = self.__setup_cursor(
                pymysql.cursors.DictCursor)
            to_exec = "UPDATE users SET last_msg_time = %s, rain_last_msg_time = %s, rain_msg_count = %s WHERE snowflake_pk = %s"
            cursor.execute(to_exec, (user["last_msg_time"], user["rain_last_msg_time"], user["rain_msg_count"], user["snowflake_pk"]))
            cursor.close()
            self.__connection.commit()
# endregion
# region Active users
def get_active_users_id(self, user_activity_since_minutes, is_rain_activity):
since_ts = datetime.datetime.utcnow() - datetime.timedelta(minutes=user_activity_since_minutes)
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
if not is_rain_activity:
to_exec = "SELECT snowflake_pk FROM users WHERE last_msg_time > %s ORDER BY snowflake_pk"
else:
to_exec = "SELECT snowflake_pk FROM users WHERE rain_last_msg_time > %s ORDER BY snowflake_pk"
cursor.execute(to_exec, (str(since_ts)))
users = cursor.fetchall()
cursor.close()
return_ids = []
for user in users:
return_ids.append(user["snowflake_pk"])
return return_ids
# endregion
# region Registered users
def get_reg_users_id(self):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT snowflake_pk FROM users ORDER BY snowflake_pk"
cursor.execute(to_exec)
users = cursor.fetchall()
cursor.close()
return_reg_ids = []
for user in users:
return_reg_ids.append(user["snowflake_pk"])
return return_reg_ids
# endregion
# transaction history related calls - deposits
# return a list of txids of a users deposit transactions
def get_deposit_list(self, status):
# database search
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT txid FROM deposit WHERE status = %s"
cursor.execute(to_exec, str(status))
deposits = cursor.fetchall()
cursor.close()
return_deptxid_list = []
for transaction in deposits:
return_deptxid_list.append(transaction["txid"])
return return_deptxid_list
# return a list of txids of a users deposit transactions
def get_deposit_list_byuser(self, snowflake):
# database search
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT txid FROM deposit WHERE snowflake_fk = %s"
cursor.execute(to_exec, str(snowflake))
deposits = cursor.fetchall()
cursor.close()
return_deptxid_list = []
for transaction in deposits:
return_deptxid_list.append(transaction["txid"])
return return_deptxid_list
# get deposit info from txid
def get_deposit_amount(self, txid):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT amount FROM deposit WHERE txid = %s"
cursor.execute(to_exec, str(txid))
deposit = cursor.fetchone()
cursor.close()
return deposit["amount"]
# endregion
# transaction history related calls - withdrawals
# return a list of txids of a users withdrawal transactions
def get_withdrawal_list_byuser(self, snowflake):
# database search
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT txid FROM withdrawal WHERE snowflake_fk = %s"
cursor.execute(to_exec, str(snowflake))
deposits = cursor.fetchall()
cursor.close()
return_wittxid_list = []
for transaction in deposits:
return_wittxid_list.append(transaction["txid"])
return return_wittxid_list
# get deposit info from txid
def get_withdrawal_amount(self, txid):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT amount FROM withdrawal WHERE txid = %s"
cursor.execute(to_exec, str(txid))
withdrawal = cursor.fetchone()
cursor.close()
return withdrawal["amount"]
# endregion
# tip information calls
def get_tip_amounts_from_id(self, snowflake, snowflake_to):
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT snowflake_to_fk, amount FROM tip WHERE snowflake_from_fk = %s"
cursor.execute(to_exec, str(snowflake))
user_tips = cursor.fetchall()
cursor.close()
return_tip_amounts = []
for tips in user_tips:
if int(tips["snowflake_to_fk"]) == int(snowflake_to):
return_tip_amounts.append(tips["amount"])
return return_tip_amounts
def get_total_tip_amounts_from_id(self, snowflake):
donate_accounts = [int(self.treasurer), int(self.donation), int(self.stake_pay), int(self.game_bal)]
cursor = self.__setup_cursor(pymysql.cursors.DictCursor)
to_exec = "SELECT snowflake_to_fk, amount FROM tip WHERE snowflake_from_fk = %s"
cursor.execute(to_exec, str(snowflake))
user_tips = cursor.fetchall()
cursor.close()
return_tip_amounts = []
for tips in user_tips:
if int(tips["snowflake_to_fk"]) in donate_accounts:
return_tip_amounts.append(tips["amount"])
return return_tip_amounts
# end region
| [
"noreply@github.com"
] | digitalmoneybits2.noreply@github.com |
cf27c340335ab2e0a3a132df247a6f3df6dabda6 | ddf05e11407709eab868bc3cf89e7e8eeb2c5a7a | /STLight/urls.py | c19e591395002bb1a7a6ca1227c8d171370bbc1f | [] | no_license | chocooi/STLight | 75e48d0c355e2029730425b0d430f18c4a685e6f | 2c600fabe19f2af5f9993fac268971dc43fe4b04 | refs/heads/master | 2021-01-20T15:03:27.256017 | 2017-05-31T09:38:39 | 2017-05-31T09:38:39 | 90,708,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | """STLight URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),  # Django admin site
    url(r'', include('stlsite.urls')),  # main site routes (catch-all prefix)
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),  # DRF login/logout views
]
| [
"chocooi@naver.com"
] | chocooi@naver.com |
5aeec2b010ee47d3d27f372c4a4383504b4d6508 | 24a01d1d9ebc1d873ef7227a7fdc9f7c8f217389 | /Coursera_online/week2/test2.4.3.py | f637ece67de923f2b0b4ba2937c46f1563f3a804 | [] | no_license | oknelvapi/GitPython | 22b18b9034baabc6d2c58e362ff6a9794996ea87 | b5b0890e178a224d063df89fb04e53b3fa0092a4 | refs/heads/master | 2021-01-18T17:31:15.028321 | 2017-12-01T15:14:39 | 2017-12-01T15:14:39 | 100,488,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | # Сума введеної послідовності
# Sum an input sequence of integers; a zero terminates the sequence.
num = int(input())
numSum = 0
while num != 0:
    numSum += num
    num = int(input())
# Fix: removed the unused counter `i` that was incremented but never read.
print(numSum)
| [
"eldhim@gmail.com"
] | eldhim@gmail.com |
38bc0b12bcb9b7ecd503b6b57a2e931cec458b63 | e14372adf86d3c4f9e73c9f7111db3215c696c3d | /1.入门/风变编程/好人卡.py | 2c1a1a5dc863f8fde5b1c86a7a1579475d755b4b | [] | no_license | hewei-bit/PYTHON_learning | 71ddd7560a52575528547187f4fb40f39a3cbbdb | 18de8e5bdca165df5a5a4b5e0887846593656f4e | refs/heads/master | 2022-12-02T13:38:05.907135 | 2020-08-13T04:57:41 | 2020-08-13T04:57:41 | 261,647,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | Python 3.7.1 (v3.7.1:260ec2c36a, Oct 20 2018, 14:57:15) [MSC v.1915 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> a = input('两人的关系是否已经达到了朋友之上,恋人未满')
if a == '不是':
print('进度条不够,再等等')
elif a == '是':
b = input('你是不是想和对方进一步发展')
if b == '不是':
print('还是做朋友吧')
elif b =='是':
c = input('对方是不是想有进一步的发展?')
if c == '不是':
print('恭喜获得“好人卡”。')
elif c == '是':
print('恭喜你们有情人终成眷属!')
| [
"1003826976@qq.com"
] | 1003826976@qq.com |
256cff179bdf5d0bfdbf3b452f8219aa728a0d6f | c27d523ebcd05054067caf77499dbdf407e7b968 | /base/exceptions.py | 4b3b176f8ad9798c1fcf483e33a839766dca0c9a | [] | no_license | Sunrit07/rozprava-backend | 1385a9dfd9c7ccf04e9aa10cde572b36a9b87545 | 02bfc05a87462c288d0bc2b4c1f5269668961960 | refs/heads/main | 2023-07-12T11:23:28.042728 | 2021-08-30T16:15:47 | 2021-08-30T16:15:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | class AlreadyExistsError(Exception):
"""Error raised when trying to operate over a data that already exists."""
pass
| [
"sonicxxx7@gmail.com"
] | sonicxxx7@gmail.com |
c6c39f9811a920af1cc67b65891fc0a83b982a9a | 4a513ee1781ac98947cc69c340955b318af91e95 | /구현 코드/순열 조합 알고리즘/외장함수로 배열 순열구하기.py | 8715cbe4711ce75085d7fa2704f3dcb9fad3c60b | [] | no_license | atom015/algorithm | 7843d71cee5b760acdb3b9d74f37a091133c9ced | ad94a3dd12cb23363fa401f9cc67b52b36317206 | refs/heads/master | 2023-03-05T09:35:06.282154 | 2021-02-24T18:39:42 | 2021-02-24T18:39:42 | 194,266,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | from itertools import permutations
# Read n and a list of integers, then print every length-n permutation.
n = int(input()) # n: permutation length
arr = list(map(int,input().split()))
li = permutations(arr,n)
for i in li:
    print(i)
# all possible orderings, without repetition
| [
"zeezlelove@gmail.com"
] | zeezlelove@gmail.com |
4839b48c723044dac92e8a9f02ee2d61cbdf805c | 2053bf5708e2d1d07dcecfea3a74cee03c89759d | /12_django_rest_basic/coffeehouse/about/forms.py | c789cd9d14923d18052f2b23826327c21f9253a2 | [] | no_license | alimp5/beginningdjango | 59bcdf5f81cfa08187a8517706bc3c4f63c1f4bb | 69dbaaced5d057a1f5a44ff2f3e43fe45bde4f10 | refs/heads/master | 2023-04-28T02:50:04.865078 | 2021-04-22T02:01:14 | 2021-04-22T02:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,194 | py | from django import forms
from coffeehouse.about.models import Contact
import re
class GenderField(forms.ChoiceField):
    """Choice field pre-loaded with gender options and a friendlier
    'required' error message."""
    def __init__(self, *args, **kwargs):
        super(GenderField, self).__init__(*args, **kwargs)
        self.choices = (
            (None, 'Select gender'),
            ('M', 'Male'),
            ('F', 'Female'),
        )
        self.error_messages = {"required": "Please select a gender, it's required"}
class PlaceholderInput(forms.widgets.Input):
    """Text input rendered with a max length and a placeholder derived
    from the field's name."""
    template_name = 'about/placeholder.html'
    input_type = 'text'
    def get_context(self, name, value, attrs):
        ctx = super(PlaceholderInput, self).get_context(name, value, attrs)
        widget_attrs = ctx['widget']['attrs']
        widget_attrs['maxlength'] = 50
        widget_attrs['placeholder'] = name.title()
        return ctx
class ContactForm(forms.ModelForm):
    """Contact form backed by the Contact model, with placeholder widgets,
    custom error messages, and cross-field name/email validation."""
    gender = GenderField()
    class Meta:
        model = Contact
        fields = '__all__'
        widgets = {
            'name': PlaceholderInput,
            'email': PlaceholderInput,
            'comment': forms.Textarea
        }
        error_messages = {
            'comment': {"required": "Please, pretty please provide a comment"}
        }
        labels = {
            'email': 'Your email'
        }
    field_order = ['email', 'name', 'gender', 'comment']
    def __init__(self, *args, **kwargs):
        """Pre-populate name/email from an optional 'user' entry in initial."""
        initial_arguments = kwargs.get('initial', None)
        updated_initial = initial_arguments
        if initial_arguments:
            # 'user' is a placeholder variable, not a form field.
            user = initial_arguments.get('user', None)
            if user:
                updated_initial['name'] = getattr(user, 'first_name', None)
                updated_initial['email'] = getattr(user, 'email', None)
            # Push the possibly-augmented initial values back into kwargs.
            kwargs.update(initial=updated_initial)
        super(ContactForm, self).__init__(*args, **kwargs)
    def clean(self):
        """Cross-field check: the name must appear within the email.

        Fix: the email is lower-cased before the containment test, so
        mixed-case addresses (e.g. 'John@Example.com') no longer fail
        spuriously against a lower-cased name.
        """
        super(ContactForm, self).clean()
        name = self.cleaned_data.get('name', '')
        email = self.cleaned_data.get('email', '')
        if name.lower() not in email.lower():
            message = "Please provide an email that contains your name, or viceversa"
            # Attach as a non-field error so it shows at the top of the form.
            self.add_error(None, message)
    def clean_name(self):
        """Reject all-upper-case names."""
        value = self.cleaned_data['name']
        if value.isupper():
            raise forms.ValidationError("Please don't use all upper case for your name, use lower case",code='uppercase')
        return value
    def clean_email(self):
        """Reject hotmail addresses."""
        value = self.cleaned_data['email']
        if value.endswith('@hotmail.com'):
            raise forms.ValidationError("Please don't use a hotmail email, we simply don't like it",code='hotmail')
        return value
| [
"daniel@webforefront.com"
] | daniel@webforefront.com |
366c94d8e826c40a231a69859b6d3ddc9e132211 | 21767c2ea0e8443f9f5f765207548ac32fb8636b | /bin/easy_install-2.7 | 9a62f1a1a08fbab17de8923555b1468ed9cb5300 | [] | no_license | mrrealer/supreme-garbanzo | 177e2aba0a8101271fa53d522c77597c2a4f286a | 286b2aa6a337eeacf6346b333ae4db14a49fee00 | refs/heads/master | 2021-01-10T06:11:43.564582 | 2016-02-28T03:41:41 | 2016-02-28T03:41:41 | 52,701,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | 7 | #!/Users/dwaynesamuel/Desktop/Development/abc_project/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip any '-script.pyw'/'.exe' suffix from argv[0] (Windows launcher
    # artifacts) before delegating to setuptools' easy_install entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"dwayne.samuel@gmail.com"
] | dwayne.samuel@gmail.com |
52410dcf72fa288ce1176afe24d90d99e395f1da | ee00ebe5e71c36b05fbff993b19e9723b963313f | /14_longestCommonPrefix.py | b9af545eab2b091169ffed02c384c7511ace36e5 | [] | no_license | 26XINXIN/leetcode | f365560d93604a28abf399707b333f3c11f924ec | 78ed11f34fd03e9a188c9c6cb352e883016d05d9 | refs/heads/master | 2021-06-28T16:31:45.103879 | 2020-09-19T20:33:55 | 2020-09-19T20:33:55 | 144,975,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | class Solution:
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if strs == []:
return ""
prefix = ""
while self.not_empty(strs):
first = list()
for n in range(len(strs)):
first.append(strs[n][0])
strs[n] = strs[n][1:]
if first.count(first[0]) == len(strs):
prefix += first[0]
else:
break
return prefix
def not_empty(self, strs):
for str in strs:
if str == "":
return False
return True
| [
"yangxin.nlp@bytedance.com"
] | yangxin.nlp@bytedance.com |
15bbb15577f45c954e177b859c505b4e76b7bbc3 | d327842178b02628896ecfcd1ea206b35cf39a7d | /translate.py | 5dbee9a31af03ef6c07662dc0e93947dca39d439 | [] | no_license | aditya278/Telegram-Translation-Chat-bot | a3d7bd302dc0908b8cdd3b68d763e2a754a74e82 | 2eccddf0463c4b81b6ef48235a281ec569d2b3a3 | refs/heads/master | 2020-07-11T21:51:18.517831 | 2019-08-27T08:06:45 | 2019-08-27T08:06:45 | 204,651,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from googletrans import Translator
def Translation(msg, dest='en'):
    """Translate *msg* into the language given by *dest* (default English)
    and return the translated text."""
    result = Translator().translate(msg, dest)
    return result.text
| [
"aditya.shukla278@gmail.com"
] | aditya.shukla278@gmail.com |
1d1825d15f2ba2d022bf23c66e7348bab2d20bdd | 62d94c28c606e539e07109ea478cdb4f6d2c6414 | /fbchat-terminal/chat.py | ade0de9f471ff3553c4e7e0d10e76f0d4448a98f | [] | no_license | afonsocarlos/fbchat-terminal | 73fa68dd22036bbca6730f9e71228801b4ab0264 | 67e912386cc11ec58902d95aaa2812ed0178920c | refs/heads/master | 2021-06-01T03:27:17.320902 | 2016-05-10T20:21:43 | 2016-05-10T20:21:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,942 | py | # -*- coding: utf-8 -*-
"""
Terminal facebook messenger in python.
obs: in case Windows command prompt exit with charmap codec error
run the command chcp 65001 in it to fix the problem.
"""
from datetime import datetime
from passlib.hash import pbkdf2_sha256
import fbchat
import getpass
import labels
import os, sys
class Chat():
    """Terminal Facebook Messenger session wrapping an fbchat.Client."""
    def __init__(self, username, password):
        self.client = fbchat.Client(username, password)
        # Replace the plaintext password with a PBKDF2 hash; it is only used
        # later to verify the unlock phrase in lock_chat().
        self.client.password = pbkdf2_sha256.encrypt(self.client.password, rounds=1000, salt_size=16)
        self.chatting = True
    def lock_chat(self):
        ''' Lock chat.
        Hide messages and lock messenger until the correct
        password or exit() command is entered
        '''
        message = ''
        # Keep clearing the screen and prompting until the hash verifies.
        while not pbkdf2_sha256.verify(message, self.client.password):
            clear()
            message = getpass.getpass(">>")
            if message == "exit()": # gives the option to exit
                sys.exit()
        # Completely unnecessary! (I think.. But maybe it prevents password to be hacked '-')
        message = None
        del message
    def choose_friend(self):
        '''Choose a friend to talk to according to the name user input.'''
        friend = input("Who do you want to talk to: ")
        users = self.client.getUsers(friend)
        option = 0
        if len(users) <= 0:
            print("No friends found.")
            print("Try again.")
            self.choose_friend()
        elif len(users) > 1:
            # Several matches: let the user pick by index.
            print("Which of these friends?")
            for i, user in enumerate(users):
                print("%d. %s" % (i, user.name))
        try:
            option = int(input())
            self.friend = users[option].name
            self.userid = users[option].uid
        except ValueError as e:
            print("Option must be a valid number.")
            print("Try again.")
            # recursion certainly is not the best solution here, but it was the easiest
            self.choose_friend()
        except IndexError as e:
            print("Invalid Index")
            raise e
    def chat(self):
        '''Chat with chosen friend.'''
        self.chatting = True
        while self.chatting:
            current = "you"
            friend_thread = self.client.getThreadInfo(self.userid, 0)
            # Print the thread oldest-first, labelling speaker changes.
            for message in reversed(friend_thread):
                if int(message.author.split(':')[1]) == self.userid:
                    if current == "you":
                        current = self.friend
                        print("%s%s: %s" % (labels.START_LABEL, current, labels.END_LABEL))
                else:
                    if current == self.friend:
                        current = "you"
                        print("%s%s: %s" % (labels.START_LABEL, current, labels.END_LABEL))
                try:
                    print("%s - %s" % (message.timestamp_datetime, message.body))
                except Exception:
                    # remember to set chcp 65001 on windows to make cmd utf8 compatible
                    print("%s - %s" % (message.timestamp_datetime,
                        message.body.encode('cp860', errors='ignore')))
            # Prompt for the next outgoing message or a control command.
            message = input("type your message: ")
            if message:
                if message == "exit()":
                    sys.exit()
                if message == "lock()": # this will hide chat from eavesdroppers
                    self.lock_chat()
                elif message == "new_chat()":
                    self.chatting = False
                elif message == "help()":
                    self._show_help()
                else:
                    self.client.send(self.userid, message)
    def _show_help(self):
        # List the in-chat control commands recognised by chat().
        print("You can type these commands while talking to your friends:")
        print("new_chat() -> change friend to chat.")
        print("exit() -> exit this program.")
        print("help() -> show this help message.")
        input("Hit Enter to continue...")
def clear():
    """Clear the terminal screen on both Windows ('cls') and POSIX ('clear')."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def main():
    # disguising application
    os.system('title Terminal') # check if it works in Linux
    username = input("username: ")
    password = getpass.getpass("password: ")
    chat = Chat(username, password)
    # Drop local references to the credentials once the client holds them.
    username = password = None
    del username
    del password
    print(labels.START_TITLE + "********************************" + labels.END_TITLE)
    print(labels.START_TITLE + "*  Welcome to fbchat-terminal  *" + labels.END_TITLE)
    print(labels.START_TITLE + "********************************" + labels.END_TITLE)
    chat._show_help()
    # Alternate forever between picking a friend and chatting; chat() exits
    # the process on "exit()".
    while True:
        chat.choose_friend()
        chat.chat()
if __name__ == '__main__':
    main()
"me@work.com"
] | me@work.com |
b6fab749c9a3c76967b5b1e53ce4824db4122763 | 86429b67cb1f9c1573c162f451f6efbc1c76031f | /약수의합.py | 16a2b336f67ebeda4844d47a03af08d9c39f3eff | [] | no_license | nothingct/BOJ_CodingTest | b6da172514d38882bcfeccb7c437231af61b6568 | 59dc72240355bbc1ed7c4b9cf4a51ba294e23c69 | refs/heads/master | 2023-07-19T01:37:10.112609 | 2021-08-11T19:56:18 | 2021-08-11T19:56:18 | 388,266,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | MAX = 1000000
# f[n] will hold sigma(n), the sum of n's divisors (1 is every n's divisor).
f = [1] * (MAX+1)
g = [0] * (MAX+1)
# Sieve: add each divisor i to all of its multiples. The original used a
# manual `j` counter with `while i*j <= MAX`; a stepped range is the
# idiomatic (and faster) equivalent.
for i in range(2, MAX+1):
    for j in range(i, MAX+1, i):
        f[j] += i
# Prefix sums: g[n] = f[1] + ... + f[n].
for i in range(1, MAX+1):
    g[i] = g[i-1] + f[i]
t = int(input())
ans = []
for _ in range(t):
    n = int(input())
    ans.append(g[n])
print('\n'.join(map(str, ans)) + '\n')
| [
"nothingct@naver.com"
] | nothingct@naver.com |
54d95c1e82c024a0402747adbc14b870c9ffec84 | 80308dc6fcbc87294d4ae96c4b30d0aac6e88d6a | /multifield_batch_update/setup_menus.py | 9830921ba0279918a17ff3e4ef21ba33a3a77575 | [
"Apache-2.0"
] | permissive | d3v3l0/anki_multifield_batch_update | 0d2924b4d2c164d7aee5702fb6b648878da5aaea | 4960a6e969256ff0df2ebf37312e80fa009fcddb | refs/heads/master | 2022-01-07T00:08:58.959963 | 2019-07-16T04:54:29 | 2019-07-16T04:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | # Copyright 2019 Matthew Hayes
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from anki.hooks import addHook
from aqt.qt import QFileDialog, QStandardPaths
from aqt.utils import tooltip
from .dialogs.batch_update import BatchUpdateDialog
from .dialogs.change_log import ChangeLogDialog
def open_load_file_dialog(browser):
    """Ask for a CSV file and open the batch-update dialog over the
    browser's selected notes; tooltips report empty selection or errors."""
    nids = browser.selectedNotes()
    if nids:
        try:
            ext = ".csv"
            default_path = QStandardPaths.writableLocation(QStandardPaths.DocumentsLocation)
            path = os.path.join(default_path, f"changes{ext}")
            options = QFileDialog.Options()
            # The native file dialog doesn't seem to work here; force Qt's own.
            options |= QFileDialog.DontUseNativeDialog
            result = QFileDialog.getOpenFileName(
                browser, "Import CSV for Batch Update", path, f"CSV (*{ext})",
                options=options)
            if not isinstance(result, tuple):
                raise Exception("Expected a tuple from save dialog")
            # getOpenFileName returns (file_name, selected_filter).
            file = result[0]
            if file:
                BatchUpdateDialog(browser, nids, file).exec_()
        except Exception as e:
            tooltip("Failed: {}".format(e))
    else:
        tooltip("You must select some cards first")
def open_changelog_dialog(browser):
    """Show the add-on's change-log dialog, modally, over the browser."""
    ChangeLogDialog(browser).exec_()
def setup_menus(browser):
    """Attach the 'Multi-field Batch Update' submenu to the browser's Edit menu."""
    edit_menu = browser.form.menuEdit
    edit_menu.addSeparator()
    submenu = edit_menu.addMenu("Multi-field Batch Update")
    import_action = submenu.addAction("Import CSV")
    import_action.triggered.connect(lambda _: open_load_file_dialog(browser))
    log_action = submenu.addAction("View Log")
    log_action.triggered.connect(lambda _: open_changelog_dialog(browser))
addHook("browser.setupMenus", setup_menus)
| [
"matthew.terence.hayes@gmail.com"
] | matthew.terence.hayes@gmail.com |
fc0c3a7b7f649ff965390dc90c2b147b96033a90 | a1dece3e2195969b04b2751745d9b3aedf263a98 | /2022/10/b.py | ce13e6fd84f3d6e757e6217d376776a23bf0f6e1 | [] | no_license | vnil/advent-of-code | 4804060365666413ee8601d8aa6fa82595c5f49f | f00940a5442b81f1eefa6dd65b5bd952c6ea03cc | refs/heads/main | 2023-03-07T22:28:32.482267 | 2023-02-27T16:12:52 | 2023-02-27T16:12:52 | 225,295,002 | 0 | 0 | null | 2023-02-27T16:20:53 | 2019-12-02T05:45:38 | Python | UTF-8 | Python | false | false | 906 | py | # Take time to read
# Consider different approaches
# +-1 errors?
# Got last item?
from collections import Counter
from collections import defaultdict, deque
from pprint import pprint as pp
from itertools import combinations
from copy import deepcopy
import math
import sys
filename = sys.argv[1]
content = open(filename, 'r').readlines()
content = [c.strip() for c in content]
c = 0  # current cycle, doubling as the pixel index (0..239)
x = 1  # sprite centre position within the 40-column row
su = 0
arr = ['.'] * 240  # 6 rows x 40 columns CRT framebuffer
def cycle():
    """Advance one CPU cycle: light pixel c when the 3-wide sprite centred
    on x covers the current CRT column (c mod 40)."""
    global x, c, su
    pos = c % 40
    # Sprite covers pos-1, pos and pos+1 — i.e. within distance 1 of x.
    if abs(x - pos) <= 1:
        arr[c] = '#'
    c += 1
    print(c)
for row in content:
    if row == 'noop':
        cycle()
        continue
    else:
        op, val = row.split()
        # addx takes two cycles; the register only updates after both.
        cycle()
        cycle()
        x+=int(val)
# Render the six 40-column CRT rows.
print(''.join(arr[0:40]))
print(''.join(arr[40:80]))
print(''.join(arr[80:120]))
print(''.join(arr[120:160]))
print(''.join(arr[160:200]))
print(''.join(arr[200:]))
| [
"viktor.nilsson@tretton37.com"
] | viktor.nilsson@tretton37.com |
8143669c1aa900477f7a438e23b893a33b80b9ca | 7df236ea048a0cb5bdd0f5481ab5aaef2e681597 | /tsl/api/views.py | c1553c55c5947b0afdb85633a7dc1140627fe7a3 | [] | no_license | dominicsmorra/TSL-Project | 722be409633bd3937d085f585d2058691037f2de | d5967d132c7fb31038de1bf17fd34c77c3101de4 | refs/heads/master | 2023-06-12T19:16:17.174211 | 2021-07-09T06:03:00 | 2021-07-09T06:03:00 | 383,884,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,470 | py | from rest_framework import status, permissions
from rest_framework.response import Response
from rest_framework_simplejwt.views import TokenObtainPairView
from rest_framework.views import APIView
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
CreateAPIView,
DestroyAPIView,
UpdateAPIView
)
from rest_framework_simplejwt.tokens import RefreshToken
from .serializers import MyTokenObtainPairSerializer, CustomUserSerializer, PostSerializer
from .models import Post
class PostListView(ListAPIView):
    # Read-only list of all posts; open to unauthenticated clients.
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    permission_classes = (permissions.AllowAny, )
class PostCreateView(CreateAPIView):
    # Creates a post from the request payload; open to unauthenticated clients.
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    permission_classes = (permissions.AllowAny, )
class PostDeleteView(DestroyAPIView):
    """Deletes a post by pk via GET.

    Fix: ``Post.objects.get(pk=pk)`` raises ``Post.DoesNotExist`` for an
    unknown pk, so the original 404 branch was unreachable and missing
    posts produced a 500. The lookup is now guarded explicitly.
    """
    permission_classes = (permissions.AllowAny, )
    def get(self, request, pk, format='json'):
        try:
            post = Post.objects.get(pk=pk)
        except Post.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        post.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ObtainTokenView(TokenObtainPairView):
    # JWT token-pair endpoint using the project's customised serializer.
    serializer_class = MyTokenObtainPairSerializer
class CustomUserCreate(APIView):
    """Open registration endpoint: creates a user from the posted payload."""
    permission_classes = (permissions.AllowAny,)
    authentication_classes = ()
    def post(self, request, format='json'):
        serializer = CustomUserSerializer(data=request.data)
        # Guard clauses: invalid payload or a failed save both yield 400.
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        user = serializer.save()
        if user:
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class HelloWorldView(APIView):
    # Simple smoke-test endpoint.
    def get(self, request):
        return Response(data={"hello":"world"}, status=status.HTTP_200_OK)
class LogoutAndBlacklistRefreshTokenForUserView(APIView):
    """Logout endpoint: blacklists the posted refresh token so it can no
    longer be used to mint access tokens."""
    permission_classes = (permissions.AllowAny,)
    authentication_classes = ()
    def post(self, request):
        try:
            refresh_token = request.data["refresh_token"]
            token = RefreshToken(refresh_token)
            token.blacklist()
            return Response(status=status.HTTP_205_RESET_CONTENT)
        # NOTE(review): the broad catch collapses missing-key and invalid-token
        # failures into one opaque 400; consider narrowing — kept as-is here.
        except Exception as e:
            return Response(status=status.HTTP_400_BAD_REQUEST)
"dominicsmorra@gmail.com"
] | dominicsmorra@gmail.com |
4c3bdb59f2ceb1a8d91f74e8f2fcc42912087c03 | c87402ae301bdb163cdb7c8c8d1f4f8ba32aa519 | /python/028.py | d145c6732e40eaed174cf374c0d111f2e5615c76 | [] | no_license | jabagawee/Project-Euler | e22a6b6df6015919ffadb1f1374696b5b6d89353 | 01bccf9c7a5bb8ab57428ebbf0f375d8e2db8d9d | refs/heads/master | 2021-01-23T13:41:36.045702 | 2014-04-24T01:01:10 | 2014-04-24T01:01:10 | 3,687,419 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | ans, curr = 1, 1
SIZE = 1001
# Project Euler 28: sum the diagonals of a SIZE x SIZE number spiral.
# Python 2 source: xrange, integer '/', and the print statement.
for x in xrange(1, (SIZE - 1) / 2 + 1):
    # Ring x contributes four corners, each 2*x further along the spiral.
    for y in xrange(4):
        curr += 2*x
        ans += curr
print ans
| [
"andrew@jabagawee.com"
] | andrew@jabagawee.com |
2720e576fe35dc1f1645bec769b4faee289dcce9 | acfef9ee466c33ef16697372513db5ee244ee4d2 | /datasets/NIHE-Northern-Ireland-housing-bulletin/nihe-building-developement.py | 7aba1e08d344f5036754f9356895d845e7ac3aad | [] | no_license | GSS-Cogs/family-homelessness | 6695832fb34b3f78c7d86b67aa8977fb73c30ab6 | 4f89b3b319998ef61066da8396229719182a8362 | refs/heads/master | 2022-08-09T21:14:20.441818 | 2022-08-03T15:24:18 | 2022-08-03T15:24:18 | 251,271,212 | 1 | 0 | null | 2022-10-17T08:30:52 | 2020-03-30T10:23:41 | Python | UTF-8 | Python | false | false | 8,022 | py | #!/usr/bin/env python
# coding: utf-8
# In[33]:
from gssutils import *
import json
import datetime
from pandas import ExcelWriter
import pandas as pd
from pathlib import Path
def right(s, amount):
    """Return the last *amount* characters of *s* ('' when amount is 0)."""
    # Fix: s[-0:] slices from index 0 and returned the WHOLE string for
    # amount == 0; guard that case explicitly.
    if amount == 0:
        return ""
    return s[-amount:]
def left(s, amount):
    """Return the first *amount* characters of *s*."""
    return s[0:amount]
year = int(right(str(datetime.datetime.now().year),4)) - 1  # previous calendar year
print(year)
info = json.load(open('info.json'))  # pipeline metadata for this dataset
landingPage = info['landingPage']
landingPage  # bare expression left over from the notebook export; no effect as a script
# In[34]:
scraper = Scraper('https://www.communities-ni.gov.uk/publications/topic/8182?search=%22Northern+Ireland+Housing+Bulletin%22&Search-exposed-form=Go&sort_by=field_published_date')
scraper
# The URL was changed from the landing page taken from the info.json since the scraper is not made to use it.
# Could go back and edit the scraper but kinda seems like a pain in the ass considering the landing page is non-specific to the dataset.
# In[35]:
dist = scraper.distributions[0]
dist
# In[36]:
xls = pd.ExcelFile(dist.downloadURL, engine="odf")
with ExcelWriter("data.xls") as writer:
for sheet in xls.sheet_names:
pd.read_excel(xls, sheet).to_excel(writer,sheet)
writer.save()
tabs = loadxlstabs("data.xls")
# In[37]:
tidied_sheets = []
for tab in tabs:
if 'T1_1' in tab.name or 'T1_2' in tab.name:
cell = tab.filter(contains_string('Quarter / Year'))
remove = tab.filter(contains_string('Source:')).expand(LEFT).expand(DOWN).expand(RIGHT)
period = cell.shift(DOWN).expand(DOWN).is_not_blank() - remove
devType = cell.shift(RIGHT).expand(RIGHT).is_not_blank() - remove
if tab.name == 'T1_1':
buildingStage = 'Starts'
else:
buildingStage = 'Completions'
observations = period.fill(RIGHT).is_not_blank() - tab.filter('`')
dimensions = [
HDim(period, 'Period', DIRECTLY, LEFT),
HDim(devType, 'Development Type', DIRECTLY, ABOVE),
HDimConst('Building Stage', buildingStage),
HDimConst('Measure Type', 'Houses'),
HDimConst('Housing Type', 'All'),
HDimConst('Unit', 'Count')
]
tidy_sheet = ConversionSegment(tab, dimensions, observations)
savepreviewhtml(tidy_sheet, fname="Preview.html")
tidied_sheets.append(tidy_sheet.topandas())
elif 'T1_3' in tab.name or 'T1_4' in tab.name:
cell = tab.filter(contains_string('Type of Housing'))
remove = tab.filter(contains_string('Source:')).expand(LEFT).expand(DOWN).expand(RIGHT)
period = cell.fill(RIGHT).is_not_blank() | cell.shift(1,1).expand(RIGHT).is_not_blank() - remove
housingType = cell.shift(DOWN).expand(DOWN).is_not_blank() - remove
housingType2 = cell.shift(DOWN).shift(RIGHT).expand(DOWN).is_not_blank() - remove
if tab.name == 'T1_3':
buildingStage = 'Starts'
else:
buildingStage = 'Completions'
observations = housingType2.fill(RIGHT).is_not_blank() - remove
dimensions = [
HDim(period, 'Period', DIRECTLY, ABOVE),
HDim(housingType, 'Housing Type', CLOSEST, ABOVE),
HDim(housingType2, 'Housing Type 2', DIRECTLY, LEFT),
HDimConst('Building Stage', buildingStage),
HDimConst('Measure Type', 'Houses'),
HDimConst('Development Type', 'Social Housing Development'),
HDimConst('Unit', 'Count')
]
tidy_sheet = ConversionSegment(tab, dimensions, observations)
savepreviewhtml(tidy_sheet, fname="Preview.html")
tidied_sheets.append(tidy_sheet.topandas())
else:
continue
# In[38]:
df = pd.concat(tidied_sheets, ignore_index = True, sort = False).fillna('')
df['Period'] = df['Period'].map(lambda x: 'government-year/' + left(x, 4) if left(x, 2) == '20' else x)
df['Period'] = df['Period'].map(lambda x: x + '-' + str(int(right(x, 4)) + 1) if 'government-year' in x else x)
df['Period'] = df['Period'].map(lambda x: 'government-quarter/' + right(x, 4) + '-' + str(int(right(x, 4)) + 1) + '/Q' + left(x, 3) + '' if 'government-year/' not in x and left(right(x, 4),2) == '20' else x)
df['Period'] = df.apply(lambda x: str(x['Period']).replace('Apr', '1') if 'government-quarter' in x['Period'] else x['Period'], axis = 1)
df['Period'] = df.apply(lambda x: str(x['Period']).replace('Jul', '2') if 'government-quarter' in x['Period'] else x['Period'], axis = 1)
df['Period'] = df.apply(lambda x: str(x['Period']).replace('Oct', '3') if 'government-quarter' in x['Period'] else x['Period'], axis = 1)
df['Period'] = df.apply(lambda x: str(x['Period']).replace('Jan', '4') if 'government-quarter' in x['Period'] else x['Period'], axis = 1)
df['Housing Type'] = df['Housing Type'] + ' ' + df['Housing Type 2']
df = df.drop(['Housing Type 2'], axis=1)
df = df.replace({'Development Type' : {
'TotalNew DwellingStarts' : 'Total',
'TotalNew DwellingCompletions' : 'Total'},
'Housing Type' : {
'Shared Sub-total' : 'Shared Total',
'Self-Contained Sub-total' : 'Self-Contained Total',
'Self-Contained Totals' : 'Totals'},
'Period' : {
'Apr-Jun' : 'government-quarter/2019-2020/Q1',
'Jul-Sep' : 'government-quarter/2019-2020/Q2',
'Oct-Dec' : 'government-quarter/2019-2020/Q3'}})
df.rename(columns={'OBS' : 'Value'}, inplace=True)
df.head()
# In[39]:
from IPython.core.display import HTML
for col in df:
if col not in ['Value']:
df[col] = df[col].astype('category')
display(HTML(f"<h2>{col}</h2>"))
display(df[col].cat.categories)
# In[40]:
tidy = df[['Period','Development Type', 'Building Stage','Housing Type','Value','Measure Type','Unit']]
for column in tidy:
if column in ('Marker', 'Development Type', 'Building Stage', 'Housing Type'):
tidy[column] = tidy[column].map(lambda x: pathify(x))
tidy.head(25)
# In[41]:
scraper.dataset.title = 'NIHE - Building Development'
scraper.dataset.comment = """
The date of a new dwelling start is the date on which the first building control inspection takes place.
The figures only include applications for new dwellings received by Building Control in NI.
The figures include domestic apartments and dwellings as defined by Building Control purpose group.
Figures will be revised on an annual basis to capture Building Control applications received outside of the quarter.
The date of a new dwelling completion is the date on which the building control completion inspection takes place.
The Housing Executive no longer builds new dwellings. This has been the case since 2001-02. Occasionally it may still replace on Housing Executive new builds will no longer be available.
Housing Association new social housing dwelling starts are recorded when housing associations confirm the start on-site of new build/rehabilitation/re-improvement units, or the purchase of Off-the-Shelf units, for social housing.
The formal definitions of all scheme types can be found in the Housing Association Guide at: https://www.communities-ni.gov.uk/scheme-types
Housing Association new social housing dwelling completions are recorded when housing associations confirm the completion of new build/rehabilitation/re-improvement units, or the purchase of Off-the-Shelf units, for social housing.
"""
# In[41]:
# In[42]:
out = Path('out')
out.mkdir(exist_ok=True)
title = pathify('NIHE - Building Development')
tidy.drop_duplicates().to_csv(out / f'{title}.csv', index = False)
scraper.dataset.family = 'homelessness'
scraper.dataset.theme = THEME['housing-planning-local-services']
scraper.dataset.license = 'http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/'
with open(out / f'{title}.csv-metadata.trig', 'wb') as metadata:
metadata.write(scraper.generate_trig())
#csvw = CSVWMetadata('https://gss-cogs.github.io/family-homelessness/reference/')
#csvw.create(out / 'observations.csv', out / 'observations.csv-schema.json')
| [
"jonathan.j.walters@gmail.com"
] | jonathan.j.walters@gmail.com |
e713240de7ae59f0a0a0af423507413283cbcabb | 6c53bda724a852ff7f99b44d4777ba1ee85bc76a | /testimport/testingimport.py | 0f515e9307ded961ac0c6849c1d3869685236947 | [] | no_license | Happy-Lama/Ambition | eb92f1c194a4a8c950e33233dac1a87c85986bf5 | b8e1e3f595ae528aa1a7037e3e1b2a03a5066546 | refs/heads/master | 2022-04-10T07:19:28.135548 | 2020-03-28T10:01:17 | 2020-03-28T10:01:17 | 245,880,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | from MyFunctions import add
print(add(1,2)) | [
"kldenis2001@gmail.com"
] | kldenis2001@gmail.com |
7db9b648d623bb3ae7d34cc2d55aeb3112bdd8a0 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/databoxedge/v20210201preview/get_mec_role.py | 4e08fec53f2b3eaad31997b2c9c9689f86f8de67 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,958 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetMECRoleResult',
'AwaitableGetMECRoleResult',
'get_mec_role',
]
@pulumi.output_type
class GetMECRoleResult:
"""
MEC role.
"""
def __init__(__self__, connection_string=None, controller_endpoint=None, id=None, kind=None, name=None, resource_unique_id=None, role_status=None, system_data=None, type=None):
if connection_string and not isinstance(connection_string, dict):
raise TypeError("Expected argument 'connection_string' to be a dict")
pulumi.set(__self__, "connection_string", connection_string)
if controller_endpoint and not isinstance(controller_endpoint, str):
raise TypeError("Expected argument 'controller_endpoint' to be a str")
pulumi.set(__self__, "controller_endpoint", controller_endpoint)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_unique_id and not isinstance(resource_unique_id, str):
raise TypeError("Expected argument 'resource_unique_id' to be a str")
pulumi.set(__self__, "resource_unique_id", resource_unique_id)
if role_status and not isinstance(role_status, str):
raise TypeError("Expected argument 'role_status' to be a str")
pulumi.set(__self__, "role_status", role_status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> Optional['outputs.AsymmetricEncryptedSecretResponse']:
"""
Activation key of the MEC.
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="controllerEndpoint")
def controller_endpoint(self) -> Optional[str]:
"""
Controller Endpoint.
"""
return pulumi.get(self, "controller_endpoint")
@property
@pulumi.getter
def id(self) -> str:
"""
The path ID that uniquely identifies the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Role type.
Expected value is 'MEC'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceUniqueId")
def resource_unique_id(self) -> Optional[str]:
"""
Unique Id of the Resource.
"""
return pulumi.get(self, "resource_unique_id")
@property
@pulumi.getter(name="roleStatus")
def role_status(self) -> str:
"""
Role status.
"""
return pulumi.get(self, "role_status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Role configured on ASE resource
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
class AwaitableGetMECRoleResult(GetMECRoleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMECRoleResult(
connection_string=self.connection_string,
controller_endpoint=self.controller_endpoint,
id=self.id,
kind=self.kind,
name=self.name,
resource_unique_id=self.resource_unique_id,
role_status=self.role_status,
system_data=self.system_data,
type=self.type)
def get_mec_role(device_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMECRoleResult:
"""
MEC role.
:param str device_name: The device name.
:param str name: The role name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['deviceName'] = device_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20210201preview:getMECRole', __args__, opts=opts, typ=GetMECRoleResult).value
return AwaitableGetMECRoleResult(
connection_string=__ret__.connection_string,
controller_endpoint=__ret__.controller_endpoint,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
resource_unique_id=__ret__.resource_unique_id,
role_status=__ret__.role_status,
system_data=__ret__.system_data,
type=__ret__.type)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
9f579718f48595ddae2c729e936801f9fb340e39 | 2cadaf598a7d5cc98680a23d2aa7384223259c92 | /src/solution_f25ffba3.py | 5da0c0658d4a4fb5fe5e3719ecbad477a2093e46 | [
"Apache-2.0"
] | permissive | emmetlee/ARC | 0dcad958f9f204a695fa1476c057c769516ac633 | 8748321a54db6adc60595d92944d6f11e38f9be3 | refs/heads/master | 2020-09-12T05:11:47.277602 | 2019-12-01T21:24:01 | 2019-12-01T21:24:01 | 222,319,212 | 0 | 0 | null | 2019-11-17T22:11:45 | 2019-11-17T22:11:44 | null | UTF-8 | Python | false | false | 2,185 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 27 23:26:33 2019
@author: emmetlee
"""
import sys
import json
import numpy as np
def solve(grid):
"""Call solve() function """
#Open the json file passed into the solve function
d1 = json.load (open(grid))
#Work out the shape and size of the input dictionary
tmpd1 = {k: len(v) for k,v in d1.items()}
lengths1 = [lengthv for lengthv in tmpd1.values()]
"""Print the Output Grids for training inputs"""
#training grids are in lengths[0]
for x in range(lengths1[0]):
for y in reversed(range(len(d1['train'][0]['input']))):
output_grid2 = [] #An empty list
if y>=(len(d1['train'][0]['input'])/2): #bottom half of grid
output_grid2.extend(d1['train'][x]['input'][y])
else:
#mirror the bottom half of the grid to the top half
#of the output grid
output_grid2.extend(
(d1['train'][x]['input'][-y+(len(d1['train'][0]['input'])-1)]))
print(np.asarray(output_grid2))
print(" ")
"""Print the Output Grids for Evaluation inputs"""
for x in range(lengths1[1]):
for y in reversed(range(len(d1['test'][0]['input']))):
output_grid2 = [] #An empty list
if y>=(len(d1['train'][0]['input'])/2): #bottom half of grid
output_grid2.extend(d1['test'][x]['input'][y])
else:
#mirror the bottom half of the input grid to the top half
#of the output grid
output_grid2.extend(
(d1['test'][x]['input'][-y+(len(d1['test'][0]['input'])-1)]))
print(np.asarray(output_grid2))
print(" ")
"""main function will call the solve function and pass in the json"""
def main():
"""Call solve() function and pass the json
Read the first command-line argument (after python script)
as the input file"""
input_grid = sys.argv[1]
solve(input_grid) #pass the input file to the solve function
"""Call the main function"""
if __name__ == "__main__":
main() | [
"emmetlee@avaya.com"
] | emmetlee@avaya.com |
af8037882b5b9cd779f9d7cc0a06e7e7f6386acf | 2e919a1e9f5d6b0ffef37b626d5dc97a5b2f63cd | /src/exceptions.py | 380df767487eeb558edf1579046a045c2e41b44f | [] | no_license | lucasguerra91/some-python | c3992ac6c49f015bf14f37996ff3c8b0ed7b4d0a | 0678e8d884e0641d592a3a457db11cc2085c8b27 | refs/heads/master | 2021-01-18T13:06:32.454114 | 2019-06-24T01:46:58 | 2019-06-24T01:46:58 | 80,723,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | try:
print(5/0)
except ZeroDivisionError:
print("You can't divide by Zero!")
# Ejemplo de try dentro de divisiones
print("Give me two numbers, and i will divide them")
print("Enter 'q' to quit. ")
while True:
first_number = input("\nFirst number: ")
if first_number == 'q':
break
second_number = input("Second number: ")
if second_number == 'q':
break
try:
answer = float(first_number) / float(second_number)
except ZeroDivisionError:
print("You can't divide by Zero!")
else:
print(answer)
# Ejemplo de try para file not found
filename = 'alice.txt'
try:
with open(filename) as file_obj:
contents = file_obj.read()
except FileNotFoundError:
msg = "\n\nSorry, the file " + filename + " does not exist. "
print(msg)
else:
#Count the approximate number of words in the file
words = contents.split()
num_words = len(words)
print("The file " + filename + "has about " + str(num_words) + " words.")
# me quede en la pagina 204 | [
"guerra986@gmail.com"
] | guerra986@gmail.com |
0bb8cc0c7e2ebe27c19854bab4f930bd934494f9 | d8df81c37d267c21917e16a0df0931e9954d0f88 | /virtual/bin/wheel | b8807cae90a68f035eb739418d0d872d70411bcf | [
"MIT"
] | permissive | njoanc/neighborhood | 7ec639a8b74f02998031387500de2dcf88169f43 | 81658f4cdfe4dbfd4080d55d8136349be0da7c23 | refs/heads/master | 2021-09-09T06:24:18.735569 | 2020-05-17T14:12:40 | 2020-05-17T14:12:40 | 179,490,905 | 0 | 1 | null | 2021-09-08T00:56:49 | 2019-04-04T12:14:15 | Python | UTF-8 | Python | false | false | 243 | #!/home/wecode/Desktop/nextdoor/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"wecode@wecode"
] | wecode@wecode | |
ea4f1076b9bc672d07920d9619fbd9167214a8d7 | 395e0c9cd078210bc3313485d6f5ed8068b5e3a3 | /test_redis/test_auth.py | 25627e03ccd649cfb4addd1ce7b830ab08df6826 | [] | no_license | skysbird/test-twemproxy | 90709c548228250aea6501dacc0e9b340c167b32 | c13bf76d6d73b1df0e8cdcda1b2ec13a8d5722b0 | refs/heads/master | 2020-12-06T19:08:58.446646 | 2014-12-03T05:14:48 | 2014-12-03T05:14:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,698 | py | #!/usr/bin/env python
#coding: utf-8
from common import *
all_redis = [
RedisServer('127.0.0.5', 2100, '/tmp/r/redis-2100/', CLUSTER_NAME, 'redis-2100', auth = 'hellopasswd'),
RedisServer('127.0.0.5', 2101, '/tmp/r/redis-2101/', CLUSTER_NAME, 'redis-2101', auth = 'hellopasswd'),
]
nc = NutCracker('127.0.0.5', 4100, '/tmp/r/nutcracker-4100', CLUSTER_NAME, all_redis, mbuf=mbuf, verbose=nc_verbose, redis_auth = 'hellopasswd')
nc_badpass = NutCracker('127.0.0.5', 4101, '/tmp/r/nutcracker-4101', CLUSTER_NAME, all_redis, mbuf=mbuf, verbose=nc_verbose, redis_auth = 'badpasswd')
nc_nopass = NutCracker('127.0.0.5', 4102, '/tmp/r/nutcracker-4102', CLUSTER_NAME, all_redis, mbuf=mbuf, verbose=nc_verbose)
def setup():
print 'setup(mbuf=%s, verbose=%s)' %(mbuf, nc_verbose)
for r in all_redis + [nc, nc_badpass, nc_nopass]:
r.deploy()
r.stop()
r.start()
def teardown():
for r in all_redis + [nc, nc_badpass, nc_nopass]:
assert(r._alive())
r.stop()
if clean:
r.clean()
default_kv = {'kkk-%s' % i : 'vvv-%s' % i for i in range(10)}
def getconn():
r = redis.Redis(nc.host(), nc.port())
return r
'''
cases:
redis proxy case
1 1 test_auth_basic
1 bad test_badpass_on_proxy
1 0 test_nopass_on_proxy
0 0 already tested on other case
0 1
'''
def test_auth_basic():
# we hope to have same behavior when the server is redis or twemproxy
conns = [
redis.Redis(all_redis[0].host(), all_redis[0].port()),
redis.Redis(nc.host(), nc.port()),
]
for r in conns:
assert_fail('NOAUTH|operation not permitted', r.ping)
assert_fail('NOAUTH|operation not permitted', r.set, 'k', 'v')
assert_fail('NOAUTH|operation not permitted', r.get, 'k')
# bad passwd
assert_fail('invalid password', r.execute_command, 'AUTH', 'badpasswd')
# everything is ok after auth
r.execute_command('AUTH', 'hellopasswd')
r.set('k', 'v')
assert(r.ping() == True)
assert(r.get('k') == 'v')
# auth fail here, should we return ok or not => we will mark the conn state as not authed
assert_fail('invalid password', r.execute_command, 'AUTH', 'badpasswd')
assert_fail('NOAUTH|operation not permitted', r.ping)
assert_fail('NOAUTH|operation not permitted', r.get, 'k')
def test_nopass_on_proxy():
r = redis.Redis(nc_nopass.host(), nc_nopass.port())
# if you config pass on redis but not on twemproxy,
# twemproxy will reply ok for ping, but once you do get/set, you will get errmsg from redis
assert(r.ping() == True)
assert_fail('NOAUTH|operation not permitted', r.set, 'k', 'v')
assert_fail('NOAUTH|operation not permitted', r.get, 'k')
# proxy has no pass, when we try to auth
assert_fail('Client sent AUTH, but no password is set', r.execute_command, 'AUTH', 'anypasswd')
pass
def test_badpass_on_proxy():
r = redis.Redis(nc_badpass.host(), nc_badpass.port())
assert_fail('NOAUTH|operation not permitted', r.ping)
assert_fail('NOAUTH|operation not permitted', r.set, 'k', 'v')
assert_fail('NOAUTH|operation not permitted', r.get, 'k')
# we can auth with bad pass (twemproxy will say ok for this)
r.execute_command('AUTH', 'badpasswd')
# after that, we still got NOAUTH for get/set (return from redis-server)
assert(r.ping() == True)
assert_fail('NOAUTH|operation not permitted', r.set, 'k', 'v')
assert_fail('NOAUTH|operation not permitted', r.get, 'k')
def setup_and_wait():
time.sleep(60*60)
| [
"idning@gmail.com"
] | idning@gmail.com |
5cb56ab8c19aefca42ed1e81a29896f955cb110e | 193a914d773346d62964c1257a1373c4bb9aaca6 | /B045計算ドリル.py | 88ec369e7879fd7ba3fc0db1c179e6ef9721f236 | [] | no_license | hujuu/py-test | f32d6ce723b0c8bc12d61f6aadc9141bd34b9272 | 9b225f34b4a2e6be971fd909aec96698649a049b | refs/heads/master | 2022-08-17T22:21:21.290603 | 2022-08-02T01:22:53 | 2022-08-02T01:22:53 | 19,423,217 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | import random
setting = input()
set_data = setting.split()
plus = int(set_data[0])
minus = int(set_data[1])
res = []
plus_count = 0
while plus_count < plus:
left = random.randint(0, 99)
right = random.randint(0, 99 - left)
temp = [left, right]
if temp not in res:
res.append(temp)
plus_count += 1
for gen in range(plus):
print(res[gen][0], "+", res[gen][1], "=")
res = []
count = 0
while count < minus:
left = random.randint(0, 99)
right = random.randint(0, 99 - left)
temp = [left, right]
if temp not in res:
res.append(temp)
count += 1
for gen in range(minus):
print(res[gen][0], "-", res[gen][1], "=")
| [
"out.of.the.base.sfc@gmail.com"
] | out.of.the.base.sfc@gmail.com |
8860d334a01dc28063918c5dd36d0a8126459b73 | 34dd946a15565989e58f5e1ecc0812f01e7e6e8b | /lib/MailUserDistribution.py | e29dc5a08ace74780af92a59dbe14951eead8f61 | [] | no_license | rainbowers/AutomaticUser | 49194534bdd3c5c7b18bc9c68541dce9b4aa3333 | 82ae207f39b1771d7dfa1226cb1f44dba6bed313 | refs/heads/main | 2023-06-16T11:12:19.427792 | 2021-07-02T02:26:26 | 2021-07-02T02:26:26 | 381,607,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,927 | py | # -*- coding:utf-8 -*-
__author__ = 'Rainbower'
import pymysql
from config.settings import Params
import logging
# 日志设置
LOG_FORMAT = "%(asctime)s %(levelname)s %(filename)s %(lineno)d %(message)s"
logging.basicConfig(filename='../logs/debug.log', level=logging.INFO, format=LOG_FORMAT)
class MailUserDistribution():
def __init__(self):
self.port = Params['MysqlConfig']['port']
self.host = Params['MysqlConfig']['host']
self.user = Params['MysqlConfig']['user']
self.password = Params['MysqlConfig']['password']
self.conn = pymysql.connect(self.host,self.user,self.password,'jr_ldap_exchange',self.port)
self.cursor = self.conn.cursor()
def insert_update_maildb(self,**kwargs):
if type(kwargs) is dict:
#maildb jobnumber username gender mobile email jobgrade title organization is_active createtime
select_sql = 'select jobnumber from mail_user_distribution where jobnumber="%s"' % kwargs['jobnumber']
insert_sql = 'insert into mail_user_distribution(maildb,jobnumber,username,gender,mobile,email,jobgrade,title,organization,is_active,createtime) values("%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s")' % (
kwargs['maildb'], kwargs['jobnumber'], kwargs['username'], kwargs['gender'], kwargs['mobile'],
kwargs['email'], kwargs['jobgrade'], kwargs['title'], kwargs['organization'], kwargs['is_active'],
kwargs['createtime'])
update_sql = "update mail_user_distribution_counter set counter=%s where maildb='%s'" % (
kwargs['counter'], kwargs['maildb'])
update_user = 'update mail_user_distribution set mobile="%s",email="%s",jobgrade="%s",title="%s",organization="%s",is_active="%s",createtime="%s" where jobnumber="%s"' % (
kwargs['mobile'], kwargs['email'], kwargs['jobgrade'], kwargs['title'], kwargs['organization'],
kwargs['is_active'], kwargs['createtime'], kwargs['jobnumber'])
# print(update_user)
try:
self.conn.ping(reconnect=True)
#cursor = self.conn.cursor()
is_exist = self.cursor.execute(select_sql)
if is_exist == 0:
self.cursor.execute(insert_sql)
self.cursor.execute(update_sql)
logging.info(kwargs['jobnumber']+"入库成功")
else:
# print(kwargs['jobnumber']+"已存在、更新信息")
self.cursor.execute(update_user)
self.conn.commit()
self.conn.close()
except Exception as e:
logging.info(kwargs['jobnumber'] + "入库失败" + e)
self.conn.rollback()
def select_maildb(self):
'''
查找counter值最小的数据
:return: dbname,counter
'''
sql = "select maildb,counter from mail_user_distribution_counter where counter=(select min(counter) from mail_user_distribution_counter) limit 1"
try:
self.conn.ping(reconnect=True)
self.cursor.execute(sql)
result = self.cursor.fetchone()
except:
self.conn.rollback()
return result
def select_email(self,name):
'''
查找邮箱地址是否存在
:param name:
:return:
'''
namelist = []
sql = 'select email from mail_user_distribution where email like "%s%s";' %(name,'%')
try:
self.conn.ping(reconnect=True)
self.cursor.execute(sql)
results = self.cursor.fetchall()
for i in results:
namelist.append(i[0].split('@')[0])
except:
self.conn.rollback()
return namelist
if __name__ == '__main__':
a = MailUserDistribution()
a.select_email() | [
"694484533@qq.com"
] | 694484533@qq.com |
60ac3506785fe460cf5314302f7df31e91c27fef | 5a713577c8e10335de8c6e8dffc9390ec6cc5e59 | /apps/beltexam/migrations/0001_initial.py | 027cba91e3183b68306e881af8b34e9b2a236b79 | [] | no_license | RRuais/belt_exam | 76e18862ab3fb81e8c8a89d3e504d7826daeb36a | 0357377e06d43e3a79b12e0097ad59176a3906af | refs/heads/master | 2021-01-12T12:13:47.027120 | 2016-10-31T03:05:33 | 2016-10-31T03:05:33 | 72,379,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,365 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-30 20:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=200)),
('last_name', models.CharField(max_length=200)),
('email', models.EmailField(max_length=254)),
('password', models.CharField(max_length=200)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='message',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='beltexam.User'),
),
migrations.AddField(
model_name='comment',
name='message_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='beltexam.Message'),
),
migrations.AddField(
model_name='comment',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='beltexam.User'),
),
]
| [
"richruais@gmail.com"
] | richruais@gmail.com |
6089791cde362af5b1269ec38499d9fe9eee7e2f | 87d5b21265c381104de8f45aa67842a4adc880eb | /3438. Find Right interval.py | daca0e5ac90173dd5e8a9a5b8a06529e6b0cc223 | [] | no_license | MYMSSENDOG/leetcodes | ac047fe0d951e0946740cb75103fc94aae967166 | 8a52a417a903a0742034161471a084bc1e494d68 | refs/heads/master | 2020-09-23T16:55:08.579319 | 2020-09-03T19:44:26 | 2020-09-03T19:44:26 | 225,543,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | class Solution:
def findRightInterval(self, intervals):
for i, interval in enumerate(intervals):
interval.append(i)
s_intervals = sorted(intervals)
intervals.sort(key = lambda x : x[1])
e_intervals = intervals
s_idx = 0
ret = [-1] * len(intervals)
for s, e, idx in e_intervals:
while s_idx < len(s_intervals) and s_intervals[s_idx][0] < e:
s_idx += 1
if s_idx < len(s_intervals):
ret[idx] = s_intervals[s_idx][2]
return ret
intervals = [[1,4],[2,3],[3,4]]
sol = Solution()
print(sol.findRightInterval(intervals)) | [
"fhqmtkfkd@naver.com"
] | fhqmtkfkd@naver.com |
688356afd565f78fa2f2a9088532e9feabef2731 | 853e455782a944c4fed84b8f9bf4fa9a45d196d2 | /paw-pics-app-backend/paw-pics-app-env/bin/rst2odt_prepstyles.py | 1052d2337e554f5134b15457c2a2299685b1a27a | [
"MIT"
] | permissive | kevtraver1/Paw_Pics | de529393f7431e250000651884968d738e90f352 | d8649c4d67dc4eaa4f977b8da8f594323e724067 | refs/heads/master | 2022-12-13T10:12:54.436541 | 2018-12-26T21:39:39 | 2018-12-26T21:39:39 | 163,121,838 | 0 | 0 | MIT | 2022-12-10T04:25:39 | 2018-12-26T01:14:52 | Python | UTF-8 | Python | false | false | 1,779 | py | #!/Users/ktravers/Documents/AWS_Paw_Pics/Paw_Pics/paw-pics-app-backend/paw-pics-app-env/bin/python3
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
| [
"kevtraver1@gmail.com"
] | kevtraver1@gmail.com |
21a818a1b4255fb5851b3a266529d7db81e8bec9 | 128bd6301b594bd3483f400fe7bb20fdb51f9e19 | /codec/tqn.py | 01b1513bf9afee342325ee0c0257c6affefff517 | [] | no_license | Bakerjc-bgner/Parallel-SGD | a0c8a65f26d03cf04f6787adb98caa6134d7a37f | 9678c74f8e361a686e3db121712cccfb4f5162e1 | refs/heads/master | 2023-02-17T11:08:17.371571 | 2020-11-13T07:54:43 | 2020-11-13T07:54:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | import numpy as np
from codec.essential import BlockWeight
from codec.interfaces import Codec, netEncapsulation
from utils.constants import Parameter_Server
def stochastic_ternarization(arr):
"""
Stochastic quantization implementation based on paper in NIPS:
Alistarh et al. “QSGD: Communication-Efficient SGD via Gradient
Quantization and Encoding”. NIPS2017
"""
return np.asarray((np.random.random(arr.shape) < np.abs(arr)) * np.sign(arr))
def ternarization(arr, epsilon=1e-9):
"""
TERNARIZATION TQN implementation based on chapter 3.1.1 in
L. Hou and J. T. Kwok. Loss-aware weight quantization of deep networks.
In International Conference on Learning Representations (ICLR), 2018.
"""
a = 0.7
b = stochastic_ternarization(arr / a)
for i in range(1):
a = np.sum(np.multiply(b, arr)) / (np.sum(np.square(b)) + 1)
b = stochastic_ternarization(arr / (a + epsilon))
return a, b
class TQNPackage:
def __init__(self, content, node_id=-2):
"""
Build SGQ transmitting package
:param content: weights delta content
:param node_id: node_id where the package from
"""
self.node_id = node_id
self.__alpha = 0
self.__beta = 0
if content is not None:
self.__alpha, self.__beta = ternarization(content)
def content(self):
return self.__alpha * self.__beta
def encode(self):
"""
return encode object for network transmission
Class must have decode process in pair
:return: encode object
"""
res = dict()
res['TQNNode_ID'] = self.node_id
res['TQNALPHA'] = self.__alpha
res['TQNBETA'] = self.__beta.astype('int8')
return res
@staticmethod
def decode(dic):
"""
decode process cooperate with obj.encode()
:param dic: the result from obj.encode()
:return: rebuild object
"""
pkg = TQNPackage(None, dic['TQNNode_ID'])
pkg.__alpha = dic['TQNALPHA']
pkg.__beta = dic['TQNBETA'].astype('float64')
return pkg
class TQNClient(Codec):
def __init__(self, node_id):
super().__init__(node_id)
def update_blocks(self, block_weight: BlockWeight):
return netEncapsulation(Parameter_Server, TQNPackage(block_weight.content, self.node_id).encode())
def dispose(self):
pass
def receive_blocks(self, content: dict):
pkg = TQNPackage.decode(content)
self.set_result(pkg.content())
class TQNServer(Codec):
def __init__(self, node_id):
super().__init__(node_id)
self.__global_weights = 0
def update_blocks(self, block_weight: BlockWeight):
pass
def receive_blocks(self, content: dict):
pkg = TQNPackage.decode(content)
self.__global_weights -= pkg.content()
return netEncapsulation(pkg.node_id, TQNPackage(self.__global_weights, Parameter_Server).encode())
def dispose(self):
pass
| [
"zzaddp@live.com"
] | zzaddp@live.com |
2f3aeb4b0aa9b08d754e8194ba4c9812c27253a2 | 8c94fac8f49d0b3c54e985a610c166746a8e5f57 | /app/templates/sa_types.py | 71c7fc8bbbdc72f3942947f8f84b71113dc47f02 | [
"MIT"
] | permissive | PuZheng/generator-flask-skeleton | cef932a62559ee29e1d16c3876d7e5705e7bd509 | 746b8347d3a5e2f6f88dbd1b33df68f6058d4383 | refs/heads/master | 2020-12-24T13:28:11.377335 | 2015-06-23T04:17:47 | 2015-06-23T04:17:47 | 30,238,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | # -*- coding: UTF-8 -*-
import types
class ChoiceType(types.TypeDecorator):
'''
ChoiceType is used seperated the actual MAGIC NUMBER and representation,
the user could only use the representation to read and write the field
'''
impl = types.Integer
def __init__(self, choices, *args, **kw):
'''
:type choices: dict
:param choices: key is the value stored, value is the representation
'''
self.choices = dict(choices)
super(ChoiceType, self).__init__(*args, **kw)
def process_bind_param(self, value, dialect):
if value is None:
return None
for k, v in self.choices.items():
if v == value:
return k
raise ValueError(u'value must be of ' +
', '.join(self.choices.values()))
def process_result_value(self, value, dialect):
return None if value is None else self.choices[value]
class ListType(types.TypeDecorator):
impl = types.String
def process_bind_param(self, value, dialect):
return '|'.join(value)
def process_result_value(self, value, dialect):
return value.split('|') if value else []
| [
"xiechao06@gmail.com"
] | xiechao06@gmail.com |
5beb2b2bb1915d2a491d8f0434744c26066c0e23 | c10a018274e84889659bc1aa98064e61c5399e94 | /runs/out2/plot_carter.py | 536edd15d6107c2b2c0363c957bbe2ed6ad7784a | [
"MIT"
] | permissive | natj/bender | 9f0d86b5cac7e8bfa681a0e03f08996d3b9da5e8 | 41820faa301429721bd3d14eadd6f9b3ef620a9e | refs/heads/master | 2020-04-04T06:01:29.155732 | 2017-11-27T12:19:25 | 2017-11-27T12:19:25 | 46,436,958 | 1 | 1 | null | 2016-02-05T16:46:17 | 2015-11-18T17:55:04 | TeX | UTF-8 | Python | false | false | 9,067 | py | #!/usr/bin/python
from __future__ import division
import sys
import os
import h5py
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib.patches import Ellipse
from matplotlib.colors import LogNorm
print "Matplotlib version", matplotlib.__version__
cmap = plt.get_cmap('plasma_r')
import numpy as np
import scipy.integrate as integrate
import scipy.interpolate as interpolate
import scipy.ndimage as ndimage
import scipy.misc as misc
from scipy.spatial import ConvexHull, Delaunay
plt.rc('font', family='serif')
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.rc('axes', labelsize=8)
#fig = plt.figure(figsize=(3.54, 2.19)) #single column fig
fig = plt.figure(figsize=(7.48, 1.6)) #two column figure
#if len(sys.argv) != 3:
# print sys.argv[0], ": INFILE OUTFILE"
# exit(0)
#
#filename = sys.argv[1]
#outfilename = sys.argv[2]
#filename = 'extreme_400pts_1.4m_15km_600hz_15inc.h5'
#outfilename = 'extreme_400pts_1.4m_15km_600hz_15inc.pdf'
filename = 'casual_400pts_1.6m_12km_400hz_15inc.h5'
outfilename = 'casual_400pts_1.6m_12km_400hz_15inc.pdf'
path = os.path.abspath(os.path.dirname(sys.argv[0]))
print "Path is", path
print "Reading file", filename
f = h5py.File(filename, 'r')
# load data matrices and header variables
angular_velocity = f["header"].attrs["angular_velocity"]
redshift_matrix = f["image/redshift_matrix"][()]
time_delay_matrix = f["image/time_delay_matrix"][()]
hit_matrix = f["image/hit_matrix"][()]
hit_indices = hit_matrix >= 1
x_matrix = f["image/x_matrix"][()]
y_matrix = f["image/y_matrix"][()]
theta_matrix = f["image/theta_matrix"][()]
phi_matrix = f["image/phi_matrix"][()]
r_matrix = f["image/r_matrix"][()]
angle_matrix_rest = f["image/angle_matrix_rest"][()]
angle_matrix_obs = f["image/angle_matrix_observer"][()]
# fix nans
def fixnans(mat):
    """Replace every non-finite entry (NaN/inf) of *mat* with 0.0, in place.

    Returns the same array so calls can be chained.
    """
    bad = ~np.isfinite(mat)
    mat[bad] = 0.0
    return mat
if 1:
fixnans(redshift_matrix)
fixnans(time_delay_matrix)
fixnans(theta_matrix)
fixnans(phi_matrix)
# extent of the image plane
min_x = np.amin(x_matrix)
max_x = np.amax(x_matrix)
min_y = np.amin(y_matrix)
max_y = np.amax(y_matrix)
print 'x min/max {} {}'.format(min_x, max_x)
print 'y min/max {} {}'.format(min_y, max_y)
# in pixels
x_pixels = x_matrix.shape[0]
y_pixels = x_matrix.shape[1]
# maximum carters constant and H values
C1_matrix = np.zeros_like(x_matrix)
C2_matrix = np.zeros_like(x_matrix)
H_matrix = np.zeros_like(x_matrix)
#
# other useful stuff
# luminosity distance
lumdist = f["header"].attrs["luminosity_distance"]
# stefan-boltzmann constant in planck units
sigma_sb = 35392.0;
# temperature conversion factor from keV to Kelvin
kelvin_per_keV = 1.16045e7;
# juris NS data
#juridata = np.loadtxt(path + "/juridata/nu1Hz_blackbody_rho30deg.dat")
#juridata2 = np.loadtxt(path + "/juridata/nu400Hz_blackbody_rho30deg.dat")
#juridata_1deg = np.loadtxt(path + "/juridata/nu1Hz_blackbody_rho1deg.dat")
#juridata_1deg2 = np.loadtxt(path + "/juridata/nu400Hz_blackbody_rho1deg.dat")
# imshow with some defaults
def imgplane_imshow(obj, data, interpolation="none", cmap=cmap, **kwargs):
    """imshow helper: transpose *data* to image orientation and draw it over
    the module-level image-plane extent; extra kwargs pass straight through."""
    transposed = np.swapaxes(data, 0, 1)
    plane_extent = (min_x, max_x, min_y, max_y)
    return obj.imshow(transposed, interpolation=interpolation, cmap=cmap,
                      origin="lower", extent=plane_extent, **kwargs)
# get carter matrix data
num_geodesics = f["geodesics"].attrs["num_geodesics"]
print "geodesics in file:", num_geodesics
geodata = {}
ij_matrix = f["image/ij_matrix"]
max_redshift = np.nanmax(redshift_matrix)
print "max redshift", max_redshift
for geo_index in xrange(num_geodesics):
#for geo_index in xrange(100):
dataset = "geodesics/geodesic_{0}/coordinates".format(geo_index)
print "reading in geodesic data", geo_index, "from dataset", dataset
dataset = "geodesics/geodesic_{0}/hamiltonian".format(geo_index)
Hvalues_in = f[dataset][()]
Hvalues = np.abs(Hvalues_in-Hvalues_in[-1])
Cvalues_in1 = f["geodesics/geodesic_{0}/carters_constant_1".format(geo_index)][()]
#Cvalues1 = np.abs(Cvalues_in1-Cvalues_in1[-1])
Cvalues1 = np.abs(Cvalues_in1-Cvalues_in1[-1])/Cvalues_in1[-1]
Cvalues_in2 = f["geodesics/geodesic_{0}/carters_constant_2".format(geo_index)][()]
#Cvalues2 = np.abs(Cvalues_in2-Cvalues_in2[-1])
Cvalues2 = np.abs(Cvalues_in2-Cvalues_in2[-1])/Cvalues_in2[-1]
#Cratio = np.abs(Cvalues2/Cvalues1)
geo_ij = ij_matrix[geo_index, :]
H_matrix[geo_ij[0], geo_ij[1]] = np.amax(Hvalues)
C1_matrix[geo_ij[0], geo_ij[1]] = np.amax(Cvalues1)
C2_matrix[geo_ij[0], geo_ij[1]] = np.amax(Cvalues2)
def fmt(x, pos):
    """Colorbar tick formatter: render *x* as a bare power of ten.

    e.g. 0.001 -> '$10^{-3}$'.  *pos* is required by FuncFormatter but unused.
    """
    mantissa, exponent = '{:.2e}'.format(x).split('e')
    return r'$10^{{{}}}$'.format(int(exponent))
def plot_mat(ax, mat, title='', fmt=None, vmin=None, vmax=None, extent=None):
    """Draw *mat* (transposed) on *ax* with a logarithmic color scale.

    *fmt*, if given, is wrapped in a FuncFormatter for the colorbar ticks;
    vmin/vmax bound the LogNorm and *extent* maps the image into data space.
    Tick labels are blanked and the view is clipped to [-8, 8] in both axes.
    """
    formatter = None if fmt is None else matplotlib.ticker.FuncFormatter(fmt)
    image = ax.imshow(mat.T, origin='lower', cmap=cmap,
                      norm=LogNorm(vmin=vmin, vmax=vmax),
                      extent=extent)
    # Colorbar is attached to the module-level figure, shrunk to fit the row.
    fig.colorbar(image, ax=ax, shrink=0.6, format=formatter)
    ax.set_title(title)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_xlim(-8.0, 8.0)
    ax.set_ylim(-8.0, 8.0)
##################################################
#Construct output xy image plane from img object
##################################################
x_span = 12.5
y_span = 12.5
x_bins = 500
y_bins = 500
xs = np.linspace(-x_span, x_span, x_bins)
ys = np.linspace(-y_span, y_span, y_bins)
##################################################
# plot values on image plane
def trans(mat):
    """Transpose *mat* and flip it vertically (image-orientation transform)."""
    return np.flipud(np.transpose(mat))
def detrans(mat):
    """Inverse of trans(): undo the vertical flip, then transpose back."""
    return np.transpose(np.flipud(mat))
def clean_image(mat):
    """Mask exact-zero entries (treated as 'no data') and reorient via trans()."""
    masked = np.ma.masked_where(mat == 0, mat)
    return trans(masked)
#read redshift array
#fname = "../out/reds_f600pbbr15m1.4i15.csv"
fname = "../out/reds_f400pbbr12m1.6i15.csv"
data = np.genfromtxt(fname, delimiter=',')
redshift = np.reshape(data, (x_bins, y_bins) )
redshift = clean_image(redshift)
##################################################
#fname2 = '../out/reds_f600_bb_r15_m1.4_i15.csv'
fname2 = '../out/reds_f400_bb_r12_m1.6_i15.csv'
data2 = np.genfromtxt(fname2, delimiter=',')
redshift2 = np.reshape(data2, (x_bins, y_bins) )
redshift2 = clean_image(redshift2)
# other settings for imshow
extent=( xs[0], xs[-1], ys[0], xs[-1] )
interpolation = 'nearest'
# relative error
relerr = np.zeros(( x_bins, y_bins))
for i, x in enumerate(xs):
for j, y in enumerate(ys):
val1 = redshift[i,j]
val2 = redshift2[i,j]
errval = 0.0
if not(val2 == 0.0):
errval = np.abs( (val2 - val1)/val2 )
#errval = np.log10( np.abs((val2 - val1)/val2) )
relerr[i,j] = errval
relerr = np.ma.masked_where(relerr == 0, relerr)
relerr = relerr.T
##################################################
gs = plt.GridSpec(1, 4)
ax1 = plt.subplot(gs[0,0])
ax2 = plt.subplot(gs[0,1])
ax3 = plt.subplot(gs[0,2])
ax4 = plt.subplot(gs[0,3])
extentC = (-8.0, 8.0, -8.0, 8.0)
#extentC = (-10.0, 10.0, -10.0, 10.0)
plot_mat(ax1, H_matrix, title='Abs. err. in $H$', fmt=fmt,vmin=1.0e-13, vmax=1.0e-10, extent=extentC)
plot_mat(ax2, C1_matrix, title='Rel. err. in $C_1$', fmt=fmt, vmin=1.0e-3, vmax=1.0e1, extent=extentC)
plot_mat(ax3, C2_matrix, title='Rel. err. in $C_2$', fmt=fmt, vmin=1.0e-3, vmax=1.0e1, extent=extentC)
plot_mat(ax4, relerr, title='Rel. err. in $z$', fmt=fmt, vmin=1.0e-4, vmax=1.0e-2, extent=extent)
#plot_mat(ax1, H_matrix, fmt=fmt,vmin=1.0e-13, vmax=1.0e-10, extent=extentC)
#plot_mat(ax2, C1_matrix, fmt=fmt, vmin=1.0e-3, vmax=1.0e1, extent=extentC)
#plot_mat(ax3, C2_matrix, fmt=fmt, vmin=1.0e-3, vmax=1.0e1, extent=extentC)
#plot_mat(ax4, relerr, fmt=fmt, vmin=1.0e-4, vmax=1.0e-2, extent=extent)
fig.text(0.06, 0.5, '$R_{\\mathrm{e}} = 12$km \n $M=1.6 M_{\\odot}$ \n $\\nu=400$ Hz \n $i = 15^{\\circ}$',
ha='center', va='center', rotation='vertical' )#, size=28)
#fig.text(0.06, 0.5, '$R_{\\mathrm{e}} = 15$km \n $M=1.4 M_{\\odot}$ \n $\\nu=600$ Hz \n $i = 15^{\\circ}$',
# ha='center', va='center', rotation='vertical' )#, size=28)
plt.subplots_adjust(left=0.12, bottom=-0.13, right=0.98, top=1, wspace=0.11, hspace=0)
#fig.tight_layout()
#plt.tight_layout()
#plt.subplots_adjust(wspace=0.3)
#plt.savefig(outfilename )#, bbox_inches='tight')
plt.savefig(outfilename, bbox_inches='tight')
#plt.show()
| [
"nattila.joonas@gmail.com"
] | nattila.joonas@gmail.com |
c95c439e43cc1f9df41cfe9a8bc0fd4ccc988e75 | d8d370a5bd11e19000829498c41eee0e45056f21 | /Senado_API.py | d3969e35955f3fa6cc3093f215dff8ce1eee6269 | [] | no_license | Ryukamusa/API_Senado | 77db8b56ac4ea7977caaf2fcf623b488b1770250 | bc74294ebdf15d0830a9ce4857093c7ea7b20e4b | refs/heads/master | 2021-07-20T23:54:34.861744 | 2017-10-25T14:19:44 | 2017-10-25T14:19:44 | 108,268,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,892 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 8 18:12:35 2017
@author: helio
"""
#==============================================================================
#==============================================================================
# # 1a
#==============================================================================
#==============================================================================
class Plenario(object):
    """Chamber holding the senators currently in exercise."""

    def __init__(self):
        # Counter and roster both start empty.
        self.numero_senadores = 0
        self.senadores = []

    def adicionar(self, nome):
        """Register one more senator object in the chamber."""
        self.numero_senadores = self.numero_senadores + 1
        self.senadores.append(nome)

    def senadores_nomes(self):
        """Return the display names of everyone registered, in insertion order."""
        nomes = []
        for senador in self.senadores:
            nomes.append(senador.nome)
        return nomes
class Parlamentar(object):
    """One senator built from the 6-item list of getParlamentarInfo().

    Construction registers the new instance with *plenario* and attaches
    the corresponding Mandato record.
    """

    # (attribute name, key in the identification dict)
    _CAMPOS_ID = (
        ('codigo', 'codigo'), ('nome', 'nome'),
        ('nome_completo', 'nome_completo'), ('sexo', 'sexo'),
        ('forma_tratamento', 'forma'), ('foto', 'foto'),
        ('pagina', 'pagina'), ('email', 'email'),
        ('partido', 'partido'), ('UF', 'uf'),
    )

    def __init__(self, lista, plenario):
        id_dic, mand_dic, legis, titular, suplentes, exercicio = lista
        # Copy the identification fields onto same-named attributes.
        for atributo, chave in self._CAMPOS_ID:
            setattr(self, atributo, id_dic[chave])
        plenario.adicionar(self)
        # Mandate record for this senator.
        self.mandato = Mandato(mand_dic, legis, titular, suplentes, exercicio)
class Mandato(object):
    """One mandate: code/UF plus its legislatures, alternates, holder and
    exercise periods, each wrapped in its own small record class."""

    def __init__(self, mand_dic, legis, titl, supl, exerc):
        self.codigo, self.UF = mand_dic['codigo'], mand_dic['uf']
        # First and second legislatures / alternates share the same key scheme.
        for n in ('1', '2'):
            setattr(self, 'legislatura_%s' % n, Legislatura(legis, k=n))
            setattr(self, 'suplente_%s' % n, Suplente(supl, k=n))
        self.titular = Titular(titl)
        self.exercicios = Exercicios(exerc)
class Legislatura(object):
    """Number and start/end dates of legislature *k* ('1' or '2')."""

    def __init__(self, legis, k='1'):
        dados = legis['leg%s' % k]
        self.numero = dados['n']
        self.inicio = dados['inicio']
        self.fim = dados['fim']
class Titular(object):
    """Seat holder backing a mandate (code, name, participation role)."""

    def __init__(self, titl, k='1'):
        # *k* is accepted but unused; kept for signature parity with the
        # sibling record classes.
        self.codigo, self.nome, self.participacao = (
            titl['codigo'], titl['nome'], titl['particip'])
class Suplente(object):
    """Alternate number *k* ('1' or '2') for a mandate."""

    def __init__(self, supl, k='1'):
        registro = supl['sup%s' % k]
        self.codigo = registro['codigo']
        self.nome = registro['nome']
        self.participacao = registro['particip']
class Exercicios(object):
    """Thin wrapper holding the raw dict of exercise periods."""

    def __init__(self, exerc):
        self.exercicios = exerc
#==============================================================================
#==============================================================================
# # 1b
#==============================================================================
#==============================================================================
import requests
from xml.etree import ElementTree
def getp(lst, k='Parlam'):
    """Index of the first element whose .tag contains *k*; None when absent."""
    for index, element in enumerate(lst):
        if k in element.tag:
            return index
    return None
def get_ID(parlamentar):
    """Extract the IdentificacaoParlamentar sub-element into a plain dict.

    Keys are the short names the rest of the code expects; values are the
    text of the corresponding XML child tags.
    """
    ids = parlamentar.find('IdentificacaoParlamentar')
    campos = {
        'codigo': 'CodigoParlamentar',
        'nome': 'NomeParlamentar',
        'nome_completo': 'NomeCompletoParlamentar',
        'sexo': 'SexoParlamentar',
        'forma': 'FormaTratamento',
        'foto': 'UrlFotoParlamentar',
        'pagina': 'UrlPaginaParlamentar',
        'email': 'EmailParlamentar',
        'uf': 'UfParlamentar',
        'partido': 'SiglaPartidoParlamentar',
    }
    return {chave: ids.find(tag).text for chave, tag in campos.items()}
def get_mandato(parlamentar):
    """Code and UF of the senator's Mandato element, as a dict."""
    mandato = parlamentar.find('Mandato')
    return {'codigo': mandato.find('CodigoMandato').text,
            'uf': mandato.find('UfParlamentar').text}
def get_legislatura(parlamentar):
    """Both legislatures of the mandate as {'leg1': {...}, 'leg2': {...}},
    each with number ('n') and start/end dates ('inicio'/'fim')."""
    mandato = parlamentar.find('Mandato')
    elementos = {'leg1': mandato.find('PrimeiraLegislaturaDoMandato'),
                 'leg2': mandato.find('SegundaLegislaturaDoMandato')}
    resultado = {}
    for chave, el in elementos.items():
        resultado[chave] = {'n': el.find('NumeroLegislatura').text,
                            'inicio': el.find('DataInicio').text,
                            'fim': el.find('DataFim').text}
    return resultado
def get_titular(parlamentar):
    """Seat holder of the mandate; an all-None dict when no Titular exists."""
    try:
        el = parlamentar.find('Mandato').find('Titular')
        return {'codigo': el.find('CodigoParlamentar').text,
                'nome': el.find('NomeParlamentar').text,
                'particip': el.find('DescricaoParticipacao').text}
    except:  # noqa: E722 -- any lookup failure means "no titular recorded"
        return {'codigo': None, 'nome': None, 'particip': None}
def get_suplentes(parlamentar):
    """Dict of up to two alternates keyed 'sup1'/'sup2'; missing slots stay None.

    The slot number comes from the leading digit of DescricaoParticipacao
    (e.g. '1o Suplente' -> sup1), mirroring the upstream XML convention.
    """
    vazio = {'codigo': None, 'nome': None, 'particip': None}
    suplentes = {'sup1': dict(vazio), 'sup2': dict(vazio)}
    for el in parlamentar.find('Mandato').find('Suplentes'):
        particip = el.find('DescricaoParticipacao').text
        suplentes['sup%i' % int(particip[0])] = {
            'codigo': el.find('CodigoParlamentar').text,
            'nome': el.find('NomeParlamentar').text,
            'particip': particip,
        }
    return suplentes
def get_exercicio(parlamentar):
    """All exercise periods of the mandate, keyed '<TagName><1-based index>',
    each mapped to a {child tag: text} dict."""
    container = parlamentar.find('Mandato').find('Exercicios')
    exercicios = {}
    for indice, periodo in enumerate(container, start=1):
        chave = '%s%i' % (periodo.tag, indice)
        exercicios[chave] = {filho.tag: filho.text for filho in periodo}
    return exercicios
def getParlamentarInfo(parlamentar):
    """Run every per-senator extractor and bundle the results in the fixed
    6-item order that Parlamentar.__init__ unpacks."""
    extratores = (get_ID, get_mandato, get_legislatura,
                  get_titular, get_suplentes, get_exercicio)
    return [extrator(parlamentar) for extrator in extratores]
url = "http://legis.senado.gov.br/dadosabertos/senador/lista/atual"
response = requests.get(url)
tree = ElementTree.fromstring(response.content)
chld=tree.getchildren()
pos=getp(chld,k='Parl')
parlams=tree[pos].findall('Parlamentar')
plenario=Plenario()
for i in range(len(parlams)):
infos_list=getParlamentarInfo(parlams[i])
_=Parlamentar(infos_list,plenario)
| [
"helio.mgl@gmail.com"
] | helio.mgl@gmail.com |
9211cf3096169124aceb6966857aca70685f1434 | 671474dd27dd904b799d113a62af4ddd1d4d1d53 | /leetcode/ContainMostWater.py | a81ce0bbedd3fc8c29750ca0bada2d26997dfc55 | [] | no_license | babyrain045/pyproject | 0663d96f715eb49c68457e60b7c40ef25c0400ba | e31dd17af01219d6a0eaa1153129a7b807ffdbbf | refs/heads/master | 2021-10-24T11:26:21.922015 | 2019-03-25T15:36:54 | 2019-03-25T15:36:54 | 116,096,171 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 918 | py |
a_list = [1,8,6,2,5,4,8,3,7]
lenth = len(a_list)
count = 1
r_index = -1
r_flag = a_list[r_index]
area = 0
while(count - r_index <= lenth):
if a_list[count-1] < a_list[r_index]:
print(a_list[count-1],a_list[r_index])
r_wid = lenth + r_index + 1
up_area = (r_wid - count) * a_list[count-1]
if up_area > area:
area = up_area
count = count + 1
print("jishu:",count)
print(a_list[count-1],r_flag)
print("zuobian area:",up_area,a_list[count - 1],r_wid,count)
else:
r_wid = r_index + lenth + 1
r_flag = a_list[r_index]
up_area = (r_wid - count) * r_flag
print(a_list[count-1],r_flag)
print("youbian area:",up_area)
if up_area > area:
area = up_area
r_index = r_index - 1
print(area)
| [
"am_crrr@qq.com"
] | am_crrr@qq.com |
2be77f166383e055b27d557705bce9f9511b40c5 | dce8035b3f68a862405a2907f509992ad084cbaf | /models/config_data.py | c6508f37ee8cfc4f8f01cb76d5f3f09f112d33dc | [
"MIT"
] | permissive | AlberLC/flananini | 1ce97a17c9fd8d56f28318a1e58d4dbef9b5e594 | 0b3b2f8f12af89da67a1addd24d0274110738b47 | refs/heads/main | 2023-03-30T11:59:55.928477 | 2021-04-10T20:12:35 | 2021-04-10T20:12:35 | 349,248,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | import json
from dataclasses import dataclass
@dataclass
class ConfigData:
    """Configuration loaded from (and saved to) ./config.json.

    Missing keys fall back to the defaults below; truthiness reports whether
    every required field has a value.
    """

    # Telegram API credentials / account.
    api_id = None
    api_hash = None
    phone = None
    # Mail account used to poll for messages.
    email_name = None
    email_pass = None
    # Polling behaviour and delivery target.
    period = None
    n_emails = None
    send_to = None
    emails_to_check = []

    def __init__(self):
        # 'a+' creates the file when missing without truncating an existing
        # one; seek(0) is needed because 'a+' positions at the end.
        with open('config.json', 'a+') as file:
            file.seek(0)
            text = file.read()
            if not text:
                # First run only: seed the file with an empty JSON object.
                # (The previous code wrote `text` back unconditionally, which
                # appended a duplicate copy of the whole config on every start
                # with a non-empty file and corrupted the JSON for later runs.)
                text = '{}'
                file.write(text)
            config = json.loads(text)
            self.api_id = config.get('api_id')
            self.api_hash = config.get('api_hash')
            self.phone = config.get('phone')
            self.email_name = config.get('email_name')
            self.email_pass = config.get('email_pass')
            self.period = config.get('period', 5)
            self.n_emails = config.get('n_emails', 5)
            self.send_to = config.get('send_to', 'me')
            self.emails_to_check = config.get('emails_to_check', [])

    def __bool__(self):
        """True only when every required configuration field is set."""
        return bool(self.api_id and
                    self.api_hash and
                    self.phone and
                    self.email_name and
                    self.email_pass and
                    self.period and
                    self.n_emails and
                    self.send_to and
                    self.emails_to_check)

    def save_config(self, api_id, api_hash, phone, email_name, email_pass, period, n_emails, send_to, emails_to_check):
        """Update every field and persist the instance state to config.json."""
        self.api_id = api_id
        self.api_hash = api_hash
        self.phone = phone
        self.email_name = email_name
        self.email_pass = email_pass
        self.period = period
        self.n_emails = n_emails
        self.send_to = send_to
        self.emails_to_check = emails_to_check
        with open('config.json', 'w') as file:
            json.dump(self.__dict__, file)
| [
"alberlc@outlook.com"
] | alberlc@outlook.com |
8f1ace82148eeb7f7d40c967c40b5fa627ed65dc | 07a2d9a942402bc1b525af5baea96aa455d45604 | /users/signals.py | 841356d7b11aeb25d84f1ee5ed57bb88b7ad713e | [] | no_license | Vahid97/Devsearch | 7c0b9085c362fe0282ff5082b29ea6d114c7c328 | 0c1677b1c1a343263eec46a0b9209f27e588e65e | refs/heads/master | 2023-08-24T06:21:39.024279 | 2021-10-14T16:19:31 | 2021-10-14T16:19:31 | 415,063,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.contrib.auth.models import User
from .models import Profile
def createProfile(sender, instance, created, **kwargs):
    """post_save receiver for User: create the matching Profile on first save.

    Acts only when *created* is True (the initial INSERT), so subsequent
    User.save() calls never spawn duplicate profiles.  The leftover debug
    print and the unused `profile` binding were removed.
    """
    if created:
        user = instance
        Profile.objects.create(
            user=user,
            username=user.username,
            email=user.email,
            name=user.first_name,
        )
def updateUser(sender, instance, created, **kwarg):
    """post_save receiver for Profile: mirror profile edits back onto the User.

    Skipped on creation (created=True) because createProfile handles the
    initial sync in the other direction; mirroring here too would loop.
    """
    profile = instance
    user = profile.user
    if not created:  # idiomatic form of the previous `created == False`
        user.first_name = profile.name
        user.username = profile.username
        user.email = profile.email
        user.save()
def deleteUser(sender, instance, **kwargs):
    """post_delete receiver for Profile: remove the now-orphaned User too."""
    instance.user.delete()
post_save.connect(createProfile, sender=User)
post_save.connect(updateUser, sender=Profile)
post_delete.connect(deleteUser, sender=Profile)
| [
"vahidefendi5@gmail.com"
] | vahidefendi5@gmail.com |
6a96f06a548d7b7272f54e5882dc0e37c7143af7 | 56e93249ee04b30f1f20fd40361e160504e7e5ac | /venv/bin/pip | 8d532b612b036fe2cfab2965308b4a9d1c27a863 | [] | no_license | elahe-dastan/AI-NLP-textClassification | f5683c0fe632e28a70d3191bd43ab469fd205705 | 03d2d3778c15c77d4dd361290ccade0aaad4b565 | refs/heads/master | 2022-04-09T10:20:48.186155 | 2020-03-01T10:35:26 | 2020-03-01T10:35:26 | 236,068,057 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | #!/home/raha/PycharmProjects/NLP/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"elahe.dstn@gmail.com"
] | elahe.dstn@gmail.com | |
e00b4d1e0425338317639ae69e1c7d45820b13fc | dbfc6a1fa80eeabb5773fdc31d5b0fabad5bdd02 | /directory_unit/migrations/0004_auto_20151213_2022.py | 3b29d5076464120cc5e4efa820c2dea5b0983262 | [] | no_license | daaray/library_website | 75803a655f391789235f70b8a552fbf79b18b428 | 589d594ef2bc874a1c84f32e3853400f4f40bd03 | refs/heads/master | 2020-12-30T13:59:48.061235 | 2017-05-09T20:39:41 | 2017-05-09T20:41:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('directory_unit', '0003_unitsupervisor'),
]
operations = [
migrations.AlterModelOptions(
name='directoryunit',
options={'ordering': ['fullName']},
),
]
| [
"jej@moss.lib.uchicago.edu"
] | jej@moss.lib.uchicago.edu |
cc50e8cf7bfcdfdd3fa42fef1852bb043cd992f3 | 4a4b05119f116be2f674cf39e66944ae44f76f55 | /rolldecayestimators/tests/__init__.py | 714f8e93dcd2aa29643f667e5931f0b471622b68 | [] | no_license | martinlarsalbert/rolldecay-estimators | a84ea4fc8dca2b8b7e6573c9b6e8d162809c6302 | c74642ce2b5299d4aa849c277fbaf8688f79f760 | refs/heads/master | 2022-11-20T20:39:13.488465 | 2022-11-07T13:04:40 | 2022-11-07T13:04:40 | 237,425,836 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | import os
path = os.path.dirname(__file__) | [
"marale@kth.se"
] | marale@kth.se |
8b14517720719953b2311219495656be8d43d63d | 6458a9435dc52f73fee0c1b37c4ab6cfc41356fd | /davis/gif_gen.py | 6e3242db5fa2b07be678bbdf757c829006f413de | [] | no_license | belaalb/frameGAN | 3af57c8b812cd7c90b3d9cbccb6fc585a2068e19 | 8a58ddcc4a7f24f145e8dd1ab9ef37ea9cf64712 | refs/heads/master | 2020-03-19T23:03:20.378243 | 2019-05-29T01:46:41 | 2019-05-29T01:46:41 | 136,991,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | import cv2
import numpy as np
import math
import glob
import os
from tqdm import trange
def gif_gen(im_size=64, input_path="./results/", output_path='./davis_gif/'):
    """Tile the first FRAMES frames of the first INPUT_SIZE result folders
    side by side (with a thin border per tile) and write one composite PNG
    per frame index into *output_path*.

    *im_size* is accepted for API compatibility but not used by the tiling.
    """
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    classesDir = os.listdir(input_path)
    INPUT_SIZE = 20
    FRAMES = 40
    total = []
    for v in trange(0, INPUT_SIZE, desc='Progress in DAVIS Dataset'):
        filenames = glob.glob(input_path + classesDir[v] + "/*.png")
        filenames.sort()
        # Read the first FRAMES frames of this sequence as one (F, H, W, 3) array.
        images = np.asarray([cv2.imread(filenames[i], cv2.IMREAD_COLOR)
                             for i in range(0, FRAMES)])
        total.append(images)
    total = np.asarray(total)
    for j in range(0, FRAMES):
        # `None` sentinel replaces the previous fragile `hcat == []`
        # list-vs-ndarray comparison.
        hcat = None
        for inputNumber in range(0, INPUT_SIZE):
            black = [255, 255, 255]  # ---Color of the border--- (white, despite the name)
            constant = cv2.copyMakeBorder(total[inputNumber, j, :, :, :],
                                          2, 1, 1, 2, cv2.BORDER_CONSTANT,
                                          value=black)
            cv2.imshow('constant', constant)
            hcat = constant if hcat is None else cv2.hconcat((hcat, constant))
        cv2.imwrite(output_path + str(j) + ".png", hcat)
    cv2.destroyAllWindows()
if __name__ == '__main__':
gif_gen()
| [
"hrm@cin.ufpe.br"
] | hrm@cin.ufpe.br |
0d0ce9e1b68e5d5eddb4e9de6d3f7d68284fd168 | 7b67d9e7ea01f43d0734860ea371419fd47f3a13 | /test.py | 18f11f80cb84c5c307d1ba2cff3ca6a560a73d51 | [] | no_license | milanetf/variant_calling | 915eeda82f330fc58f9fb6a7d995719f4a8947c5 | 1bcb4f182f8b15f2e60ad3031c60abdd421f3970 | refs/heads/master | 2020-03-22T13:26:46.627182 | 2018-07-09T15:05:26 | 2018-07-09T15:05:26 | 140,108,520 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,148 | py | """Usage: test.py (-h --help) ...
test.py -t <testType> ...
testType:
1 variantCallingCore test
2 variantCalling test
3 addVarinatToIntVcArray test
Options:
-h --help | help
-t testType | execute specified test
"""
import variant_calling
import msvcrt
from docopt import docopt
def testVariantCallingCoreFunction():
    """Integration check: feed fixed pileup rows through the module and
    compare the resulting VCF text against a golden string.

    NOTE(review): despite its name this exercises variantCalling(), not
    variantCallingCore() -- the two test names look swapped relative to the
    docopt table at the top of the file; confirm before renaming.
    NOTE(review): the output path uses unescaped backslashes ("\output\...");
    it happens to work because \o and \V are not escape sequences, but a raw
    string or os.path.join would be safer.
    """
    print("Executing variantCallingCore function test...")
    # Redirect the module's VCF handle to a scratch file and truncate it.
    variant_calling.VCFfile = open(variant_calling.dir_path + "\output\VCF_test.vcf", "a+")
    variant_calling.VCFfile.seek(0)
    variant_calling.VCFfile.truncate()
    variant_calling.VCFfile.seek(0)
    # Fixed pileup input: SNP, insertion, deletion, and two low-depth rows.
    variant_calling.content = [["1", "336", "C", "34", "A$A$A$A$AAAA,.,AA,,..AAAAaaaa...CcTtGg", "test"], ["1", "400", "A", "12", "...,,+2AC+2AC+2AC+2AC+2AC+2AC+2AC+2AC+2AC+2AC+2AC+2AC,.,...,", "test"], ["1", "500", "A", "12", "...,,-2AC-2AC-2AC-2AC-2AC-2AC-2AC-2AC-2AC-2AC-2AC-2AC,.,...,", "test"], ["1", "501", "A", "1", ".", "test"], ["1", "502", "C", "1", ".", "test"]]
    variant_calling.variantCalling()
    variant_calling.VCFfile.seek(0)
    #print(VCFfile.read())
    # Golden output: one het SNP and two hom indel calls.
    if variant_calling.VCFfile.read() == "1 336 C A GT 0/1\n1 400 A AAC GT 1/1\n1 500 AAC A GT 1/1\n":
        print("Test run succesufully...")
    else:
        print("Test fail...")
def testVariantCallingFunction():
    """Check variantCallingCore()'s per-base tally for one pileup row.

    NOTE(review): despite its name this exercises variantCallingCore(); see
    the naming note on testVariantCallingCoreFunction above.
    """
    print("Executing variantCalling function test...")
    incVcArray = variant_calling.variantCallingCore(["1", "336", "C", "34", "A$A$A$A$AAAA,.,.,,,..AAAAaaaa...CcTtGg+2AC", "test"])
    # Total of all per-variant counts must equal the read depth (+1 insertion).
    sum = 0  # NOTE(review): shadows the builtin sum()
    print(incVcArray)
    for x in incVcArray:
        sum += x[0]
    print(sum)
    if sum != 35:
        print("Test fail...")
    # Expected per-slot counts: ref=12, T=2, G=2, A=16, C=2, +2AC=1.
    if incVcArray[0][0] == 12 and incVcArray[1][0] == 2 and incVcArray[2][0] == 2 and incVcArray[3][0] == 16 and incVcArray[4][0] == 2 and incVcArray[5][0] == 1:
        print("Test run succesufully...")
    else:
        print("Test fail...")
def testAddVarinatToIntVcArray():
    """Check that addVarinatToIntVcArray appends insertion/deletion slots
    (with zero counts) after the five base slots."""
    print("Executing addVarinatToIntVcArray function test...")
    # Base slots: reference match plus the four nucleotides.
    p = ['.', 'T', 'G', 'A', 'C']
    incVcArray = [[0, p[i]] for i in range(len(p))]
    variant_calling.addVarinatToIntVcArray(incVcArray, "+2AC")
    if incVcArray[5][1] == "+2AC":
        print("Insertion added succesufully...")
        # Only test the deletion once the insertion slot landed at index 5.
        variant_calling.addVarinatToIntVcArray(incVcArray, "-2AT")
        if incVcArray[6][1] == "-2AT":
            print("Deletion added succesufully...")
            print("Function addVarinatToIntVcArray executed succesufully...")
        else:
            print("Test failed due to deletion adding!")
    else:
        print("Test failed due to insertion adding!")
def testFunction(m):
    """Dispatch the test selected by the docopt -t option ('1'..'3')."""
    global VCFfile
    dispatch = {
        '1': testVariantCallingCoreFunction,
        '2': testVariantCallingFunction,
        '3': testAddVarinatToIntVcArray,
    }
    chosen = dispatch.get(str(m))
    if chosen is not None:
        chosen()
    else:
        print("Entered value is not allowed...")
# Run the program
if __name__ == '__main__':
arguments = docopt(__doc__)
print(arguments)
testFunction(arguments['-t'][0]) | [
"noreply@github.com"
] | milanetf.noreply@github.com |
610b5e884721abf6ec324e338013d09b0103064c | ce862c7ff3de8866a6ba1a445dbd099e7cc8f077 | /frontend_render/views.py | 3716f3b05a0f93540f949628509e69def47226e4 | [] | no_license | TsingJyujing/ServerDash | 288ce44d465c08132c947e2b83c9b107815b96ae | 91ac73f848dc4ed4e000fd79198685810851e693 | refs/heads/master | 2021-01-13T07:55:53.904425 | 2017-12-27T14:26:22 | 2017-12-27T14:26:22 | 95,017,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,194 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import string
from django.shortcuts import render
def get(req, key, default):
    """Return req.GET[key], falling back to *default* when the key is absent.

    Narrowed from a bare `except:` -- only a missing key should trigger the
    fallback; other errors (e.g. req lacking GET) now surface instead of
    being silently swallowed.
    """
    try:
        return req.GET[key]
    except KeyError:
        return default
def view_current(req):
    """Render the full current-status dashboard page."""
    return render(req, "view_current.html")
def view_history_cpu(req):
    """Historical chart page for the summed CPU usage (percent)."""
    return render(req, "view_history_info.html", context={
        "title": "CPU用量查看",
        "div_name": "cpu_usage",
        "chart_title": "CPU用量",
        "data_legend": "CPU(%)",
        "api_name": "/api/history/cpu/sum",
    })
def view_history_memory_virtual(req):
    """Historical chart page for virtual (main) memory usage."""
    return render(req, "view_history_info.html", context={
        "title": "内存用量查看",
        "div_name": "ram_usage",
        "chart_title": "内存用量",
        "data_legend": "RAM(%)",
        "api_name": "/api/history/memory/virtual",
    })
def view_history_memory_swap(req):
    """Historical chart page for swap memory usage."""
    return render(req, "view_history_info.html", context={
        "title": "交换内存用量查看",
        "div_name": "swap_ram_usage",
        "chart_title": "交换内存用量",
        "data_legend": "RAM(%)",
        "api_name": "/api/history/memory/swap",
    })
def view_history_cpu_single(req):
    """Historical chart page for one CPU core, selected via ?cpuid=N.

    NOTE(review): string.atoi is Python-2-only; use int() when porting.
    NOTE(review): div_name reuses "swap_ram_usage" -- looks like a
    copy-paste from the swap view; confirm against the template.
    """
    index = string.atoi(get(req, "cpuid", "0"))
    return render(req, "view_history_info.html", context={
        "title": "CPU [%d] 用量查看" % index,
        "div_name": "swap_ram_usage",
        "chart_title": "CPU[%d]" % index,
        "data_legend": "Usage(%)",
        "api_name": "/api/history/cpu/single?cpuid=%d" % index,
    })
def view_main(req):
    """Render the landing/index page."""
    return render(req, "index_page.html")
def view_current_lite(req):
    """Render the compact table-only variant of the current-status view."""
    return render(req, "view_current_table.html")
def view_current_disk(req):
    """Render the current disk-usage overview."""
    return render(req, "view_current_disk.html")
def view_current_process(req):
    """Render the current process list."""
    return render(req, "view_process.html")
def view_history_disk(req):
    """Historical disk-usage chart for one device, selected via ?device=.

    Backslashes in the device id (Windows drive paths) are doubled so the
    id survives being embedded in the JS string on the template side.
    """
    device_id = get(req, "device", "C:\\")
    return render(req, "view_history_disk.html", context={
        "device": device_id,
        "data_api": "/api/history/disk/query?device=" + device_id.replace("\\", "\\\\")
    })
| [
"yuanyifan1993@foxmail.com"
] | yuanyifan1993@foxmail.com |
6fc802b1a8479f363d733f1f6ab7160b0e51f014 | 5c9a29f5a3bbaa328be5ead3f69a79cb4bfd6148 | /GoState.py | 22c577dbd82130783a2089ae903e4ea7008f2a23 | [] | no_license | alaydshah/GoAI | 546c2de431cdbfcc361c4f8fe55278a35e20fd6c | dc1f4b8931fc8abf677dac15a065899d2cecd64b | refs/heads/master | 2022-04-08T18:27:31.464043 | 2020-03-27T23:30:09 | 2020-03-27T23:30:09 | 249,115,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | from GoBoard import Board
from GoMove import Move
from GoPoint import Point
from copy import deepcopy
EMPTY, BLACK, WHITE = range(0, 3)
class State:
    """Immutable snapshot of a Go game: board, move history link, and whose
    turn it is.  Players are the module constants BLACK(1)/WHITE(2)."""

    def __init__(self, board, p_state, next_player, move):
        self.board: Board = board
        # Link to the previous State; used for the ko (board-repetition) check.
        self.previous_state: State = p_state
        self.next_player = next_player
        # BLACK(1) <-> WHITE(2) via 3 - player.
        self.other_player = 3 - next_player
        self.last_move: Move = move
    # def is_move_valid(self, move):
    #     if move.is_pass:
    #         return True
    #     if not self.board.valid_point_check(move):
    #         return False
    #     temp_board = deepcopy(self.board)
    #     temp_board.place_stone(move, self.next_player)
    #     if not temp_board.has_liberty(move.point):
    #         return False
    #     if temp_board == self.previous_state.board:
    #         return False
    #     return True
    def apply_move(self, move: Move):
        """Return the successor State after *move*, or None if it is illegal.

        For a pass, the board is copied unchanged.  The order of the checks
        matters: basic validity, then suicide, then ko.
        """
        next_board = deepcopy(self.board)
        if move.is_play:
            if not self.board.valid_move_check(move):  # Basic Move Validity Checks
                return None
            next_board.place_stone(move, self.next_player)
            point: Point = move.point
            if not next_board.has_liberty(point):  # Suicidal Move Check
                return None
            if next_board == self.previous_state.board:  # Ko Violation check
                return None
        return State(next_board, self, self.other_player, move)
    def stone_diff(self):
        """Black stones minus white stones, minus board_size/2.

        NOTE(review): the board_size/2.0 term looks like a komi-style
        handicap for White -- confirm the intended scoring rule.
        """
        black_stones = 0
        white_stones = 0
        for i in range(self.board.board_size):
            for j in range(self.board.board_size):
                point = Point(i, j)
                if self.board.get_point_color(point) == BLACK:
                    black_stones += 1
                elif self.board.get_point_color(point) == WHITE:
                    white_stones += 1
        return black_stones - white_stones - self.board.board_size/2.0
    def is_terminal(self):
        """True when the last two moves were both passes (game over)."""
        last_move: Move = self.last_move
        last_to_last_move: Move = self.previous_state.last_move
        if last_move is not None and last_to_last_move is not None:
            return last_move.is_pass and last_to_last_move.is_pass
        return False
| [
"alaylien11@gmail.com"
] | alaylien11@gmail.com |
2847b8fc07b0d1d4b7a080625656673b26af874b | d659810b24ebc6ae29a4d7fbb3b82294c860633a | /aliyun-python-sdk-ecs/aliyunsdkecs/__init__.py | 35ecf57dbcca9c1abba51cbcd6779a7fa33dc5d4 | [
"Apache-2.0"
] | permissive | leafcoder/aliyun-openapi-python-sdk | 3dd874e620715173b6ccf7c34646d5cb8268da45 | 26b441ab37a5cda804de475fd5284bab699443f1 | refs/heads/master | 2023-07-31T23:22:35.642837 | 2021-09-17T07:49:51 | 2021-09-17T07:49:51 | 407,727,896 | 0 | 0 | NOASSERTION | 2021-09-18T01:56:10 | 2021-09-18T01:56:09 | null | UTF-8 | Python | false | false | 22 | py | __version__ = '4.24.8' | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
72a562a710202adb8ea56eb6eaf46f7950173472 | 2143b0593a92a705862e54a4c5da0f9fbf27d464 | /project_B_q4.py | b04c1bd6e40b126ffb5c60195d30958a465bea3a | [] | no_license | TomDrnd/NN_Basic_Classification_and_Function_Approximation | eb17a3cb1ecc4e0c683969b36ab2aeb1cc2e836a | 5a6dc1ecec2e2b0444d1710df70677979756f6da | refs/heads/master | 2021-07-13T05:45:01.316792 | 2017-10-19T03:06:23 | 2017-10-19T03:06:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,307 | py | import time
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
np.random.seed(10)
epochs = 1000
batch_size = 32
no_hidden1 = 60 #num of neurons in hidden layer 1
no_hidden2 = 20 #num of neurons in next hidden layers
learning_rate = 10**-4
floatX = theano.config.floatX
# scale and normalize input data
def scale(X, X_min, X_max):
    """Min-max scale X into [0, 1] given (per-feature) minima and maxima."""
    span = X_max - X_min
    return (X - X_min) / span
def normalize(X, X_mean, X_std):
    """Standardize X to zero mean / unit variance given mean and std."""
    centered = X - X_mean
    return centered / X_std
def shuffle_data(samples, labels):
    """Shuffle samples and labels together with one shared permutation so
    each row keeps its label."""
    order = np.arange(samples.shape[0])
    np.random.shuffle(order)
    return samples[order], labels[order]
#read and divide data into test and train sets
cal_housing = np.loadtxt('cal_housing.data', delimiter=',')
X_data, Y_data = cal_housing[:,:8], cal_housing[:,-1]
Y_data = (np.asmatrix(Y_data)).transpose()
X_data, Y_data = shuffle_data(X_data, Y_data)
#separate train and test data
m = 3*X_data.shape[0] // 10
testX, testY = X_data[:m],Y_data[:m]
trainX, trainY = X_data[m:], Y_data[m:]
# scale and normalize data
trainX_max, trainX_min = np.max(trainX, axis=0), np.min(trainX, axis=0)
testX_max, testX_min = np.max(testX, axis=0), np.min(testX, axis=0)
trainX = scale(trainX, trainX_min, trainX_max)
testX = scale(testX, testX_min, testX_max)
trainX_mean, trainX_std = np.mean(trainX, axis=0), np.std(trainX, axis=0)
testX_mean, testX_std = np.mean(testX, axis=0), np.std(testX, axis=0)
trainX = normalize(trainX, trainX_mean, trainX_std)
testX = normalize(testX, testX_mean, testX_std)
no_features = trainX.shape[1]
x = T.matrix('x') # data sample
d = T.matrix('d') # desired output
no_samples = T.scalar('no_samples')
# initialize weights and biases for hidden layer(s) and output layer
w_o = theano.shared(np.random.randn(no_hidden2)*.01, floatX )
b_o = theano.shared(np.random.randn()*.01, floatX)
w_h1 = theano.shared(np.random.randn(no_features, no_hidden1)*.01, floatX )
b_h1 = theano.shared(np.random.randn(no_hidden1)*0.01, floatX)
w_h2 = theano.shared(np.random.randn(no_hidden1, no_hidden2)*.01, floatX )
b_h2 = theano.shared(np.random.randn(no_hidden2)*0.01, floatX)
w_h3 = theano.shared(np.random.randn(no_hidden2, no_hidden2)*.01, floatX )
b_h3 = theano.shared(np.random.randn(no_hidden2)*0.01, floatX)
init_w_o = w_o.get_value()
init_b_o = b_o.get_value()
init_w_h1 = w_h1.get_value()
init_b_h1 = b_h1.get_value()
init_w_h2 = w_h2.get_value()
init_b_h2 = b_h2.get_value()
# learning rate
alpha = theano.shared(learning_rate, floatX)
#Define mathematical expression:
h1_out = T.nnet.sigmoid(T.dot(x, w_h1) + b_h1)
h2_out = T.nnet.sigmoid(T.dot(h1_out, w_h2) + b_h2)
h3_out = T.nnet.sigmoid(T.dot(h2_out, w_h3) + b_h3)
y4 = T.dot(h2_out, w_o) + b_o #output for 4 hidden layers
y5 = T.dot(h3_out, w_o) + b_o #output for 5 hidden layers
#we need to differentiate the expression for each network
cost4 = T.abs_(T.mean(T.sqr(d - y4)))
accuracy4 = T.mean(d - y4)
cost5 = T.abs_(T.mean(T.sqr(d - y5)))
accuracy5 = T.mean(d - y5)
#define gradients
dw_o, db_o, dw_h, db_h, dw_h2, db_h2 = T.grad(cost4, [w_o, b_o, w_h1, b_h1, w_h2, b_h2])
dw_o, db_o, dw_h, db_h, dw_h2, db_h2, dw_h3, db_h3 = T.grad(cost5, [w_o, b_o, w_h1, b_h1, w_h2, b_h2, w_h3, b_h3])
train4 = theano.function(
inputs = [x, d],
outputs = cost4,
updates = [[w_o, w_o - alpha*dw_o],
[b_o, b_o - alpha*db_o],
[w_h1, w_h1 - alpha*dw_h],
[b_h1, b_h1 - alpha*db_h],
[w_h2, w_h2 - alpha*dw_h2],
[b_h2, b_h2 - alpha*db_h2]],
allow_input_downcast=True
)
train5 = theano.function(
inputs = [x, d],
outputs = cost5,
updates = [[w_o, w_o - alpha*dw_o],
[b_o, b_o - alpha*db_o],
[w_h1, w_h1 - alpha*dw_h],
[b_h1, b_h1 - alpha*db_h],
[w_h2, w_h2 - alpha*dw_h2],
[b_h2, b_h2 - alpha*db_h2],
[w_h3, w_h3 - alpha*dw_h3],
[b_h3, b_h3 - alpha*db_h3]],
allow_input_downcast=True
)
test4 = theano.function(
inputs = [x, d],
outputs = [y4, cost4, accuracy4],
allow_input_downcast=True
)
test5 = theano.function(
inputs = [x, d],
outputs = [y5, cost5, accuracy5],
allow_input_downcast=True
)
alpha.set_value(learning_rate)
networks = [4,5]
n=len(trainX)
for i in range(len(networks)):
train_cost = np.zeros(epochs)
test_cost = np.zeros(epochs)
test_accuracy = np.zeros(epochs)
w_o.set_value(init_w_o) #just needed for the second network
b_o.set_value(init_b_o)
w_h1.set_value(init_w_h1)
b_h1.set_value(init_b_h1)
w_h2.set_value(init_w_h2)
b_h2.set_value(init_b_h2)
min_error = 1e+15
best_iter = 0
for iter in range(epochs):
if iter % 100 == 0:
print(iter)
trainX, trainY = shuffle_data(trainX, trainY)
cost = 0.0
for start, end in zip(range(0, n, batch_size), range(batch_size, n, batch_size)):
if i==0:
cost += train4(trainX[start:end], np.transpose(trainY[start:end]))
else:
cost += train5(trainX[start:end], np.transpose(trainY[start:end]))
train_cost[iter] = cost/(n // batch_size)
if i==0:
pred, test_cost[iter], test_accuracy[iter] = test4(testX, np.transpose(testY))
else:
pred, test_cost[iter], test_accuracy[iter] = test5(testX, np.transpose(testY))
if test_cost[iter] < min_error:
best_iter = iter
min_error = test_cost[iter]
best_w_o = w_o.get_value()
best_b_o = b_o.get_value()
best_w_h1 = w_h1.get_value()
best_b_h1 = b_h1.get_value()
best_w_h2 = w_h2.get_value()
best_b_h2 = b_h2.get_value()
if i==1:
best_w_h3 = w_h3.get_value()
best_b_h3 = b_h3.get_value()
#set weights and biases to values at which performance was best
w_o.set_value(best_w_o)
b_o.set_value(best_b_o)
w_h1.set_value(best_w_h1)
b_h1.set_value(best_b_h1)
w_h2.set_value(best_w_h2)
b_h2.set_value(best_b_h2)
if i==1:
w_h3.set_value(best_w_h3)
b_h3.set_value(best_b_h3)
best_pred, best_cost, best_accuracy = test5(testX, np.transpose(testY))
else:
best_pred, best_cost, best_accuracy = test4(testX, np.transpose(testY))
print(str(networks[i])+'-layers network : Minimum error: %.1f, Best accuracy %.1f, Number of Iterations: %d'%(best_cost, best_accuracy, best_iter))
plt.figure(1,figsize=(15,9))
plt.subplot(121)
plt.plot(range(epochs), test_accuracy)
plt.xlabel('Iterations')
plt.ylabel('Accuracy')
plt.title('Test accuracy for 4-layer and 5-layer networks')
plt.subplot(122)
plt.plot(range(epochs), test_accuracy)
plt.axis([0, epochs, -10000, 20000])
plt.xlabel('Iterations')
plt.ylabel('Accuracy')
plt.title('Test accuracy for 4-layer and 5-layer networks rescaled')
plt.legend(['hidden layers = 4', 'hidden layers = 5'], loc='lower right')
plt.savefig('p_1b_4_5_layers_networks.png')
plt.show()
| [
"noreply@github.com"
] | TomDrnd.noreply@github.com |
f79c36b7ee1617a0ebd4a27bb4e854e57aab82ee | c337edfc8b93fe65b459e4da38ce8e8da3c5aa8b | /Lista 3/I-Beautiful.py | ac79a098b16d6a06fcda981867e065f92b930001 | [] | no_license | nayarasps/AA-Iniciante-20.3 | a0801599d4bc2db5b1f53146a5b9b5d67970679a | c0d7a474cf5831f383cc208a7ed8545f2c132a36 | refs/heads/main | 2023-01-01T17:03:00.041248 | 2020-10-19T03:34:46 | 2020-10-19T03:34:46 | 301,019,609 | 1 | 0 | null | 2020-10-19T03:34:47 | 2020-10-04T01:47:32 | Python | UTF-8 | Python | false | false | 476 | py | # @author Nayara Souza
# UFCG - Universidade Federal de Campina Grande
# AA - Basico
n = int(input())
for i in range(n):
v = int(input())
l = list(map(int, input().split()))
s = ''
da = {}
e = v
d = 0
for j in range(v):
da[l[j]] = j
for t in range(1,v+1):
e = min(e,da[t])
d = max(d,da[t])
if (d-e)+1 > t:
s += '0'
else:
s += '1'
print(s)
| [
"nayarasps10@gmail.com"
] | nayarasps10@gmail.com |
9e92114db3572cb13555baf1a6ae66b4dd16ce66 | 571e7c2ef8298ae45e86c28eba46815dd1c87b61 | /folioport/apps/account/migrations/0011_auto__chg_field_folioportuser_site.py | c175aad57b6272143ec753f7c0e73187c93451b5 | [] | no_license | mmoravcik/folioport | c5baf50c1abc0919b4b210c7ff4cccf6684f57a2 | 1df6b9cbe533ca4cb1f8538c66975bbaaff3068a | refs/heads/master | 2020-04-06T04:09:04.707884 | 2015-03-31T18:52:26 | 2015-03-31T18:52:26 | 4,400,112 | 0 | 0 | null | 2013-06-16T15:36:04 | 2012-05-21T23:47:21 | Python | UTF-8 | Python | false | false | 3,707 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'FolioportUser.site'
db.alter_column(u'account_folioportuser', 'site_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'], null=True))
def backwards(self, orm):
# Changing field 'FolioportUser.site'
db.alter_column(u'account_folioportuser', 'site_id', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['sites.Site']))
models = {
u'account.folioportuser': {
'Meta': {'object_name': 'FolioportUser'},
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'google_analytics_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo_width': ('django.db.models.fields.IntegerField', [], {'default': '120', 'blank': 'True'}),
'own_blog_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'site_catch_phrase': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'site_logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'social_media': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['account.SocialMedia']", 'symmetrical': 'False', 'blank': 'True'}),
'subdomain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'use_system_blog': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'account.socialmedia': {
'Meta': {'object_name': 'SocialMedia'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'html_code': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'script_code': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['account'] | [
"matus@potatolondon.com"
] | matus@potatolondon.com |
003a685dd62d0fc0c3566edb06ab0528003a429a | 39bc2aadd859051b59c82309ca08af53d8deecac | /src/awkward/_v2/forms/recordform.py | e99d450064824e0b4f96aa801dc3f3f932b89d1b | [
"BSD-3-Clause"
] | permissive | emg110/awkward-1.0 | c626bfd1f91dc8b857bd5ca48098414c690d775d | 8aa771b064f6d6b51872390cc2288aed8b952ed8 | refs/heads/main | 2023-08-01T12:35:08.333049 | 2021-09-23T20:38:38 | 2021-09-23T20:38:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,084 | py | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import awkward as ak
from awkward._v2.forms.form import Form, _parameters_equal, nonvirtual
from awkward._v2.forms.indexedform import IndexedForm
class RecordForm(Form):
def __init__(
self,
contents,
keys,
has_identifier=False,
parameters=None,
form_key=None,
):
if not isinstance(contents, Iterable):
raise TypeError(
"{0} 'contents' must be iterable, not {1}".format(
type(self).__name__, repr(contents)
)
)
for content in contents:
if not isinstance(content, Form):
raise TypeError(
"{0} all 'contents' must be Form subclasses, not {1}".format(
type(self).__name__, repr(content)
)
)
if keys is not None and not isinstance(keys, Iterable):
raise TypeError(
"{0} 'keys' must be iterable, not {1}".format(
type(self).__name__, repr(contents)
)
)
self._keys = keys
self._contents = list(contents)
self._init(has_identifier, parameters, form_key)
@property
def keys(self):
if self._keys is None:
return [str(i) for i in range(len(self._contents))]
else:
return self._keys
@property
def is_tuple(self):
return self._keys is None
@property
def contents(self):
return self._contents
def __repr__(self):
args = [repr(self._contents), repr(self._keys)] + self._repr_args()
return "{0}({1})".format(type(self).__name__, ", ".join(args))
def index_to_key(self, index):
if 0 <= index < len(self._contents):
if self._keys is None:
return str(index)
else:
return self._keys[index]
else:
raise IndexError(
"no index {0} in record with {1} fields".format(
index, len(self._contents)
)
)
def key_to_index(self, key):
if self._keys is None:
try:
i = int(key)
except ValueError:
pass
else:
if 0 <= i < len(self._contents):
return i
else:
try:
i = self._keys.index(key)
except ValueError:
pass
else:
return i
raise IndexError(
"no field {0} in record with {1} fields".format(
repr(key), len(self._contents)
)
)
def haskey(self, key):
if self._keys is None:
try:
i = int(key)
except ValueError:
return False
else:
return 0 <= i < len(self._contents)
else:
return key in self._keys
def content(self, index_or_key):
if ak._util.isint(index_or_key):
index = index_or_key
elif ak._util.isstr(index_or_key):
index = self.key_to_index(index_or_key)
else:
raise TypeError(
"index_or_key must be an integer (index) or string (key), not {0}".format(
repr(index_or_key)
)
)
return self._contents[index]
def _tolist_part(self, verbose, toplevel):
out = {"class": "RecordArray"}
contents_tolist = [
content._tolist_part(verbose, toplevel=False) for content in self._contents
]
if self._keys is not None:
out["contents"] = dict(zip(self._keys, contents_tolist))
else:
out["contents"] = contents_tolist
return self._tolist_extra(out, verbose)
def __eq__(self, other):
if isinstance(other, RecordForm):
if (
self._has_identifier == other._has_identifier
and self._form_key == other._form_key
and self.is_tuple == other.is_tuple
and len(self._contents) == len(other._contents)
and _parameters_equal(self._parameters, other._parameters)
):
if self.is_tuple:
for i in range(len(self._contents)):
if self._contents[i] != other._contents[i]:
return False
else:
return True
else:
if set(self._keys) != set(other._keys):
return False
else:
for key, content in zip(self._keys, self._contents):
if content != other.content(key):
return False
else:
return True
else:
return False
else:
return False
def generated_compatibility(self, other):
other = nonvirtual(other)
if other is None:
return True
elif isinstance(other, RecordForm):
if self.is_tuple == other.is_tuple:
self_keys = set(self.keys)
other_keys = set(other.keys)
if self_keys == other_keys:
return _parameters_equal(
self._parameters, other._parameters
) and all(
self.content(x).generated_compatibility(other.content(x))
for x in self_keys
)
else:
return False
else:
return False
else:
return False
def _getitem_range(self):
return RecordForm(
self._contents,
self._keys,
has_identifier=self._has_identifier,
parameters=self._parameters,
form_key=None,
)
def _getitem_field(self, where, only_fields=()):
if len(only_fields) == 0:
return self.content(where)
else:
nexthead, nexttail = ak._v2._slicing.headtail(only_fields)
if ak._util.isstr(nexthead):
return self.content(where)._getitem_field(nexthead, nexttail)
else:
return self.content(where)._getitem_fields(nexthead, nexttail)
def _getitem_fields(self, where, only_fields=()):
indexes = [self.key_to_index(key) for key in where]
if self._keys is None:
keys = None
else:
keys = [self._keys[i] for i in indexes]
if len(only_fields) == 0:
contents = [self.content(i) for i in indexes]
else:
nexthead, nexttail = ak._v2._slicing.headtail(only_fields)
if ak._util.isstr(nexthead):
contents = [
self.content(i)._getitem_field(nexthead, nexttail) for i in indexes
]
else:
contents = [
self.content(i)._getitem_fields(nexthead, nexttail) for i in indexes
]
return RecordForm(
contents,
keys,
has_identifier=self._has_identifier,
parameters=None,
form_key=None,
)
def _carry(self, allow_lazy):
if allow_lazy:
return IndexedForm(
"i64",
self,
has_identifier=self._has_identifier,
parameters=None,
form_key=None,
)
else:
return RecordForm(
self._contents,
self._keys,
has_identifier=self._has_identifier,
parameters=self._parameters,
form_key=None,
)
@property
def purelist_isregular(self):
return True
@property
def purelist_depth(self):
return 1
@property
def minmax_depth(self):
if len(self._contents) == 0:
return (0, 0)
mins, maxs = [], []
for content in self._contents:
mindepth, maxdepth = content.minmax_depth
mins.append(mindepth)
maxs.append(maxdepth)
return (min(mins), max(maxs))
@property
def branch_depth(self):
if len(self._contents) == 0:
return (False, 1)
anybranch = False
mindepth = None
for content in self._contents:
branch, depth = content.branch_depth
if mindepth is None:
mindepth = depth
if branch or mindepth != depth:
anybranch = True
if mindepth > depth:
mindepth = depth
return (anybranch, mindepth)
| [
"noreply@github.com"
] | emg110.noreply@github.com |
2aaceb0750b723c6938838e69f0c254ae825d98f | 1e41a85b4bc9ca847686a13ed729bc277a295f3e | /calculator_app/migrations/0001_initial.py | 50edb92d4d54aa84a7cf0b87baca44d0a9cb84b8 | [] | no_license | eileendwyer/django_calculator | 6c6827556e227f03a5cb1d63a7ab41a6ad3b3f2f | 25138b3244030a8bf83714b460d7f65e835be905 | refs/heads/master | 2021-01-16T20:38:42.796930 | 2016-06-16T12:03:55 | 2016-06-16T12:03:55 | 61,143,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-16 10:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Operation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_number', models.FloatField()),
('operator_choice', models.CharField(max_length=2)),
('second_number', models.FloatField()),
('result', models.FloatField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"eolson@smalltalku.com"
] | eolson@smalltalku.com |
02d1e68bba90632556650dfb07a3f9a032336629 | 47eb127f940d262bdd2a84fa61122fa30eae9fd0 | /LibraryTest.py | e87b87ff3321b98a970aff958a30335cd1979252 | [] | no_license | rpatelk/SimpleLibrary | df4e8133b3ebcc9adbc4b79fbef7de0988337c82 | 53c056677119a82a08c950a405c6ad9039677dc6 | refs/heads/main | 2023-03-30T02:25:11.124086 | 2021-04-03T22:49:30 | 2021-04-03T22:49:30 | 352,394,531 | 0 | 0 | null | 2021-04-03T22:49:01 | 2021-03-28T17:33:27 | null | UTF-8 | Python | false | false | 2,858 | py | import unittest
from Book import Book
from Library import Library
# Test class for Library class
# @author Raj Patel
class BookTest(unittest.TestCase):
# Tests the methods associated with library name.
def test_name(self):
b = Book("Hatchet", "Gary Paulsen", "Survival, Fiction", "61743")
b2 = Book("Outsiders", "S. E. Hinton", "Drama, Fiction", "52963")
books = [b, b2]
l = Library("Hello Library", "123 Hello World", books)
# Passing Test
self.assertEqual(l.getLibraryName(), "Hello Library")
l.setLibraryName("Simple Library")
self.assertEqual(l.getLibraryName(), "Simple Library")
# Failing Test
l.setLibraryName("Hello Library")
self.assertNotEqual(l.getLibraryName(), "Simple Library")
l.setLibraryName("Simple Library")
self.assertNotEqual(l.getLibraryName(), "Hello Library")
# Tests the methods associated with address.
def test_name(self):
b = Book("Hatchet", "Gary Paulsen", "Survival, Fiction", "61743")
b2 = Book("Outsiders", "S. E. Hinton", "Drama, Fiction", "52963")
books = [b, b2]
l = Library("Hello Library", "123 Hello World", books)
# Passing Test
self.assertEqual(l.getAddress(), "123 Hello World")
l.setAddress("456 Simple Library St.")
self.assertEqual(l.getAddress(), "456 Simple Library St.")
# Failing Test
l.setAddress("123 Hello World")
self.assertNotEqual(l.getAddress(), "456 Simple Library St.")
l.setAddress("456 Simple Library St.")
self.assertNotEqual(l.getAddress(), "123 Hello World")
# Tests the methods associated with list of books.
def test_name(self):
b = Book("Hatchet", "Gary Paulsen", "Survival, Fiction", "61743")
b2 = Book("Outsiders", "S. E. Hinton", "Drama, Fiction", "52963")
books = [b, b2]
l = Library("Hello Library", "123 Hello World", books)
# Test Size of Book list.
self.assertEqual(len(l.getListOfBooks()), 2)
# Add new book and test size.
b3 = Book("Tangerine", "Edward Bloor", "Fiction", "10446")
l.getListOfBooks().append(b3)
# Fail
self.assertNotEqual(len(l.getListOfBooks()), 2)
# Pass
self.assertEqual(len(l.getListOfBooks()), 3)
#Test correct books
list = l.getListOfBooks()
self.assertEqual(list[0].toString(), "Name: Hatchet\nAuthor: Gary Paulsen\nGenre: Survival, Fiction\nId: 61743\nChecked In: Yes")
self.assertEqual(list[1].toString(), "Name: Outsiders\nAuthor: S. E. Hinton\nGenre: Drama, Fiction\nId: 52963\nChecked In: Yes")
self.assertEqual(list[2].toString(), "Name: Tangerine\nAuthor: Edward Bloor\nGenre: Fiction\nId: 10446\nChecked In: Yes")
if __name__ == '__main__':
unittest.main() | [
"rkpatel4@ncsu.edu"
] | rkpatel4@ncsu.edu |
f0ca656cee2c4b280753d6bd9f26bbb9da0f3a6e | 33c3b437c1018c8650de6346d51273acbdcc9331 | /oms/handlers/check/check_content_type.py | 886aac4aaa2f0d1d6f53c6ff445289a603a62063 | [
"MIT"
] | permissive | guoxu3/oms_backend | 09e8766f58d52a3e1bf047b41b9a3e1a249df2a3 | 8865a78bcf133339564568b40d879d7b516b4c96 | refs/heads/master | 2021-06-15T17:31:24.558954 | 2017-04-24T02:43:46 | 2017-04-24T02:43:46 | 68,985,344 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from lib import verify
def check_content_type(request):
ok = True
info = ""
content_type = dict(request.headers)['Content-Type']
body = request.body
if not verify.is_content_type_right(content_type):
ok = False
info = "Request content-type format error"
if not verify.is_json(body):
ok = False
info = 'Request body format error'
return ok, info
| [
"guoxu3@qq.com"
] | guoxu3@qq.com |
b46eab83eca8e1c46b34e189710aef24c51c03b9 | 39f5aee2c4d50ccb5beffca529a829b840eb9e63 | /betrobot/grabbing/intelbet/downloading.py | 47e1a632567605d43b0de73bcd2f39dd92e7dd11 | [] | no_license | Gufran33/betrobot | 24e65f6edca34c4d47e8cad24f75b571663d8749 | 6059e5f5ed56a014c40e718761a9649059a9c11d | refs/heads/master | 2023-03-18T02:08:13.666394 | 2017-12-07T20:04:10 | 2017-12-07T20:04:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from betrobot.util.requests_util import requests_get
def intelbet_get(*args, **kwargs):
response = requests_get('www.intelbet.ru', *args, **kwargs)
if response is None:
return None
return response.text
| [
"kuraga333@mail.ru"
] | kuraga333@mail.ru |
0ed6ce89cb3196a33d6854d121592735fa2cc028 | 194852f65ef166f765e1a80bb230cbea79ad3ea9 | /example/Bomberman/Runtime/GING/__init__.py | b848d28fadf8a4c63dbabbf13f6c44b9d80f42a0 | [] | no_license | limBuddies/GING | 03f78bbd6d0b49e6991edca3a3e00a0824c0c907 | b1a4ccde2b2958de85cd3edef3242904df5896b0 | refs/heads/main | 2023-04-28T17:34:39.685308 | 2021-05-19T22:47:24 | 2021-05-19T22:47:24 | 367,539,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from .Vector2 import Vector2
from .Transform import Transform
from .Sprite import Sprite
from PyQt5.QtCore import Qt as KeyCode
| [
"1048035187@qq.com"
] | 1048035187@qq.com |
d358bdae6b436fae0c7e1869e34bbdec63dd9da6 | 82e15a148febfb0f3202b6eecbbbf3195836729a | /AnomalyDetection/Clustering.py | e0ff41c2d3791e8df9d30af0725f122686d0c813 | [] | no_license | jiafangdi-guang/Anomaly-Detection-in-Social-Networks | 71316bd8ce53aa1acfdc1c613cc95f4364a13bda | c506c88cab2d61282dda6c2ced999c2693e66dbc | refs/heads/master | 2022-11-28T20:40:07.084887 | 2020-07-29T03:35:18 | 2020-07-29T03:35:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # -*- coding: utf-8 -*-
# Created by saki on 2017/4/30.
from sklearn.cluster import KMeans
import numpy as np
import string
file = open('/Users/saki/Desktop/AnomalyDetection/feat/enron_feat.txt', 'r')
n_nodes = 0
nodes = []
vectors = []
for line in file.readlines():
n_nodes += 1
array = line.split(' ')
nodes.append([array[0], array[17].strip()])
vec = []
for i in range(1, 18):
vec.append(string.atof(array[i]))
# print n_nodes, vec
vectors.append(vec)
file.close()
emb = np.array(vectors)
k_clusters = 70
print 'Nodes:', n_nodes
print 'Clusters:', k_clusters
kmeans = KMeans(n_clusters = k_clusters).fit(emb)
print kmeans.cluster_centers_
outFile = open('/Users/saki/Desktop/AnomalyDetection/comm/enron_kmeans.csv', 'w')
num = 0
for label in kmeans.labels_:
outFile.write(nodes[num][0] + ',' + str(label) + '\n')
num += 1
outFile.close() | [
"zuoqi.zhang@gmail.com"
] | zuoqi.zhang@gmail.com |
641fdbba25341aa0ec1541b59c712c5065f0647f | 1c435576fc0a91e484589a6f6959e5a455939e48 | /main/models.py | 5b4d79d9b19bc2ee7da6c124aefb2e9befcd4d5f | [] | no_license | yaena1223/hw5 | 503d27453344cc2a660b656d407bd545df13f14c | 35b9e92119bb14d82311ba4aaa51993415d7dd40 | refs/heads/main | 2023-04-25T17:32:34.460265 | 2021-06-01T10:21:32 | 2021-06-01T10:21:32 | 372,784,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from django.db import models
# Create your models here.
class Post(models.Model):
id = models.AutoField(primary_key = True)
title = models.CharField(max_length=200)
writer = models.CharField(max_length=100)
pub_date = models.DateTimeField()
body = models.TextField()
image = models.ImageField(upload_to = "post/",blank = True, null = True)
def __str__(self):
return self.title
def summary(self):
return self.body[:20] | [
"rewrite_the_stars_@naver.com"
] | rewrite_the_stars_@naver.com |
215416dff24ac71fb0ee6b5b123c03219240b7c2 | dc29b57b9a025287574117a4e7c7fc27663d6063 | /pydemo/src/thread/multiprocess.py | cba839f261a09fe087364507e0b5bbabafbe06fc | [] | no_license | bspeng922/pyutils | e4d0e988d5c168a3a9e97da2d09c6b714faa2c9a | 4fa6c75a7159e03383c0f89d67d1ca37f3d0f0a5 | refs/heads/master | 2020-04-11T09:59:19.089455 | 2017-01-06T07:42:20 | 2017-01-06T07:42:20 | 7,434,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | import time
import sys
import urllib2
import HTMLParser
from bs4 import BeautifulSoup
import multiprocessing
hosts = ["http://www.baidu.com", "http://www.amazon.com","http://www.ibm.com","http://www.pystack.org",
"http://www.python.org","http://www.microsoft.com"]
def read(host):
try:
context = urllib2.urlopen(host, timeout=6)
except:
print "load %s failure"%host
return
try:
title = BeautifulSoup(context).title.string
except HTMLParser.HTMLParseError:
print "parser %s title failure"%host
return
print "%s - %s"%(host, title)
class Reader(multiprocessing.Process):
def __init__(self, queue):
multiprocessing.Process.__init__(self)
self.queue = queue
def run(self):
while 1:
host = self.queue.get()
read(host)
self.queue.task_done()
def concuryread():
start = time.time()
queue = multiprocessing.JoinableQueue()
process = []
for i in range(10):
for host in hosts:
queue.put(host)
process.append(Reader(queue))
[p.start() for p in process]
queue.join()
end = time.time()
print "Elapsed Time: %d"%(end-start)
if __name__ == "__main__":
concuryread() | [
"bspeng922@gmail.com"
] | bspeng922@gmail.com |
1d4c1a6ab1573020b7a7835556029878c3f1e9d6 | 2c238be39b551af4caf0aa1a32f32502b86e00f1 | /examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py | 0e27896b1c63a848b7017d0dbe687e5ef454a374 | [
"Apache-2.0"
] | permissive | marinatricanico/transformers | eaeaf9bf6972e2083ec262c71fc77d06dfee1dbd | c89180a9de1fc2e98654812fd1c233c3bc6a8d43 | refs/heads/master | 2023-07-03T17:07:45.394194 | 2021-08-11T16:09:41 | 2021-08-11T16:09:41 | 395,073,287 | 1 | 0 | Apache-2.0 | 2021-08-11T17:50:05 | 2021-08-11T17:50:04 | null | UTF-8 | Python | false | false | 16,566 | py | import argparse
import logging
import os
import sys
import tempfile
from pathlib import Path
import pytest
import pytorch_lightning as pl
import torch
from torch import nn
import lightning_base
from convert_pl_checkpoint_to_hf import convert_pl_to_hf
from distillation import distill_main
from finetune import SummarizationModule, main
from parameterized import parameterized
from run_eval import generate_summaries_or_translations
from transformers import AutoConfig, AutoModelForSeq2SeqLM
from transformers.hf_api import HfApi
from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow
from utils import label_smoothed_nll_loss, lmap, load_json
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
CUDA_AVAILABLE = torch.cuda.is_available()
CHEAP_ARGS = {
"max_tokens_per_batch": None,
"supervise_forward": True,
"normalize_hidden": True,
"label_smoothing": 0.2,
"eval_max_gen_length": None,
"eval_beams": 1,
"val_metric": "loss",
"save_top_k": 1,
"adafactor": True,
"early_stopping_patience": 2,
"logger_name": "default",
"length_penalty": 0.5,
"cache_dir": "",
"task": "summarization",
"num_workers": 2,
"alpha_hid": 0,
"freeze_embeds": True,
"enc_only": False,
"tgt_suffix": "",
"resume_from_checkpoint": None,
"sortish_sampler": True,
"student_decoder_layers": 1,
"val_check_interval": 1.0,
"output_dir": "",
"fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp
"no_teacher": False,
"fp16_opt_level": "O1",
"gpus": 1 if CUDA_AVAILABLE else 0,
"n_tpu_cores": 0,
"max_grad_norm": 1.0,
"do_train": True,
"do_predict": True,
"accumulate_grad_batches": 1,
"server_ip": "",
"server_port": "",
"seed": 42,
"model_name_or_path": "sshleifer/bart-tiny-random",
"config_name": "",
"tokenizer_name": "facebook/bart-large",
"do_lower_case": False,
"learning_rate": 0.3,
"lr_scheduler": "linear",
"weight_decay": 0.0,
"adam_epsilon": 1e-08,
"warmup_steps": 0,
"max_epochs": 1,
"train_batch_size": 2,
"eval_batch_size": 2,
"max_source_length": 12,
"max_target_length": 12,
"val_max_target_length": 12,
"test_max_target_length": 12,
"fast_dev_run": False,
"no_cache": False,
"n_train": -1,
"n_val": -1,
"n_test": -1,
"student_encoder_layers": 1,
"freeze_encoder": False,
"auto_scale_batch_size": False,
"overwrite_output_dir": False,
"student": None,
}
def _dump_articles(path: Path, articles: list):
content = "\n".join(articles)
Path(path).open("w").writelines(content)
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
T5_TINIER = "sshleifer/t5-tinier-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
FSMT_TINY = "stas/tiny-wmt19-en-de"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
def make_test_data_dir(tmp_dir):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
_dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
return tmp_dir
class TestSummarizationDistiller(TestCasePlus):
    """End-to-end tests for the seq2seq distillation CLI (``distill_main``) and helpers."""
    @classmethod
    def setUpClass(cls):
        logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
        return cls
    @slow
    @require_torch_gpu
    def test_hub_configs(self):
        """I put require_torch_gpu cause I only want this to run with self-scheduled."""
        model_list = HfApi().model_list()
        org = "sshleifer"
        model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)]
        allowed_to_be_broken = ["sshleifer/blenderbot-3B", "sshleifer/blenderbot-90M"]
        failures = []
        for m in model_ids:
            if m in allowed_to_be_broken:
                continue
            try:
                AutoConfig.from_pretrained(m)
            except Exception:
                failures.append(m)
        assert not failures, f"The following models could not be loaded through AutoConfig: {failures}"
    def test_distill_no_teacher(self):
        """Distillation CLI without a teacher (plain student training path)."""
        updates = dict(student_encoder_layers=2, student_decoder_layers=1, no_teacher=True)
        self._test_distiller_cli(updates)
    def test_distill_checkpointing_with_teacher(self):
        """Checkpointing: expect 1 .ckpt, 2 .bin dumps, and a usable best_tfmr export."""
        updates = dict(
            student_encoder_layers=2,
            student_decoder_layers=1,
            max_epochs=4,
            val_check_interval=0.25,
            alpha_hid=2.0,
            model_name_or_path="IGNORE_THIS_IT_DOESNT_GET_USED",
        )
        model = self._test_distiller_cli(updates, check_contents=False)
        ckpts = list(Path(model.output_dir).glob("*.ckpt"))
        self.assertEqual(1, len(ckpts))
        transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin"))
        self.assertEqual(len(transformer_ckpts), 2)
        examples = lmap(str.strip, Path(model.hparams.data_dir).joinpath("test.source").open().readlines())
        out_path = tempfile.mktemp()  # XXX: not being cleaned up
        generate_summaries_or_translations(examples, out_path, str(model.output_dir / "best_tfmr"))
        self.assertTrue(Path(out_path).exists())
        out_path_new = self.get_auto_remove_tmp_dir()
        convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new)
        assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin"))
    def test_loss_fn(self):
        """Compare label_smoothed_nll_loss against the model's own loss (known mismatch, see TODO)."""
        model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY)
        input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"]
        target_ids = torch.tensor([[0, 4, 8, 2], [0, 8, 2, 1]], dtype=torch.long, device=model.device)
        decoder_input_ids = target_ids[:, :-1].contiguous()  # Why this line?
        lm_labels = target_ids[:, 1:].clone()  # why clone?
        model_computed_loss = model(
            input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, labels=lm_labels, use_cache=False
        ).loss
        logits = model(input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, use_cache=False).logits
        lprobs = nn.functional.log_softmax(logits, dim=-1)
        smoothed_loss, nll_loss = label_smoothed_nll_loss(
            lprobs, lm_labels, 0.1, ignore_index=model.config.pad_token_id
        )
        with self.assertRaises(AssertionError):
            # TODO: understand why this breaks
            self.assertEqual(nll_loss, model_computed_loss)
    def test_distill_mbart(self):
        """Distill an mBART teacher into a smaller student on the translation task."""
        updates = dict(
            student_encoder_layers=2,
            student_decoder_layers=1,
            num_train_epochs=4,
            val_check_interval=0.25,
            alpha_hid=2.0,
            task="translation",
            model_name_or_path="IGNORE_THIS_IT_DOESNT_GET_USED",
            tokenizer_name=MBART_TINY,
            teacher=MBART_TINY,
            src_lang="en_XX",
            tgt_lang="ro_RO",
        )
        model = self._test_distiller_cli(updates, check_contents=False)
        assert model.model.config.model_type == "mbart"
        ckpts = list(Path(model.output_dir).glob("*.ckpt"))
        self.assertEqual(1, len(ckpts))
        transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin"))
        all_files = list(Path(model.output_dir).glob("best_tfmr/*"))
        assert len(all_files) > 2
        self.assertEqual(len(transformer_ckpts), 2)
    def test_distill_t5(self):
        """Distill T5 with the hidden-state alignment loss enabled (alpha_hid)."""
        updates = dict(
            student_encoder_layers=1,
            student_decoder_layers=1,
            alpha_hid=2.0,
            teacher=T5_TINY,
            model_name_or_path=T5_TINY,
            tokenizer_name=T5_TINY,
        )
        self._test_distiller_cli(updates)
    def test_distill_different_base_models(self):
        """Teacher and student may come from different checkpoints of the same family."""
        updates = dict(
            teacher=T5_TINY,
            student=T5_TINIER,
            model_name_or_path=T5_TINIER,
            tokenizer_name=T5_TINIER,
        )
        self._test_distiller_cli(updates)
    def _test_distiller_cli(self, updates, check_contents=True):
        """Run distill_main with CHEAP_ARGS overridden by *updates*; optionally verify artifacts/metrics."""
        default_updates = dict(
            label_smoothing=0.0,
            early_stopping_patience=-1,
            train_batch_size=1,
            eval_batch_size=2,
            max_epochs=2,
            alpha_mlm=0.2,
            alpha_ce=0.8,
            do_predict=True,
            model_name_or_path="sshleifer/tinier_bart",
            teacher=CHEAP_ARGS["model_name_or_path"],
            val_check_interval=0.5,
        )
        default_updates.update(updates)
        args_d: dict = CHEAP_ARGS.copy()
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        output_dir = self.get_auto_remove_tmp_dir()
        args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates)
        model = distill_main(argparse.Namespace(**args_d))
        if not check_contents:
            return model
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        ckpt_files = [p for p in contents if p.endswith("ckpt")]
        assert len(ckpt_files) > 0
        self.assertIn("test_generations.txt", contents)
        self.assertIn("test_results.txt", contents)
        metrics = load_json(model.metrics_save_path)
        last_step_stats = metrics["val"][-1]
        self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01)
        self.assertGreaterEqual(1.0, last_step_stats["val_avg_gen_time"])
        self.assertIsInstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        # one eval per val_check_interval per epoch, plus the initial sanity eval
        desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) + 1)
        self.assertEqual(len(metrics["val"]), desired_n_evals)
        self.assertEqual(len(metrics["test"]), 1)
        return model
class TestTheRest(TestCasePlus):
    """Finetuning CLI tests: main() across model families, config overrides, lr schedulers."""
    @parameterized.expand(
        [T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY],
    )
    def test_finetune(self, model):
        """Finetune each tiny model; check freeze flags and embedding (un)tying per family."""
        args_d: dict = CHEAP_ARGS.copy()
        task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization"
        args_d["label_smoothing"] = 0.1 if task == "translation" else 0
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        output_dir = self.get_auto_remove_tmp_dir()
        args_d.update(
            data_dir=tmp_dir,
            model_name_or_path=model,
            tokenizer_name=None,
            train_batch_size=2,
            eval_batch_size=2,
            output_dir=output_dir,
            do_predict=True,
            task=task,
            src_lang="en_XX",
            tgt_lang="ro_RO",
            freeze_encoder=True,
            freeze_embeds=True,
        )
        assert "n_train" in args_d
        args = argparse.Namespace(**args_d)
        module = main(args)
        input_embeds = module.model.get_input_embeddings()
        assert not input_embeds.weight.requires_grad
        if model == T5_TINY:
            lm_head = module.model.lm_head
            assert not lm_head.weight.requires_grad
            assert (lm_head.weight == input_embeds.weight).all().item()
        elif model == FSMT_TINY:
            fsmt = module.model.model
            embed_pos = fsmt.decoder.embed_positions
            assert not embed_pos.weight.requires_grad
            assert not fsmt.decoder.embed_tokens.weight.requires_grad
            # check that embeds are not the same
            assert fsmt.decoder.embed_tokens != fsmt.encoder.embed_tokens
        else:
            bart = module.model.model
            embed_pos = bart.decoder.embed_positions
            assert not embed_pos.weight.requires_grad
            assert not bart.shared.weight.requires_grad
            # check that embeds are the same
            assert bart.decoder.embed_tokens == bart.encoder.embed_tokens
            assert bart.decoder.embed_tokens == bart.shared
        example_batch = load_json(module.output_dir / "text_batch.json")
        assert isinstance(example_batch, dict)
        assert len(example_batch) >= 4
    def test_finetune_extra_model_args(self):
        """Dropout-style config overrides must reach model.config; unsupported ones must raise."""
        args_d: dict = CHEAP_ARGS.copy()
        task = "summarization"
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        args_d.update(
            data_dir=tmp_dir,
            tokenizer_name=None,
            train_batch_size=2,
            eval_batch_size=2,
            do_predict=False,
            task=task,
            src_lang="en_XX",
            tgt_lang="ro_RO",
            freeze_encoder=True,
            freeze_embeds=True,
        )
        # test models whose config includes the extra_model_args
        model = BART_TINY
        output_dir = self.get_auto_remove_tmp_dir()
        args_d1 = args_d.copy()
        args_d1.update(
            model_name_or_path=model,
            output_dir=output_dir,
        )
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            args_d1[p] = 0.5
        args = argparse.Namespace(**args_d1)
        model = main(args)
        for p in extra_model_params:
            assert getattr(model.config, p) == 0.5, f"failed to override the model config for param {p}"
        # test models whose config doesn't include the extra_model_args
        model = T5_TINY
        output_dir = self.get_auto_remove_tmp_dir()
        args_d2 = args_d.copy()
        args_d2.update(
            model_name_or_path=model,
            output_dir=output_dir,
        )
        unsupported_param = "encoder_layerdrop"
        args_d2[unsupported_param] = 0.5
        args = argparse.Namespace(**args_d2)
        with pytest.raises(Exception) as excinfo:
            model = main(args)
        assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute"
    def test_finetune_lr_schedulers(self):
        """--help must list the schedulers; a bad --lr_scheduler exits; a good one reaches hparams."""
        args_d: dict = CHEAP_ARGS.copy()
        task = "summarization"
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        model = BART_TINY
        output_dir = self.get_auto_remove_tmp_dir()
        args_d.update(
            data_dir=tmp_dir,
            model_name_or_path=model,
            output_dir=output_dir,
            tokenizer_name=None,
            train_batch_size=2,
            eval_batch_size=2,
            do_predict=False,
            task=task,
            src_lang="en_XX",
            tgt_lang="ro_RO",
            freeze_encoder=True,
            freeze_embeds=True,
        )
        # emulate finetune.py
        parser = argparse.ArgumentParser()
        parser = pl.Trainer.add_argparse_args(parser)
        parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
        args = {"--help": True}
        # --help test
        with pytest.raises(SystemExit) as excinfo:
            with CaptureStdout() as cs:
                args = parser.parse_args(args)
            assert False, "--help is expected to sys.exit"
        assert excinfo.type == SystemExit
        expected = lightning_base.arg_to_scheduler_metavar
        assert expected in cs.out, "--help is expected to list the supported schedulers"
        # --lr_scheduler=non_existing_scheduler test
        unsupported_param = "non_existing_scheduler"
        args = {f"--lr_scheduler={unsupported_param}"}
        with pytest.raises(SystemExit) as excinfo:
            with CaptureStderr() as cs:
                args = parser.parse_args(args)
            assert False, "invalid argument is expected to sys.exit"
        assert excinfo.type == SystemExit
        expected = f"invalid choice: '{unsupported_param}'"
        assert expected in cs.err, f"should have bailed on invalid choice of scheduler {unsupported_param}"
        # --lr_scheduler=existing_scheduler test
        supported_param = "cosine"
        args_d1 = args_d.copy()
        args_d1["lr_scheduler"] = supported_param
        args = argparse.Namespace(**args_d1)
        model = main(args)
        assert (
            getattr(model.hparams, "lr_scheduler") == supported_param
        ), f"lr_scheduler={supported_param} shouldn't fail"
| [
"noreply@github.com"
] | marinatricanico.noreply@github.com |
9983502c4c9ae45ecd7ad425ed9c5b3e0fd0d181 | 67e76193d99323d2a2c5eb88265fb6495ab3e978 | /oldexamples/eyetest.py | d2c0bb81c67ee1204047c11e2a9597c17dcc2c78 | [
"MIT"
] | permissive | wh28325/DeepEEG | 44348571ea4e5d9c21e0f0db26b5801cc29f4c1b | 7e2461bb26975589c529110d691a41b6d184ca58 | refs/heads/master | 2023-01-23T00:56:10.590746 | 2020-09-08T03:53:19 | 2020-09-08T03:53:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from utils import *
# Manual EEG sanity check for one subject of the 'bikepark' experiment.
# NOTE(review): LoadBVData / GrattonEmcpRaw come from the star-import of
# utils -- presumably BrainVision loading and Gratton EMCP eye-artifact
# correction; verify against utils.py.
data_dir = '/Users/kylemathewson/Desktop/'
exp = 'bikepark'
subs = [ '009']
sessions = ['quiet','traffic']
nsesh = len(sessions)
event_id = {'Standard': 1, 'Target': 2}
raw = LoadBVData(subs,sessions,data_dir,exp)
raw.plot(scalings='auto')  # inspect raw data before the second processing step
raw = GrattonEmcpRaw(raw)
raw.plot(scalings='auto')  # inspect again after processing
| [
"kylemath@gmail.com"
] | kylemath@gmail.com |
aa68a8225f735ee1ef00596abf186adb82c205d8 | e5ff9d373c3cac523c734fc712db24aa5c7a448d | /백준/문자열/10809.py | 562826e510b7a5ff82960aee6b220a82f67c112f | [] | no_license | yeonkyu-git/Algorithm | 0cc97a2b4d0fb50c69afc5521c63ccbae7f44a4e | 2b08691be7deade3efae67ce4d007c375525fbf9 | refs/heads/master | 2023-02-27T03:18:18.470695 | 2021-02-06T05:43:31 | 2021-02-06T05:43:31 | 333,090,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import sys
# BOJ 10809: for each letter a-z, print the index of its first occurrence
# in the input word, or -1 if the letter does not appear.
# (Input is assumed to be lowercase a-z, per the problem statement.)
first_index = [-1] * 26
word = sys.stdin.readline().strip()
for position, letter in enumerate(word):
    slot = ord(letter) - ord('a')
    if first_index[slot] == -1:
        first_index[slot] = position
print(' '.join(map(str, first_index)))
"dusrbpoiiij@naver.com"
] | dusrbpoiiij@naver.com |
f103902997394e8ecae92439b358d9a30c7b74b0 | 72dc1482dd520eb43ad3671a06256d9fd9c73e74 | /modules/Router.py | 5bc66e16dabadadd3429a2e4e62cef8009527fd3 | [] | no_license | mcdnmd/traceroute | 63405d938bfae9c2dc85d3e5804f994820a7dc3b | 71cebf65a5c30a16d890f8e730c99ad10d14ded3 | refs/heads/main | 2023-01-05T11:25:23.615100 | 2020-10-31T08:17:57 | 2020-10-31T08:17:57 | 306,633,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,081 | py | import socket
import time
from modules.SocketManager import SocketManager
from modules.TerminalWriter import TerminalWriter
class Router:
    """Drives a traceroute run: probes each hop with increasing TTL and prints results."""
    def __init__(self, traceroute):
        # *traceroute* is a config/state object defined elsewhere; this class
        # reads dest, ttl, max_hops, port, timeout, method, pack_per_hop and
        # calls is_a_destination on it.
        self.traceroute = traceroute
        self.sockmanager = SocketManager(self.traceroute.timeout)
        self.terminal_writer = TerminalWriter(self.traceroute.ttl)
    def start(self):
        """Main loop: probe the current hop, print, bump TTL, stop at the destination."""
        self.terminal_writer.add_start_line(self.traceroute.dest,
                                            self.traceroute.ttl,
                                            self.traceroute.max_hops)
        self.terminal_writer.print_buffer()
        while True:
            addr = self.make_request_to_intermediate_server()
            self.terminal_writer.print_buffer()
            self.traceroute.ttl += 1
            if self.traceroute.is_a_destination(addr):
                break
    def make_request_to_intermediate_server(self):
        """Send pack_per_hop probes at the current TTL; return the last replying address.

        receive_message returning (-1, -1) means the probe timed out -> print '*'.
        """
        current_addr = None
        for i in range(self.traceroute.pack_per_hop):
            receiver = self.sockmanager.create_receiver(self.traceroute.port)
            sender = self.sockmanager.create_sender(self.traceroute.ttl,
                                                    self.traceroute.method)
            start_time = time.time()
            self.sockmanager.send_message(sender, self.traceroute.dest,
                                          self.traceroute.port)
            current_addr, end_time = self.sockmanager.receive_message(receiver)
            if current_addr == -1 and end_time == -1:
                self.terminal_writer.add_text('*')
            else:
                # round-trip time in milliseconds
                ping = (end_time - start_time) * 1000
                self.terminal_writer.add_info_from_intermediate_server(
                    self.traceroute.ttl, current_addr[0], ping)
        return current_addr
    def get_server_address(self):
        """Resolve traceroute.dest to an IP and store it on traceroute.target."""
        try:
            self.traceroute.target = socket.gethostbyname(self.traceroute.dest)
        except Exception as e:
            raise IOError(f'Unable to resolve{self.traceroute.dest}:'
                          f' {e}')
| [
"kpoltoradnev@gmail.com"
] | kpoltoradnev@gmail.com |
a9add25a141fac2ee8d74f33a3cc8876fb27557a | b22e7e017332057581c7bbd36d0e497f793664ef | /string_to_integer.py | c0f25a0930c3c305e004c91147790cdbba8bf2a2 | [
"MIT"
] | permissive | jungker/leetcode-python | dcbee029bdb34fdd60c9f013d817f91ca75b3b41 | 4b2e55fab829cf37ce211eac5884f8159e0ab8df | refs/heads/master | 2021-01-19T11:57:09.074803 | 2017-04-20T02:15:20 | 2017-04-20T02:15:20 | 87,877,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
size = len(str)
if not size:
return 0
i = 0
while str[i].isspace():
i += 1
sign = 1
if str[i] in {'+', '-'}:
sign = 1 if str[i] == '+' else 0
i += 1
if i == size:
return 0
res = 0
while i < size and str[i].isdigit():
res = res * 10 + (ord(str[i]) - ord('0'))
i += 1
if sign and res > 2147483647:
return 2147483647
elif not sign and res > 2147483648:
return -2147483648
return res if sign else -res
if __name__ == '__main__':
    # Quick manual check.  NOTE: Python 2 syntax (print statement) below.
    solution = Solution()
    s = '-3924xekr324'
    s = '2147483648'  # overwrites the previous sample; only this one runs
    print solution.myAtoi(s)
| [
"seek_zp@163.com"
] | seek_zp@163.com |
d0dd83eb16141df7a2d15e1aa01dda85e39ae9bb | 81c08aa2085e9de7371e97fedb65d377d4ddae7d | /examples/ais3_crackme/ais3_crackme.py | dbddf705c541d03e3a7977b7446b3dfd1869f180 | [
"BSD-2-Clause"
] | permissive | heruix/r2angrdbg | 3445b8288efaaa441fda469a094a66a64dc9afc7 | 963850ddcb04900cf4794dcb43bad0594014c05b | refs/heads/master | 2020-03-23T10:23:56.831116 | 2018-07-17T18:54:55 | 2018-07-17T18:54:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | import r2pipe
import r2angrdbg
# Drive radare2 (via r2pipe) plus angr (via r2angrdbg) to solve the ais3
# crackme.  NOTE: Python 2 syntax (print statements).
r2 = r2pipe.open("ais3_crackme")
r2angrdbg.init(r2)
r2.cmd("aaa")  # r2: analyze all functions/refs
r2.cmd("ood DUMMY")  # r2: reopen in debug mode with argv "DUMMY"
r2.cmd("db 0x004005f9")  # r2: set a breakpoint
r2.cmd("dc")  # r2: continue to the breakpoint
sm = r2angrdbg.StateManager()
sm.sim(sm["rax"], 100)  # symbolize 100 bytes at [rax] -- presumably the input buffer
m = sm.simulation_manager()
m.explore(find=0x00400602, avoid=0x0040060e)  # reach 'find' while avoiding 'avoid'
conc = sm.concretize(m.found[0])  # concrete bytes satisfying the found state
for addr in conc:
    print "0x%x ==>" % addr, repr(conc[addr])
sm.to_dbg(m.found[0])  # write the solved bytes back into the live debuggee
print r2.cmd("x 100 @ rax")  # hexdump the recovered input
r2.cmd("dc")
| [
"andreafioraldi@gmail.com"
] | andreafioraldi@gmail.com |
1e8d79071c151fcb5d0639009d17e3536e758394 | db5d913635c74ef99faad2a1413704ef592f8ad7 | /pset_pandas1_wine_reviews/check_imported_data/p3.py | e5de614c957c862d0f2956f27df3360e96e6b88c | [] | no_license | theelk801/pydev-psets | 7ac4b77bfdbf484e85ea7959e846810f8064b217 | 8c4a178199782174252ca76c7ba40fcb355ceede | refs/heads/master | 2020-05-07T11:37:09.578380 | 2019-05-14T21:34:45 | 2019-05-14T21:34:45 | 180,468,816 | 0 | 0 | null | 2019-04-10T00:14:42 | 2019-04-10T00:14:41 | null | UTF-8 | Python | false | false | 328 | py | """
Checking Imported Data III - DataFrame Labels
"""
import numpy as np
import pandas as pd
# Load the ~130k-row Wine Magazine reviews dataset into a DataFrame.
wine_reviews = pd.read_csv('raw_data/winemag-data-130k.csv')
# Access the labels on the rows of data.
# Access the labels on the columns of data.
# Return the labels for the rows and columns in wine_reviews in one command.
# (Exercise stub: the three tasks above are intentionally left for the student.)
| [
"jgarreffa112@gmail.com"
] | jgarreffa112@gmail.com |
fddffc59a31ae38afbe59b69911ad473cfcda4dd | bdbdcd7da09d518c95fabef409e8f3e22c47c28f | /Wiki_based/localizer.py | e63ad6190bcee0e4a41dbefa7129bffa767305bb | [
"MIT"
] | permissive | DrosoNeuro/Semester_Project | 835871e866bd51a4fd2db2e8fbac5105514093e2 | 2de38eef4ae6b3c350f8b742021ff098ecb376c4 | refs/heads/master | 2020-05-09T10:25:22.667677 | 2018-01-11T14:19:40 | 2018-01-11T14:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,779 | py | import wikipedia
import pandas as pd
import numpy as np
import pickle
import string
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
def top_tfidf_feats(row, features, top_n=25):
    """Return a DataFrame of the *top_n* highest tf-idf values in *row*.

    Columns are 'feature' (name taken from *features*) and 'tfidf' (score),
    ordered from highest to lowest score.
    """
    ranked = np.argsort(row)[::-1]
    pairs = []
    for idx in ranked[:top_n]:
        pairs.append((features[idx], row[idx]))
    frame = pd.DataFrame(pairs)
    frame.columns = ['feature', 'tfidf']
    return frame
def top_feats_in_doc(Xtr, features, row_id, top_n=25):
    """Top tf-idf features of one document: row *row_id* of sparse matrix *Xtr*."""
    doc_vector = Xtr[row_id].toarray()
    dense_row = np.squeeze(doc_vector)
    return top_tfidf_feats(dense_row, features, top_n)
# Default search space: Wikipedia page titles for the 50 US states, Canadian
# provinces, Washington D.C. and a few Mexican/Bahamian locations.  The
# " state" suffix disambiguates the Wikipedia lookup (e.g. "New York state"
# vs the city); 'Alabama' alone resolves unambiguously.
sample_localizations = ['Alabama',
                        'Alaska state',
                        'Arizona state',
                        'Arkansas state',
                        'California state',
                        'Colorado state',
                        'Connecticut state',
                        'Delaware state',
                        'Florida state',
                        'Georgia state',
                        'Hawaii state',
                        'Idaho state',
                        'Illinois state',
                        'Indiana state',
                        'Iowa state',
                        'Kansas state',
                        'Kentucky state',
                        'Louisiana state',
                        'Maine state',
                        'Maryland state',
                        'Massachusetts state',
                        'Michigan state',
                        'Minnesota state',
                        'Mississippi state',
                        'Missouri state',
                        'Montana state',
                        'Nebraska state',
                        'Nevada state',
                        'New Hampshire state',
                        'New Jersey state',
                        'New Mexico state',
                        'New York state',
                        'North Carolina state',
                        'North Dakota state',
                        'Ohio state',
                        'Oklahoma state',
                        'Oregon state',
                        'Pennsylvania state',
                        'Rhode Island state',
                        'South Carolina state',
                        'South Dakota state',
                        'Tennessee state',
                        'Texas state',
                        'Utah state',
                        'Vermont state',
                        'Virginia state',
                        'Washington state',
                        'West Virginia state',
                        'Wisconsin state',
                        'Wyoming state',
                        'Ontario',
                        'Quebec',
                        'Nova Scotia',
                        'New Brunswick',
                        'Manitoba',
                        'British Columbia',
                        'Prince Edward state',
                        'Saskatchewan state',
                        'Alberta state',
                        'Newfoundland and Labrador state',
                        'Washington, D.C. state',
                        'Chihuahua state',
                        'Baja California state',
                        'Freeport bahamas',
                        'Nuevo Leon',
                        ]
class Localizer:
    """Guess a location from free text by matching words against Wikipedia articles.

    Workflow: add locations, ``get_WikiText`` to download their Wikipedia
    pages, ``vectorizer`` + ``make_map`` to build per-location top-term
    tables, then ``search_for`` / ``search_for_tf_idf`` to rank locations
    for a query sentence.
    """
    def __init__(self):
        self.locations = []  # location names (Wikipedia page titles)
        self.texts = []      # downloaded article text, punctuation stripped
        self.X = []          # term-count matrix from CountVectorizer
        self.features = []   # vocabulary aligned with the columns of X
    def add_SingleLocation(self, location):
        """Append one location name to the search space."""
        self.locations.append(location)
    def add_listLocation(self, locationList=sample_localizations):
        """Append a list of location names (defaults to the sample list above)."""
        if len(self.locations) == 0:
            self.locations = locationList
        else:
            self.locations = self.locations + locationList
    def get_WikiText(self):
        """Download the Wikipedia article for every location, stripping punctuation.

        NOTE: a failed lookup only prints a warning, so ``texts`` can end up
        shorter than ``locations`` (they are matched by position downstream).
        """
        translator = str.maketrans('', '', string.punctuation)
        for l in self.locations:
            try:
                p = wikipedia.page(str(l))
                s = p.content
                self.texts.append(s.translate(translator))
            except Exception:  # was a bare ``except``; don't swallow KeyboardInterrupt
                print("Be more specific with " + l)
    def printText(self):
        """Dump every downloaded article to stdout."""
        for t in self.texts:
            print(t)
    def vectorizer(self, language="english"):
        """Build the term-count matrix over the downloaded texts; returns (X, features)."""
        vectorizer = CountVectorizer(stop_words=language)
        self.X = vectorizer.fit_transform(self.texts)
        # NOTE: get_feature_names() is deprecated in newer scikit-learn
        # (renamed get_feature_names_out); kept for the pinned version here.
        self.features = vectorizer.get_feature_names()
        return self.X, self.features
    def make_map(self, top):
        """Build per-location tables of the *top* terms (names and tf-idf scores)."""
        self.df = pd.DataFrame(columns=self.locations)
        self.df_tfidf = pd.DataFrame(columns=self.locations)
        for n, state in enumerate(self.locations):
            self.df[state] = top_feats_in_doc(self.X, self.features, n, top)['feature']
            self.df_tfidf[state] = top_feats_in_doc(self.X, self.features, n, top)['tfidf']
    def search_for(self, sentence, top=10):
        """Rank locations by how many of their top terms match words of *sentence*."""
        translator = str.maketrans('', '', string.punctuation)
        sentence = sentence.translate(translator)
        results = []
        for state in self.locations:
            results.append((state, sum(self.df[state].str.match('|'.join(sentence.lower().split())))))
        return sorted(results, key=lambda x: x[1], reverse=True)[:top]
    def search_for_tf_idf(self, sentence, top=10):
        """Like ``search_for`` but weights each matching term by its tf-idf score."""
        translator = str.maketrans('', '', string.punctuation)
        sentence = sentence.translate(translator)
        results = []
        for state in self.locations:
            booleans = self.df[state].str.match('|'.join(sentence.lower().split()))
            results.append((state, sum(self.df_tfidf[state][booleans])))
        return sorted(results, key=lambda x: x[1], reverse=True)[:top]
    def score(self, sentence, correct_value, top=10):
        """True if *correct_value* appears in the top-*top* ``search_for`` results."""
        res_list = [x[0] for x in self.search_for(sentence, top)]
        return correct_value in res_list
    def score_tfidf(self, sentence, correct_value, top=10):
        """True if *correct_value* appears in the top-*top* tf-idf results.

        Bug fix: this previously delegated to ``search_for`` (plain match
        counts), making it identical to ``score``; it now uses
        ``search_for_tf_idf`` as its name promises.
        """
        res_list = [x[0] for x in self.search_for_tf_idf(sentence, top)]
        return correct_value in res_list
if __name__ == "__main__":
    # Smoke test: build the map from the sample locations and run two queries.
    L = Localizer()
    L.add_listLocation(sample_localizations)
    L.get_WikiText()  # network: downloads one Wikipedia article per location
    L.vectorizer()
    L.make_map(25)
    print(L.search_for("Alabama, my home, my state"))
    print(L.search_for_tf_idf("Alabama, my home, my state"))
    print(L.score('Alabama my home', 'Alabama'))
| [
"axel.uran@epfl.ch"
] | axel.uran@epfl.ch |
9ca4e761dc05c607f6a8a2c2db93e5efbede6ca5 | 243a3415a9be9ac6df2c9198406291066ea16a8b | /url_threaded_fetch.py | 1b51bb03f0e5690f21467e9aaffa365310d4c185 | [] | no_license | Sandy4321/twitter-sentiments | 0ebb6ad2c974ca56018a241972a19d861ec2af9a | 4c4f96bf604f6c85437afb7a14cb9dad0a91defb | refs/heads/master | 2021-01-16T18:04:41.536709 | 2012-04-02T17:33:45 | 2012-04-02T17:33:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import Queue
import threading
import urllib2
import time
from BeautifulSoup import BeautifulSoup
hosts = [ "http://www.google.com", "http://www.greplin.com", "http://www.amazon.com", "http://apple.com", "http://www.techcrunch.com", "http://www.ibm.com" ]
queue = Queue.Queue()
class ThreadUrl(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
while True:
host = self.queue.get()
url = urllib2.urlopen(host)
chunk = url.read()
soup = BeautifulSoup(chunk)
print soup.findAll(['title'])
self.queue.task_done()
start = time.time()
def main():
for i in range(5):
t = ThreadUrl(queue)
t.setDaemon(True)
t.start()
for host in hosts:
queue.put(host)
queue.join()
main()
print "Elapsed time : %s", (time.time() - start)
| [
"syst3m.w0rm@gmail.com"
] | syst3m.w0rm@gmail.com |
8ac7831c5ef9919ed5b2d70de3696fd28530bba2 | 6c26895f5a75a4a2dafe06f602a1b0432d4eca67 | /v08000/script/Encoder.py | 4a8ce5945b28e8d8fff380a43346adf4666e06b8 | [] | no_license | konumaru/m5_forecasting_accuracy | fee426136577d8446fa6cd68a1100e8a58747d38 | fdbffb48211a739783e0b70ae07807db819d8305 | refs/heads/master | 2022-11-12T13:27:27.874744 | 2020-06-26T06:47:57 | 2020-06-26T06:47:57 | 245,319,637 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | import numpy as np
import pandas as pd
class GaussianTargetEncoder():
    """Bayesian (Gaussian-conjugate) target encoder.

    For each group defined by *group_cols*, blends the group's maximum-
    likelihood target mean with a prior mean, weighting prior vs data by
    ``prior_precision`` against the observed precision ``n / sig2_mle``.
    """
    def __init__(self, group_cols, target_col="target", prior_cols=None):
        self.group_cols = group_cols  # column name or list of names defining the groups
        self.target_col = target_col  # column holding the target y
        self.prior_cols = prior_cols  # optional columns averaged row-wise into the prior mean
    def _get_prior(self, df):
        """Per-row prior mean: the global target mean, or mean of *prior_cols* if set."""
        if self.prior_cols is None:
            prior = np.full(len(df), df[self.target_col].mean())
        else:
            prior = df[self.prior_cols].mean(1)
        return prior
    def fit(self, df):
        """Store per-group count, MLE mean/variance, and mean prior in ``self.stats``."""
        self.stats = df.assign(mu_prior=self._get_prior(df), y=df[self.target_col])
        self.stats = self.stats.groupby(self.group_cols).agg(
            n=("y", "count"),
            mu_mle=("y", np.mean),
            sig2_mle=("y", np.var),
            mu_prior=("mu_prior", np.mean),
        )
    def transform(self, df, prior_precision=1000, stat_type="mean"):
        """Map each row's group to a posterior statistic (ndarray, one value per row).

        stat_type: "mean" -> posterior mean, "var" -> posterior variance,
        "precision" -> posterior precision.  Rows whose group was unseen at
        fit time (or whose value is non-finite, e.g. zero group variance)
        fall back to the prior mean.
        """
        # Posterior precision = prior precision + observed precision (n / sigma^2).
        precision = prior_precision + self.stats.n / self.stats.sig2_mle
        if stat_type == "mean":
            # Precision-weighted blend of prior mean and group MLE mean.
            numer = prior_precision * self.stats.mu_prior\
                + self.stats.n / self.stats.sig2_mle * self.stats.mu_mle
            denom = precision
        elif stat_type == "var":
            numer = 1.0
            denom = precision
        elif stat_type == "precision":
            numer = precision
            denom = 1.0
        else:
            raise ValueError(f"stat_type={stat_type} not recognized.")
        mapper = dict(zip(self.stats.index, numer / denom))
        # Build lookup keys matching the groupby index: scalar for a single
        # column, tuples for multiple columns.
        if isinstance(self.group_cols, str):
            keys = df[self.group_cols].values.tolist()
        elif len(self.group_cols) == 1:
            keys = df[self.group_cols[0]].values.tolist()
        else:
            keys = zip(*[df[x] for x in self.group_cols])
        values = np.array([mapper.get(k) for k in keys]).astype(float)
        prior = self._get_prior(df)
        # Replace NaN/inf (unknown group or degenerate variance) with the prior.
        values[~np.isfinite(values)] = prior[~np.isfinite(values)]
        return values
    def fit_transform(self, df, *args, **kwargs):
        """Convenience: fit on *df*, then transform the same frame."""
        self.fit(df)
        return self.transform(df, *args, **kwargs)
| [
"konumaru1022@gmail.com"
] | konumaru1022@gmail.com |
88a341d74a0a210628c3ee0eb3951051a972fb3b | 6781c034d4745255e4c4290274147e3a578b2f51 | /Github Version/Multihack Compiled.py | 4b2d8f453fefab59d11efece85ff691dce6f0538 | [] | no_license | mooncloset/CSGO | 6dfcf9b608b8da31b0078a9b9d6ad3f9fc7429c0 | 3cdd6aa5d20e3d28bde37566659e8a9d6acca142 | refs/heads/master | 2020-05-16T01:54:37.511650 | 2019-05-01T00:15:19 | 2019-05-01T00:15:19 | 182,614,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | import keyboard
import pymem
import pymem.process
import time
import sys
from multiprocessing import Process
from Wallhack import wallhack
from newbhoptesting import bhop
#added 4/21/19
# NOTE(review): hard-coded client_panorama.dll memory offsets for one specific
# game build ("offsets updated 4/27/19"); they break on any game update.
dwForceJump = (0x5188988) #offsets updated 4/27/19
dwLocalPlayer = (0xCD4774)
m_fFlags = (0x104)
dwEntityList = (0x4CE54EC) #for wallhack
dwGlowObjectManager = (0x5225718)
m_iGlowIndex = (0xA3F8)
m_iTeamNum = (0xF4) #end wallhack
pm = pymem.Pymem("csgo.exe")
client = pymem.process.module_from_name(pm.process_handle, "client_panorama.dll").lpBaseOfDll
print("Hit")
one = 1
while one == 1:
    # NOTE(review): main() only *defines* BhopPortion/WallhackPortion and never
    # calls them, so the Process(...) lines below reference names that are not
    # in scope at module level (NameError).  Also ``newbhoptesting`` and
    # ``Wallhack`` are not bound as modules here -- only ``bhop``/``wallhack``
    # were imported via ``from ... import`` above.
    def main():
        def BhopPortion():
            newbhoptesting.bhop()
            print ("BhopPortion Loaded")
        def WallhackPortion():
            Wallhack.wallhack()
            print ("WallhackPortion Loaded")
    #end
    if __name__ == '__main__':
        p1 = Process(target=BhopPortion)
        p1.start()
        p2 = Process(target=WallhackPortion)
        p2.start()
    main()
| [
"noreply@github.com"
] | mooncloset.noreply@github.com |
fd8d4c23273bcef79b2690971aaeaaf9e5d9a8d3 | 9fa71d5834dae1c8900b3444f564b11326374d36 | /packages/ipm_cloud_postgresql/frotas/rotinas_envio/buscaIdGerado.py | f130f69f3143dfdab0903ba72a3f64a2c03e2a98 | [] | no_license | JoaoPauloLeal/toolbox | a85e726cfeb74603cb64d73c4af64757a9a60db7 | 924c063ba81395aeddc039a51f8365c02e527963 | refs/heads/master | 2023-06-07T02:17:42.069985 | 2021-06-28T19:06:40 | 2021-06-28T19:06:40 | 381,128,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,903 | py | import packages.ipm_cloud_postgresql.model as model
import bth.interacao_cloud as interacao_cloud
import json
import logging
from datetime import datetime
# Integration constants for the "buscaIdGerado" (fetch generated ids) routine.
tipo_registro = 'buscaIdGerado'  # record-type tag used by the cloud helpers
sistema = 304  # source-system id -- TODO confirm meaning against interacao_cloud
limite_lote = 100  # maximum records per batch sent to the service layer
url = "https://api.protocolo.betha.cloud/protocolo/service-layer/v1/api/documento"
def iniciar_processo_envio(params_exec, ano, *args, **kwargs):
    """Read the exported id list (id_gerado.json) and queue every id for deletion."""
    print('- Iniciando processo de montagem de Json.')
    # Load the JSON export produced earlier (see get_path for the fixed folder).
    with open(get_path(f'id_gerado.json'), "r", encoding='utf-8') as f:
        data = json.load(f)
        f.close()  # redundant: the with-block already closes f
    lista_id_gerados = []
    # Walk the 'content' array collecting the ids that must be deleted.
    for conteudo in data['content']:
        lista_id_gerados.append(conteudo['id'])
    # Build the deletion payload file.
    prepara_json_exclusao_envio(lista_id_gerados)
    print('- Criação de dados finalizado.')
def get_path(tipo_json):
    """Return the path of *tipo_json* inside the fixed json_default template folder."""
    base_dir = 'sistema_origem/ipm_cloud_postgresql/protocolo/json_default/'
    return base_dir + tipo_json
def aplica_parametros(params_exec, t):
    """Substitute ``{{param}}`` placeholders in *t* with values from *params_exec*.

    Bug fix: the previous body was a debugging leftover that called
    ``texto_consulta.find("assunto", '"id":')`` in a loop -- ``str.find``'s
    second argument must be an int, so every call raised TypeError and the
    text came back unchanged.  This restores the substitution documented by
    the commented-out line that was left above it.  Also drops the
    ``return`` inside ``finally``, which silently suppressed exceptions.
    """
    texto_consulta = t
    try:
        for param in params_exec:
            placeholder = '{{' + param + '}}'
            texto_consulta = texto_consulta.replace(placeholder, str(params_exec.get(param)))
    except Exception as error:
        print("Erro ao executar função 'aplica_parametros'.", error)
    return texto_consulta
def prepara_json_exclusao_envio(dados):
    """Build the deletion payload for every id in *dados* and write it to
    template_exclude.json (the file is rewritten from scratch on every run).

    Bug fix: the file previously received ``str(lista)`` -- the Python repr
    of the list (single quotes), which is not valid JSON even though the
    file has a .json extension -- it is now serialized with ``json.dump``.
    """
    print('- Iniciando envio dos dados.')
    lista_dados_enviar = []
    id_integracao = "ExclusaoIdGeradosIncoerentes"
    for item in dados:
        lista_dados_enviar.append({
            "idIntegracao": id_integracao,
            "idGerado": item,
            "conteudo": {
                "idGerado": item
            }
        })
    with open(get_path('template_exclude.json'), "w", encoding='utf-8') as f:
        json.dump(lista_dados_enviar, f, ensure_ascii=False)
    # NOTE(review): the commented-out interacao_cloud/model dispatch that used
    # to live here was dropped as dead code; restore it from history if the
    # ids should also be sent to the service layer, not just written to disk.
    print('- Envio de dados finalizado.')
| [
"joao.leal@betha.com.br"
] | joao.leal@betha.com.br |
ca132e77807f48ce5e8809af652b09d928890e7e | c6ad15d5c05c4ae8b17f1ca67482c927aebe08b2 | /getGameTorrent.py | 656a1cc662b3fe2ed702bb12bdae54f579c93487 | [] | no_license | tingmwu/python_spider | 7d4fa8cebfb62441bb56e74d1d21020804446675 | 9933075a258cc0e873160166191d1085c6e9429b | refs/heads/master | 2021-01-30T08:10:02.782447 | 2020-04-14T14:55:31 | 2020-04-14T14:55:31 | 243,495,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | import requests
import json
import re
import os
# Scrape every game torrent link from the GBT group game space
# (http://renxufeng.ys168.com/) into game.txt, one link per line.
# Bug fix: the original wrapped this note in ``` fences, which is a
# SyntaxError in Python -- the whole script could never run.  Converted to
# comments (and translated from Chinese).
menu_url = "http://cd.ys168.com/f_ht/ajcx/ml.aspx?cz=ml_dq&_dlmc=renxufeng&_dlmm="
menu = requests.get(menu_url).text
# print(menu)
menu_pattern = re.compile('<li id="ml_(.*?)" .*?</li>')
folder_ids = re.findall(menu_pattern, menu)
link_pattern = re.compile('<li .*? href="(.*?)".*?</li>')
for folder_id in folder_ids:
    detail_url = "http://cd.ys168.com/f_ht/ajcx/wj.aspx?cz=dq&mlbh={}&_dlmc=renxufeng&_dlmm=".format(
        folder_id)
    detail_menu = requests.get(detail_url).text
    # print(detail_menu)
    links = re.findall(link_pattern, detail_menu)
    # 'a' mode creates game.txt when missing, so the old isfile/create dance
    # and the per-link reopen are gone; output content is unchanged.
    with open('game.txt', 'a') as fp:
        for link in links:
            fp.write(link)
            fp.write('\n')
| [
"tingmwu@163.com"
] | tingmwu@163.com |
1f80863a8bd3dc8c97c4f31a4661e782490b8114 | c25cd48cadfa3492ec238bb033d4ece681a216f4 | /aula08ex25.py | 35c9288d8f77ca4e7abadf22a0752d5a7cc8f897 | [] | no_license | nobregagui/Guicodigo1 | 1647fcebee52bab2719ab798420bbbc9f53f24d3 | 6984c8fae3502e452481a68dc40f65605af8b454 | refs/heads/master | 2022-09-02T20:55:09.648612 | 2020-05-28T03:10:08 | 2020-05-28T03:10:08 | 267,476,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | name = input('Digite um nome: ').strip()
print('silva' in name.lower())
| [
"nobregagui8@gmail.com"
] | nobregagui8@gmail.com |
7bdec58e10459198d2a9ce7f8a10735a8fd6537d | 632747fbfbf1122497792291cb5a6a9f24add48d | /kurs/04_funkcje/zadania/zadanie7.py | d3f652fcca6946ce86cfd2b7ed83db9611d2851e | [] | no_license | mjoze/kurs_python | 892ac4fab7f3b3fb1d88b236c362a7630a565af7 | 78ec0dda34a5450a4c47d79f3957c65dfbac0276 | refs/heads/master | 2020-08-07T10:54:56.198237 | 2020-04-30T15:31:50 | 2020-04-30T15:31:50 | 213,421,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | """ Napisz program, który na podstawie numeru karty odpowie czy ma doczynienia z Visą,
MasterCard, a może AmericanExpress.
Co wiemy o tych numerach tych kart?
All Visa card numbers start with a 4. New cards have 16 digits. Old cards have 13.
MasterCard numbers either start with the numbers 51 through 55 or with the numbers 2221 through 2720.
All have 16 digits.
American Express card numbers start with 34 or 37 and have 15 digits."""
# def check_card(number):
# number_card = str(number)
# # number_card_list = [int(i) for i in str(number)]
#
# if int(number_card[0]) == 4 and len(number_card) >= 13:
# print("visa")
# elif (int(number_card[0:2]) in range(51, 56) or int(number_card[0:5]) in range(2221, 2721)) \
# and len(number_card) == 16:
# print('mc')
# elif (int(number_card[0:2]) == 34 or 37) and len(number_card) == 15:
# print('ae')
# else:
# print('zły numer karty')
#
# Sample numbers for manual testing (not real cards): Visa, MasterCard, Amex.
v = 4195905348577260
mc = 5372077848173651
ae = 349799046528565
def is_visa(is_card, number):
if not is_card:
return False
if len(number) == 16 or len(number) == 13:
if number[0] == '4':
return True
def is_mastercard(is_card, number):
if not is_card:
return False
if len(number) == 16:
if int(number[0:2]) in range(51, 56) or int(number[0:4]) in range(2221, 2721):
return True
def is_american_express(is_card, number):
if not is_card:
return False
if len(number) == 15:
if number[0:2] in ("34", "37"):
return True
can_be_card_number = False
card_number = input("Put your card number here: ")
if len(card_number) < 13 or len(card_number) > 16:
print("wrong number")
else:
if card_number.isdecimal():
print("Can be card number")
can_be_card_number = True
else:
print("Not a number")
if is_visa(can_be_card_number, card_number):
print("I'm visa")
elif is_mastercard(can_be_card_number, card_number):
print("I'm master card")
elif is_american_express(can_be_card_number, card_number):
print("I'm american express")
else:
print("Not known card type")
| [
"mirek.jozefiak@gmail.com"
] | mirek.jozefiak@gmail.com |
83e8da5dc1fd4fbfef64bb7e13668ad6573c1d43 | 45b64f620e474ac6d6b2c04fbad2730f67a62b8e | /Varsity-Final-Project-by-Django-master/.history/project/quiz/views_20210424120959.py | 0c72f460a7b6765c0935e951051cc7eeac3e5ad1 | [] | no_license | ashimmitra/Final-Project | 99de00b691960e25b1ad05c2c680015a439277e0 | a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003 | refs/heads/master | 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | from django.shortcuts import render
from quiz.models import Quiz
from quiz.models import Bangla
from quiz.models import Math
from quiz.models import Science
from quiz.models import GK
from quiz.models import Mat
from quiz.models import Sci
from quiz.models import GNK
#def welcome(request):
#return render(request, 'welcome.html')
def english(request):
questions = Quiz.objects.all()
return render(request, 'english.html', { 'questions': questions})
def bangla(request):
questions = Bangla.objects.all()
return render(request, 'bangla.html', { 'questions': questions})
def math(request):
questions = Math.objects.all()
return render(request, 'math.html', { 'questions': questions})
def science(request):
questions = Science.objects.all()
return render(request, 'science.html', { 'questions': questions})
def generalknowledge(request):
questions = GK.objects.all()
return render(request, 'generalknowledge.html', { 'questions': questions})
def sci(request):
questions = Sci.objects.all()
return render(request, 'sci.html', { 'questions': questions})
def mat(request):
questions = Mat.objects.all()
return render(request, 'sci.html', { 'questions': questions})
def gnk(request):
questions = Sci.objects.all()
return render(request, 'sci.html', { 'questions': questions})
def result(request):
results = Math.objects.all()
return render(request, 'result.html', {'results': result})
| [
"34328617+ashimmitra@users.noreply.github.com"
] | 34328617+ashimmitra@users.noreply.github.com |
20ef3b12d7c50486b7a19f6fa772cd57ae8f4462 | 5f140a7a55e27200ea11a350778811359fcc90e9 | /handlers/basic.py | aa949ad1d443166281e450a5b2ba3e379b076486 | [] | no_license | BoyceYang/wsbs | da570abbdca8907dc248411ae70fd329faac2ea7 | 538a0ead4f9de647d80f79a5e023516c5460a300 | refs/heads/master | 2016-09-01T05:48:18.834134 | 2015-11-08T09:04:08 | 2015-11-08T09:04:08 | 45,722,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.web
class BasicHandler(tornado.web.RequestHandler):
def __init__(self, application, request, **kwargs):
super(BasicHandler, self).__init__(application, request, **kwargs) | [
"nicpick.yang@163.com"
] | nicpick.yang@163.com |
24bb1d60d014e1290d927cd6244054e2867e1cf9 | c356af0f374a064c3a0193f7050ffa0c4a698d65 | /Resources/wsgi.py | d9075ed86eefad2fbcdcaf8f3bb49ac7ed2701c4 | [
"MIT"
] | permissive | SuicideSin/Python2FullFlaskService | d6aebfcfd361df6eb8206540ba807d6429133791 | a56f1cabbcf32a9fd184a22fb0b29e456a993f79 | refs/heads/master | 2022-01-20T23:20:49.322966 | 2019-07-18T15:34:58 | 2019-07-18T15:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | from MainService import app
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080) | [
"alexm.box@gmail.com"
] | alexm.box@gmail.com |
aea32d24fb05c9015ddad497331ecd03e6a168ff | 20d1107dfe7fcc9ad0309e3a0cbd07ab618dfb30 | /All _stuff/Tíma dót/Float_junction.py | a75e16a40b292815cb6571553597c3dfa94e6f51 | [] | no_license | helgistein/verk-5 | e0755a4586e81096acae86411919b8684026b4bc | e231015c473a13bcd69205d96e35c9bf4d51cfa4 | refs/heads/master | 2020-03-27T23:30:51.342248 | 2018-12-06T13:39:05 | 2018-12-06T13:39:05 | 147,323,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py |
# is_float function definition goes here
def is_float(s):
try:
float(s)
return True
except ValueError:
return False
# Do not change the lines below
print(is_float('3.45'))
print(is_float('3e4'))
print(is_float('abc'))
print(is_float('4'))
print(is_float('.5')) | [
"helgisteinarolafsson@gmail.com"
] | helgisteinarolafsson@gmail.com |
764c4c0f67ebb327c7d59004a389359e6f550f4d | 6d88b7b4216d76aae19ac342189bf68456b3a993 | /main.py | 0cbfdaf6d22a8c5e05e44f92b6fccbc43a86e787 | [] | no_license | MarcusMMO/keyboardspambot | a865f1b78ac47518ff48fbe3e2a1ca298bc56607 | 3087297a460f02a2877d6159b4f6c31b7310c85d | refs/heads/master | 2022-12-24T19:31:53.624182 | 2020-10-02T13:34:49 | 2020-10-02T13:34:49 | 300,623,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | from selenium import webdriver
import time
from sys import platform
if platform == "darwin":
PATH = './chromedriver'
elif platform == "win32":
PATH = './chromedriver.exe'
else:
print('This Python Script is not compatible with your operating system...')
exit()
browser = webdriver.Chrome(PATH)
browser.get('https://www.typingtest.com/')
time.sleep(2)
agreeButton = browser.find_element_by_class_name('sc-bwzfXH')
agreeButton.click()
startButton = browser.find_element_by_class_name('start-btn')
startButton.click()
time.sleep(3)
clickArea = browser.find_element_by_class_name('test-notification')
clickArea.click()
editArea = browser.find_element_by_class_name('test-edit-area')
timer = browser.find_element_by_class_name('test-timer')
while True:
textArea = browser.find_element_by_class_name('test-text-area')
text = textArea.text
for letters in text:
editArea.send_keys(letters)
| [
"oosthuizenmarcus@gmail.com"
] | oosthuizenmarcus@gmail.com |
b10863f7a0d0a4858ab55dcafc1350350938e9a5 | 0c061162f0a01b2711de1843db5a8e5ef69c8dce | /src/swarm_rescue/tuto_spg_jupyter/tuto_04_01_textures.py | 6bc4109c6df6f599837a1c2d68ef5d4828f4097f | [
"MIT"
] | permissive | Cindy0725/swarm-rescue | a9206eeddaa99edff46d4fdfff48511108ac4bd1 | ce323c90ccb4a25216c63abccd09d3f31eea1189 | refs/heads/main | 2023-09-05T15:01:20.237092 | 2021-11-18T13:10:30 | 2021-11-18T13:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | from simple_playgrounds.playground import SingleRoom
from simple_playgrounds.engine import Engine
from simple_playgrounds.element.elements.basic import Physical
from simple_playgrounds.common.texture import ColorTexture, RandomUniformTexture, RandomTilesTexture, \
MultipleCenteredStripesTexture, CenteredRandomTilesTexture
my_playground = SingleRoom(size=(200, 200))
# The most basic texture is a uniform color.
elem = Physical(physical_shape='square', radius=10, texture=[123, 234, 0])
my_playground.add_element(elem, ((50, 50), 0))
elem = Physical(physical_shape='circle', radius=10, texture=ColorTexture(color=(222, 0, 0)))
my_playground.add_element(elem, ((100, 50), 0))
elem = Physical(physical_shape='pentagon', radius=10,
texture={'texture_type': 'color', 'color': (0, 0, 222)})
my_playground.add_element(elem, ((150, 50), 0))
tex_uniform = RandomUniformTexture(color_min=(100, 100, 0), color_max=(200, 250, 0))
elem = Physical(physical_shape='pentagon', radius=10, texture=tex_uniform)
my_playground.add_element(elem, ((50, 100), 0))
tex_tiles = RandomTilesTexture(color_min=(150, 100, 0), color_max=(200, 250, 0), size_tiles=5)
elem = Physical(physical_shape='rectangle', size=(20, 30), texture=tex_tiles)
my_playground.add_element(elem, ((100, 100), 0))
tex_polar = MultipleCenteredStripesTexture(color_1=(200, 100, 50), color_2=(100, 100, 150), n_stripes=5)
elem = Physical(physical_shape='pentagon', radius=20, texture=tex_polar)
my_playground.add_element(elem, ((50, 150), 0))
tex_random_tiles_centered = CenteredRandomTilesTexture(color_min=(100, 0, 100), color_max=(200, 0, 200),
size_tiles=20)
elem = Physical(physical_shape='hexagon', radius=20, texture=tex_random_tiles_centered)
my_playground.add_element(elem, ((100, 150), 0))
engine = Engine(time_limit=10000, playground=my_playground, screen=True)
engine.run(update_screen=True, print_rewards=True)
engine.terminate()
| [
"emmanuel.battesti@ensta-paris.fr"
] | emmanuel.battesti@ensta-paris.fr |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.