blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c93f3061445440c61fac3d56c93db2d8ca6ae38f | 171a504d38951da46ac8b6f594477f6798f18d00 | /applications/StructuralMechanicsApplication/python_scripts/trilinos_structural_mechanics_implicit_dynamic_solver.py | f173ddd9c60ca0d19eb7cdd8975b30abd6b870c0 | [] | no_license | adrigzr/Kratos | e3d385c10e6a9661f95dfbf998dca3844b7d14c1 | 9a281b74acb00f5590e0fec1bd3caa34255e5d9b | refs/heads/master | 2021-07-24T01:18:50.128534 | 2017-10-20T08:29:02 | 2017-10-20T08:29:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,063 | py | from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
#import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.mpi as mpi
import KratosMultiphysics.TrilinosApplication as TrilinosApplication
import KratosMultiphysics.MetisApplication as MetisApplication
import trilinos_structural_mechanics_solver
# Check that KratosMultiphysics was imported in the main script
KratosMultiphysics.CheckForPreviousImport()
def CreateSolver(main_model_part, custom_settings):
    """Factory entry point used by Kratos: build the Trilinos implicit dynamic solver."""
    solver = TrilinosImplicitMechanicalSolver(main_model_part, custom_settings)
    return solver
class TrilinosImplicitMechanicalSolver(trilinos_structural_mechanics_solver.TrilinosMechanicalSolver):
    """The trilinos structural mechanics implicit dynamic solver.

    Public member variables:
    dynamic_settings -- settings for the implicit dynamic solvers.

    For more information see:
    structural_mechanics_solver.py
    trilinos_structural_mechanics_solver.py
    """
    def __init__(self, main_model_part, custom_settings):
        # Set defaults and validate custom settings.
        # Only "damp_factor_m" (Bossak alpha) is owned by this class; it is
        # pulled out of custom_settings before the base class validates the rest.
        self.dynamic_settings = KratosMultiphysics.Parameters("""
        {
            "damp_factor_m" :-0.3
        }
        """)
        self.validate_and_transfer_matching_settings(custom_settings, self.dynamic_settings)
        # Validate the remaining settings in the base class.
        if not custom_settings.Has("scheme_type"): # Override defaults in the base class.
            custom_settings.AddEmptyValue("scheme_type")
            custom_settings["scheme_type"].SetString("Newmark")
        # Construct the base solver.
        super(TrilinosImplicitMechanicalSolver, self).__init__(main_model_part, custom_settings)

    def AddVariables(self):
        # Base class adds the static (displacement) variables first.
        super(TrilinosImplicitMechanicalSolver, self).AddVariables()
        # Add dynamic variables.
        self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
        self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ACCELERATION)
        if self.settings["rotation_dofs"].GetBool():
            # Rotational DOFs (e.g. shells/beams) need angular counterparts.
            self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_VELOCITY)
            self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_ACCELERATION)
        print("::[TrilinosImplicitMechanicalSolver]:: Variables ADDED")

    #### Private functions ####

    def _create_solution_scheme(self):
        # Both supported schemes are realised with the Bossak scheme object;
        # "Newmark" uses alpha = 0.0 (presumably Bossak with zero alpha
        # reduces to Newmark — confirm against the Kratos scheme docs).
        scheme_type = self.settings["scheme_type"].GetString()
        if (scheme_type == "Newmark"):
            damp_factor_m = 0.0
        elif (scheme_type == "Bossak"):
            damp_factor_m = self.dynamic_settings["damp_factor_m"].GetDouble()
        else:
            raise Exception("Unsupported scheme_type: " + scheme_type)
        mechanical_scheme = TrilinosApplication.TrilinosResidualBasedBossakDisplacementScheme(damp_factor_m)
        return mechanical_scheme
| [
"cpuigbo@cimne.upc.edu"
] | cpuigbo@cimne.upc.edu |
888d90fbd5a780402d5392e40b30f69a2708ef1a | 396841279a035033487b6c9fd5db6fc699b288af | /backend/chat/models.py | 07c413103e65923af2fa18a0731c2326de9ffa76 | [] | no_license | Shamsulhaq/realtime-chat | b9d9b753252f70e6d682a6f86630474408bebb40 | 5462bcb1f42787d3c8e4a62037c7ef401bcce077 | refs/heads/master | 2023-07-06T22:48:14.355157 | 2021-08-05T11:37:34 | 2021-08-05T11:37:34 | 393,011,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | # at chatapp/backend/chat/models.py
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
User = get_user_model()
class Conversation(models.Model):
    """A one-to-one chat thread between two users."""
    # NOTE(review): related_name 'participent'/'participent_two' are
    # misspelled; kept as-is because renaming changes the reverse-accessor API.
    user_one = models.ForeignKey(
        User,
        related_name='participent',
        on_delete=models.CASCADE
    )
    user_two = models.ForeignKey(
        User,
        related_name='participent_two',
        on_delete=models.CASCADE
    )
    # Set once when the row is inserted.
    timestamp = models.DateTimeField(
        auto_now_add=True
    )

    class Meta:
        # NOTE(review): this only prevents duplicates in the same order;
        # (A, B) and (B, A) can still coexist — confirm whether that is intended.
        unique_together = ['user_one', 'user_two']

    def __str__(self):
        return str(self.id)

    def last_message(self):
        # Most recent Message via the reverse FK (related_name='messages').
        return self.messages.all().last()

    def conversation_url(self):
        # Resolve the chat-room URL for this conversation's primary key.
        return reverse("chats:room", kwargs={"room_name": self.pk})
class Message(models.Model):
    """A single chat message belonging to a Conversation."""
    conversation = models.ForeignKey(
        Conversation,
        related_name='messages',
        on_delete=models.CASCADE
    )
    author = models.ForeignKey(
        User,
        related_name='sender',
        on_delete=models.CASCADE
    )
    content = models.TextField()
    # Set once when the row is inserted.
    timestamp = models.DateTimeField(
        auto_now_add=True
    )

    def __str__(self):
        return self.content
| [
"bmshamsulhaq65@gmail.com"
] | bmshamsulhaq65@gmail.com |
b4730082313d847d8b38fedb02dcf83f354fb541 | da8471ad2f90a3efa31acb0c986020357cdb5e4c | /confidant/scripts/archive.py | 1f93f9e044726de0bdc2c6fdcfd05a0b21cbfc89 | [
"Apache-2.0"
] | permissive | lyft/confidant | af18cc7085303ee5bab873c78567e14ae48630ab | 8033824e0b3c156ee5588e5b31f8dff8e421a01e | refs/heads/master | 2023-09-01T20:46:07.051295 | 2023-08-21T17:01:49 | 2023-08-21T17:01:49 | 42,324,225 | 1,918 | 146 | Apache-2.0 | 2023-09-06T21:20:59 | 2015-09-11T18:02:12 | Python | UTF-8 | Python | false | false | 2,789 | py | import sys
import logging
from datetime import datetime
from flask_script import Command, Option
from confidant import settings
from confidant.models.credential import Credential
from confidant.services import credentialmanager
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
class ArchiveCredentials(Command):
    """
    Command to permanently archive credentials to an archive dynamodb table.
    """
    option_list = [
        Option(
            '--days',
            dest='days',
            type=int,
            help=('Permanently archive disabled credentials last modified'
                  ' greater than this many days (mutually exclusive with'
                  ' --ids)'),
        ),
        Option(
            '--force',
            action='store_true',
            dest='force',
            default=False,
            help=('By default, this script runs in dry-run mode, this option'
                  ' forces the run and makes the changes indicated by the'
                  ' dry run'),
        ),
        Option(
            '--ids',
            dest='ids',
            help=('Archive a comma separated list of credential IDs. (mutually'
                  ' exclusive with --days)'),
        ),
    ]

    def run(self, days, force, ids):
        # Archive table must be configured before anything can be moved.
        if not settings.DYNAMODB_TABLE_ARCHIVE:
            logger.error('DYNAMODB_TABLE_ARCHIVE is not configured, exiting.')
            return 1
        # Enforce the mutual exclusivity documented in option_list.
        # NOTE(review): truthiness checks mean `--days 0` is treated as
        # "not provided" — confirm whether 0 should be a valid value.
        if days and ids:
            logger.error('--days and --ids options are mutually exclusive')
            return 1
        if not days and not ids:
            logger.error('Either --days or --ids options are required')
            return 1
        credentials = []
        if ids:
            # filter strips an empty string
            _ids = [_id.strip() for _id in list(filter(None, ids.split(',')))]
            if not _ids:
                logger.error('Passed in --ids argument is empty')
                return 1
            # Explicit IDs: archive only disabled credentials; warn and skip
            # any that are still enabled.
            for credential in Credential.batch_get(_ids):
                if credential.enabled:
                    logger.warning(
                        'Skipping enabled credential {}'.format(credential.id)
                    )
                    continue
                credentials.append(credential)
        else:
            # Age-based: scan every credential and keep the disabled ones
            # whose last modification is older than the cutoff.
            for credential in Credential.data_type_date_index.query(
                    'credential'
            ):
                # Compare in the credential's own timezone to avoid
                # naive/aware datetime mismatches.
                tz = credential.modified_date.tzinfo
                now = datetime.now(tz)
                delta = now - credential.modified_date
                if not credential.enabled and delta.days > days:
                    credentials.append(credential)
        # Dry-run unless --force was given; the service does the actual move.
        credentialmanager.archive_credentials(credentials, force=force)
| [
"noreply@github.com"
] | lyft.noreply@github.com |
2b60ca9956c9c1ab4e069b128087a7712afe9fa7 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2652486_0/Python/Nooodles/C.py | d3ac03b56445bbc8e423e2fc1572aa92e94b5711 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | def Solve( P ):
    # Python 2 contest scratch code (body of Solve(P); header is on the
    # preceding line). Flags: does any entry divide by 3 / by 5?
    Is3 = False; Is5 = False; K = len(P);
    New = []; ANS = [];
    for e in P:
        New.append(e);
        if e%3 == 0:
            Is3 = True;
        if e%5 == 0:
            Is5 = True;
    print New;
    New.sort();
    # Drop leading 1s; K tracks how many entries remain.
    # NOTE(review): if every entry is 1 this loop raises IndexError on the
    # empty list — confirm inputs always contain a non-1 value.
    while New[0] == 1:
        New.pop(0);
        K -= 1
    print New;
    # If any entry was divisible by 3 (resp. 5), divide each divisible
    # remaining entry once by that factor (py2 integer division).
    if Is3:
        ANS.append(3);
        for i in range(K):
            if New[i]%3 == 0:
                New[i] /= 3 ;
    if Is5:
        ANS.append(5);
        for i in range(K):
            if New[i]%5 == 0:
                New[i] /= 5;
    # Debug dump; ANS is collected but never used and the function
    # returns None — this appears to be abandoned exploratory code.
    print Is3, Is5, New;
    return
def Brute1(a, b, c):
    """Return the sorted, de-duplicated subset products of {a, b, c}.

    The eight products (1, a, b, c, ab, ac, bc, abc — including the empty
    product 1) are collapsed to their unique values and returned ascending.
    The original sort-then-drop-adjacent-duplicates loop is equivalent to
    building a set and sorting it, which is both simpler and clearer.
    """
    return sorted({1, a, b, c, a * b, a * c, b * c, a * b * c})
# Python 2 driver: read T test cases; for each, enumerate all non-decreasing
# dice triples (a, b, c) in [2, 5] and match observed products against the
# precomputed reachable-product sets.
T = int(raw_input());
#R = 100; N = 3; M = 5; K = 7;
Data = [];
for q in range(T):
    # NOTE(review): case number is hardcoded to 1 for every case.
    print 'Case #1:';
    [R, N, M, K] = map(int, raw_input().split());
    # PAIR[i] is the triple as a string "abc"; PROD[i] the sorted unique
    # subset products of that triple (see Brute1). 20 triples total.
    PAIR = []; PROD = [];
    for a in range(2,6):
        for b in range(a,6):
            for c in range(b,6):
                PAIR += [str(a)+str(b)+str(c)];
                PROD.append( Brute1(a,b,c) );
#                print PAIR[-1], PROD[-1]
#    for a in range(20):
#        print a, PROD[a];
#    print
    for case in range(R):
        Inp = map(int, raw_input().split())
#        print Inp
        # A triple is a candidate iff every observed value is one of its
        # reachable products; print the first candidate found.
        Hit = [];
        for i in range(20):
            Good = True;
            for e in Inp:
                if e not in PROD[i]:
#                    print 'badness', i, e, PAIR[i], PROD[i]
                    Good = False;
            if Good:
                Hit.append(i);
        print PAIR[Hit[0]]
#    Match( map(int, raw_input().split()) );
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
9c56448ce8d78dda08d50c5e06464a14cce689bd | 22ebcc842dbc933bfa8fdad89b8b8ef48ecc91c7 | /plots/spread/plot_spread_factors2.py | 5d429b0c892e8313c0fa1fbb03e2abb8a10e8d4b | [] | no_license | klikooo/thesis-src | 192651c18f243c59cfa588e7052dc1a96ab0a146 | 64f2ee824afdc2d3fd0f98c6d9fcfda597b9ad9f | refs/heads/master | 2020-04-16T18:16:20.638147 | 2019-08-20T14:59:52 | 2019-08-20T14:59:52 | 161,623,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,132 | py | from decimal import Decimal
import copy
import plots.spread.plot as plot
import matplotlib.pyplot as plt
import util
setting = {"experiment": '3',
"data_set": util.DataSet.ASCAD,
"subkey_index": 2,
"unmask": True,
"desync": 0,
"use_hw": True,
"spread_factor": 6,
"epochs": 80,
"batch_size": 100,
"lr": '%.2E' % Decimal(0.0001),
"l2_penalty": 0,
"train_size": 1000,
"kernel_sizes": [0],
"num_layers": [0],
"channel_sizes": [0],
"network_name": "SpreadNet",
"runs": range(5),
"init_weights": "",
"title": "",
"plot_colors": ["acqua", "black", "brown", "darkblue", "darkgreen", "fuchsia",
"goldenrod", "green", "grey", "indigo", "lavender"],
"ge_x": [],
"ge_y": [],
"ta": [],
"va": [],
"tl": [],
"vl": [],
"line_title": [],
"line_title2": "$Spread_{PH}$",
"plot_markers": [" ", "*", "+"]
}
def plot_factors(spread_factors, save_name, x_lim, y_lim, show=False, train_size=1000):
    """Plot SpreadNet results for each spread factor (ASCAD + DPAv4 lines).

    spread_factors -- spread factors to draw, one colored line each
    save_name      -- output image path
    x_lim, y_lim   -- axis limits passed through to plot.create_plot
    show           -- also display the figure interactively
    train_size     -- training-set size baked into each setting copy
    """
    # Per-network template dicts derived from the module-level `setting`.
    setting_spread = copy.deepcopy(setting)
    # NOTE(review): key here is "plot_marker" (singular) while the base dict
    # and the per-factor updates use "plot_markers" — confirm which one the
    # plotting code actually reads.
    setting_spread.update({"network_name": "SpreadNet",
                           "line_title2": "$Spread_{PH}$",
                           "plot_colors": ["r", "g", "b"],
                           "plot_marker": [" "],
                           "train_size": train_size,
                           })
    setting_dense_spread = copy.deepcopy(setting)
    setting_dense_spread.update({"network_name": "DenseSpreadNet",
                                 "line_title2": "$MLP_{RT}$",
                                 "plot_colors": ["r", "g", "b"],
                                 "plot_marker": ["-"],
                                 "train_size": train_size,
                                 })
    settings_spread = []
    settings_dense_spread = []
    # One color per spread factor; extra entries are ignored by zip.
    colors = ["r", "g", "b", "y", "g", "b"]
    for spread_factor, color in zip(spread_factors, colors):
        print(spread_factor)
        s_spread = copy.deepcopy(setting_spread)
        s_dense_spread = copy.deepcopy(setting_dense_spread)
        # NOTE(review): data_set is hardcoded to ASCAD here, so the
        # module-level data_set toggles only affect the DPA copy below.
        s_spread.update({
            "data_set": util.DataSet.ASCAD,
            "spread_factor": spread_factor,
            "plot_colors": [color],
            "plot_markers": [" "],
            "line_title2": s_spread['line_title2'] + " sf " + str(spread_factor)
        })
        s_dense_spread.update({
            "spread_factor": spread_factor,
            "plot_colors": [color],
            "plot_markers": ["h"],
            "line_title2": s_dense_spread['line_title2'] + " sf " + str(spread_factor)
        })
        settings_spread.append(s_spread)
        # Companion DPAv4 line for the same factor, same color, marker "h".
        dpa_spread = copy.deepcopy(s_spread)
        dpa_spread.update({"data_set": util.DataSet.DPA_V4,
                           "plot_colors": [color],
                           "plot_markers": ["h"],
                           "line_title2": dpa_spread['line_title2'] + " DPA sf " + str(spread_factor)})
        settings_dense_spread.append(s_dense_spread)
        settings_spread.append(dpa_spread)
    # DenseSpreadNet lines are collected but currently not plotted.
    network_settings = {
        "SpreadNet": settings_spread,
        # "DenseSpreadNet": settings_dense_spread
    }
    plot.create_plot(network_settings, save_name, x_lim, y_lim)
    if show:
        plt.show()
#########
# ASCAD #
#########
data_set = util.DataSet.ASCAD
setting.update({"data_set": util.DataSet.ASCAD})

###############
# TEST FOR HW #
###############
# Set the global setting to HW
setting.update({"use_hw": True})

# Test for HW with different training sizes
path = "/media/rico/Data/TU/thesis/report/img/spread/factors"
hw_save_name = f"{path}/{data_set}_hw_" + "{}.png"
plot_factors([3, 6, 9, 12], hw_save_name.format(1000), [-1, 100], [0, 101], show=False)
plot_factors([3, 6, 9, 12], hw_save_name.format(5000), [-1, 25], [0, 70], train_size=5000)
plot_factors([3, 6, 9, 12], hw_save_name.format(20000), [-1, 25], [0, 70], train_size=20000)
plot_factors([3, 6, 9, 12], hw_save_name.format(40000), [-1, 25], [0, 70], train_size=40000)

###############
# TEST FOR ID #
###############
# Set the global setting to ID
setting.update({"use_hw": False})

# Test for ID with different training sizes
id_save_name = f"{path}/{data_set}_id_" + "{}.png"
plot_factors([3, 6, 9, 12], id_save_name.format(1000), [-100, 3500], [0, 140], show=False)
plot_factors([3, 6, 9, 12], id_save_name.format(5000), [-1, 25], [0, 70], train_size=5000)
plot_factors([3, 6, 9, 12], id_save_name.format(20000), [-1, 10], [0, 30], train_size=20000)
plot_factors([3, 6, 9, 12], id_save_name.format(40000), [-1, 10], [0, 20], train_size=40000)

#########
# DPAv4 #
#########
# Set the data set
data_set = util.DataSet.DPA_V4
# NOTE(review): this sets the global data_set to ASCAD although the section
# (and the file names via `data_set` above) are for DPAv4 — looks like a
# copy-paste slip; confirm whether DPA_V4 was intended here.
setting.update({"data_set": util.DataSet.ASCAD})

###############
# TEST FOR HW #
###############
# Set the global setting to HW
setting.update({"use_hw": True})

# Test for HW with different training sizes
path = "/media/rico/Data/TU/thesis/report/img/spread/factors"
hw_save_name = f"{path}/{data_set}_hw_" + "{}.png"
plot_factors([3, 6, 9, 12], hw_save_name.format(1000), [-1, 75], [0, 105], show=False)
plot_factors([3, 6, 9, 12], hw_save_name.format(5000), [-1, 25], [0, 70], train_size=5000)
plot_factors([3, 6, 9, 12], hw_save_name.format(20000), [-1, 25], [0, 70], train_size=20000)
plot_factors([3, 6, 9, 12], hw_save_name.format(40000), [-1, 25], [0, 70], train_size=40000)

###############
# TEST FOR ID #
###############
# Set the global setting to ID
setting.update({"use_hw": False})

# Test for ID with different training sizes
id_save_name = f"{path}/{data_set}_id_" + "{}.png"
plot_factors([3, 6, 9, 12], id_save_name.format(1000), [-100, 3500], [0, 140], show=False)
plot_factors([3, 6, 9, 12], id_save_name.format(5000), [-1, 25], [0, 100], train_size=5000)
plot_factors([3, 6, 9, 12], id_save_name.format(20000), [-1, 10], [0, 30], train_size=20000)
plot_factors([3, 6, 9, 12], id_save_name.format(40000), [-1, 10], [0, 20], train_size=40000)
| [
"rico12978@hotmail.com"
] | rico12978@hotmail.com |
2835fc3c17d97c08452787df63f76db069a5df95 | e8a87fa14006f1479161293a898e2f73eefc91f7 | /Week4/Tarefa 02/Exercicio_01_number_primo.py | 459db3bf35bb6448bddfe6e7a3737684cd7193bd | [
"MIT"
] | permissive | WesGtoX/Intro-Computer-Science-with-Python-Part01 | 7880cc4483662104ecaa3c199826cb129ae00dca | 50561e4c104ced2f5e468e382f45e4ca5cb2279e | refs/heads/master | 2020-03-21T06:29:10.584323 | 2018-06-21T21:28:43 | 2018-06-21T21:28:43 | 138,223,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | n = int(input("Digite um número inteiro: "))
# A number is prime iff it is >= 2 and has no divisor in [2, sqrt(n)].
# Fixes the original's misclassification of n <= 0 (it only excluded n == 1)
# and stops trial division at sqrt(n) instead of scanning all of [2, n).
primo = n >= 2
divisor = 2
while divisor * divisor <= n and primo:
    if n % divisor == 0:
        primo = False
    divisor += 1
if primo:
    print("primo")
else:
    print("não primo")
| [
"wesley_gto_x@yahoo.com.br"
] | wesley_gto_x@yahoo.com.br |
494992c771c97a259199b4e2c2478e269a3d2032 | fb671897604a99a4c85e912717dd27d9b93a3543 | /src/engine/sounds.py | bc5e18335fbbfdd58245daae02d53e2e2782c9c1 | [] | no_license | davidpendergast/grovetender | 650215063ef57a49e2a185ce695f463032736ee9 | 384f546af7f76d3826545875c252edaefdd632e3 | refs/heads/master | 2022-07-11T04:26:02.878880 | 2022-06-26T05:51:10 | 2022-06-26T05:51:10 | 256,640,586 | 4 | 1 | null | 2022-06-26T05:51:11 | 2020-04-18T00:48:26 | Python | UTF-8 | Python | false | false | 1,782 | py | import pygame
from src.utils.util import Utils
import traceback
_MASTER_VOLUME = 1.0
_LOADED_EFFECTS = {} # effect_id -> Effect object
_RECENTLY_PLAYED = {} # effect_id -> ticks since last play
RECENCY_LIMIT = 4 # if an effect was already played X ticks ago, don't play it again
def set_volume(volume):
    """Update the module-wide master volume, clamped into [0.0, 1.0]."""
    global _MASTER_VOLUME
    clamped = Utils.bound(volume, 0.0, 1.0)
    _MASTER_VOLUME = clamped
def update():
    """Age the recently-played table by one tick, dropping stale entries."""
    # Snapshot the keys so entries can be deleted while walking the dict.
    for effect_id in list(_RECENTLY_PLAYED.keys()):
        if _RECENTLY_PLAYED[effect_id] >= RECENCY_LIMIT:
            del _RECENTLY_PLAYED[effect_id]
        else:
            _RECENTLY_PLAYED[effect_id] += 1
def play_sound(sound):
    """
    Play a sound effect, loading and caching it on first use.

    :param sound: either an effect_path, or a tuple (effect_path, volume)
    """
    if sound is None:
        return
    # Normalise the argument into (effect_path, volume).
    if isinstance(sound, tuple):
        effect_path = sound[0]
        volume = sound[1]
    else:
        effect_path = sound
        volume = 1.0
    # Nothing to do when muted, silent, or pathless.
    if _MASTER_VOLUME == 0 or volume <= 0 or effect_path is None:
        return
    # Debounce: skip effects played within the last RECENCY_LIMIT ticks.
    if effect_path in _RECENTLY_PLAYED:
        return
    if effect_path in _LOADED_EFFECTS:
        # Cache hit (may be None if a previous load failed — see below).
        effect = _LOADED_EFFECTS[effect_path]
        effect.set_volume(_MASTER_VOLUME * volume)
    else:
        try:
            effect = pygame.mixer.Sound(effect_path)
            effect.set_volume(_MASTER_VOLUME * volume)
        except Exception:
            print("ERROR: failed to load sound effect {}".format(effect_path))
            traceback.print_exc()
            effect = None
        # Failed loads are cached as None so the file isn't retried
        # (and the error isn't re-logged) on every call.
        _LOADED_EFFECTS[effect_path] = effect
    if effect is not None:
        # Record the play so update() can age it out.
        _RECENTLY_PLAYED[effect_path] = 0
        # print("INFO: playing sound effect: {}".format(effect_path))
        effect.play()
| [
"dlp75@case.edu"
] | dlp75@case.edu |
6e8a22680c7a5fb533a518c9e01bd3dbc4e797b5 | 6e7f45c7a7f5b7a2333c6bfe8a4ae8723537dc62 | /pronostico/settings.py | 8c476df723d65a4a066e52776e642edcee201abd | [] | no_license | devnandito/pronosticos | 8bcacfd7cf3032fb5b831c331ef0719f0c050205 | 0796562243dc51aa2e5b1e8e51d10497d15aa8e3 | refs/heads/master | 2021-05-28T04:49:53.532946 | 2014-08-24T14:36:19 | 2014-08-24T14:36:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,693 | py | # Django settings for pronostico project.
import os
# Absolute path of the directory containing this settings file.
RUTA_PROYECTO = os.path.dirname(os.path.realpath(__file__))

DEFAULT_CHARSET = 'utf-8'

# NOTE(review): DEBUG = True must not be enabled in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'pronostico', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        # NOTE(review): database credentials and host are hardcoded in
        # source control — prefer environment variables or a local override.
        'USER': 'postgres',
        'PASSWORD': '1234',
        'HOST': '192.168.192.152', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '5432', # Set to empty string for default.
    }
}

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE(review): '*' disables host-header validation; restrict in production.
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es-mx'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
#STATIC_URL = '/static/'
STATIC_URL = os.path.join(RUTA_PROYECTO,'/static/')
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(RUTA_PROYECTO,'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'd6)7j+b+w-id+kpvk+1t%!$q0$r(rbqi+!y9h5m-e^)wu#1a0x'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'pronostico.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'pronostico.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(RUTA_PROYECTO,'plantillas'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'principal',
'gunicorn',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"fhersa@gmail.com"
] | fhersa@gmail.com |
870e815caf5014255375f8956850095165e7b89b | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/distributed_passes/test_build_cinn_pass_resnet.py | 6f608a5299670435d9be93a605837060354b0858 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 1,384 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from dist_pass_test_base import DistPassTestBase
from model_zoo import resnet_model
import paddle
from paddle.distributed.passes import PassManager, new_pass
class TestBuildCINNPass(DistPassTestBase):
    """Runs resnet_model with the build_cinn + fuse_elewise_add_act passes
    applied; result comparison is done by DistPassTestBase.check_main
    (presumably against an un-transformed baseline — see the base class)."""

    def init(self):
        # Loose comparison: absolute tolerance 0.5, no relative tolerance.
        self.atol = 0.5
        self.rtol = 0.0

    def apply_passes(self, main_prog, startup_prog):
        # Apply both passes to the main program; startup program is passed
        # alongside as required by PassManager.apply.
        pass_manager = PassManager(
            [
                new_pass("build_cinn"),
                new_pass("fuse_elewise_add_act"),
            ]
        )
        pass_manager.apply([main_prog], [startup_prog])
        print(pass_manager.names)

    def test_bs_32(self):
        # Only meaningful when Paddle was built with CINN support.
        if paddle.is_compiled_with_cinn():
            self.check_main(resnet_model, batch_size=32)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
dd5091a2cf2790d210743f7a8548f6daf53c1721 | d0cb58e1658d4b5b88bdc07e497dc8092707ae02 | /2021/02february/05dialect.py | df530b033aff277696ce9660a95e906229059d3a | [] | no_license | June-fu/python365 | 27f9b753d38ade549d59aa8f2d8bda0fb8b1e20c | 242033a4b644a7566fbfa4dba9b60f60aa31fe91 | refs/heads/master | 2021-07-02T21:42:28.454091 | 2021-05-04T15:08:44 | 2021-05-04T15:08:44 | 233,629,713 | 0 | 0 | null | 2020-01-13T15:52:58 | 2020-01-13T15:36:53 | null | UTF-8 | Python | false | false | 868 | py | #!/usr/bin/python
'''
# @ Author: june-fu
# @ Create Time: 2021-03-13 22:45:08
# @ Modified by: june-fu
# @ Modified time: 2021-03-13 22:45:10
# @ Description: examples of pandas.read_csv dialect-related options
'''

import pandas as pd
from io import StringIO

# Sample with an UNBALANCED double quote — parseable only with quoting off.
data = 'label1,label2,label3\nindex1,"a,c,e\nindex2,b,d,f'
print(data)
# By default, read_csv uses the Excel dialect and treats the double quote as the quote character
import csv
dia = csv.excel()
# Disable quote handling so the stray '"' is read as literal data.
dia.quoting = csv.QUOTE_NONE
df = pd.read_csv(StringIO(data), dialect=dia)
print(df)
# all of the dialect options can be specified separately by keyword arguments
data2 = 'a,b,c~1,2,3~4,5,6'
print(pd.read_csv(StringIO(data2), lineterminator='~'))
# another common dialect option is skipinitialspace, to skip any whitespace after a delimiter
data3 = 'a, b, c\n1, 3, 2\n4, 5, 6'
print(data3)
print(pd.read_csv(StringIO(data3), skipinitialspace=True))
"fujun1990@gmail.com"
] | fujun1990@gmail.com |
d1cb7036a1cf941780faab1c0b64128cb8f1ec78 | 1070490055b5c981d936038959731134b01ce272 | /apps/users/migrations/0005_auto_20170327_1444.py | ee50300c072d68d14bdc022c9266044d9a86412f | [] | no_license | ljingen/MxOnline | 401d5be37e11cb866dc8eb78acc9b6de053c5708 | 1b471dd6b4968f79dd6866bb5e3e6413b760c8a1 | refs/heads/master | 2021-10-11T08:57:05.304124 | 2018-02-11T06:59:32 | 2018-02-11T06:59:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 14:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefines UserProfile.gender choices."""

    dependencies = [
        ('users', '0004_banner_emailverifyrecord'),
    ]

    operations = [
        # Gender becomes a 6-char field with MAN/FEMALE choices; the labels
        # are unicode-escaped Chinese strings (py2 ASCII-safe source).
        migrations.AlterField(
            model_name='userprofile',
            name='gender',
            field=models.CharField(choices=[('MAN', '\u7537'), ('FEMALE', '\u5973')], default='MAN', max_length=6, verbose_name='\u6027\u522b'),
        ),
    ]
| [
"luojingen@aliyun.com"
] | luojingen@aliyun.com |
8039502bd646c312cc145afd795a2888c0b371f6 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-baas/aliyunsdkbaas/request/v20180731/DescribeOrdererLogsRequest.py | be856c0d614b151f838c0ed18c1a50a5aca82921 | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeOrdererLogsRequest(RpcRequest):
    """RPC request for the Aliyun Baas DescribeOrdererLogs action
    (product 'Baas', API version 2018-07-31)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Baas', '2018-07-31', 'DescribeOrdererLogs')

    # Number of log lines to fetch.
    def get_Lines(self):
        return self.get_query_params().get('Lines')

    def set_Lines(self,Lines):
        self.add_query_param('Lines',Lines)

    # Identifier of the consortium the orderer belongs to.
    def get_ConsortiumId(self):
        return self.get_query_params().get('ConsortiumId')

    def set_ConsortiumId(self,ConsortiumId):
        self.add_query_param('ConsortiumId',ConsortiumId)

    # Name of the orderer node whose logs are requested.
    def get_OrdererName(self):
        return self.get_query_params().get('OrdererName')

    def set_OrdererName(self,OrdererName):
        self.add_query_param('OrdererName',OrdererName)
"1478458905@qq.com"
] | 1478458905@qq.com |
a212b30d912750ff9fce5a42e4e78bf0e89cea39 | dc18846eb4e956e3067b187c17eec203ad02b732 | /Web Scrapping/html/lucky.py | 5fbfee4cc9ed6bc9221a0ad5ef2a614573aef13e | [] | no_license | rafal-mizera/automate_the_boring_stuff | 53e46e2f5668b3f64f67297c997f2a44695d765b | 966e9d04546f33f7fcd12c569e19c4d736a4eb44 | refs/heads/master | 2023-07-17T08:41:03.235120 | 2021-09-01T16:28:57 | 2021-09-01T16:28:57 | 402,127,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | #! python3
# lucky.py - Opens several Google search results.
import requests
import webbrowser
import bs4
import sys
print('Googling...') # display text while downloading the google page
# searched = sys.argv[1:]
searched = "polska"
res = requests.get(f"http://www.google.com/search?q={searched}")
# res.raise_for_status()
soup = bs4.BeautifulSoup(res.text,features="html.parser")
links_to_open = soup.select('div#main > div > div > div > a')
print(links_to_open)
numOpen = min(5,len(links_to_open))
for i in range(numOpen):
webbrowser.open("google.com" + links_to_open[i].get("href"))
| [
"rafalmizera11@gmail.com"
] | rafalmizera11@gmail.com |
224346578cbb7fb74119a3175e7fac840954cc3e | bf60236048450e951b994d66666edd4f73be5101 | /application/frontend/views.py | 80bfb6ac9c738a29fa388e03e36b2a262d0f01d2 | [
"MIT"
] | permissive | DBeath/testing-flask-tutorial | 0fe756beffc44ef49e55493e337022a263350f20 | 1eecb2c49c19d0ced001f164c11f3d0dfe5b9d7a | refs/heads/master | 2021-01-12T02:50:22.130064 | 2017-04-26T15:09:28 | 2017-04-26T15:09:28 | 78,115,654 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | from flask import request, make_response, abort, Response, jsonify
from application.frontend import frontend_blueprint as bp
from application.frontend.models import User, user_schema
from application.database import db
@bp.route('/')
def index():
    """Frontend landing page: a plain-text greeting."""
    greeting = 'Hello World!'
    return greeting
def add(a, b):
    """Return the sum of *a* and *b*, coercing each operand with int()."""
    total = int(a) + int(b)
    return total
@bp.route('/add', methods=['POST'])
def add_view():
    """Sum the ``input1``/``input2`` query parameters as integers.

    Responds 400 when either parameter is missing or empty.
    """
    first = request.args.get('input1')
    second = request.args.get('input2')
    if not first or not second:
        return abort(400)
    return make_response(str(add(first, second)))
@bp.route('/users')
def get_users():
    """List every user, serialized as a JSON array."""
    all_users = User.query.all()
    serialized = user_schema.dump(all_users, many=True).data
    return jsonify(serialized)
@bp.route('/users', methods=['POST'])
def create_user():
    """Create a user named after the ``name`` query parameter.

    Returns the freshly persisted user as JSON.
    """
    new_user = User(name=request.args.get('name'))
    db.session.add(new_user)
    db.session.commit()
    return jsonify(user=user_schema.dump(new_user).data)
@bp.route('/users/<int:user_id>', methods=['GET', 'POST'])
def single_user(user_id):
    """Fetch (GET) or rename (POST) one user; 404 when the id is unknown."""
    user = User.query.filter_by(id=user_id).first()
    if user is None:
        abort(404)
    # Bug fix: flask exposes the HTTP verb as ``request.method`` (singular);
    # ``request.methods`` does not exist, so the POST branch raised
    # AttributeError instead of updating the user.
    if request.method == 'POST':
        name = request.args.get('name')
        user.name = name
        db.session.commit()
    user_dump = user_schema.dump(user).data
    return jsonify(user=user_dump)
@bp.route('/users/<int:user_id>/delete', methods=['POST'])
def delete_user(user_id):
    """Delete the user with *user_id*; 404 when unknown, empty 200 on success."""
    user = User.query.filter_by(id=user_id).first()
    if user is None:
        abort(404)
    db.session.delete(user)
    db.session.commit()
    # Bug fix: the Werkzeug/Flask Response constructor takes ``status=``;
    # ``status_code=`` is not a parameter and raised TypeError after every
    # successful delete.
    return Response(status=200)
| [
"davidgbeath@gmail.com"
] | davidgbeath@gmail.com |
fa8fd1e5e8b9ef6df7d8391d8581f2b663c17a64 | 21e6fd368aee8acb80747141291a00f83fb67d1e | /python/WoT/SourcesRes/9.10/client/messenger/proto/xmpp/xmppserversettings.py | db4502d71aa9092ff1e3783f2c8d59a2f969d7d8 | [] | no_license | Infernux/Projects | e7d1eab9b25471c543aa82985ec0bfcca2cfe05e | da7a9f71231b76dafbc4c7348065f1fc2dead854 | refs/heads/master | 2023-08-24T09:17:23.834855 | 2023-08-05T14:18:46 | 2023-08-05T14:18:46 | 23,395,952 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,157 | py | # 2015.01.14 22:41:28 CET
import types
from debug_utils import LOG_ERROR
from messenger.proto.interfaces import IProtoSettings
from messenger.proto.xmpp.gloox_wrapper import CONNECTION_IMPL_TYPE
from messenger.proto.xmpp.jid import JID
import random
_NUMBER_OF_ITEMS_IN_SAMPLE = 2
def _makeSample(*args):
queue = []
for seq in args:
count = min(len(seq), _NUMBER_OF_ITEMS_IN_SAMPLE)
queue.extend(random.sample(seq, count))
return queue
def _validateConnection(record):
result = True
if len(record) == 2:
(host, port,) = record
if not host:
result = False
if type(port) is not types.IntType:
result = False
else:
result = False
return result
class ConnectionsIterator(object):
    """Iterator over candidate XMPP endpoints.

    Sampled TCP endpoints (base + alternative hosts) are exhausted before
    any BOSH endpoints.  Each step yields an (impl_type, host, port) triple.
    """
    def __init__(self, base = None, alt = None, bosh = None):
        super(ConnectionsIterator, self).__init__()
        self.__tcp = _makeSample(base or [], alt or [])
        self.__bosh = _makeSample(bosh or [])
    def __iter__(self):
        # The object is its own (Python-2 style) iterator.
        return self
    def __len__(self):
        return len(self.__tcp) + len(self.__bosh)
    def clear(self):
        """Drop every remaining candidate."""
        self.__tcp = []
        self.__bosh = []
    def hasNext(self):
        """True while at least one endpoint has not been consumed."""
        return bool(self.__tcp or self.__bosh)
    def next(self):
        if self.__tcp:
            host, port = self.__tcp.pop(0)
            return (CONNECTION_IMPL_TYPE.TCP, host, port)
        if self.__bosh:
            host, port = self.__bosh.pop(0)
            return (CONNECTION_IMPL_TYPE.BOSH, host, port)
        raise StopIteration
class XmppServerSettings(IProtoSettings):
    """XMPP connectivity settings parsed from the raw server settings blob."""
    __slots__ = ('enabled', 'connections', 'domain', 'port', 'resource', 'altConnections', 'boshConnections')
    def __init__(self):
        super(XmppServerSettings, self).__init__()
        self.clear()  # start in the disabled/default state
    def __repr__(self):
        # Debug representation exposing every slot value.
        return 'XmppServerSettings(enabled = {0!r:s}, connections = {1!r:s}, altConnections = {2!r:s}, boshConnections = {3!r:s}, domain = {4:>s}, port = {5:n}, resource = {6:>s})'.format(self.enabled, self.connections, self.altConnections, self.boshConnections, self.domain, self.port, self.resource)
def update(self, data):
if 'xmpp_connections' in data:
self.connections = filter(_validateConnection, data['xmpp_connections'])
else:
self.connections = []
if 'xmpp_alt_connections' in data:
self.altConnections = filter(_validateConnection, data['xmpp_alt_connections'])
else:
self.altConnections = []
if 'xmpp_bosh_connections' in data:
self.boshConnections = filter(_validateConnection, data['xmpp_bosh_connections'])
else:
self.boshConnections = []
if 'xmpp_host' in data:
self.domain = data['xmpp_host']
else:
self.domain = ''
if 'xmpp_port' in data:
self.port = data['xmpp_port']
else:
self.port = -1
if 'xmpp_resource' in data:
self.resource = data['xmpp_resource']
else:
self.resource = ''
if 'xmpp_enabled' in data:
self.enabled = data['xmpp_enabled']
if self.enabled and not self.connections and not self.altConnections and not self.boshConnections and not self.domain:
LOG_ERROR('Can not find host to connection. XMPP is disabled', self.connections, self.altConnections, self.domain)
self.enabled = False
else:
self.enabled = False
    def clear(self):
        # Reset to defaults: XMPP disabled, no endpoints, no identity.
        self.enabled = False
        self.connections = []
        self.altConnections = []
        self.boshConnections = []
        self.domain = None
        self.port = -1
        self.resource = ''
    def isEnabled(self):
        # IProtoSettings hook: whether the XMPP protocol should be used.
        return self.enabled
def getFullJID--- This code section failed: ---
0 LOAD_FAST 'databaseID'
3 POP_JUMP_IF_TRUE '18'
6 LOAD_ASSERT 'AssertionError'
9 LOAD_CONST "Player's databaseID can not be empty"
12 CALL_FUNCTION_1 ''
15 RAISE_VARARGS ''
18 LOAD_GLOBAL 'JID'
21 CALL_FUNCTION_0 ''
24 STORE_FAST 'jid'
27 LOAD_FAST 'jid'
30 LOAD_ATTR 'setNode'
33 LOAD_FAST 'databaseID'
36 CALL_FUNCTION_1 ''
39 POP_TOP ''
40 LOAD_FAST 'jid'
43 LOAD_ATTR 'setDomain'
46 LOAD_FAST 'self'
49 LOAD_ATTR 'domain'
52 CALL_FUNCTION_1 ''
55 POP_TOP ''
56 LOAD_FAST 'jid'
59 LOAD_ATTR 'setResource'
62 LOAD_FAST 'self'
65 LOAD_ATTR 'resource'
68 CALL_FUNCTION_1 ''
71 POP_TOP ''
72 LOAD_FAST 'jid'
75 RETURN_VALUE ''
-1 RETURN_LAST ''
Syntax error at or near `RETURN_VALUE' token at offset 75
def getConnectionsIterator(self):
iterator = ConnectionsIterator(self.connections, self.altConnections, self.boshConnections)
if not iterator.hasNext():
iterator = ConnectionsIterator([(self.domain, self.port)])
return iterator
# decompiled 0 files: 0 okay, 1 failed, 0 verify failed
# 2015.01.14 22:41:28 CET
| [
"mrnux@nux.com"
] | mrnux@nux.com |
26ac13b6fb750d4aaa746aaca2f2568788d5d99b | e61749d3dd1999e938d494b8181753c3552f21de | /HW6/P3b.py | f2de3ec5ac9398e591ba37ef5a6bfbe953ee7c01 | [] | no_license | arsalan2400/HWSolutions | 6b3f95b924769dd795a1c638d3e0b9d0c055c7ab | 790625048f1addf3828a9f415e3237fd76d91a90 | refs/heads/master | 2020-03-12T18:18:23.685324 | 2018-04-22T01:23:48 | 2018-04-22T01:23:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | import HW6.Classes as Cls
import scr.FigureSupport as figureLibrary
# create a multiple game sets
multipleGameSets=Cls.MultipleGameSets(ids=range(1000), prob_head=0.5, n_games_in_a_set=10)
# simulate all game sets
multipleGameSets.simulation()
# print projected mean reward
print('Projected mean reward',
multipleGameSets.get_mean_total_reward())
# print projection interval
print('95% projection interval of average rewards',
multipleGameSets.get_PI_total_reward(0.05))
# plot
figureLibrary.graph_histogram(
data=multipleGameSets.get_all_total_rewards(),
title="Histogram of gambler's total reward from playing the gam 10 times",
x_label='Mean Rewards',
y_label='Count')
print('We need a transient-state simulation for this perspective.')
print('We are not able to rely on the Law of Large Numbers to make inference because our data is very limited.')
print('Therefore, we must use the sample mean and projection intervals for interpretation.') | [
"reza.yaesoubi@yale.edu"
] | reza.yaesoubi@yale.edu |
4c0d32ed51f42bfd3f2e7e885983d382928996d7 | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartB/py是否为其他单词的前缀信息.py | 9d72ce3ca43c4346a0d144a49300729dbb43f94a | [] | no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py |
'''
给你一个字符串 sentence 作为句子并指定检索词为 searchWord ,其中句子由若干用 单个空格 分隔的单词组成。
请你检查检索词 searchWord 是否为句子 sentence 中任意单词的前缀。
如果 searchWord 是某一个单词的前缀,则返回句子 sentence 中该单词所对应的下标(下标从 1 开始)。
如果 searchWord 是多个单词的前缀,则返回匹配的第一个单词的下标(最小下标)。
如果 searchWord 不是任何单词的前缀,则返回 -1 。
字符串 S 的 「前缀」是 S 的任何前导连续子字符串。
示例 1:
输入:sentence = "i love eating burger", searchWord = "burg"
输出:4
解释:"burg" 是 "burger" 的前缀,而 "burger" 是句子中第 4 个单词。
示例 2:
输入:sentence = "this problem is an easy problem", searchWord = "pro"
输出:2
解释:"pro" 是 "problem" 的前缀,而 "problem" 是句子中第 2 个也是第 6 个单词,但是应该返回最小下标 2 。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/check-if-a-word-occurs-as-a-prefix-of-any-word-in-a-sentence
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
#================================================================================================================================
class Solution:
    def isPrefixOfWord(self, sentence: str, searchWord: str) -> int:
        """Return the 1-based index of the first space-separated word of
        *sentence* that starts with *searchWord*, or -1 when none does."""
        words = sentence.split()
        matches = (idx for idx, word in enumerate(words, 1) if word.startswith(searchWord))
        return next(matches, -1)
if __name__ == "__main__":
    # Ad-hoc smoke test; expected output: 4 ("burg" prefixes word #4).
    s =Solution()
    sentence = "i love eating burger"
    searchWord = "burg"
    print(s.isPrefixOfWord(sentence, searchWord))
| [
"2901429479@qq.com"
] | 2901429479@qq.com |
357507b900e424e19a92a66e71c1080beba97bfa | b47f2e3f3298388b1bcab3213bef42682985135e | /experiments/fdtd-2d/tmp_files/6239.py | fa47a2670566765c6d603faaaae743187fd6ebde | [
"BSD-2-Clause"
] | permissive | LoopTilingBenchmark/benchmark | 29cc9f845d323431e3d40e878cbfc6d1aad1f260 | 52a3d2e70216552a498fd91de02a2fa9cb62122c | refs/heads/master | 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | from chill import *
# CHiLL recipe: tile the PolyBench fdtd-2d stencil kernel.
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/6239.c')
procedure('kernel_fdtd_2d')
loop(0)
# Problem-size facts the transformation framework may assume.
known(' nx > 1 ')
known(' ny > 1 ')
# Two-level tiling for statements 1-3; arguments are presumably
# (statement, loop level, tile size, control level) -- TODO confirm
# against the CHiLL tile() documentation.
tile(1,2,16,2)
tile(1,4,64,4)
tile(2,2,16,2)
tile(2,4,64,4)
tile(3,2,16,2)
tile(3,4,64,4)
| [
"nashenruoyang@163.com"
] | nashenruoyang@163.com |
9ca6187c5ae5548d7761cabf96c1ea07cc15a17e | c3432a248c8a7a43425c0fe1691557c0936ab380 | /CodePlus_Practice_RE/2021.03.FourthWeek/0326/14395_4연산.py | 08550d6f6d30ea04c95e486d19ef4fba1ed9b403 | [] | no_license | Parkyunhwan/BaekJoon | 13cb3af1f45212d7c418ecc4b927f42615b14a74 | 9a882c568f991c9fed3df45277f091626fcc2c94 | refs/heads/master | 2022-12-24T21:47:47.052967 | 2022-12-20T16:16:59 | 2022-12-20T16:16:59 | 232,264,447 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | from collections import deque
from collections import defaultdict
# BOJ 14395 "4 operations": BFS from s towards t.  Each step replaces the
# current value x with x*x, x+x, x-x (=0) or x/x (=1); exploring in the
# order * + - / yields the lexicographically smallest shortest answer.
s, t = map(int, input().split())
if s == t:
    print(0)
    exit(0)
q = deque()
q.append((s, []))
dic = defaultdict(int)  # visited marker per reachable value
dic[s] += 1
while q:
    curr, oper = q.popleft()
    if curr == t:
        print(''.join(oper))
        exit(0)
    if curr <= t and curr * curr <= 10e9:
        if dic[curr * curr] == 0:
            tmp = oper[:]
            tmp.append('*')
            dic[curr * curr] += 1
            q.append((curr * curr, tmp))
    if curr <= t and curr + curr <= 10e9:
        if dic[curr + curr] == 0:
            tmp = oper[:]
            tmp.append('+')
            dic[curr + curr] += 1
            q.append((curr + curr, tmp))
    if curr - curr >= 0:
        if dic[curr - curr] == 0:
            tmp = oper[:]
            tmp.append('-')
            # Bug fix: the original incremented dic[curr + curr] here, so the
            # state produced by '-' (always 0) was never marked visited and
            # was re-enqueued for every dequeued node.
            dic[curr - curr] += 1
            q.append((curr - curr, tmp))
    if curr != 0:
        # NOTE(review): curr / curr yields the float 1.0, so this successor
        # lives under a float key; integer division (//) would keep every
        # state an int -- confirm intent (also: 10e9 above is 1e10, not 1e9).
        if dic[curr / curr] == 0:
            tmp = oper[:]
            tmp.append('/')
            dic[curr / curr] += 1
            q.append((curr / curr, tmp))
print(-1)
| [
"pyh8618@gmail.com"
] | pyh8618@gmail.com |
955105e9389475e6367dc2e9b50e0e8ddacaa43e | 45da24ad0793ced3ce4a332486877ebdd9776388 | /app/main/docs/schemas/survey_result_schema.py | 357b35a9532be93bc86d6d0ed3360f8eb8b6bc82 | [] | no_license | luccasPh/clean-python-api | 2fce7003646613ad543b9e8e4afd77bd4b49a25e | bc10bdc485bbec1c02c73783109c178d887514f1 | refs/heads/master | 2023-03-24T11:30:40.380210 | 2021-03-19T19:11:24 | 2021-03-19T19:11:24 | 343,474,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | save_survey_result_schema = dict(
    # Request-body schema: a single required string field "answer".
    type="object", properties=dict(answer=dict(type="string")), required=["answer"]
)
# Response schema for a loaded survey result: the survey identity, its
# question, per-answer tallies and the result date.
load_survey_result_schema = {
    "type": "object",
    "properties": {
        "survey_id": {"type": "string"},
        "question": {"type": "string"},
        "answers": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "image": {"type": "string"},
                    "answers": {"type": "string"},
                    "count": {"type": "number"},
                    "percent": {"type": "number"},
                },
            },
        },
        "date": {"type": "string"},
    },
}
| [
"lucas4pinheiro@gmail.com"
] | lucas4pinheiro@gmail.com |
6e316a177a030e3c355047cc7aa62c2a13b91b5f | 20a23e195cb41138ea46fae4773444c7d87a51f0 | /homeassistant/components/tellduslive/config_flow.py | 3373e9cc2f7ca74aebec0c7dc1b69d4c595525e3 | [
"Apache-2.0"
] | permissive | poma/home-assistant | 37b11a5784a4e7e960bec7bf6ea1f41c66d834ee | 203190f705fa280564734ba5a2281592ef535ed4 | refs/heads/dev | 2020-04-17T17:27:35.244055 | 2019-01-23T08:06:27 | 2019-01-23T08:06:27 | 166,782,737 | 1 | 0 | Apache-2.0 | 2019-01-21T09:11:27 | 2019-01-21T09:11:24 | null | UTF-8 | Python | false | false | 5,279 | py | """Config flow for Tellduslive."""
import asyncio
import logging
import os
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.util.json import load_json
from .const import (
APPLICATION_NAME, CLOUD_NAME, DOMAIN, KEY_HOST, KEY_SCAN_INTERVAL,
KEY_SESSION, NOT_SO_PRIVATE_KEY, PUBLIC_KEY, SCAN_INTERVAL,
TELLDUS_CONFIG_FILE)
KEY_TOKEN = 'token'  # session-dict key: OAuth access token
KEY_TOKEN_SECRET = 'token_secret'  # session-dict key: token secret (stored only for cloud sessions)
_LOGGER = logging.getLogger(__name__)  # module-level logger
@config_entries.HANDLERS.register('tellduslive')
class FlowHandler(config_entries.ConfigFlow):
    """Handle a config flow."""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
    def __init__(self):
        """Init config flow."""
        # Selectable hosts start with the cloud service; discovery may append
        # local Tellstick hosts later.
        self._hosts = [CLOUD_NAME]
        self._host = None  # None means "use the cloud service"
        self._session = None
        self._scan_interval = SCAN_INTERVAL
    def _get_auth_url(self):
        # Create a tellduslive Session for the chosen host and return the
        # URL the user must visit to authorize this application.
        from tellduslive import Session
        self._session = Session(
            public_key=PUBLIC_KEY,
            private_key=NOT_SO_PRIVATE_KEY,
            host=self._host,
            application=APPLICATION_NAME,
        )
        return self._session.authorize_url
    async def async_step_user(self, user_input=None):
        """Let user select host or cloud."""
        # Only a single TelldusLive entry is supported.
        if self.hass.config_entries.async_entries(DOMAIN):
            return self.async_abort(reason='already_setup')
        # Skip the form when the user already chose, or when the cloud is
        # the only option anyway.
        if user_input is not None or len(self._hosts) == 1:
            if user_input is not None and user_input[KEY_HOST] != CLOUD_NAME:
                self._host = user_input[KEY_HOST]
            return await self.async_step_auth()
        return self.async_show_form(
            step_id='user',
            data_schema=vol.Schema({
                vol.Required(KEY_HOST):
                vol.In(list(self._hosts))
            }))
    async def async_step_auth(self, user_input=None):
        """Handle the submitted configuration."""
        errors = {}
        if user_input is not None:
            # The user claims to have visited the auth URL; try to finalize.
            if await self.hass.async_add_executor_job(
                    self._session.authorize):
                host = self._host or CLOUD_NAME
                if self._host:
                    # Local API sessions only need host + access token.
                    session = {
                        KEY_HOST: host,
                        KEY_TOKEN: self._session.access_token
                    }
                else:
                    # Cloud sessions use an OAuth1-style token + secret pair.
                    session = {
                        KEY_TOKEN: self._session.access_token,
                        KEY_TOKEN_SECRET: self._session.access_token_secret
                    }
                return self.async_create_entry(
                    title=host, data={
                        KEY_HOST: host,
                        KEY_SCAN_INTERVAL: self._scan_interval.seconds,
                        KEY_SESSION: session,
                    })
            else:
                errors['base'] = 'auth_error'
        # (Re)generate the authorization URL for the form.
        try:
            with async_timeout.timeout(10):
                auth_url = await self.hass.async_add_executor_job(
                    self._get_auth_url)
                if not auth_url:
                    return self.async_abort(reason='authorize_url_fail')
        except asyncio.TimeoutError:
            return self.async_abort(reason='authorize_url_timeout')
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected error generating auth url")
            return self.async_abort(reason='authorize_url_fail')
        _LOGGER.debug('Got authorization URL %s', auth_url)
        return self.async_show_form(
            step_id='auth',
            errors=errors,
            description_placeholders={
                'app_name': APPLICATION_NAME,
                'auth_url': auth_url,
            },
        )
    async def async_step_discovery(self, user_input):
        """Run when a Tellstick is discovered."""
        from tellduslive import supports_local_api
        # NOTE(review): user_input appears to be a pair of
        # (host/address, version-info) judging by the [0]/[1] indexing --
        # confirm against the discovery component.
        _LOGGER.info('Discovered tellstick device: %s', user_input)
        if supports_local_api(user_input[1]):
            _LOGGER.info('%s support local API', user_input[1])
            self._hosts.append(user_input[0])
        return await self.async_step_user()
    async def async_step_import(self, user_input):
        """Import a config entry."""
        if self.hass.config_entries.async_entries(DOMAIN):
            return self.async_abort(reason='already_setup')
        self._scan_interval = user_input[KEY_SCAN_INTERVAL]
        if user_input[KEY_HOST] != DOMAIN:
            self._hosts.append(user_input[KEY_HOST])
        # Without a legacy config file there is nothing to import directly;
        # fall back to the interactive flow.
        if not await self.hass.async_add_executor_job(
                os.path.isfile, self.hass.config.path(TELLDUS_CONFIG_FILE)):
            return await self.async_step_user()
        conf = await self.hass.async_add_executor_job(
            load_json, self.hass.config.path(TELLDUS_CONFIG_FILE))
        host = next(iter(conf))  # legacy file is keyed by host
        if user_input[KEY_HOST] != host:
            return await self.async_step_user()
        # Map the legacy 'tellduslive' key onto the cloud pseudo-host name.
        host = CLOUD_NAME if host == 'tellduslive' else host
        return self.async_create_entry(
            title=host,
            data={
                KEY_HOST: host,
                KEY_SCAN_INTERVAL: self._scan_interval.seconds,
                KEY_SESSION: next(iter(conf.values())),
            })
| [
"marhje52@kth.se"
] | marhje52@kth.se |
d5a3bdb7ab7125f19315ec09c924533d2f8e7815 | c3a4658077c689710abf5ec846c8c59cbda16a51 | /fbgemm_gpu/codegen/split_embedding_codegen_lookup_invoker.template | db1b96078a37023682a79053515d5dd67e25b31a | [
"BSD-3-Clause"
] | permissive | jiecaoyu/FBGEMM | 6a85c5d2e9ee75e2f62bf428332c83e0366703b3 | 2c547924deafa1839483d31096de800078c35711 | refs/heads/main | 2023-03-16T23:29:36.266634 | 2022-06-03T21:05:49 | 2022-06-03T21:05:49 | 237,500,435 | 0 | 0 | NOASSERTION | 2021-11-15T23:46:24 | 2020-01-31T19:21:59 | null | UTF-8 | Python | false | false | 7,507 | template | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from .lookup_args import *
{% if is_fbcode %}
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings"
)
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/fb:embedding_inplace_update")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/fb:embedding_inplace_update_cpu")
{% else %}
#import os
#torch.ops.load_library(os.path.join(os.path.join(os.path.dirname(os.path.dirname(__file__)), "fbgemm_gpu_py.so")))
{% endif %}
def invoke(
common_args: CommonArgs,
optimizer_args: OptimizerArgs,
{% if "momentum1_dev" in args.split_function_arg_names %}
momentum1: Momentum,
{% endif %}
{% if "momentum2_dev" in args.split_function_arg_names %}
momentum2: Momentum,
{% endif %}
{% if "iter" in args.split_function_arg_names %}
iter: int,
{% endif %}
) -> torch.Tensor:
if (common_args.host_weights.numel() > 0):
return torch.ops.fbgemm.split_embedding_codegen_lookup_{{ optimizer }}_function_cpu(
# common_args
host_weights=common_args.host_weights,
weights_placements=common_args.weights_placements,
weights_offsets=common_args.weights_offsets,
D_offsets=common_args.D_offsets,
total_D=common_args.total_D,
max_D=common_args.max_D,
hash_size_cumsum=common_args.hash_size_cumsum,
total_hash_size_bits=common_args.total_hash_size_bits,
indices=common_args.indices,
offsets=common_args.offsets,
pooling_mode=common_args.pooling_mode,
indice_weights=common_args.indice_weights,
feature_requires_grad=common_args.feature_requires_grad,
# optimizer_args
gradient_clipping = optimizer_args.gradient_clipping,
max_gradient=optimizer_args.max_gradient,
stochastic_rounding=optimizer_args.stochastic_rounding,
{% if "learning_rate" in args.split_function_arg_names %}
learning_rate=optimizer_args.learning_rate,
{% endif %}
{% if "eps" in args.split_function_arg_names %}
eps=optimizer_args.eps,
{% endif %}
{% if "beta1" in args.split_function_arg_names %}
beta1=optimizer_args.beta1,
{% endif %}
{% if "beta2" in args.split_function_arg_names %}
beta2=optimizer_args.beta2,
{% endif %}
{% if "weight_decay" in args.split_function_arg_names %}
weight_decay=optimizer_args.weight_decay,
{% endif %}
{% if "weight_decay_mode" in args.split_function_arg_names %}
weight_decay_mode=optimizer_args.weight_decay_mode,
{% endif %}
{% if "eta" in args.split_function_arg_names %}
eta=optimizer_args.eta,
{% endif %}
{% if "momentum" in args.split_function_arg_names %}
momentum=optimizer_args.momentum,
{% endif %}
# momentum1
{% if "momentum1_dev" in args.split_function_arg_names %}
momentum1_host=momentum1.host,
momentum1_offsets=momentum1.offsets,
momentum1_placements=momentum1.placements,
{% endif %}
# momentum2
{% if "momentum2_dev" in args.split_function_arg_names %}
momentum2_host=momentum2.host,
momentum2_offsets=momentum2.offsets,
momentum2_placements=momentum2.placements,
{% endif %}
# iter
{% if "iter" in args.split_function_arg_names %}
iter=iter,
{% endif %}
)
else:
return torch.ops.fbgemm.split_embedding_codegen_lookup_{{ optimizer }}_function(
# common_args
{% if not dense %}
placeholder_autograd_tensor=common_args.placeholder_autograd_tensor,
{% endif %}
dev_weights=common_args.dev_weights,
uvm_weights=common_args.uvm_weights,
lxu_cache_weights=common_args.lxu_cache_weights,
weights_placements=common_args.weights_placements,
weights_offsets=common_args.weights_offsets,
D_offsets=common_args.D_offsets,
total_D=common_args.total_D,
max_D=common_args.max_D,
hash_size_cumsum=common_args.hash_size_cumsum,
total_hash_size_bits=common_args.total_hash_size_bits,
indices=common_args.indices,
offsets=common_args.offsets,
pooling_mode=common_args.pooling_mode,
indice_weights=common_args.indice_weights,
feature_requires_grad=common_args.feature_requires_grad,
lxu_cache_locations=common_args.lxu_cache_locations,
# optimizer_args
gradient_clipping = optimizer_args.gradient_clipping,
max_gradient=optimizer_args.max_gradient,
stochastic_rounding=optimizer_args.stochastic_rounding,
{% if "learning_rate" in args.split_function_arg_names %}
learning_rate=optimizer_args.learning_rate,
{% endif %}
{% if "eps" in args.split_function_arg_names %}
eps=optimizer_args.eps,
{% endif %}
{% if "beta1" in args.split_function_arg_names %}
beta1=optimizer_args.beta1,
{% endif %}
{% if "beta2" in args.split_function_arg_names %}
beta2=optimizer_args.beta2,
{% endif %}
{% if "weight_decay" in args.split_function_arg_names %}
weight_decay=optimizer_args.weight_decay,
{% endif %}
{% if "weight_decay_mode" in args.split_function_arg_names %}
weight_decay_mode=optimizer_args.weight_decay_mode,
{% endif %}
{% if "eta" in args.split_function_arg_names %}
eta=optimizer_args.eta,
{% endif %}
{% if "momentum" in args.split_function_arg_names %}
momentum=optimizer_args.momentum,
{% endif %}
# momentum1
{% if "momentum1_dev" in args.split_function_arg_names %}
momentum1_dev=momentum1.dev,
momentum1_uvm=momentum1.uvm,
momentum1_offsets=momentum1.offsets,
momentum1_placements=momentum1.placements,
{% endif %}
# momentum2
{% if "momentum2_dev" in args.split_function_arg_names %}
momentum2_dev=momentum2.dev,
momentum2_uvm=momentum2.uvm,
momentum2_offsets=momentum2.offsets,
momentum2_placements=momentum2.placements,
{% endif %}
# iter
{% if "iter" in args.split_function_arg_names %}
iter=iter,
{% endif %}
output_dtype=common_args.output_dtype,
)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
1b37fa5b76c1aa2a32d1eb90b32391cad1c509f5 | cc72013ede1b3bb02c32a3d0d199be4f7986c173 | /ch10/DieViewColor.py | 58b5529ab431c7372c00faa9de35170a11b1cd23 | [] | no_license | alextickle/zelle-exercises | b87d2a1476189954565f5cc97ee1448200eb00d4 | b784ff9ed9b2cb1c56e31c1c63f3e2b52fa37875 | refs/heads/master | 2021-01-19T00:33:19.132238 | 2017-09-14T23:35:35 | 2017-09-14T23:35:35 | 87,182,609 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,375 | py | # DieViewColor.py
from graphics import *
class DieViewColor:
    """Widget that draws one die face (values 1-6) in a graphics window.

    Pip layout and numbering (derived from the offsets in __init__):
        1     5
        2  4  6
        3     7
    """
    # Which pips light up for each die value; any other value blanks the face.
    _PIPS_FOR_VALUE = {
        1: (4,),
        2: (1, 7),
        3: (1, 4, 7),
        4: (1, 3, 5, 7),
        5: (1, 3, 4, 5, 7),
        6: (1, 2, 3, 5, 6, 7),
    }

    def __init__(self, win, center, size):
        self.win = win
        self.background = "white" # color of die face
        self.foreground = "black" # color of pips
        self.psize = 0.1 * size # radius of each pip
        hsize = size / 2.0
        offset = 0.6 * hsize
        cx, cy = center.getX(), center.getY()
        p1 = Point(cx - hsize, cy - hsize)
        p2 = Point(cx + hsize, cy + hsize)
        rect = Rectangle(p1, p2)
        rect.draw(win)
        rect.setFill(self.background)
        self.pip1 = self.__makePip(cx - offset, cy - offset)
        self.pip2 = self.__makePip(cx - offset, cy)
        self.pip3 = self.__makePip(cx - offset, cy + offset)
        self.pip4 = self.__makePip(cx, cy)
        self.pip5 = self.__makePip(cx + offset, cy - offset)
        self.pip6 = self.__makePip(cx + offset, cy)
        self.pip7 = self.__makePip(cx + offset, cy + offset)
        self.setValue(1)

    def __makePip(self, x, y):
        """Draw a pip at (x, y), initially face-colored (invisible)."""
        pip = Circle(Point(x, y), self.psize)
        pip.setFill(self.background)
        pip.setOutline(self.background)
        pip.draw(self.win)
        return pip

    def setColor(self, color):
        """Set the pip color used by subsequent setValue() calls."""
        self.foreground = color

    def setValue(self, value):
        """Display *value* (1-6); any other value turns every pip off.

        Replaces the original seven-branch if/elif ladder with one
        data-driven pass over the pips.
        """
        pips = (self.pip1, self.pip2, self.pip3, self.pip4,
                self.pip5, self.pip6, self.pip7)
        lit = self._PIPS_FOR_VALUE.get(value, ())
        for number, pip in enumerate(pips, start=1):
            pip.setFill(self.foreground if number in lit else self.background)
| [
"alexander.tickle@gmail.com"
] | alexander.tickle@gmail.com |
5625481302911c3189bc36b544673943c69da279 | 4d327de5447519d3c00e6572f74362380783006f | /source/res/scripts/client/web_client_api/vehicles/__init__.py | 95c17e15dd4bdcde0d9f7fef090313d9995c6676 | [] | no_license | XFreyaX/WorldOfTanks-Decompiled | 706ac55d919b766aa89f90c97a75672bf2142611 | 5025466edd0dd3e5e50a6c60feb02ae793f6adac | refs/heads/master | 2021-09-21T15:10:32.655452 | 2018-08-28T07:34:00 | 2018-08-28T07:34:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/web_client_api/vehicles/__init__.py
import nations
from items import vehicles
from web_client_api import w2c, w2capi, Field, W2CSchema
class _VehicleInfoSchema(W2CSchema):
vehicle_id = Field(type=(int, long))
@w2capi(name='vehicles', key='action')
class VehiclesWebApi(W2CSchema):
@w2c(_VehicleInfoSchema, 'vehicle_info')
def vehicleInfo(self, cmd):
try:
vehicle = vehicles.getVehicleType(cmd.vehicle_id)
except Exception:
res = {'error': 'vehicle_id is invalid.'}
else:
res = {'vehicle': {'vehicle_id': vehicle.compactDescr,
'tag': vehicle.name,
'name': vehicle.userString,
'short_name': vehicle.shortUserString,
'nation': nations.NAMES[vehicle.id[0]],
'type': vehicles.getVehicleClassFromVehicleType(vehicle),
'tier': vehicle.level,
'is_premium': bool('premium' in vehicle.tags)}}
return res
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
ac7c2e6ab2caeed5008a2a2f19cd2c660df5ee43 | 94d1e805521575afb7b6256af1dd6de65a50ada9 | /problem_10/problem_10.py | 8fd3318a4bddafb0d3830f22cb0c2895b05fdc79 | [] | no_license | John-W-Stevens/Euler100 | fe2004786f64172e02ba18fbe33d95ceb68abf59 | 6f193a47e9e019b99ee9b188d2227587f5a3f4b3 | refs/heads/master | 2022-11-26T07:23:36.505138 | 2020-07-28T17:36:39 | 2020-07-28T17:36:39 | 274,224,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | import time
from prime_sieve import prime_sieve
def problem_10():
    """Project Euler #10: return the sum of all primes below 2,000,000."""
    return sum(prime_sieve(2000000))
start = time.time()  # wall-clock timing of the computation
solution = problem_10()
print(f"{solution} found in {time.time() - start} seconds.")
# 142913828922 found in 0.07372689247131348 seconds.
"john.william.stevens1@gmail.com"
] | john.william.stevens1@gmail.com |
995600023a7f11305386d13654b30082fec5afdb | c9ece5470d98941a64e5253d17d56a135d89d735 | /source/conf.py | bae026a6515a948f92df365243c821c56604b0ca | [
"MIT"
] | permissive | ketgo/ml-notes | 33d0f4cea17d1d12ac278fa1dc7afee37a737791 | 0351a798d36f5a698038e7f7741cc9d8ad881498 | refs/heads/main | 2023-05-15T05:52:21.157263 | 2021-06-15T14:25:47 | 2021-06-15T14:25:47 | 361,756,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Machine Learning Notes'
copyright = '2021, Ketan Goyal'
author = 'Ketan Goyal'
# The full version, including alpha/beta/rc tags
release = 'v1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | [
"ketangoyal1988@gmail.com"
] | ketangoyal1988@gmail.com |
aacaad190f4ee572df306c9bcfe3bbcada4c13aa | aca209472c7288d69adf57124c197baf98c7a6e7 | /OpenCV讀者資源/讀者資源/程式實例/ch17/ch17_17.py | 53ef8f1ca63e66e9178e09f7833c2d6e7c58b0fd | [] | no_license | Hank-Liao-Yu-Chih/document | 712790325e48b9d8115d04b5cc2a90cd78431e61 | fafe616678cd224e70936296962dcdbbf55e38b3 | refs/heads/master | 2022-09-22T12:40:33.284033 | 2022-09-08T00:33:41 | 2022-09-08T00:33:41 | 102,203,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | # ch17_17.py
import cv2
import numpy as np

# Find the brightest/darkest pixel of a hand image inside its own silhouette,
# then display the masked region of interest.
src = cv2.imread('hand.jpg')
cv2.imshow("src",src)
src_gray = cv2.cvtColor(src,cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(src_gray,50,255,cv2.THRESH_BINARY)
# Outer contours only; the first one is assumed to be the hand outline.
contours, hierarchy = cv2.findContours(binary,
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
mask = np.zeros(src_gray.shape,np.uint8)  # build the mask
mask = cv2.drawContours(mask,[cnt],-1,(255,255,255),-1)  # filled contour = mask
cv2.imshow("mask",mask)
# Search for the minimum and maximum pixel values of src_gray inside the mask.
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(src_gray,mask=mask)
print(f"最小像素值 = {minVal}")
print(f"最小像素值座標 = {minLoc}")
print(f"最大像素值 = {maxVal}")
print(f"最大像素值座標 = {maxLoc}")
cv2.circle(src,minLoc,20,[0,255,0],3)  # green circle marks the darkest pixel
cv2.circle(src,maxLoc,20,[0,0,255],3)  # red circle marks the brightest pixel
# Build a 3-channel mask so the region of interest can be shown in color.
mask1 = np.zeros(src.shape,np.uint8)  # build the mask
mask1 = cv2.drawContours(mask1,[cnt],-1,(255,255,255),-1)
cv2.imshow("mask1",mask1)
dst = cv2.bitwise_and(src,mask1)  # show the region of interest
cv2.imshow("dst",dst)
cv2.waitKey()
cv2.destroyAllWindows()
| [
"hank.liao@vicorelogic.com"
] | hank.liao@vicorelogic.com |
ef3afca60f71a786bd2173251e5ebcc5770444c2 | 2496bd44a435dbc839b3687d17cc5efdbec8cbdc | /app/templates/app/spiders/politicl_whole_pages_spider.py | e57e404bb4207ac809ce933e6b2999cff647b108 | [] | no_license | trujunzhang/generator-djzhang-targets | e1b0655ef6a2e9f46f3d548268ab1657b7947d5e | 395b7c3de11bb5104ff6e86672b290267949ec0f | refs/heads/master | 2021-01-21T14:29:05.382901 | 2016-07-20T01:30:40 | 2016-07-20T01:30:40 | 59,209,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,822 | py | # -*- coding: utf-8 -*-
import logging
import scrapy
# NOTE(review): this file is a scaffolding template (Yeoman-style) — the
# <%= ... %> tags are substituted with the application name at generation
# time, so the file is not valid Python until rendered.
class <%= appclassname%>sWatchSpider(scrapy.Spider):
    """Spider that walks every listing page of the generated app and parses
    pagination via a SpiderWholePageDispatch helper."""

    name = "<%= appname%>_whole_pages"

    def __init__(self, name=None, **kwargs):
        # Build cache/history/page collections from the connection settings
        # that from_crawler() injects as keyword arguments.
        from cw<%= appname%>.database_factory import DatabaseFactory, CollectionTypes
        database_factory = DatabaseFactory(kwargs['host'], kwargs['port'],
                                           kwargs['user'], kwargs['passwd'],
                                           kwargs['db'], kwargs['collection_name'])
        self._cache_db = database_factory.get_database(CollectionTypes.cache)
        self._history_db = database_factory.get_database(CollectionTypes.history)
        self._page_db = database_factory.get_database(CollectionTypes.page)

        from cw<%= appname%>.spiders.dispatch.spider_whole_pages_dispatch import SpiderWholePageDispatch
        self.whole_pages_dispatch = SpiderWholePageDispatch(self._page_db)

        # Dynamic the domains and start url.
        self.allowed_domains = self.whole_pages_dispatch.get_allowed_domains()

        page_url = self.whole_pages_dispatch.get_next_page_url()
        if page_url:
            self.start_urls = [page_url]
        else:
            logging.debug("Not found the page currently, the schedulared task end!")

        super(<%= appclassname%>sWatchSpider, self).__init__(name, **kwargs)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        # Pass the SQL_* crawler settings through to __init__ as kwargs.
        # NOTE(review): `args` is forwarded as a single positional tuple
        # (not unpacked with *args) — confirm this matches scrapy's API.
        return super(<%= appclassname%>sWatchSpider, cls).from_crawler(crawler,
                                                                       args,
                                                                       host=crawler.settings.get('SQL_HOST'),
                                                                       port=crawler.settings.get('SQL_PORT'),
                                                                       user=crawler.settings.get('SQL_USER'),
                                                                       passwd=crawler.settings.get('SQL_PASSWD'),
                                                                       db=crawler.settings.get('SQL_DB'),
                                                                       collection_name=crawler.settings.get(
                                                                           'SQL_COLLECTION_NAME')
                                                                       )

    # This method is the entry point for each downloaded response.
    def parse(self, response):
        # Step 1: parsing the pagination.
        self.whole_pages_dispatch.parse_from_pagination(response.url, response, self._cache_db, self._history_db)

        # Step 2: Check the next page from the page database.
        url = self.whole_pages_dispatch.get_next_page_url()
        if url:
            pass
        else:
            logging.debug("Scraped the {} pages currently, the schedulared task end!".format(10))
| [
"trujunzhang@gmail.com"
] | trujunzhang@gmail.com |
d1c7d9c016e677c1750841c3f81b7d5f6137af08 | d6ed05e23faa20beb5e47624870608a9219ea81c | /TuningTools_old/scripts/analysis_scripts/official/Trigger_201801XX_data17_v8/export_tuning.py | 05a362cf393f6ed47403b90ba3e57f3436a5e7ff | [] | no_license | kaducovas/ringer | f6495088c0d54d622dcc707333b4c2fbf132d65f | 603311caab016ad0ef052ea4fcc605c5ac4e494b | refs/heads/master | 2020-06-16T21:37:15.228364 | 2019-07-08T01:29:57 | 2019-07-08T01:29:57 | 195,477,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | #!/usr/bin/env python
from RingerCore import LoggingLevel, expandFolders, Logger, mkdir_p
from TuningTools import CrossValidStatAnalysis, RingerOperation
from pprint import pprint
import os

mainLogger = Logger.getModuleLogger( __name__ )

# One cross-validation input directory per working point below
# (tight / medium / loose / very loose), all pointing at the same base path.
basepath = 'data/crossval/'
crossval = [
    [basepath],
    [basepath],
    [basepath],
    [basepath],
]

####################### Data 2017 #########################
# 25 bins
config = 5
ref = 'SP'  # reference benchmark key used for the export (assumed SP index — confirm)

# Output constant/threshold file stems, ordered to match `crossval` above.
filenameWeights = [
    'TrigL2CaloRingerElectronTightConstants',
    'TrigL2CaloRingerElectronMediumConstants',
    'TrigL2CaloRingerElectronLooseConstants',
    'TrigL2CaloRingerElectronVeryLooseConstants',
]

filenameThres = [
    'TrigL2CaloRingerElectronTightThresholds',
    'TrigL2CaloRingerElectronMediumThresholds',
    'TrigL2CaloRingerElectronLooseThresholds',
    'TrigL2CaloRingerElectronVeryLooseThresholds',
]

####################### Extract Ringer Configuration #########################
# Export the tuned networks to selector files (pickled TrigMultiVarHypo_v2).
from TuningTools import CreateSelectorFiles, TrigMultiVarHypo_v2
export = CreateSelectorFiles( model = TrigMultiVarHypo_v2(toPickle=True) )
export( crossval, filenameWeights, filenameThres, ref, config )
| [
"kaducovas@gmail.com"
] | kaducovas@gmail.com |
9be03fd3eaa84cb66043be2f7fc92759213a1f45 | 527f721ed6080c29f15e410672ef6c30e7f2dca1 | /owllook/spiders/qidian_all_novels.py | f174be9ab66c80592fab0ca3181edb6825ea4f23 | [
"Apache-2.0"
] | permissive | PaulPaulYang/owllook | e06b7daddf2c2326cb6ceedd25ef669368637aa4 | 05e16b69466c1c69b12a195e28163df4f30b35d6 | refs/heads/master | 2021-04-09T10:17:59.049159 | 2018-03-13T00:21:36 | 2018-03-13T00:21:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,488 | py | #!/usr/bin/env python
"""
Created by howie.hu at 25/02/2018.
Target URI: https://www.qidian.com/all
Param:?page=1
"""
from pymongo import MongoClient
from talonspider import Spider, Item, TextField, AttrField, Request
from talospider.utils import get_random_user_agent
class MongoDb:
    """Thin lazy wrapper around a pymongo connection to the owllook database."""

    # Cached database handle; filled on first access of ``db``.
    _db = None
    # Connection settings; empty strings fall back to defaults in client().
    MONGODB = {
        'MONGO_HOST': '127.0.0.1',
        'MONGO_PORT': '',
        'MONGO_USERNAME': '',
        'MONGO_PASSWORD': '',
        'DATABASE': 'owllook'
    }

    def client(self):
        # motor
        # Build "mongodb://[user:pass@]host:port/"; host defaults to
        # localhost and port to 27017 when unset.
        self.mongo_uri = 'mongodb://{account}{host}:{port}/'.format(
            account='{username}:{password}@'.format(
                username=self.MONGODB['MONGO_USERNAME'],
                password=self.MONGODB['MONGO_PASSWORD']) if self.MONGODB['MONGO_USERNAME'] else '',
            host=self.MONGODB['MONGO_HOST'] if self.MONGODB['MONGO_HOST'] else 'localhost',
            port=self.MONGODB['MONGO_PORT'] if self.MONGODB['MONGO_PORT'] else 27017)
        return MongoClient(self.mongo_uri)

    @property
    def db(self):
        # Lazily connect on first use; the assignment creates an *instance*
        # attribute shadowing the class-level _db, so each MongoDb() instance
        # connects at most once.
        if self._db is None:
            self._db = self.client()[self.MONGODB['DATABASE']]
        return self._db
class QidianNovelsItem(Item):
    """talonspider Item describing one novel entry on a qidian.com listing page.

    ``target_item`` selects each <li> entry; the other fields are extracted
    relative to it. The ``tal_*`` methods post-process the raw values.
    """
    target_item = TextField(css_select='ul.all-img-list>li')
    novel_url = AttrField(css_select='div.book-img-box>a', attr='href')
    novel_name = TextField(css_select='div.book-mid-info>h4')
    novel_author = TextField(css_select='div.book-mid-info>p.author>a.name')
    novel_author_home_url = AttrField(css_select='div.book-mid-info>p.author>a.name', attr='href')

    def tal_novel_url(self, novel_url):
        # Listing pages use protocol-relative URLs ("//..."); prepend a scheme.
        return 'http:' + novel_url

    def tal_novel_author(self, novel_author):
        # The selector may match several nodes; keep the first author's text.
        if isinstance(novel_author, list):
            novel_author = novel_author[0].text
        return novel_author

    def tal_novel_author_home_url(self, novel_author_home_url):
        # Same multi-match handling as above, then make the URL absolute.
        if isinstance(novel_author_home_url, list):
            novel_author_home_url = novel_author_home_url[0].get('href').strip()
        return 'http:' + novel_author_home_url
class QidianNovelsSpider(Spider):
    """Crawl every page of qidian.com's "all novels" listing and store each
    novel's basic info in the ``all_novels`` Mongo collection."""

    start_urls = ['https://www.qidian.com/all?page=1']
    headers = {
        "User-Agent": get_random_user_agent()
    }
    set_mul = True  # presumably enables concurrent fetching — confirm in talonspider docs
    request_config = {
        'RETRIES': 3,
        'DELAY': 0,
        'TIMEOUT': 10
    }
    all_novels_col = MongoDb().db.all_novels

    def parse(self, res):
        # Fan out one request per listing page (site had ~41644 pages at the
        # time); every response is handled by parse_item().
        urls = ['https://www.qidian.com/all?page={i}'.format(i=i) for i in range(1, 41645)]
        for url in urls:
            # Fresh random User-Agent per request to reduce blocking.
            headers = {
                "User-Agent": get_random_user_agent()
            }
            yield Request(url, request_config=self.request_config, headers=headers, callback=self.parse_item)

    def parse_item(self, res):
        # Extract each novel entry on the page and insert only unseen ones,
        # deduplicated by novel_name.
        items_data = QidianNovelsItem.get_items(html=res.html)
        for item in items_data:
            data = {
                'novel_url': item.novel_url,
                'novel_name': item.novel_name,
                'novel_author': item.novel_author,
                'novel_author_home_url': item.novel_author_home_url,
                'spider': 'qidian'
            }
            if self.all_novels_col.find_one({"novel_name": item.novel_name}) is None:
                self.all_novels_col.insert_one(data)
            print(item.novel_name + ' - 抓取成功')
print(item.novel_name + ' - 抓取成功')
if __name__ == '__main__':
    # More multi-item examples: https://gist.github.com/howie6879/3ef4168159e5047d42d86cb7fb706a2f
    QidianNovelsSpider.start()
| [
"xiaozizayang@gmail.com"
] | xiaozizayang@gmail.com |
173616fa9d03a512d598ed7c89c3962b5cf28731 | 97cb7589aeb1c5c473301b96ba1c4782608fe7a0 | /backend/eleven11_2189/urls.py | f9bfe9afd5639d03eb87d2f687f87ada287e8751 | [] | no_license | crowdbotics-apps/eleven11-2189 | 4e39bf581c43904cdd9638690309e3569ab724af | 6ef42908a47a96444ef2db70a3838504b642e9f8 | refs/heads/master | 2022-12-07T17:57:14.979964 | 2019-04-11T19:09:53 | 2019-04-11T19:09:53 | 180,860,812 | 0 | 1 | null | 2022-12-06T15:23:48 | 2019-04-11T19:09:49 | JavaScript | UTF-8 | Python | false | false | 1,050 | py | """eleven11_2189 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Route map: home app at the root, allauth for accounts, the v1 API, and the
# Django admin.
urlpatterns = [
    url('', include('home.urls')),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^api/v1/', include('home.api.v1.urls')),
    url(r'^admin/', admin.site.urls),
]

# Brand the admin site for this project.
admin.site.site_header = 'eleven11'
admin.site.site_title = 'eleven11 Admin Portal'
admin.site.index_title = 'eleven11 Admin'
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
0706c7aeac1ba92e470ffd84b8d64a2767b38f86 | e5a511e346f5be8a82fe9cb2edf457aa7e82859c | /Python/cppsecrets.com/program 51.py | c8ba027af76fa15a1501d402fc650146b31d90eb | [] | no_license | nekapoor7/Python-and-Django | 8397561c78e599abc8755887cbed39ebef8d27dc | 8fa4d15f4fa964634ad6a89bd4d8588aa045e24f | refs/heads/master | 2022-10-10T20:23:02.673600 | 2020-06-11T09:06:42 | 2020-06-11T09:06:42 | 257,163,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | """Python Program to Find the Second Largest Number in a List"""
# Read a whitespace-separated list of integers from stdin and print the
# second largest *distinct* value (duplicates of the maximum are skipped).
list1 = list(map(int, input().split()))
# Hoist max(list1) out of the generator: the original recomputed it for every
# element, turning a linear scan into O(n^2).
largest = max(list1)
# Raises ValueError (like the original) when there is no second distinct value.
seclarge = max(ele for ele in list1 if ele != largest)
print(seclarge)
"neha.kapoor070789@gmail.com"
] | neha.kapoor070789@gmail.com |
f744e68c36103e0914079f70b40c162e9e20f715 | b6c7ff1b2f048d3523b591ae56227be88f701b2c | /preprocess_gisaid.py | 10f50d0ea9f1371d72612ea6d263407fcf59b8de | [
"Apache-2.0"
] | permissive | majagarbulinska/pyro-cov | 98ef7fc9716692fccb4d9028c81c1e4e47f49e8e | fdbd37843618a3269b24430b8e66536583773046 | refs/heads/master | 2023-08-23T05:02:17.030999 | 2021-11-02T14:40:33 | 2021-11-02T14:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,255 | py | # Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import argparse
import datetime
import json
import logging
import os
import pickle
import warnings
from collections import Counter, defaultdict
from pyrocov import pangolin
from pyrocov.geo import gisaid_normalize
from pyrocov.mutrans import START_DATE
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(relativeCreated) 9d %(message)s", level=logging.INFO)
# Accepted GISAID date string lengths: year, year-month, year-month-day.
DATE_FORMATS = {4: "%Y", 7: "%Y-%m", 10: "%Y-%m-%d"}


def parse_date(string):
    """Parse a GISAID collection date, zero-padding sloppy components first."""
    try:
        fmt = DATE_FORMATS[len(string)]
    except KeyError:
        # Repair poorly formatted dates such as "2020-09-1" by left-padding
        # every component after the year to two digits, then retry.
        year, *rest = string.split("-")
        string = "-".join([year] + ["%02d" % int(piece) for piece in rest])
        fmt = DATE_FORMATS[len(string)]
    return datetime.datetime.strptime(string, fmt)
FIELDS = ["virus_name", "accession_id", "collection_date", "location", "add_location"]
def main(args):
    """Stream the GISAID JSON-lines feed, filter/normalize rows, and pickle
    per-column lists plus aggregate counters.

    Rows are dropped when the collection date lacks month information or the
    lineage is missing/ambiguous/unparseable; dates before ``args.start_date``
    are clipped to it. Writes ``args.columns_file_out`` (dict of lists) and
    ``args.stats_file_out`` (dict of Counters).
    """
    logger.info(f"Filtering {args.gisaid_file_in}")
    if not os.path.exists(args.gisaid_file_in):
        raise OSError(f"Missing {args.gisaid_file_in}; you may need to request a feed")
    os.makedirs("results", exist_ok=True)

    columns = defaultdict(list)
    stats = defaultdict(Counter)
    covv_fields = ["covv_" + key for key in FIELDS]
    # NOTE(review): assumes the feed is non-empty; with an empty file ``i``
    # below would be unbound at the summary line.
    with open(args.gisaid_file_in) as f:
        for i, line in enumerate(f):
            # Optimize for faster reading: cut off the huge "sequence" payload
            # before JSON-decoding rather than parsing it.
            line, _ = line.split(', "sequence": ', 1)
            line += "}"

            # Filter out bad data.
            datum = json.loads(line)
            if len(datum["covv_collection_date"]) < 7:
                continue  # Drop rows with no month information.
            date = parse_date(datum["covv_collection_date"])
            if date < args.start_date:
                date = args.start_date  # Clip rows before start date.
            lineage = datum["covv_lineage"]
            if lineage in (None, "None", "", "XA"):
                continue  # Drop rows with unknown or ambiguous lineage.
            try:
                # Round-trip through pangolin to canonicalize the lineage name.
                lineage = pangolin.compress(lineage)
                lineage = pangolin.decompress(lineage)
                assert lineage
            except (ValueError, AssertionError) as e:
                warnings.warn(str(e))
                continue

            # Fix duplicate locations.
            datum["covv_location"] = gisaid_normalize(datum["covv_location"])

            # Collate.
            columns["lineage"].append(lineage)
            for covv_key, key in zip(covv_fields, FIELDS):
                columns[key].append(datum[covv_key])
            columns["day"].append((date - args.start_date).days)

            # Aggregate statistics.
            stats["date"][datum["covv_collection_date"]] += 1
            stats["location"][datum["covv_location"]] += 1
            stats["lineage"][lineage] += 1
            if i % args.log_every == 0:
                print(".", end="", flush=True)
            if i >= args.truncate:
                break

    num_dropped = i + 1 - len(columns["day"])
    # Bug fix: the dropped fraction must be *multiplied* by 100 to report a
    # percentage (the original divided by 100, logging a value 10^4 too small).
    logger.info(f"dropped {num_dropped}/{i+1} = {num_dropped/(i+1)*100:0.2g}% rows")

    logger.info(f"saving {args.columns_file_out}")
    with open(args.columns_file_out, "wb") as f:
        pickle.dump(dict(columns), f)

    logger.info(f"saving {args.stats_file_out}")
    with open(args.stats_file_out, "wb") as f:
        pickle.dump(dict(stats), f)
if __name__ == "__main__":
    # CLI entry point: all paths default to the results/ directory;
    # --truncate caps the number of feed rows processed.
    parser = argparse.ArgumentParser(description="Preprocess GISAID data")
    parser.add_argument("--gisaid-file-in", default="results/gisaid.json")
    parser.add_argument("--columns-file-out", default="results/gisaid.columns.pkl")
    parser.add_argument("--stats-file-out", default="results/gisaid.stats.pkl")
    parser.add_argument("--subset-file-out", default="results/gisaid.subset.tsv")
    parser.add_argument("--subset-dir-out", default="results/fasta")
    parser.add_argument("--start-date", default=START_DATE)
    parser.add_argument("-l", "--log-every", default=1000, type=int)
    parser.add_argument("--truncate", default=int(1e10), type=int)
    args = parser.parse_args()
    # Normalize the start date to a datetime before handing off to main().
    args.start_date = parse_date(args.start_date)
    main(args)
| [
"fritz.obermeyer@gmail.com"
] | fritz.obermeyer@gmail.com |
a5423765373db001d6e72a7896e8a2040d1b9c3a | 7fbf91c595f3adb67e29ab879a0b215581d260bf | /知识点/04-LiaoXueFeng-master/08-function.py | b768bd1aef41bc822d278fa44ad012d0abe7dc69 | [] | no_license | Randyedu/python | 69947b3836e62d0081d92591ae2acd9a54eadb9a | 5f9e7bec295ae05eadde0f661e7039c2bd08f725 | refs/heads/master | 2021-04-26T22:20:22.555128 | 2018-03-02T07:01:27 | 2018-03-02T07:01:27 | 124,074,741 | 1 | 0 | null | 2018-03-06T12:23:42 | 2018-03-06T12:23:42 | null | UTF-8 | Python | false | false | 3,389 | py | print(abs(0))
print(max(1,4,5,6))
print(int('2322'))
print(int(13.23))
print(int(float('13.98')))
print(float('23.24'))
print(str(121))
print(bool(1))
print(bool(0))
print(bool(-1))
print(bool(''))
# 函数名其实就是指向一个函数对象的引用,完全可以把函数名赋给一个变量
# 相当于给这个函数起了一个“别名”
a = abs # 变量a指向abs函数
print(a(-1)) # 所以也可以通过a调用abs函数
n1 = 255
n2 = 1000
print(hex(n1))
print(hex(n2))
'''
在Python中,定义一个函数要使用def语句,依次写出函数名、括号、括号中的参数和冒号:,然后,在缩进块中编写函数体,函数的返回值用return语句返回。
'''
def my_abs(x):
    """Return the absolute value of x.

    Type checking is done with the built-in isinstance(), as the tutorial
    demonstrates; non-numeric input raises TypeError.
    """
    if not isinstance(x, (int, float)):
        raise TypeError('bad operand type')
    return x if x >= 0 else -x
print(my_abs(-9.7))
import math
def move(x, y, step, angle=0):
    """Translate point (x, y) by *step* along *angle* (radians, default 0).

    Returns the new coordinates as a (nx, ny) tuple; note y decreases with
    a positive angle (screen-style axis).
    """
    dx = step * math.cos(angle)
    dy = step * math.sin(angle)
    return x + dx, y - dy
x, y = move(100,100,60,math.pi/6)
print(x,y)
# 原来返回值是一个tuple
r = move(100,100,60,math.pi/6)
print(r)
print(r[0],r[1])
def quadratic(a, b, c):
    """Solve a*x**2 + b*x + c = 0 over the reals.

    Returns the two roots as a tuple (the "+sqrt" root first); returns None
    when the discriminant is negative, matching the original's implicit
    fall-through.
    """
    discriminant = b * b - 4 * a * c
    if discriminant < 0:
        return None
    root = math.sqrt(discriminant)
    return (-b + root) / (2 * a), (-b - root) / (2 * a)
print(quadratic(2,3,1))
print(quadratic(1,3,-4))
def power(x, n=2):
    """Return x**n for a non-negative integer n (defaults to squaring x).

    For n <= 0 the loop body never runs and 1 is returned, exactly like the
    original while-loop version.
    """
    result = 1
    for _ in range(n):
        result *= x
    return result
print(power(5))
print(power(15))
print(power(5,4))
def add_end(L=[]):
    # NOTE: the mutable default argument is shared between calls, so repeated
    # add_end() calls keep appending to the same list. This is the pitfall the
    # tutorial demonstrates on purpose — addend() below shows the fix.
    L.append('END')
    return L
print(add_end([1,2,3]))
print(add_end())
print(add_end())
print(add_end())
def addend(L=None):
    """Append 'END' to L (mutating and returning it).

    Uses the None-sentinel idiom so each no-argument call gets a fresh list,
    avoiding the shared mutable-default pitfall shown by add_end() above.
    """
    target = [] if L is None else L
    target.append('END')
    return target
print(addend())
print(addend())
print(addend())
def calc(numbers):
    """Return the sum of squares of the numbers in the given iterable."""
    return sum(n * n for n in numbers)
print(calc([1,2,3,4,5]))
def calc2(*numbers):
    """Return the sum of squares of all positional arguments (variadic)."""
    return sum(n * n for n in numbers)
print(calc2(12,32,32,42))
nums = [1,2,3]
print(calc2(nums[0],nums[1],nums[2]))
print(calc2(*nums))
def person(name, age, **kw):
    # Demonstrates **kwargs: prints a marker for recognized optional keys
    # ('city', 'job') before printing the mandatory fields plus the kw dict.
    if 'city' in kw:
        print('has city', '==', end='')
    if 'job' in kw:
        print('has job', '==', end='')
    print('name:', name, 'age:', age, 'other:', kw)
person('Min',30)
person('Bob',35,city='Beijing')
person('Bob',35,city='Beijing',job='Ern')
extra = {'city':'Beijing','job':'Eng'}
person('Jack',24,**extra)
def fact(n):
    """Return n! by straight (non-tail) recursion; expects n >= 1."""
    return 1 if n == 1 else n * fact(n - 1)
print(fact(5))
print(fact(100))
print(fact(100))
# 解决递归调用栈溢出的方法是通过尾递归优化,事实上尾递归和循环的效果是一样的,所以,把循环看成是一种特殊的尾递归函数也是可以的。
# 尾递归是指,在函数返回的时候,调用自身本身,并且,return语句不能包含表达式。
# 这样,编译器或者解释器就可以把尾递归做优化,使递归本身无论调用多少次,都只占用一个栈帧,不会出现栈溢出的情况。
def fec(n):
    """Return n! via the tail-recursive helper fac_iter (tutorial example)."""
    return fac_iter(n, 1)


def fac_iter(num, product):
    """Tail-recursive factorial step: returns product * num!.

    The recursive call is the whole return expression, so a tail-call
    optimizing implementation could run it in constant stack space.
    """
    return product if num == 1 else fac_iter(num - 1, product * num)
print(fact(6))
def move(n, a, b, c):
    # Towers of Hanoi: print the moves that shift n disks from peg a to peg c
    # using peg b as the spare. (NOTE: this redefines the earlier geometric
    # move() in this tutorial script.)
    if n == 1:
        print(a, '-->', c)
    else:
        move(n - 1, a, c, b)  # move the n-1 from a to b
        move(1, a, b, c)  # now, a has just one dish, so just move it to c
        move(n - 1, b, a, c)  # now, move the n-1 dishes from b to c
move(4,'A','B','C')
| [
"954950195@qq.com"
] | 954950195@qq.com |
8709663ccf77b94eea9421338c940157369ddfd2 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/plybla001/question2.py | 8bf37650e344a8354441a930101703d6225fb85c | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | """Format Lines
B.Player
11/05/2014"""
import textwrap
#Variables
inName=input("Enter the input filename:\n")
outName=input("Enter the output filename:\n")
width=eval(input("Enter the line width:\n"))
paras=[]
i=0
inFile=open(inName,'r')
data=inFile.read()
inFile.close()
paras=data.split("\n\n")
outFile=open(outName,'w')
for para in paras:
text=textwrap.wrap(para,width)
for line in text:
print(line,file=outFile)
print("",file=outFile)
outFile.close()
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
ef1fcbe676a4d21f2e0203d263d60155be7e015b | b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb | /samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/model/uniqueitems_validation.py | 93644ef0e5d68fe6ea17ddaf07330762ffafefac | [
"Apache-2.0"
] | permissive | FallenRiteMonk/openapi-generator | f8b98940219eecf14dc76dced4b0fbd394522aa3 | b6576d11733ecad6fa4a0a616e1a06d502a771b7 | refs/heads/master | 2023-03-16T05:23:36.501909 | 2022-09-02T01:46:56 | 2022-09-02T01:46:56 | 164,609,299 | 0 | 0 | Apache-2.0 | 2019-01-08T09:08:56 | 2019-01-08T09:08:56 | null | UTF-8 | Python | false | false | 1,878 | py | # coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
class UniqueitemsValidation(
schemas.AnyTypeSchema,
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
class MetaOapg:
additional_properties = schemas.AnyTypeSchema
unique_items = True
def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties:
# dict_instance[name] accessor
if not hasattr(self.MetaOapg, 'properties') or name not in self.MetaOapg.properties.__annotations__:
return super().__getitem__(name)
try:
return super().__getitem__(name)
except KeyError:
return schemas.unset
def __new__(
cls,
*args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes, ],
) -> 'UniqueitemsValidation':
return super().__new__(
cls,
*args,
_configuration=_configuration,
**kwargs,
)
| [
"noreply@github.com"
] | FallenRiteMonk.noreply@github.com |
048e3031fe6ef4f1e0319fdb0ea5bfdbbc6db368 | b6472217400cfce4d12e50a06cd5cfc9e4deee1f | /sites/top/api/rest/JipiaoAgentorderSuccessRequest.py | 676b4a43ce63c3e49f0980160f008cd5cbf13a17 | [] | no_license | topwinner/topwinner | 2d76cab853b481a4963826b6253f3fb0e578a51b | 83c996b898cf5cfe6c862c9adb76a3d6a581f164 | refs/heads/master | 2021-01-22T22:50:09.653079 | 2012-08-26T19:11:16 | 2012-08-26T19:11:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | '''
Created by auto_sdk on 2012-08-26 16:43:44
'''
from top.api.base import RestApi
class JipiaoAgentorderSuccessRequest(RestApi):
def __init__(self,domain,port):
RestApi.__init__(self,domain, port)
self.order_id = None
self.success_info = None
def getapiname(self):
return 'taobao.jipiao.agentorder.success'
| [
"timo.jiang@qq.com"
] | timo.jiang@qq.com |
57d3d0f895c149d41283f64b93f30b73b305c96c | 1db7398d89e70b20bc1d0f0b401c49588d14afc7 | /processor/process.py | bde5f1301cc13b014b81fd6003c1a29ea5343ee2 | [] | no_license | ypsprimer/3d-segmentaion | 4e676e0c2981baaf1fee4269cfab852e415699aa | 387d3e813652ab634e0f1dbf162b0cb7acc7d86d | refs/heads/master | 2023-02-17T12:48:49.536512 | 2021-01-06T09:17:30 | 2021-01-06T09:17:30 | 327,245,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | from utils import timerecord
class Process(object):
def __init__(self):
pass
@timerecord
def start(self):
pass
@timerecord
def validate(self):
pass
| [
"dyp18@mails.tsinghua.edu.cn"
] | dyp18@mails.tsinghua.edu.cn |
c8fec17e5b347c8c924de7503f6a568f1749dc32 | 11ff14c118240e87c4804d0373e4656d0683d479 | /test_case/test_setup_set_number_SSID_Y1.py | 81870cae736291ee784a668fa5ff6ce79adf2f7f | [] | no_license | wxmmavis/OS3.1 | e3028d9c79d5a1a17449fea6380fcdda902bdec7 | 26d954344207a82d2298821c3c4f01302393dc7e | refs/heads/master | 2020-03-25T20:07:11.225493 | 2018-08-13T03:20:57 | 2018-08-13T03:20:57 | 144,115,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | # -*- coding: utf-8 -*-
import configparser
import logging
import os
import time
import pytest
#########################
# import module
#########################
import sys
import conftest
sys.path.append("..")
import modules.login_router
import modules.initialize
from modules.login_router import *
from modules.initialize_new import *
from tools import *
#########################
from selenium import webdriver
lr = login_router()
setup = initialize()
t = tools()
projectpath = os.path.dirname(os.getcwd())
caseFail = projectpath + '/errorpng/caseFail/'
test_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
config_file = projectpath + '/configure/' + 'testconfig.ini'
filename = os.path.basename(__file__).split('.')[0]
t.log(filename)
config = configparser.ConfigParser()
config.read(config_file, encoding='UTF-8')
default_ip = config.get('Default', 'default_ip')
default_pw = config.get('Default', 'default_pw')
test_ssid = config.get('SSID', 'ssid_number')
logging.info(__file__)
def setSSID(driver):
    # Walk the router setup wizard: set the SSID, then the admin password,
    # then finish. Each helper returns 1 on success; on any failure this
    # function implicitly returns None (the caller asserts == 1).
    if setup.setssid(driver, test_ssid) == 1:
        if setup.initialize_pw(driver, default_pw) == 1:
            return setup.complete(driver)
class Test_Initialize_SSID:
    """pytest class: verify the router accepts an all-digit SSID during setup."""

    def setup(self):
        # Launch the shared browser from conftest and open the router's
        # default admin page; open_url() == 2 means the page loaded.
        conftest.browser()
        self.driver = conftest.driver
        # self.driver = webdriver.Chrome()
        self.driver.maximize_window()
        if lr.open_url(self.driver, 'http://' + default_ip) == 2:
            if setup.homepage(self.driver) == 1:
                pass

    def teardown(self):
        # Always close the browser window and end the WebDriver session.
        self.driver.close()
        self.driver.quit()

    def test_set_number_SSID(self):
        print(u'测试设置初始化ssid为纯数字')
        assert setSSID(self.driver) == 1
if __name__ == '__main__':
    # Allow running this test module directly, outside a pytest invocation.
    pytest.main(os.path.basename(__file__))
"1475806321@qq.com"
] | 1475806321@qq.com |
afcf3ac520a9802ad38358b8327d26de164327aa | 26f78ba56388765f2fe2dc8fa23ddea097209ec5 | /Leetcode/动态规划/474. 一和零.py | dd727fe7fd84c669fd8a61dffce5cfc26c80993c | [] | no_license | johnkle/FunProgramming | 3ef2ff32a1a378e1c780138ec9bab630c9ba83c7 | a60e0d17a1e9f0bc1959d7a95737fc4a0362d735 | refs/heads/master | 2023-07-18T16:05:56.493458 | 2021-09-08T19:01:19 | 2021-09-08T19:01:19 | 402,861,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | #0-1背包,继续练习
class Solution:
    def findMaxForm(self, strs, m, n):
        """LeetCode 474 "Ones and Zeroes" — a 2-D 0/1 knapsack.

        dp[i][j] holds the largest subset size buildable with at most i
        zeros and j ones; each string costs (its '0' count, its '1' count).
        """
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for s in strs:
            zeros = s.count('0')
            ones = s.count('1')
            # Sweep capacities downwards so every string is taken at most once.
            for i in range(m, zeros - 1, -1):
                for j in range(n, ones - 1, -1):
                    dp[i][j] = max(dp[i][j], dp[i - zeros][j - ones] + 1)
        return dp[-1][-1]
"605991742@qq.com"
] | 605991742@qq.com |
6efc9f01d813d133e726173325dab5542ec82946 | 18c886d2c325094a93c33b4c526adb0ad8490033 | /backend/src/file_storage_system/urls.py | d6372f51bd2c39983e5440a0df82842e7449aa34 | [
"MIT"
] | permissive | Rezwanul-Haque/file-storage-system | 32d9c6636cf928b9056db142aa7fd307da308f51 | 677023d99098df7609f807463d4c7cea20390b5c | refs/heads/master | 2020-08-10T03:00:38.171912 | 2019-10-12T19:56:27 | 2019-10-12T19:56:27 | 214,240,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
from .routers import router
# Route map: SPA entry page at the root, the Django admin, and the v1 API.
urlpatterns = [
    path('', TemplateView.as_view(template_name=settings.FRONTEND_PATH + '/index.html'), name='Home'),
    path('admin/', admin.site.urls),
    path('api/v1/', include(router.urls)),
]

# Development-only: mount django-debug-toolbar when DEBUG is on.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]

# Serve static and media files through Django (no-op when DEBUG is off).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"rezwanul.cse@gmail.com"
] | rezwanul.cse@gmail.com |
86e973af71e61872eeb8ac5d60cae1f947c5e04d | 0a74687347990348d798d4647c6bcfaa61b8e901 | /mysite/migrations/0005_auto_20161128_2215.py | 848a7d1d6547177f7882986f759bfdc7b8448955 | [] | no_license | bufubaoni/learndjango | 7b0db8c291c8306615d223cf4ca0c190c0fc074a | a4a750eb65f684d189d98b92310e749e9e51f07f | refs/heads/master | 2021-01-18T22:30:04.911193 | 2016-12-06T14:19:40 | 2016-12-06T14:19:40 | 72,538,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-28 14:15
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import river.models.fields.state
class Migration(migrations.Migration):
    # Auto-generated by Django: makes MyModel.state nullable/blank and wires
    # it to django-river's State model (verbose_name is the escaped Chinese
    # word for "state").

    dependencies = [
        ('mysite', '0004_remove_customuser_mobile'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mymodel',
            name='state',
            field=river.models.fields.state.StateField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='states', to='river.State', verbose_name='\u72b6\u6001'),
        ),
    ]
| [
"bufubaoni@163.com"
] | bufubaoni@163.com |
41669d5377e34fa82c2deb81c173293e150f926a | ddd82d37cea1981d053acda347b654bd6ad44655 | /medium/balanced_brackets.py | b2d42ccf7a9a65f5a158961104ca36f9c0788e75 | [] | no_license | jriall/algoexpert | e675c73f3005effc6026eeaa20e59d92de06d3b1 | 76ab8dd7f446fb46ad3742c376b46ad7d65f35cb | refs/heads/main | 2023-06-22T10:37:28.988383 | 2021-07-18T16:21:13 | 2021-07-18T16:21:13 | 359,125,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | # Balanced Brackets
# Write a function that takes in a string made up of brackets ((, [, {, ), ],
# and }) and other optional characters. The function should return a boolean
# representing whether the string is balanced with regards to brackets.
# A string is said to be balanced if it has as many opening brackets of a
# certain type as it has closing brackets of that type and if no bracket is
# unmatched. Note that an opening bracket can't match a corresponding closing
# bracket that comes before it, and similarly, a closing bracket can't match a
# corresponding opening bracket that comes after it. Also, brackets can't
# overlap each other as in [(]).
# Sample Input
# string = "([])(){}(())()()"
# Sample Output
# true // it's balanced
# Solution
def balanced_brackets(string):
    """Return True if every bracket in *string* is matched and properly nested.

    Non-bracket characters are ignored. Runs in O(len(string)) time using a
    stack of unmatched opening brackets.
    """
    closer_to_opener = {')': '(', '}': '{', ']': '['}
    # Hoist the opener set out of the loop (the original rebuilt a values()
    # view on every character) and drop the redundant .keys()/len() idioms.
    openers = set(closer_to_opener.values())
    stack = []
    for char in string:
        if char in openers:
            stack.append(char)
        elif char in closer_to_opener:
            # A closing bracket must match the most recent unmatched opener.
            if stack and stack[-1] == closer_to_opener[char]:
                stack.pop()
            else:
                return False
    # Balanced iff no opening bracket is left unmatched.
    return not stack
| [
"jamesrariall@gmail.com"
] | jamesrariall@gmail.com |
d59b1696d9b316d38a13becca0d19ec5d48f1ff8 | 80e152f49b355b3e07faaab6b468ca8dda6aa097 | /python/tkinter-mvc/view.py | 0937cd75dfe2b0985fe9fe1c4dcba39686577eb6 | [] | no_license | Pitrified/snippet | 13ad9222f584570b10abb23a122b010b088eb366 | 1d7e5657014b00612cde87b78d5506a9e8b6adfc | refs/heads/master | 2023-05-25T16:14:42.133900 | 2023-04-19T18:20:32 | 2023-04-19T18:20:32 | 174,192,523 | 2 | 0 | null | 2023-05-01T23:48:48 | 2019-03-06T17:47:16 | Python | UTF-8 | Python | false | false | 758 | py | import logging
import tkinter as tk
from side_panel import SidePanel
from plot_panel import PlotPanel
class View:
    """MVC view: lays out a plot panel (left, stretching) and a fixed-width
    side panel (right) inside the given Tk root window."""

    def __init__(self, root):
        log = logging.getLogger(f"c.{__name__}.init")
        log.info("Start init")

        self.root = root

        # setup grid for root: column 0 (plot) absorbs resizing, column 1
        # (side panel) keeps its natural width.
        self.root.grid_rowconfigure(0, weight=1)
        self.root.grid_columnconfigure(0, weight=1)
        self.root.grid_columnconfigure(1, weight=0)

        # create children widget (background colors mark the panels visually)
        self.plot_panel = PlotPanel(self.root, bg="SeaGreen1")
        self.side_panel = SidePanel(self.root, bg="dark orange")

        # grid children widget; "nsew" makes each panel fill its cell.
        self.plot_panel.grid(row=0, column=0, sticky="nsew")
        self.side_panel.grid(row=0, column=1, sticky="nsew")
"nobilipietro@gmail.com"
] | nobilipietro@gmail.com |
0badc9960b65fd791bebaa445d38fe065c983323 | e748e6d96aace1c9149327f384e0de07d743715a | /arcade/python/fixResult.py | 413e64fd1cb385c4f5500c3c5feff061bc0588f6 | [] | no_license | jorzel/codefights | cdfc4cb32261b064ffc605bfd927bf237885b5d2 | 28b62a2ae3809f0eb487198044c0fe74be09d4e8 | refs/heads/master | 2022-04-28T06:54:26.170503 | 2022-03-23T22:22:20 | 2022-03-23T22:22:20 | 110,818,719 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | """
Easy
Recovery
100
Your teacher asked you to implement a function that calculates the Answer to the Ultimate Question of Life, the Universe, and Everything and returns it as an array of integers. After several hours of hardcore coding you managed to write such a function, and it produced a quite reasonable result. However, when you decided to compare your answer with results of your classmates, you discovered that the elements of your result are roughly 10 times greater than the ones your peers got.
You don't have time to investigate the problem, so you need to implement a function that will fix the given array for you. Given result, return an array of the same length, where the ith element is equal to the ith element of result with the last digit dropped.
Example
For result = [42, 239, 365, 50], the output should be
fixResult(result) = [4, 23, 36, 5].
"""
def fixResult(result):
    """Return a new list with the last decimal digit of every element dropped.

    BUG FIX / portability: the original used ``x / 10`` and returned
    ``map(...)``.  Under Python 3, ``/`` is true division (floats) and
    ``map`` is a lazy iterator, so the function no longer returned a list
    of integers.  Floor division ``//`` reproduces the Python-2 integer
    result on both interpreters, and a list comprehension returns an
    eagerly-built list exactly as Python-2 ``map`` did.

    :param result: iterable of integers
    :return: list of integers, element-wise ``x // 10``
    """
    return [x // 10 for x in result]
| [
"jaroslaw.orzel@emplocity.pl"
] | jaroslaw.orzel@emplocity.pl |
33a5e14279e69788ae7b7410ebb3830620626b14 | d7f223ec944de8ef95304cb3db50be4e46e0d6e5 | /unusual/unusual/settings.py | 2e320c5edbb28e9cf46ecb645db09029bffce948 | [] | no_license | shashisp/mh | f90fb13db4951656dbcc7fa2330ce229e5b4d8fb | 01fa451cbd5b7a3080edbc03608997b00a2bfc12 | refs/heads/master | 2016-09-06T09:33:25.887683 | 2015-05-13T21:40:13 | 2015-05-13T21:40:13 | 34,963,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,515 | py | """
Django settings for unusual project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from config import *
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository — rotate it and
# load it from the environment/config before any deployment.
SECRET_KEY = 'v@h8$i4!3u2$un&it*ix(_l*&f$@#iu%vid*wb@fnn*l04vyu2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Django contrib apps plus: campaigns (local app), tastypie (REST API),
# python-social-auth (social login backends configured below).
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'campaigns',
    'tastypie',
    'social.apps.django_app.default',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'unusual.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'unusual.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Development default: a file-based SQLite database next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): TEMPLATE_CONTEXT_PROCESSORS is the pre-1.8 setting; on
# Django 1.8+ context processors are read from TEMPLATES['OPTIONS'] above,
# so the social-auth processors listed here are likely ignored.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.contrib.messages.context_processors.messages',
    'social.apps.django_app.context_processors.backends',
    'social.apps.django_app.context_processors.login_redirect',
)
# Social login backends tried in order, with the model backend as fallback.
AUTHENTICATION_BACKENDS = (
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.google.GoogleOAuth2',
    'social.backends.twitter.TwitterOAuth',
    'django.contrib.auth.backends.ModelBackend',
)
# Placeholders — fill in real Twitter OAuth credentials (outside VCS).
SOCIAL_AUTH_TWITTER_KEY = 'update me'
SOCIAL_AUTH_TWITTER_SECRET = 'update me'
| [
"shashiessp@gmail.com"
] | shashiessp@gmail.com |
b41f5d1985b55fb07976eb105e68b0c7ea27bfd3 | d3f448d238b435b48d8f27f17a34b3e39a70dc29 | /python-client/test/test_pay_outs_bankwire_api.py | 2f9c2ce64dc7aaa4547dae3eb1e7e642fe99e264 | [] | no_license | pedroguirao/swagger | 1fc29b6d9bcc193bf8ce85f6d8a6074f4c37150d | 5ffea6203b5fcd3f201c2ede76d354302a6fb0ee | refs/heads/master | 2020-06-07T16:15:08.659567 | 2019-06-21T07:51:49 | 2019-06-21T07:51:49 | 193,055,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | # coding: utf-8
"""
MarketPay API
API for Smart Contracts and Payments # noqa: E501
OpenAPI spec version: v2.01
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.pay_outs_bankwire_api import PayOutsBankwireApi # noqa: E501
from swagger_client.rest import ApiException
class TestPayOutsBankwireApi(unittest.TestCase):
    """PayOutsBankwireApi unit test stubs"""
    # Auto-generated by swagger-codegen: each test body is a placeholder —
    # fill in real request/assert logic before relying on this suite.
    def setUp(self):
        # Fresh API client for every test case.
        self.api = swagger_client.api.pay_outs_bankwire_api.PayOutsBankwireApi() # noqa: E501
    def tearDown(self):
        pass
    def test_pay_outs_bankwire_get(self):
        """Test case for pay_outs_bankwire_get
        """
        pass
    def test_pay_outs_bankwire_post(self):
        """Test case for pay_outs_bankwire_post
        """
        pass
if __name__ == '__main__':
    unittest.main()
| [
"antonio.canovas@ingenieriacloud.com"
] | antonio.canovas@ingenieriacloud.com |
9e0d43d533ea80c344c09fe7da04441b491f3e53 | 1fa04a3baf82a2469b1140a7fd350a5df011faf5 | /waglee/wagblog/migrations/0005_blogtagindexpage.py | c381938022e9a03738cedc0350f87c7b121859e3 | [] | no_license | Serrones/wag_tutorial | 8446b33c3b71657402a9af214ae1d9f8f99d9694 | f357a8dabf5ade3f6dc80c17795cf6f3e721b381 | refs/heads/master | 2020-03-23T21:12:20.069601 | 2018-08-05T01:28:39 | 2018-08-05T01:28:39 | 142,089,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | # Generated by Django 2.0.7 on 2018-08-05 00:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the BlogTagIndexPage model as
    # a subclass of wagtailcore.Page (multi-table inheritance through the
    # page_ptr one-to-one link). Do not hand-edit an applied migration;
    # generate a new one instead.
    dependencies = [
        ('wagtailcore', '0040_page_draft_title'),
        ('wagblog', '0004_auto_20180805_0031'),
    ]
    operations = [
        migrations.CreateModel(
            name='BlogTagIndexPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
| [
"fabioserrones@gmail.com"
] | fabioserrones@gmail.com |
64ab8a1cb73d26acf8f6970397a240a58e447d17 | c70b5f5c87867d692e347e382bdc6723500066b8 | /miscellanea/extract_crd_withMP2.py | 0ce7e1ff12d6771187a99ea1e738fbfd0a11e128 | [] | no_license | Steboss/mdgx_python_api | 39f5249f9a0c7b7a5361a29b60910b1d949e96e2 | b35311d10d986cafe679ad8ee0f058ce603d627c | refs/heads/master | 2021-05-12T19:23:54.946988 | 2018-02-07T11:52:12 | 2018-02-07T11:52:12 | 117,087,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,420 | py | #JAN 2017 Stefano Bosisio
#Here I extract the mp2 energies from the mp2_output folder
import os,sys,re
def rst7writer(coords,natoms):
    """Write the last optimized geometry to an AMBER restart file ("tmp.rst7").

    :param coords: Gaussian "Standard orientation" table lines; whitespace
        fields 3..5 of each line are x, y, z for one atom.
    :param natoms: number of atoms, written on the rst7 header line.
    """
    #coords is the piece of output with all the coordinate
    #file_name: the name of the file we have to ave the crd like structure_0.crd
    #Crd file
    outputcrd = open("tmp.rst7","w")
    outputcrd.write("LIG\n")
    outputcrd.write(" %d\n" %natoms)
    #Now write the coordinates rst7 file
    # NOTE(review): this prints the file object, not its name — probably
    # meant outputcrd.name ("tmp.rst7").
    print("writing coordinate file %s" % outputcrd)
    position = 0  # NOTE(review): never used below
    counter = 0
    for f in coords:
        coordx = float(f.split()[3])
        coordy = float(f.split()[4])
        coordz = float(f.split()[5])
        # Pad each "%.7f" field with leading spaces so columns line up.
        # NOTE(review): both branches of each test look identical here —
        # the original pad widths may have been lost in transcription;
        # confirm against the upstream file.
        if coordx<0 or coordx>10:
            space=" "
            crdX = "%.7f" % coordx
            cX = space+crdX
        else:
            space=" "
            crdX = "%.7f" % coordx
            cX = space+crdX
        if coordy<0 or coordy>10:
            space=" "
            crdY = "%.7f" % coordy
            cY = space+crdY
        else:
            space=" "
            crdY = "%.7f" % coordy
            cY = space+crdY
        if coordz<0 or coordz>10:
            space=" "
            crdZ = "%.7f" % coordz
            cZ = space+crdZ
        else:
            space=" "
            crdZ = "%.7f" % coordz
            cZ = space+crdZ
        # rst7 layout: two atoms (six coordinate fields) per output line.
        if counter ==1 :
            outputcrd.write("%s%s%s\n" %(cX,cY,cZ))
            counter=0
        else:
            outputcrd.write("%s%s%s" %(cX,cY,cZ))
            counter+=1
    outputcrd.close()
def mdcrdwriter(coords,natoms,out_mdcrd):
    """Append one frame (10 "%.3f" coordinate fields per line) to an AMBER
    mdcrd-style trajectory file.

    :param coords: Gaussian "Standard orientation" lines; whitespace fields
        3..5 of each line are x, y, z for one atom.
    :param natoms: atom count; exactly natoms*3 values are emitted.
    :param out_mdcrd: file handle already opened for appending.
    """
    #print the name of the file
    #extract energies
    counter = 0 # elements on line
    position = 0  # NOTE(review): never used below
    count_coords = 0
    total_coords = natoms*3
    elems=""
    for f in coords:
        coordx = float(f.split()[3])
        coordy = float(f.split()[4])
        coordz = float(f.split()[5])
        count_coords+=3
        # Leading-space padding per field; NOTE(review): the two branches of
        # each test look identical — original pad widths may have been lost
        # in transcription, confirm against the upstream file.
        if coordx<0 or coordx>10:
            space=" "
            crdX = "%.3f" % coordx
            cX = space+crdX
        else:
            space=" "
            crdX = "%.3f" % coordx
            cX = space+crdX
        if coordy<0 or coordy>10:
            space=" "
            crdY = "%.3f" % coordy
            cY = space+crdY
        else:
            space=" "
            crdY = "%.3f" % coordy
            cY = space+crdY
        if coordz<0 or coordz>10:
            space=" "
            crdZ = "%.3f" % coordz
            cZ = space+crdZ
        else:
            space=" "
            crdZ = "%.3f" % coordz
            cZ = space+crdZ
        # Emit x, y, z in turn; flush the buffered line after every 10th
        # value, and always after the final coordinate of the frame.
        elems+="%s" % (cX)
        counter+=1
        if counter==10:
            elems+="\n"
            out_mdcrd.write(elems)
            elems=""
            counter=0
        elems+="%s" %(cY)
        counter+=1
        if counter==10:
            elems+="\n"
            out_mdcrd.write(elems)
            elems=""
            counter=0
        elems+="%s" %(cZ)
        counter+=1
        if count_coords==total_coords:
            elems+="\n"
            out_mdcrd.write(elems)
        elif counter==10:
            elems+="\n"
            out_mdcrd.write(elems)
            elems=""
            counter=0
    #out_mdcrd.write("\n")
#######################MAIN###################################
#deal with I/O
# Usage: extract_crd_withMP2.py <gaussian_output> <mdcrd_out> <prmtop>
file_input = sys.argv[1]
mdcrd_file = sys.argv[2]
top_file = sys.argv[3]
reader = open(file_input,"r").readlines()
# Append to the trajectory; only a brand-new file gets the "LIG" title line.
if os.path.exists(mdcrd_file):
    mdcrd = open(mdcrd_file,"a")
else:
    mdcrd = open(mdcrd_file,"a")
    mdcrd.write("LIG\n")
#now read it and the last mp2= is the value we want
# NOTE(review): if no line contains "EUMP2", en_line stays unbound below
# and a NameError is raised.
for line in reader:
    if "EUMP2" in line:
        en_line = line
#take the value of energy, which is after some split
#e.g.' E2 = -0.3224128066D+01 EUMP2 = -0.98809822517423D+03\n'
en_string = en_line.strip().split("EUMP2")[1].split("=")[1]
#then substitute the D with E otherwise we cannot convert in float
en_val=float(re.sub(r"D","E",en_string))
#convert the energy to kcal/mol?
output_energy = open("quantum_energy.dat","a")
output_energy.write("%.10f\n" % en_val)
indexes = []
#now create the mdcrd file
#now collect the index to know where the standard optimized structure is
for i, line in enumerate(reader,0):
    if "Standard orientation:" in line:
        indexes.append(i)
#number of atoms:
natoms = 0
charge_idx = []
for i,line in enumerate(reader,0): #the number of atoms come from lines
    if "Charge" in line:
        charge_idx.append(i+1)
# Count atom lines after the first "Charge" header until a blank-ish line.
for i,line in enumerate(reader[charge_idx[0]:],0):
    if line==" \n":
        break
    else:
        natoms+=1
# The coordinate table starts 5 lines after the last "Standard orientation:".
last_idx = indexes[-1] + 5
end_coords = last_idx + natoms
coords = reader[last_idx:end_coords] ##this is the fragment of the file with the coordinates
mdcrdwriter(coords,natoms,mdcrd)
rst7writer(coords,natoms)
if os.path.exists("amber_energy.dat"):
    amber = open("amber_energy.dat","a")
else:
    amber = open("amber_energy.dat","a")
###Evaluate amber energies
#Amber energies
# Single-point AMBER energy via paramfit, using a dummy target energy file.
cmd =""" echo "0.000" > dummy.dat """
os.system(cmd)
cmd =""" cat> job.in << EOF
ALGORITHM=NONE
NSTRUCTURES=1
COORDINATE_FORMAT=RESTART
EOF"""
os.system(cmd)
#cmd
cmd = """paramfit -i job.in -p %s -c tmp.rst7 -q dummy.dat | grep "Calculated energy with initial parameters" | awk '{print $10'} >> amber_energy.dat""" %(top_file)
os.system(cmd)
os.system("wait")
print(cmd)
cmd = "rm tmp.rst7 job.in dummy.dat"
os.system(cmd)
| [
"stefanobosisio1@gmail.com"
] | stefanobosisio1@gmail.com |
583d36baee1172132ed3c0eed658f4aa8a7df6ad | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part2/batch/jm/parser_errors_2/293445611.py | 10a81ed57c337b5a643dca205553d6da1debcf26 | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 3,612 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 293445611
"""
"""
random actions, total chaos
"""
# Auto-generated random-action scenario (uuid 293445611): replays a fixed
# sequence of moves on a 5x4 board with 3 players (max 2 areas each) and
# asserts the engine's responses; gamma_delete frees the board at the end.
board = gamma_new(5, 4, 3, 2)
assert board is not None
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_free_fields(board, 1) == 6
assert gamma_move(board, 2, 1, 2) == 1
assert gamma_move(board, 2, 3, 3) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_move(board, 3, 0, 2) == 1
assert gamma_busy_fields(board, 3) == 2
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 1, 3) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_free_fields(board, 2) == 6
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 3, 3, 3) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_free_fields(board, 3) == 2
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_free_fields(board, 3) == 2
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_busy_fields(board, 1) == 5
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 1, 3) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 4, 1) == 1
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_busy_fields(board, 1) == 7
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 3, 2) == 1
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_free_fields(board, 2) == 4
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_golden_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_busy_fields(board, 2) == 3
assert gamma_move(board, 3, 4, 2) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 3, 3) == 0
assert gamma_move(board, 2, 2, 3) == 1
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_busy_fields(board, 3) == 2
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_free_fields(board, 1) == 4
gamma_delete(board)
| [
"jakub@molinski.dev"
] | jakub@molinski.dev |
6babd3556c4d789cc9deff9a19367bcaf4eda064 | d7551c200a4859690cc73fb60f8a5cb3c0363478 | /XiaoZzi/RequestsLearn/Requests_authen.py | 71b5aada064859fe7bc0e75b36c93a33af5ea7cf | [] | no_license | OverCastCN/Python_Learn | 8b74ce4027b6ebfdd7af739551b41606bd04ff70 | e7bb003e262f82977e8b2b2e14f32ff16fb0bd03 | refs/heads/master | 2021-01-01T05:58:37.654962 | 2019-06-26T12:47:38 | 2019-06-26T12:47:38 | 97,323,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # -*- coding:utf-8 -*-
import requests
# Root of the GitHub REST API; every endpoint path is appended to it.
base_url = 'https://api.github.com'


def construct_url(end):
    """Return the full API URL for the endpoint path *end*."""
    return base_url + '/' + end
def basic_auth():
    """
    HTTP Basic authentication (translated from Chinese: "basic auth").

    Requests the authenticated-user endpoint with a hard-coded
    username/password pair and prints the response and request headers.
    :return: None
    """
    # NOTE(review): credentials are hard-coded in source — move them to
    # configuration before any real use.
    response = requests.get(url=construct_url('user'),auth=('imoocdemo','imoocdemo123'))
    print response.headers
    print response.request.headers
def basic_oauth():
    """
    OAuth token authentication (translated from Chinese: "OAuth auth").

    Sends the Authorization header to the user/emails endpoint and prints
    the request headers that were actually transmitted.
    :return: None
    """
    headers = {'Authorization':'token Basic aW1vb2NkZW1vOmltb29jZGVtbzEyMw=='}
    # BUG FIX: construct_url() was wrapped around itself and given a
    # ``headers`` keyword it does not accept (TypeError at call time);
    # build the URL once and pass the headers to requests.get() instead.
    response = requests.get(construct_url('user/emails'), headers=headers)
    print(response.request.headers)
"980850796@qq.com"
] | 980850796@qq.com |
046541be0496fc236dae614f273af615bdc2f130 | 4e2d9f918ece774b72848128046787b178f0ee8e | /NAO/MyCode/test_module.py | 2ec0990b618d54bad50243ebe61974ac9a99361b | [
"Apache-2.0"
] | permissive | mlzboy/python | de6c210ae34db12e2b3299ce98e084f05954f15f | 0006a2f9493008400e89fcc952f9e0a182053b64 | refs/heads/master | 2020-03-26T01:07:29.747935 | 2018-06-14T11:43:44 | 2018-06-14T11:43:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | # -*- encoding: UTF-8 -*-
""" 测试ALSoundLocalization用法
update by Ian in 2017-9-3 14:51:11
解决方法:
1. setParameter设置灵敏度参数
2. subscribe订阅这个事件
3. getData获得订阅的数据
"""
from naoqi import ALModule
from naoqi import ALProxy
from naoqi import ALBroker
import sys
import time
import argparse
class Motion(ALModule):
    """Controls the robot's movement (translated: "control the robot's actions")."""
    def __init__(self, name,angle=50.0):
        # name: broker-visible module name; angle: stored but not used here.
        ALModule.__init__(self, name) # the parent class must be initialised first
        self.life = ALProxy('ALAutonomousLife')
        self.motion = ALProxy('ALMotion')
        self.posture = ALProxy('ALRobotPosture')
        self.memory = ALProxy('ALMemory')
        # initialisation
        self.angle = angle
        self.headangle = 0
        self.life.setState('disabled') # disable autonomous life: turn off all reflexes
        self.motion.wakeUp() # wake the robot up
        self.posture.goToPosture("StandInit", 0.5) # go to the initial posture
        self.motion.setStiffnesses("Head", 1.0) # set stiffness; without it the head cannot turn
    def SoundLocalization(self):
        """Subscribe to ALSoundLocalization and poll/print located sounds once per second."""
        self.sound = ALProxy('ALSoundLocalization')
        self.sound.setParameter('Sensitivity',0.5)
        self.sound.subscribe('ALSoundLocalization/SoundLocated') # subscribe to (i.e. start) this event/module
        try:
            while True:
                time.sleep(1)
                data = self.memory.getData('ALSoundLocalization/SoundLocated')
                print data
        except KeyboardInterrupt:
            print
            print "Interrupted by user, shutting down"
            sys.exit(0)
if __name__ == '__main__':
    # Command-line options: robot IP/port and a face-size threshold.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="192.168.1.102",
                        help="192.168.1.101")
    parser.add_argument("--port", type=int, default=9559,
                        help="9987")
    parser.add_argument("--facesize", type=float, default=0.1,
                        help="0.2")
    args = parser.parse_args()
    # set up the broker (required so ALModule subclasses can register)
    myBroker = ALBroker("myBroker", "0.0.0.0", 0, args.ip, args.port)
    mymotion = Motion('mymotion')
    # NOTE(review): SoundLocalization() loops until Ctrl-C and then calls
    # sys.exit(0), so shutdown() below is effectively unreachable.
    mymotion.SoundLocalization()
    myBroker.shutdown()
| [
"767786685@qq.com"
] | 767786685@qq.com |
e4e76e5aff929e10786d99463ac91740f4203e29 | 4b4df51041551c9a855468ddf1d5004a988f59a2 | /leetcode_python/Binary_Search/peak-index-in-a-mountain-array.py | af188fdd21026b7c3572007599786f903c62ea64 | [] | no_license | yennanliu/CS_basics | 99b7ad3ef6817f04881d6a1993ec634f81525596 | 035ef08434fa1ca781a6fb2f9eed3538b7d20c02 | refs/heads/master | 2023-09-03T13:42:26.611712 | 2023-09-03T12:46:08 | 2023-09-03T12:46:08 | 66,194,791 | 64 | 40 | null | 2022-08-20T09:44:48 | 2016-08-21T11:11:35 | Python | UTF-8 | Python | false | false | 3,776 | py | """
852. Peak Index in a Mountain Array
Easy
Let's call an array arr a mountain if the following properties hold:
arr.length >= 3
There exists some i with 0 < i < arr.length - 1 such that:
arr[0] < arr[1] < ... arr[i-1] < arr[i]
arr[i] > arr[i+1] > ... > arr[arr.length - 1]
Given an integer array arr that is guaranteed to be a mountain, return any i such that arr[0] < arr[1] < ... arr[i - 1] < arr[i] > arr[i + 1] > ... > arr[arr.length - 1].
Example 1:
Input: arr = [0,1,0]
Output: 1
Example 2:
Input: arr = [0,2,1,0]
Output: 1
Example 3:
Input: arr = [0,10,5,2]
Output: 1
Constraints:
3 <= arr.length <= 104
0 <= arr[i] <= 106
arr is guaranteed to be a mountain array.
Follow up: Finding the O(n) is straightforward, could you find an O(log(n)) solution?
"""
# V0
# IDEA : PROBLEM UNDERSTANDING
# SAME AS LC 162 Find Peak Element
class Solution(object):
    """Linear scan: the peak is the first index whose successor is smaller."""

    def peakIndexInMountainArray(self, arr):
        """Return the peak index of mountain array *arr*.

        Returns ``False`` for arrays shorter than 3 and -1 if the values
        never start to descend (non-mountain input).
        """
        if len(arr) < 3:
            # A mountain needs at least three elements.
            return False
        # BUG FIX: stop at len(arr) - 2 so arr[i + 1] can never go out of
        # range — the original scanned to the last element and raised
        # IndexError on strictly increasing (non-mountain) input.
        for i in range(len(arr) - 1):
            if arr[i] > arr[i + 1]:
                return i
        return -1
# V0'
# IDEA : BINARY SEARCH
class Solution(object):
    """Binary-search variant: walk toward the ascending side until the peak."""

    def peakIndexInMountainArray(self, arr):
        """Return the peak index, ``False`` for arrays shorter than 3,
        or -1 if the search window empties without finding a peak."""
        if len(arr) < 3:
            return False
        lo, hi = 0, len(arr) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if arr[mid] > arr[mid - 1] and arr[mid] > arr[mid + 1]:
                return mid
            if arr[mid] < arr[mid + 1]:
                # still climbing: the peak lies to the right of mid
                lo = mid + 1
            else:
                # descending: the peak lies to the left of mid
                hi = mid - 1
        return -1
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/80721162
# IDEA : BINARY SEARCH
class Solution(object):
    """Binary search that narrows [left, right] and breaks out at the peak."""

    def peakIndexInMountainArray(self, A):
        """
        :type A: List[int]
        :rtype: int

        Assumes len(A) >= 3 (a guaranteed mountain); otherwise ``mid``
        could be referenced before assignment.
        """
        left, right = 0, len(A) - 1
        while left < right:
            # BUG FIX: was ``(left + right) / 2`` — true division returns a
            # float on Python 3, so every A[mid] lookup raised TypeError.
            mid = (left + right) // 2
            if A[mid - 1] < A[mid] and A[mid] < A[mid + 1]:
                left = mid
            elif A[mid - 1] > A[mid] and A[mid] > A[mid + 1]:
                right = mid
            else:
                break
        return mid
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/80721162
# IDEA : BINARY SEARCH
class Solution:
    """Half-open-interval binary search for the mountain peak."""

    def peakIndexInMountainArray(self, A):
        """
        :type A: List[int]
        :rtype: int
        """
        lo, hi = 0, len(A)          # search window is [lo, hi)
        while lo < hi:
            mid = (lo + hi) // 2
            if A[mid - 1] < A[mid] and A[mid] > A[mid + 1]:
                return mid          # strictly greater than both neighbours
            if A[mid] < A[mid + 1]:
                lo = mid + 1        # ascending slope: peak is to the right
            else:
                hi = mid            # descending slope: peak is at mid or left
        return -1
# V1''
# https://blog.csdn.net/fuxuemingzhu/article/details/80721162
# IDEA : MAX
class Solution:
    """The peak of a mountain array is simply the position of its maximum."""

    def peakIndexInMountainArray(self, A):
        """
        :type A: List[int]
        :rtype: int
        """
        # Same result as A.index(max(A)): max() over the indices keeps the
        # first of equal candidates, matching index()'s first-occurrence
        # semantics.
        return max(range(len(A)), key=A.__getitem__)
# V1'''
# https://blog.csdn.net/fuxuemingzhu/article/details/80721162
# IDEA : FIRST DECREASE
class Solution:
    """Linear scan: the answer is the first index where the values drop."""

    def peakIndexInMountainArray(self, A):
        """
        :type A: List[int]
        :rtype: int
        """
        # First i with A[i + 1] < A[i]; -1 when the array never descends.
        return next(
            (i for i in range(len(A) - 1) if A[i + 1] < A[i]),
            -1,
        )
# V2
# Time: O(logn)
# Space: O(1)
class Solution(object):
    """Lower-bound binary search on the predicate "starts descending here"."""

    def peakIndexInMountainArray(self, A):
        """
        :type A: List[int]
        :rtype: int
        """
        lo, hi = 0, len(A)
        while lo < hi:
            mid = (lo + hi) // 2
            if A[mid] > A[mid + 1]:
                hi = mid        # mid may itself be the peak
            else:
                lo = mid + 1    # still climbing past mid
        return lo
"f339339@gmail.com"
] | f339339@gmail.com |
90bbd942185a79a402e133decd2102a506901894 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v3/proto/enums/offline_user_data_job_failure_reason_pb2.py | f880a390b39fa5c4521bfcfd0452428d2cbd3add | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | true | 4,516 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/enums/offline_user_data_job_failure_reason.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/enums/offline_user_data_job_failure_reason.proto',
package='google.ads.googleads.v3.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v3.enumsB$OfflineUserDataJobFailureReasonProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v3/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V3.Enums\312\002\035Google\\Ads\\GoogleAds\\V3\\Enums\352\002!Google::Ads::GoogleAds::V3::Enums'),
serialized_pb=_b('\nNgoogle/ads/googleads_v3/proto/enums/offline_user_data_job_failure_reason.proto\x12\x1dgoogle.ads.googleads.v3.enums\x1a\x1cgoogle/api/annotations.proto\"\xad\x01\n#OfflineUserDataJobFailureReasonEnum\"\x85\x01\n\x1fOfflineUserDataJobFailureReason\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12%\n!INSUFFICIENT_MATCHED_TRANSACTIONS\x10\x02\x12\x1d\n\x19INSUFFICIENT_TRANSACTIONS\x10\x03\x42\xf9\x01\n!com.google.ads.googleads.v3.enumsB$OfflineUserDataJobFailureReasonProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v3/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V3.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V3\\Enums\xea\x02!Google::Ads::GoogleAds::V3::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_OFFLINEUSERDATAJOBFAILUREREASONENUM_OFFLINEUSERDATAJOBFAILUREREASON = _descriptor.EnumDescriptor(
name='OfflineUserDataJobFailureReason',
full_name='google.ads.googleads.v3.enums.OfflineUserDataJobFailureReasonEnum.OfflineUserDataJobFailureReason',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INSUFFICIENT_MATCHED_TRANSACTIONS', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INSUFFICIENT_TRANSACTIONS', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=184,
serialized_end=317,
)
_sym_db.RegisterEnumDescriptor(_OFFLINEUSERDATAJOBFAILUREREASONENUM_OFFLINEUSERDATAJOBFAILUREREASON)
_OFFLINEUSERDATAJOBFAILUREREASONENUM = _descriptor.Descriptor(
name='OfflineUserDataJobFailureReasonEnum',
full_name='google.ads.googleads.v3.enums.OfflineUserDataJobFailureReasonEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_OFFLINEUSERDATAJOBFAILUREREASONENUM_OFFLINEUSERDATAJOBFAILUREREASON,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=144,
serialized_end=317,
)
_OFFLINEUSERDATAJOBFAILUREREASONENUM_OFFLINEUSERDATAJOBFAILUREREASON.containing_type = _OFFLINEUSERDATAJOBFAILUREREASONENUM
DESCRIPTOR.message_types_by_name['OfflineUserDataJobFailureReasonEnum'] = _OFFLINEUSERDATAJOBFAILUREREASONENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OfflineUserDataJobFailureReasonEnum = _reflection.GeneratedProtocolMessageType('OfflineUserDataJobFailureReasonEnum', (_message.Message,), dict(
DESCRIPTOR = _OFFLINEUSERDATAJOBFAILUREREASONENUM,
__module__ = 'google.ads.googleads_v3.proto.enums.offline_user_data_job_failure_reason_pb2'
,
__doc__ = """Container for enum describing reasons why an offline user data job
failed to be processed.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.enums.OfflineUserDataJobFailureReasonEnum)
))
_sym_db.RegisterMessage(OfflineUserDataJobFailureReasonEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"noreply@github.com"
] | fiboknacky.noreply@github.com |
a12dfe975bc42d0d73e84e9188862ac2be40e096 | 5593b35f326748f18053e7ea042c98fe6b70a850 | /tqt/__version__.py | 9e07edd14c5281467200b0a4457633f34448cb95 | [
"BSD-3-Clause"
] | permissive | sicdl/TQT | 7dfe3bce2bb5dace9a467945512e65525a0c3be9 | 27b73fcf27ddfb67cd28f6ed27e49341f27c9f16 | refs/heads/main | 2023-04-14T18:28:23.224689 | 2021-04-22T14:46:46 | 2021-04-22T14:46:46 | 362,503,682 | 0 | 0 | BSD-3-Clause | 2021-04-28T14:45:14 | 2021-04-28T14:45:13 | null | UTF-8 | Python | false | false | 238 | py | __title__ = 'tqt'
# Package metadata for the TQT distribution (read by setup.py / docs).
__description__ = 'torch implement of TQT'
__version__ = '1.0.1'
__author__ = 'Pannenets F'
__author_email__ = 'pannenets.f@foxmail.com'
__license__ = 'BSD-3-Clause License'
__url__ = 'https://github.com/PannenetsF/TQT'
| [
"you@example.com"
] | you@example.com |
c1c728eb324e6d3190e3c0541dabc42773348998 | f58936d3d01b014131b9038616d6f3573dd94f44 | /preprocessing_tools/extended_ner/food/api/foodpanda_city.py | 14db425c4522951eefdf3a40d8de32d04fbc493b | [] | no_license | niteshsurtani/Personal_Assistant | 69f591d03200ad9f8e66533f6968c7fb2a1d8667 | 363a65b3e3438b5824d8abb6caee53a70eefa024 | refs/heads/master | 2022-11-20T00:13:09.289399 | 2017-10-13T17:32:00 | 2017-10-13T17:32:00 | 106,283,072 | 0 | 1 | null | 2020-07-23T18:18:29 | 2017-10-09T12:50:48 | Python | UTF-8 | Python | false | false | 2,007 | py | from mysql.connector import MySQLConnection, Error
from dbconfig import read_db_config
def insertOneCity(city_id, name, data_resource):
    """Insert a single row into the ``city`` table (parameterized query)."""
    query = "INSERT INTO city(city_id, name, data_resource) " \
            "VALUES(%s, %s, %s)"
    args = (city_id, name, data_resource)
    try:
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        cursor.execute(query, args)
        conn.commit()
    except Error as error:
        print error
    finally:
        # NOTE(review): if MySQLConnection() itself fails, cursor/conn are
        # unbound here and close() raises NameError.
        cursor.close()
        conn.close()
    print "CITY DATA INSERTED!!!"
def insertManyCities(city_info):
    """Bulk-insert rows into ``city``.

    :param city_info: iterable of (city_id, name, data_resource) tuples.
    """
    query = "INSERT INTO city(city_id, name, data_resource) " \
            "VALUES(%s, %s, %s)"
    try:
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        cursor.executemany(query, city_info)
        conn.commit()
    except Error as error:
        print error
    finally:
        # NOTE(review): cursor/conn are unbound here if the connection failed.
        cursor.close()
        conn.close()
    print "CITY DATA INSERTED!!!"
def findCityById(id):
    """Fetch and print the city row whose ``city_id`` equals *id*.

    :param id: city identifier (any value the driver can bind).
    """
    conn = None
    cursor = None
    try:
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        # SECURITY FIX: the id used to be concatenated into the SQL string,
        # which allowed SQL injection; bind it as a query parameter instead.
        cursor.execute("SELECT * FROM city WHERE city_id = %s", (id,))
        row = cursor.fetchone()
        print(row)
    except Error as error:
        print(error)
    finally:
        # Guard against a failed connection, where cursor/conn never existed
        # (the original raised NameError here in that case).
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
def findAllCities():
    """Return all city names as a list of row tuples (empty list on error)."""
    rows = []
    try:
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        cursor.execute("SELECT name FROM city")
        rows = cursor.fetchall()
        # for row in rows:
        # print row
    except Error as error:
        print error
    finally:
        # NOTE(review): cursor/conn are unbound here if the connection failed.
        cursor.close()
        conn.close()
    return rows
def findCityByName(query):
    """Return rows whose name starts with *query* (``LIKE 'query%'``).

    :param query: name prefix; falsy values are passed through unchanged.
    :return: list of matching row tuples (empty on error).
    """
    rows = []
    conn = None
    cursor = None
    try:
        db_config = read_db_config()
        conn = MySQLConnection(**db_config)
        cursor = conn.cursor()
        if query:
            query = query + "%"
        # SECURITY FIX: the pattern used to be concatenated into the SQL
        # string ("... LIKE '" + query + "'"), which allowed SQL injection;
        # bind it as a driver parameter instead.
        cursor.execute("SELECT * FROM city WHERE name LIKE %s", (query,))
        rows = cursor.fetchall()
    except Error as error:
        print(error)
    finally:
        # Guard against a failed connection, where cursor/conn never existed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
    return rows
| [
"nitesh.surtani0606@gmail.com"
] | nitesh.surtani0606@gmail.com |
ce0fbe43952534a1980d29bcfbbed01aa5c538c6 | 6c137e70bb6b1b618fbbceddaeb74416d387520f | /lantz/lantz/drivers/examples/foreign_example.py | 682536c5bd86a50a87407085d922a159115462a4 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | zhong-lab/code | fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15 | b810362e06b44387f0768353c602ec5d29b551a2 | refs/heads/master | 2023-01-28T09:46:01.448833 | 2022-06-12T22:53:47 | 2022-06-12T22:53:47 | 184,670,765 | 2 | 7 | BSD-2-Clause | 2022-12-08T21:46:15 | 2019-05-02T23:37:39 | Python | UTF-8 | Python | false | false | 2,046 | py | # -*- coding: utf-8 -*-
"""
lantz.drivers.example.foreign_example
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Foreign library example.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import ctypes as ct
from lantz import Feat, Action, DictFeat
from lantz.foreign import LibraryDriver
from lantz.errors import InstrumentError
class ForeignTemplate(LibraryDriver):
    """Template for Drivers using a library.
    """
    # Shared library loaded by LibraryDriver and exposed as self.lib.
    LIBRARY_NAME = 'mylibrary.dll'
    def _return_handler(self, func_name, ret_value):
        # Invoked after each library call; non-zero return codes are errors.
        # NOTE(review): _ERRORS is not defined in this module, so a failing
        # call raises NameError instead of InstrumentError — define or
        # import the error-code table.
        if ret_value != 0:
            raise InstrumentError('{} ({})'.format(ret_value, _ERRORS[ret_value]))
        return ret_value
    @Feat()
    def idn(self):
        # Instrument identification string.
        return self.query('*IDN?')
    @Feat(units='V', limits=(10,))
    def amplitude(self):
        """Amplitude.
        """
        return float(self.query('?AMP'))
    @amplitude.setter
    def amplitude(self, value):
        self.query('!AMP {:.1f}'.format(value))
    @DictFeat(values={True: '1', False: '0'}, keys=list(range(1,9)))
    def dout(self, key):
        """Digital output state.
        """
        return self.query('?DOU {}'.format(key))
    @dout.setter
    def dout(self, key, value):
        self.query('!DOU {} {}'.format(key, value))
    @Action()
    def do_something(self):
        """Help for do_something
        """
        return self.lib.something()
if __name__ == '__main__':
import argparse
import lantz.log
parser = argparse.ArgumentParser(description='Test Kentech HRI')
parser.add_argument('-i', '--interactive', action='store_true',
default=False, help='Show interactive GUI')
args = parser.parse_args()
lantz.log.log_to_socket(lantz.log.DEBUG)
with ForeignTemplate() as inst:
if args.interactive:
from lantz.ui.app import start_test_app
start_test_app(inst)
else:
# Add your test code here
print('Non interactive mode')
| [
"none"
] | none |
868777082c196ad7aceaa2c788c04575c894c324 | 7af848e1aab6f1c4362fd7588c80efec566ef9f3 | /mlinsights/mlmodel/classification_kmeans.py | 0417e2bffb55232f10b10ab4e229a5d36b8595fe | [
"MIT"
] | permissive | alexisjihyeross/mlinsights | 2e8873645c3e4883aa4ff422b0543fba36712109 | 74a834714a96e2e78b8dfc3b750a9d605df14834 | refs/heads/master | 2021-04-14T23:05:22.115689 | 2020-03-11T11:43:35 | 2020-03-11T11:43:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,360 | py | """
@file
@brief Combines a *k-means* followed by a predictor.
"""
import inspect
import numpy
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
from sklearn.base import BaseEstimator, ClassifierMixin, clone
class ClassifierAfterKMeans(BaseEstimator, ClassifierMixin):
"""
Applies a *k-means* (see :epkg:`sklearn:cluster:KMeans`)
for each class, then adds the distance to each cluster
as a feature for a classifier.
See notebook :ref:`logisticregressionclusteringrst`.
"""
def __init__(self, estimator=None, clus=None, **kwargs):
"""
@param estimator :epkg:`sklearn:linear_model:LogisiticRegression`
by default
@param clus clustering applied on each class,
by default k-means with two classes
@param kwargs sent to :meth:`set_params
<mlinsights.mlmodel.classification_kmeans.ClassifierAfterKMeans.set_params>`,
see its documentation to understand how to specify parameters
"""
ClassifierMixin.__init__(self)
BaseEstimator.__init__(self)
if estimator is None:
estimator = LogisticRegression()
if clus is None:
clus = KMeans(n_clusters=2)
self.estimator = estimator
self.clus = clus
if not hasattr(clus, "transform"):
raise AttributeError("clus does not have a transform method.")
if kwargs:
self.set_params(**kwargs)
def fit(self, X, y, sample_weight=None):
"""
Runs a *k-means* on each class
then trains a classifier on the
extended set of features.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values. Will be cast to X's dtype if necessary
sample_weight : numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
Attributes
----------
labels_: dictionary of clustering models
clus_: array of clustering models
estimator_: trained classifier
"""
classes = set(y)
self.labels_ = list(sorted(classes))
self.clus_ = {}
sig = inspect.signature(self.clus.fit)
for cl in classes:
m = clone(self.clus)
Xcl = X[y == cl]
if sample_weight is None or 'sample_weight' not in sig.parameters:
w = None
m.fit(Xcl)
else:
w = sample_weight[y == cl]
m.fit(Xcl, sample_weight=w)
self.clus_[cl] = m
extX = self.transform_features(X)
self.estimator_ = self.estimator.fit(
extX, y, sample_weight=sample_weight)
return self
def transform_features(self, X):
"""
Applies all the clustering objects
on every observations and extends the list of
features.
@param X features
@return extended features
"""
preds = []
for _, v in sorted(self.clus_.items()):
p = v.transform(X)
preds.append(p)
return numpy.hstack(preds)
def predict(self, X):
"""
Runs the predictions.
"""
extX = self.transform_features(X)
return self.estimator.predict(extX)
def predict_proba(self, X):
"""
Converts predictions into probabilities.
"""
extX = self.transform_features(X)
return self.estimator.predict_proba(extX)
def decision_function(self, X):
"""
Calls *decision_function*.
"""
extX = self.transform_features(X)
return self.estimator.decision_function(extX)
def get_params(self, deep=True):
"""
Returns the parameters for both
the clustering and the classifier.
@param deep unused here
@return dict
:meth:`set_params <mlinsights.mlmodel.classification_kmeans.ClassifierAfterKMeans.set_params>`
describes the pattern parameters names follow.
"""
res = {}
for k, v in self.clus.get_params().items():
res["c_" + k] = v
for k, v in self.estimator.get_params().items():
res["e_" + k] = v
return res
def set_params(self, **values):
"""
Sets the parameters before training.
Every parameter prefixed by ``'e_'`` is an estimator
parameter, every parameter prefixed by ``'c_'`` is for
the :epkg:`sklearn:cluster:KMeans`.
@param values valeurs
@return dict
"""
pc, pe = {}, {}
for k, v in values.items():
if k.startswith('e_'):
pe[k[2:]] = v
elif k.startswith('c_'):
pc[k[2:]] = v
else:
raise ValueError("Unexpected parameter name '{0}'".format(k))
self.clus.set_params(**pc)
self.estimator.set_params(**pe)
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
de847436be859ecbfdcdf2f2eda99f6677a367d1 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /XML/XML_to_python_objects__lxml.objectify__examples/from_text.py | 6b1cf58b002108f1d034790570a56300f1bfc679 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://lxml.de/objectify.html
from datetime import datetime
# pip install lxml
from lxml import objectify
text = """\
<Response>
<Data>
<Report>
<LeaderList>
<Leader ActualDate="2009-12-01" FIO="Шxxxxxxx Аxxxxx Шxxxxxx" INN="5xxxxxxxxx" Position="генеральный директор"/>
<Leader ActualDate="2008-10-07" FIO="Вxxxxxx Аxxxxxx Аxxxxxxx" Position="генеральный директор"/>
<Leader ActualDate="2007-04-17" FIO="Оxxxxxxxx Сxxxxx Вxxxxxxx" Position="генеральный директор"/>
<Leader ActualDate="2004-12-06" FIO="Кxxxxxxx Аxxxxxxx Нxxxxxx" Position="генеральный директор"/>
</LeaderList>
</Report>
</Data>
<ResultInfo ExecutionTime="140" ResultType="True"/>
</Response>
"""
def to_date(date_str):
return datetime.strptime(date_str, '%Y-%m-%d')
root = objectify.fromstring(text)
items = root.Data.Report.LeaderList.Leader
leader = max(items, key=lambda x: to_date(x.attrib['ActualDate']))
print(leader.attrib['FIO']) # Шxxxxxxx Аxxxxx Шxxxxxx
print(leader.attrib['ActualDate']) # 2009-12-01
print(leader.attrib['Position']) # генеральный директор
| [
"ilya.petrash@inbox.ru"
] | ilya.petrash@inbox.ru |
f2fdd4d9d42fb5073ee6fa1301c3897d15d7f1b5 | d833487ba7a78e900ce535d60c123986ab5ebfee | /Linked Lists/6. Shift Linked List/Solution.py | 23371037e6dda9955911381ecedcd6a0e8bca809 | [] | no_license | ceteongvanness/Algorithm-Python | b71af3bca4d2573f4a0d18dc991012b996746d6a | 0151e7bac3f4032bbc76caa209bb63cdfa8a581e | refs/heads/master | 2023-01-31T04:17:22.425719 | 2020-12-14T05:21:15 | 2020-12-14T05:21:15 | 263,880,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
# O(n + m) time | O(1) space - where n is the number of nodes in the first
def shiftLinkedList(head, k):
# O(n) time | O(1) space - where n is the number of nodes in the Linked List
listLength = 1
listTail = head
while listTail.next is not None:
listTail = listTail.next
listLength += 1
offset = abs(k) % listLength
if offset == 0:
return head
newTailPosition = listLength - offset if k > 0 else offset
newTail = head
for i in range(1, newTailPosition):
newTail = newTail.next
newHead = newTail.next
newTail.next = None
listTail.next = head
return newHead
# This is the class of the input linked list.
class LinkedList:
def __init__(self, value):
self.value = value
self.next = None
| [
"ceteongvanness@hotmail.com"
] | ceteongvanness@hotmail.com |
ad6c6f5bcb9bf132c2d476669c31b7aa91444dc5 | e00d41c9f4045b6c6f36c0494f92cad2bec771e2 | /server/database/mariadb/comar/service.py | 9552b147c9c78b884a728527fbac16017ea92eb4 | [] | no_license | pisilinux/main | c40093a5ec9275c771eb5fb47a323e308440efef | bfe45a2e84ea43608e77fb9ffad1bf9850048f02 | refs/heads/master | 2023-08-19T00:17:14.685830 | 2023-08-18T20:06:02 | 2023-08-18T20:06:02 | 37,426,721 | 94 | 295 | null | 2023-09-14T08:22:22 | 2015-06-14T19:38:36 | Python | UTF-8 | Python | false | false | 698 | py | # -*- coding: utf-8 -*-
from comar.service import *
import os
serviceType="server"
serviceDesc=_({"en": "MariaDB Database Server",
"tr": "MariaDB Veritabanı Sunucusu"})
PIDFILE="/run/mysqld/mysqld.pid"
DAEMON="/usr/bin/mysqld"
@synchronized
def start():
startService(command=DAEMON,
pidfile=PIDFILE,
detach=True,
donotify=True)
#os.system("pidof mariadb_server + /usr/bin/mysqld > /run/mysqld/mysqld.pid")
@synchronized
def stop():
stopService(pidfile=PIDFILE,
donotify=True)
try:
os.unlink(PIDFILE)
except OSError:
pass
def status():
return isServiceRunning(PIDFILE)
| [
"ayhanyalcinsoy@gmail.com"
] | ayhanyalcinsoy@gmail.com |
816ab3ea2f2aa70a194ab70f5b4edf02058b1926 | 8f0ce1be6cc093d962c64179eec99c7ccc20ffc4 | /fabrication/migrations/0002_auto_20170522_2054.py | bb9f50cf4571262b07c8bd43de137ffccd69989e | [] | no_license | dmeehan/futuregreenstudio | cf5e12c6ead8f0c7023ba09d5868749888068b72 | e6e2b7f7ffa2ed251d21e6b1d07573ab4f70782f | refs/heads/master | 2023-08-30T20:12:24.814970 | 2023-08-28T14:55:26 | 2023-08-28T14:55:26 | 89,943,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-22 20:54
from __future__ import unicode_literals
from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('fabrication', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='fabricationpage',
name='fabrication_content',
field=wagtail.wagtailcore.fields.StreamField([(b'item_list', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'text', wagtail.wagtailcore.blocks.TextBlock())]), icon='list-ul')), (b'numbered_item_list', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'text', wagtail.wagtailcore.blocks.TextBlock())]), icon='list-ol')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'video', wagtail.wagtailembeds.blocks.EmbedBlock())]),
),
migrations.AlterField(
model_name='fabricationpage',
name='process_content',
field=wagtail.wagtailcore.fields.StreamField([(b'item_list', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'text', wagtail.wagtailcore.blocks.TextBlock())]), icon='list-ul')), (b'numbered_item_list', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock()), (b'text', wagtail.wagtailcore.blocks.TextBlock())]), icon='list-ol')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'video', wagtail.wagtailembeds.blocks.EmbedBlock())]),
),
]
| [
"dmeehan@gmail.com"
] | dmeehan@gmail.com |
95ffd688e970ee7591bc3d5adc0e0f6570dfb5dd | 67c92dd802a14b41956589dafc6c8fad9f5043cb | /venv/bin/easy_install | b3dabbbbbad2ac29c808766ee81b920ecbc5a971 | [] | no_license | sunny-kathuria/ReconnTool | f9a68aca869cb27ad45351a1f5b8f59178590a75 | 274d2aad29f0b8c408772821b8066adfd43a9540 | refs/heads/master | 2021-01-05T02:36:59.159207 | 2020-02-16T07:49:10 | 2020-02-16T07:49:10 | 240,848,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | #!/root/PycharmProjects/Recon/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain | |
b9e506df9c12fad0bd4108510fa77486a23a356d | 4f0192ccd0b29b4b28428daa9813010cd70f49a3 | /news/migrations/0010_auto_20140519_1249.py | 9ffe01021e97f71a6319b89b0b3a6c86147bb51f | [] | no_license | nbedi/appletini | 03c2a7286cb5775a63e17c41c3ccd2af48f0b90a | dd1f34f0fa3948fa808979e35844b6d58d46c0ea | refs/heads/master | 2016-08-05T00:08:35.461725 | 2015-03-08T06:02:56 | 2015-03-08T06:02:56 | 31,575,783 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | # encoding: utf8
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0009_page_parent'),
]
operations = [
migrations.AddField(
model_name='category',
name='twitter',
field=models.CharField(default='', max_length=15, blank=True),
preserve_default=False,
),
migrations.AddField(
model_name='category',
name='twitter_profile_image',
field=models.ImageField(null=True, upload_to=b'news/category/twitter_profile/', blank=True),
preserve_default=True,
),
migrations.RemoveField(
model_name='category',
name='default_card_2x',
),
migrations.AlterField(
model_name='category',
name='default_card',
field=models.ImageField(upload_to=b'news/category/default_card/'),
),
]
| [
"omniaura5@gmail.com"
] | omniaura5@gmail.com |
069bd1201208b6e6dcd4c8d5a3897aaf17dfad90 | 8f2f83bc1381d4ce7fc968aec72fa400aae4155d | /pybitcoin/types/address.py | 99c9195e3bd8727bf34cc0d36170c29765b43ee1 | [
"MIT"
] | permissive | nifrali/pyStratis | c855fb33be77064c9a741255e324003319a4789f | b1a80bf155b7941e9ef8fc2ea93fa1b08a0c4366 | refs/heads/master | 2023-06-20T16:02:30.863589 | 2021-07-01T19:24:18 | 2021-07-01T19:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | from __future__ import annotations
from typing import Callable
from pybitcoin.networks import BaseNetwork
class Address:
"""A address model. Address is validated by the network."""
def __init__(self, address: str, network: BaseNetwork):
self.validate_values(address=address, network=network)
self.address = address
self.network = network
def __repr__(self) -> str:
return self.address
def __str__(self) -> str:
return self.address
def __eq__(self, other) -> bool:
return self.address == other
def json(self) -> str:
return self.address
@classmethod
def __get_validators__(cls) -> Callable:
yield cls.validate_class
@classmethod
def validate_class(cls, value) -> Address:
cls.validate_values(address=value.address, network=value.network)
return value
@staticmethod
def validate_values(address: str, network: BaseNetwork) -> bool:
if network.validate_address(address):
return True
raise ValueError('Invalid address for given network.')
| [
"skaal@protonmail.com"
] | skaal@protonmail.com |
736cb5b4a52da061d34b42230a957d40324f6fb9 | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-LocalAuthentication/setup.py | a2118691526813fa2bc6cb10fc669f8a09239aa3 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | """
Wrappers for the "LocalAuthentication" framework on macOS.
These wrappers don't include documentation, please check Apple's documention
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
"""
from pyobjc_setup import setup
VERSION = "6.2b1"
setup(
name="pyobjc-framework-LocalAuthentication",
description="Wrappers for the framework LocalAuthentication on macOS",
min_os_level="10.10",
packages=["LocalAuthentication"],
version=VERSION,
install_requires=["pyobjc-core>=" + VERSION, "pyobjc-framework-Cocoa>=" + VERSION],
long_description=__doc__,
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
f57d887b142a32218aa4c3df1b3b08198019563f | 8f68af7b8854d8c5000f8ecbe3a3c4330b4d6a7c | /docs/interviewPrep/designPatterns/Behavioral_patterns/Memento/python/Memento.py | 13b40cf9c3bcb0f1aa3c53ea95daf48f89fa96a0 | [] | no_license | reshinto/reshinto.github.io | 7590d0fb26cbf239b2545fd3b745416ab31aa7aa | 71e5b82d49a11d9a9171a38bcb3ac23dd07ee62f | refs/heads/dev | 2022-12-05T13:45:53.578262 | 2022-12-01T15:34:59 | 2022-12-01T15:34:59 | 211,689,735 | 6 | 0 | null | 2022-08-07T22:07:36 | 2019-09-29T16:11:25 | TypeScript | UTF-8 | Python | false | false | 357 | py | from abc import ABC, abstractmethod
class Memento(ABC):
"""
The Memento interface provides a way to retrieve the memento's metadata,
such as creation date or name. However, it doesn't expose the Originator's
state.
"""
@abstractmethod
def get_name(self):
pass
@abstractmethod
def get_date(self):
pass
| [
"terencekong2002@gmail.com"
] | terencekong2002@gmail.com |
2d2104f7dd3191c1a45bdf24ccc0559a181e3bfd | 3d83e5d6c5c3b264dbca94f2fedcd1abaf522278 | /docs/source/conf.py | 34117acfb1003d1e24d7e473b7f2a321cbd12283 | [
"Apache-2.0"
] | permissive | cp4cds/c4cds-wps | 4767d779a2338d46d52f0c23bb89f0072928c482 | 5abd9281195548bbd1e7653fe2ab1fee26745200 | refs/heads/master | 2020-04-02T06:43:19.383112 | 2020-01-14T16:05:36 | 2020-01-14T16:05:36 | 154,164,988 | 0 | 0 | NOASSERTION | 2020-01-14T16:05:37 | 2018-10-22T15:09:32 | Python | UTF-8 | Python | false | false | 5,253 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# c4cds documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'pywps.ext_autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'c4cds-wps'
copyright = u"2019, Carsten Ehbrecht"
author = u"Carsten Ehbrecht"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/birdhouse_logo.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'c4cdsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'c4cds.tex',
u'c4cds-wps Documentation',
u'Carsten Ehbrecht', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'c4cds',
u'c4cds-wps Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'c4cds',
u'c4cds-wps Documentation',
author,
'c4cds',
'One line description of project.',
'Miscellaneous'),
]
| [
"ehbrecht@dkrz.de"
] | ehbrecht@dkrz.de |
29e29ae5ff4022a72a00e62679cd8b9718301532 | d75cbad7a79e24b49f405c6529633ea65c9b286d | /most_contributive_feature.py | c3628be932c6847796e1899376eca75bccc8b7f3 | [] | no_license | aj2622/ML_HW1 | bc49e61781f108c66dfd598423915e27c72f7b3a | 7497f8d71f6b731fc232058d6a0597af4884a53f | refs/heads/master | 2020-04-22T08:49:52.188349 | 2017-10-31T14:23:02 | 2017-10-31T14:23:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | import numpy as np
import scipy.io as io
import poly_regression
feature = ['sepal length', 'sepal width', 'petal length', 'petal width']
if __name__ == '__main__':
data = io.loadmat('data/5_X.mat')['X']
target = io.loadmat('data/5_T.mat')['T']
train_x = np.concatenate((data[:40], data[50:90], data[100:140]))
train_t = np.concatenate((target[:40], target[50:90], target[100:140]))
error_list = []
for i in range(4):
trans_train_x = poly_regression.transform(np.delete(train_x, i, 1), 3, 2)
w = poly_regression.getCoef(trans_train_x, train_t)
train_y = np.dot(trans_train_x, w)
error_list.append(poly_regression.RMSE(train_y, train_t))
print 'The RMS error after remove feature <<', feature[i], '>> is', error_list[len(error_list)-1]
print 'The most contributive attribute is', feature[error_list.index(max(error_list))]
| [
"ya70201@gmail.com"
] | ya70201@gmail.com |
786af33be62d301d22854f723ab696e318419bdc | f97b8cd110b651a13628a2f394b018bed3d8957d | /screenshot_to_csv.py | ce2fa840bc41658c62a9de726137505330fe9596 | [] | no_license | sebastiengilbert73/chess_scribe | d09a9bcca37a15216342245bbddb546d87bf75c9 | 3073a8f12f086592e0eebb67f800112c914515e8 | refs/heads/main | 2023-03-30T15:18:56.852056 | 2021-03-29T00:46:39 | 2021-03-29T00:46:39 | 352,422,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,944 | py | import cv2
import argparse
import logging
import os
import numpy as np
from tesserocr import PyTessBaseAPI
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument('ImageFilepath', help="The filepath to the screenshot")
parser.add_argument('--outputDirectory', help="The directory where the output will be written. Default: '/tmp/chess_scribe/'", default='/tmp/chess_scribe/')
parser.add_argument('--rangeThreshold', help="The threshold on the grayscale range, for a row. Default: 100", type=int, default=100)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
def main():
logging.info("screenshot_to_csv.py main()")
# If the output folder doesn't exist, create it. Cf. https://www.tutorialspoint.com/How-can-I-create-a-directory-if-it-does-not-exist-using-Python
if not os.path.exists(args.outputDirectory):
os.makedirs(args.outputDirectory)
# Open the image
screenshot = cv2.imread(args.ImageFilepath, cv2.IMREAD_COLOR)
screenshot_shapeHWC = screenshot.shape
# Convert to grayscale
grayscale_screenshot = cv2.cvtColor(screenshot, cv2.COLOR_BGR2GRAY)
# Find the text break rows
text_line_delimiters = TextLineDelimiters(args.outputDirectory, grayscale_screenshot, args.rangeThreshold)
logging.debug("text_line_delimiters = {}".format(text_line_delimiters))
# Append text lines to form a single text line
single_line_img = AppendTextLines(args.outputDirectory, screenshot, text_line_delimiters)
#single_line_rgb = cv2.cvtColor(single_line_img, cv2.COLOR_BGR2RGB)
with PyTessBaseAPI() as tesser_api:
tesser_api.SetImage(Image.fromarray(single_line_img))
logging.debug("tesser_api.GetUTF8Text() = {}".format(tesser_api.GetUTF8Text()))
#text_str = pytesseract.image_to_string(Image.fromarray(single_line_rgb))
def TextLineDelimiters(output_directory, grayscale_screenshot, range_threshold):
text_line_delimiters = [0]
img_sizeHW = grayscale_screenshot.shape
row_ranges = []
for y in range(img_sizeHW[0]):
min_value, max_value, _, _ = cv2.minMaxLoc(grayscale_screenshot[y, :])
row_ranges.append(max_value - min_value)
with open(os.path.join(output_directory, "TextLineDelimiters_rowRange.csv"), 'w+') as stats_file:
stats_file.write("y,range\n")
we_are_in_text = False
for y in range(len(row_ranges)):
grayscale_range = row_ranges[y]
stats_file.write("{},{}\n".format(y, grayscale_range))
if grayscale_range >= range_threshold:
we_are_in_text = True
else:
if we_are_in_text:
text_line_delimiters.append(y)
we_are_in_text = False
return text_line_delimiters
def AppendTextLines(output_directory, screenshot, text_line_delimiters):
deltas = [text_line_delimiters[i] - text_line_delimiters[i - 1] for i in range(1, len(text_line_delimiters))]
text_line_height = max(deltas)
deltas.append(text_line_height)
logging.debug("text_line_height = {}".format(text_line_height))
text_line_width = screenshot.shape[1] * len(text_line_delimiters)
single_line_img = np.zeros((text_line_height, text_line_width, 3), dtype=np.uint8)
for lineNdx in range(len(text_line_delimiters) - 1):
#logging.debug("lineNdx = {}; text_line_delimiters[lineNdx] = {}; deltas[lineNdx] = {}".format(lineNdx, text_line_delimiters[lineNdx], deltas[lineNdx]))
single_line_img[0: deltas[lineNdx], lineNdx * screenshot.shape[1] : (lineNdx + 1) * screenshot.shape[1]] = \
screenshot[text_line_delimiters[lineNdx]: text_line_delimiters[lineNdx] + deltas[lineNdx], :]
single_line_filepath = os.path.join(output_directory, "AppendTextLines_singleLine.png")
cv2.imwrite(single_line_filepath, single_line_img)
return single_line_img
if __name__ == '__main__':
main() | [
"sebastiengilbert73@yahoo.ca"
] | sebastiengilbert73@yahoo.ca |
eea28399f23ba03c93add6c9473bc9bab6478311 | 942ee5e8d54e8ebe9c5c841fbfdd1da652946944 | /1001-1500/1029.Two City Scheduling.py | aa44653739248dc7f890e08295a3dd9bd2cedb30 | [] | no_license | kaiwensun/leetcode | 0129c174457f32887fbca078fb448adce46dd89d | 6b607f4aae3a4603e61f2e2b7480fdfba1d9b947 | refs/heads/master | 2023-08-31T07:30:50.459062 | 2023-08-27T07:59:16 | 2023-08-27T07:59:16 | 57,526,914 | 69 | 9 | null | 2023-08-20T06:34:41 | 2016-05-01T05:37:29 | Python | UTF-8 | Python | false | false | 343 | py | class Solution(object):
def twoCitySchedCost(self, costs):
"""
:type costs: List[List[int]]
:rtype: int
"""
diff = [(cost[0] - cost[1], cost) for cost in costs]
diff.sort()
N = len(costs) / 2
return sum(pair[1][0] for pair in diff[:N]) + sum(pair[1][1] for pair in diff[N:])
| [
"noreply@github.com"
] | kaiwensun.noreply@github.com |
79e4094ee8c558cf5207293f1b1395a43cd41174 | aef8eb6681e555ecb61ac67151e4c54d6fdd1023 | /plots/plotsDaniel/regions/covarianceMatrix.py | 395fda4f1921f5142a11da1b8d19d03b0329829a | [] | no_license | HephyAnalysisSW/TopEFT | 0e2dc89f7a43bacf50c77a042f56663e9d4f3404 | 53174807c96dffa6654e4dc63bef92f2b71706ee | refs/heads/master | 2022-11-07T02:41:53.120759 | 2020-03-31T08:08:27 | 2020-03-31T08:08:27 | 98,643,866 | 0 | 3 | null | 2019-10-14T09:02:09 | 2017-07-28T11:38:23 | Python | UTF-8 | Python | false | false | 7,589 | py | import shutil, os
import ROOT
from array import array
import math
import pickle
import numpy as np
import copy
from TopEFT.Tools.user import combineReleaseLocation as releaseLocation
import re
def natural_sort(list, key=lambda s:s):
"""
Sort the list into natural alphanumeric order.
http://stackoverflow.com/questions/4836710/does-python-have-a-built-in-function-for-string-natural-sort
"""
def get_alphanum_key_func(key):
convert = lambda text: int(text) if text.isdigit() else text
return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
sort_key = get_alphanum_key_func(key)
lc = sorted(list, key=sort_key)
return lc
def getCovariance(fname):
    """
    Run a combine FitDiagnostics fit on the datacard `fname` and return the
    pre-/post-fit covariance matrices, total yields and the observed data.

    The card is turned into a shape card, converted to a workspace and fitted
    inside a throw-away scratch directory under the combine release location,
    which is deleted again on success.

    fname -- path to an existing combine datacard (.txt). Must not be None.

    Returns a dict of deep-copied ROOT objects:
      'postfit', 'prefit'               -- overall total covariance (TH2)
      'yield_postfit', 'yield_prefit'   -- total expected yields
      'data'                            -- observed data
    """
    import uuid, os
    ustr = str(uuid.uuid4())
    uniqueDirname = os.path.join(releaseLocation, ustr)
    print("Creating %s" % uniqueDirname)
    os.makedirs(uniqueDirname)
    if fname is None:
        # The original code called self.writeToFile() here, but there is no
        # `self` in a module-level function, so this branch could only raise
        # a NameError. Fail explicitly with a clear message instead.
        raise ValueError("getCovariance requires the path to an existing datacard (fname must not be None).")
    filename = os.path.abspath(fname)
    assert os.path.exists(filename), "File not found: %s" % filename
    # Use the absolute path: the command below cd's into the scratch
    # directory, where a relative fname would no longer resolve.
    combineCommand = "cd "+uniqueDirname+";eval `scramv1 runtime -sh`;combineCards.py %s -S > myshapecard.txt " % filename
    # set workspace
    workspaceCommand = "cd "+uniqueDirname+";eval `scramv1 runtime -sh`;text2workspace.py myshapecard.txt"
    # Run fit, sampling 100 toys to propagate the shape uncertainties.
    fitCommand = "cd "+uniqueDirname+";eval `scramv1 runtime -sh`;combine -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 100 --saveOverall myshapecard.root"
    print(fitCommand)
    os.system(combineCommand)
    os.system(workspaceCommand)
    os.system(fitCommand)
    f1 = ROOT.TFile(uniqueDirname+"/fitDiagnostics.root")
    postfit = f1.Get("shapes_fit_s")
    prefit  = f1.Get("shapes_prefit")
    # Deep-copy everything we return so the objects survive closing the file.
    # NOTE(review): should also extract per-bin yields here to directly obtain a chi2.
    cov_postfit     = copy.deepcopy(postfit.Get("overall_total_covar"))
    cov_prefit      = copy.deepcopy(prefit.Get("overall_total_covar"))
    total_postfit   = copy.deepcopy(postfit.Get("total_overall"))
    total_prefit    = copy.deepcopy(prefit.Get("total_overall"))
    data            = copy.deepcopy(postfit.Get("total_data"))
    f1.Close()
    del postfit, prefit, f1
    # Clean up the scratch copy of the release area.
    shutil.rmtree(uniqueDirname)
    return {"postfit":cov_postfit, "prefit":cov_prefit, "yield_postfit":total_postfit, "yield_prefit":total_prefit, "data":data}
#f1 = ROOT.TFile("/afs/hephy.at/work/d/dspitzbart/top/devel/CMSSW_8_1_0/src/fitDiagnostics.root")
def getMatrix(h2, binNumbers):
    """Extract covariance sub-matrices from 2D histogram *h2*.

    Only the (1-based) bins listed in *binNumbers* are read.

    Returns (cov, diag, diag_corr):
      cov       -- full covariance sub-matrix
      diag      -- cov with all off-diagonal entries zeroed
      diag_corr -- sqrt of the diagonal (per-bin uncertainties), used later
                   to normalise cov into a correlation matrix
    """
    # Dead code removed: the bin-label `matrix` dict and the unused
    # `sorted_cov` TH2D were built but never read.
    nbins = len(binNumbers)
    cov = np.zeros((nbins, nbins))
    diag = np.zeros((nbins, nbins))
    diag_corr = np.zeros((nbins, nbins))
    for i, k in enumerate(binNumbers):
        # sqrt of the variance, i.e. the per-bin uncertainty
        diag_corr[i, i] = math.sqrt(h2.GetBinContent(k, k))
        for j, l in enumerate(binNumbers):
            cov[i][j] = h2.GetBinContent(k, l)
            if i == j:
                diag[i][j] = h2.GetBinContent(k, l)
    return cov, diag, diag_corr
def getSortedBinNumber(h1):
    """Return (indices, sortedBinNames) for 1D histogram *h1*.

    *indices* holds the 1-based bin numbers of all non-empty bins, ordered
    by the natural sort of their axis labels; *sortedBinNames* is that
    label ordering itself.
    """
    nbins = h1.GetNbinsX()
    binNames = [h1.GetXaxis().GetBinLabel(i) for i in range(1, nbins + 1)]
    sortedBinNames = natural_sort(binNames)
    indices = []
    for name in sortedBinNames:
        # Map the label back to its original (1-based) bin number.
        binNumber = binNames.index(name) + 1
        if h1.GetBinContent(binNumber) > 0:
            indices.append(binNumber)
    return indices, sortedBinNames
def getVectorFromHist(h1, binNumbers):
    """Collect the contents of the given bins of 1D histogram *h1* into a numpy array."""
    return np.array([h1.GetBinContent(b) for b in binNumbers])
def getVectorFromGraph(graph, binNumbers):
    """Evaluate *graph* at the centre of each listed bin (bin - 0.5) and
    return the values as a numpy array."""
    return np.array([graph.Eval(b - 0.5) for b in binNumbers])
# --- Run the fit and extract covariances, yields and observed data. ---
cov = getCovariance("/afs/hephy.at/data/dspitzbart01/TopEFT/results/cardFiles/regionsE_COMBINED_xsec_shape_lowUnc_SRandCR/dim6top_LO_currents/dim6top_LO_ttZ_ll.txt")
binNumbers,sortedBinNames = getSortedBinNumber(cov["yield_postfit"])
cov_prefit, cov_prefit_diag, cov_prefit_diag_corr = getMatrix(cov["prefit"], binNumbers)
cov_postfit, cov_postfit_diag, cov_postfit_diag_corr = getMatrix(cov["postfit"], binNumbers)
obs = getVectorFromGraph(cov["data"], binNumbers)
exp_postfit = getVectorFromHist(cov["yield_postfit"], binNumbers)
exp_prefit = getVectorFromHist(cov["yield_prefit"], binNumbers)
# Chi2 for postfit
# R = observed - expected; chi2 = R^T * C^-1 * R
R_postfit = obs - exp_postfit
cov_postfit_BU = copy.deepcopy(cov_postfit)
cov_postfit_inv = np.linalg.inv(cov_postfit)
chi2_postfit = np.dot(cov_postfit_inv, R_postfit)
chi2_postfit = np.dot(R_postfit,chi2_postfit)
# Uncorrelated variant: use only the diagonal of the covariance.
cov_postfit_diag_inv = np.linalg.inv(cov_postfit_diag)
cov_postfit_diag_corr_inv = np.linalg.inv(cov_postfit_diag_corr)
chi2_postfit_uncor = np.dot(cov_postfit_diag_inv, R_postfit)
chi2_postfit_uncor = np.dot(R_postfit, chi2_postfit_uncor)
## get the correlation matrix
# corr = D^-1 * C * D^-1 with D = sqrt(diag(C))
corr = np.dot(cov_postfit_diag_corr_inv, cov_postfit)
corr = np.dot(corr, cov_postfit_diag_corr_inv)
nbins = len(binNumbers)
# Fill a TH2D with the correlation matrix for plotting.
sorted_corr = ROOT.TH2D('corr','',nbins,0,nbins,nbins,0,nbins)
for i,k in enumerate(sortedBinNames[:nbins]):
    #if i < nSR:
    sorted_corr.GetXaxis().SetBinLabel(i+1, str(i+1))#SRnames[i])
    sorted_corr.GetYaxis().SetBinLabel(i+1, str(i+1))#SRnames[i])
    for j,l in enumerate(sortedBinNames[:nbins]):
        sorted_corr.SetBinContent(i+1, j+1, corr[i][j])
sorted_corr.GetXaxis().LabelsOption("v")
sorted_corr.GetZaxis().SetRangeUser(-1.0, 1.0)
c3 = ROOT.TCanvas('c3','c3',700,700)
pad2=ROOT.TPad("pad2","Main",0.,0.,1.,1.)
pad2.SetRightMargin(0.15)
pad2.SetTopMargin(0.06)
pad2.SetBottomMargin(0.12)
pad2.Draw()
pad2.cd()
sorted_corr.Draw("colz")
latex1 = ROOT.TLatex()
latex1.SetNDC()
latex1.SetTextSize(0.04)
latex1.SetTextAlign(11) # align right
latex1.DrawLatex(0.10,0.95,'CMS #bf{#it{Private Work}}')
outname = 'correlation'
filetypes = ['.png','.pdf','.root']
plot_dir = '/afs/hephy.at/user/d/dspitzbart/www/TopEFT/correlation/'
for f in filetypes:
    c3.Print(plot_dir+outname+f)
# Per-region uncorrelated chi2, summed in groups of 15 bins
# (Python 2 integer division in i/15 selects the group).
chi2_primitive = 0
chi2_primitives_postfit = [0,0,0,0]
chi2_primitives_prefit = [0,0,0,0]
for i,r in enumerate(R_postfit):
    #if i >= 30 and i<45:
    chi2_primitives_postfit[i/15] += (r**2 / cov_postfit_BU[i][i])
    chi2_primitives_prefit[i/15] += (r**2 / cov_prefit[i][i])
    chi2_primitive += (r**2 / cov_postfit[i][i])
print "Results"
print chi2_postfit
print chi2_primitive
print "postfit", chi2_primitives_postfit
print "prefit", chi2_primitives_prefit
# Chi2 for prefit
R_prefit = obs - exp_prefit
cov_prefit_inv = np.linalg.inv(cov_prefit)
chi2_prefit = np.dot(cov_prefit_inv, R_prefit)
chi2_prefit = np.dot(R_prefit,chi2_prefit)
#cov_inv = np.linalg.inv(cov)
#pickle.dump(cov_inv, file('cov_inv.pkl','w'))
| [
"daniel.spitzbart@cern.ch"
] | daniel.spitzbart@cern.ch |
7ce1d9097321b92cdf001e1ecbcd96d23b2ed402 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-tms/huaweicloudsdktms/v1/model/predefine_tag.py | 5bbfd3f3ded7213f0f5b7bef8e19d6e57181afd3 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 4,468 | py | # coding: utf-8
import pprint
import re
import six
class PredefineTag:
    """Huawei Cloud TMS predefined tag model (generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'key': 'str',
        'value': 'str',
        'update_time': 'datetime'
    }
    attribute_map = {
        'key': 'key',
        'value': 'value',
        'update_time': 'update_time'
    }
    def __init__(self, key=None, value=None, update_time=None):
        """PredefineTag - a model defined in huaweicloud sdk"""
        self._key = None
        self._value = None
        self._update_time = None
        self.discriminator = None
        # Assignments go through the property setters below.
        self.key = key
        self.value = value
        self.update_time = update_time
    @property
    def key(self):
        """Gets the key of this PredefineTag.
        Key. Maximum length 36 characters. Allowed characters: A-Z, a-z, 0-9, '-', '_', Unicode characters (\\u4E00-\\u9FFF).
        :return: The key of this PredefineTag.
        :rtype: str
        """
        return self._key
    @key.setter
    def key(self, key):
        """Sets the key of this PredefineTag.
        Key. Maximum length 36 characters. Allowed characters: A-Z, a-z, 0-9, '-', '_', Unicode characters (\\u4E00-\\u9FFF).
        :param key: The key of this PredefineTag.
        :type: str
        """
        self._key = key
    @property
    def value(self):
        """Gets the value of this PredefineTag.
        Value. Maximum length 43 characters per value; may be an empty string. Allowed characters: A-Z, a-z, 0-9, '.', '-', '_', Unicode characters (\\u4E00-\\u9FFF).
        :return: The value of this PredefineTag.
        :rtype: str
        """
        return self._value
    @value.setter
    def value(self, value):
        """Sets the value of this PredefineTag.
        Value. Maximum length 43 characters per value; may be an empty string. Allowed characters: A-Z, a-z, 0-9, '.', '-', '_', Unicode characters (\\u4E00-\\u9FFF).
        :param value: The value of this PredefineTag.
        :type: str
        """
        self._value = value
    @property
    def update_time(self):
        """Gets the update_time of this PredefineTag.
        Update time, expressed in UTC, e.g. 2016-12-09T00:00:00Z.
        :return: The update_time of this PredefineTag.
        :rtype: datetime
        """
        return self._update_time
    @update_time.setter
    def update_time(self, update_time):
        """Sets the update_time of this PredefineTag.
        Update time, expressed in UTC, e.g. 2016-12-09T00:00:00Z.
        :param update_time: The update_time of this PredefineTag.
        :type: datetime
        """
        self._update_time = update_time
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive (none for this model).
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PredefineTag):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
16f3de4254710303d6a86d6135995be72fcc7625 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/664.py | 4fc3d46c0a13dbea8850ff91f3343512c00a2b8e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | def flip(pancakes, left_index, spatula_size):
for i in range(left_index, left_index + spatula_size):
pancakes[i] = not pancakes[i]
def solve(pancakes, spatula_size):
    """Greedy left-to-right sweep: flip whenever the leftmost reachable
    pancake is face down.  Returns the number of flips used, or
    'IMPOSSIBLE' when no sequence of flips makes every pancake face up.
    """
    flips = 0
    last_start = len(pancakes) - spatula_size
    for start in range(last_start + 1):
        if pancakes[start]:
            continue
        flips += 1
        # Flip the spatula-sized window in place (inlined helper).
        for i in range(start, start + spatula_size):
            pancakes[i] = not pancakes[i]
    return flips if all(pancakes) else 'IMPOSSIBLE'
def main():
    """Read Code Jam style input from stdin and print one line per case."""
    cases = int(input())
    for case_num in range(1, cases + 1):
        # Each case: a '+'/'-' pancake string and the spatula size.
        pancakes, spatula_size = input().split()
        solution = solve([p == '+' for p in pancakes], int(spatula_size))
        print('Case #{}: {}'.format(case_num, solution))
if __name__ == '__main__':
main() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
36bbf0f5a20d0315d125827ef6e66ec12b5d9e74 | 2f71665621698da42e7f6d9245deea95325f3320 | /energy.py | 551577081cac09255f411ae0a2d53168fcb49ee4 | [
"MIT"
] | permissive | MiroK/fenics-rigid-motions | dfee68a4d726f16db2d293f0aef492d43698dba2 | cd50c1641d0137ac7653f032fba15d0b23b26ac6 | refs/heads/master | 2020-12-02T10:03:37.532532 | 2017-07-13T06:16:59 | 2017-07-13T06:16:59 | 96,685,308 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,298 | py | from dolfin import *
from block import block_mat, block_vec, block_transpose
from block.iterative import ConjGrad
from block.algebraic.petsc import AMG
from rigid_motions import first
import rigid_motions
def energy(lmbda, mu, f, h, mesh, Z=None):
    '''
    Solves
        -div(sigma) = f in Omega
    sigma.n = h on boundary
    where sigma(u) = 2*mu*eps(u) + lambda*div(u)*I. The problem is reformulated by
    considering a complemented strain energy (for which rigid motions are not
    transparent). The system is solved with CG as
    P*[A+E]*[u] = P'*b
    with P a preconditioner. We run on a series of meshes to show mesh
    independence of the solver.  Returns (V.dim(), niters) per mesh.
    '''
    if not isinstance(mesh, Mesh):
        # Precompute the 'symbolic' basis
        # (mesh is an iterable of meshes: recurse with a shared basis Z).
        mesh0 = first(mesh)
        Z = rigid_motions.rm_basis(mesh0)
        return [energy(lmbda, mu, f, h, mesh_, Z) for mesh_ in mesh]
    # For cube
    V = VectorFunctionSpace(mesh, 'CG', 1)
    u, v = TrialFunction(V), TestFunction(V)
    # Strain
    epsilon = lambda u: sym(grad(u))
    # Stress
    gdim = mesh.geometry().dim()
    sigma = lambda u: 2*mu*epsilon(u) + lmbda*tr(epsilon(u))*Identity(gdim)
    # Energy of elastic deformation
    a = inner(sigma(u), epsilon(v))*dx
    A = assemble(a)
    # Mass matrix for B
    m = inner(u, v)*dx
    M = assemble(m)
    # NOTE: Avoiding use of Q space in the assembly - dense blocks!
    Q = VectorFunctionSpace(mesh, 'R', 0, dim=6)
    Zh = rigid_motions.RMBasis(V, Q, Z) # L^2 orthogonal
    B = M*Zh
    # System operator
    AA = A + B*block_transpose(B)
    # Right hand side
    L = inner(f, v)*dx + inner(h, v)*ds
    # Orthogonalize
    P = rigid_motions.Projector(Zh)
    b = assemble(L)
    b0 = block_transpose(P)*b
    # Preconditioner
    AM = assemble(a + m)
    BB = AMG(AM)
    # Solve, using random initial guess
    x0 = AA.create_vec()
    as_backend_type(x0).vec().setRandom()
    AAinv = ConjGrad(AA, precond=BB, initial_guess=x0, maxiter=100, tolerance=1E-8,
                     show=2, relativeconv=True)
    x = AAinv*b0
    # Functions from coefficients
    # uh = Function(V, x) # Displacement
    niters = len(AAinv.residuals) - 1
    assert niters < 100
    P*x # to get orthogonality
    if MPI.rank(mesh.mpi_comm()) == 0:
        print '\033[1;37;31m%s\033[0m' % ('Orthogonality %g' % max(P.alphas))
    return V.dim(), niters
def test_energy():
    '''Number of iterations should not blow up as the mesh is refined.'''
    lmbda = Constant(1)
    mu = Constant(1)
    f = Expression(('A*sin(2*x[0])', 'A*cos(3*(x[0]+x[1]+x[2]))', 'A*sin(x[2])'),
                   degree=3, A=0.01)
    h = Constant((0, 0, 0))
    comm = mpi_comm_world().tompi4py()
    Ns = [2, 4, 8, 16, 32]
    # Larger meshes only when running with more than 2 MPI ranks.
    if comm.size > 2:
        Ns.extend([64, 128])
    meshes = (BoxMesh(Point(1, 1, 1), Point(2, 1.5, 1.25), N, N, N) for N in Ns)
    converged = energy(lmbda, mu, f, h, meshes)
    assert all(converged)
    # Dump data for plotting
    if comm.rank == 0:
        from numpy import savetxt, array
        savetxt('./.energy.txt', array(converged), fmt=['%d', '%d'])
    return True
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    set_log_level(PROGRESS)
    assert test_energy()
| [
"miroslav.kuchta@gmail.com"
] | miroslav.kuchta@gmail.com |
619d787fd1c5414403f3e6be8bac6d920d0a6743 | 9ac793d32e70775bb119aaddeb832624e3cf9281 | /consoverloading.py | d6ef77beed8a28d74e31642020cc01266eca143a | [] | no_license | prabhatpal77/Adv-python-polymorphism | 9368311732e1bca9b54e099489c255e3498fbb9b | d68375e4816a746a1ffbffa6d179c50227267feb | refs/heads/master | 2020-07-29T00:41:08.162385 | 2019-09-19T16:35:32 | 2019-09-19T16:35:32 | 209,601,547 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # Constructor overloading:- The concept of defining multiple constructor with the same number parameters are different number of parameters
# within a class is known as aa constructor overloading.
class X:
def __init__(self):
print("in no parameter constructor of x")
def __init__(self, a, b):
print("in two parameter constructor of x")
def __init__(self, a):
print("in one parameter constructor of x")
x1=X(1000)
x2=X()
| [
"noreply@github.com"
] | prabhatpal77.noreply@github.com |
ca92618105b8fcd2c360427f208e240fccd36c7b | 031d0c267bef0cb8dad9a39b9863b2946a93e8bd | /pymap3d/azelradec.py | 2338ab71388c7ae4544ca4e327c51ffd1a164e08 | [
"BSD-2-Clause"
] | permissive | nhz2/pymap3d | 70a8e8987d7d739ff6d801b608830adc6de0d4fc | 74dbe48fe794a27e67c599c0740d88e84d22b3c5 | refs/heads/master | 2020-08-01T10:52:25.182699 | 2019-09-24T15:21:51 | 2019-09-24T15:21:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,014 | py | """
Azimuth / elevation <==> Right ascension, declination
"""
from typing import Tuple
from datetime import datetime
from .vallado import azel2radec as vazel2radec, radec2azel as vradec2azel
from .timeconv import str2dt # astropy can't handle xarray times (yet)
try:
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import Angle, SkyCoord, EarthLocation, AltAz, ICRS
except ImportError:
Time = None
__all__ = ["radec2azel", "azel2radec"]
def azel2radec(
    az_deg: float, el_deg: float, lat_deg: float, lon_deg: float, time: datetime, usevallado: bool = False
) -> Tuple[float, float]:
    """
    Convert a topocentric viewing angle (azimuth, elevation) into sky
    coordinates (right ascension, declination).

    Parameters
    ----------
    az_deg : float
        azimuth [degrees clockwise from North]
    el_deg : float
        elevation above the horizon [degrees] (aberration neglected)
    lat_deg : float
        observer latitude [-90, 90]
    lon_deg : float
        observer longitude [-180, 180] (degrees)
    time : datetime.datetime or str
        time of observation
    usevallado : bool, optional
        force the Vallado algorithm instead of AstroPy

    Returns
    -------
    ra_deg : float
        ecliptic right ascension (degrees)
    dec_deg : float
        ecliptic declination (degrees)
    """
    # Fall back to the (less accurate) Vallado algorithm when requested
    # explicitly or when AstroPy is not installed.
    if usevallado or Time is None:
        return vazel2radec(az_deg, el_deg, lat_deg, lon_deg, time)

    observer = EarthLocation(lat=lat_deg * u.deg, lon=lon_deg * u.deg)
    direction = AltAz(location=observer, obstime=Time(str2dt(time)),
                      az=az_deg * u.deg, alt=el_deg * u.deg)
    sky = SkyCoord(direction.transform_to(ICRS()))

    return sky.ra.deg, sky.dec.deg
def radec2azel(
    ra_deg: float, dec_deg: float, lat_deg: float, lon_deg: float, time: datetime, usevallado: bool = False
) -> Tuple[float, float]:
    """
    Convert sky coordinates (right ascension, declination) into a
    topocentric viewing angle (azimuth, elevation).

    Parameters
    ----------
    ra_deg : float
        ecliptic right ascension (degrees)
    dec_deg : float
        ecliptic declination (degrees)
    lat_deg : float
        observer latitude [-90, 90]
    lon_deg : float
        observer longitude [-180, 180] (degrees)
    time : datetime.datetime or str
        time of observation
    usevallado : bool, optional
        force the Vallado algorithm instead of AstroPy

    Returns
    -------
    az_deg : float
        azimuth [degrees clockwise from North]
    el_deg : float
        elevation above the horizon [degrees] (aberration neglected)
    """
    # Vallado fallback: either requested or AstroPy is unavailable.
    if usevallado or Time is None:
        return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time)

    observer = EarthLocation(lat=lat_deg * u.deg, lon=lon_deg * u.deg)
    target = SkyCoord(Angle(ra_deg, unit=u.deg), Angle(dec_deg, unit=u.deg), equinox="J2000.0")
    horizontal = target.transform_to(AltAz(location=observer, obstime=Time(str2dt(time))))

    return horizontal.az.degree, horizontal.alt.degree
| [
"scivision@users.noreply.github.com"
] | scivision@users.noreply.github.com |
91d9a979cbfbb76e781425a6e40b815428a3fba0 | 6ec91b363b077bffd33f15300a0935124e9fb915 | /Cracking_the_Code_Interview/Leetcode/1.Array/581.Shortest_Unsorted_Continuous_Subarray.py | 59ee17c1d77f092052632f84b1267f3614dfbe84 | [] | no_license | lzxyzq/Cracking_the_Coding_Interview | 03232515ae8eb50394d46322d36b230d1a626fcf | 79dee7dab41830c4ff9e38858dad229815c719a0 | refs/heads/master | 2023-06-05T19:52:15.595289 | 2021-06-23T22:46:02 | 2021-06-23T22:46:02 | 238,068,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,967 | py | # Given an integer array, you need to find one continuous subarray that if you only sort this subarray in ascending order, then the whole array will be sorted in ascending order, too.
# You need to find the shortest such subarray and output its length.
'''
Example 1:
Input: [2, 6, 4, 8, 10, 9, 15]
Output: 5
Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the whole array sorted in ascending order.
'''
# Method 1
class Solution:
    """Sort-and-compare: the shortest window to sort is bounded by the first
    and last indices where nums disagrees with its sorted copy."""

    def findUnsortedSubarray(self, nums: List[int]) -> int:
        mismatches = [i for i, (a, b) in enumerate(zip(nums, sorted(nums))) if a != b]
        if not mismatches:
            return 0
        return mismatches[-1] - mismatches[0] + 1
# Time complexity : O(nlogn). Sorting takes nlogn time.
# Space complexity : O(n). We are making copy of original array.
# Method 2 (TLE)
# Better Brute Force
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
start = len(nums)
end = 0
for i in range(len(nums)):
for j in range(i+1,len(nums)):
if nums[j] < nums[i]:
start = min(start,i)
end = max(end,j)
return end - start + 1 if end - start >= 0 else 0
# Time complexity : O(n2).Two nested loops are there.
# Space complexity : O(1). Constant space is used.
# Method 3
# Using Stack
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
start = len(nums)
end = 0
stack = []
for i in range(len(nums)):
while stack and nums[stack[-1]] > nums[i]:
start = min(start,stack.pop())
stack.append(i)
stack.clear()
for i in range(len(nums)-1,-1,-1):
while stack and nums[stack[-1]] < nums[i]:
end = max(end,stack.pop())
stack.append(i)
return end - start + 1 if end - start >= 0 else 0
# Method 4
class Solution:
    """Two sweeps find the smallest misplaced value (min_) and the largest
    (max_); the window then runs from the first element greater than min_
    to the last element smaller than max_.
    """

    def findUnsortedSubarray(self, nums: List[int]) -> int:
        # Guard <= 1 (was == 1): with an empty list the loops below never
        # run and the final return would hit unbound l/r -> NameError.
        if len(nums) <= 1:
            return 0
        min_ = float("inf")
        max_ = float("-inf")
        # Forward: once an order violation is seen, every later element is
        # a candidate for the smallest misplaced value.
        flag = False
        for i in range(1, len(nums)):
            if nums[i] < nums[i-1]:
                flag = True
            if flag:
                min_ = min(min_, nums[i])
        # Backward: symmetric scan for the largest misplaced value.
        flag = False
        for i in range(len(nums)-2, -1, -1):
            if nums[i] > nums[i+1]:
                flag = True
            if flag:
                max_ = max(max_, nums[i])
        # Left edge: first position whose value exceeds min_.
        for l in range(len(nums)):
            if min_ < nums[l]:
                break
        # Right edge: last position whose value is below max_.
        for r in range(len(nums)-1, -1, -1):
            if max_ > nums[r]:
                break
        return r - l + 1 if r - l > 0 else 0
# Time complexity : O(n). Four O(n) loops are used.
# Space complexity : O(1). Constant space is used.
| [
"lzxyzq@gmail.com"
] | lzxyzq@gmail.com |
6c43c50d4f5bd3c8db2cb275e43b1e2924c155c4 | 6569158699caec02fca237748b537863b861460c | /src/similarity/heterogeneous/PathSimStrategy.py | d3d3a468eb321db70008c2b917cd2cd3494c99d0 | [] | no_license | wfnuser/RicherPathSIM | 7c570ed35680c99643408ca9d1ccc40e935b4a36 | 253906f9a2fe4fb4d3451ebd1d3b51de51e0d239 | refs/heads/master | 2021-01-11T06:19:12.669466 | 2013-06-20T23:05:46 | 2013-06-20T23:05:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,378 | py | import numpy
from src.similarity.MetaPathSimilarityStrategy import MetaPathSimilarityStrategy
__author__ = 'jontedesco'
class PathSimStrategy(MetaPathSimilarityStrategy):
    """
    Class that implements the PathSim similarity measure for same-typed nodes on heterogeneous graphs. Based on
    publication by Yizhou Sun et al. NOTE: This assumes that any given meta path is symmetric.
    @see http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.220.2455
    """
    def findSimilarityScore(self, source, destination):
        """
        Find the PathSim similarity score between source and destination:
        2 * (paths from source to destination) / (cycles at source + cycles
        at destination), all counted along self.metaPath.
        """
        # First half of the (symmetric) meta path, including the middle
        # element for odd lengths.  NOTE(review): uses '/' -- relies on
        # Python 2 integer division.
        partialMetaPath = self.metaPath[:len(self.metaPath)/2 + len(self.metaPath) % 2]
        # Get the number of meta paths between source and destination
        if self.conserveMemory:
            # Slow, but less in-memory storage
            numSourceDestinationPaths = len(self.metaPathUtility.findMetaPaths(self.graph, source, destination, self.metaPath, True))
        else:
            # Faster, but requires more memory
            # Path count = product of the two half-path adjacency matrices.
            firstHalfAdjMatrix, firstHalfIndex = self.metaPathUtility.getAdjacencyMatrixFromGraph(
                self.graph, partialMetaPath, project=True, symmetric=True)
            secHalfAdjMatrix, secHalfIndex = self.metaPathUtility.getAdjacencyMatrixFromGraph(
                self.graph, list(reversed(partialMetaPath)), project=True, symmetric=True)
            adjMatrix = numpy.dot(firstHalfAdjMatrix, secHalfAdjMatrix)
            numSourceDestinationPaths = adjMatrix[firstHalfIndex[source]][secHalfIndex[destination]]
        # Get cycle counts
        sourceNeighbors = self.metaPathUtility.findMetaPathNeighbors(self.graph, source, partialMetaPath, True)
        destinationNeighbors = self.metaPathUtility.findMetaPathNeighbors(self.graph, destination, partialMetaPath, True)
        numSourceDestinationCycles = 0
        for node, neighbors in [(source, sourceNeighbors), (destination, destinationNeighbors)]:
            for neighbor in neighbors:
                # n half-paths to a neighbor give n^2 round-trip cycles.
                paths = self.metaPathUtility.findMetaPaths(self.graph, node, neighbor, partialMetaPath, True)
                numSourceDestinationCycles += len(paths) ** 2
        # Compute the PathSim similarity scores of the two nodes
        similarityScore = (2.0 * numSourceDestinationPaths) / float(numSourceDestinationCycles)
return similarityScore | [
"jon@jontedesco.net"
] | jon@jontedesco.net |
22b8bc857da47675a13651377ee28938d3bc1028 | 5c7f2ff956b1fd1477d56486e239b6e661a08efd | /supervised_learning/0x06-keras/4-train.py | 3af2ede58a820421ddda52a4111f58e7d5ef1030 | [] | no_license | diego0096/holbertonschool-machine_learning | 60c5f40e185df04d02d9887d966542e85a981896 | 64b8984846c2b2b88bbf11125b55b482c7b74eea | refs/heads/master | 2023-04-02T01:27:59.263397 | 2021-04-02T21:33:51 | 2021-04-02T21:33:51 | 279,229,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | #!/usr/bin/env python3
"""Module used to"""
import tensorflow.keras as K
def train_model(
        network,
        data,
        labels,
        batch_size,
        epochs,
        verbose=True,
        shuffle=False):
    """Train *network* on (data, labels) with mini-batch gradient descent.

    Returns the History object produced by ``network.fit``.
    """
    return network.fit(
        data,
        labels,
        epochs=epochs,
        batch_size=batch_size,
        shuffle=shuffle,
        verbose=verbose,
    )
| [
"dfqz93@hotmail.com"
] | dfqz93@hotmail.com |
43690166d40128ffbbd3a038c62b64dc7eeb5ea7 | ebcc57cbd7bc4c951fe3cf9826efc2d03d1e47e8 | /Chapter1/Q1.8.py | d8d57988cc79c56d9f0a1356d1eb38f885e22581 | [] | no_license | Vahid-Esmaeelzadeh/CTCI-Python | 17a672e95f1d886f4fb66239a4aa22a87f38382a | 867360ab13dd63d24d6f3e45b5ac223755942b54 | refs/heads/master | 2022-10-26T16:43:54.939188 | 2020-06-11T21:42:15 | 2020-06-11T21:42:15 | 190,065,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | '''
Zero Matrix: Write an algorithm such that if an element in an MxN matrix is 0, its entire row and
column are set to 0.
'''
def zeroMatrix(mat: list):
    '''In place: for every 0 in the MxN matrix, zero out its entire row and
    column.  The first row/column double as marker storage, so only O(1)
    extra space is used.
    '''
    rows = len(mat)
    cols = len(mat[0]) if rows != 0 else 0
    if rows == 0 or cols == 0:
        return

    # Remember whether the marker row/column themselves contain zeros.
    zero_in_first_row = 0 in mat[0]
    zero_in_first_col = any(row[0] == 0 for row in mat)

    # Record inner-matrix zeros in the first row and first column.
    for r in range(1, rows):
        for c in range(1, cols):
            if mat[r][c] == 0:
                mat[0][c] = 0
                mat[r][0] = 0

    # Zero out the marked rows ...
    for r in range(1, rows):
        if mat[r][0] == 0:
            for c in range(1, cols):
                mat[r][c] = 0

    # ... and the marked columns.
    for c in range(1, cols):
        if mat[0][c] == 0:
            for r in range(1, rows):
                mat[r][c] = 0

    # Finally handle the marker row/column themselves.
    if zero_in_first_row:
        for c in range(cols):
            mat[0][c] = 0
    if zero_in_first_col:
        for r in range(rows):
            mat[r][0] = 0
# Demo: zero out rows/columns containing zeros, then print the result.
a = [[0, 2, 3, 4, 5],
     [4, 1, 6, 7, 7],
     [4, 7, 0, 6, 2],
     [1, 4, 5, 7, 8],
     [6, 6, 6, 6, 0]]
zeroMatrix(a)
for x in a:
print(x)
| [
"v.esmaeelzadeh@gmail.com"
] | v.esmaeelzadeh@gmail.com |
bee1cfef342afe3e9ebadd5185c7059521be9dfc | 54417b54c6e025a5d9bd89ae119c9134ccca4510 | /test/test_connectomics/datastructures/testvoxel.py | 43b80fd448b4b48a89d428df47cb82d55b2de147 | [] | no_license | SheetsKG/py-connectome-analysis | 15481d579c089010b031d57141486114a2a153b2 | 169274d562b2981bc6f04032797c87ca5a66bbb0 | refs/heads/master | 2021-01-22T15:42:33.947279 | 2014-09-16T21:06:25 | 2014-09-16T21:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,230 | py | '''
Created on Feb 3, 2014
@author: u0490822
'''
import unittest
import numpy as np
import connectome_analysis as ca
import connectome_analysis.datastructures.voxel as voxel
import scipy.spatial.distance as sdist
class VoxelTest(unittest.TestCase):
    """Exercises voxel Volume creation, coordinate/index mapping and bounding
    boxes; several tests also write .binvox/.nrrd files for visual checks."""

    def checkIndicies(self, vol, position, expectedIndicies, msg=None):
        # Helper assertion: world-space position must map to the expected
        # voxel indices.  NOTE(review): the message interpolates the actual
        # indices where "expected" reads more natural -- confirm ordering.
        indicies = vol.CoordToIndex(position)
        outstr = "Position %s should map to indicies of %s not %s" % (str(position), str(indicies), str(expectedIndicies))
        if not msg is None:
            outstr = outstr + "\n" + msg
        self.assertTrue(np.all(indicies == expectedIndicies), outstr)
        return
    def testCoordinates(self):
        # Volume with non-trivial origin: check index mapping around the
        # origin and at half/full voxel offsets.
        vox_size = np.array([10, 10, 10.0])
        vol_dim = np.array([8, 16, 32])
        vol_origin = np.array([15, 0.0, 10.0])
        vol = ca.voxel.Volume.Create(voxel_size=vox_size, voxel_count=vol_dim, origin=vol_origin)
        indicies = vol.CoordToIndex(vol_origin)
        self.checkIndicies(vol, vol_origin, np.array([0, 0, 0]), "Origin should map to zero indicies")
        self.checkIndicies(vol, vol_origin - (vox_size / 2.0), np.array([-1, -1, -1]))
        self.checkIndicies(vol, vol_origin + (vox_size / 2.0), np.array([0, 0, 0]))
        self.checkIndicies(vol, vol_origin + vox_size, np.array([1, 1, 1]))
        self.checkIndicies(vol, vol_origin + (vox_size * 1.5), np.array([1, 1, 1]))
        vol.voxels[0:2, 0:4, 0:8] = True
        vol.Save('C:\\Temp\\TestVoxels.nrrd')
        vol.Save('C:\\Temp\\TestVoxels.binvox')
        pass
    def testSphere(self):
        '''Create a voxellized sphere to ensure our voxel pipeline works'''
        vox_size = np.array([1, 1, 1])
        vol_dim = np.array([32, 32, 32])
        vol_origin = np.array([0, 0.0, 0.0])
        sphere_center = (vol_dim / 2.0) * vox_size
        vol = ca.voxel.Volume.Create(voxel_size=vox_size, voxel_count=vol_dim, origin=vol_origin)
        # Mark every voxel within radius 12 of the centre.
        for iX in range(0, vol_dim[0]):
            print "X: " + str(iX)
            for iY in range(0, vol_dim[1]):
                for iZ in range(0, vol_dim[2]):
                    coord = np.array([iX, iY, iZ])
                    dist = sdist.pdist(np.vstack((sphere_center, coord)))
                    vol.voxels[iX, iY, iZ] = np.any(dist < 12.0)
        vol.Save('C:\\Temp\\TestSphere.binvox')
    def AddBoundingBoxToVolume(self, voxvol, BoundingBox):
        # Mark all voxels inside BoundingBox (or a stack of boxes, one per
        # row) as occupied.
        (RegionOrigin, RegionVoxCount) = voxel.VoxelRegion(BoundingBox, voxvol.voxsize, voxvol.origin)
        indicies = RegionOrigin / voxvol.voxsize # vol.CoordToIndex(RegionOrigin)
        endIndex = indicies + RegionVoxCount
        if indicies.ndim == 1:
            voxvol.voxels[indicies[0]:endIndex[0],
                          indicies[1]:endIndex[1],
                          indicies[2]:endIndex[2]] = True
        else:
            for iRow in range(0, indicies.shape[0]):
                voxvol.voxels[indicies[iRow, 0]:endIndex[iRow, 0],
                              indicies[iRow, 1]:endIndex[iRow, 1],
                              indicies[iRow, 2]:endIndex[iRow, 2]] = True
    def testBoundingBox(self):
        vox_size = np.array([10, 10, 10.0])
        vol_dim = np.array([32, 32, 32])
        vol_origin = np.array([0, 0.0, 0.0])
        # BoundingBox = [MinX MinY MinZ MaxX MaxY MaxZ]
        BoundingBox = np.array([10, 19, 31, 19, 40, 50])
        (RegionOrigin, RegionVoxCount) = voxel.VoxelRegion(BoundingBox, vox_size)
        self.assertTrue(np.all(RegionOrigin == np.array([10, 10, 30])))
        self.assertTrue(np.all(RegionVoxCount == np.array([1, 3, 2])))
    def testBoundingBox2(self):
        '''Ensure all voxels within a bounding box are reported'''
        vox_size = np.array([10, 10, 10])
        vol_dim = np.array([32, 32, 32])
        vol_origin = np.array([15, 0.0, -10.0])
        vol = voxel.Volume.Create(voxel_size=vox_size, voxel_count=vol_dim, origin=vol_origin)
        # BoundingBox = [MinX MinY MinZ MaxX MaxY MaxZ]
        BoundingBox = np.array([27, 20, 1, 49, 40, 19])
        # BoundingBox = np.array([25, 20, -10, 55, 80, 30])
        self.AddBoundingBoxToVolume(vol, BoundingBox)
        BoundingBoxes = np.array([[75, 50, 30, 95, 80, 40],
                                  [75, 50, 50, 95, 120, 90]])
        self.AddBoundingBoxToVolume(vol, BoundingBoxes)
        vol.Save('C:\\Temp\\TestBoundingBox.binvox')
    def testBigVolume(self):
        '''Ensure all voxels within a bounding box are reported'''
        vox_size = np.array([.5, 1, 2])
        vol_dim = np.array([512, 512, 512])
        vol_origin = np.array([0, 0.0, 0])
        vol = voxel.Volume.Create(voxel_size=vox_size, voxel_count=vol_dim, origin=vol_origin)
        # BoundingBox = [MinX MinY MinZ MaxX MaxY MaxZ]
        BoundingBox = np.array([64, 1, 255, 255, 511, 356])
        # BoundingBox = np.array([25, 20, -10, 55, 80, 30])
        self.AddBoundingBoxToVolume(vol, BoundingBox)
        # BoundingBoxes = np.array([[75, 50, 30, 95, 80, 40],
        # [75, 50, 50, 95, 120, 90]])
        # self.AddBoundingBoxToVolume(vol, BoundingBoxes)
        vol.Save('C:\\Temp\\TestLargeVolume.binvox')
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
unittest.main() | [
"james.r.andreson@utah.edu"
] | james.r.andreson@utah.edu |
2b8c4867f20c06f28ecc9fbcf2774b04be05a04e | c15a28ae62eb94dbf3ed13e2065195e572a9988e | /Fluent Python/20/20.2.py | c2c01f531425d86d457d80596f247fa9439c1bda | [] | no_license | xuyuchends1/python | 10798c92840a1a59d50f5dc5738b2881e65f7865 | 545d950a3d2fee799902658e8133e3692939496b | refs/heads/master | 2021-01-25T07:07:04.812140 | 2020-02-28T09:25:15 | 2020-02-28T09:25:15 | 93,647,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | class Quantity:
_count=0
def __init__(self):
cls=self.__class__
prefix=cls.__name__
index=cls._count
self.storage_name='_{}#{}'.format(prefix,index)
cls._count+=1
def __set__(self, instance, value):
if value>0:
instance.__dict__[self.storage_name]=value
else:
raise ValueError("value must be >0")
def __get__(self, instance, owner):
return getattr(instance,self.storage_name)
class LineItem:
    """An order line whose weight and price are validated (must be > 0)
    by the Quantity descriptors."""

    weight = Quantity()
    price = Quantity()

    def __init__(self, description, weight, price):
        # Both assignments below go through Quantity.__set__.
        self.description = description
        self.weight = weight
        self.price = price

    def subtotal(self):
        """Total cost of this line: weight times unit price."""
        return self.weight * self.price
# Demo: the first Quantity descriptor created stores under '_Quantity#0',
# so getattr below retrieves the validated weight (20) from the instance.
count=LineItem('test',20,18.3)
temp=getattr(count,'_Quantity#0')
pass
| [
"xuyuchends@163.com"
] | xuyuchends@163.com |
25c8366f63832321581fc1dce1f206218e841d65 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/698PartitiontoKEqualSumSubsets.py | 1a8ec7581d9cf1bb49975573f231f9da53963e1b | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 1,607 | py | # coding=utf-8
'''
Created on 2017-10-16
@author: Administrator
'''
class Solution(object):
    def canPartitionKSubsets(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: bool
        """
        # Feasibility pre-checks: the total must divide evenly into k
        # subsets, and no single number may exceed the per-subset target.
        # (Python 2 code: `summ / k` is integer division here.)
        summ = sum(nums)
        if summ % k != 0: return False
        target = summ / k
        if max(nums) > target:
            return False
        used = [False] * len(nums)
        # Try large numbers first so dead ends are pruned early.
        nums = sorted(nums, reverse = True)
        # Set of (remaining_target, remaining_subsets) states already shown
        # to fail, so they are not re-explored.
        # NOTE(review): the memo key ignores the `used` state -- presumably
        # adequate for these inputs, but worth confirming in general.
        memo = set()
        def dfs(cur_target, k, nums, used, st,):
#             print cur_target, k, nums, used, st
            key = (cur_target, k)
            if key in memo:
                return False
            if k == 1:
                # One subset left: the unused numbers sum to exactly target
                # because the overall sum is k * target.
                return True
            if cur_target == 0:
                # Current subset is complete; start filling the next one.
                if dfs(target, k - 1, nums, used, 0):
                    return True
                else:
                    memo.add(key)
            for i in range(st, len(nums)):
                if used[i] == False and nums[i] <= cur_target:
                    # Branch 1: put nums[i] into the current subset.
                    used[i] = True
                    if dfs(cur_target - nums[i], k, nums, used, i + 1):
                        return True
                    used[i] = False
                    # Branch 2: skip nums[i] for this subset.
                    if dfs(cur_target, k, nums, used, i + 1):
                        return True
            memo.add(key)
            return False
        return dfs(target, k, nums, used, 0)
# Ad-hoc manual checks (Python 2 print statement). Note that the first two
# assignments of nums and k are immediately overwritten below.
nums = [4, 3, 2, 3, 5, 2, 1]
k = 4
nums = [2, 2, 2, 2, 3, 4, 5]
nums = [3522, 181, 521, 515, 304, 123, 2512, 312, 922, 407, 146, 1932, 4037, 2646, 3871, 269]
k = 5
print Solution().canPartitionKSubsets(nums, k)
| [
"yanhuang1293@gmail.com"
] | yanhuang1293@gmail.com |
7cf12da9090e35e42593c445e9e5eb711089d0fb | a52066a5f390e1372fd4de78c69c16b5e247e46a | /property/admin.py | 2d37a754e348b2cdbdd1282e97ca6faed46d8e92 | [] | no_license | bl4ck4ndbr0wn/landville-backend-web-api | 48de112b50a16da81611b550a91bd71486b20824 | 2248e95a91ffabc0c69fad25ba69a7ade1081512 | refs/heads/develop | 2022-12-14T11:18:29.294693 | 2019-09-17T07:58:41 | 2019-09-17T07:58:41 | 230,882,054 | 0 | 0 | null | 2022-12-08T05:34:56 | 2019-12-30T08:54:55 | null | UTF-8 | Python | false | false | 283 | py | from django.contrib import admin
from .models import (Property, PropertyReview,
PropertyInspection, PropertyEnquiry)
# Expose the property-related models in the Django admin with the default
# ModelAdmin options.
admin.site.register(Property)
admin.site.register(PropertyReview)
admin.site.register(PropertyEnquiry)
admin.site.register(PropertyInspection)
| [
"ephraim.malinga@gmail.com"
] | ephraim.malinga@gmail.com |
f3b2047bb8c15d009d5cfdc2294fd57939f37105 | ff3eb18d5c3234a7e23a03fac7f8cc2a9bf94547 | /glearn/_likelihood/_base_likelihood.py | be0f78987b4f72087c2946cdb8b65715e075c570 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ameli/glearn | afe9f73edcf1719a9a59600d3934ce3653d7e43a | c5183c746306522e74e163b64ef115a65681266c | refs/heads/main | 2023-08-16T16:36:37.097729 | 2023-08-15T23:38:08 | 2023-08-15T23:38:08 | 373,664,668 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | # SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
# =======
# Imports
# =======
from ..device._timer import Timer
# ===============
# Base Likelihood
# ===============
class BaseLikelihood(object):
    """
    Base class for likelihood implementations. Caches the design matrix,
    prior parameters, and data vector that concrete likelihood classes
    share, and precomputes the model's degrees of freedom.
    """
    # ====
    # init
    # ====
    def __init__(self, mean, cov, z):
        """
        Initialize from a mean (linear-model) object exposing X, b, B, Binv,
        a covariance object exposing mixed_cor, and the data vector z.
        """
        # Input attributes
        self.mean = mean
        self.cov = cov
        self.z = z
        # Member data
        self.X = self.mean.X
        self.b = self.mean.b
        self.B = self.mean.B
        self.Binv = self.mean.Binv
        self.mixed_cor = self.cov.mixed_cor
        if self.B is not None:
            # Translate data to the mean of prior of beta.
            self.z = self.z - self.X @ self.b
        # Degrees of freedom of linear model
        if self.B is None:
            # No prior on beta: each of the m regression coefficients
            # consumes one degree of freedom.
            m = self.X.shape[1]
            self.dof = m
        else:
            self.dof = 0
        # Residual degrees of freedom
        n = self.X.shape[0]
        self.rdof = n - self.dof
        # Counting elapsed wall time and cpu process time
        self.timer = Timer()
| [
"sia.sunrise@gmail.com"
] | sia.sunrise@gmail.com |
748335d095a70db2081c5a1775861689f7ca3d8a | 1fd7d0ac2903beb5ef70370b22485a3b43df7466 | /Machine Learning/Klassifikation/Logistische Regression.py | b830413869dfe89c624468c86e2c1fc8d7429582 | [
"MIT"
] | permissive | stanman71/Python_Basics | a34b3ea95b035ced5e607a8ba4841836c7667666 | fe442e421362b22f61d05235e835a568d9ce3aef | refs/heads/master | 2021-06-07T21:29:58.565300 | 2019-09-22T21:07:56 | 2019-09-22T21:07:56 | 161,891,286 | 1 | 0 | MIT | 2021-05-08T16:50:05 | 2018-12-15T09:47:23 | CSS | UTF-8 | Python | false | false | 2,234 | py |
""" Unter logistischer Regression oder Logit-Modell versteht man Regressionsanalysen zur
Modellierung der Verteilung abhängiger zufälliger (diskreter) Variablen und Zuordnung
zu einer Klasse.
Ziel: Kurvenverlauf mit möglichst geringen Abstand zu den einezlnen Punkten """
## ##########################
## Teil 0: Einlesen der Daten
## ##########################
import pandas as pd
df = pd.read_csv("./Python_Training/Machine Learning/Klassifikation/CSV/classification.csv")
## ################################################################
## Teil 1: Aufteilung in Trainings- und Testdaten (hier: 75% / 25%)
## ################################################################
from sklearn.model_selection import train_test_split
# Welche Spalten sollen zur Vorhersage verwendet werden
X = df[["age", "interest"]].values
""" Oder: Die Spalte "success" soll nicht zur Vorhersage verwendet werden:
X = df.drop("success", axis = 1).values """
y = df["success"].values
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0, test_size = 0.25)
## ########################
## Teil 2: Daten skallieren
## ########################
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
## #########################
## Teil 3: Modell trainieren
## #########################
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(solver='lbfgs')
model.fit(X_train, y_train)
# Güte des Modells
print(model.score(X_test, y_test))
## ##########################
## Teil 4: Ergebnisse plotten
## ##########################
""" Hinweis: Benötigt plot_classifier.py """
from Support.plot_classifier import plot_classifier
# Trainings-Daten plotten (proba bezieht sich auf die Visualisierung der Grenze)
plot_classifier(model, X_train, y_train, proba = True, xlabel = "Alter", ylabel = "Interesse")
# Testdaten plotten (proba bezieht sich auf die Visualisierung der Grenze)
plot_classifier(model, X_test, y_test, proba = True, xlabel = "Alter", ylabel = "Interesse")
| [
"stanlay@gmx.net"
] | stanlay@gmx.net |
49d717a463fe1aa2aba56a457a609fd5ef28eaef | 99fca8eaa3fb5e93ed4ed857b439293bc0952c79 | /Code Testing/test_survey.py | a5a66d73334b39d02acfeb0c00b35ea6e63ad6e4 | [] | no_license | Ebyy/python_projects | 7adb377f4e8eec94613e4e348f02c2ded306efac | 0cacfab443d3eeeb274836b7be4b7205585f7758 | refs/heads/master | 2020-05-19T22:28:17.672051 | 2019-05-19T19:32:19 | 2019-05-19T19:32:19 | 185,240,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | import unittest
from survey import AnonymousSurvey
class TestAnonymousSurvey(unittest.TestCase):
    """Tests for the AnonymousSurvey class."""
    def test_store_single_response(self):
        """A single stored response appears in the survey's responses."""
        survey = AnonymousSurvey("What language did you first learn to speak?")
        survey.store_response('English')
        self.assertIn('English', survey.responses)
    def test_store_three_responses(self):
        """Three individually stored responses all appear in responses."""
        survey = AnonymousSurvey("What language did you first learn to speak?")
        answers = ('English', 'German', 'French')
        for answer in answers:
            survey.store_response(answer)
        for answer in answers:
            self.assertIn(answer, survey.responses)
unittest.main()
| [
"eberechi_oo@yahoo.com"
] | eberechi_oo@yahoo.com |
a6a11b9f02b65505c57c67c82f445c38bf20424f | 9cbab916088192af67a19aaee25fe7d6e5d27a31 | /web/flask/mvc/mvc.py | 4b9513e871f0b010000c26f6a1b3992bdfc99267 | [] | no_license | ddayzzz/Pythonlearning | 806c75304d7d954f2c935031d4d7516be7ce7300 | 54e92aa5282da97b6d4bd2355a668a16c272ee68 | refs/heads/master | 2020-12-30T12:44:49.465356 | 2017-05-25T15:12:53 | 2017-05-25T15:12:53 | 91,356,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | # coding=utf-8
# MVC结构初探
from flask import Flask, render_template, request
import json
import os
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
    # Landing page; the same template is served for GET and POST.
    return render_template('home.html')
@app.route('/signin', methods=['GET'])
def signin_form():
    # Show the empty sign-in form.
    return render_template('form.html')
@app.route('/signin', methods=['POST'])
def signin():
    """Validate submitted credentials; on failure, persist the attempt in
    record.json and re-render the form with one marker per past failure.
    """
    username = request.form['username']
    passwd = request.form['password']
    # NOTE(review): credentials are hard-coded; acceptable only for a demo.
    if username == 'admin' and passwd == '123':
        return render_template('signin-ok.html', username=username)
    # Record the failed attempt.
    previous_failures = _record_failed_signin()
    return render_template('form.html', message='Bad username or password!',
                           count=range(1, previous_failures + 2), username=username)

def _record_failed_signin():
    """Persist an incremented failure count in record.json and return the
    number of failures recorded *before* this one (0 if the file was absent).
    """
    if os.path.exists('record.json'):
        with open('record.json', 'r') as jsfptr:
            previous = json.load(jsfptr)['count']
    else:
        previous = 0
    with open('record.json', 'w') as jsfptr:
        jsfptr.write(json.dumps({'count': previous + 1}))
    return previous
if __name__ == '__main__':
app.run() | [
"wangshu214@live.cn"
] | wangshu214@live.cn |
183146db1c12b5739955d5dd2905e1e8753a16e5 | b872ccff0c2f79886c0136b32da5f04cb8d3276c | /etcewrappers/emane/emaneeventtdmaschedule.py | 55875966e00e3b6b7a7d46e29ca2f41d9efe2459 | [] | no_license | prj8121/python-etce | 9c22b3a182f103f46b1d865d13ded277482e4a34 | bbd74a65280a09f3edc05457961b8c51ec009165 | refs/heads/master | 2022-11-18T05:19:19.324966 | 2020-04-02T15:15:47 | 2020-04-02T15:15:47 | 276,674,792 | 0 | 0 | null | 2020-07-02T14:57:07 | 2020-07-02T14:57:06 | null | UTF-8 | Python | false | false | 3,961 | py | #
# Copyright (c) 2015-2018 - Adjacent Link LLC, Bridgewater, New Jersey
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Adjacent Link LLC nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import etce.timeutils
from etce.eelsequencer import EELSequencer
from etce.wrapper import Wrapper
class EmaneEventTDMASchedule(Wrapper):
    """
    Issue TDMA schedule events using emaneevent-tdmaschedule based on events
    listed in the input EEL file. EEL lines require this format:
    TIME NEMIDS tdmaschedule SCHEDULEXMLFILE
    Example: Issue schedule events at time 3.0 and 47.0 to different NEM
    groups.
    3.0 nem:1-5,7 tdmaschedule schedule-003.xml
    47.0 nem:9 tdmaschedule schedule-047.xml
    """
    def register(self, registrar):
        # Declare the files and arguments this wrapper consumes/produces.
        registrar.register_infile_name('scenario.eel')
        registrar.register_outfile_name('tdmaschedule.log')
        registrar.register_argument('eventservicegroup',
                                    '224.1.2.8:45703',
                                    'The Event Service multicast group and port.')
        # NOTE(review): 'multcast' in the help text below is a typo for
        # 'multicast' (user-visible string, left unchanged here).
        registrar.register_argument('eventservicedevice',
                                    None,
                                    'Event channel multcast device.')
    def run(self, ctx):
        # Nothing to do when no EEL file was provided.
        if not ctx.args.infile:
            return
        if not ctx.args.eventservicedevice:
            message = 'Wrapper emane.emaneeventtdmaschedule mandatory ' \
                      'argument "eventservicedevice" not specified. Quitting.'
            raise RuntimeError(message)
        mcgroup,mcport = ctx.args.eventservicegroup.split(':')
        # Sequence only the 'tdmaschedule' events from the EEL file.
        sequencer = EELSequencer(ctx.args.infile,
                                 ctx.args.starttime,
                                 ('tdmaschedule',))
        for _,_,eventargline in sequencer:
            # parse inputs
            # 0.0 nem:1-5 tdmaschedule tdmaschedules/t000.xml
            eventargs = eventargline.split()
            schedulexml = eventargs[0]
            # build argstr
            argstr = \
                '--device %s --group %s --port %s %s' \
                % (ctx.args.eventservicedevice, mcgroup, mcport, schedulexml)
            ctx.run('emaneevent-tdmaschedule', argstr, genpidfile=False)
            # and log it
            with open(ctx.args.outfile,'a') as lf:
                lf.write('%s: emaneevent-tdmaschedule %s\n' % (etce.timeutils.getstrtimenow(),
                                                              argstr))
    def stop(self, ctx):
        # Nothing to tear down; each event is issued as a one-shot command.
        pass
| [
"eschreiber@adjacentlink.com"
] | eschreiber@adjacentlink.com |
bf26cfc8f60644c52cb50448bc33a7c5e8dddb18 | 505343f6ace00d22f8753c1a943a5794a619e698 | /katas/Python/6 kyu/Schrdingers Boolean 5a5f9f80f5dc3f942b002309.py | e09f00fb145d91c745d59a94f46968afea841d81 | [] | no_license | bullet1337/codewars | 7652e50bf768bc47976a9124dd98b93602d4d458 | ba7f13ddd766158b41e036dae5d6b15f7f08761a | refs/heads/master | 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | # https://www.codewars.com/kata/5a5f9f80f5dc3f942b002309
class Omnibool:
    """Schroedinger's boolean: an object that compares equal to anything."""
    def __eq__(self, other):
        # Equality holds no matter what we are compared against.
        return True
omnibool = Omnibool()
| [
"alichek95@mail.ru"
] | alichek95@mail.ru |
6d4ba7dbe4ec8724bcfc6c95ce18bb818fc5b124 | 0ebcfdb5a98ff3e3975fb16e5f3b0616447b27e5 | /DPSPipeline/database/userassignments.py | c07e0f32c85254e6e9223126fbb45bb0ef4c8edb | [] | no_license | satishgoda/DPS_PIPELINE | ff7723dba09c54dca4caaaf390c398f33d474bf3 | 49100eea1f81bb0b86a5fed1bb5c3b1b5411b912 | refs/heads/master | 2021-01-21T18:34:46.613939 | 2016-03-04T22:42:47 | 2016-03-04T22:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,241 | py | import sharedDB
#timestamp
from datetime import datetime
from PyQt4 import QtCore
from PyQt4.QtCore import QObject
'''
Group by project/phase
order by due date
For percentage completed Get the number of unfinished tasks / total number of tasks
To get estimated hours per assignment, take (8 hr * work days left in phase) / number of incomplete tasks under phase
'''
class UserAssignment(QObject):
    """One user's booking against an assignment (hours, status), mirroring a
    row of the MySQL `userassignments` table. Emits Qt signals when the row
    is added or changed so dependent views can recalculate.
    (Python 2 / PyQt4 code.)
    """
    userAssignmentChanged = QtCore.pyqtSignal(QtCore.QString)
    userAssignmentAdded = QtCore.pyqtSignal(QtCore.QString)
    def __init__(self,_iduserassignments = -1, _idusers = -1, _assignmentid = -1, _assignmenttype = '', _idstatuses = 1, _timestamp = datetime.now(), _hours = 0, _updated = 0, _new = 0):
        # NOTE(review): the datetime.now() default is evaluated once at class
        # definition time, so instances created without an explicit
        # _timestamp all share that import-time value -- confirm intended.
        # NOTE(review): super(QObject, self) skips UserAssignment in the MRO;
        # the conventional form is super(UserAssignment, self) -- verify.
        super(QObject, self).__init__()
        # define custom properties
        self._iduserassignments = _iduserassignments
        self._idusers = _idusers
        self._assignmentid = _assignmentid
        self._assignmenttype = _assignmenttype
        self._idstatuses = _idstatuses
        self._hours = _hours
        self._timestamp = _timestamp
        self._updated = _updated
        self._new = _new
        self._type = "userassignment"
        self._hidden = False
        self._new = _new
        self.statusButton = ''
        #if self._idstatuses == 3 or self._idstatuses == 5:
            #self._hidden = True
        # Rows loaded from the database link themselves to the in-memory
        # user/phase objects immediately; new rows wait until saved.
        if not self._new:
            self.connectToDBClasses()
        self._estimatedHoursLeft = 0
        '''
        if self.assignmentType() == "phase_assignment":
            self._scarcityIndex = sharedDB.myPhaseAssignments(str(self._assignmentid))._scarcityIndex
        else:
            self._scarcityIndex = 0
        '''
        # Recompute availability whenever this assignment changes.
        self.userAssignmentChanged.connect(sharedDB.myAvailabilityManager.CalculateBooking)
    def __eq__(self, another):
        # Identity is the database primary key, not Python object identity.
        return hasattr(another, '_iduserassignments') and self._iduserassignments == another._iduserassignments
    def __hash__(self):
        return hash(self._iduserassignments)
    def id(self):
        # Database primary key (iduserassignments).
        return self._iduserassignments
    def Save(self):
        # Insert new rows, update dirty ones; otherwise no-op.
        if self._new:
            self.AddUserAssignmentToDB()
            print "User Assignment '"+str(self._iduserassignments)+"' Added to Database!"
        elif self._updated:
            #print self._number+" Updated!"
            self.UpdateUserAssignmentInDB()
            print "User Assignment '"+str(self._iduserassignments)+"' Updated in Database!"
    def AddUserAssignmentToDB(self):
        # NOTE(review): the SQL below is built by string concatenation;
        # parameterized queries would avoid quoting/injection issues.
        rows,self._iduserassignments = sharedDB.mySQLConnection.query("INSERT INTO userassignments (idusers, assignmentid, assignmenttype, idstatuses, lasteditedbyname, lasteditedbyip, appsessionid, hours) VALUES ('"+str(self._idusers)+"', '"+str(self._assignmentid)+"', '"+str(self._assignmenttype)+"', '"+str(self._idstatuses)+"', '"+str(sharedDB.currentUser._name)+"', '"+str(sharedDB.mySQLConnection.myIP)+"', '"+str(sharedDB.app.sessionId())+"', '"+str(self._hours)+"');","commit")
        #self._iduserassignments = sharedDB.mySQLConnection._lastInsertId
        # Register in the global cache and notify listeners of the new row.
        sharedDB.myUserAssignments[str(self._iduserassignments)] = self
        self.userAssignmentAdded.emit(str(self._iduserassignments))
        self._new = 0
    def UpdateUserAssignmentInDB (self):
        if self.id() is not None:
            sharedDB.mySQLConnection.query("UPDATE userassignments SET idusers = '"+str(self._idusers)+"', assignmentid = '"+str(self._assignmentid)+"', assignmenttype = '"+str(self._assignmenttype)+"', idstatuses = '"+str(self._idstatuses)+"', lasteditedbyname = '"+str(sharedDB.currentUser._name)+"', lasteditedbyip = '"+str(sharedDB.mySQLConnection.myIP)+"', appsessionid = '"+str(sharedDB.app.sessionId())+"', hours = '"+str(self._hours)+"' WHERE iduserassignments = "+str(self._iduserassignments)+";","commit")
        self._updated = 0
    def SetValues(self,_iduserassignments = -1, _idusers = -1, _assignmentid = -1, _assignmenttype = '', _idstatuses = 1, _hours = 0, _timestamp = datetime.now()):
        # Overwrite local state with values pushed from the database, then
        # broadcast the change to listeners.
        print ("Downloaded update for UserAssignment '"+str(self._iduserassignments)+"'")
        self._iduserassignments = _iduserassignments
        self._idusers = _idusers
        self._assignmentid = _assignmentid
        self._assignmenttype = _assignmenttype
        self._idstatuses = _idstatuses
        self._hours = _hours
        self._timestamp = _timestamp
        #update views containing project
        #update calendar view
        #self.UpdateCalendarView()
        self.userAssignmentChanged.emit(str(self._iduserassignments))
        #self.UpdateProjectView()
        ##if current project changed, update values
        ##else just update project list
    def setStatus(self,newStatus):
        # NOTE(review): writes self._status, while __init__ defines
        # self._idstatuses -- confirm which attribute is authoritative.
        self._status = newStatus
        self._updated = 1
    def setHours(self, hours):
        #if hours <1 delete assignment?
        self._hours = hours
        self.userAssignmentChanged.emit(str(self._iduserassignments))
        self._updated = 1
    def connectToDBClasses(self):
        # Link this assignment into the owning user's assignment dict and,
        # for phase assignments, into the matching phase object.
        #connect to users
        if str(self._idusers) in sharedDB.myUsers:
            user = sharedDB.myUsers[str(self._idusers)]
            user._assignments[str(self.id())] = self
        if self.assignmentType() == "phase_assignment":
            #for phase in sharedDB.myPhaseAssignments:
                #if phase.idphaseassignments() == self.assignmentID():
            if str(self.assignmentID()) in sharedDB.myPhaseAssignments:
                phase = sharedDB.myPhaseAssignments[str(self.assignmentID())]
                phase.addUserAssignment(self)
                if self.hours():
                    if not phase.assigned():
                        phase.setAssigned(1)
    def assignmentID(self):
        return self._assignmentid
    def assignmentType(self):
        return self._assignmenttype
    def idUsers(self):
        return self._idusers
    def idUserAssignment(self):
        return self._iduserassignments
    def hours(self):
        return self._hours
    #
    '''if self._assignmenttype = 'phaseassignment':
            #iterate through shots
            for shot in sharedDB.myShots:
                ##if idsequences matches
                #print "Shot id:" +str(shot._idshots)+" Task Id shots: "+str(myTask._idshots)
                if shot._idshots == myUserAssignment._idshots:
                    ###add to shot's task list
                    if shot._tasks is not None:
                        #print "Appending shot: "+str(shot._idshots)+"'s task list"
                        shot._tasks.append(myUserAssignment)
                    else:
                        #print "Creating shot: "+str(shot._idshots)+"'s task list"
                        shot._tasks = [myUserAssignment]
                    sharedDB.mySQLConnection.newTaskSignal.emit(str(myUserAssignment._idtasks))
                    break
    '''
| [
"kanooshka@gmail.com"
] | kanooshka@gmail.com |
1d1a3137d03706cbb5c91955085020705c30c27e | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/pyinstaller/PyInstaller/loader/pyimod02_archive.py | 9296bbd14c1aef42a27da920eae5501b3e9edccd | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c97ad5e579ca97d5b44d77cfa269fe74c6d46c1c0b473a1c8de6cf7df2228569
size 7279
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
2b9265fbb0e7033162161f8319ba5d843fec6c6e | 7c79c8caee77d08aa05cdc59eb68e569abf54a7e | /ics 33/solutions/ile2 solutions/Lab 8/PhanChristopher/poly.py | 5eebdb03bbe68e9c447e9c4bb418dcfd86860a0f | [] | no_license | solomc1/python | 2e4715cc24e7b23d91c879fc95954f615a615982 | 119e388fb6f4ab42f581e48393919d4052a08ef6 | refs/heads/master | 2021-01-17T16:48:02.671810 | 2016-07-29T05:27:50 | 2016-07-29T05:27:50 | 64,452,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,060 | py | class Poly:
def __init__(self,*terms) -> None:
'''
> self.terms is a dictionary
> keys are *powers* of x
> values are *coefficients* of x
'''
self.terms = {}
if len(terms) == 0:
return
else:
for coefficient,power in terms:
#--------------------
assert type(coefficient) == int
assert type(power) == int
assert power >= 0
assert power not in self.terms
#--------------------
if coefficient == 0:
pass
else:
self.terms[power] = coefficient
#--------------------
return
def __str__(self) -> str:
def term(c,p,var):
return (str(c) if p == 0 or c != 1 else '') +\
('' if p == 0 else var+('^'+str(p) if p != 1 else ''))
if len(self.terms) == 0:
return '0'
else:
return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')
def __repr__(self) -> str:
'''
Can Do Better
'''
s = "Poly("
for x in self.terms.items():
s += str((x[1],x[0]))
s +=","
s = s.rstrip(",")
s += ")"
return s
def __len__(self):
'''
Can Do Better
'''
length = 0
for key in self.terms:
for check in self.terms:
if key>=check:
length = key
else:
length = 0
return length
def __call__(self,arg:int or float):
'''
Can Do Better
'''
term = 0
if type(arg) == int or type(arg) == float:
for key,value in self.terms.items():
term += value*(arg**key)
return term
else:
raise TypeError("arg was not an int or float object")
def __iter__(self):
'''
Can Do Better
'''
l = sorted(self.terms.items(),reverse=True)
for t in l:
yield (t[1],t[0])
def __getitem__(self,index):
'''
Can Do Better
'''
if type(index) == int:
if index >= 0:
if index in self.terms:
return self.terms[index]
else:
return 0
else:
raise TypeError("Incorrect Input")
else:
raise TypeError("Incorrect Input")
def __setitem__(self,index,value):
if type(index) == int and index >= 0:
if value == 0:
if index in self.terms:
del self.terms[index]
# equavelent to self.terms.__delitem__(index)
else:
self.terms[index] = value
else:
raise TypeError("Incorrect Input")
def __delitem__(self,index) -> None:
'''
Is it this simple?
'''
if type(index) == int and index >= 0:
if index in self.terms:
self.terms.__delitem__(index)
else:
raise TypeError("Incorrect Input")
return
def _add_term(self,c,p) -> None:
if type(c) == int or type(c) == float:
if type(p) == int and p > 0:
if p not in self.terms and p <= 0:
self.terms[p] = c
elif p in self.terms and p <= 0:
self.terms[p] += c
if self.terms[p] == 0:
del self.terms[p]
else:
raise TypeError("Power is either not an int or negative")
else:
raise TypeError("Coefficient is neither a float or int object")
return
def __add__(self,right):
pass
def __radd__(self,left):
pass
def __mul__(self,right):
pass
def __rmul__(self,left):
pass
def __eq__(self,right):
pass
if __name__ == '__main__':
    # Some simple tests; you can comment them out and/or add your own before
    # the driver is called.
    print('Start simple tests')
    p = Poly((3,2),(-2,1), (4,0))
    print('  For Polynomial: 3x^2 - 2x + 4')
    print('  str(p):',p)
    print('  repr(p):',repr(p))
    print('  len(p):',len(p))
    print('  p(2):',p(2))
    print('  list collecting iterator results:',[t for t in p])
    print('  p+p:',p+p)
    print('  p+2:',p+2)
    print('  p*p:',p*p)
    print('  p*2:',p*2)
    print('End simple tests\n')
    # Course-provided interactive grading driver.
    import driver
    #driver.default_show_exception=True
    #driver.default_show_exception_message=True
    #driver.default_show_traceback=True
driver.driver() | [
"solomc1@uci.edu"
] | solomc1@uci.edu |
35e4465ea5c36e613c6aa15fad18b6c59e34aca2 | d8e23b9eaaea8080aa7a910b06fe1ae04b7f2a74 | /flavio/math/test_optimize.py | ace5779277eed50b329b85089cccbea69c4cecf4 | [
"MIT"
] | permissive | flav-io/flavio | 7ba0f8735193f2014ee69b4b64e139714637f1df | cf9fe5c56b2a6930e366142894ddc66951c1ce52 | refs/heads/master | 2023-07-07T00:45:48.923555 | 2023-06-01T13:25:59 | 2023-06-01T13:25:59 | 50,420,265 | 76 | 65 | MIT | 2023-06-29T06:57:05 | 2016-01-26T10:03:12 | Python | UTF-8 | Python | false | false | 1,149 | py | import unittest
import numpy as np
import numpy.testing as npt
import flavio
def f(x):
    """Paraboloid objective with its minimum value 0 at (2, 1)."""
    dx = x[0] - 2
    dy = x[1] - 1
    return dx**2 + dy**2
def g(x):
    # Negated objective of f, used to exercise the maximizer.
    return -f(x)
def h(x, a):
    """Paraboloid with minimum 0 at (a, 1); exercises extra-args passing."""
    dx = x[0] - a
    dy = x[1] - 1
    return dx * dx + dy * dy
class TestOptimize(unittest.TestCase):
    def test_slsqp(self):
        # SLSQP backend: f has its minimum at (2, 1); g (the negated f)
        # gives the maximizer the same optimum; h checks args forwarding.
        res = flavio.math.optimize.minimize_robust(f, [0, 0], disp=False, methods=('SLSQP',))
        npt.assert_array_almost_equal(res.x, [2, 1])
        res = flavio.math.optimize.maximize_robust(g, [5, 5], disp=False, methods=('SLSQP',))
        npt.assert_array_almost_equal(res.x, [2, 1])
        res = flavio.math.optimize.minimize_robust(h, [0, 0], args=(3,), methods=('SLSQP',))
        npt.assert_array_almost_equal(res.x, [3, 1])
    def test_minuit(self):
        # Same optima found via the MIGRAD (Minuit) backend.
        res = flavio.math.optimize.minimize_migrad(f, [0, 0], print_level=0)
        npt.assert_array_almost_equal(res.x, [2, 1])
        res = flavio.math.optimize.minimize_robust(f, [0, 0], methods=('MIGRAD',))
        npt.assert_array_almost_equal(res.x, [2, 1])
        res = flavio.math.optimize.minimize_robust(h, [0, 0], args=(3,), methods=('MIGRAD',))
        npt.assert_array_almost_equal(res.x, [3, 1])
| [
"david.straub@tum.de"
] | david.straub@tum.de |
42c767941b8f7a8d86d7e4cb597956535457e53c | d324b3d4ce953574c5945cda64e179f33c36c71b | /php/php-sky/grpc/src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py | 32b0341ad7966e0c9bd1fa259e35f43792aea7cf | [
"Apache-2.0"
] | permissive | Denticle/docker-base | decc36cc8eb01be1157d0c0417958c2c80ac0d2f | 232115202594f4ea334d512dffb03f34451eb147 | refs/heads/main | 2023-04-21T10:08:29.582031 | 2021-05-13T07:27:52 | 2021-05-13T07:27:52 | 320,431,033 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,017 | py | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing stream-related code."""
from grpc.framework.foundation import stream
class TestConsumer(stream.Consumer):
    """A stream.Consumer that records every call made on it, for tests.
    Attributes:
      calls: A sequence of (value, is_terminal) pairs, one per call received.
    """
    def __init__(self):
        self.calls = []
    def consume(self, value):
        """See stream.Consumer.consume for specification."""
        self.calls.append((value, False))
    def terminate(self):
        """See stream.Consumer.terminate for specification."""
        self.calls.append((None, True))
    def consume_and_terminate(self, value):
        """See stream.Consumer.consume_and_terminate for specification."""
        self.calls.append((value, True))
    def is_legal(self):
        """Reports whether or not a legal sequence of calls has been made."""
        seen_terminal = False
        for value, terminal in self.calls:
            # Nothing may follow termination, and a plain consume call
            # must have carried a value.
            if seen_terminal or (value is None and not terminal):
                return False
            seen_terminal = seen_terminal or terminal
        return True
    def values(self):
        """Returns the sequence of values that have been passed to this Consumer."""
        return [value for value, _ in self.calls if value]
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
60b0db1dcc7d38d48d1c419268514304c1806bd6 | 9c4508b340f7f84fc5084decc64ebff75afaec68 | /analysis/webservice/webmodel/StatsComputeOptions.py | 86e5d597215a4585d335caa8d2b389c2df4ae911 | [
"Apache-2.0"
] | permissive | apache/incubator-sdap-nexus | 4590d6417b362acd88ac3ec6b315da06f7460718 | 76f3e4d617abbf283804d6f52aa2eff86e15a744 | refs/heads/master | 2023-09-01T12:52:17.381622 | 2023-08-22T21:35:26 | 2023-08-22T21:35:26 | 108,511,090 | 21 | 34 | Apache-2.0 | 2023-09-14T20:18:33 | 2017-10-27T07:00:11 | Python | UTF-8 | Python | false | false | 2,586 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class StatsComputeOptions(object):
    """Abstract accessor interface for statistics-computation request
    options: spatial bounds, dataset/environment, time range, row range,
    filtering and plotting controls. Subclasses must override every getter;
    the bodies here only raise.
    NOTE(review): raising NotImplementedError (or using abc.ABC) would be
    more conventional than a generic Exception, but that would change the
    exception type existing callers may be catching, so it is left as-is.
    """
    def __init__(self):
        pass
    def get_apply_seasonal_cycle_filter(self, default="false"):
        raise Exception("Please implement")
    def get_max_lat(self, default=90.0):
        raise Exception("Please implement")
    def get_min_lat(self, default=-90.0):
        raise Exception("Please implement")
    def get_max_lon(self, default=180):
        raise Exception("Please implement")
    def get_min_lon(self, default=-180):
        raise Exception("Please implement")
    def get_dataset(self):
        raise Exception("Please implement")
    def get_environment(self):
        raise Exception("Please implement")
    def get_start_time(self):
        raise Exception("Please implement")
    def get_end_time(self):
        raise Exception("Please implement")
    def get_start_year(self):
        raise Exception("Please implement")
    def get_end_year(self):
        raise Exception("Please implement")
    def get_clim_month(self):
        raise Exception("Please implement")
    def get_start_row(self):
        raise Exception("Please implement")
    def get_end_row(self):
        raise Exception("Please implement")
    def get_content_type(self):
        raise Exception("Please implement")
    def get_apply_low_pass_filter(self, default=False):
        raise Exception("Please implement")
    def get_low_pass_low_cut(self, default=12):
        raise Exception("Please implement")
    def get_low_pass_order(self, default=9):
        raise Exception("Please implement")
    def get_plot_series(self, default="mean"):
        raise Exception("Please implement")
    def get_plot_type(self, default="default"):
        raise Exception("Please implement")
def get_nparts(self):
raise Exception("Please implement") | [
"noreply@github.com"
] | apache.noreply@github.com |
e2aecbe9701cbaf4eaa6a72fdedbf8eeb13950b0 | 0a727f3ffde045805b9b789abbaa9c8497667f8e | /CrossMgrCamera/AddPhotoHeader.py | a9ab798acf0f813bf0c8bc2a0fb48a2709f0153d | [
"MIT"
] | permissive | esitarski/CrossMgr | ff4a632089a144f6ecc57970e2b29a7c31a15118 | a95ac1d65f2d0cab712cc6e5f9393668c1bbf83c | refs/heads/master | 2023-08-30T22:48:43.457978 | 2023-08-24T14:12:44 | 2023-08-24T14:12:44 | 1,042,402 | 33 | 20 | MIT | 2023-04-30T13:32:11 | 2010-11-01T17:25:15 | Python | UTF-8 | Python | false | false | 6,801 | py | import wx
from wx.lib.agw.aui import LightColour
import os
import math
import Utils
class dotdict( object ):
    """Empty attribute bag: instances accept arbitrary dotted-attribute
    assignment (used below to hold the cached drawing resources)."""
def formatTime( secs ):
    """Format seconds (float or None) as 'HH:MM:SS.mmm' with millisecond precision."""
    fractional, whole = math.modf( secs or 0.0 )
    total = int( whole )
    minutesTotal, seconds = divmod( total, 60 )
    hours, minutes = divmod( minutesTotal, 60 )
    return '{:02d}:{:02d}:{:06.3f}'.format( hours, minutes, seconds + fractional )
def PilImageToWxImage( pil ):
    """Convert a PIL image to a wx.Image by copying its RGB pixel data."""
    width, height = pil.size
    wxImage = wx.Image( width, height )
    wxImage.SetData( pil.convert('RGB').tobytes() )
    return wxImage
drawResources = None	# Cached resources for drawing the photo header; (re)built by setDrawResources whenever the photo size changes.
def setDrawResources( dc, w, h ):
    """Build and cache the fonts, colours and logo bitmap used to draw the photo header.

    Everything is sized relative to the photo dimensions and stored in the
    module-level ``drawResources`` so the work is done only when the photo
    size changes.

    dc: device context used to measure text extents.
    w, h: photo width and height in pixels.
    """
    global drawResources
    drawResources = dotdict()
    drawResources.w = w
    drawResources.h = h

    # Base font height is proportional to the photo height.
    fontHeight = int(h/36.0)
    fontFace = Utils.FontFace

    # Bold font for the rider's bib number.
    drawResources.bibFontSize = fontHeight * 1.5
    drawResources.bibFont = wx.Font(
        (0, drawResources.bibFontSize),
        wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD,
        faceName=fontFace,
    )
    dc.SetFont( drawResources.bibFont )
    drawResources.bibWidth, drawResources.bibHeight = dc.GetTextExtent( u' 9999' )
    drawResources.bibTextColour = wx.Colour(0,0,200)
    # Width of one digit-sized space in the bib font (quarter of '9999').
    drawResources.bibSpaceWidth = dc.GetTextExtent( u'9999' )[0] / 4

    # Rider-name font: same size as the bib, but not bold.
    drawResources.nameFontSize = drawResources.bibFontSize
    drawResources.nameFont = wx.Font(
        (0, drawResources.nameFontSize),
        wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,
        faceName=fontFace,
    )
    drawResources.nameTextColour = drawResources.bibTextColour

    # Regular font for the time / race-name line.
    drawResources.fontSize = fontHeight * 1.0
    drawResources.font = wx.Font(
        (0, drawResources.fontSize),
        wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,
        faceName=fontFace,
    )
    dc.SetFont( drawResources.font )
    drawResources.spaceWidth = dc.GetTextExtent( u'9999' )[0] / 4

    drawResources.smallFontSize = drawResources.fontSize * 0.9
    drawResources.smallFont = wx.Font(
        (0, drawResources.smallFontSize),
        wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL,
        faceName=fontFace,
    )

    drawResources.fontColour = wx.BLACK

    # Measure a string with ascenders and descenders to get the full font height.
    dc.SetFont( drawResources.font )
    drawResources.fontHeight = dc.GetTextExtent( u'ATWgjjy' )[1]

    # Load the CrossMgr logo and scale it proportionally to the header height.
    bitmapHeight = drawResources.bibHeight * 2.8
    bitmap = wx.Bitmap( os.path.join(Utils.getImageFolder(), 'CrossMgrHeader.png'), wx.BITMAP_TYPE_PNG )
    scaleMult = float(bitmapHeight) / float(bitmap.GetHeight())
    image = bitmap.ConvertToImage()
    drawResources.bitmapWidth, drawResources.bitmapHeight = int(image.GetWidth() * scaleMult), int(image.GetHeight() * scaleMult)
    image.Rescale( drawResources.bitmapWidth, drawResources.bitmapHeight, wx.IMAGE_QUALITY_HIGH )
    drawResources.bitmap = image.ConvertToBitmap()

    # Gradient and border colours for the shaded header background.
    drawResources.fadeDark = wx.Colour(114+80,119+80,168+80)
    drawResources.fadeLight = LightColour( drawResources.fadeDark, 50 )
    drawResources.borderColour = wx.Colour( 71+50, 75+50, 122+50 )
def AddPhotoHeader( image, bib=None, time=None, raceSeconds=None, firstNameTxt=u'', lastNameTxt=u'', teamTxt=u'', raceNameTxt=u'' ):
    """Draw a two-line header (bib/name/team, then time/race name) onto a copy of *image*.

    image: source wx.Image; returns a new wx.Image of the same size with the
        header and CrossMgr logo drawn over the top of it.
    bib: rider's bib number (optional).
    time: datetime of the photo; raceSeconds: elapsed race time in seconds.
        Both must be provided for the time line to be rendered.
    firstNameTxt/lastNameTxt/teamTxt/raceNameTxt: header text fields.
    Returns a small blank wx.Bitmap if *image* is falsy.
    """
    global drawResources

    if not image:
        return wx.Bitmap(8, 8)

    bitmap = wx.Bitmap( image )
    w, h = bitmap.GetSize()

    dcMemory = wx.MemoryDC( bitmap )
    dc = wx.GCDC( dcMemory )

    # Rebuild the cached fonts/colours/logo only when the photo size changes.
    if drawResources is None or drawResources.w != w or drawResources.h != h:
        setDrawResources( dc, w, h )

    bibTxt = '{}'.format(bib) if bib else ''
    if time and raceSeconds:
        timeTxt = _('{} {}').format(
            formatTime(raceSeconds),
            time.strftime('%Y-%m-%d %H:%M:%S'),
        )
    else:
        timeTxt = u''
    # Drop a leading zero from the race time display (e.g. "01:..." -> "1:...").
    if timeTxt.startswith('0'):
        timeTxt = timeTxt[1:]
    nameTxt = u' '.join( n for n in [firstNameTxt, lastNameTxt] if n )

    frameWidth = 4
    borderWidth = 1

    bitmapWidth = drawResources.bitmapWidth
    bitmapHeight = drawResources.bitmapHeight
    bibSpaceWidth = drawResources.bibSpaceWidth
    spaceWidth = drawResources.spaceWidth

    # Text starts to the right of the logo bitmap.
    xText, yText = bitmapWidth, 0
    x = borderWidth
    y = borderWidth

    def shadedRect( x, y, w, h ):
        # Vertical gradient background: highlight on the top quarter, fade below.
        highlightTop = int(h/4.0)
        dc.GradientFillLinear( wx.Rect(0, y, w, highlightTop),
            drawResources.fadeDark, drawResources.fadeLight, wx.SOUTH )
        dc.GradientFillLinear( wx.Rect(0, y+highlightTop, w, h-highlightTop),
            drawResources.fadeDark, drawResources.fadeLight, wx.NORTH )

    def textInRect( txt, x, y, width, height, font=None, colour=None, alignment=wx.ALIGN_CENTER|wx.ALIGN_CENTRE_VERTICAL ):
        # Draw txt aligned inside the given rectangle, optionally switching font/colour first.
        if font:
            dc.SetFont( font )
        if colour:
            dc.SetTextForeground( colour )
        dc.DrawLabel( txt, wx.Rect(x, y, width, height), alignment )

    # First header line: bib, rider name, team.
    lineHeight = int(drawResources.bibHeight * 1.25 + 0.5)
    x += xText + frameWidth + bibSpaceWidth
    dc.SetPen( wx.Pen(drawResources.borderColour, borderWidth) )
    shadedRect( x, 0, w, lineHeight + borderWidth )
    dc.DrawLine( 0, 0, w, 0 )
    dc.DrawLine( xText, lineHeight, w, lineHeight )

    # Draw the bib.
    dc.SetFont( drawResources.bibFont )
    tWidth = dc.GetTextExtent( bibTxt )[0]
    textInRect( bibTxt, x, y, tWidth, lineHeight, drawResources.bibFont, drawResources.bibTextColour )

    # Draw the name and team.
    x += tWidth + spaceWidth
    textInRect( nameTxt, x, y, dc.GetTextExtent(nameTxt)[0], lineHeight, drawResources.nameFont, drawResources.bibTextColour )
    x += dc.GetTextExtent(nameTxt)[0] + spaceWidth
    remainingWidth = w - x - spaceWidth - borderWidth
    dc.SetFont( drawResources.font )
    teamTxtWidth = dc.GetTextExtent(teamTxt)[0]
    # Right-align the team name only if it fits in the space left on the line.
    if teamTxtWidth < remainingWidth:
        textInRect( teamTxt, x, y, remainingWidth, lineHeight, drawResources.font, wx.BLACK, alignment=wx.ALIGN_RIGHT|wx.ALIGN_CENTRE_VERTICAL )

    # Second header line background.
    y += lineHeight
    lineHeight = int( drawResources.fontHeight * 1.25 + 0.5 )
    shadedRect( 0, y, w, lineHeight )
    dc.DrawLine( 0, y+lineHeight, w, y+lineHeight )

    # Draw the time, race time and raceName.
    dc.SetFont( drawResources.font )
    x = borderWidth
    x += xText + frameWidth + bibSpaceWidth
    textInRect( timeTxt, x, y, w-x, lineHeight, drawResources.font, wx.BLACK, alignment=wx.ALIGN_LEFT|wx.ALIGN_CENTRE_VERTICAL )
    x += dc.GetTextExtent(timeTxt)[0] + spaceWidth
    remainingWidth = w - x - spaceWidth - borderWidth
    raceNameTxtWidth = dc.GetTextExtent(raceNameTxt)[0]
    # Right-align the race name only if it fits.
    if raceNameTxtWidth < remainingWidth:
        textInRect( raceNameTxt, x, y, remainingWidth, lineHeight, drawResources.font, wx.BLACK, alignment=wx.ALIGN_RIGHT|wx.ALIGN_CENTRE_VERTICAL )

    # Draw the bitmap.
    dc.DrawBitmap( drawResources.bitmap, frameWidth, frameWidth )

    # Draw a frame around the bitmap.
    dc.SetBrush( wx.TRANSPARENT_BRUSH )
    frameHalf = frameWidth // 2
    dc.SetPen( wx.Pen(drawResources.borderColour, frameWidth) )
    dc.DrawRectangle( frameHalf, frameHalf, bitmapWidth+frameHalf, bitmapHeight+frameHalf )
    dc.SetPen( wx.Pen(wx.WHITE, frameHalf) )
    dc.DrawRectangle( frameHalf, frameHalf, bitmapWidth+frameHalf, bitmapHeight+frameHalf )

    # Draw a border on the right side.
    dc.SetPen( wx.Pen(drawResources.borderColour, 1) )
    dc.DrawLine( w-1, 0, w-1, y+lineHeight )

    return bitmap.ConvertToImage()
| [
"edward.sitarski@gmail.com"
] | edward.sitarski@gmail.com |
860636ada08867c77753a3c24ddd474e3242f227 | 94f5e16caf4d10a6ac4ba7e9896b33c8d503f1bb | /migrations/versions/5f7a5ddbf7af_.py | 9a94c55035b427c0504ccd18b74553992c051ce5 | [] | no_license | hornLK/LKproject | 3dd760ad1c83b2d6faaddf66c32d4be16349c2d2 | 9f9b7c324b740aa215d5dd0ac7a7eecdb0a4ef0c | refs/heads/master | 2021-09-05T22:05:20.321201 | 2018-01-31T07:43:27 | 2018-01-31T07:43:27 | 112,689,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | """empty message
Revision ID: 5f7a5ddbf7af
Revises: 1d0cc801c49f
Create Date: 2018-01-15 20:24:46.074233
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5f7a5ddbf7af'        # this migration's id
down_revision = '1d0cc801c49f'   # previous migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add nullable ips.status (bool) and networks.desc (varchar(64)) columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('ips', sa.Column('status', sa.Boolean(), nullable=True))
    op.add_column('networks', sa.Column('desc', sa.String(length=64), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the columns added in upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('networks', 'desc')
    op.drop_column('ips', 'status')
    # ### end Alembic commands ###
| [
"bjlkq546449541@gmail.com"
] | bjlkq546449541@gmail.com |
9fb1d67a0e9b750a78cab4ce36eddb0fad97d4ca | fa33d9994e45348b28a4aa375575460a0a5ef100 | /bazaar/urls.py | 96301ff62b6682a4206f791fd7476f3220ddb817 | [] | no_license | KushalVijay/Smart-Buy | 6730b4cbb9951bfb9c59045af2c479574a6ad6e5 | 4bdcfc44826d6c1aaa2f10b507b181cd092e3cb0 | refs/heads/master | 2022-04-09T21:38:31.927219 | 2020-03-23T10:12:50 | 2020-03-23T10:12:50 | 249,395,588 | 11 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | """project3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from products.views import all_products
from accounts import urls as urls_accounts
from products import urls as urls_products
from cart import urls as urls_cart
from search import urls as urls_search
from checkout import urls as urls_checkout
# URL routing table; Django tries patterns top-to-bottom against the request path.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', all_products, name='index'),     # storefront landing page
    url(r'^accounts/', include(urls_accounts)),
    url(r'^products/', include(urls_products)),
    url(r'^checkout/', include(urls_checkout)),
    url(r'^cart/', include(urls_cart)),
    # NOTE(review): mounts the same account routes under a second prefix as ^accounts/ -- confirm intended.
    url(r'^user/', include(urls_accounts)),
    url(r'^search/', include(urls_search)),
    #url(r'^media/(?P<path>.*)$', static.serve,{'document_root': MEDIA_ROOT}),
]
]
if settings.DEBUG:
    # In development only, let Django itself serve static and media files.
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"vijaykushal8118@gmail.com"
] | vijaykushal8118@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.