blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ab618756d18481095af581ec2784df2054af7044 | 5abdbe26ad89d50761e505d02c35ea184d79f712 | /learning_logs/admin.py | a1d4a285a35b36f507aeeeb683fe621de3031bd8 | [] | no_license | liyongjun-brayan/xuexi | 5c00abaeadb46caa4a63fdcd316fabd2d1ebdb15 | b5356a5115b34dc1d5f627215aef780d7d5a0693 | refs/heads/master | 2021-06-25T10:25:12.602434 | 2019-08-27T02:27:23 | 2019-08-27T02:27:23 | 204,632,981 | 1 | 0 | null | 2021-06-10T21:54:15 | 2019-08-27T06:16:39 | Python | UTF-8 | Python | false | false | 167 | py | from django.contrib import admin
# Register your models here.
from learning_logs.models import Topic, Entry
admin.site.register(Topic)
admin.site.register(Entry)
| [
"johndoe@example.com"
] | johndoe@example.com |
409be856f4a7e354eaeef1155234db833b8c60d9 | efe036849aa46755d5dcc86dbdb682b750a318eb | /rl_coach/architectures/tensorflow_components/heads/ppo_head.py | 755ffa656b0bb7ced7c2efda502d88e03389f0c4 | [
"Apache-2.0"
] | permissive | danialkamran/coach | a1284b54f8cd59b9e7e1f49e55a31484fffd89cd | a4471389a429793fd871b225d3aaccbcf4c676ec | refs/heads/master | 2020-03-26T22:01:54.656300 | 2018-08-20T10:50:09 | 2018-08-20T10:50:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,153 | py | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorflow as tf
from rl_coach.base_parameters import AgentParameters
from rl_coach.spaces import BoxActionSpace, DiscreteActionSpace
from rl_coach.spaces import SpacesDefinition
from rl_coach.utils import eps
from rl_coach.architectures.tensorflow_components.heads.head import Head, HeadParameters, normalized_columns_initializer
from rl_coach.core_types import ActionProbabilities
class PPOHeadParameters(HeadParameters):
def __init__(self, activation_function: str ='tanh', name: str='ppo_head_params'):
super().__init__(parameterized_class=PPOHead, activation_function=activation_function, name=name)
class PPOHead(Head):
def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, network_name: str,
head_idx: int = 0, loss_weight: float = 1., is_local: bool = True, activation_function: str='tanh'):
super().__init__(agent_parameters, spaces, network_name, head_idx, loss_weight, is_local, activation_function)
self.name = 'ppo_head'
self.return_type = ActionProbabilities
# used in regular PPO
self.use_kl_regularization = agent_parameters.algorithm.use_kl_regularization
if self.use_kl_regularization:
# kl coefficient and its corresponding assignment operation and placeholder
self.kl_coefficient = tf.Variable(agent_parameters.algorithm.initial_kl_coefficient,
trainable=False, name='kl_coefficient')
self.kl_coefficient_ph = tf.placeholder('float', name='kl_coefficient_ph')
self.assign_kl_coefficient = tf.assign(self.kl_coefficient, self.kl_coefficient_ph)
self.kl_cutoff = 2 * agent_parameters.algorithm.target_kl_divergence
self.high_kl_penalty_coefficient = agent_parameters.algorithm.high_kl_penalty_coefficient
self.clip_likelihood_ratio_using_epsilon = agent_parameters.algorithm.clip_likelihood_ratio_using_epsilon
self.beta = agent_parameters.algorithm.beta_entropy
def _build_module(self, input_layer):
if isinstance(self.spaces.action, DiscreteActionSpace):
self._build_discrete_net(input_layer, self.spaces.action)
elif isinstance(self.spaces.action, BoxActionSpace):
self._build_continuous_net(input_layer, self.spaces.action)
else:
raise ValueError("only discrete or continuous action spaces are supported for PPO")
self.action_probs_wrt_policy = self.policy_distribution.log_prob(self.actions)
self.action_probs_wrt_old_policy = self.old_policy_distribution.log_prob(self.actions)
self.entropy = tf.reduce_mean(self.policy_distribution.entropy())
# Used by regular PPO only
# add kl divergence regularization
self.kl_divergence = tf.reduce_mean(tf.distributions.kl_divergence(self.old_policy_distribution, self.policy_distribution))
if self.use_kl_regularization:
# no clipping => use kl regularization
self.weighted_kl_divergence = tf.multiply(self.kl_coefficient, self.kl_divergence)
self.regularizations = self.weighted_kl_divergence + self.high_kl_penalty_coefficient * \
tf.square(tf.maximum(0.0, self.kl_divergence - self.kl_cutoff))
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, self.regularizations)
# calculate surrogate loss
self.advantages = tf.placeholder(tf.float32, [None], name="advantages")
self.target = self.advantages
# action_probs_wrt_old_policy != 0 because it is e^...
self.likelihood_ratio = tf.exp(self.action_probs_wrt_policy - self.action_probs_wrt_old_policy)
if self.clip_likelihood_ratio_using_epsilon is not None:
self.clip_param_rescaler = tf.placeholder(tf.float32, ())
self.input.append(self.clip_param_rescaler)
max_value = 1 + self.clip_likelihood_ratio_using_epsilon * self.clip_param_rescaler
min_value = 1 - self.clip_likelihood_ratio_using_epsilon * self.clip_param_rescaler
self.clipped_likelihood_ratio = tf.clip_by_value(self.likelihood_ratio, min_value, max_value)
self.scaled_advantages = tf.minimum(self.likelihood_ratio * self.advantages,
self.clipped_likelihood_ratio * self.advantages)
else:
self.scaled_advantages = self.likelihood_ratio * self.advantages
# minus sign is in order to set an objective to minimize (we actually strive for maximizing the surrogate loss)
self.surrogate_loss = -tf.reduce_mean(self.scaled_advantages)
if self.is_local:
# add entropy regularization
if self.beta:
self.entropy = tf.reduce_mean(self.policy_distribution.entropy())
self.regularizations = -tf.multiply(self.beta, self.entropy, name='entropy_regularization')
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, self.regularizations)
self.loss = self.surrogate_loss
tf.losses.add_loss(self.loss)
def _build_discrete_net(self, input_layer, action_space):
num_actions = len(action_space.actions)
self.actions = tf.placeholder(tf.int32, [None], name="actions")
self.old_policy_mean = tf.placeholder(tf.float32, [None, num_actions], "old_policy_mean")
self.old_policy_std = tf.placeholder(tf.float32, [None, num_actions], "old_policy_std")
# Policy Head
self.input = [self.actions, self.old_policy_mean]
policy_values = tf.layers.dense(input_layer, num_actions, name='policy_fc')
self.policy_mean = tf.nn.softmax(policy_values, name="policy")
# define the distributions for the policy and the old policy
self.policy_distribution = tf.contrib.distributions.Categorical(probs=self.policy_mean)
self.old_policy_distribution = tf.contrib.distributions.Categorical(probs=self.old_policy_mean)
self.output = self.policy_mean
def _build_continuous_net(self, input_layer, action_space):
num_actions = action_space.shape[0]
self.actions = tf.placeholder(tf.float32, [None, num_actions], name="actions")
self.old_policy_mean = tf.placeholder(tf.float32, [None, num_actions], "old_policy_mean")
self.old_policy_std = tf.placeholder(tf.float32, [None, num_actions], "old_policy_std")
self.input = [self.actions, self.old_policy_mean, self.old_policy_std]
self.policy_mean = tf.layers.dense(input_layer, num_actions, name='policy_mean',
kernel_initializer=normalized_columns_initializer(0.01))
if self.is_local:
self.policy_logstd = tf.Variable(np.zeros((1, num_actions)), dtype='float32',
collections=[tf.GraphKeys.LOCAL_VARIABLES])
else:
self.policy_logstd = tf.Variable(np.zeros((1, num_actions)), dtype='float32')
self.policy_std = tf.tile(tf.exp(self.policy_logstd), [tf.shape(input_layer)[0], 1], name='policy_std')
# define the distributions for the policy and the old policy
self.policy_distribution = tf.contrib.distributions.MultivariateNormalDiag(self.policy_mean, self.policy_std + eps)
self.old_policy_distribution = tf.contrib.distributions.MultivariateNormalDiag(self.old_policy_mean, self.old_policy_std + eps)
self.output = [self.policy_mean, self.policy_std]
| [
"gal.novik@intel.com"
] | gal.novik@intel.com |
60880b307495d767154fe596f68b0a05c24e4934 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/he_0532-4503/sdB_HE_0532-4503_lc.py | 5fe4f20f7122fe6399d904e826635f483dd2d08c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[83.41875,-45.026469], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_HE_0532-4503 /sdB_HE_0532-4503_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
a76680334402715f6493dcd8c93f9120713d231b | 25fa5fdc9f67738332bd6f95a1e4f038cd286890 | /이것이 코딩테스트다/ch05_DFS:BFS/음료수 얼려 먹기.py | d8e484185f873f9fe701d264e79b7d3577dd241e | [] | no_license | mandos1995/online_judge | b0cfd56e3391495f22b9832895cddcea70334349 | 9b90bffdcbfb5369e8dd5dafbb07f8e9e7050617 | refs/heads/main | 2023-08-02T19:29:03.716295 | 2021-10-04T15:10:34 | 2021-10-04T15:10:34 | 329,517,747 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | # n, m을 공백으로 구분하여 입력받기
n, m = map(int, input().split())
# 2차원 리스트의 맵 정보 입력받기
graph = []
for i in range(n):
graph.append(list(map(int, input())))
# DFS로 특정한 노드를 방문한 뒤에 연결된 모든 노드들도 방문
def dfs(x, y):
# 주어진 범위를 벗어나는 경우에는 즉시 종료
if x <= -1 or x >= n or y <= -1 or y>= m:
return False
# 현재 노드를 아직 방문하지 않았다면
if graph[x][y] == 0:
# 해당 노드 방문 처리
graph[x][y] = 1
# 상, 하, 좌, 우의 위치도 모두 재귀적으로 호출
dfs(x - 1, y)
dfs(x, y - 1)
dfs(x + 1, y)
dfs(x, y + 1)
return True
return False
# 모든 노드(위치)에 대하여 음료수 채우기
result = 0
for i in range(n):
for j in range(m):
# 현재 위치에서 DFS 수행
if dfs(i, j) == True:
result += 1
# 정답 출력
print(result) | [
"mandos19950620@gmail.com"
] | mandos19950620@gmail.com |
63c7e6fe54ba6dae4f1797752b42b55890887057 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02409/s766823066.py | 34c864d987f1742566592d17da40546e50f9c4f4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | n = int(input())
list = [[[0 for i in range(10)] for j in range(3)] for k in range(4)]
for i in range(n):
a,b,c,d = [int(j) for j in input().split()]
list[a-1][b-1][c-1] += d
for i in range(4):
for j in range(3):
for k in range(10):
print(" {0}".format(list[i][j][k]),end='')
print()
if i != 3:
print("####################") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6e33e1d8b5d3c083c46e467a888d86fe4a21f45d | cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101 | /st2common/st2common/models/db/execution.py | dcc363b0feb7a0410be85b1d1b8636dc23afc808 | [
"Apache-2.0"
] | permissive | Junsheng-Wu/st2 | 6451808da7de84798641882ca202c3d1688f8ba8 | c3cdf657f7008095f3c68b4132b9fe76d2f52d81 | refs/heads/master | 2022-04-30T21:32:44.039258 | 2020-03-03T07:03:57 | 2020-03-03T07:03:57 | 244,301,363 | 0 | 0 | Apache-2.0 | 2022-03-29T22:04:26 | 2020-03-02T06:53:58 | Python | UTF-8 | Python | false | false | 7,432 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mongoengine as me
from st2common import log as logging
from st2common.models.db import stormbase
from st2common.fields import ComplexDateTimeField
from st2common.util import date as date_utils
from st2common.util.secrets import get_secret_parameters
from st2common.util.secrets import mask_inquiry_response
from st2common.util.secrets import mask_secret_parameters
from st2common.constants.types import ResourceType
__all__ = [
'ActionExecutionDB',
'ActionExecutionOutputDB'
]
LOG = logging.getLogger(__name__)
class ActionExecutionDB(stormbase.StormFoundationDB):
RESOURCE_TYPE = ResourceType.EXECUTION
UID_FIELDS = ['id']
trigger = stormbase.EscapedDictField()
trigger_type = stormbase.EscapedDictField()
trigger_instance = stormbase.EscapedDictField()
rule = stormbase.EscapedDictField()
action = stormbase.EscapedDictField(required=True)
runner = stormbase.EscapedDictField(required=True)
# Only the diff between the liveaction type and what is replicated
# in the ActionExecutionDB object.
liveaction = stormbase.EscapedDictField(required=True)
status = me.StringField(
required=True,
help_text='The current status of the liveaction.')
start_timestamp = ComplexDateTimeField(
default=date_utils.get_datetime_utc_now,
help_text='The timestamp when the liveaction was created.')
end_timestamp = ComplexDateTimeField(
help_text='The timestamp when the liveaction has finished.')
parameters = stormbase.EscapedDynamicField(
default={},
help_text='The key-value pairs passed as to the action runner & action.')
result = stormbase.EscapedDynamicField(
default={},
help_text='Action defined result.')
context = me.DictField(
default={},
help_text='Contextual information on the action execution.')
parent = me.StringField()
children = me.ListField(field=me.StringField())
log = me.ListField(field=me.DictField())
# Do not use URLField for web_url. If host doesn't have FQDN set, URLField validation blows.
web_url = me.StringField(required=False)
meta = {
'indexes': [
{'fields': ['rule.ref']},
{'fields': ['action.ref']},
{'fields': ['liveaction.id']},
{'fields': ['start_timestamp']},
{'fields': ['end_timestamp']},
{'fields': ['status']},
{'fields': ['parent']},
{'fields': ['rule.name']},
{'fields': ['runner.name']},
{'fields': ['trigger.name']},
{'fields': ['trigger_type.name']},
{'fields': ['trigger_instance.id']},
{'fields': ['context.user']},
{'fields': ['-start_timestamp', 'action.ref', 'status']}
]
}
def get_uid(self):
# TODO Construct od from non id field:
uid = [self.RESOURCE_TYPE, str(self.id)]
return ':'.join(uid)
def mask_secrets(self, value):
result = copy.deepcopy(value)
liveaction = result['liveaction']
parameters = {}
# pylint: disable=no-member
parameters.update(value.get('action', {}).get('parameters', {}))
parameters.update(value.get('runner', {}).get('runner_parameters', {}))
secret_parameters = get_secret_parameters(parameters=parameters)
result['parameters'] = mask_secret_parameters(parameters=result['parameters'],
secret_parameters=secret_parameters)
if 'parameters' in liveaction:
liveaction['parameters'] = mask_secret_parameters(parameters=liveaction['parameters'],
secret_parameters=secret_parameters)
if liveaction.get('action', '') == 'st2.inquiry.respond':
# Special case to mask parameters for `st2.inquiry.respond` action
# In this case, this execution is just a plain python action, not
# an inquiry, so we don't natively have a handle on the response
# schema.
#
# To prevent leakage, we can just mask all response fields.
result['parameters']['response'] = mask_secret_parameters(
parameters=liveaction['parameters']['response'],
secret_parameters=[p for p in liveaction['parameters']['response']]
)
# TODO(mierdin): This logic should be moved to the dedicated Inquiry
# data model once it exists.
if self.runner.get('name') == "inquirer":
schema = result['result'].get('schema', {})
response = result['result'].get('response', {})
# We can only mask response secrets if response and schema exist and are
# not empty
if response and schema:
result['result']['response'] = mask_inquiry_response(response, schema)
return result
def get_masked_parameters(self):
"""
Retrieve parameters with the secrets masked.
:rtype: ``dict``
"""
serializable_dict = self.to_serializable_dict(mask_secrets=True)
return serializable_dict['parameters']
class ActionExecutionOutputDB(stormbase.StormFoundationDB):
"""
Stores output of a particular execution.
New document is inserted dynamically when a new chunk / line is received which means you can
simulate tail behavior by periodically reading from this collection.
Attribute:
execution_id: ID of the execution to which this output belongs.
action_ref: Parent action reference.
runner_ref: Parent action runner reference.
timestamp: Timestamp when this output has been produced / received.
output_type: Type of the output (e.g. stdout, stderr, output)
data: Actual output data. This could either be line, chunk or similar, depending on the
runner.
"""
execution_id = me.StringField(required=True)
action_ref = me.StringField(required=True)
runner_ref = me.StringField(required=True)
timestamp = ComplexDateTimeField(required=True, default=date_utils.get_datetime_utc_now)
output_type = me.StringField(required=True, default='output')
data = me.StringField()
meta = {
'indexes': [
{'fields': ['execution_id']},
{'fields': ['action_ref']},
{'fields': ['runner_ref']},
{'fields': ['timestamp']},
{'fields': ['output_type']}
]
}
MODELS = [ActionExecutionDB, ActionExecutionOutputDB]
| [
"wei.ying@easystack.cn"
] | wei.ying@easystack.cn |
423385b7603bb326c76dd43a32df2f7a1505d221 | 589b5eedb71d83c15d44fedf60c8075542324370 | /project/stock_project/barra_risk_model/barra_factor/cal_factor_barra_leverage.py | 17358e48ac842ad352fcf71dc23fe7800a3a6799 | [] | no_license | rlcjj/quant | 4c2be8a8686679ceb675660cb37fad554230e0d4 | c07e8f0f6e1580ae29c78c1998a53774a15a67e1 | refs/heads/master | 2020-03-31T07:15:48.111511 | 2018-08-27T05:29:00 | 2018-08-27T05:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | def cal_factor_barra_leverage_market_leverage():
name = 'TotalLiabilityDaily'
total_debt = get_h5_data(name)
name = 'TotalAssetDaily'
total_asset = get_h5_data(name)
debt_to_asset = total_debt.div(total_asset)
debt_to_asset = debt_to_asset.dropna(how='all')
out_path = 'E:\\4_代码\\pycharmprojects\\2_风险模型BARRA\\data\\barra_data\\raw_data\\'
debt_to_asset.to_csv(out_path + 'RAW_CNE5_LEVERAGE_MARKET_LEVERAGE.csv')
debt_to_asset = remove_extreme_value_mad_pandas(debt_to_asset)
debt_to_asset = normal_pandas(debt_to_asset)
out_path = 'E:\\4_代码\\pycharmprojects\\2_风险模型BARRA\\data\\barra_data\\standardization_data\\'
debt_to_asset.to_csv(out_path + 'NORMAL_CNE5_LEVERAGE_MARKET_LEVERAGE.csv')
def cal_factor_barra_leverage():
name = 'NORMAL_CNE5_LEVERAGE_MARKET_LEVERAGE'
leverage = get_barra_standard_data(name)
leverage = leverage.dropna(how='all')
out_path = 'E:\\4_代码\\pycharmprojects\\2_风险模型BARRA\\data\\barra_data\\raw_data\\'
leverage.to_csv(out_path + 'RAW_CNE5_LEVERAGE.csv')
leverage = remove_extreme_value_mad_pandas(leverage)
leverage = normal_pandas(leverage)
out_path = 'E:\\4_代码\\pycharmprojects\\2_风险模型BARRA\\data\\barra_data\\standardization_data\\'
leverage.to_csv(out_path + 'NORMAL_CNE5_LEVERAGE.csv')
if __name__ == '__main__':
cal_factor_barra_leverage_market_leverage()
cal_factor_barra_leverage()
| [
"1119332482@qq.com"
] | 1119332482@qq.com |
dfffd665730509eb19be752fae578c6918d50252 | 002b18b4e66d7536ce8538f65edcb0cf17472bf7 | /liaoxuefeng/webframework/app2.py | 146d43b72ca6e0bd9c5785af52b29a650d58b0a1 | [] | no_license | yslatgit/test-ysl | 987b6026ddd74c88bb81b41ce12e43733c458cb1 | c1a8858a8ad346913131c3bd9fb8ae8ea84c36a7 | refs/heads/master | 2020-04-10T15:16:55.898370 | 2018-12-21T06:51:09 | 2018-12-21T06:51:09 | 161,104,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,669 | py | from flask import Flask, request, render_template
import os, sqlite3
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def homeMeth():
return render_template('home.html')
@app.route('/login', methods=['POST'])
def loginMeth():
username = request.form['username']
password = request.form['password']
msg = dealInfo(username, password, 1)
if msg == True:
return render_template('login.html')
else:
return render_template('home.html', message = msg)
@app.route('/success', methods=['POST'])
def successMeth():
username = request.form['username']
password = request.form['password']
msg = dealInfo(username, password, 2)
if msg == True:
return render_template('success.html')
else:
return render_template('login.html', message = msg)
# type:1:保存 2:查询
def dealInfo(name, pwd, type):
msg = ""
print(name, pwd, type)
# 没有则建立数据库文件,有则建立连接
db_file = os.path.join(os.path.dirname(__file__), 'dbdb.db')
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# 获取该数据库下的所有表名
a = "select name from sqlite_master where type = 'table'"
cursor.execute(a)
tableNames = cursor.fetchall()
# 若无表,则新建表格'user'
if tableNames:
pass
else:
cursor.execute('create table user(username VARCHAR(20), password VARCHAR(20))')
# 判断用户名和密码是否为空
if name == '' or pwd == '':
return "用户名和密码不能为空"
# 查询该表格下是否有该条数据
cursor.execute("select * from user WHERE username = '%s'" %name)
values = cursor.fetchall()
if values:
for value in values:
if value[0] == name:
if type == 1:
cursor.close()
conn.close()
return "该用户名已存在,请重填注册信息。。。"
elif type == 2 and value[1] == pwd: # 信息一致,登录成功
cursor.close()
conn.close()
return True
msg = "密码错误,请重新输入"
else: # 没有查询到数据
if type == 1: # 信息保存成功,可以进行登录操作
cursor.execute("insert into user VALUES ('%s', '%s')" %(name, pwd))
cursor.close()
conn.commit()
conn.close()
return True
else:
msg = '没有此用户名信息,请核对。。。'
cursor.close()
conn.close()
return msg
if __name__ == '__main__':
app.run() | [
"986725816@qq.com"
] | 986725816@qq.com |
fe70e24316ee8a47560b7e331c0a5d8d453646d4 | 88849505c8d71c5fcc8d18fe2da3aa93a97f1e0e | /cupt/screen.py | 7e2100412973869b68b09de2b05d22f16142cf6c | [
"MIT"
] | permissive | mscroggs/KLBFAX | 5322e025c41b30c6f160699e742c988c9e47ea88 | 3aaaa0cfe3b9772caa0a87e639efd9bce5b6adf4 | refs/heads/master | 2021-04-09T16:38:08.581934 | 2018-06-25T12:18:23 | 2018-06-25T12:18:26 | 31,314,664 | 5 | 1 | null | 2017-07-26T19:21:13 | 2015-02-25T13:26:38 | Python | UTF-8 | Python | false | false | 830 | py | import curses
import config
from .cupt import CuPT
class Screen:
def __enter__(self):
import locale
locale.setlocale(locale.LC_ALL,"")
self.scr = curses.initscr()
self.cupt = CuPT(self.scr)
curses.start_color()
curses.use_default_colors()
curses.noecho()
curses.cbreak()
self.old = curses.curs_set(0)
self.scr.keypad(1)
curses.resizeterm(config.HEIGHT,config.WIDTH)
self.scr.refresh()
return self
def getch(self):
return self.scr.getch()
def __exit__(self,a,b,c):
curses.nocbreak()
curses.curs_set(self.old)
self.scr.keypad(0)
curses.echo()
curses.endwin()
class DummyScreen:
def __init__(self):
self.scr = None
self.cupt = CuPT(self.scr)
| [
"matthew.w.scroggs@gmail.com"
] | matthew.w.scroggs@gmail.com |
959ec00f00a0cf6fe78fac46268a67bebfb445ef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03565/s875696871.py | 6fdbfc635ad33e71b8d47334616f309ffe409d41 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | s = input()
T = input()
n_s = len(s)
n_t = len(T)
S = ""
for i in range(n_s-n_t+1):
s_i = s[i:n_t+i]
flag = True
for j in range(n_t):
if s_i[j] != "?":
if s_i[j] != T[j]:
break
else:
S = s[:i] + T + s[n_t+i:]
#print(S)
if S == "":
print("UNRESTORABLE")
exit()
S = list(S)
for i in range(n_s):
if S[i] == "?":
S[i] = "a"
print("".join(S))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c05430eb373a57834315638dedc9649acbcc0526 | df5b1caed735947b3cf6f4cdf46f3141ba9e2c5c | /boot3.spec | f7516251ea56958c76f03542e6131ca2e7dbd8d7 | [] | no_license | hcferreira-git/RDM | 33b303469ca0230ac1a0a82e74ba5c4fbe894837 | e53972cab2d416bbf44bb9c874d8d91d82b7d074 | refs/heads/master | 2021-03-31T16:36:29.549303 | 2020-04-18T16:14:59 | 2020-04-18T16:14:59 | 248,119,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['boot3.py'],
pathex=['C:\\Users\\Henrique\\Desktop\\bootzap'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='boot3',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )
| [
"="
] | = |
5cb5b444d884d749465d4e777e33c10b94b3b810 | 10857edc2c10a89077555595b253376dbdcd17cb | /main.py | a6684596c2dc869d436e1a05c48af4f0243a50d3 | [
"MIT"
] | permissive | pannal/libfilebot | dfe0a55e804b9118fc643057ac010432c903ac10 | 996736ec0139aeae95f44e7fdb0a748fe08214c5 | refs/heads/master | 2021-09-02T01:27:48.203475 | 2017-12-29T16:58:30 | 2017-12-29T16:58:30 | 115,065,680 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,648 | py | # coding=utf-8
import subprocess
import sys
import traceback
import logging
import re
import binascii
import types
from pipes import quote
from lib import find_executable
if sys.platform == "win32":
from pyads import ADS
logger = logging.getLogger(__name__)
def quote_args(seq):
return ' '.join(quote(arg) for arg in seq)
def win32_xattr(fn):
handler = ADS(fn)
return handler.get_stream_content("net.filebot.filename")
def default_xattr(fn):
if not default_xattr_bin:
raise Exception("Neither getfattr, attr nor filebot were found")
if "getfattr" in default_xattr_bin:
return ["getfattr", "-n", "user.net.filebot.filename", fn]
elif "attr" in default_xattr_bin:
return ["attr", "-g", "net.filebot.filename", fn]
return ["filebot", "-script", "fn:xattr", fn]
XATTR_MAP = {
"default": (
default_xattr,
lambda result: re.search('(?um)(net\.filebot\.filename(?=="|: )[=:" ]+|Attribute.+:\s)([^"\n\r\0]+)',
result).group(2)
),
"darwin": {
lambda fn: ["xattr", "-p", "net.filebot.filename", fn],
lambda result: binascii.unhexlify(result.replace(' ', '').replace('\n', '')).strip("\x00")
},
"win32": {
lambda fn: fn,
win32_xattr,
}
}
if sys.platform not in XATTR_MAP:
default_xattr_bin = find_executable("getfattr") or find_executable("attr") or find_executable("filebot")
def get_filebot_attrs(fn):
"""
Currently only supports the filebot filename attrs
:param fn: filename
:return:
"""
if sys.platform in XATTR_MAP:
logger.debug("Using native xattr calls for %s", sys.platform)
else:
logger.debug("Using %s for %s", default_xattr_bin, sys.platform)
args_func, match_func = XATTR_MAP.get(sys.platform, XATTR_MAP["default"])
args = args_func(fn)
if isinstance(args, types.ListType):
try:
output = subprocess.check_output(quote_args(args), stderr=subprocess.PIPE, shell=True)
except subprocess.CalledProcessError, e:
if e.returncode == 1:
logger.info(u"%s: Couldn't get filebot original filename", fn)
else:
logger.error(u"%s: Unexpected error while getting filebot original filename: %s", fn,
traceback.format_exc())
return
else:
output = args
try:
orig_fn = match_func(output)
return orig_fn.strip()
except:
logger.info(u"%s: Couldn't get filebot original filename" % fn)
if __name__ == "__main__":
print get_filebot_attrs(sys.argv[1])
| [
"panni@fragstore.net"
] | panni@fragstore.net |
025002537b75adc263d8ca8ed6e3eb0b03adc44b | 077a17b286bdd6c427c325f196eb6e16b30c257e | /08_FmtString-unit-tests/13_32/remenissions-work/exploit-FsRetShellcode.py | e5a56cd137bb5b1dc25714972fc3c717b69d43f1 | [] | no_license | KurSh/remenissions_test | 626daf6e923459b44b82521aa4cb944aad0dbced | 9dec8085b62a446f7562adfeccf70f8bfcdbb738 | refs/heads/master | 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | from pwn import *
import os
import sf
import sys
import signal
target = process("./chall-test_FmtString-13-x86")
gdb.attach(target, execute="verify_exploit")
bof_payload = sf.BufferOverflow(arch=32)
target.recvuntil("Tell me I was never good enough: ")
leak = int(target.recvline().strip(b"\n"), 16)
ret_address = leak + (108)
fs = sf.WriteFmtStr(
arch = 32,
value = -0x46,
address = 0x0,
offset = 0x4,
printed_bytes = 0x0,
alignment_bytes = 0x0,
value_base = ret_address,
address_base = ret_address)
payload = sf.BufferOverflow(arch=32, start=108)
payload.add_bytes(108, fs.generate_fmt_str())
payload.add_bytes(70, b"\x83\xec\x7f\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x89\xe2\x53\x89\xe1\x04\x05\x04\x06\xcd\x80\xb0\x01\x31\xdb\xcd\x80")
target.sendline(payload.generate_payload())
# Exploit Verification starts here 15935728
def handler(signum, frame):
raise Exception("Timed out")
signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
try:
while True:
target.recvall(timeout=2)
except Exception:
print("Exploit timed out")
| [
"ryancmeinke@gmail.com"
] | ryancmeinke@gmail.com |
072c86d1641e5db25afaccbb5f221e35910e72be | ea1703dbfedb3abced6dad5acf6f43b341e881ab | /Chapter 1/stringTripleQuotes.py | 51aa135c8109918af8dc6397df8ef1d6f2778a54 | [] | no_license | NathanZuidema/Learn-Python-for-Stats-and-Econ | 2019927cf536411a73049404a8cc15bc12ee7633 | cb589999f1275754e58994ef84c85ccc702707b5 | refs/heads/master | 2020-04-18T08:43:07.756614 | 2019-01-24T16:50:22 | 2019-01-24T16:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | #stringTripleQuotes.py
x = """ Everything in this object will be recorded exactly as entered,
if we enter a new line or
a new line with a tab."""
print(x) | [
"jlcatonjr@gmail.com"
] | jlcatonjr@gmail.com |
9e36b3b15685c1ec32b9a2db9b529d46cf6d65ca | b333dc607a2f1556f6a8adb6d16dc88fa8a30c8b | /portal/libs/scripts/one_time/20200814_migrate_visitorsmdb.py | 028a68923f56bb5d6dba9357067bc1e62e676452 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hernan0216/utopia-cms | 6558f8f600620c042dd79c7d2edf18fb77caebb8 | 48b48ef9acf8e3d0eb7d52601a122a01da82075c | refs/heads/main | 2023-02-06T10:31:35.525180 | 2020-12-15T17:43:28 | 2020-12-15T17:43:28 | 321,775,279 | 1 | 0 | BSD-3-Clause | 2020-12-15T19:59:17 | 2020-12-15T19:59:16 | null | UTF-8 | Python | false | false | 2,070 | py | # -*- coding: utf-8 -*-
# la diaria 2020. Aníbal Pacheco.
"""
1. set SIGNUPWALL_MONGODB_VISITOR to None in local_settings before running this script in a "live" environment.
2. touch uwsgi.ini.
3. run this script.
4. drop old table:
mongo
> use ldsocial_signupwall_visitor
> db.dropDatabase()
5. rename new table with the old table name and add indexes:
mongodump --archive="visitor_new" --db=ldsocial_signupwall_visitor_new
mongorestore --archive="visitor_new" --nsFrom='ldsocial_signupwall_visitor_new.*' --nsTo='ldsocial_signupwall_visitor.*'
mongo
> use ldsocial_signupwall_visitor_new
> db.dropDatabase()
> use ldsocial_signupwall_visitor
> db.posts.createIndex({'timestamp': -1})
> db.posts.createIndex({'session_key': -1})
> db.posts.createIndex({'ip_address': -1})
> db.posts.createIndex({'user': -1})
6. return the local setting to its original value.
7. deploy branch with the modifications to use the new table.
"""
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
from progress.bar import Bar
try:
client = MongoClient(serverSelectionTimeoutMS=1000)
client.server_info()
signupwall_visitor_mdb = client['ldsocial_signupwall_visitor']
signupwall_visitor_mdb_new = client['ldsocial_signupwall_visitor_new']
except ServerSelectionTimeoutError:
signupwall_visitor_mdb = signupwall_visitor_mdb_new = None
if signupwall_visitor_mdb and signupwall_visitor_mdb_new:
visitors = signupwall_visitor_mdb.posts.find({'paths_visited': {'$exists': True}}, no_cursor_timeout=True)
bar = Bar('Processing ...', max=visitors.count())
for v in visitors:
paths_visited = v.get('paths_visited')
if paths_visited:
migrated = {'timestamp': v['last_update'], 'session_key': v['session_key'], 'ip_address': v['ip_address']}
user = v.get('user')
if user:
migrated.update({'user': user})
signupwall_visitor_mdb_new.posts.insert_many([dict(migrated, path_visited=p) for p in paths_visited])
bar.next()
bar.finish()
| [
"apacheco@ladiaria.com.uy"
] | apacheco@ladiaria.com.uy |
6f9b54e564353b153d9722ad704799455a77f0b0 | 578e8f18c12e425f7a17210fb3c324ef1fac61c1 | /video_ex/streaming/video-client.py | e26013024013d5141e772ca25fa6e1696521f2eb | [] | no_license | jeonghaejun/09.RaspberryPi | 06c513add59d34e34a340f10cffa7a5aca075bd2 | 7a9145d985b5bfd227cc5bb90e5a2ac280292550 | refs/heads/master | 2023-03-24T23:15:57.693146 | 2021-03-19T08:56:08 | 2021-03-19T08:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | from video import Video
from time import sleep
import socket
import json
import net
HOST = '127.0.0.1'
PORT = 5000
if __name__ == '__main__':
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
writer = s.makefile('wb')
reader = s.makefile('rb')
with Video(device=0) as v:
for image in v:
image = Video.to_jpg(image, 60)
print('video send ', len(image))
net.send(writer, image)
result = net.receive(reader)[0]
print(json.loads(result.decode()))
| [
"wjdgownsll@gmail.com"
] | wjdgownsll@gmail.com |
317e5b0993195cab701b749f5687da23f96ed43f | d79493f0f9b2664a597e6a2909516d2a582b3389 | /smote.py | a25ec68c13b417285fe0f30fba16fbb75143194d | [] | no_license | vivekaxl/Enigma | 8594c6efdd6ca7b5081bdd43772a17a426bb61bf | d4e9ae627c027181881b2b3369dd502c40254921 | refs/heads/master | 2020-05-18T06:16:31.093635 | 2015-02-05T16:24:20 | 2015-02-05T16:24:20 | 30,219,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | py | # from os import environ, getcwd
# import sys
# from scipy.spatial.distance import euclidean
# # Update PYTHONPATH
# HOME = environ['HOME']
# axe = HOME + '/git/axe/axe/' # AXE
# pystat = HOME + '/git/pystats/' # PySTAT
# cwd = getcwd() # Current Directory
# sys.path.extend([axe, pystat, cwd])
# from random import choice, seed, uniform as rand
# import pandas as pd
# from dectree import *
# def SMOTE(data = None, k = 5, atleast = 50, atmost = 100):
# def Bugs(tbl):
# cells = [i.cells[-2] for i in tbl._rows]
# return cells
# def minority(data):
# unique = list(set(sorted(Bugs(data))))
# counts = len(unique) * [0];
# # set_trace()
# for n in xrange(len(unique)):
# for d in Bugs(data):
# if unique[n] == d: counts[n] += 1
# return unique, counts
# def knn(one, two):
# pdistVect = []
# # set_trace()
# for ind, n in enumerate(two):
# pdistVect.append([ind, euclidean(one.cells[:-1], n.cells[:-1])])
# indices = sorted(pdistVect, key = lambda F:F[1])
# return [two[n[0]] for n in indices]
# def extrapolate(one, two):
# new = one;
# # set_trace()
# new.cells[3:-1] = [min(a, b) + rand() * (abs(a - b)) for
# a, b in zip(one.cells[3:-1], two.cells[3:-1])]
# new.cells[-2] = int(new.cells[-2])
# return new
# def populate(data):
# newData = []
# reps = len(data) - atleast
# for _ in xrange(reps):
# for one in data:
# neigh = knn(one, data)[1:k + 1];
# two = choice(neigh)
# newData.append(extrapolate(one, two))
# data.extend(newData)
# return data
# def depopulate(data):
# return [choice(data) for _ in xrange(atmost)]
# newCells = []
# seed(1)
# unique, counts = minority(data)
# rows = data._rows
# for u, n in zip(unique, counts):
# if 1 < n < atleast:
# newCells.extend(populate([r for r in rows if r.cells[-2] == u]))
# elif n > atmost:
# newCells.extend(depopulate([r for r in rows if r.cells[-2] == u]))
# else:
# newCells.extend([r for r in rows if r.cells[-2] == u])
# return clone(data, rows = [k.cells for k in newCells])
# def test_smote():
# dir = '../Data/camel/camel-1.6.csv'
# Tbl = createTbl([dir])
# newTbl = SMOTE(data = Tbl)
# for r in newTbl._rows:
# print r.cells
# if __name__ == '__main__':
# test_smote() | [
"vivekaxl@gmail.com"
] | vivekaxl@gmail.com |
f824fb4fc38a34d6258a36c61bb44234d657c45d | 9a05e1e8c950b091124d805ea70f24d2837b827c | /daydayup/cema_python/eight/requestdemo4.py | 618e089e2210c839c52184cb85beb8e835dad219 | [] | no_license | fanzongpeng/mywork | 20676a9fe0e0599461a756ad194e4bd35aad4668 | aa6d044bbab3c0288de48888b2cc7dbd7785c91b | refs/heads/master | 2022-05-31T06:03:26.826914 | 2020-04-30T09:50:22 | 2020-04-30T09:50:22 | 257,189,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,011 | py | import json
dict = {
'message': 'success',
'url': '',
'data': {
'address': '北京',
'eid': 1,
'name': '这是一个测试',
'limit': 2000,
'start_time': '2019-05-31T15:25:19',
'status': True
},
'status': 200
}
# json = json.dumps(dict)
# print(json)
schema = {
"definitions": {},
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "http://example.com/root.json",
"type": "object",
"title": "The Root Schema",
"required": [
"message",
"url",
"data",
"status"
],
"properties": {
"message": {
"$id": "#/properties/message",
"type": "string",
"title": "The Message Schema",
"enum": ["success", "error"],
"pattern": "^(.*)$"
},
"url": {
"$id": "#/properties/url",
"type": "string",
"title": "The url",
"format": "uri",
},
"data": {
"$id": "#/properties/data",
"type": "object",
"title": "The Data Schema",
"required": [
"address",
"eid",
"name",
"limit",
"start_time",
"status"
],
"properties": {
"address": {
"$id": "#/properties/data/properties/address",
"type": "string",
"title": "The Address Schema",
"default": "",
"examples": [
"成都"
],
"pattern": "^(.*)$"
},
"eid": {
"$id": "#/properties/data/properties/eid",
"type": "integer",
"title": "The Eid Schema",
"default": 0,
"examples": [
1
]
},
"name": {
"$id": "#/properties/data/properties/name",
"type": "string",
"title": "The Name Schema",
"default": "",
"examples": [
"这是也是汉字"
],
"pattern": "^(.*)$"
},
"limit": {
"$id": "#/properties/data/properties/limit",
"type": "integer",
"title": "The Limit Schema",
"default": 0,
"examples": [
2000
]
},
"start_time": {
"$id": "#/properties/data/properties/start_time",
"type": "string",
"title": "The Start_time Schema",
"default": "",
"format": "date-time",
"examples": [
"2017-11-21T15:25:19"
],
"pattern": "^(.*)$"
},
"status": {
"$id": "#/properties/data/properties/status",
"type": "boolean",
"title": "The Status Schema",
"default": False,
"examples": [
True
]
}
}
},
"status": {
"$id": "#/properties/status",
"type": "integer",
"title": "The Status Schema",
"default": 0,
"examples": [
200
]
}
}
}
from jsonschema.validators import Draft4Validator
data = dict
validator = Draft4Validator(schema)
validator.validate(data) | [
"18210023228.com"
] | 18210023228.com |
87421e86c5d1275f774023eab4e876ea28c4e92e | 33c2c804fd38483755093628c6b08f31c296bd9f | /nlp/sentiment_analysis/MNB_classifier.py | ffa14200cd31e7f289886d0a47a469b9c55b25d7 | [] | no_license | thefr33radical/projects | 1ec51e5ec2d8b4ff671d7aa9a6bb72f6292f9b52 | ec26e509b64c58c79facadaf6345ab77f8ae7bd7 | refs/heads/master | 2021-06-02T19:21:12.369653 | 2021-04-23T12:46:52 | 2021-04-23T12:46:52 | 135,284,488 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import metrics
import numpy as np
from sklearn.naive_bayes import MultinomialNB
import pandas as pd
import os
from sklearn.datasets import load_files
import glob
import sys
import re
import pickle
mem = Memory("/home/gowtham/drive/codes/racetrack/sentiment_analysis/aclImdb/mycache")
subset = load_files('/home/gowtham/drive/codes/racetrack/sentiment_analysis/aclImdb/train',shuffle='False',encoding='utf-8')
subset2=load_files('/home/gowtham/drive/codes/racetrack/sentiment_analysis/aclImdb/test',shuffle='False',encoding='utf-8')
for i in range(0,len(subset.data)):
f_name=subset.filenames[i]
temp=f_name.split("_")
temp2=temp[2].split(".")
subset.target[i]=int(temp2[0])
# print((subset.data[i]), subset.filenames[i],subset.target[i])
v=CountVectorizer()
X=v.fit_transform(subset.data)
#print(v.get_feature_names())
model=MultinomialNB()
model.fit(X,subset.target)
#print(v.vocabulary_)
for i in range(0,len(subset2.data)):
f_name=subset2.filenames[i]
temp=f_name.split("_")
temp2=temp[2].split(".")
subset2.target[i]=int(temp2[0])
# print((subset.data[i]), subset.filenames[i],subset.target[i])
filename=("model.sav")
pickle.dump(model,open(filename,"wb"))
#--------------------------Testing-------------------------------------------------------
X2=v.transform(subset2.data)
expected=subset2.target
predicted=model.predict(X2)
c=pd.DataFrame({'test_data':subset2.data,'actual_value':subset2.target,'predicted':predicted})
c.to_csv("output.csv")
l=['this is very good.',
'this is bad.',
'this is very bad.',
'this is not good.',
'this is not what i had expected.',
'you are taking too much time.',
'this is good',
'this is awesome',
'this is slighly good',
'i expected better than this',
'this is much more than my expectation',
'this is something i love',
'this is something i hate',
'you are taking a hell lot of time.']
X3=v.transform(l)
predicted2=model.predict(X3)
c2=pd.DataFrame({'test_data':l,'predicted':predicted2})
c2.to_csv("output2.csv")
report=(metrics.classification_report(expected, predicted))
con_matrix=(metrics.confusion_matrix(expected, predicted))
print(report,con_matrix)
#with open("report.txt","w+") as f:
# f.write(report)
# f.write(con_matrix)
| [
"imperial.gauntlet@gmail.com"
] | imperial.gauntlet@gmail.com |
d048808d665f0b2335b0686f1f4c78264ffa56f2 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/ConfigParser/ConfigParser_allow_no_value.py | df8d6ea50bcdf1b9e1f5e8c326f47e315b219efe | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 771 | py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Reading a configuration file.
"""
#end_pymotw_header
import ConfigParser
# Require values
try:
parser = ConfigParser.SafeConfigParser()
parser.read('allow_no_value.ini')
except ConfigParser.ParsingError, err:
print 'Could not parse:', err
# Allow stand-alone option names
print '\nTrying again with allow_no_value=True'
parser = ConfigParser.SafeConfigParser(allow_no_value=True)
parser.read('allow_no_value.ini')
for flag in [ 'turn_feature_on', 'turn_other_feature_on' ]:
print
print flag
exists = parser.has_option('flags', flag)
print ' has_option:', exists
if exists:
print ' get:', parser.get('flags', flag)
| [
"350840291@qq.com"
] | 350840291@qq.com |
e67cd7a58f5a860c679aa449643fff683cf93bf0 | 9b8ca63a377e6f94cc6a970cc97a6f7f50932811 | /sale_analysis_report/__openerp__.py | 1aff230cf327cf05e60e952bcf9687f43b84d1a5 | [
"Apache-2.0"
] | permissive | lester-lees/extra_addons_sz | 9b6d2400abe4707b7b18d9e2e9caf2fb366cf3a6 | cddaf972cf4ea64c553bcff0006eb006a115d5ee | refs/heads/master | 2021-01-06T20:43:28.782147 | 2017-08-07T06:51:45 | 2017-08-07T06:51:45 | 99,545,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | # -*- coding: utf-8 -*-
{
"name": "Sale Analysis Report",
"description": """
""",
"version": "0.1",
"depends": ["base", "sale", "report_webkit"],
"category": "Reporting",
"author": "Jimmy Lee",
"url": "http://www.loewie.com/",
"update_xml": ["sale_report_view.xml","loewie_purchase_report.xml"],
"installable": True,
"auto_install": False,
} | [
"346994202@qq.com"
] | 346994202@qq.com |
7e05461799b0f76803a45bd5d539ec259176b050 | 2a28bb9594fe98a6e8934b6e5eb952baa3a0b803 | /Tools/Scripts/libraries/webkitscmpy/webkitscmpy/__init__.py | 9f3ebe6570c7c695504043d655d51c588165c7f4 | [] | no_license | yolin1020/WebKit | 154b0f177b0d48be66e78717e85d98e546dcf9c5 | b7e84a4224b3934868bc08f5c89b583355a6c87a | refs/heads/main | 2023-02-02T22:48:08.044648 | 2020-12-18T19:34:39 | 2020-12-18T19:34:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,698 | py | # Copyright (C) 2020 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import sys
log = logging.getLogger('webkitscmpy')
def _maybe_add_webkitcorepy_path():
# Hopefully we're beside webkitcorepy, otherwise webkitcorepy will need to be installed.
libraries_path = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
webkitcorepy_path = os.path.join(libraries_path, 'webkitcorepy')
if os.path.isdir(webkitcorepy_path) and os.path.isdir(os.path.join(webkitcorepy_path, 'webkitcorepy')) and webkitcorepy_path not in sys.path:
sys.path.insert(0, webkitcorepy_path)
_maybe_add_webkitcorepy_path()
try:
from webkitcorepy import AutoInstall, Package, Version
except ImportError:
raise ImportError(
"'webkitcorepy' could not be found on your Python path.\n" +
"You are not running from a WebKit checkout.\n" +
"Please install webkitcorepy with `pip install webkitcorepy --extra-index-url <package index URL>`"
)
version = Version(0, 6, 4)
AutoInstall.register(Package('fasteners', Version(0, 15, 0)))
AutoInstall.register(Package('monotonic', Version(1, 5)))
AutoInstall.register(Package('xmltodict', Version(0, 12, 0)))
from webkitscmpy.contributor import Contributor
from webkitscmpy.commit import Commit
from webkitscmpy.scm_base import ScmBase
from webkitscmpy import local
from webkitscmpy import mocks
name = 'webkitscmpy'
| [
"jbedard@apple.com"
] | jbedard@apple.com |
4d32b5dceb9de6e224b0bfe285ac1b75465b2816 | 45954869eb53b1f6fe4b675494b72a76fcac534c | /instagram/admin.py | d5b6d964da407dab1a778d7eba0041e2bb736d71 | [
"MIT"
] | permissive | Jmos-Mbugua/Insta-clone | b69f39a9d3e7ad8b21bcf77a4695a17a4dc75b74 | 85ab0f3ee93c2ed5b9778058e3df31e25563e5e8 | refs/heads/master | 2022-12-14T22:16:14.829459 | 2020-02-14T12:59:16 | 2020-02-14T12:59:16 | 239,035,462 | 0 | 0 | null | 2022-12-08T03:36:40 | 2020-02-07T22:42:39 | Python | UTF-8 | Python | false | false | 228 | py | from django.contrib import admin
from .models import Location, Post,Comment, Profile
# Register your models here.
admin.site.register(Location)
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Profile) | [
"johnmbugua849@gmail.com"
] | johnmbugua849@gmail.com |
a764645348053f284abb738a5febafeabb9cc262 | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/redis/redis/commands/search/query.pyi | eb1846bab957d9c44c9913f2791264cc54783e40 | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 1,654 | pyi | from _typeshed import Incomplete
from typing import Any
class Query:
def __init__(self, query_string) -> None: ...
def query_string(self): ...
def limit_ids(self, *ids): ...
def return_fields(self, *fields): ...
def return_field(self, field, as_field: Incomplete | None = None): ...
def summarize(
self,
fields: Incomplete | None = None,
context_len: Incomplete | None = None,
num_frags: Incomplete | None = None,
sep: Incomplete | None = None,
): ...
def highlight(self, fields: Incomplete | None = None, tags: Incomplete | None = None): ...
def language(self, language): ...
def slop(self, slop): ...
def in_order(self): ...
def scorer(self, scorer): ...
def get_args(self): ...
def paging(self, offset, num): ...
def verbatim(self): ...
def no_content(self): ...
def no_stopwords(self): ...
def with_payloads(self): ...
def with_scores(self): ...
def limit_fields(self, *fields): ...
def add_filter(self, flt): ...
def sort_by(self, field, asc: bool = True): ...
def expander(self, expander): ...
class Filter:
args: Any
def __init__(self, keyword, field, *args) -> None: ...
class NumericFilter(Filter):
INF: str
NEG_INF: str
def __init__(self, field, minval, maxval, minExclusive: bool = False, maxExclusive: bool = False) -> None: ...
class GeoFilter(Filter):
METERS: str
KILOMETERS: str
FEET: str
MILES: str
def __init__(self, field, lon, lat, radius, unit="km") -> None: ...
class SortbyField:
args: Any
def __init__(self, field, asc: bool = True) -> None: ...
| [
"noreply@github.com"
] | sobolevn.noreply@github.com |
db14263818ca2ec53409ec3e900b8bb1024d43c3 | 86a26119af259e3858cb5e57ea2e41e3b25c5fa7 | /Python Project/StockerLogin_support.py | e025cae333394920cf0640976bbfb93881497cb4 | [] | no_license | deshmukhshweta/project2 | 747ca7972a7bfdc4aed20dbb4ee3f6d2f009ca83 | 8bf07454d259456dc616e7283c266b35fe7b870d | refs/heads/master | 2020-04-19T09:57:05.541157 | 2019-01-29T09:27:01 | 2019-01-29T09:27:01 | 168,125,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Support module generated by PAGE version 4.13
# In conjunction with Tcl version 8.6
# May 27, 2018 02:27:42 PM
import sys
from tkinter import messagebox
from firebase import firebase as fb
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
def init(top, gui, *args, **kwargs):
global w, top_level, root
w = gui
top_level = top
root = top
def destroy_window():
# Function which closes the window.
global top_level
top_level.destroy()
top_level = None
import Stocker_Home
idno = ""
def Stocker_validation():
global idno
idno = w.Admin_username.get()
password = w.Admin_password.get()
if(idno == ""):
messagebox.showerror("Stocker Login","Please Enter Username")
else:
if(password == ""):
messagebox.showerror("Stocker Login","Please Enter Password")
else:
fire = fb.FirebaseApplication("https://python-project-2d5d6.firebaseio.com/merchant/employee/stocker",None)
result = fire.get("stocker", idno)
if result != None:
if result["password"] == password:
destroy_window()
Stocker_Home.vp_start_gui()
else:
messagebox.showerror("stocker login","Invalid Password")
else:
messagebox.showerror("stocker login","Invalid Idno")
return idno
def Stocker_Cancel():
destroy_window()
| [
"123deshmukhshweta@gmail.com"
] | 123deshmukhshweta@gmail.com |
95b1b870a5d43d004cda91f86fbfa2c7bf12bd8c | 9dee94907e6456a4af9855d358693923c17b4e0d | /0451_Sort_Characters_By_Frequency.py | 92254fbc5d08ee4386acef0c4237627f0dffc781 | [] | no_license | chien-wei/LeetCode | e215915a8103e56f182040dacc9fb0d6996c86ec | 0d6f414e7610fedb2ec4818ecf88d51aa69e1355 | refs/heads/master | 2021-05-13T14:48:22.891100 | 2019-08-20T05:52:59 | 2019-08-20T05:52:59 | 116,749,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | class Solution:
def frequencySort(self, s):
"""
:type s: str
:rtype: str
"""
res = ""
print(collections.Counter(s))
for c, i in collections.Counter(s).most_common():
res += c*i
return res | [
"chien-wei@outlook.com"
] | chien-wei@outlook.com |
949c6d9f07351de9dce2dce42d9cd57e27bac03d | 0920b50773cfd231137d2383695a6730d0678628 | /pylib/keys.py | e9b88d82a59a5096cf7a1651b31216abd9793056 | [] | no_license | chyser/bin | 05b67cf299b0e427e253abc42ca015fcdec8e84c | b54f23c6c5f1f19e426ee06c9e9faf9f561ee9a9 | refs/heads/master | 2021-01-19T19:35:05.801722 | 2015-08-19T17:58:29 | 2015-08-19T17:58:29 | 17,319,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | #!/usr/bin/env python
"""
Library:
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import string
__QuickChars = '0123456789abcdefghijkmnopqrstuvwxyzABCDEFGHIJKLMNPQRSTUVWXYZ-_+$'
__sd = {}
for idx, ch in enumerate(__QuickChars):
__sd[ch] = idx
#-------------------------------------------------------------------------------
def cvtNum2QChars(num, length=None):
#-------------------------------------------------------------------------------
if num == 0:
s = ['0']
else:
s = []
while num > 0:
s.insert(0, __QuickChars[num & 0b00111111])
num >>= 6
if length:
l = length - len(s)
if l > 0:
s = (['0']*l) + s
#s.reverse()
return ''.join(s)
#-------------------------------------------------------------------------------
def cvtQChars2Num(s):
#-------------------------------------------------------------------------------
num = 0
for ch in s:
num = num << 6 | __sd[ch]
return num
__SimpleChars = string.digits + string.letters
__ManyChars = __SimpleChars + '_()[]+-@!~:;{}|'
__PrintableChars = string.printable[:94]
#-------------------------------------------------------------------------------
def cvtNum2Chars(num, srcChars):
#-------------------------------------------------------------------------------
s = []
mod = len(srcChars)
while num > 0:
num, idx = divmod(num, mod)
s.append(srcChars[idx])
return ''.join(s)
#-------------------------------------------------------------------------------
def cvtNum2AllChars(num):
#-------------------------------------------------------------------------------
return cvtNum2Chars(num, __PrintableChars)
#-------------------------------------------------------------------------------
def cvtNum2SimpleChars(num):
#-------------------------------------------------------------------------------
return cvtNum2Chars(num, __SimpleChars)
#-------------------------------------------------------------------------------
def cvtNum2ManyChars(num):
#-------------------------------------------------------------------------------
return cvtNum2Chars(num, __ManyChars)
#-------------------------------------------------------------------------------
def __test__(verbose=False):
#-------------------------------------------------------------------------------
"""
used for automated module testing. see L{tester}
"""
import pylib.tester as tester
import random
for i in range(100):
n = random.randint(0, 9999999999999999999999999999999999999999999)
s = cvtNum2QChars(n)
a = cvtQChars2Num(s)
print(s, a)
tester.Assert(n == a)
for i in range(100):
n = random.randint(0, 9999999)
s = cvtNum2QChars(n)
a = cvtQChars2Num(s)
print(s, a)
tester.Assert(n == a)
return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
#-------------------------------------------------------------------------------
import pylib.osscripts as oss
args, opts = oss.gopt(oss.argv[1:], [], [], __test__.__doc__)
s = cvtNum2SChars(-123456789, 16)
print(s)
print(cvtSChars2Num(s))
res = not __test__(verbose=True)
#oss.exit(res)
| [
"chris.hyser@oracle.com"
] | chris.hyser@oracle.com |
4559f5f956f5f1d1aca521001d1a56aa006e342c | c2e969a4a54d54426675639a1dc8e0cb86e7a272 | /mbed_devices/_internal/mbed_tools/list_connected_devices.py | 924000ba8a97dddb551a8d8bf57ce56ae2f90fbe | [
"Apache-2.0"
] | permissive | ARMmbed/mbed-devices | e773caf78b29c5f1eb2e59485c6e4a2847630eef | d9f459cbe47a341734c0813ebcdd25633237e1d9 | refs/heads/master | 2023-03-16T15:58:40.202451 | 2020-04-28T14:26:43 | 2020-04-28T14:26:43 | 215,789,280 | 3 | 0 | Apache-2.0 | 2020-07-09T21:34:01 | 2019-10-17T12:40:04 | Python | UTF-8 | Python | false | false | 3,148 | py | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""List all devices cli command."""
import click
import json
from operator import attrgetter
from typing import Iterable
from tabulate import tabulate
from mbed_devices import get_connected_devices, Device
from mbed_targets import Board
@click.command()
@click.option(
"--format", type=click.Choice(["table", "json"]), default="table", show_default=True, help="Set output format."
)
@click.option(
"--show-all",
"-a",
is_flag=True,
default=False,
help="Show all connected devices, even those which are not Mbed Boards.",
)
def list_connected_devices(format: str, show_all: bool) -> None:
"""Prints connected devices."""
connected_devices = get_connected_devices()
if show_all:
devices = _sort_devices(connected_devices.identified_devices + connected_devices.unidentified_devices)
else:
devices = _sort_devices(connected_devices.identified_devices)
output_builders = {
"table": _build_tabular_output,
"json": _build_json_output,
}
if devices:
output = output_builders[format](devices)
click.echo(output)
else:
click.echo("No connected Mbed devices found.")
def _sort_devices(devices: Iterable[Device]) -> Iterable[Device]:
"""Sort devices by board name and then serial number (in case there are multiple boards with the same name)."""
return sorted(devices, key=attrgetter("mbed_board.board_name", "serial_number"))
def _build_tabular_output(devices: Iterable[Device]) -> str:
headers = ["Board name", "Serial number", "Serial port", "Mount point(s)", "Build target(s)"]
devices_data = []
for device in devices:
devices_data.append(
[
device.mbed_board.board_name or "<unknown>",
device.serial_number,
device.serial_port or "<unknown>",
"\n".join(str(mount_point) for mount_point in device.mount_points),
"\n".join(_get_build_targets(device.mbed_board)),
]
)
return tabulate(devices_data, headers=headers)
def _build_json_output(devices: Iterable[Device]) -> str:
devices_data = []
for device in devices:
board = device.mbed_board
devices_data.append(
{
"serial_number": device.serial_number,
"serial_port": device.serial_port,
"mount_points": [str(m) for m in device.mount_points],
"mbed_board": {
"product_code": board.product_code,
"board_type": board.board_type,
"board_name": board.board_name,
"mbed_os_support": board.mbed_os_support,
"mbed_enabled": board.mbed_enabled,
"build_targets": _get_build_targets(board),
},
}
)
return json.dumps(devices_data, indent=4)
def _get_build_targets(board: Board) -> Iterable[str]:
return [f"{board.board_type}_{variant}" for variant in board.build_variant] + [board.board_type]
| [
"noreply@github.com"
] | ARMmbed.noreply@github.com |
60cc3428b450d6e43e6a31d6e789ce5f20e0f0f1 | 011416f366b8ff7da7e267cabcacb2279f328447 | /detector.py | e8686abcd2dfc72cadbfa58d80bc1c8997c14671 | [] | no_license | victorgrubio/Yolo-detection-NRG5 | ceed23cc7d2d7f97064bc9232e888e8c1df3df7a | 48c746d6cb1f1862f94bcfb5d90378d009fd73b6 | refs/heads/main | 2023-01-10T16:12:40.487364 | 2020-10-20T17:58:39 | 2020-10-20T17:58:39 | 306,098,308 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | """
Created on Mon Jan 29 17:25:59 2018
@author: victor
"""
import pyximport; pyximport.install() # allow .pyx files import
def Detector():
def __init__(self, img):
pass
def process_img(self, img):
pass
| [
"victorgrubiodl@gmail.com"
] | victorgrubiodl@gmail.com |
8e7c8e939b745936b9c56fdcad18bbc94247f2dc | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/mab_error_info.py | d57e90b3fdeaad0ccaa8fce630564c8ecc36c04b | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,097 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MabErrorInfo(Model):
"""MAB workload-specific error information.
:param error_string: Localized error string.
:type error_string: str
:param recommendations: List of localized recommendations.
:type recommendations: list of str
"""
_attribute_map = {
'error_string': {'key': 'errorString', 'type': 'str'},
'recommendations': {'key': 'recommendations', 'type': '[str]'},
}
def __init__(self, error_string=None, recommendations=None):
self.error_string = error_string
self.recommendations = recommendations
| [
"dheeru.rathor14@gmail.com"
] | dheeru.rathor14@gmail.com |
967bc0c6daed181a230ed0df131092a91d1585c7 | 9b3f578e63a7e17e2b1bab5f38aa8625b8a80251 | /descarteslabs/workflows/types/primitives/primitive.py | 2d49e5b56195eb800ecbd67e14ed0bf44934e74c | [
"Apache-2.0"
] | permissive | carderne/descarteslabs-python | e6f7000f08cd1569e0ddd0f7fb8e53abb6765183 | 757b480efb8d58474a3bf07f1dbd90652b46ed64 | refs/heads/master | 2022-12-09T23:19:02.361226 | 2020-08-13T11:52:30 | 2020-08-13T11:52:30 | 287,264,851 | 0 | 0 | NOASSERTION | 2020-08-13T11:46:58 | 2020-08-13T11:46:57 | null | UTF-8 | Python | false | false | 1,324 | py | from descarteslabs.common.graft import client
from ..core import Proxytype, ProxyTypeError
class Primitive(Proxytype):
"""
Proxy wrapper around a Python primitive type.
Do not use Primitive directly; instead, use one of the built-in subtypes (Int, Str, etc.)
"""
_pytype = None
def __init__(self, obj):
if self._is_generic():
raise ProxyTypeError(
"Cannot instantiate a generic {}; use a concrete subclass".format(
type(self).__name__
)
)
from .any_ import Any # TODO circular import
if isinstance(obj, (type(self), Any)):
self.graft = obj.graft
else:
if not isinstance(obj, self._pytype):
raise ProxyTypeError(
"Cannot promote {} to {}".format(type(obj), type(self))
)
self.graft = client.value_graft(obj)
self._literal_value = obj
@classmethod
def _promote(cls, obj):
return cls(obj)
@property
def literal_value(self):
"Python literal value this proxy object was constructed with, or None if not constructed from a literal value."
return getattr(self, "_literal_value", None)
def _is_generic(self):
return self._pytype is None
| [
"support@descarteslabs.com"
] | support@descarteslabs.com |
bd6fbef0bcbf14bea60261fe548c8aa68a9ac909 | 302442c32bacca6cde69184d3f2d7529361e4f3c | /cidtrsend-all/stage2-model/pytz/zoneinfo/America/Argentina/Mendoza.py | d3b0b6b1d1cd786afa0f915837aa14c8768788d6 | [] | no_license | fucknoob/WebSemantic | 580b85563072b1c9cc1fc8755f4b09dda5a14b03 | f2b4584a994e00e76caccce167eb04ea61afa3e0 | refs/heads/master | 2021-01-19T09:41:59.135927 | 2015-02-07T02:11:23 | 2015-02-07T02:11:23 | 30,441,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | '''tzinfo timezone information for America/Argentina/Mendoza.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Mendoza(DstTzInfo):
'''America/Argentina/Mendoza timezone definition. See datetime.tzinfo for details'''
zone = 'America/Argentina/Mendoza'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1920,5,1,4,16,48),
d(1930,12,1,4,0,0),
d(1931,4,1,3,0,0),
d(1931,10,15,4,0,0),
d(1932,3,1,3,0,0),
d(1932,11,1,4,0,0),
d(1933,3,1,3,0,0),
d(1933,11,1,4,0,0),
d(1934,3,1,3,0,0),
d(1934,11,1,4,0,0),
d(1935,3,1,3,0,0),
d(1935,11,1,4,0,0),
d(1936,3,1,3,0,0),
d(1936,11,1,4,0,0),
d(1937,3,1,3,0,0),
d(1937,11,1,4,0,0),
d(1938,3,1,3,0,0),
d(1938,11,1,4,0,0),
d(1939,3,1,3,0,0),
d(1939,11,1,4,0,0),
d(1940,3,1,3,0,0),
d(1940,7,1,4,0,0),
d(1941,6,15,3,0,0),
d(1941,10,15,4,0,0),
d(1943,8,1,3,0,0),
d(1943,10,15,4,0,0),
d(1946,3,1,3,0,0),
d(1946,10,1,4,0,0),
d(1963,10,1,3,0,0),
d(1963,12,15,4,0,0),
d(1964,3,1,3,0,0),
d(1964,10,15,4,0,0),
d(1965,3,1,3,0,0),
d(1965,10,15,4,0,0),
d(1966,3,1,3,0,0),
d(1966,10,15,4,0,0),
d(1967,4,2,3,0,0),
d(1967,10,1,4,0,0),
d(1968,4,7,3,0,0),
d(1968,10,6,4,0,0),
d(1969,4,6,3,0,0),
d(1969,10,5,4,0,0),
d(1974,1,23,3,0,0),
d(1974,5,1,2,0,0),
d(1988,12,1,3,0,0),
d(1989,3,5,2,0,0),
d(1989,10,15,3,0,0),
d(1990,3,4,2,0,0),
d(1990,10,15,4,0,0),
d(1991,3,1,3,0,0),
d(1991,10,15,4,0,0),
d(1992,3,1,3,0,0),
d(1992,10,18,4,0,0),
d(1993,3,7,2,0,0),
d(1999,10,3,3,0,0),
d(2000,3,3,3,0,0),
d(2004,5,23,3,0,0),
d(2004,9,26,4,0,0),
]
_transition_info = [
i(-15420,0,'CMT'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,0,'ART'),
i(-7200,3600,'ARST'),
i(-10800,0,'ART'),
i(-7200,3600,'ARST'),
i(-10800,0,'ART'),
i(-7200,3600,'ARST'),
i(-14400,0,'WART'),
i(-10800,3600,'WARST'),
i(-14400,0,'WART'),
i(-10800,3600,'WARST'),
i(-14400,0,'WART'),
i(-7200,7200,'ARST'),
i(-10800,0,'ART'),
i(-10800,0,'ARST'),
i(-10800,0,'ART'),
i(-14400,0,'WART'),
i(-10800,0,'ART'),
]
Mendoza = Mendoza()
| [
"learnfuzzy@gmail.com"
] | learnfuzzy@gmail.com |
731ddffa3a3330ee11c7a4b1f6c437a7196dcce7 | d90283bff72b5a55dd4d0f90c7325355b00ce7b1 | /p1804/lianxi/函数参数.py | 604939b96265d356561c29ce3cf5a71702d1a3db | [] | no_license | yuemeiss/p1804daima | f841f52e63081d53d50a199e4d148d4533605bb6 | 6ea08eb9971e42bf4ac535033a006d98ed98bf98 | refs/heads/master | 2020-03-15T23:29:59.691297 | 2018-08-06T02:42:49 | 2018-08-06T02:42:49 | 132,395,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | def sum_2_num():
num1 = 10
num2 = 20
result = num1 + num2
print("%d + %d = %d"% (num1,num2,result))
sum_2_num()
| [
"1083027306@qq.com"
] | 1083027306@qq.com |
bc4cfdc288816b00de2839a560736efa2542f302 | 07151cc20993dff5e3e22a8fc2fe4fe7fb3e2551 | /parse_drugbank.py | 3142e785a49b34248686366bc30b75f9c1d3bc04 | [] | no_license | jmuhlich/lincs-drug-targets | 4a2b122185caf587a3b4eda47da125c4a3c8e439 | bf627c4760c52fa0a15645c4b49c077a4ed478d5 | refs/heads/master | 2021-01-19T08:26:02.903067 | 2013-07-03T19:48:01 | 2013-07-03T19:48:01 | 10,800,024 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,668 | py | import os
import sys
import lxml.etree
import csv
import sqlalchemy as sa
def xpath(obj, path, single=True):
result = map(unicode, obj.xpath(path, namespaces={'d': ns}))
if single:
if len(result) == 0:
result = None
elif len(result) == 1:
result = result[0]
else:
raise ValueError("XPath expression matches more than one value")
return result
def record_match(hmsl_id, drugbank_id, description):
conn.execute(hmsl_drugbank.insert().values(locals()))
db_file = 'drugbank.sqlite'
#db_file = ':memory:'
engine = sa.create_engine('sqlite:///' + db_file)
conn = engine.connect()
metadata = sa.MetaData(bind=conn)
drugbank_drug = sa.Table(
'drugbank_drug', metadata,
sa.Column('drug_id', sa.String(), primary_key=True),
sa.Column('name', sa.String()),
sa.Column('synonyms', sa.PickleType()), # list of strings
sa.Column('kegg_id', sa.String()),
sa.Column('pubchem_cid', sa.String()),
sa.Column('molecular_formula', sa.String()),
sa.Column('partners', sa.PickleType()), # list of strings
)
drugbank_name = sa.Table(
'drugbank_name', metadata,
sa.Column('drug_id', sa.String()),
sa.Column('name', sa.String(), index=True),
)
hmsl_drugbank = sa.Table(
'hmsl_drugbank', metadata,
sa.Column('hmsl_id', sa.String(), primary_key=True),
sa.Column('drugbank_id', sa.String()),
sa.Column('description', sa.String()),
)
metadata.create_all()
datafile_name = 'drugbank.xml'
datafile = open(datafile_name)
ns = 'http://drugbank.ca'
qnames = dict((tag, lxml.etree.QName(ns, tag).text)
for tag in ('drug', 'drug-interaction', 'partner'))
# Parse drugbank xml into sqlite, only if the table is empty.
if not conn.execute(drugbank_drug.select()).first():
with conn.begin() as trans:
for event, element in lxml.etree.iterparse(datafile, tag=qnames['drug']):
# We need to skip 'drug' elements in drug-interaction sub-elements.
# It's unfortunate they re-used this tag name.
if element.getparent().tag == qnames['drug-interaction']:
continue
drug_id = xpath(element, 'd:drugbank-id/text()')
name = xpath(element, 'd:name/text()')
synonyms = xpath(
element, 'd:synonyms/d:synonym/text()', single=False)
synonyms += xpath(
element, 'd:brands/d:brand/text()', single=False)
molecular_formula = xpath(
element, './/d:property[d:kind="Molecular Formula"]/'
'd:value/text()')
kegg_id = xpath(
element, './/d:external-identifier[d:resource="KEGG Drug"]/'
'd:identifier/text()')
pubchem_cid = xpath(
element, './/d:external-identifier[d:resource="PubChem Compound"]/'
'd:identifier/text()')
partner_ids = xpath(
element, 'd:targets/d:target/@partner', single=False)
conn.execute(
drugbank_drug.insert().
values(drug_id=drug_id, name=name, synonyms=synonyms,
kegg_id=kegg_id, pubchem_cid=pubchem_cid,
molecular_formula=molecular_formula,
partners=partner_ids))
conn.execute(
drugbank_name.insert().
values(drug_id=drug_id, name=name.lower()))
for s in synonyms:
conn.execute(
drugbank_name.insert().
values(drug_id=drug_id, name=s.lower()))
element.clear()
# Turns out it's much faster to do a second iterparse loop with a different
# tag argument than to do just one iterparse loop with a conditional on the
# tag name. The lxml internals are much more efficient at filtering tags
# than we are, and the disk I/O and buffer cache impact are negligible. It
# would be nice if the tag argument could accept a list of tag names...
datafile.seek(0)
partner_to_uniprot = {}
for event, element in lxml.etree.iterparse(datafile, tag=qnames['partner']):
partner_id = element.get('id')
uniprot_id = xpath(element, './/d:external-identifier'
'[d:resource="UniProtKB"]/d:identifier/text()')
partner_to_uniprot[partner_id] = uniprot_id
element.clear()
with conn.begin() as trans:
for rec in conn.execute(drugbank_drug.select()):
new_values = dict(rec)
new_values['partners'] = map(partner_to_uniprot.__getitem__, rec.partners)
new_values['partners'] = filter(None, new_values['partners'])
conn.execute(drugbank_drug.update().
where(drugbank_drug.c.drug_id == rec.drug_id).
values(**new_values))
drugbank_names = [
rec[0] for rec in conn.execute(sa.select([drugbank_name.c.name]))]
sm_filename = os.path.join(os.path.dirname(sys.argv[0]),
'small_molecule.130624M134120.tsv')
sm_file = open(sm_filename, 'rb')
sm_reader = csv.reader(sm_file, dialect='excel-tab')
sm_fields = [f.lower().replace(' ', '_') for f in sm_reader.next()]
sm_fields[0] = 'sm_id'
hmsl_sm = sa.Table(
'hmsl_sm', metadata,
*[sa.Column(f, sa.String()) for f in sm_fields]
)
hmsl_sm.append_constraint(sa.PrimaryKeyConstraint(hmsl_sm.c.sm_id))
hmsl_sm.c.alternative_names.type = sa.PickleType()
metadata.create_all(tables=[hmsl_sm])
# Clear out hmsl_sm table unconditionally (it's fast to reload).
conn.execute(hmsl_sm.delete())
with conn.begin() as trans:
for row in sm_reader:
row[0] = row[0][:-4]
row[2] = row[2].split(';')
try:
conn.execute(hmsl_sm.insert().values(row))
except sa.exc.IntegrityError as e:
# Merge tsv row with existing record.
rec = conn.execute(hmsl_sm.select().
where(hmsl_sm.c.sm_id == row[0])).first()
if rec:
new_rec = dict(rec)
# Append new name and synonyms to synonyms.
new_rec['alternative_names'] = list(set(
rec.alternative_names +
[row[sm_fields.index('sm_name')]] +
row[sm_fields.index('alternative_names')]))
# If no existing CID, use the new one.
if not rec.pubchem_cid:
new_rec['pubchem_cid'] = row[sm_fields.index('pubchem_cid')]
conn.execute(hmsl_sm.update().
where(hmsl_sm.c.sm_id == new_rec['sm_id']).
values(new_rec))
conn.execute(hmsl_drugbank.delete())
with conn.begin() as trans:
for sm in conn.execute(hmsl_sm.select()):
hmsl_names = [s.lower() for s in [sm.sm_name] + sm.alternative_names]
for name in hmsl_names:
match = conn.execute(sa.select([drugbank_name.c.drug_id]).
where(drugbank_name.c.name == name)
).scalar()
if match:
break
if match:
record_match(sm.sm_id, match, 'Name: %s' % name)
continue
match = conn.execute(sa.select([drugbank_drug.c.drug_id]).
where(drugbank_drug.c.pubchem_cid ==
sm.pubchem_cid)
).scalar()
if match:
record_match(sm.sm_id, match, 'PubChem CID: %s' % sm.pubchem_cid)
continue
for rec in conn.execute(hmsl_drugbank.select()):
print '\t'.join(rec)
| [
"jmuhlich@bitflood.org"
] | jmuhlich@bitflood.org |
bc0ab3ba1d66e12d5151b4ece16b2e5d76d35cfa | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/h5py/version.py | d07fd5c286ba42d9633ba01d61c2280a7fd43eff | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 1,652 | py | # This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Versioning module for h5py.
"""
from __future__ import absolute_import
from collections import namedtuple
from . import h5 as _h5
import sys
import numpy
# All should be integers, except pre, as validating versions is more than is
# needed for our use case
_H5PY_VERSION_CLS = namedtuple("_H5PY_VERSION_CLS",
"major minor bugfix pre post dev")
hdf5_built_version_tuple = _h5.HDF5_VERSION_COMPILED_AGAINST
version_tuple = _H5PY_VERSION_CLS(2, 9, 0, None, None, None)
version = "{0.major:d}.{0.minor:d}.{0.bugfix:d}".format(version_tuple)
if version_tuple.pre is not None:
version += version_tuple.pre
if version_tuple.post is not None:
version += ".post{0.post:d}".format(version_tuple)
if version_tuple.dev is not None:
version += ".dev{0.dev:d}".format(version_tuple)
hdf5_version_tuple = _h5.get_libversion()
hdf5_version = "%d.%d.%d" % hdf5_version_tuple
api_version_tuple = (1,8)
api_version = "%d.%d" % api_version_tuple
info = """\
Summary of the h5py configuration
---------------------------------
h5py %(h5py)s
HDF5 %(hdf5)s
Python %(python)s
sys.platform %(platform)s
sys.maxsize %(maxsize)s
numpy %(numpy)s
""" % { 'h5py': version,
'hdf5': hdf5_version,
'python': sys.version,
'platform': sys.platform,
'maxsize': sys.maxsize,
'numpy': numpy.__version__ }
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
8dcce500bccb4d7e0fe014e6d850a544ff23c742 | 822027ec57f113f80a51f100c520eb76a6f302f6 | /test/z_component_tests/test__encoding.py | c16288fc2367ecd1ff65d2070ad6a1e0a27f5ece | [
"MIT"
] | permissive | KIC/pandas_ml_utils | 131de11f4914f0993570687b581452e2e81b256b | 76b764e2f87c2e9bcee9a62cfe0b54e7fb046034 | refs/heads/master | 2023-04-04T00:08:23.175385 | 2020-02-24T14:44:42 | 2020-02-24T14:44:42 | 205,210,206 | 3 | 0 | MIT | 2023-03-24T23:20:47 | 2019-08-29T16:54:12 | Python | UTF-8 | Python | false | false | 1,683 | py | import logging
import unittest
from typing import List
import pandas as pd
import numpy as np
from sklearn.neural_network import MLPClassifier
import pandas_ml_utils as pdu
from pandas_ml_utils.constants import *
from test.config import TEST_FILE
from pandas_ml_utils.model.features_and_labels.target_encoder import TargetLabelEncoder
from test.mocks.mock_model import MockModel
from pandas_ml_utils.utils.functions import integrate_nested_arrays
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class EncoderTest(unittest.TestCase):
def test__2d_encoding(self):
"""given"""
df = pd.read_csv(TEST_FILE, index_col='Date')
df["label"] = df["spy_Close"] > df["spy_Open"]
class ArrayEncoder(TargetLabelEncoder):
def __init__(self):
super().__init__()
@property
def labels_source_columns(self) -> List[str]:
return ["spy_Close"]
@property
def encoded_labels_columns(self) -> List[str]:
return ["2D"]
def encode(self, df: pd.DataFrame, **kwargs) -> pd.DataFrame:
res = pd.DataFrame({}, index=df.index)
res["2D"] = df["spy_Close"] = df["spy_Close"].apply(lambda r: np.array([r, r]))
return res
"""when"""
model = MockModel(pdu.FeaturesAndLabels(["spy_Close"], ArrayEncoder(), feature_lags=[0, 1, 2]))
fit = df.fit(model)
"""then"""
print(fit.test_summary.df)
self.assertEqual(fit.test_summary.df.shape, (2682, 2))
self.assertEqual(integrate_nested_arrays(fit.test_summary.df.values).shape, (2682, 2, 2))
| [
"ch9.ki7@gmail.com"
] | ch9.ki7@gmail.com |
c1d2ad1b4ef08b921ee81f80d41045d6c1deef7a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_211/ch27_2020_03_11_19_25_38_657892.py | 6bfeffb25e1b70d6b961b54597fe66634b50247f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | '''Faça um programa que pergunta ao aluno se ele tem dúvidas na disciplina. Se o aluno responder qualquer coisa diferente de "não", escreva "Pratique mais" e pergunte novamente se ele tem dúvidas. Continue perguntando até que o aluno responda que não tem dúvidas. Finalmente, escreva "Até a próxima".
Seu programa deve imprimir as strings exatamente como descritas acima e nada mais.'''
x=input("você tem alguma dúvida?")
while x!="não":
x=input("você tem alguma dúvida?")
| [
"you@example.com"
] | you@example.com |
287eb5948fdfa0b92d31d92331777526e4b0d8c2 | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/2021-5-15/python_re2_test_file/regexlib_5492.py | 8908eaf023624b909fb44cceba923652e2fb1cb3 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # 5492
# ^((\D*[a-z]\D*[A-Z]\D*)|(\D*[A-Z]\D*[a-z]\D*)|(\D*\W\D*[a-z])|(\D*\W\D*[A-Z])|(\D*[a-z]\D*\W)|(\D*[A-Z]\D*\W))$
# EXPONENT
# nums:5
# EXPONENT AttackString:""+"aA"*512+"@1 _SLQ_2"
import re2 as re
from time import perf_counter
regex = """^((\D*[a-z]\D*[A-Z]\D*)|(\D*[A-Z]\D*[a-z]\D*)|(\D*\W\D*[a-z])|(\D*\W\D*[A-Z])|(\D*[a-z]\D*\W)|(\D*[A-Z]\D*\W))$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "aA" * i * 1 + "@1 _SLQ_2"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | [
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
d4e4c2c0bc5b59146ff0bc3021c814b5a8821c8a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_undulated.py | 64b34a6850b8ecaf7e0aabe42b76a28fce49e7b8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.verbs._undulate import _UNDULATE
#calss header
class _UNDULATED(_UNDULATE, ):
def __init__(self,):
_UNDULATE.__init__(self)
self.name = "UNDULATED"
self.specie = 'verbs'
self.basic = "undulate"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4c8ea9cb63d8f0c08dd8f3b0cec6698d4ed9ea3d | 0cc2a3a4b5948d8a30a4ab6e6a81209b28fa4dc2 | /Introduction.py | b75a74b8451203f59734e9fa4a6de2b3aa61bb28 | [] | no_license | zoshs2/Statiscal_Learning_Beginner | ece80effaae28875ed023803f2c738baf21fb6af | dc48640b00b04c1357ea205340f81b3e6bdbff5b | refs/heads/main | 2023-01-13T09:12:29.204403 | 2020-10-28T06:46:30 | 2020-10-28T06:46:30 | 306,056,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | ### Introduction
# Basic statistic for beginners
# https://www.kaggle.com/kanncaa1/statistical-learning-tutorial-for-beginners
## Chapter 1.py
# 1. Histogram
# 2. Outliers
# 3. Box Plot
# 4. Summary Statistics
## Chapter 2.py
# 5. CDF (Cumulative Distribution Function?)
# 6. Effect Size
# 7. Relationship Between Variables
# 8. Correlation
# 9. Covariance
## Chapter 3.py
# 10. Pearson Correlation
# 11. Spearman's Rank Correlation
# 12. Mean VS. Median
# 13. Hypothesis Testing
# 14. Normal(Gaussian) Distribution & z-score | [
"zoshs27@gmail.com"
] | zoshs27@gmail.com |
bae18e6d6c368cd7d692ce5ddccda12121b1bcd3 | f6217c228984107f1fdde63fc544c92ad32efd13 | /common/hash/sha256.py | 36c2288f256afde86f48bd1cd2dc3a4118fb44cb | [
"MIT"
] | permissive | lukius/mts | 8be64972fd700ec9110789a7e15307e3fc3dfecb | 96d3d8b28742a474aca67bfcb079577c878bbb4c | refs/heads/master | 2021-06-06T03:22:21.991908 | 2017-11-28T23:52:50 | 2017-11-28T23:52:50 | 22,904,866 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from Crypto.Hash import SHA256 as _SHA256
from common.hash import HashFunction
class SHA256(HashFunction):
@classmethod
def get_OID(cls):
return '\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01'
def hash(self, message):
# TODO: implement custom SHA256.
return _SHA256.new(message).digest() | [
"lukius@gmail.com"
] | lukius@gmail.com |
2d0ddf5bf1de02234b97f6a5df7b3d69b8d470a4 | 22b3822af1a3c525cfbc85efabcb80f7198dba8d | /Functions/Brantley_U5_04/Brantley_U5_04.py | 39c8efb038eaf3f8b4602369e39fffdb88cef6ec | [] | no_license | ccbrantley/Python_3.30 | 90b05a0b985819e95333e490006544332bb5e462 | 681bfd542505754abe36224f5b773d889f20ae38 | refs/heads/master | 2021-12-25T02:04:44.501778 | 2018-04-05T17:40:46 | 2018-04-05T17:40:46 | 80,469,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | expenseDict = {'loan payment' : 0, 'insurance' : 0, 'gas' : 0, 'oil' : 0, 'tires' : 0, 'maintenace' : 0}
def main():
expense()
totalMonthly, totalYearly = total()
print('Total monthly cost: $', format(totalMonthly, ',.2f'), sep='')
print('Total annual cost: $', format(totalYearly, ',.2f'), sep='')
def expense():
for x in expenseDict:
y = int(input('Enter cost amount of ' + x +': '))
expenseDict[x] = y
totalMonthly = sum(expenseDict.values())
def total():
x = sum(expenseDict.values())
return x, x * 12
main()
| [
"noreply@github.com"
] | ccbrantley.noreply@github.com |
d6ca72f18a592b1ecc313eea503875930f5d835c | 167face5e34f69ba36b8a8d93306387dcaa50d24 | /testes.py | 9502eb97df4ebeb820bacf59a85b1d29e3ef13b5 | [] | no_license | william-cirico/python-study | 4fbe20936c46af6115f0d88ad861c71e6273db71 | 5923268fea4c78707fe82f1f609535a69859d0df | refs/heads/main | 2023-04-19T03:49:23.237829 | 2021-05-03T01:24:56 | 2021-05-03T01:24:56 | 309,492,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | import unittest
from atividades import comer, dormir, eh_engracado
class AtividadesTestes(unittest.TestCase):
def test_comer_saudavel(self):
"""Testando o retorno com comida saudavel"""
self.assertEqual(
comer('quiabo', True),
"Estou comendo quiabo porque quero manter a forma"
)
def test_comer_gostosa(self):
"""Testando o retorno com comida gostosa"""
self.assertEqual(
comer(comida="pizza", eh_saudavel=False),
"Estou comendo pizza porque a gente só vive uma vez"
)
def test_dormindo_pouco(self):
"""Testando o retorno dormindo pouco"""
self.assertEqual(
dormir(4),
"Continuo cansado após dormir por 4 horas. :("
)
def test_domindo_muito(self):
"""Testando o retorno dormindo muito"""
self.assertEqual(
dormir(10),
"Ptz! Dormi muito! Estou atrasado para o trabalho!"
)
def test_eh_engracado(self):
# self.assertEqual(eh_engracado("Sérgio Malandro"), False)
self.assertFalse(eh_engracado("Sérgio Malandro"))
if __name__ == '__main__':
unittest.main() | [
"contato.williamc@gmail.com"
] | contato.williamc@gmail.com |
e966f209cb98135cc7d991a4ed9fb9f6176e8b2b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_artworks.py | d3cffdc1e4d13959720c0a0811e03088e3d625c5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._artwork import _ARTWORK
#calss header
class _ARTWORKS(_ARTWORK, ):
def __init__(self,):
_ARTWORK.__init__(self)
self.name = "ARTWORKS"
self.specie = 'nouns'
self.basic = "artwork"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
24842142cd636353539a3e7b63e7cef1c4626bb1 | 7a1b88d06ea18772b065b43d775cec6dd2acdf80 | /4153.py | 39386beff06afc1b47bb3d042dc3cabb7a745654 | [] | no_license | skaurl/baekjoon-online-judge | 28144cca45168e79b1ae0baa9a351f498f8d19ab | 1620d298c2f429e03c5f9387d8aca13763f5c731 | refs/heads/master | 2023-07-26T10:07:29.724066 | 2021-09-07T09:21:02 | 2021-09-07T09:21:02 | 299,019,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | while True:
A = input().split()
A[0] = int(A[0])
A[1] = int(A[1])
A[2] = int(A[2])
A = sorted(A)
if A[0] == 0 and A[1] == 0 and A[2] == 0:
break
if A[0]**2 + A[1]**2 == A[2]**2:
print('right')
else:
print('wrong') | [
"dr_lunars@naver.com"
] | dr_lunars@naver.com |
94d7fe23b39627e9dafd26e70c17d851bdc74ebc | bedadeffd76899b4255871eaa79a03e8c8c5d7a9 | /screenshot/urls.py | 48013d0dce79f803043d4b0400d96b8fd8e14906 | [] | no_license | aakriti1435/Django-HTML-to-PDF | 5b48c5b0300227bc37439c4ea3d515c9ca3644a1 | 1f9a261ef1b17267514a951b8155c54ad74a281a | refs/heads/master | 2022-12-02T02:12:20.659027 | 2020-08-13T13:32:01 | 2020-08-13T13:32:01 | 287,287,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from django.urls import path
from . import views
# from .views import take_screenshot
urlpatterns = [
path('', views.canvas),
path('/take', views.take_screenshot, name='canvas'),
] | [
"65544777+aakriti1435@users.noreply.github.com"
] | 65544777+aakriti1435@users.noreply.github.com |
6fad1f124a44e93e1651c2f3ac8832a29a8777dd | 4be467ebc691f31b94dc72de88c10e1ab14d9c53 | /data.py | b7c56b80f628bfbb8bf81d5948588ea589af7f90 | [] | no_license | oziTeam/mockup-warp-test | 546d96a028155b2d605f72fbd1b0513d23b63ada | 242e838d31c57603f04060b5e8c196ac8ba9f306 | refs/heads/master | 2022-12-04T10:09:18.159312 | 2020-08-19T04:52:32 | 2020-08-19T04:52:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,833 | py | # ARTWORK_DATA = {
# "Front": "./sample_data/artworks/3-front.jpg",
# "Hood": "./sample_data/artworks/3-hood.jpg",
# "Back": "./sample_data/artworks/3-back.jpg"
# }
#
# MOCKUP_DATA = [
# {
# "side_name": "Back",
# "parts": [
# {
# "name": "back.left_sleeve",
# "model_path": "./sample_data//models/tshirt.back.left_sleeve.model.npy",
# "cut_image_path": "./sample_data/cut_images/back.left_sleeve-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Back"
# },
# {
# "name": "back.left_sleeve",
# "model_path": "./sample_data//models/tshirt.back.model.npy",
# "cut_image_path": "./sample_data/cut_images/back.front_back-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Back"
# },
# {
# "name": "back.right_sleeve",
# "model_path": "./sample_data//models/tshirt.back.right_sleeve.model.npy",
# "cut_image_path": "./sample_data/cut_images/back.right_sleeve-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Back"
# },
# {
# "name": "back.top_hood",
# "model_path": "./sample_data//models/tshirt.back.top_hood.model.npy",
# "cut_image_path": "./sample_data/cut_images/back.top_hood-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Hood"
# },
# ]
# },
# {
# "side_name": "Front",
# "parts": [
# {
# "name": "front.left_sleeve",
# "model_path": "./sample_data//models/tshirt.front.left_sleeve.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.left_sleeve-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Front"
# },
# {
# "name": "front",
# "model_path": "./sample_data//models/tshirt.front.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.front-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Front"
# },
# {
# "name": "front.right_sleeve",
# "model_path": "./sample_data//models/tshirt.front.right_sleeve.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.right_sleeve-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Front"
# },
# {
# "name": "front.bottom_hood",
# "model_path": "./sample_data//models/tshirt.front.bottom_hood.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.bottom_hood-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Hood"
#
# },
# {
# "name": "front.top_hood",
# "model_path": "./sample_data//models/tshirt.front.top_hood.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.top_hood-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Hood"
# }
# ]
# }
# ]
ARTWORK_DATA = {
"Front": "./sample_data/artworks/fushion-mask.jpeg",
"Adult": "./sample_data/artworks/mask-4.jpeg",
}
MOCKUP_DATA = [
{
"side_name": "Adult",
"parts": [
{
"name": "Adult",
"model_path": "./sample_data/models/aop_cc_mask.adult.model.npy",
"cut_image_path": "./sample_data/cut_images/cc_mask.adult.cut.png",
"shadow_image": "", # can be None
"artwork_side": "Adult"
}
]
},
{
"side_name": "Front",
"parts": [
{
"name": "Front",
"model_path": "./sample_data/models/aop_cc_mask.front.model.npy",
"cut_image_path": "./sample_data/cut_images/cc_mask.front.cut.png",
"shadow_image": "", # can be None
"artwork_side": "Front"
}
]
},
{
"side_name": "White",
"parts": [
{
"name": "White",
"model_path": "./sample_data/models/aop_cc_mask.white.front.model.npy",
"cut_image_path": "./sample_data/cut_images/cc_mask.white.front.cut.png",
"shadow_image": "", # can be None
"artwork_side": "Front"
}
]
}
]
| [
"vantrong291@gmail.com"
] | vantrong291@gmail.com |
0495487a69cc62832cd6afee4efb15ddda3a9969 | 10e94d77e56d9cbb979174795c465b679d03d6b3 | /tensorflow/contrib/learn/python/learn/dataframe/transforms/difference.py | f9cb0c9485516abedbb3847530755d5cb328287f | [
"Apache-2.0"
] | permissive | pint1022/tf-coriander | 68939732c1ec0f052929c13ef6d8f49e44d423e4 | 197a685accca4a3f38285d6ac3ccf3998a200090 | refs/heads/master | 2020-04-14T18:56:40.334257 | 2019-01-11T00:40:11 | 2019-01-11T00:40:11 | 164,038,861 | 1 | 0 | Apache-2.0 | 2019-01-04T00:53:40 | 2019-01-04T00:53:40 | null | UTF-8 | Python | false | false | 2,361 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Transform` that performs subtraction on two `Series`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import ops
from tensorflow.python.ops import sparse_ops
def _negate_sparse(sparse_tensor):
  """Return a `SparseTensor` holding the elementwise negation of `sparse_tensor`.

  The indices and dense shape are shared with the input; only the values
  are negated.
  """
  negated_values = -sparse_tensor.values
  return ops.SparseTensor(values=negated_values,
                          indices=sparse_tensor.indices,
                          shape=sparse_tensor.shape)
@series.Series.register_binary_op("__sub__")
class Difference(transform.TensorFlowTransform):
  """Subtracts one 'Series` from another."""
  def __init__(self):
    super(Difference, self).__init__()
  @property
  def name(self):
    # Human-readable name of this transform.
    return "difference"
  @property
  def input_valency(self):
    # A subtraction consumes exactly two input Series.
    return 2
  @property
  def _output_names(self):
    # Single output; the trailing comma makes this a 1-tuple.
    return "output",
  def _apply_transform(self, input_tensors, **kwargs):
    """Compute input_tensors[0] - input_tensors[1], handling sparse inputs.

    Args:
      input_tensors: pair of dense `Tensor`s and/or `ops.SparseTensor`s.
      **kwargs: unused.

    Returns:
      self.return_type wrapping the elementwise difference.
    """
    # (first_is_sparse, second_is_sparse)
    pair_sparsity = (isinstance(input_tensors[0], ops.SparseTensor),
                     isinstance(input_tensors[1], ops.SparseTensor))
    if pair_sparsity == (False, False):
      # Both dense: plain elementwise subtraction.
      result = input_tensors[0] - input_tensors[1]
    # note tf.sparse_add accepts the mixed cases,
    # so long as at least one input is sparse.
    elif not pair_sparsity[1]:
      # Second input is dense (so the first must be sparse): negate it directly.
      result = sparse_ops.sparse_add(input_tensors[0], - input_tensors[1])
    else:
      # Second input is sparse: negate its values via the helper, since unary
      # minus is not defined on SparseTensor.
      result = sparse_ops.sparse_add(input_tensors[0],
                                     _negate_sparse(input_tensors[1]))
    # pylint: disable=not-callable
    return self.return_type(result)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
0428f2bbc10bab71365ca218e39a361a0a85a71f | e89b1297206710aad354ae7a0514ea8d0dfe5984 | /setup.py | 907ca11897264ee61d985f4a1558c49f3ab2f3e7 | [] | no_license | dandavison/docopt-subcommand-completion-example | d649f635012e147cc59c94611a198abe7b61aff7 | f700d61aa43fb9ddf16c4bd11aeccdb7bad171dc | refs/heads/master | 2021-01-17T13:29:34.631932 | 2016-07-10T21:03:31 | 2016-07-10T21:03:31 | 56,941,539 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import os
from setuptools import find_packages
from setuptools import setup
setup(
name='docopt-example',
version=(open(os.path.join(os.path.dirname(__file__),
'app',
'version.txt'))
.read().strip()),
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=['docopt'],
entry_points={
'console_scripts': [
'docopt-example = app.cli:main',
],
},
)
| [
"dandavison7@gmail.com"
] | dandavison7@gmail.com |
555ad2bb52e603076658741cc942bcaa8a6e7d82 | a024fe3b05dd320a7860165dd72ebd832ce6e484 | /intn_informe_bascula_web/models/models.py | 50eadeb8cc3bf328f707f559a4c7e5cdcabf4edf | [] | no_license | acostaw/erp_odoo | 97d02a675908e441cf8e1ba4e3dcbc62691f8dec | 2437997b650c9fdbf6a6f007c0a1fea2aab018e2 | refs/heads/main | 2023-04-19T14:52:48.877851 | 2021-04-22T18:40:07 | 2021-04-22T18:40:07 | 360,644,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
# class intn_informe_bascula_web(models.Model):
# _name = 'intn_informe_bascula_web.intn_informe_bascula_web'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# self.value2 = float(self.value) / 100 | [
"wacosta@INTN.GOV.PY"
] | wacosta@INTN.GOV.PY |
0b979cd389adf373b4cf58c997b7186c16712406 | 291ede8b17c404991e8140b9e8815c8e2e799163 | /NSC/src/train.py | aa871d3320b2339b8b28a015610fc02105c1b09a | [] | no_license | SleepyBag/NSC_tensorflow | 54d53d0d174b8d3e85ae222c8c0ca7e985363c38 | 3a2b7ff4a9a29d9b49f6510767ba3b0e8d408536 | refs/heads/master | 2020-04-03T03:09:07.906478 | 2018-10-27T15:45:55 | 2018-10-27T15:45:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,527 | py | #-*- coding: utf-8 -*-
#author: Zhen Wu
import os, time, pickle
import datetime
import numpy as np
import tensorflow as tf
from data_helpers import Dataset
import data_helpers
from model import NSC
# Data loading params
tf.flags.DEFINE_integer("n_class", 5, "Numbers of class")
tf.flags.DEFINE_string("dataset", 'yelp13', "The dataset")
# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 200, "Dimensionality of character embedding")
tf.flags.DEFINE_integer("sen_hidden_size", 100, "hidden_size of rnn")
tf.flags.DEFINE_integer("doc_hidden_size", 100, "hidden_size of rnn")
tf.flags.DEFINE_integer("usr_hidden_size", 100, "hidden_size of rnn")
tf.flags.DEFINE_integer("prd_hidden_size", 100, "hidden_size of rnn")
tf.flags.DEFINE_integer('max_sen_len', 50, 'max number of tokens per sentence')
tf.flags.DEFINE_integer('max_doc_len', 40, 'max number of tokens per sentence')
tf.flags.DEFINE_float("lr", 0.005, "Learning rate")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 100, "Batch Size")
tf.flags.DEFINE_integer("num_epochs", 1000, "Number of training epochs")
tf.flags.DEFINE_integer("evaluate_every", 25, "Evaluate model on dev set after this many steps")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Load data
print("Loading data...")
trainset = Dataset('../../data/' + FLAGS.dataset + '/train.ss')
devset = Dataset('../../data/' + FLAGS.dataset + '/dev.ss')
testset = Dataset('../../data/' + FLAGS.dataset + '/test.ss')
alldata = np.concatenate([trainset.t_docs, devset.t_docs, testset.t_docs], axis=0)
embeddingpath = '../../data/' + FLAGS.dataset + '/embedding.txt'
embeddingfile, wordsdict = data_helpers.load_embedding(embeddingpath, alldata, FLAGS.embedding_dim)
del alldata
print("Loading data finished...")
usrdict, prddict = trainset.get_usr_prd_dict()
trainbatches = trainset.batch_iter(usrdict, prddict, wordsdict, FLAGS.n_class, FLAGS.batch_size,
FLAGS.num_epochs, FLAGS.max_sen_len, FLAGS.max_doc_len)
devset.genBatch(usrdict, prddict, wordsdict, FLAGS.batch_size,
FLAGS.max_sen_len, FLAGS.max_doc_len, FLAGS.n_class)
testset.genBatch(usrdict, prddict, wordsdict, FLAGS.batch_size,
FLAGS.max_sen_len, FLAGS.max_doc_len, FLAGS.n_class)
with tf.Graph().as_default():
session_config = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement
)
session_config.gpu_options.allow_growth = True
sess = tf.Session(config=session_config)
with sess.as_default():
nsc = NSC(
max_sen_len = FLAGS.max_sen_len,
max_doc_len = FLAGS.max_doc_len,
cls_cnt = FLAGS.n_class,
emb_file = embeddingfile,
emb_dim = FLAGS.embedding_dim,
sen_hidden_size = FLAGS.sen_hidden_size,
doc_hidden_size = FLAGS.doc_hidden_size,
usr_hidden_size = FLAGS.usr_hidden_size,
prd_hidden_size = FLAGS.prd_hidden_size,
usr_cnt = len(usrdict),
prd_cnt = len(prddict)
)
loss, mse, correct_num, accuracy = nsc.build()
# Define Training procedure
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(FLAGS.lr)
grads_and_vars = optimizer.compute_gradients(loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Save dict
timestamp = str(int(time.time()))
checkpoint_dir = os.path.abspath("../checkpoints/"+FLAGS.dataset+"/"+timestamp)
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
with open(checkpoint_dir + "/wordsdict.txt", 'wb') as f:
pickle.dump(wordsdict, f)
with open(checkpoint_dir + "/usrdict.txt", 'wb') as f:
pickle.dump(usrdict, f)
with open(checkpoint_dir + "/prddict.txt", 'wb') as f:
pickle.dump(prddict, f)
sess.run(tf.global_variables_initializer())
        def train_step(batch, loss, accuracy):
            """Run one optimization step on `batch` and print loss/accuracy.

            Args:
                batch: iterable of (user, product, doc, label, sen_len, doc_len)
                    tuples, as produced by the batch iterator.
                loss: loss tensor to evaluate alongside the train op.
                accuracy: accuracy tensor to evaluate alongside the train op.
            """
            u, p, x, y, sen_len, doc_len = zip(*batch)
            feed_dict = {
                nsc.usrid: u,
                nsc.prdid: p,
                nsc.input_x: x,
                nsc.input_y: y,
                nsc.sen_len: sen_len,
                nsc.doc_len: doc_len
            }
            # Note: `loss` / `accuracy` are rebound from tensors to the fetched values.
            _, step, loss, accuracy = sess.run(
                [train_op, global_step, loss, accuracy],
                feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{0}: step {1}, loss {2}, acc {3}".format(time_str, step, loss, accuracy))
        def predict_step(u, p, x, y, sen_len, doc_len, loss, accuracy, name=None):
            """Evaluate one pre-built batch without updating weights.

            `name` is accepted for call-site symmetry but is unused here.

            Returns:
                (correct_num, accuracy, mse) fetched from the session.
            """
            feed_dict = {
                nsc.usrid: u,
                nsc.prdid: p,
                nsc.input_x: x,
                nsc.input_y: y,
                nsc.sen_len: sen_len,
                nsc.doc_len: doc_len
            }
            step, loss, accuracy, correct_num, mse = sess.run(
                [global_step, loss, accuracy, nsc.correct_num, nsc.mse],
                feed_dict)
            return correct_num, accuracy, mse
        def predict(dataset, loss, accuracy, name=None):
            """Evaluate every batch of `dataset`; return (accuracy, RMSE).

            NOTE(review): `xrange` is Python 2-only; this script will not run
            unmodified on Python 3 (use `range` there).
            """
            acc = 0
            rmse = 0.
            for i in xrange(dataset.epoch):
                correct_num, _, mse = predict_step(dataset.usr[i], dataset.prd[i], dataset.docs[i],
                                                   dataset.label[i], dataset.sen_len[i], dataset.doc_len[i],
                                                   loss, accuracy, name)
                acc += correct_num
                rmse += mse
            # Mean accuracy over all examples, and root of the mean squared error.
            acc = acc * 1.0 / dataset.data_size
            rmse = np.sqrt(rmse / dataset.data_size)
            return acc, rmse
topacc = 0.
toprmse = 0.
better_dev_acc = 0.
predict_round = 0
# Training loop. For each batch...
for tr_batch in trainbatches:
train_step(tr_batch, loss, accuracy)
current_step = tf.train.global_step(sess, global_step)
if current_step % FLAGS.evaluate_every == 0:
predict_round += 1
print("\nEvaluation round %d:" % (predict_round))
dev_acc, dev_rmse = predict(devset, loss, accuracy, name="dev")
print("dev_acc: %.4f dev_RMSE: %.4f" % (dev_acc, dev_rmse))
test_acc, test_rmse = predict(testset, loss, accuracy, name="test")
print("test_acc: %.4f test_RMSE: %.4f" % (test_acc, test_rmse))
# print topacc with best dev acc
if dev_acc >= better_dev_acc:
better_dev_acc = dev_acc
topacc = test_acc
toprmse = test_rmse
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
print("topacc: %.4f RMSE: %.4f" % (topacc, toprmse))
| [
"xueqianming200@gmail.com"
] | xueqianming200@gmail.com |
21ac7595d1c48ec6845defa0d35ade0a65638217 | 38b88b6123634e4d0deb4ffab4bdb8302dbc9e5a | /modules/estatistica-01/distribuicoes/distribuicao_normal-definicao.py | 23b5dae9208438902ac2a2e7b31f3855faa10625 | [] | no_license | Angelicogfa/data-science | 0c11d165b1d061c71812d596c86e4472a240017c | 30f05a3e62edd278a87f81eba952cce99bc9453e | refs/heads/master | 2020-04-21T09:13:38.211419 | 2019-06-28T13:36:47 | 2019-06-28T13:36:47 | 169,441,917 | 0 | 0 | null | 2019-11-02T07:00:19 | 2019-02-06T16:58:56 | Python | UTF-8 | Python | false | false | 1,482 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
itens = [7.57, 6.72, 5.59, 9.56, 4.79, 4.84, 5.87, 10.23, 9.53, 6.99,
9.51, 9.21, 5.78, 6.72, 8.96, 7.32, 7.64, 8.53, 5.9, 7.93,
8.82, 8.45, 7.99, 5.77, 4.76, 4.49, 8.97, 6.60, 8.55, 6.30,
6.54, 5.98, 10.88, 8.92, 7.01, 7.58, 9.47, 6.34, 6.17, 7.46,
8.78, 7.13, 7.71, 8.06, 7.67, 7.05, 9.66, 4.37, 15.08, 9.20,
7.64, 5.89, 11.16, 5.35, 5.75, 8.98, 8.74, 8.20, 8.79, 5.80,
11.7, 5.53, 7.75, 6.54, 9.79, 7.43, 9.14, 5.78, 10.31, 10.12,
9.68, 8.11, 5.54, 10.41, 8.83, 10.00, 5.54, 10.32, 6.92, 7.93,
10.14, 9.66, 10.67, 8.17, 8.86, 8.40, 5.15, 6.98, 8.19, 8.72,
8.76, 8.02, 8.93, 8.54, 3.26, 10.06, 8.18, 2.43, 9.17, 12.00]
print(itens)
print(np.median(itens))
print(np.std(itens, ddof=1))
stats.probplot(itens, plot= plt)
plt.show()
# Função distribuição normal
# Z = (x - u) / a
# x = valor a ser obtido
# u = média
# a = desvio padrão
# Z = valor para pesquisa na tabela
# A probabilidade é acumulativa da esquerda para a direita
# Validar se a distribuição é normal:
# A média deve ser o centro do histograma
# Deve ser simétrico entre os lados de cada eixo do gráfico
# Deve encontrar a grande maioria dos dados em no máximo 3 desvios padrões da média
# Pode ser utilizado um diagrama de probabilidade normal
| [
"angelicogfa@gmail.com"
] | angelicogfa@gmail.com |
b5851bb47c31679be956cce35108ea80515cd733 | 910be469257538bcbbd15e894679856a1d311252 | /server/service/kernel/migrations/0043_auto_20170424_2209.py | 10536bd456c1285476597fbe490fe0f21ae0fd3c | [] | no_license | bopo/bankeys2 | ece7e7faa93aab48bf5a336721bfa69b33a870d8 | 5a81f5f4cd6442aade444444ba768b9ffa9dcbd4 | refs/heads/master | 2023-08-19T04:16:12.063961 | 2023-08-04T09:09:00 | 2023-08-04T09:09:00 | 119,646,417 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-24 22:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: switch Relancement.creation_time to an
    # auto-populated creation timestamp (verbose_name is the escaped
    # Chinese string \u7533\u8bf7\u65f6\u95f4, "application time").
    dependencies = [
        ('kernel', '0042_auto_20170417_0213'),
    ]
    operations = [
        migrations.AlterField(
            model_name='relancement',
            name='creation_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='\u7533\u8bf7\u65f6\u95f4'),
        ),
    ]
| [
"ibopo@126.com"
] | ibopo@126.com |
1e60f4c33e6b2d6239d2677ec6afe2ff4f9186a6 | 057c525d6fbff928fc0cb0cd6b2930e9494b5d4b | /training-data/py/7-__init__.py | bfd89ded65d2386773e3e370d841ca01d3420cce | [] | no_license | uk-gov-mirror/ukwa.text-id | 0931742d1f2df3091ac52eee6160c177ea98180d | 5f3dcc6436bc46dedb375b37e3fd51c1c0d9b45b | refs/heads/master | 2022-02-26T15:32:15.901527 | 2019-11-19T16:36:06 | 2019-11-19T16:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # a ..a a aAaN
a .a a Aa, a_a_a, a_a_a_a, a_a_a_a, aAaN
a .aAaAaAa a *N
a .aAaAaAa a *N
a .a_a a Aa_AaAa_AaN
a .a_a a Aa_AaAaAa_AaN
a .a_a a Aa_AaAaAa_AaN
a .a_a a Aa_AaAaAa_AaN
# a .a_a a Aa_AaAaAa_AaN
a .a_a_ a Aa_AaAa__AaN
a .a_a a Aa_AaAa_AaN
a .a_a a Aa_AaAaAa_AaN
a .a_a a Aa_Aa_AaN
N
a .aAa a aAaAa | [
"Andrew.Jackson@bl.uk"
] | Andrew.Jackson@bl.uk |
bd80320694ed6fa0379f916daa2fb0c7caa8d53d | 7c51b321d97b6e1f2480941cf6ce17e6fc1eef55 | /hungerstation/hungerstation/doctype/vacation_air_ticket/test_vacation_air_ticket.py | d9afd535d85a06afa39c73daed8577fc0c598c60 | [
"MIT"
] | permissive | poweroftrue/hungerstation | 1c53131a98968b92d678cda28f9db45068ae1454 | 8df88ce77cbde553b21f87511c6875d63b2aeb48 | refs/heads/master | 2020-03-12T09:49:22.202964 | 2018-04-16T09:58:15 | 2018-04-16T09:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Accurate Systems and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestVacationAirTicket(unittest.TestCase):
	"""Placeholder test case for the Vacation Air Ticket doctype; no tests yet."""
	pass
| [
"mhbu50@gmail.com"
] | mhbu50@gmail.com |
35436f7d0a4d6539eac725bb92f926434e59aaf0 | 8a82a83655f118208692e55d7804d9fa480ad4b6 | /book/packt/Mastering.Natural.Language.Processing.with.Python/Chapter 1/ch1_10.py | 5f7445a2ea2ad22292f509ee07c1e70e85cceb00 | [] | no_license | xenron/sandbox-da-python | 0814159da9a91923e4b66c5e40057e381f765e96 | ab8f1c0d57fdc6006355f613012b84165068c315 | refs/heads/master | 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | import nltk
from nltk.tokenize import regexp_tokenize
sent="Don't hesitate to ask questions"
print(regexp_tokenize(sent, pattern='\w+|\$[\d\.]+|\S+'))
| [
"xenron@outlook.com"
] | xenron@outlook.com |
2cc9d0b711bdaca74f11120bcc21b5c032da427a | 2218e1da5cb944e4509f8641ca051de137645c5e | /剑指 Offer/54. KthLargest.py | bff16aa6411802e289ae82e16e257f787326e850 | [] | no_license | Hegemony/Python-Practice | 9e76ebb414433e51c2074602fb0a871891647839 | b68ea41688e9e305635c63fdc43402e2b6fe6524 | refs/heads/main | 2023-05-05T14:00:59.921803 | 2021-06-01T15:38:30 | 2021-06-01T15:38:30 | 301,602,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def kthLargest(self, root: TreeNode, k: int) -> int:
def preTraversal(root, nums):
if root == None:
return
nums.append(root.val)
preTraversal(root.left, nums)
preTraversal(root.right, nums)
nums = []
preTraversal(root, nums)
nums.sort(reverse=True)
return nums[k-1] | [
"noreply@github.com"
] | Hegemony.noreply@github.com |
be017a4ba1b77d079419dd99b0595b8acd34030a | fab39aa4d1317bb43bc11ce39a3bb53295ad92da | /examples/tensorflow/common/object_detection/utils/mask_utils.py | 4dde46ca5e8538cd4e262119415ba0ae1c611d1a | [
"Apache-2.0"
] | permissive | dupeljan/nncf | 8cdce27f25f01ce8e611f15e1dc3036fb8548d6e | 0abfd7103ca212888a946ba4d0fbdb9d436fdaff | refs/heads/develop | 2023-06-22T00:10:46.611884 | 2021-07-22T10:32:11 | 2021-07-22T10:32:11 | 388,719,455 | 0 | 0 | Apache-2.0 | 2021-07-23T07:46:15 | 2021-07-23T07:43:43 | null | UTF-8 | Python | false | false | 3,853 | py | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
def paste_instance_masks(masks, detected_boxes, image_height, image_width):
    """Paste instance masks to generate the image segmentation results.
    Args:
      masks: a numpy array of shape [N, mask_height, mask_width] representing the
        instance masks w.r.t. the `detected_boxes`.
      detected_boxes: a numpy array of shape [N, 4] representing the reference
        bounding boxes.
      image_height: an integer representing the height of the image.
      image_width: an integer representing the width of the image.
    Returns:
      segms: a numpy array of shape [N, image_height, image_width] representing
        the instance masks *pasted* on the image canvas.
    """
    def expand_boxes(boxes, scale):
        """Expands an array of boxes by a given scale."""
        # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227
        # The `boxes` in the reference implementation is in [x1, y1, x2, y2] form,
        # whereas `boxes` here is in [x1, y1, w, h] form
        w_half = boxes[:, 2] * .5
        h_half = boxes[:, 3] * .5
        x_c = boxes[:, 0] + w_half
        y_c = boxes[:, 1] + h_half
        # Scale half-extents about the box center.
        w_half *= scale
        h_half *= scale
        boxes_exp = np.zeros(boxes.shape)
        boxes_exp[:, 0] = x_c - w_half
        boxes_exp[:, 2] = x_c + w_half
        boxes_exp[:, 1] = y_c - h_half
        boxes_exp[:, 3] = y_c + h_half
        # Note: output is in [x1, y1, x2, y2] form.
        return boxes_exp
    # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812
    # To work around an issue with cv2.resize (it seems to automatically pad
    # with repeated border values), we manually zero-pad the masks by 1 pixel
    # prior to resizing back to the original image resolution. This prevents
    # "top hat" artifacts. We therefore need to expand the reference boxes by an
    # appropriate factor.
    _, mask_height, mask_width = masks.shape
    scale = max((mask_width + 2.0) / mask_width, (mask_height + 2.0) / mask_height)
    ref_boxes = expand_boxes(detected_boxes, scale)
    ref_boxes = ref_boxes.astype(np.int32)
    # Reusable canvas for the 1-pixel zero border around each mask.
    padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)
    segms = []
    for mask_ind, mask in enumerate(masks):
        im_mask = np.zeros((image_height, image_width), dtype=np.uint8)
        # Process mask inside bounding boxes.
        padded_mask[1:-1, 1:-1] = mask[:, :]
        ref_box = ref_boxes[mask_ind, :]
        w = ref_box[2] - ref_box[0] + 1
        h = ref_box[3] - ref_box[1] + 1
        # Clamp the target size to at least one pixel.
        w = np.maximum(w, 1)
        h = np.maximum(h, 1)
        mask = cv2.resize(padded_mask, (w, h)) # pylint: disable=E1101
        # Binarize the interpolated mask at 0.5.
        mask = np.array(mask > 0.5, dtype=np.uint8)
        # Clip the paste window to the image canvas.
        x_0 = min(max(ref_box[0], 0), image_width)
        x_1 = min(max(ref_box[2] + 1, 0), image_width)
        y_0 = min(max(ref_box[1], 0), image_height)
        y_1 = min(max(ref_box[3] + 1, 0), image_height)
        im_mask[y_0:y_1, x_0:x_1] = mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]),
                                         (x_0 - ref_box[0]):(x_1 - ref_box[0])]
        segms.append(im_mask)
    segms = np.array(segms)
    assert masks.shape[0] == segms.shape[0]
    return segms
| [
"noreply@github.com"
] | dupeljan.noreply@github.com |
de16a25bb4c0fe0e41345993cb917cb6907c5490 | 09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3 | /Admin/bitly-releases/bitly.py | 5285e9b5e8d0667bd6b843a772839257b5701f7a | [] | no_license | abulka/pynsource | 8ad412b85dc1acaeb83d7d34af8cc033c6baba91 | 979436525c57fdaeaa832e960985e0406e123587 | refs/heads/master | 2023-04-13T12:58:02.911318 | 2023-04-11T09:56:32 | 2023-04-11T09:56:32 | 32,249,425 | 271 | 46 | null | 2022-10-10T04:36:57 | 2015-03-15T07:21:43 | Python | UTF-8 | Python | false | false | 6,024 | py | """
Generate the links for
- DOWNLOADS.md
- Bitly
- Main website html
from parsing the Github release page HTML information
"""
import requests
from bs4 import BeautifulSoup
import bs4
import os
from dataclasses import dataclass # requires 3.7
from typing import List, Set, Dict, Tuple, Optional
import pprint
from beautifultable import BeautifulTable
from textwrap import dedent
releaseUrl = "https://github.com/abulka/pynsource/releases/tag/version-1.77"
response = requests.get(releaseUrl)
assert response.status_code == 200
html_doc = response.text
# with open("junk.html", "w") as fp:
# fp.write(html_doc)
soup = BeautifulSoup(html_doc, "html.parser")
# print(soup)
@dataclass
class DownloadEntity:
    """One downloadable release artifact and its derived bit.ly link."""
    # link: bs4.element.Tag
    url: str  # full GitHub release-asset URL
    basename: str  # e.g. pynsource-1.77-macosx.zip
    basenameNoExtension: str  # dots replaced by dashes for bitly, e.g. pynsource-1-77-macosx
    bitlyUrl: str  # http://bit.ly/<basenameNoExtension>
downloads: Dict[str, DownloadEntity] = {}
for link in soup.find_all("a"):
if "/abulka/pynsource/releases/download/" in link.get("href"):
# print(link.get('href'))
url = f"https://github.com{link.get('href')}" # e.g. https://github.com/abulka/pynsource/releases/download/version-1.77/pynsource-1.77-macosx.zip
basename = os.path.basename(url) # e.g. pynsource-1.77-macosx.zip
basenameNoExtension = os.path.splitext(basename)[0] # e.g. pynsource-1.77-macosx
basenameNoExtension = basenameNoExtension.replace('.', '-') # get rid of the illegal '.' chars bitly doesn't like e.g. pynsource-1-77-macosx
bitlyUrl = f"http://bit.ly/{basenameNoExtension}" # e.g. http://bit.ly/pynsource-1-77-macosx
entity = DownloadEntity(
basename=basename,
basenameNoExtension=basenameNoExtension,
url=url,
bitlyUrl=bitlyUrl,
)
if "-macosx" in basename:
downloads["mac"] = entity
elif "-win-" in basename:
downloads["win"] = entity
elif "-ubuntu-18" in basename:
downloads["ubuntu-18"] = entity
elif "-ubuntu-16" in basename:
downloads["ubuntu-16"] = entity
else:
raise RuntimeError(
f"Unknown url on Github releases page {url} - cannot detect OS"
)
# validate that each download url exists OK - requests can't seem to handle it ?
#
# headers = {
# "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36 Edg/84.0.522.52",
# "Referer": "https://github.com/abulka/pynsource/releases/edit/untagged-3ddd799663921fd65d7a",
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
# "Accept-Encoding": "gzip, deflate, br",
# "Accept-Language": "en-AU,en-GB;q=0.9,en;q=0.8,en-US;q=0.7",
# "Cache-Control": "max-age=0",
# "Connection": "keep-alive",
# "Host": "github.com",
# "Sec-Fetch-Dest": "document",
# "Sec-Fetch-Mode": "navigate",
# "Sec-Fetch-Site": "same-origin",
# "Sec-Fetch-User": "?1",
# "Upgrade-Insecure-Requests": "1",
# }
# for downloadEntity in downloads.values():
# r = requests.head(downloadEntity.url, allow_redirects=True, headers=headers)
# print(r.url)
# # try again - doesn't seem to work, still get a 403
# if r.status_code == 403:
# newUrl = r.url # probably to amazon
# print("trying again...")
# r = requests.head(newUrl, allow_redirects=True, headers=headers)
# if r.status_code == 200:
# print(f"Url {downloadEntity.url} exists OK")
# elif r.status_code == 403:
# raise RuntimeError(
# f"Forbidden download url {downloadEntity.url} status {r.status_code}"
# )
# else:
# raise RuntimeError(
# f"Malformed download url {downloadEntity.url} status {r.status_code}"
# )
# print(downloads)
# pprint.pprint(downloads)
# Now that we have gathered up the information, generate the needed outputs
downloadMarkdown = f"""
* [Mac download]({downloads["mac"].bitlyUrl}) (unzip and drag app into the Applications directory)
* [Windows 10 download]({downloads["win"].bitlyUrl}) (unzip and run the installer)
* [Ubuntu Linux 18.0.4 download]({downloads["ubuntu-18"].bitlyUrl}) (unzip and run the executable)
* [Ubuntu Linux 16.0.4 download]({downloads["ubuntu-16"].bitlyUrl}) (unzip and run the executable)
* [Linux snap installer](http://bit.ly/pynsource-snap) (one-click install on any Ubuntu distro)
"""
print("DOWNLOADS.md")
print(downloadMarkdown)
# Build the bitly summary table, one row per OS.
t = BeautifulTable(max_width=760)
t.column_headers = [
    "OS",
    "download-url",
    "customize back half / title",
    "final bitly-url",
]
t.column_alignments["download-url"] = BeautifulTable.ALIGN_LEFT
t.column_alignments["final bitly-url"] = BeautifulTable.ALIGN_LEFT
# Fixed: the loop variable was named `os`, which shadowed -- and, because
# for-loop variables leak, permanently rebound -- the `os` module import.
for os_name, downloadEntity in downloads.items():
    t.append_row(
        [os_name, downloadEntity.url, downloadEntity.basenameNoExtension, downloadEntity.bitlyUrl]
    )
print("Bitly Entries to create (click on each link in turn (in vscode terminal) to ensure it exists and triggers a download)")
print(t)
print()
htmlFragmentForWebsite = dedent(f"""
<p>The latest version is <code>1.77</code></p>
<ul>
<li><a href="{downloads["mac"].bitlyUrl}" rel="nofollow">Mac download</a> (unzip and drag app into the Applications directory)</li>
<li><a href="{downloads["win"].bitlyUrl}" rel="nofollow">Windows 10 download</a> (unzip and run the installer)</li>
<li><a href="{downloads["ubuntu-18"].bitlyUrl}" rel="nofollow">Ubuntu Linux 18.0.4 download</a> (unzip and run the executable)</li>
<li><a href="{downloads["ubuntu-16"].bitlyUrl}" rel="nofollow">Ubuntu Linux 16.0.4 download</a> (unzip and run the executable)</li>
<li><a href="http://bit.ly/pynsource-snap" rel="nofollow">Linux snap installer</a> (one-click install on any Ubuntu distro)</li>
</ul>
""")
print("Fragment of HTML to put on official website on downloads page")
print(htmlFragmentForWebsite)
| [
"abulka@gmail.com"
] | abulka@gmail.com |
e9f8199bbd0443f5ade26424134dfc5c24dfbf03 | 7c843f80a08db6725fd8d2e85099d9e6c13f6426 | /nets/res-unet1/trainInterface.py | b9b17f7178bc520e09135402109634d591211eae | [] | no_license | wanfade/scaffolding_Seg | e983c1d1cdd60efcd7d381728c277993a1cf4721 | 12ba8892eb44d3ce47fa2609973b0510904c4753 | refs/heads/master | 2023-03-16T05:57:28.808341 | 2017-11-25T13:53:11 | 2017-11-25T13:53:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,608 | py | # coding: utf-8
'''
res-unet
全图训练 自动填充黑边 以适应上下采样
Parameters
----------
step : int
填充黑边 将图片shape 调整为step的整数倍
'''
from lib import *
import logging
logging.basicConfig(level=logging.INFO)
npm = lambda m:m.asnumpy()
npm = FunAddMagicMethod(npm)
import mxnet as mx
from netdef import getNet
class SimpleBatch(object):
    """Minimal MXNet batch container: just data, label and a pad count."""
    def __init__(self, data, label, pad=0):
        self.data = data
        self.label = label
        self.pad = pad
from collections import Iterator
class genImg(Iterator):
    """Iterator yielding shuffled (images, ground-truths) batches read from disk.

    `names` is a sequence of (image_path, gt_path) pairs; the final short
    batch is topped up with random resamples so every batch has exactly
    `batch` items.
    """
    def __init__(self,names,batch=1,
                 handleImgGt=None,
                 timesPerRead=1,
                 ):
        self.names = names
        self.batch = batch
        self.tpr = timesPerRead
        # Optional callback applied to each (imgs, gts) pair before yielding.
        self.handleImgGt = handleImgGt
        self.genNameBatchs()
    def genNameBatchs(self):
        """Reshuffle the name list and split it into fixed-size batches."""
        import random
        self.now = 0
        random.shuffle(self.names)
        batch = self.batch
        nameBatchs = listToBatch(self.names,batch)
        # Top up the last (possibly short) batch with random extra samples.
        more = (batch - len(nameBatchs[-1]))
        nameBatchs[-1] += tuple(random.sample(self.names,more))
        self.nameBatchs = nameBatchs
        self.lenn = len(nameBatchs)
    # Alias so MXNet-style iterators can call .reset() between epochs.
    reset = genNameBatchs
    def next(self):
        """Return the next batch; reshuffle and raise StopIteration at epoch end."""
        now,lenn,names = self.now,self.lenn,self.nameBatchs
        if lenn == now:
            # Epoch finished: reshuffle for the next pass and end this one.
            self.genNameBatchs()
            raise StopIteration
        self.now += 1
        imgs = [];gts = []
        for img,gt in names[now]:
            imgs.append(imread(img))
            gts.append(imread(gt))
        if self.handleImgGt:
            return self.handleImgGt(imgs,gts)
        return (imgs,gts)
labrgb = lambda lab:cv2.cvtColor(lab,cv2.COLOR_LAB2RGB)
randint = lambda x:np.random.randint(-x,x)
def imgToLab(img,gt):
    """Convert an RGB image to LAB colour space; `gt` is accepted but unused."""
    labr=cv2.cvtColor(img,cv2.COLOR_RGB2LAB)#/np.float32(255)
    return labr
def imgAug(img,gt,prob=.5):
    """Randomly augment (img, gt): LAB conversion and horizontal flip, each with probability `prob`."""
    lab = img
    if np.random.random()<prob:
        lab = imgToLab(img,gt)
    if np.random.random()<prob:
        # Flip image and ground truth together so they stay aligned.
        lab=np.fliplr(lab)
        gt=np.fliplr(gt)
    # show(labrgb(lab),img)
    return lab,gt
def imgGtAdd0Fill(step=1):
    """Return a batch handler that zero-pads each image/gt so height and width are multiples of `step`."""
    def innerf(imgs,gts):
        # Target canvas size from the first (resized) image, rounded up to a multiple of step.
        img = imgs[0][::c.resize,::c.resize]
        h,w = img.shape[:2]
        hh = ((h-1)//step+1)*step
        ww = ((w-1)//step+1)*step
        nimgs,ngts=[],[]
        for img,gt in zip(imgs,gts):
            # Binarize the ground truth, downsample, augment, normalize to [0, 1].
            gt=gt>.5
            img,gt = img[::c.resize,::c.resize],gt[::c.resize,::c.resize]
            img,gt = imgAug(img,gt)
            img = img/255.
            nimg = np.zeros((hh,ww,3))
            # NOTE(review): np.bool was removed in NumPy 1.24+; use bool there.
            ngt = np.zeros((hh,ww),np.bool)
            h,w = img.shape[:2]
            # Paste into the zero canvas (black padding along right/bottom edges).
            nimg[:h,:w] = img
            ngt[:h,:w]=gt
            nimgs.append(nimg)
            ngts.append(ngt)
        imgs,gts=np.array(nimgs),np.array(ngts)
        # return imgs,gts
        # NHWC -> NCHW for MXNet, then wrap as a SimpleBatch.
        imgs = imgs.transpose(0,3,1,2)
        mximgs = map(mx.nd.array,[imgs])
        mxgtss = map(mx.nd.array,[gts])
        mxdata = SimpleBatch(mximgs,mxgtss)
        return mxdata
    return innerf
class GenSimgInMxnet(genImg):
    """genImg variant exposing the data/label shapes MXNet's module API expects."""
    @property
    def provide_data(self):
        # (batch, channels, height, width)
        return [('data', (c.batch, 3, c.simgShape[0], c.simgShape[1]))]
    @property
    def provide_label(self):
        # Per-pixel labels: (batch, height, width)
        return [('softmax1_label', (c.batch, c.simgShape[0], c.simgShape[1])),]
def saveNow(name = None):
    """Checkpoint the current module params under `name` (default args.prefix), tagged as epoch -1."""
    f=mx.callback.do_checkpoint(name or args.prefix)
    f(-1,mod.symbol,*mod.get_params())
c = dicto(
gpu = 1,
lr = 0.01,
epochSize = 10000,
step=64
)
c.resize = 1
if __name__ == '__main__':
from train import args
else:
from configManager import args
c.update(args)
args = c
img = imread(c.names[0][0])
img = img[::c.resize,::c.resize]
h,w = img.shape[:2]
hh = ((h-1)//c.step+1)*c.step
ww = ((w-1)//c.step+1)*c.step
args.simgShape = (hh,ww)
net = getNet(args.classn)
if args.resume:
print('resume training from epoch {}'.format(args.resume))
_, arg_params, aux_params = mx.model.load_checkpoint(
args.prefix, args.resume)
else:
arg_params = None
aux_params = None
if 'plot' in args:
mx.viz.plot_network(net, save_format='pdf', shape={
'data': (1, 3, 640, 640),
'softmax1_label': (1, 640, 640), }).render(args.prefix)
exit(0)
mod = mx.mod.Module(
symbol=net,
context=[mx.gpu(k) for k in range(args.gpu)],
data_names=('data',),
label_names=('softmax1_label',)
)
c.mod = mod
#if 0:
args.names = args.names[:]
# data = GenSimgInMxnet(args.names, args.simgShape,
# handleImgGt=handleImgGt,
# batch=args.batch,
# cache=None,
# iters=args.epochSize
# )
gen = GenSimgInMxnet(args.names,c.batch,handleImgGt=imgGtAdd0Fill(c.step))
g.gen = gen
total_steps = len(c.names) * args.epoch
lr_sch = mx.lr_scheduler.MultiFactorScheduler(
step=[total_steps // 2, total_steps // 4 * 3], factor=0.1)
def train():
    """Fit the module with SGD and the multi-factor LR schedule, checkpointing every epoch."""
    mod.fit(
        gen,
        begin_epoch=args.resume,
        arg_params=arg_params,
        aux_params=aux_params,
        batch_end_callback=mx.callback.Speedometer(args.batch),
        epoch_end_callback=mx.callback.do_checkpoint(args.prefix),
        optimizer='sgd',
        optimizer_params=(('learning_rate', args.lr), ('momentum', 0.9),
                          ('lr_scheduler', lr_sch), ('wd', 0.0005)),
        num_epoch=args.epoch)
if __name__ == '__main__':
pass
if 0:
#%%
ne = g.gen.next()
#for ne in dd:
ds,las = ne.data, ne.label
d,la = npm-ds[0],npm-las[0]
im = d.transpose(0,2,3,1)
show(labrgb(uint8(im[0])));show(la)
| [
"ylxx@live.com"
] | ylxx@live.com |
b68b7615a7af8bb6f8aee3839a354f867e3f5bc5 | e26bf05bc4177e15c5f8cb28690882189d332bdf | /transformers_keras/question_answering/readers.py | f1662f73bc73213045db4c7d2e1530ae5abb8529 | [
"Apache-2.0"
] | permissive | OuyKai/transformers-keras | 1e4ed574acafcb807f3073f45e6462025c0139e5 | 58b87d5feb5632e3830c2d3b27873df6ae6be4b3 | refs/heads/master | 2023-09-06T07:50:10.404744 | 2021-11-23T02:34:34 | 2021-11-23T02:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | import json
import logging
import os
def read_jsonl_files_for_prediction(
    input_files, conetxt_key="context", question_key="question", answers_key="answer", **kwargs
):
    """Yield QA instances read from JSON-lines files.

    Each yielded dict has keys "context", "question" and "answer"; when a
    record stores a list of answers, only the first one is kept.

    Args:
        input_files: a path or a list of paths to .jsonl files.
        conetxt_key: record key holding the context (name kept as-is, typo
            and all, for backward compatibility with existing callers).
        question_key: record key holding the question.
        answers_key: record key holding the answer string or list of strings.
        **kwargs: ignored; accepted for interface compatibility.
    """
    if isinstance(input_files, str):
        input_files = [input_files]
    for f in input_files:
        if not os.path.exists(f):
            # Fixed: the placeholder was "%d", which cannot format a path
            # string and broke this warning for missing files.
            logging.warning("File %s does not exist, skipped.", f)
            continue
        with open(f, mode="rt", encoding="utf-8") as fin:
            for line in fin:
                line = line.strip()
                if not line:
                    continue
                data = json.loads(line)
                answers = data[answers_key]
                if isinstance(answers, str):
                    answers = [answers]
                answer = answers[0]
                instance = {"context": data[conetxt_key], "question": data[question_key], "answer": answer}
                yield instance
| [
"zhouyang.luo@gmail.com"
] | zhouyang.luo@gmail.com |
268edd9811fd6743a2f68e9cdc53f307295bd5df | ff23900a911e099595c392a7efab1d268b4f5f7d | /python_modules/libraries/dagster-census/dagster_census_tests/test_op.py | e552b2e971e3033594f96e914cba86674bacb4b9 | [
"Apache-2.0"
] | permissive | zkan/dagster | bbf2da091bdc7fca028c569db72b9c68ddf55e98 | b2b19edb71fc8985f505b116927350dd23b4a7d9 | refs/heads/master | 2022-08-24T03:20:12.583577 | 2022-08-16T00:01:23 | 2022-08-16T00:01:23 | 244,012,061 | 0 | 0 | Apache-2.0 | 2020-02-29T17:33:24 | 2020-02-29T17:33:24 | null | UTF-8 | Python | false | false | 1,952 | py | import responses
from dagster_census import CensusOutput, census_resource, census_trigger_sync_op
from dagster import job, op
from .utils import (
get_destination_data,
get_source_data,
get_sync_data,
get_sync_run_data,
get_sync_trigger_data,
)
def test_census_trigger_sync_op():
    """End-to-end check of census_trigger_sync_op against fully mocked HTTP endpoints."""
    cen_resource = census_resource.configured({"api_key": "foo"})

    # Upstream no-op so the test also covers the `start_after` dependency edge.
    @op
    def foo_op():
        pass

    # Job wiring: the sync op runs after foo_op and reads its sync settings
    # (sync 52, no poll delay, 10s timeout) from the run config below.
    @job(
        resource_defs={"census": cen_resource},
        config={
            "ops": {
                "census_trigger_sync_op": {
                    "config": {
                        "sync_id": 52,
                        "poll_interval": 0,
                        "poll_timeout": 10,
                    }
                }
            }
        },
    )
    def census_sync_job():
        census_trigger_sync_op(start_after=foo_op())

    # Register a canned response for every Census endpoint the op touches:
    # sync metadata, its source/destination, the trigger call, and the
    # resulting sync run that the op polls.
    with responses.RequestsMock() as rsps:
        rsps.add(
            rsps.GET,
            "https://app.getcensus.com/api/v1/syncs/52",
            json=get_sync_data(),
        )
        rsps.add(
            rsps.GET,
            "https://app.getcensus.com/api/v1/sources/15",
            json=get_source_data(),
        )
        rsps.add(
            rsps.GET,
            "https://app.getcensus.com/api/v1/destinations/15",
            json=get_destination_data(),
        )
        rsps.add(
            rsps.POST,
            "https://app.getcensus.com/api/v1/syncs/52/trigger",
            json=get_sync_trigger_data(),
        )
        rsps.add(
            rsps.GET,
            "https://app.getcensus.com/api/v1/sync_runs/94",
            json=get_sync_run_data(),
        )

        result = census_sync_job.execute_in_process()
        # The op's output must bundle the run together with the sync's
        # source and destination metadata.
        assert result.output_for_node("census_trigger_sync_op") == CensusOutput(
            sync_run=get_sync_run_data()["data"],
            source=get_source_data()["data"],
            destination=get_destination_data()["data"],
        )
| [
"noreply@github.com"
] | zkan.noreply@github.com |
b149337554b2282c3286a0bcf124a42801eccad7 | 682526c4fa74951f5551310d92b19f9948f67b89 | /tapioca_jarbas/tapioca_jarbas.py | 42fda2683f6ec05780471d136c39fce0d2c44ce2 | [
"MIT"
] | permissive | indigos33k3r/tapioca-jarbas | 458d8b0cefc0425c7d94ae25c572d0c931a62671 | e54846a1aa7a2b2bcaa23126f21492f9da475704 | refs/heads/master | 2020-04-13T23:18:13.797237 | 2017-11-01T21:13:01 | 2017-11-01T21:13:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | from tapioca import (
TapiocaAdapter, generate_wrapper_from_adapter, JSONAdapterMixin)
from .resource_mapping import RESOURCE_MAPPING
class JarbasClientAdapter(JSONAdapterMixin, TapiocaAdapter):
    """Tapioca adapter for the Jarbas (Serenata de Amor) REST API."""

    api_root = 'https://jarbas.serenatadeamor.org/api/'
    resource_mapping = RESOURCE_MAPPING

    def get_iterator_list(self, response_data):
        # Paginated responses wrap the items under 'results'; otherwise the
        # payload itself is already the list.
        return response_data.get('results', response_data)

    def get_iterator_next_request_kwargs(self, iterator_request_kwargs,
                                         response_data, response):
        following_page = response_data.get('next', '')
        if not following_page:
            # No further pages: returning None stops the iterator.
            return None
        # The next-page URL already embeds the query string, so drop any
        # stale 'params' before reusing the request kwargs.
        iterator_request_kwargs['url'] = following_page
        iterator_request_kwargs.pop('params', None)
        return iterator_request_kwargs


Jarbas = generate_wrapper_from_adapter(JarbasClientAdapter)
| [
"daniloshiga@gmail.com"
] | daniloshiga@gmail.com |
c5df412987c4bf17583da28903931d117431accc | 279f415dd1e06c594c6c87deda57e201c73c4542 | /test/espnet2/layers/test_mask_along_axis.py | 61f62562a222d988d407a5c997e71dcd8802261d | [
"Apache-2.0"
] | permissive | espnet/espnet | f7ba47271c1a6b1ed606dbbfb04a7f14220bb585 | bcd20948db7846ee523443ef9fd78c7a1248c95e | refs/heads/master | 2023-08-28T23:43:34.238336 | 2023-08-23T02:51:39 | 2023-08-23T02:51:39 | 114,054,873 | 7,242 | 2,244 | Apache-2.0 | 2023-09-14T08:01:11 | 2017-12-13T00:45:11 | Python | UTF-8 | Python | false | false | 1,043 | py | import pytest
import torch
from espnet2.layers.mask_along_axis import MaskAlongAxis
@pytest.mark.parametrize("requires_grad", [False, True])
@pytest.mark.parametrize("replace_with_zero", [False, True])
@pytest.mark.parametrize("dim", ["freq", "time"])
def test_MaskAlongAxis(dim, replace_with_zero, requires_grad):
    """Masking must preserve sequence lengths and keep gradients flowing."""
    masker = MaskAlongAxis(
        dim=dim,
        mask_width_range=30,
        num_mask=2,
        replace_with_zero=replace_with_zero,
    )

    feats = torch.randn(2, 100, 80, requires_grad=requires_grad)
    feat_lengths = torch.tensor([80, 78])
    masked, masked_lengths = masker(feats, feat_lengths)
    # Lengths are passed through untouched.
    for before, after in zip(feat_lengths, masked_lengths):
        assert before == after
    if requires_grad:
        # The masked output must still be differentiable.
        masked.sum().backward()
@pytest.mark.parametrize("replace_with_zero", [False, True])
@pytest.mark.parametrize("dim", ["freq", "time"])
def test_MaskAlongAxis_repr(dim, replace_with_zero):
    """Smoke-test that the layer's repr renders without raising."""
    masker = MaskAlongAxis(
        dim=dim,
        mask_width_range=30,
        num_mask=2,
        replace_with_zero=replace_with_zero,
    )
    print(masker)
| [
"naoyuki.kamo829@gmail.com"
] | naoyuki.kamo829@gmail.com |
0c8fc6ce245ed6f32ae7a857ba2561de41e4a544 | 0f0f8b3b027f412930ca1890b0666538358a2807 | /dotop/tools/amount_to_text_en.py | ff67589e6bb8f9d80bfd30c551ac13aba3354988 | [] | no_license | konsoar/dotop_pos_v11 | 741bd5ca944dfd52eb886cab6f4b17b6d646e131 | 576c860917edd25661a72726d0729c769977f39a | refs/heads/master | 2021-09-06T13:25:34.783729 | 2018-02-07T02:11:12 | 2018-02-07T02:11:12 | 111,168,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,146 | py | # -*- coding: utf-8 -*-
# Part of dotop. See LICENSE file for full copyright and licensing details.
import logging
from translate import _
_logger = logging.getLogger(__name__)
#-------------------------------------------------------------
#ENGLISH
#-------------------------------------------------------------
# Words for 0-19, indexed directly by value.
to_19 = ( 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
          'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen',
          'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen' )
# Words for the tens from 20 to 90; index i maps to 20 + 10*i.
tens = ( 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety')
# Scale words for successive powers of 1000; index i maps to 1000**i
# (index 0 is the empty string for values below one thousand).
denom = ( '',
          'Thousand', 'Million', 'Billion', 'Trillion', 'Quadrillion',
          'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
          'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
          'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn(val):
    """Convert a value below 100 to English words."""
    if val < 20:
        return to_19[val]
    # `floor` is the smallest value covered by tens[idx]; pick the first
    # bucket whose upper bound exceeds val.
    for idx, tens_word in enumerate(tens):
        floor = 20 + 10 * idx
        if floor + 10 > val:
            units = val % 10
            if units:
                return tens_word + '-' + to_19[units]
            return tens_word
def _convert_nnn(val):
    """Convert a value below 1000 to English words.

    Special-cased because it anchors the below-100 case; calling it directly
    also yields strings of the form 'Forty-Five Hundred'.
    """
    hundreds, remainder = divmod(val, 100)
    pieces = []
    if hundreds > 0:
        pieces.append(to_19[hundreds] + ' Hundred')
    if remainder > 0:
        pieces.append(_convert_nn(remainder))
    return ' '.join(pieces)
def english_number(val):
    """Spell out a non-negative integer in English words."""
    if val < 100:
        return _convert_nn(val)
    if val < 1000:
        return _convert_nnn(val)
    # Find the first power of 1000 that exceeds val; the group below it
    # carries the leading words and the matching denomination.
    for power in range(len(denom)):
        if 1000 ** power > val:
            group_size = 1000 ** (power - 1)
            leading, rest = divmod(val, group_size)
            ret = _convert_nnn(leading) + ' ' + denom[power - 1]
            if rest > 0:
                # Recurse on the remainder for the lower-order groups.
                ret = ret + ', ' + english_number(rest)
            return ret
def amount_to_text(number, currency):
    """Spell a monetary amount in English, e.g. 1.02 -> 'One euro and Two Cents'.

    NOTE: shadowed by the generic dispatcher of the same name defined below;
    the `_translate_funcs` registry binds this implementation before the
    redefinition, so it remains reachable via the dispatcher.

    Fixed: the local holding the split amount shadowed the builtin `list`.
    """
    number = '%.2f' % number
    units_name = currency
    parts = str(number).split('.')
    start_word = english_number(int(parts[0]))
    end_word = english_number(int(parts[1]))
    cents_number = int(parts[1])
    # Historical pluralisation kept as-is: 0 or 1 cent -> 'Cent'.
    cents_name = (cents_number > 1) and 'Cents' or 'Cent'
    return ' '.join(filter(None, [start_word, units_name, (start_word or units_name) and (end_word or cents_name) and 'and', end_word, cents_name]))
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
# Registry of per-language translation functions; 'en' binds the currency
# implementation defined above (before this name is shadowed below).
_translate_funcs = {'en' : amount_to_text}

#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='en', currency='euro'):
    """ Converts an integer to its textual representation, using the language set in the context if any.

        Example::

            1654: thousands six cent cinquante-quatre.
    """
    # Fixed: dict.has_key() is Python-2-only and long deprecated; a plain
    # membership test behaves identically on both Python 2 and 3. The unused
    # `import dotop.loglevels` and the commented-out size guard that
    # referenced it have been dropped.
    if lang not in _translate_funcs:
        _logger.warning(_("no translation function found for lang: '%s'"), lang)
        #TODO: (default should be en) same as above
        lang = 'en'
    return _translate_funcs[lang](abs(nbr), currency)
if __name__=='__main__':
    # Manual demo (Python 2 syntax): spell out a range of numbers, or the
    # single number given on the command line.
    from sys import argv
    # NOTE(review): only 'en' is registered in _translate_funcs, so 'nl' here
    # falls back to English inside amount_to_text -- confirm intent.
    lang = 'nl'
    # NOTE(review): `int_to_text` is not defined anywhere in this module; this
    # demo would raise NameError if executed. Presumably it predates the
    # rename to `english_number`/`amount_to_text` -- confirm before use.
    if len(argv) < 2:
        for i in range(1,200):
            print i, ">>", int_to_text(i, lang)
        for i in range(200,999999,139):
            print i, ">>", int_to_text(i, lang)
    else:
        print int_to_text(int(argv[1]), lang)
| [
"Administrator@20nuo003-PC"
] | Administrator@20nuo003-PC |
816d8629ef45304e5ba47462013cad82e344a259 | f5ce05395e4b37ea5d970073f95681d3a880aefd | /setup.py | 27845d5d9659006759224cf1dabf78b80890a412 | [
"MIT"
] | permissive | simondlevy/gym-mygame | 2ef960a8cfd546f3f4abd42e1bcd952840416223 | e04495425117f1cd8ffe2e840f4561d6fdcaf50d | refs/heads/master | 2022-07-13T16:52:39.760990 | 2020-05-12T20:44:41 | 2020-05-12T20:44:41 | 263,425,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | #!/usr/bin/env python3
'''
Python distutils setup file for gym-mygame module.
Copyright (C) 2020 Simon D. Levy
MIT License
'''
#from distutils.core import setup
from setuptools import setup
# Distutils/setuptools package metadata for the gym_mygame environment.
# Runtime requirements (gym, numpy) are declared via install_requires.
setup (name = 'gym_mygame',
       version = '0.1',
       install_requires = ['gym', 'numpy'],
       description = 'Gym environment for my CSCI 316 game',
       packages = ['gym_mygame', 'gym_mygame.envs'],
       author='Simon D. Levy',
       author_email='simon.d.levy@gmail.com',
       url='https://github.com/simondlevy/studenta21/gym-mygame',
       license='MIT',
       platforms='Linux; Windows; OS X'
    )
| [
"simon.d.levy@gmail.com"
] | simon.d.levy@gmail.com |
9bd40a000147a571fe7d40700465c556556526c7 | 4567c7caa29288dda264cb78f6bc7ef2a6eeb756 | /SetDataStructure/MathOpsSet.py | 729dcee00cde6ed866eda38d638315232ea90155 | [] | no_license | JaspinderSingh786/Python3BasicsToAdvance | dc0c676e7efb0749288425dd3922a716b389199d | 00e9cb66bb2e5e35736fe8032e233a9d178cb038 | refs/heads/master | 2022-12-23T11:01:38.626288 | 2019-05-15T06:08:36 | 2019-05-15T06:08:36 | 300,102,348 | 0 | 0 | null | 2020-10-01T01:03:21 | 2020-10-01T01:03:21 | null | UTF-8 | Python | false | false | 672 | py | # union to return all the elements present in both sets
# Demo script: the set algebra operators and their method equivalents.
# x = {0, 2, 4, 6, 8}; y = {6, 8, 10, 12, 14, 16, 18}.
x =set(range(0,10,2))
y = set(range(6,20,2))
# Union: every element present in either set (operator | or .union()).
print(x|y)
print(x.union(y))
# Intersection: elements common to both sets (operator & or .intersection()).
print(x.intersection(y)) # or
print(x&y)
# Difference: x-y keeps elements of x not in y; y-x is the reverse.
print(x.difference(y))
print(x-y)
print(y.difference(x))
print(y-x)
# Symmetric difference: elements in exactly one of the two sets (^).
print(x.symmetric_difference(y))
print(x^y)
# Membership operators: in, not in.
print(10 in x)
print(10 not in x)
# Set comprehensions: build a set from an expression over a range.
s = {z*z for z in range(1,10)}
print(s)
s = {c**2 for c in range(1,10,2)}
print(s)
| [
"vivekgoswami71@gmail.com"
] | vivekgoswami71@gmail.com |
265cd5ce49260eb4a369231f4af087e09bb9f225 | 4042d12cc6ece8e690331a03fbe7936f2b85cc31 | /assets_app/models/assets_main.py | 4c6b8f4bc101bbfa53fb2e3250cde82403d6106e | [] | no_license | terroristhouse/Odoo13 | 551b65d18a934e7cfb1bcb2a571110ca524d80b8 | be4789c2c38dffe9afc3495c7f17f629cb458c89 | refs/heads/master | 2022-12-01T05:31:30.892018 | 2020-08-17T00:48:45 | 2020-08-17T00:48:45 | 278,875,024 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | from odoo import fields, models, api
class AssetsMain(models.Model):
    """Asset record: a piece of equipment, its location, category and user."""

    _name = 'assets.main'
    _description = '资产'
    _order = 'name'

    name = fields.Char('设备编号', required=True)  # equipment number (unique, see SQL constraints)
    desc_detail = fields.Text('备注')  # free-form notes about the equipment
    number = fields.Integer('数量', required=True)  # asset quantity
    sequ = fields.Char('序列号')  # serial number (unique, see SQL constraints)
    local_id = fields.Many2one('assets.site', '地点', required=True)  # site/location
    section_id = fields.Many2one('assets.section', '部门')  # owning department
    user_id = fields.Many2one('assets.user', '使用人')  # current user of the asset
    cate_id = fields.Many2one('assets.cate', '类别', required=True)  # asset category
    # Confidentiality level: public / secret / confidential / top secret.
    secret_id = fields.Selection(
        [('gongkai', '公开'),
         ('mimi', '秘密'),
         ('jimi', '机密'),
         ('juemi', '绝密')], '密级', required=True
    )  # asset confidentiality level
    priority = fields.Selection(
        [('0', 'Low'),
         ('1', 'Normal'),
         ('2', 'High')],
        'Priority', default='1'
    )
    kanban_state = fields.Selection(
        [('normal', 'In Progress'),
         ('blocked', 'Blocked'),
         ('done', 'Ready for next stage')],
        'Kanban State', default='normal'
    )
    type_id = fields.Many2one('assets.type', '型号')  # equipment model/type
    use_ids = fields.One2many('assets.use', 'zichan_id', string='使用记录')  # usage history records
    # Database-level uniqueness for equipment number and serial number.
    _sql_constraints = [
        ('unique_course_name',
         'unique(name)', '设备编号重复!'),
        ('unique_course_sequ',
         'unique(sequ)', '设备序列号重复!')
    ]
    @api.model
    def _default_stage(self):
        # Default stage for new assets: the first stage record found.
        Stage = self.env['assets.main.stage']
        return Stage.search([], limit=1)
    @api.model
    def _group_expand_stage_id(self, stages, domain, order):
        # Show every stage as a kanban column, even when empty.
        return stages.search([], order=order)
    stage_id = fields.Many2one('assets.main.stage', default=_default_stage, group_expand='_group_expand_stage_id')
    state_use = fields.Selection(related='stage_id.state')
| [
"867940410@qq.com"
] | 867940410@qq.com |
23f0c67f201967b6850945aa7d07c32191f2f9b8 | 7489448f6279fb4821ad49bc9475a2ddafd2570f | /.venv/lib/python3.8/site-packages/finmarketpy/network_analysis/learn_network_structure.py | 821e5917c4becc640c3353909b9b755ed1ae70a5 | [
"MIT"
] | permissive | webclinic017/VectorBTanalysis | a37df299103e63e350a6fb83caaeb9b3dc0b9542 | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | refs/heads/master | 2023-03-16T02:03:34.288818 | 2020-09-05T22:59:50 | 2020-09-05T22:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,358 | py | # Project: finmarketpy project
# Filename: learn_network_structure
# Objective: compute a network graph for a group of asset return time series
# Created: 2019-11-02 12:05
# Version: 0.0
# Author: FS
# importing packages
import numpy as np
from sklearn import cluster, covariance, manifold
def learn_network_structure(ts_returns_data, names, alphas=4, cv=5, mode='cd',
                            assume_centered = False,
                            n_components=2, n_neighbors=5,
                            eigen_solver="dense", method='standard',
                            neighbors_algorithm="auto",
                            random_state = None, n_jobs=None,
                            standardise=False):
    """Learn a sparse dependency graph over asset-return series plus a 2-D layout.

    Fits a cross-validated graphical lasso to the return matrix, clusters the
    instruments by affinity propagation on the learned covariance (printing the
    members of each cluster), and computes a locally linear embedding of the
    instruments for plane visualisation.

    Fixed: with ``standardise=True`` the original fitted the model, clustered
    and embedded twice -- once on the raw data and again on the standardised
    data -- discarding the first pass entirely. The data is now standardised
    up front and everything is fitted exactly once; returned values for both
    settings of ``standardise`` are unchanged.

    Parameters
    ----------
    ts_returns_data : ndarray of shape [n_samples, n_instruments]
        Time-series matrix of returns.
    names : array-like of shape [n_instruments]
        Names of the financial instruments (indexable by a boolean mask).
    alphas, cv, mode, assume_centered :
        Passed through to sklearn.covariance.GraphicalLassoCV.
    n_components, n_neighbors, eigen_solver, method, neighbors_algorithm,
    random_state, n_jobs :
        Passed through to sklearn.manifold.LocallyLinearEmbedding.
    standardise : bool
        If True, scale each instrument's returns to unit variance first
        (the input array itself is not modified).

    Returns
    -------
    tuple
        (fitted GraphicalLassoCV, fitted LocallyLinearEmbedding,
         embedding array of shape [n_components, n_instruments],
         integer cluster label per instrument)
    """
    if not isinstance(ts_returns_data, (np.ndarray, np.generic)):
        raise TypeError("ts_returns_data must be of class ndarray")

    if standardise:
        # Unit variance per instrument so no single series dominates the fit.
        ts_returns_data = ts_returns_data / ts_returns_data.std(axis=0)

    # Learn the sparse inverse-covariance (graphical) structure.
    edge_model = covariance.GraphicalLassoCV(alphas=alphas, cv=cv, mode=mode,
                                             assume_centered=assume_centered)
    edge_model.fit(ts_returns_data)

    # Cluster instruments on the learned covariance using affinity propagation.
    _, labels = cluster.affinity_propagation(edge_model.covariance_)
    n_labels = labels.max()
    for i in range(n_labels + 1):
        print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))

    # Low-dimension embedding of the instruments - useful for 2D visualisation.
    node_position_model = manifold.LocallyLinearEmbedding(
        n_components=n_components, eigen_solver=eigen_solver,
        n_neighbors=n_neighbors, method=method,
        neighbors_algorithm=neighbors_algorithm,
        random_state=random_state, n_jobs=n_jobs)
    embedding = node_position_model.fit_transform(ts_returns_data.T).T

    return edge_model, node_position_model, embedding, labels
| [
"eorlowski6@gmail.com"
] | eorlowski6@gmail.com |
9b34fda8067ba60916db6d5830d18b528fb2163a | bf813d2b877fb8ba62feb4263484db3d0f26d5cd | /coma/catalogue_manipulation/move_cat_to_d_coma.py | c4553074f71305e798c3de2117e40e6a93870ec9 | [] | no_license | 9217392354A/astro-scripts | 1e8e8c827097a877518d1f3e10870a5c2609417c | cd7a175bd504b4e291020b551db3077b067bc632 | refs/heads/master | 2021-01-13T00:40:57.481755 | 2016-03-25T17:04:28 | 2016-03-25T17:04:28 | 54,730,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,346 | py | #program to move a catalogue to the distance of coma
# Chris Fuller March 2014
#import modules
import numpy as np
from os.path import join as pj
import atpy as at
from copy import copy, deepcopy
import matplotlib.pyplot as plt
from pylab import bar
import pdb
#pdb.set_trace()
#i/o
# Script configuration (Python 2). Loads the optical input catalogue and sets
# the per-cluster conversion factors used below.
print 'reading in cat . . .'
folder = '/Users/chrisfuller/Dropbox/phd/herchel/coma/aux_data'
cat = at.Table(pj(folder, 'fornax_input.fits'),type='fits')
output = 'fornax_at_100mpc-030314.fits'
#key parameters
#coor_names = ['RA (2000)', 'DEC (2000)'] # these are the colum names that containe ra and dec ### Virgo ###
coor_names = ['GRA2000', 'GDEC2000'] # these are the colum names that containe ra and dec ####### Fornax ###
optical_col = 'BTmag_1'
flux_cols = ['F100', 'F160', 'F250', 'F350', 'F500' ]
optical_lim = 14.89 # faintest magnitude that is possible to select at the distance of the coma cluster
# NOTE(review): `x` is read as a module-level global inside
# nth_nearest_neighbour() below -- renaming it would silently break that.
x = 0.30 #conversion between deg and mpc
dist_x = 0.0289#scale fluxes
# conversion between degrees to mpc
#coma x = 1.77
#virgo x= 0.25
#fornax x=0.30
#flux scales
#coma = 1.0
#virgo = 0.0196
#fornax = 0.0289
# # # # # # # # # # # # # # # Function # # # # # # # # # # # # # # # # # # # # # # # # # #
#function to produce new cat with column added for the nth nearest neigboure
def nth_nearest_neighbour(t, coor_names):
    """Add 1st/5th/10th nearest-neighbour distances (Mpc) and the matching
    log surface densities (SIGMA_N = log10(N / (pi * D_N**2))) to table `t`.

    `coor_names` gives the RA/DEC column names. Uses the module-level `x`
    (deg -> Mpc) conversion factor. O(n^2) over the table; requires at least
    11 rows, since radius[10] is indexed below. radius[0] is each source's
    zero distance to itself, which is why indexing starts at 1.
    """
    print 'nth_nearest_neighbour....'
    #add columnd for D1,D5, and D10
    # NOTE(review): np.float is a deprecated alias (removed in NumPy 1.24);
    # fine for the Python-2-era stack this script targets.
    t.add_empty_column('D1', dtype = np.float)
    t.add_empty_column('D5', dtype = np.float)
    t.add_empty_column('D10', dtype = np.float)
    t.add_empty_column('SIGMA1', dtype = np.float)
    t.add_empty_column('SIGMA5', dtype = np.float)
    t.add_empty_column('SIGMA10', dtype = np.float)
    ###### part 2 #######
    # find nearest neighbours
    #ra1 and dec1
    ra_1 = t[coor_names[0]]
    dec_1 = t[coor_names[1]]
    #loop through all members of catA
    for i in range(0, len(t)):
        ra = t[coor_names[0]][i]
        dec = t[coor_names[1]][i]
        # Broadcast this source's position against the whole catalogue.
        #ra2 and dec2
        ra_2 = np.array([ra]*len(ra_1), dtype=np.float)
        dec_2 = np.array([dec]*len(ra_1), dtype=np.float)
        #caculate distance to all sources from ra1 and dec1
        radius = np.sort(distance(ra_1, dec_1, ra_2, dec_2 ))
        #print radius[1]*1.77*1000.0, np.min(radius)
        #add values to table (degrees scaled to Mpc via global `x`)
        t['D1'][i] = radius[1] * x
        t['D5'][i] = radius[5] * x
        t['D10'][i] = radius[10]* x
        t['SIGMA1'][i] = np.log10(1.0 / (np.pi*(radius[1]*x)**2.0) )
        t['SIGMA5'][i] = np.log10(5.0 / (np.pi*(radius[5]*x)**2.0) )
        t['SIGMA10'][i] = np.log10(10.0 / (np.pi*(radius[10]*x)**2.0))
    return t
#distance equation designed to do arraywise caculations
def distance(ra1, dec1, ra2, dec2):
    """Angular separation in degrees under a flat-sky approximation.

    The RA difference is compressed by cos(mean declination). Accepts scalars
    or numpy arrays (element-wise).
    """
    mean_dec = (dec1 + dec2) / 2.0
    dra = (ra1 - ra2) * np.cos(np.radians(mean_dec))
    ddec = dec1 - dec2
    return np.sqrt(dra ** 2.0 + ddec ** 2.0)
# # # # # # # # # # # # # # # Main Program # # # # # # # # # # # # # # # # # # # # # # # #
#scale fluxes so as to appear at the distance of coma
for i in range(len(flux_cols)):
    col = flux_cols[i]
    #scale to the distance of coma (dist_x is the flux scale set above)
    cat[col] = cat[col]*dist_x
    #if less than 15mjy (0.015 Jy) then set to 0, i.e. below detection limit
    w = np.where(cat[col] < 0.015)[0]
    cat[col][w] = 0.0
#make an optical selection for the cluster: brighter than optical_lim and
#with a valid (non-zero, non-NaN) magnitude
optical = cat.where((cat[optical_col] <= optical_lim) & (np.nan_to_num(cat[optical_col]) != 0.0))
new_cat = nth_nearest_neighbour(optical, coor_names)
new_cat.write(pj(folder, output), overwrite=True)
| [
"chrisfuller@Chriss-MBP.lan"
] | chrisfuller@Chriss-MBP.lan |
27ed23f7457434fd19a3ba7ce1b446ac8006d7d4 | 02f565644b729c496bb4d802dfc6cb3a5db68ff1 | /tests/test_repeated_dna_sequences.py | fbfb36d28c09ab3fc23461b2dc41dd8bf4b564b5 | [] | no_license | saubhik/leetcode | 99a854ad87272eb82b16f22408ee7314ba0db099 | 221f0cb3105e4ccaec40cd1d37b9d7d5e218c731 | refs/heads/master | 2023-04-27T03:11:03.565056 | 2021-05-17T07:55:22 | 2021-05-17T07:55:22 | 275,324,914 | 3 | 1 | null | 2020-10-03T07:06:17 | 2020-06-27T07:48:37 | Python | UTF-8 | Python | false | false | 1,121 | py | import unittest
from repeated_dna_sequences import (
OfficialSolutionApproach2,
OfficialSolutionApproach3,
Solution,
)
class TestRepeatedDNASequences(unittest.TestCase):
    """Cross-checks the three solver variants on the two LeetCode examples."""

    def test_example_1(self):
        s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
        # The baseline solution promises discovery order.
        self.assertEqual(
            Solution().findRepeatedDnaSequences(s=s),
            ["AAAAACCCCC", "CCCCCAAAAA"],
        )
        # The official approaches make no ordering promise, so compare as sets.
        expected = {"CCCCCAAAAA", "AAAAACCCCC"}
        for solver in (OfficialSolutionApproach2, OfficialSolutionApproach3):
            self.assertEqual(set(solver().findRepeatedDnaSequences(s=s)), expected)

    def test_example_2(self):
        s = "AAAAAAAAAAAAA"
        expected = ["AAAAAAAAAA"]
        for solver in (Solution, OfficialSolutionApproach2, OfficialSolutionApproach3):
            self.assertEqual(solver().findRepeatedDnaSequences(s=s), expected)
| [
"saubhik.mukherjee@gmail.com"
] | saubhik.mukherjee@gmail.com |
392b75c54f958a4bebd4f2b76e439193093387d0 | eef243e450cea7e91bac2f71f0bfd45a00c6f12c | /.history/run_20210124182546.py | 96b0a8d0a563b5c91efbe0ad25075a0f449732ac | [] | no_license | hoaf13/nlp-chatbot-lol | 910ab2ea3b62d5219901050271fc1a1340e46a2f | 18cb64efa9d6b4cafe1015f1cd94f4409271ef56 | refs/heads/master | 2023-05-08T04:17:19.450718 | 2021-02-02T02:37:38 | 2021-02-02T02:37:38 | 332,535,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | from app import app
from flask import request
from flask_socketio import SocketIO, send, emit, join_room, leave_room, close_room, rooms, disconnect
socketio = SocketIO(app, cors_allowed_origins='*')
@socketio.on('connected')
def test_connect(data):
    """Announce a newly connected user to every client."""
    announcement = "User {} has connected".format(data)
    send(announcement, broadcast=True)
@socketio.on('disconnected')
def test_disconnect(data):
    """Announce a user's departure to every client.

    Fixed: the keyword was misspelled `brsoadcast`, so the intended
    broadcast flag was never actually applied.
    """
    send("User {} has disconnected.".format(data), broadcast=True)
@socketio.on('client-send-data')
def test_emit(data):
    """Log the incoming payload, then relay it to every client."""
    log_line = "data recived: {}".format(data)
    print(log_line)
    send(data, broadcast=True)
@socketio.on('client-send-private-data')
def handle_send_private_data(msg):
    """Echo a client message back to all clients with the server's response.

    Fixed: `socketio.broadcast.emit` is not a Flask-SocketIO API (SocketIO has
    no `broadcast` attribute, so this raised AttributeError), and the `ans`
    payload built here was never actually sent. Now the payload is emitted
    with the event's `broadcast` flag.
    """
    response = "response-> " + msg
    ans = dict()
    ans['client-msg'] = msg
    ans['server-msg'] = response
    emit("server-send-private-data", ans, broadcast=True)
if __name__ == '__main__':
    # Reload Jinja templates on change during development.
    app.jinja_env.auto_reload = True
    # Run through SocketIO's server wrapper rather than app.run() so the
    # websocket handlers above are served.
    socketio.run(app)
| [
"samartcall@gmail.com"
] | samartcall@gmail.com |
8c6416ed9c7686e7035c91a619c60aa6c6150ff3 | a6b8f33193163de60eb17231a713083da4dea970 | /week_04/mini_projects/webpage_generator/wpgenerator.py | c09f7f4936f39ee4fc06e46531fa266b4da896c5 | [] | no_license | mingyyy/onsite | 4defd8d2e8bad6f2f1c61f756ee9269ec0ba5fe2 | 79c8fa30ca152161abfeef797d6eb357f764dc97 | refs/heads/master | 2022-12-14T18:50:13.514560 | 2019-04-02T11:56:37 | 2019-04-02T11:56:37 | 171,419,253 | 0 | 3 | null | 2022-12-08T01:41:15 | 2019-02-19T06:35:59 | Python | UTF-8 | Python | false | false | 445 | py |
path = "raw/ubud.txt"

# Read the raw article: the first line is the title, the rest is body text.
with open(path, "r") as f:
    file = f.readlines()

title = file[0].strip() if file else ""
# Everything after the title line becomes the page body.
para = "".join(file[1:]).strip()

# Fixed: the original template contained a stray `<>` tag, left <h1> empty,
# and never inserted the extracted `para` into the page; the line-scanning
# loop that built them has been replaced by the direct slicing above.
page = f'''
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>{title}</title>
</head>
<body>
<h1>{title}</h1>
<p>{para}</p>
</body>
</html>
'''
"j.yanming@gmail.com"
] | j.yanming@gmail.com |
a528bab8dad447a472fead2de386caaa12a21e06 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/securityinsight/azure-mgmt-securityinsight/generated_samples/get_entity_queries.py | 8cee24ba13980a452d2c2b900ca52c9cfd64be9e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,615 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.securityinsight import SecurityInsights
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-securityinsight
# USAGE
python get_entity_queries.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Run the GetEntityQueries sample: list entity queries in a workspace."""
    # Credentials are resolved by DefaultAzureCredential from the AZURE_*
    # environment variables described in the module docstring.
    client = SecurityInsights(
        credential=DefaultAzureCredential(),
        subscription_id="d0cfe6b2-9ac0-4464-9919-dccaee2e48c0",
    )

    # `list` returns a pageable; iterating it prints each entity query.
    response = client.entity_queries.list(
        resource_group_name="myRg",
        workspace_name="myWorkspace",
    )
    for item in response:
        print(item)


# x-ms-original-file: specification/securityinsights/resource-manager/Microsoft.SecurityInsights/preview/2022-12-01-preview/examples/entityQueries/GetEntityQueries.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
4d07737f103ae1cce749e1abaf6560be63c813fc | 8c50265b43add0e91e30245cc7af3c2558c248f5 | /example/rcnn/symnet/metric.py | fa8d7919e919244f30ccfca2fbaf238d92cf322d | [
"BSD-3-Clause",
"BSD-2-Clause-Views",
"Zlib",
"Apache-2.0",
"BSD-2-Clause",
"Intel",
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | awslabs/dynamic-training-with-apache-mxnet-on-aws | 6a67f35d7e4b12fa8bba628bd03b2b031924e211 | 1063a979417fee8c820af73860eebd2a4f670380 | refs/heads/master | 2023-08-15T11:22:36.922245 | 2022-07-06T22:44:39 | 2022-07-06T22:44:39 | 157,440,687 | 60 | 19 | Apache-2.0 | 2022-11-25T22:23:19 | 2018-11-13T20:17:09 | Python | UTF-8 | Python | false | false | 5,187 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
def get_names():
    """Return the (prediction, label) output-name lists shared by all metrics."""
    pred_names = ['rpn_cls_prob', 'rpn_bbox_loss', 'rcnn_cls_prob',
                  'rcnn_bbox_loss', 'rcnn_label']
    label_names = ['rpn_label', 'rpn_bbox_target', 'rpn_bbox_weight']
    return pred_names, label_names
class RPNAccMetric(mx.metric.EvalMetric):
    """Classification accuracy of the RPN, ignoring anchors labelled -1."""

    def __init__(self):
        super(RPNAccMetric, self).__init__('RPNAcc')
        # Shared output/label name lists used to locate tensors by index.
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        pred = preds[self.pred.index('rpn_cls_prob')]
        label = labels[self.label.index('rpn_label')]

        # pred (b, c, p) or (b, c, h, w): argmax over the class axis.
        pred_label = mx.ndarray.argmax_channel(pred).asnumpy().astype('int32')
        pred_label = pred_label.reshape((pred_label.shape[0], -1))
        # label (b, p)
        label = label.asnumpy().astype('int32')

        # filter with keep_inds: label -1 marks anchors excluded from the loss.
        keep_inds = np.where(label != -1)
        pred_label = pred_label[keep_inds]
        label = label[keep_inds]

        self.sum_metric += np.sum(pred_label.flat == label.flat)
        self.num_inst += len(pred_label.flat)
class RCNNAccMetric(mx.metric.EvalMetric):
    """Classification accuracy of the RCNN head over all sampled RoIs."""

    def __init__(self):
        super(RCNNAccMetric, self).__init__('RCNNAcc')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        pred = preds[self.pred.index('rcnn_cls_prob')]
        # Note: 'rcnn_label' is read from the network outputs (preds),
        # not from the data-iterator labels.
        label = preds[self.pred.index('rcnn_label')]

        # Flatten to (n_rois, n_classes) and take the argmax per RoI.
        last_dim = pred.shape[-1]
        pred_label = pred.asnumpy().reshape(-1, last_dim).argmax(axis=1).astype('int32')
        label = label.asnumpy().reshape(-1,).astype('int32')

        self.sum_metric += np.sum(pred_label.flat == label.flat)
        self.num_inst += len(pred_label.flat)
class RPNLogLossMetric(mx.metric.EvalMetric):
    """Cross-entropy (log) loss of the RPN classifier over kept anchors."""

    def __init__(self):
        super(RPNLogLossMetric, self).__init__('RPNLogLoss')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        pred = preds[self.pred.index('rpn_cls_prob')]
        label = labels[self.label.index('rpn_label')]

        # label (b, p)
        label = label.asnumpy().astype('int32').reshape((-1))
        # pred (b, c, p) or (b, c, h, w) --> (b, p, c) --> (b*p, c)
        pred = pred.asnumpy().reshape((pred.shape[0], pred.shape[1], -1)).transpose((0, 2, 1))
        pred = pred.reshape((label.shape[0], -1))

        # filter with keep_inds: drop anchors labelled -1 (ignored).
        keep_inds = np.where(label != -1)[0]
        label = label[keep_inds]
        # Probability assigned to the true class of each kept anchor.
        cls = pred[keep_inds, label]

        # Epsilon guards against log(0).
        cls += 1e-14
        cls_loss = -1 * np.log(cls)
        cls_loss = np.sum(cls_loss)
        self.sum_metric += cls_loss
        self.num_inst += label.shape[0]
class RCNNLogLossMetric(mx.metric.EvalMetric):
    """Cross-entropy (log) loss of the RCNN classifier over all sampled RoIs."""

    def __init__(self):
        super(RCNNLogLossMetric, self).__init__('RCNNLogLoss')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        pred = preds[self.pred.index('rcnn_cls_prob')]
        # Note: 'rcnn_label' is read from the network outputs (preds).
        label = preds[self.pred.index('rcnn_label')]

        # Flatten to (n_rois, n_classes) and pick the true-class probability.
        last_dim = pred.shape[-1]
        pred = pred.asnumpy().reshape(-1, last_dim)
        label = label.asnumpy().reshape(-1,).astype('int32')
        cls = pred[np.arange(label.shape[0]), label]

        # Epsilon guards against log(0).
        cls += 1e-14
        cls_loss = -1 * np.log(cls)
        cls_loss = np.sum(cls_loss)
        self.sum_metric += cls_loss
        self.num_inst += label.shape[0]
class RPNL1LossMetric(mx.metric.EvalMetric):
    """Bbox-regression loss of the RPN, averaged over foreground anchors."""

    def __init__(self):
        super(RPNL1LossMetric, self).__init__('RPNL1Loss')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        bbox_loss = preds[self.pred.index('rpn_bbox_loss')].asnumpy()
        bbox_weight = labels[self.label.index('rpn_bbox_weight')].asnumpy()

        # calculate num_inst (average on those fg anchors): each foreground
        # anchor carries 4 positive coordinate weights, hence the /4.
        num_inst = np.sum(bbox_weight > 0) / 4

        self.sum_metric += np.sum(bbox_loss)
        self.num_inst += num_inst
class RCNNL1LossMetric(mx.metric.EvalMetric):
    """Bbox-regression loss of the RCNN head, averaged over foreground RoIs."""

    def __init__(self):
        super(RCNNL1LossMetric, self).__init__('RCNNL1Loss')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        bbox_loss = preds[self.pred.index('rcnn_bbox_loss')].asnumpy()
        label = preds[self.pred.index('rcnn_label')].asnumpy()

        # calculate num_inst: label 0 is background, so only count RoIs
        # assigned to an object class.
        keep_inds = np.where(label != 0)[0]
        num_inst = len(keep_inds)

        self.sum_metric += np.sum(bbox_loss)
        self.num_inst += num_inst
| [
"vikumar@88e9fe53272d.ant.amazon.com"
] | vikumar@88e9fe53272d.ant.amazon.com |
b54cad4d67281209ae0454f243bca0b73a8d9bf8 | 3420dd606acc60f921efcc79160d85af92be3740 | /dexp/processing/denoising/_test/test_butterworth.py | f8a65768a97d976601687f947fe3ad4734ff0888 | [
"BSD-3-Clause"
] | permissive | royerlab/dexp | 3e9b67b4084eacf9de8006f75754292f8d7e0fb4 | 8e8399f5d0d8f1e1ae0ddfa6cb6011921929ae0b | refs/heads/master | 2023-05-26T04:03:44.833528 | 2023-04-10T16:06:09 | 2023-04-10T16:06:09 | 196,109,847 | 23 | 6 | BSD-3-Clause | 2023-04-07T21:48:25 | 2019-07-10T01:41:20 | Python | UTF-8 | Python | false | false | 251 | py | from dexp.processing.denoising.demo.demo_2D_butterworth import _demo_butterworth
from dexp.utils.testing.testing import execute_both_backends
@execute_both_backends
def test_butterworth():
    # Run the 2D Butterworth denoising demo headlessly (on both CPU and GPU
    # backends via the decorator) and check its returned quality score stays
    # within a small tolerance of the recorded baseline of 0.608.
    assert _demo_butterworth(display=False) >= 0.608 - 0.03
| [
"noreply@github.com"
] | royerlab.noreply@github.com |
94f60f929cf72989003431c51a7ae1b30e26b12a | bb983b38f9be7b6fd4ab1a651484db37c1aeff39 | /1019/python_list_index.py | d54b78a345b11bda2588b2b6d799910da221d2b2 | [] | no_license | nakanishi-akitaka/python2018_backup | c214df78372cca993d69f8001010ec2f6dcaf1be | 45766d3c3777de2a91b3e2cf50c6bfedca8627da | refs/heads/master | 2023-02-18T08:04:28.625532 | 2022-06-07T01:02:53 | 2022-06-07T01:02:53 | 201,399,236 | 5 | 30 | null | 2023-02-10T21:06:51 | 2019-08-09T05:48:22 | Jupyter Notebook | UTF-8 | Python | false | false | 1,127 | py | # -*- coding: utf-8 -*-
"""
https://note.nkmk.me/python-list-index/
Created on Fri Oct 19 12:40:31 2018
@author: Akitaka
"""
l = list('abcde')
print(l)
print(l.index('a'))
print(l.index('c'))
def my_index(l, x, default=False):
    """Return the index of the first occurrence of ``x`` in ``l``.

    Unlike ``list.index`` this never raises: when ``x`` is absent, the
    ``default`` value (``False`` unless overridden) is returned instead.
    """
    try:
        return l.index(x)
    except ValueError:
        return default
print(my_index(l, 'd'))
print(my_index(l, 'x'))
print(my_index(l, 'x', -1))
#%%
l_dup = list('abcba')
print(l_dup)
print(l_dup.index('a'))
print(l_dup.index('b'))
#%%
print([i for i, x in enumerate(l_dup) if x == 'a'])
print([i for i, x in enumerate(l_dup) if x == 'b'])
print([i for i, x in enumerate(l_dup) if x == 'c'])
print([i for i, x in enumerate(l_dup) if x == 'x'])
#%%
def my_index_multi(l, x):
    """Return a list of every index at which ``x`` occurs in ``l``."""
    hits = []
    for position, value in enumerate(l):
        if value == x:
            hits.append(position)
    return hits
print(my_index_multi(l_dup, 'a'))
print(my_index_multi(l_dup, 'c'))
print(my_index_multi(l_dup, 'x'))
#%%
t = tuple('abcde')
print(t)
print(t.index('a'))
print(my_index(t, 'c'))
print(my_index(t, 'x'))
t_dup = tuple('abcba')
print(t_dup)
print(my_index_multi(t_dup, 'a'))
| [
"noreply@github.com"
] | nakanishi-akitaka.noreply@github.com |
ec813cec9fde2a104a1cdad75cf78ecc5a255913 | 65b4522c04c2be071c2d42095956fe950fe1cebe | /inversions/inversion_one_chanel/run1/analysis/pred_disp_large_scale/plots/Raslip_vel/plot_displacement_contours.py | f75d0bde319f5b19287101cdc1313d0c9cf23b29 | [] | no_license | geodesy/viscojapan | ac0cd93f7a2134cd2651623b94879dcc21c0c46a | 03e70265b56eb5994e73bcb6066f0be338e42f27 | refs/heads/master | 2021-03-03T18:19:07.779601 | 2015-07-16T03:50:49 | 2015-07-16T03:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | import numpy as np
import viscojapan as vj
from epochs import epochs
def load_lons_lats():
    """Read station longitudes and latitudes from the stations input file.

    The file is parsed with a structured dtype '4a,f,f' (4-byte station
    name, lon, lat); only the two coordinate columns are returned.
    """
    tp = np.loadtxt('../stations_large_scale.in', '4a,f,f')
    lons = [ii[1] for ii in tp]
    lats = [ii[2] for ii in tp]
    return lons, lats
lons, lats = load_lons_lats()
# Reader over the precomputed deformation-partition results (HDF5).
reader = vj.inv.DeformPartitionResultReader(
    '../../deformation_partition_large_scale.h5')
Ecumu = reader.Ecumu
contours = [0.05, 0.1, 0.5, 1, 2]  # contour levels, in cm/yr
cmpt = 'Raslip'  # deformation component to plot (afterslip rate)
obj = getattr(reader, cmpt)
# One contour plot per epoch; epoch 0 has no velocity, so it is skipped.
for epoch in epochs:
    print(cmpt, epoch)
    if epoch == 0:
        continue
    mags = obj.get_velocity_hor_mag_at_epoch(epoch)
    mags = mags*100*365 # m/day => cm/yr
    plt = vj.displacement.plot.MagnitudeContoursPlotter()
    plt.plot(lons, lats, mags,
             'plots/%s_day%04d.png'%(cmpt,epoch),
             contours = contours,
             if_topo = False,
             unit_label = 'cm/yr',
             title = "Rate Raslip year %.3f"%(epoch/365)
             )
| [
"zy31415@gmail.com"
] | zy31415@gmail.com |
c9c98e197cfaa40df88820f453e394610790ef19 | 3d62466a21dd4f9cce27544eb0318025949e2385 | /samples/WebApplication/Session.py | 4e487d2d1d9d729d2aa048e5fe7fb4606a779dad | [
"BSD-3-Clause"
] | permissive | zlorb/PyModel | eb6cd24e96429bdd57c3ed2a451d0f4f4073e353 | 502aa0a3708f549ecd803008ab6a2d63a59a2cd3 | refs/heads/master | 2023-08-09T15:32:53.183114 | 2022-02-23T00:13:02 | 2022-02-23T00:13:02 | 50,697,490 | 15 | 8 | NOASSERTION | 2023-07-25T18:13:49 | 2016-01-29T23:07:34 | Python | UTF-8 | Python | false | false | 2,381 | py | """
Experiment with code for WebApplication stepper
"""
import re
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import http.cookiejar
# Scrape page contents
def loginFailed(page):
    """Return True when the fetched page text reports a failed login."""
    return 'Incorrect login' in page
intPattern = re.compile(r'Number: (\d+)')
def intContents(page):
    """Scrape the integer shown after 'Number: ' in the page.

    Returns the int if the module-level intPattern matches, else None.
    """
    m = intPattern.search(page)
    if m:
        return int(m.group(1))
    else:
        return None
def main():
    """Drive the sample web application end to end.

    Logs in, updates the stored integer, reads it back, logs out, then
    attempts a login with a wrong password.  Talks to http://localhost/
    and prints every response, so this is a manual smoke test.
    """
    # Configure. Web application in this sample requires cookies, redirect
    cookies = http.cookiejar.CookieJar()
    cookie_handler = urllib.request.HTTPCookieProcessor(cookies)
    redirect_handler = urllib.request.HTTPRedirectHandler()
    debug_handler = urllib.request.HTTPHandler(debuglevel=1)  # print headers on console
    opener = urllib.request.build_opener(cookie_handler, redirect_handler, debug_handler)
    # Constants
    site = 'http://localhost/'
    path = 'nmodel/webapplication/php/'
    webAppPage = 'doStuff.php'  # Shouldn't this be called webAppPage, ...Url -?
    logoutPage = 'logout.php'
    webAppUrl = site + path + webAppPage
    logoutUrl = site + path + logoutPage
    print('GET to show login page')
    print(opener.open(webAppUrl).read())
    print('POST to login with sample username and password, pass separate args for POST')
    # Bug fix: in Python 3, POST data passed to opener.open() must be bytes;
    # urlencode() returns str, so it has to be encoded explicitly.
    args = urllib.parse.urlencode({'username': 'user1', 'password': '123'}).encode('ascii')
    # Bug fix: read() returns bytes; decode so the str-based scrapers
    # (loginFailed, intContents) work.  Assumes UTF-8 pages — TODO confirm.
    page = opener.open(webAppUrl, args).read().decode('utf-8')  # should show successful login
    print(page)
    if loginFailed(page):
        print('Login FAILED')
    print('GET with arg in URL to UpdateInt on server')
    num = 99
    numArg = urllib.parse.urlencode({'num': num})
    print(opener.open("%s?%s" % (webAppUrl, numArg)).read())
    print('GET to retrieve page with integer')
    page = opener.open(webAppUrl).read().decode('utf-8')
    print(page)
    print('%s found in page, expected %s' % (intContents(page), num))
    print()
    print('GET to logout')
    print(opener.open(logoutUrl).read())
    print('GET to show login page -- again')
    print(opener.open(webAppUrl).read())
    print('POST to login with username and WRONG password')
    args = urllib.parse.urlencode({'username': 'user1', 'password': '321'}).encode('ascii')  # wrong pass
    page = opener.open(webAppUrl, args).read().decode('utf-8')  # should show login fail
    print(page)
    if loginFailed(page):
        print('Login FAILED')
    # No logout this time - we're not logged in
main()
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
008051dab9733ed2d9f2fb7454f439c579ba2b1d | d0cbfb54c336582c72e8d36c26c03a41d81a1bf4 | /djblog/blog/urls.py | 3f9a68d0cf928b549348495c01231e5567c56b8b | [
"MIT"
] | permissive | ghacer/djwebapp-blog | ea523928112572d34caf62c1bcede2e52c71dc6b | 0101b0356a6fa2d364f0da04adc8956938cef78c | refs/heads/master | 2021-01-18T01:37:25.283289 | 2014-11-22T02:09:38 | 2014-11-22T02:09:38 | 39,756,290 | 0 | 1 | null | 2015-07-27T05:15:11 | 2015-07-27T05:15:11 | null | UTF-8 | Python | false | false | 370 | py | from django.conf.urls import patterns, include, url
# import .views
# URL routes for the blog app (old-style string view references):
# the index page, a single post by primary key, and a category by primary key.
urlpatterns = patterns('',
    url(r"^$", "blog.views.index", name="index"),
    url(r"^post/(?P<pk>\d+)/$", "blog.views.post", name="post"),
    url(r"^category/(?P<pk>\d+)/$", "blog.views.category", name="category"),
)
| [
"wwq0327@gmail.com"
] | wwq0327@gmail.com |
7cf68892b2e25b23ddffda245dbbce948ae8f6ce | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/717.py | f8d716b09ed85efab988ce26b530693ae8483649 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | t = int(raw_input()) # read a line with a single integer
import numpy as np
for i in xrange(1, t + 1):
D, N = [s for s in raw_input().split(" ")]
D = int(D) #destination distance
N = int(N) #number of horses
K = np.zeros(N) #start position
S = np.zeros(N) #speed
T = np.zeros(N) #arrival time
for j in xrange(0,N):
string = raw_input().split(" ")
K[j] = int(string[0]) #starting position of jth horse
S[j] = int(string[1]) #speed of jth horse
T[j] = float(D-K[j])/float(S[j])
time = max(T)
optimal_speed = D / float(time)
print "Case #{}: {}".format(i, optimal_speed)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
035f8507902b4e954a369b882f9c67efe0e953c2 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/dis_content_rsp.py | 67f2dccb885bb01a85eae6061603d4173e14226a | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,056 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DisContentRsp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'stream_name': 'str',
'ak': 'str',
'sk': 'str',
'project_id': 'str'
}
attribute_map = {
'stream_name': 'streamName',
'ak': 'ak',
'sk': 'sk',
'project_id': 'projectId'
}
def __init__(self, stream_name=None, ak=None, sk=None, project_id=None):
"""DisContentRsp
The model defined in huaweicloud sdk
:param stream_name: 通道名称
:type stream_name: str
:param ak: 租户的AK
:type ak: str
:param sk: 租户的SK
:type sk: str
:param project_id: 项目id
:type project_id: str
"""
self._stream_name = None
self._ak = None
self._sk = None
self._project_id = None
self.discriminator = None
if stream_name is not None:
self.stream_name = stream_name
if ak is not None:
self.ak = ak
if sk is not None:
self.sk = sk
if project_id is not None:
self.project_id = project_id
@property
def stream_name(self):
"""Gets the stream_name of this DisContentRsp.
通道名称
:return: The stream_name of this DisContentRsp.
:rtype: str
"""
return self._stream_name
@stream_name.setter
def stream_name(self, stream_name):
"""Sets the stream_name of this DisContentRsp.
通道名称
:param stream_name: The stream_name of this DisContentRsp.
:type stream_name: str
"""
self._stream_name = stream_name
@property
def ak(self):
"""Gets the ak of this DisContentRsp.
租户的AK
:return: The ak of this DisContentRsp.
:rtype: str
"""
return self._ak
@ak.setter
def ak(self, ak):
"""Sets the ak of this DisContentRsp.
租户的AK
:param ak: The ak of this DisContentRsp.
:type ak: str
"""
self._ak = ak
@property
def sk(self):
"""Gets the sk of this DisContentRsp.
租户的SK
:return: The sk of this DisContentRsp.
:rtype: str
"""
return self._sk
@sk.setter
def sk(self, sk):
"""Sets the sk of this DisContentRsp.
租户的SK
:param sk: The sk of this DisContentRsp.
:type sk: str
"""
self._sk = sk
@property
def project_id(self):
"""Gets the project_id of this DisContentRsp.
项目id
:return: The project_id of this DisContentRsp.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this DisContentRsp.
项目id
:param project_id: The project_id of this DisContentRsp.
:type project_id: str
"""
self._project_id = project_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DisContentRsp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
ff4c97e9a54013b3ff6342b1b25da663fd7d7cf0 | 6f097812440f1cf728d9a0c2706b66e706de0824 | /uclptb/models.py | 8a9993cf6b152f7ab43b7dc96673b9822736a3a6 | [] | no_license | medical-projects/uclp-tb | 105f915c3042c53b769681fb30d7f06fb21fd60a | ef9dbdb22846be1a0d38e63b34532f7ff414762d | refs/heads/master | 2021-06-22T01:30:55.287491 | 2016-07-05T16:45:20 | 2016-07-05T16:45:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | """
uclptb models.
"""
from django.db.models import fields
from opal import models
# Concrete per-application record models: each simply subclasses the stock
# opal model of the same name so this app gets its own database tables
# with the inherited fields and behavior unchanged.
class Demographics(models.Demographics): pass
class Location(models.Location): pass
class Allergies(models.Allergies): pass
class Diagnosis(models.Diagnosis): pass
class PastMedicalHistory(models.PastMedicalHistory): pass
class Treatment(models.Treatment): pass
class Investigation(models.Investigation): pass
class ReferralRoute(models.ReferralRoute): pass
class SymptomComplex(models.SymptomComplex): pass
class PatientConsultation(models.PatientConsultation): pass
| [
"fredkingham@gmail.com"
] | fredkingham@gmail.com |
a962541a52c1468a9fc1c8e4406db08c41303414 | dae212cb615e5eba3fe8108799a39bc09d7bddb6 | /grokking-coding/cyclic_sort/problem_challenge_1.py | 572ef55c35c78732d5581d7b37aa4e9dcc615fb7 | [] | no_license | cs-cordero/interview-prep | a291b5ce2fb8461449e6e27a1f23e12b54223540 | c3b5b4612f3641572d2237e36aa23019c680c799 | refs/heads/master | 2022-05-23T10:39:59.817378 | 2020-04-29T12:57:12 | 2020-04-29T12:57:12 | 76,767,250 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | from typing import List
def find_corrupt_numbers(nums: List[int]) -> List[int]:
    """Find the corrupt entries in a list that should hold 1..n exactly once.

    Uses cyclic sort: each value is repeatedly moved toward index value-1.
    A value whose target slot already holds that value is a duplicate; a
    slot still holding the ``None`` placeholder afterwards marks a missing
    number.  Returns the duplicate value(s) followed by one missing number.
    NOTE: mutates ``nums`` in place.
    """
    result = []
    for i in range(len(nums)):
        if nums[i] == i + 1:
            # Value already sits in its correct slot.
            continue
        temp = nums[i]
        nums[i] = None  # placeholder: slot i's rightful value is absent so far
        while temp and temp > 0 and temp <= len(nums):
            if temp == nums[temp - 1]:
                # Target slot already holds this value -> it is a duplicate.
                result.append(temp)
                break
            # Place temp into its slot and carry on with the evicted value.
            next_temp = nums[temp - 1]
            nums[temp - 1] = temp
            temp = next_temp
    for i, num in enumerate(nums):
        if num is None:
            # Slot i was never claimed by its value -> i+1 is missing.
            result.append(i + 1)
            break
    return result
| [
"ccordero@protonmail.com"
] | ccordero@protonmail.com |
184619b837b7e49365075a3d962d2bbd1c417295 | 8256963b73a829ec5054b8c3cb707250a8c6054a | /scooter/models/__models.py | f5946eb13876de57266266c8cd2a86b855a4396b | [
"MIT"
] | permissive | vyahello/rent-electro-scooter | bbd2d8c51536a832baeadbcd2a328de2174638ac | 34b85b0538d61315e325842f4c1b5094a94d2c0d | refs/heads/master | 2021-07-06T11:48:20.303858 | 2021-04-23T16:06:33 | 2021-04-23T16:06:33 | 236,315,479 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # pylint: disable=unused-import
# noinspection PyUnresolvedReferences
from scooter.models import rentals # noqa: F401
# noinspection PyUnresolvedReferences
from scooter.models import locations # noqa: F401
# noinspection PyUnresolvedReferences
from scooter.models import scooters # noqa: F401
# noinspection PyUnresolvedReferences
from scooter.models import users # noqa: F401
| [
"vyahello@gmail.com"
] | vyahello@gmail.com |
a5e12c032d5bc0f2f18a84268727ab3ea96e0593 | ddb3656fbacef606ac3cfa53eb74a99be90202cd | /selfdrive/hardware/eon/androidd.py | b836eb01294dc6258395bbe29ba7b767d3aca242 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | ErichMoraga/openpilot | f70b353099d3643c9f8d16fb8003811418c95656 | 2f73be29651e34e62eaf18472f9219cea57c177a | refs/heads/812 | 2023-08-02T16:58:57.870050 | 2023-07-20T17:33:41 | 2023-07-20T17:33:41 | 140,953,335 | 58 | 77 | MIT | 2023-07-30T15:33:18 | 2018-07-14T14:41:16 | C | UTF-8 | Python | false | false | 2,295 | py | #!/usr/bin/env python3
import os
import time
import psutil
from typing import Optional
from common.realtime import set_core_affinity, set_realtime_priority
from selfdrive.swaglog import cloudlog
MAX_MODEM_CRASHES = 3
MODEM_PATH = "/sys/devices/soc/2080000.qcom,mss/subsys5"
WATCHED_PROCS = ["zygote", "zygote64", "/system/bin/servicemanager", "/system/bin/surfaceflinger"]
def get_modem_crash_count() -> Optional[int]:
    """Read the modem subsystem's crash counter from sysfs.

    Returns the count as an int, or None (after logging) on any failure —
    best-effort by design, since the sysfs node may not exist.
    """
    try:
        with open(os.path.join(MODEM_PATH, "crash_count")) as f:
            return int(f.read())
    except Exception:
        cloudlog.exception("Error reading modem crash count")
    return None
def get_modem_state() -> str:
    """Read the modem subsystem's state string (e.g. "ONLINE") from sysfs.

    Returns "" (after logging) on any failure, so callers never raise.
    """
    try:
        with open(os.path.join(MODEM_PATH, "state")) as f:
            return f.read().strip()
    except Exception:
        cloudlog.exception("Error reading modem state")
    return ""
def main():
    """Supervise critical Android services and the cellular modem.

    Runs forever, once per second: re-resolves the pids of WATCHED_PROCS
    whenever any of them disappears (logging pid changes), watches the
    modem's sysfs state and crash counter, and permanently shuts the modem
    down once it has crashed more than MAX_MODEM_CRASHES times.
    """
    # Pin to core 1 at elevated priority so supervision keeps running
    # even under load.
    set_core_affinity(1)
    set_realtime_priority(1)
    procs = {}
    crash_count = 0
    modem_killed = False
    modem_state = "ONLINE"
    while True:
        # check critical android services
        if any(p is None or not p.is_running() for p in procs.values()) or not len(procs):
            # Rescan all processes to find the current pid of each watched one.
            cur = {p: None for p in WATCHED_PROCS}
            for p in psutil.process_iter(attrs=['cmdline']):
                cmdline = None if not len(p.info['cmdline']) else p.info['cmdline'][0]
                if cmdline in WATCHED_PROCS:
                    cur[cmdline] = p
            if len(procs):
                # Only log changes after the first scan has populated procs.
                for p in WATCHED_PROCS:
                    if cur[p] != procs[p]:
                        cloudlog.event("android service pid changed", proc=p, cur=cur[p], prev=procs[p])
            procs.update(cur)
        if os.path.exists(MODEM_PATH):
            # check modem state
            state = get_modem_state()
            if state != modem_state and not modem_killed:
                cloudlog.event("modem state changed", state=state)
                modem_state = state
            # check modem crashes
            cnt = get_modem_crash_count()
            if cnt is not None:
                if cnt > crash_count:
                    cloudlog.event("modem crash", count=cnt)
                    crash_count = cnt
                # handle excessive modem crashes: writing "put" to the
                # subsystem debug node powers the modem off for good.
                if crash_count > MAX_MODEM_CRASHES and not modem_killed:
                    cloudlog.event("killing modem")
                    with open("/sys/kernel/debug/msm_subsys/modem", "w") as f:
                        f.write("put")
                    modem_killed = True
        time.sleep(1)
if __name__ == "__main__":
main()
| [
"user@comma.ai"
] | user@comma.ai |
0f213d8dd1ec7a658623f0215997a3592e0df9ed | de707c94c91f554d549e604737b72e6c86eb0755 | /math/0x02-calculus/10-matisse.py | 2580a5ebc2eeb7f237af29cff5d2d583248ae911 | [] | no_license | ejonakodra/holbertonschool-machine_learning-1 | 885cf89c1737573228071e4dc8e26304f393bc30 | 8834b201ca84937365e4dcc0fac978656cdf5293 | refs/heads/main | 2023-07-10T09:11:01.298863 | 2021-08-11T03:43:59 | 2021-08-11T03:43:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | #!/usr/bin/env python3
""" defines a function that calculates the derivative of a polynomial """
def poly_derivative(poly):
    """
    calculates the derivative of the given polynomial

    Parameters:
        poly (list): list of coefficients representing a polynomial
            the index of the list represents the power of x
            the coefficient belongs to

    Returns:
        a new list of coefficients representing the derivative
        [0], if the derivative is 0
        None, if poly is not valid
    """
    if type(poly) is not list or len(poly) < 1:
        return None
    for coefficient in poly:
        if type(coefficient) is not int and type(coefficient) is not float:
            return None
    # Power rule: d/dx(c * x^n) = n*c * x^(n-1).  Multiplying each
    # coefficient by its power and dropping the constant term shifts every
    # remaining coefficient down one power.
    derivative = [power * coefficient
                  for power, coefficient in enumerate(poly)][1:]
    if not derivative:
        # The polynomial was a constant, so its derivative is 0.
        return [0]
    # Trim trailing zero coefficients, always keeping at least one entry.
    # Bug fix: use == instead of `is` — identity checks against the literal
    # 0 are implementation-dependent for ints and always False for floats
    # (0.0 is 0 -> False), so trailing 0.0 terms were never stripped.
    while len(derivative) > 1 and derivative[-1] == 0:
        derivative.pop()
    return derivative
| [
"eislek02@gmail.com"
] | eislek02@gmail.com |
299b7afae0c73e134909d4228f2ad18889254403 | 3bf73a5ac2c8dbcee802a742ee31834c2bbfda4e | /viewer/converter.py | d4e98e5ba9d2b73fa658263024cc81b9108103e8 | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | pawlosck/epistolaire | c8708df67e324abce31bff5519967a2ba6ffcd31 | 56c3d8665e492e649c631953baadebc70404303d | refs/heads/master | 2021-05-17T16:19:37.762930 | 2020-03-25T22:29:57 | 2020-03-25T22:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,391 | py | #!/usr/bin/env python3
# This is free and unencumbered software released into the public domain.
# See LICENSE file for details.
import locale
import datetime
from pathlib import Path
import sys
import json
import xml.etree.ElementTree as ET
class Converter:
def import_data(self, path):
with open(path) as fd:
self.jfile = json.load(fd)
def convert(self):
seen = set()
for conversation in self.jfile['conversations']:
try:
addr = conversation[0]['address'].replace(' ', '')
except KeyError:
addr = ','.join(conversation[0]['addresses']).replace(' ', '')
outfile = Path(f"{addr[:200]}{addr[200:] and '...'}.html")
if outfile in seen:
raise FileExistsError(f"oops, {outfile} has already been used")
seen.add(outfile)
hconv = self.build_conversation(conversation)
html = ET.Element('html')
hhead = ET.SubElement(html, 'head')
ET.SubElement(hhead, 'link', rel='stylesheet', href='https://cdn.jsdelivr.net/gh/kognise/water.css@latest/dist/dark.css')
ET.SubElement(hhead, 'link', rel='stylesheet', href='style.css')
hbody = ET.SubElement(html, 'body')
hbody.append(hconv)
with outfile.open('wb') as fd:
fd.write(ET.tostring(html, method='html'))
def build_conversation(self, jconv):
hconv = ET.Element('div', **{
'itemscope': 'itemscope',
'itemtype': 'http://schema.org/Message',
})
for jmsg in sorted(jconv, key=lambda jmsg: jmsg['date']):
if 'parts' in jmsg:
self.build_mms(jmsg, hconv)
else:
self.build_sms(jmsg, hconv)
return hconv
def build_mms(self, jmsg, hconv):
parts = jmsg['parts']
text_part = next((part for part in parts if part['ct'] == 'text/plain'), None)
img_part = next((part for part in parts if part['ct'].startswith('image/')), None)
is_received = jmsg['msg_box'] == 1
dt = datetime.datetime.fromtimestamp(jmsg['date'] / 1000)
hmsg = ET.SubElement(
hconv, 'div', id=str(jmsg['_id']),
**{
'class': f'message message-{"received" if is_received else "sent"}',
'itemscope': 'itemscope',
'itemprop': 'hasPart',
'itemtype': 'http://schema.org/Message',
},
)
htime = ET.SubElement(
hmsg, 'time', **{
'class': 'message-date',
'itemprop': 'dateReceived',
'datetime': dt.isoformat(),
})
htime.text = dt.strftime('%Y-%m-%d %H:%M:%S')
if img_part:
hdimg = ET.SubElement(hmsg, 'div')
ET.SubElement(
hdimg, 'img', **{
'class': 'message-photo',
'src': f'data:{img_part["ct"]};base64,{img_part["my_content"]}',
})
if text_part:
hbody = ET.SubElement(hmsg, 'div', **{'class': 'message-body'})
hbody.text = text_part['text']
def build_sms(self, jmsg, hconv):
is_received = jmsg['type'] == 1
dt = datetime.datetime.fromtimestamp(jmsg['date'] / 1000)
hmsg = ET.SubElement(
hconv, 'div', id=str(jmsg['_id']),
**{
'class': f'message message-{"received" if is_received else "sent"}',
'itemscope': 'itemscope',
'itemprop': 'hasPart',
'itemtype': 'http://schema.org/Message',
},
)
# haddr = ET.SubElement(
# hmsg, 'div', **{
# 'class': 'message-address',
# 'itemprop': 'sender' if is_received else 'recipient',
# })
# haddr.text = jmsg['address']
htime = ET.SubElement(
hmsg, 'time', **{
'class': 'message-date',
'itemprop': 'dateReceived',
'datetime': dt.isoformat(),
})
htime.text = dt.strftime('%Y-%m-%d %H:%M:%S')
hbody = ET.SubElement(hmsg, 'div', **{'class': 'message-body'})
hbody.text = jmsg['body']
locale.setlocale(locale.LC_ALL, '')
c = Converter()
c.import_data(sys.argv[1])
c.convert()
| [
"dev@indigo.re"
] | dev@indigo.re |
252968fc95b8ee95bcdff316f26b7222dc1805b1 | cba0f1286e4271ac35101a25d5040b2e4f405bde | /cgi-bin/admin/severe2/advanced/answerKey/edit.py.cln | ba798fa61b4625939db956fa6a9d944db2d181ef | [] | no_license | akrherz/pals | 271c92d098909abb5b912db4ae08f0c3589e5ec7 | adc213333fb23dc52d6784ce160c4ff8a8f193e3 | refs/heads/master | 2021-01-10T15:01:59.570168 | 2019-12-18T16:59:08 | 2019-12-18T16:59:08 | 45,484,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | cln | #!/usr/local/bin/python
# This program changes db stuff
# Daryl Herzmann 8-16-99
import cgi, pg, style, time
mydb = pg.connect('severe2_adv', 'localhost', 5555)
def get_question(question_num):
    """Fetch the question row for q_id as a list of dicts (pg dictresult).

    SECURITY NOTE(review): question_num is spliced straight into the SQL
    string — injectable; should use a parameterized query.
    """
    entry = mydb.query("SELECT * from questions WHERE q_id = '"+question_num+"' ").dictresult()
    return entry
def get_old_answer(caseNum, q_id):
    """Look up an existing answer record for (caseNum, q_id).

    Returns (answer, correct_comments, wrong_comments), or three empty
    strings when no record exists yet.
    SECURITY NOTE(review): caseNum and q_id are spliced straight into the
    SQL string — injectable; should use a parameterized query.
    """
    select = mydb.query("SELECT answer, correct, wrong from answers WHERE casenum = '"+caseNum+"' and q_id = '"+q_id+"' ").getresult()
    if len(select) > 0:
        ans = select[0][0]
        cor_comments = select[0][1]
        wro_comments = select[0][2]
        return ans, cor_comments, wro_comments
    else:
        return "","",""
def mk_option(ans, letter, optionval):
    """Print one HTML <option> for this answer letter (Python 2 CGI output).

    Options whose value is the sentinel 'N' (unused slot) are skipped; the
    currently stored answer letter is rendered SELECTED.  Option text is
    truncated to 80 characters.
    """
    if letter == ans and optionval != 'N':
        print '<option value="'+letter+'" SELECTED>'+letter+'. '+optionval[:80]+' ...'
    elif optionval != 'N':
        print '<option value="'+letter+'">'+letter+'. '+optionval[:80]+' ...'
def Main():
    """CGI entry point: render the edit form for one question's answer key.

    Reads caseNum and question_num from the request, loads the question and
    any previously saved answer, and prints an HTML form (Python 2 CGI)
    that POSTs to change.py.
    """
    form = cgi.FormContent()
    caseNum = form["caseNum"][0]
    question_num = form["question_num"][0]
    style.header("Edit answer for Generic Question", "white")
    quest = get_question(question_num)
    print '<H3>This is Question number '+question_num+' from caseNum '+caseNum+' </H3>'
    # Unpack the question text and its eight (possibly unused) options.
    question = quest[0]["question"]
    optiona = quest[0]["optiona"]
    optionb = quest[0]["optionb"]
    optionc = quest[0]["optionc"]
    optiond = quest[0]["optiond"]
    optione = quest[0]["optione"]
    optionf = quest[0]["optionf"]
    optiong = quest[0]["optiong"]
    optionh = quest[0]["optionh"]
    # Pre-fill the form with any answer/comments saved earlier.
    ans, cor_comments, wro_comments = get_old_answer(caseNum, question_num)
    print '<form method="POST" action="change.py">'
    print '<input type="hidden" value="'+question_num+'" name="question_num">'
    print '<input type="hidden" value="'+caseNum+'" name="caseNum">'
    print '<B>Edit the answer for this question:</B><BR>'
    print '<dd>'+question+'</dd><BR>'
    print '<B>Select the correct answer:</B><BR>'
    print '<SELECT name="answer">'
    # mk_option marks the stored answer SELECTED and skips 'N' placeholders.
    mk_option(ans, "A", optiona)
    mk_option(ans, "B", optionb)
    mk_option(ans, "C", optionc)
    mk_option(ans, "D", optiond)
    mk_option(ans, "E", optione)
    mk_option(ans, "F", optionf)
    mk_option(ans, "G", optiong)
    mk_option(ans, "H", optionh)
    print '</SELECT>'
    print '<BR><B>Input the correct comments</B>'
    print '<textarea name="cor_comments" cols="70" rows="10" WRAP>'+cor_comments+'</textarea>'
    print '<BR><B>Input the wrong comments</B>'
    print '<textarea name="wro_comments" cols="70" rows="10" WRAP>'+wro_comments+'</textarea>'
    print '<BR><BR>'
    print '<input type="submit" value="SUBMIT ANSWER">'
    print '</form>'
    style.std_bot()
Main()
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
e19d8f8f88840156c1eeb8d48d212e59b617dba8 | 34ec93dd1846270d7999e03db4f2f877ea1af005 | /nfldb/__init__.py | b2fdac632ab95a72060604da2e99ceda8b7bbc64 | [
"Unlicense"
] | permissive | micahstone20/nfldb | 4469fc466d3e8b065cf669362b0d13d6033bae2d | 61a5ae56be627a1ad5be93ea25ac494ee0ff292d | refs/heads/master | 2017-12-02T12:33:48.929627 | 2014-05-10T16:13:11 | 2014-05-10T16:13:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,759 | py | """
Module nfldb provides command line tools and a library for maintaining
and querying a relational database of play-by-play NFL data. The data
is imported from [nflgame](https://github.com/BurntSushi/nflgame),
which in turn gets its data from a JSON feed on NFL.com's live
GameCenter pages. This data includes, but is not limited to, game
schedules, scores, rosters and play-by-play data for every preseason,
regular season and postseason game dating back to 2009.
Here is a small teaser that shows how to use nfldb to find the top five
passers in the 2012 regular season:
#!python
import nfldb
db = nfldb.connect()
q = nfldb.Query(db)
q.game(season_year=2012, season_type='Regular')
for pp in q.sort('passing_yds').limit(5).as_aggregate():
print pp.player, pp.passing_yds
And the output is:
[andrew@Liger ~] python2 top-five.py
Drew Brees (NO, QB) 5177
Matthew Stafford (DET, QB) 4965
Tony Romo (DAL, QB) 4903
Tom Brady (NE, QB) 4799
Matt Ryan (ATL, QB) 4719
In theory, both `nfldb` and `nflgame` provide access to the same data.
The difference is in the execution. In order to search data in nflgame,
a large JSON file needs to be read from disk and loaded into Python
data structures for each game. Conversely, nfldb's data is stored in
a relational database, which can be searched and retrieved faster
than nflgame by a few orders of magnitude. Moreover, the relational
organization of data in nfldb allows for a convenient
[query interface](http://goo.gl/Sd6MN2) to search NFL play data.
The database can be updated with real time data from active games by
running the `nfldb-update` script included with this module as often
as you're comfortable pinging NFL.com. (N.B. The JSON data itself only
updates every 15 seconds, so running `nfldb-update` faster than that
would be wasteful.) Roster updates are done automatically at a minimum
interval of 12 hours.
nfldb has [comprehensive API documentation](http://pdoc.burntsushi.net/nfldb)
and a [wiki with examples](https://github.com/BurntSushi/nfldb/wiki).
nfldb can be used in conjunction with
[nflvid](https://pypi.python.org/pypi/nflvid)
to
[search and watch NFL game footage](http://goo.gl/Mckaf0).
If you need help, please join us at our IRC channel `#nflgame` on
FreeNode.
"""
from __future__ import absolute_import, division, print_function
from nfldb.db import __pdoc__ as __db_pdoc__
from nfldb.db import api_version, connect, now, set_timezone, schema_version
from nfldb.db import Tx
from nfldb.query import __pdoc__ as __query_pdoc__
from nfldb.query import aggregate, current, guess_position, player_search
from nfldb.query import Query, QueryOR
from nfldb.team import standard_team
from nfldb.types import __pdoc__ as __types_pdoc__
from nfldb.types import select_columns, stat_categories
from nfldb.types import Category, Clock, Enums, Drive, FieldPosition, Game
from nfldb.types import Play, Player, PlayPlayer, PossessionTime, Team
from nfldb.version import __pdoc__ as __version_pdoc__
from nfldb.version import __version__
__pdoc__ = __db_pdoc__
__pdoc__ = dict(__pdoc__, **__query_pdoc__)
__pdoc__ = dict(__pdoc__, **__types_pdoc__)
__pdoc__ = dict(__pdoc__, **__version_pdoc__)
# Export selected identifiers from sub-modules.
__all__ = [
# nfldb.db
'api_version', 'connect', 'now', 'set_timezone', 'schema_version',
'Tx',
# nfldb.query
'aggregate', 'current', 'guess_position', 'player_search',
'Query', 'QueryOR',
# nfldb.team
'standard_team',
# nfldb.types
'select_columns', 'stat_categories',
'Category', 'Clock', 'Enums', 'Drive', 'FieldPosition', 'Game',
'Play', 'Player', 'PlayPlayer', 'PossessionTime', 'Team',
# nfldb.version
'__version__',
]
| [
"jamslam@gmail.com"
] | jamslam@gmail.com |
10a63c1f20bce5638d2acc7a6327beab0a37f250 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j23054-4046/sdB_GALEX_J23054-4046_coadd.py | 2c395340b31353193613a52ea32cda925e2a3290 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[346.356125,-40.776181], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_GALEX_J23054-4046/sdB_GALEX_J23054-4046_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_GALEX_J23054-4046/sdB_GALEX_J23054-4046_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
7088e6f502c1cdeacd741bb0c4bd166fe030c4ad | 364d77b02d62d45ea588dbada7da16540e6a1f0c | /PyQt5/_table.py | 526ddea155e5a727ca8028812b9d53c62c2ffefe | [] | no_license | BaranAkcakaya/PythonProgramming | 3021f5b3452495fcc34ab9bbfce441976bb63456 | a0cc0f60dce3d50fe9bcf68a7255a71b3e81351d | refs/heads/main | 2023-01-07T09:20:33.695241 | 2020-11-02T07:25:49 | 2020-11-02T07:25:49 | 309,286,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QTableWidgetItem
from _tableForm import Ui_MainWindow
import sys
class Window(QtWidgets.QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.loadProducts()
self.ui.btnSave.clicked.connect(self.saveProduct)
self.ui.tableProducts.doubleClicked.connect(self.doubleClick)
def doubleClick(self):
for item in self.ui.tableProducts.selectedItems():
print(item.row(), item.column(), item.text())
def saveProduct(self):
name = self.ui.txtName.text()
price = self.ui.txtPrice.text()
if name and price is not None:
rowCount = self.ui.tableProducts.rowCount()
print(rowCount)
self.ui.tableProducts.insertRow(rowCount)
self.ui.tableProducts.setItem(rowCount,0, QTableWidgetItem(name))
self.ui.tableProducts.setItem(rowCount,1, QTableWidgetItem(price))
def loadProducts(self):
products = [
{'name': 'Samsung S5', 'price': 2000},
{'name': 'Samsung S6', 'price': 3000},
{'name': 'Samsung S7', 'price': 4000},
{'name': 'Samsung S8', 'price': 5000}
]
self.ui.tableProducts.setRowCount(len(products))
self.ui.tableProducts.setColumnCount(2)
self.ui.tableProducts.setHorizontalHeaderLabels(('Name','Price'))
self.ui.tableProducts.setColumnWidth(0,200)
self.ui.tableProducts.setColumnWidth(1,100)
rowIndex = 0
for product in products:
self.ui.tableProducts.setItem(rowIndex,0, QTableWidgetItem(product['name']))
self.ui.tableProducts.setItem(rowIndex,1, QTableWidgetItem(str(product['price'])))
rowIndex+=1
def app():
app = QtWidgets.QApplication(sys.argv)
win = Window()
win.show()
sys.exit(app.exec_())
app()
| [
"noreply@github.com"
] | BaranAkcakaya.noreply@github.com |
999e154742b6bdc53d8b6a9fa2225b844a90b729 | 7d27c71588c08e2a56807d5e670ef48e1985b3b5 | /Python/kraken/core/__init__.py | 1f2bf6959ded98791ac377e9905699510cf005f1 | [
"BSD-3-Clause"
] | permissive | BigRoy/Kraken | 6fcc5cf55c412751180d930c2c56a37084f5c5a3 | 8744f9ef3eec4f7d94f28a1433c6e89ca9cd0f6b | refs/heads/develop2.X | 2021-01-18T00:01:42.721175 | 2016-02-11T03:34:26 | 2016-02-11T03:34:26 | 51,552,149 | 1 | 0 | null | 2016-02-11T22:34:36 | 2016-02-11T22:34:36 | null | UTF-8 | Python | false | false | 468 | py | """Kraken Core."""
VERSION_MAJOR = 1
VERSION_MINOR = 0
VERSION_BUILD = 0
VERSION_SUFFIX = ""
def getVersion():
"""Contatenates the version globals and returns the current version of
Kraken.
Returns:
str: Current version of Kraken.
"""
versionString = str(VERSION_MAJOR) + "." + str(VERSION_MINOR) + "." + str(VERSION_BUILD)
if VERSION_SUFFIX:
versionString = versionString + "-" + VERSION_SUFFIX
return versionString
| [
"ethivierge@gmail.com"
] | ethivierge@gmail.com |
863ceb86e30e5bcaec6018ee17468974dbc00861 | 6448cd8b6fc0104362924fe1aa788cbd58abe17d | /ABCNN/test_abcnn.py | b2575e8f945de2bcfcdcaad1429d5fe680eac788 | [
"Apache-2.0"
] | permissive | RandolphVI/Text-Pairs-Relation-Classification | 8e54c21fcc97be81c0c797a83d3212c1a854a318 | 25a746ac9e72efdc79c9d90af9769e02587cf650 | refs/heads/master | 2021-06-05T21:58:11.686850 | 2020-11-18T02:24:55 | 2020-11-18T02:24:55 | 83,399,665 | 156 | 52 | null | null | null | null | UTF-8 | Python | false | false | 6,218 | py | # -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
import numpy as np
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
args = parser.parameter_parser()
MODEL = dh.get_model_name()
logger = dh.logger_fn("tflog", "logs/Test-{0}.log".format(time.asctime()))
CPT_DIR = 'runs/' + MODEL + '/checkpoints/'
BEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/'
SAVE_DIR = 'output/' + MODEL
def create_input_data(data: dict):
return zip(data['f_pad_seqs'], data['b_pad_seqs'], data['onehot_labels'])
def test_abcnn():
"""Test ABCNN model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load word2vec model
word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
# Load data
logger.info("Loading data...")
logger.info("Data processing...")
test_data = dh.load_data_and_labels(args, args.test_file, word2idx)
# Load abcnn model
OPTION = dh._option(pattern=1)
if OPTION == 'B':
logger.info("Loading best model...")
checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True)
else:
logger.info("Loading latest model...")
checkpoint_file = tf.train.latest_checkpoint(CPT_DIR)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x_front = graph.get_operation_by_name("input_x_front").outputs[0]
input_x_behind = graph.get_operation_by_name("input_x_behind").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
is_training = graph.get_operation_by_name("is_training").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/topKPreds").outputs[0]
predictions = graph.get_operation_by_name("output/topKPreds").outputs[1]
loss = graph.get_operation_by_name("loss/loss").outputs[0]
# Split the output nodes name by '|' if you have several output nodes
output_node_names = "output/topKPreds"
# Save the .pb model file
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
output_node_names.split("|"))
tf.train.write_graph(output_graph_def, "graph", "graph-abcnn-{0}.pb".format(MODEL), as_text=False)
# Generate batches for one epoch
batches_test = dh.batch_iter(list(create_input_data(test_data)), args.batch_size, 1, shuffle=False)
# Collect the predictions here
test_counter, test_loss = 0, 0.0
true_labels = []
predicted_labels = []
predicted_scores = []
for batch_test in batches_test:
x_f, x_b, y_onehot = zip(*batch_test)
feed_dict = {
input_x_front: x_f,
input_x_behind: x_b,
input_y: y_onehot,
dropout_keep_prob: 1.0,
is_training: False
}
batch_predicted_scores, batch_predicted_labels, batch_loss \
= sess.run([scores, predictions, loss], feed_dict)
for i in y_onehot:
true_labels.append(np.argmax(i))
for j in batch_predicted_scores:
predicted_scores.append(j[0])
for k in batch_predicted_labels:
predicted_labels.append(k[0])
test_loss = test_loss + batch_loss
test_counter = test_counter + 1
test_loss = float(test_loss / test_counter)
# Calculate Precision & Recall & F1
test_acc = accuracy_score(y_true=np.array(true_labels), y_pred=np.array(predicted_labels))
test_pre = precision_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
test_rec = recall_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
test_F1 = f1_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
# Calculate the average AUC
test_auc = roc_auc_score(y_true=np.array(true_labels),
y_score=np.array(predicted_scores), average='micro')
logger.info("All Test Dataset: Loss {0:g} | Acc {1:g} | Precision {2:g} | "
"Recall {3:g} | F1 {4:g} | AUC {5:g}"
.format(test_loss, test_acc, test_pre, test_rec, test_F1, test_auc))
# Save the prediction result
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
dh.create_prediction_file(output_file=SAVE_DIR + "/predictions.json", front_data_id=test_data['f_id'],
behind_data_id=test_data['b_id'], true_labels=true_labels,
predict_labels=predicted_labels, predict_scores=predicted_scores)
logger.info("All Done.")
if __name__ == '__main__':
test_abcnn()
| [
"chinawolfman@hotmail.com"
] | chinawolfman@hotmail.com |
7b8e14dedc35d80a37f531e52050c5e7631b4e23 | 03e115c1937ec7bd1e249f82db0225828eaaa186 | /2-GUI (tkinter)/5marcos2.py | ba25a0d2f06d0cb9bab02c46d760c7a49c2eaa32 | [] | no_license | mivargas/Master-python | 236c04205637ddd44d1cc879de2b7c48418153f9 | 9d1c04a8d658aa0dd8620ed792fa2133adfa57e7 | refs/heads/master | 2023-03-06T13:35:58.177058 | 2021-02-16T00:06:00 | 2021-02-16T00:06:00 | 321,731,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | from tkinter import *
ventana = Tk()
ventana.title("Marcos | Master en python")
ventana.geometry("700x700")
marco_padre = Frame(ventana, width=250, height=250)
marco_padre.config(
bg="lightblue"
)
marco_padre.pack(side=TOP, anchor=N, fill=X, expand=YES) #el anchor es para que se apegue lo mas posible al borde superor, np basta el top es igual en el caso del de abajo seria N con bottom
marco = Frame(marco_padre, width=250, height=250) #este y el de abajo estan contendios en el marco padre
marco.config(
bg="blue",
bd=5, #borde (tamaño)
relief="solid" #relieve del borde
#relief="raised"
)
marco.pack(side=RIGHT, anchor=SE)
marco = Frame(marco_padre, width=250, height=250)
marco.config(
bg="yellow",
bd=5, #borde (tamaño)
relief="solid" #relieve del borde
#relief="raised"
)
marco.pack(side=LEFT, anchor=SW)
marco.pack_propagate(False) #sin esto al incluir el label el marco se contrae (se hace pequeño y pierde estilo)
texto = Label(marco, text="primer marco")
texto.config(
bg="red",
fg="white",
font=("Arial", 20),
#height=10, usamos fill x y expand yes para lograr esto
#width=10,
bd=3,
relief=SOLID,
anchor=CENTER
)
texto.pack(fill=Y, expand=YES)
marco_padre = Frame(ventana, width=250, height=250)
marco_padre.config(
bg="lightblue"
)
marco_padre.pack(side=BOTTOM, anchor=S, fill=X, expand=YES)
marco = Frame(marco_padre, width=250, height=250) #este y el de abajo estan contendios en el marco padre
marco.config(
bg="red",
bd=5, #borde (tamaño)
relief="solid" #relieve del borde
#relief="raised"
)
marco.pack(side=RIGHT, anchor=SE)
marco = Frame(marco_padre, width=250, height=250)
marco.config(
bg="green",
bd=5, #borde (tamaño)
relief="solid" #relieve del borde
#relief="raised"
)
marco.pack(side=LEFT, anchor=SW)
ventana.mainloop() | [
"miguelvargas619@gmail.com"
] | miguelvargas619@gmail.com |
bd6ef2fdadfa54e915b11813bf6ee532622609f2 | b0814b43440a36c9998924c9fe05f335302a2717 | /venv/lib/python2.7/site-packages/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py | 7b3f8c8ee38a2a381f8e16950d90eff5ec613387 | [
"MIT"
] | permissive | nagyistge/electrode-gui | 0b47324ce8c61ffb54c24c400aee85f16fd79c7a | 6d89c78ea61935042ead5df5e1474101df3557eb | refs/heads/master | 2021-06-03T22:47:30.329355 | 2016-09-13T19:43:31 | 2016-09-13T19:43:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.semtools.registration.brainsresize import BRAINSResize
def test_BRAINSResize_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='--inputVolume %s',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
pixelType=dict(argstr='--pixelType %s',
),
scaleFactor=dict(argstr='--scaleFactor %f',
),
terminal_output=dict(nohash=True,
),
)
inputs = BRAINSResize.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_BRAINSResize_outputs():
output_map = dict(outputVolume=dict(),
)
outputs = BRAINSResize.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| [
"xavierislam@gmail.com"
] | xavierislam@gmail.com |
3d5c505cb30f8c8837d93f222fe86e1aeb19d869 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D01C/DOCADVD01CUN.py | 316d74fc708591564f4d9989068543c3bfebce05 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,529 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD01CUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 1, MAX: 1},
{ID: 'BUS', MIN: 1, MAX: 1},
{ID: 'INP', MIN: 1, MAX: 10},
{ID: 'FCA', MIN: 1, MAX: 3},
{ID: 'DTM', MIN: 1, MAX: 3},
{ID: 'FTX', MIN: 0, MAX: 20},
{ID: 'FII', MIN: 1, MAX: 9, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 2},
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'DTM', MIN: 1, MAX: 1, LEVEL: [
{ID: 'LOC', MIN: 1, MAX: 1},
]},
{ID: 'MOA', MIN: 1, MAX: 5, LEVEL: [
{ID: 'ALC', MIN: 0, MAX: 1, LEVEL: [
{ID: 'PCD', MIN: 0, MAX: 2},
]},
]},
{ID: 'LOC', MIN: 1, MAX: 3, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'PAI', MIN: 1, MAX: 1, LEVEL: [
{ID: 'FII', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 1},
]},
{ID: 'PAT', MIN: 1, MAX: 5, LEVEL: [
{ID: 'FII', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'PCD', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'TOD', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 1},
]},
{ID: 'TSR', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 5},
]},
{ID: 'INP', MIN: 0, MAX: 5, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 2},
]},
{ID: 'RFF', MIN: 1, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 2},
]},
{ID: 'DOC', MIN: 1, MAX: 20, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'PCD', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'ICD', MIN: 0, MAX: 20, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 9},
]},
{ID: 'ALI', MIN: 0, MAX: 9, LEVEL: [
{ID: 'NAD', MIN: 0, MAX: 3, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
]},
{ID: 'AUT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
c4c1858df652ab42311df23401c4eac2e1bf7dcb | 7fc26de436ad958fc02e11fc7f7486f9ac775d0b | /services/url_lookup/project/tests/test_url.py | 717ba731587a4df19ed77f4afbd8868a2d611887 | [] | no_license | chenjienan/url_lookup_service | 633071d78598b2ee248b6a6fc3ceee2bf4ccca9b | ef10d58450af97221697ac0fa26cfb9e5a43415e | refs/heads/master | 2023-05-12T00:09:36.278356 | 2019-08-06T16:45:05 | 2019-08-06T16:45:05 | 199,910,038 | 0 | 0 | null | 2023-05-01T21:14:08 | 2019-07-31T18:36:20 | Python | UTF-8 | Python | false | false | 4,009 | py | import json
import unittest
from project.tests.base import BaseTestCase
from project import db
from project.api.models import Url
class TestUrlService(BaseTestCase):
"""Tests for the URL Lookup Service."""
def test_urls(self):
"""Ensure the /ping route behaves correctly."""
# Action
response = self.client.get('/ping')
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 200)
self.assertIn('pong!', data['message'])
self.assertIn('success', data['status'])
def test_add_url(self):
"""Ensure a new url can be added to the database."""
# Arrange
with self.client:
# Action
response = self.client.post(
'/urls',
data=json.dumps({
'url': 'google.com'
}),
content_type='application/json',
)
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 201)
self.assertIn('google.com was added!', data['message'])
self.assertIn('success', data['status'])
def test_add_url_invalid_json(self):
"""Ensure error is thrown if the JSON object is empty."""
# Arrange
with self.client:
# Action
response = self.client.post(
'/urls',
data=json.dumps({}),
content_type='application/json',
)
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload.', data['message'])
self.assertIn('fail', data['status'])
def test_add_duplicate_url(self):
"""Ensure error is thrown if the url already exists."""
# Arrange
with self.client:
self.client.post(
'/urls',
data=json.dumps({
'url': 'amazon.com'
}),
content_type='application/json',
)
# Action
response = self.client.post(
'/urls',
data=json.dumps({
'url': 'amazon.com'
}),
content_type='application/json',
)
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 400)
self.assertIn('That url already exists.', data['message'])
self.assertIn('fail', data['status'])
def test_get_urlinfo_url_not_exist(self):
"""Ensure get URL info behaves correctly."""
# Arrange
with self.client:
# Action
response = self.client.get(f'/urlinfo/google.com:443/something.html%3Fq%3Dgo%2Blang')
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 200)
self.assertIn('success', data['status'])
self.assertIn('false', data['isMalware'])
def test_get_urlinfo_url_exists(self):
"""Ensure get URL info behaves correctly when url is empty."""
# Arrange
url = Url(url='abc.com')
db.session.add(url)
db.session.commit()
with self.client:
# Action
response = self.client.get(f'/urlinfo/abc.com/somepath?q=abc')
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 200)
self.assertIn('success', data['status'])
self.assertIn('true', data['isMalware'])
def test_get_urlinfo_url_empty(self):
# Arrange
with self.client:
# Action
response = self.client.get(f'/urlinfo/')
# Assert
self.assertEqual(response.status_code, 404)
if __name__ == '__main__':
unittest.main()
| [
"chenjienan2009@gmail.com"
] | chenjienan2009@gmail.com |
9ab6311a01d824701beb7379e05276521f44673f | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/cyphon/cyphon/aggregator/filters/tests/test_services.py | ea76a8d0f809dacc5a2677be2ac5ed231cb91e30 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-copyleft"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 2,269 | py | # -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Tests Filter services.
"""
# standard library
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
# third party
from django.test import TestCase
# local
from aggregator.filters.models import Filter
from aggregator.filters.services import execute_filter_queries
from aggregator.reservoirs.models import Reservoir
from tests.fixture_manager import get_fixtures
class ExecuteFilterQueriesTestCase(TestCase):
"""
Tests the execute_filter_queries function.
"""
fixtures = get_fixtures([])
def test_execute_filter_queries(self):
"""
Tests the execute_filter_queries function.
"""
query = 'mock_query'
stream_task = 'BKGD_SRCH'
doc_ids = [3, 4, 5]
mock_results = Mock()
mock_pumproom = Mock()
mock_pumproom.get_results = Mock(return_value=mock_results)
with patch('aggregator.filters.services.PumpRoom',
return_value=mock_pumproom) as new_pumproom:
with patch('aggregator.filters.services.Reservoir.objects'):
Filter.objects.create_reservoir_query = Mock(return_value=query)
Reservoir.objects.find_enabled = Mock(return_value=doc_ids)
results = execute_filter_queries()
new_pumproom.assert_called_once_with(reservoirs=doc_ids,
task=stream_task)
mock_pumproom.get_results.assert_called_once_with(query)
self.assertEqual(results, mock_results)
| [
"a.songer@protonmail.com"
] | a.songer@protonmail.com |
fbfb59163e735907eafbee626470acc4c0e48d44 | 37c3b81ad127c9e3cc26fa9168fda82460ca9bda | /SW_expert/sw_3752_가능한시험점수.py | 1d952fc7daa9160bad7302dde04267851a61397f | [] | no_license | potomatoo/TIL | 5d85b69fdaed68966db7cfe2a565b7c64ed3e816 | 395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c | refs/heads/master | 2021-07-08T16:19:40.410097 | 2021-04-19T02:33:40 | 2021-04-19T02:33:40 | 238,872,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import sys
sys.stdin = open('./input/input_3752.txt','r')
T = int(input())
for t in range(1, T+1):
N = int(input())
test = list(map(int, input().split()))
arr = []
for i in range(N):
arr = list(set(arr))
if not arr:
arr.append(test[i])
continue
x = len(arr)
for a in range(x):
s = arr[a] + test[i]
arr.append(s)
arr.append(test[i])
ans = len(list(set(arr)))+1
print('#{} {}'.format(t, ans)) | [
"duseh73@gmail.com"
] | duseh73@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.