blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0aab616a8c0ca629a1f4e434c91a20302f47285b
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/monitoring/metricsscope/v1/monitoring-metricsscope-v1-py/google/monitoring/metricsscope_v1/__init__.py
|
1530640d664a0943bb90109a53b0c43ead78fa5b
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.metrics_scopes import MetricsScopesClient
from .services.metrics_scopes import MetricsScopesAsyncClient
from .types.metrics_scope import MetricsScope
from .types.metrics_scope import MonitoredProject
from .types.metrics_scopes import CreateMonitoredProjectRequest
from .types.metrics_scopes import DeleteMonitoredProjectRequest
from .types.metrics_scopes import GetMetricsScopeRequest
from .types.metrics_scopes import ListMetricsScopesByMonitoredProjectRequest
from .types.metrics_scopes import ListMetricsScopesByMonitoredProjectResponse
from .types.metrics_scopes import OperationMetadata
# Public names re-exported by the metricsscope_v1 package; alphabetical
# except for the async client, which is listed first.
__all__ = (
    'MetricsScopesAsyncClient',
    'CreateMonitoredProjectRequest',
    'DeleteMonitoredProjectRequest',
    'GetMetricsScopeRequest',
    'ListMetricsScopesByMonitoredProjectRequest',
    'ListMetricsScopesByMonitoredProjectResponse',
    'MetricsScope',
    'MetricsScopesClient',
    'MonitoredProject',
    'OperationMetadata',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
b4d83d9b56e607732cd70a9353169eb6c897b04c
|
2a2435c1955f61727c9968ea87a599d6e999c1bd
|
/core/migrations/0010_billingaddress.py
|
88d7b724fb03f794792b0d947aca41ec9c668d05
|
[] |
no_license
|
mahmoudabuelnaga/dje-commerce
|
9a5ba483b568613860d55c6062a01cd08ff9466c
|
964917da53dc6045c4374943fce68d7de0edad37
|
refs/heads/master
| 2020-12-15T17:59:11.443834
| 2020-02-23T23:55:29
| 2020-02-23T23:55:29
| 235,202,147
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
# Generated by Django 2.2 on 2020-01-20 02:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
    """Adds the BillingAddress model, one address row per saved checkout."""

    dependencies = [
        # BillingAddress.user points at the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0009_auto_20200119_0012'),
    ]
    operations = [
        migrations.CreateModel(
            name='BillingAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('street_address', models.CharField(max_length=255)),
                ('apartment_address', models.CharField(max_length=255)),
                # multiple=True stores a comma-separated list of ISO country
                # codes; max_length=746 is presumably the width computed by
                # django-countries for all codes — confirm before changing.
                ('countries', django_countries.fields.CountryField(max_length=746, multiple=True)),
                ('zip', models.CharField(max_length=100)),
                # Deleting the user deletes their billing addresses.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"mahmoudaboelnaga392@gmail.com"
] |
mahmoudaboelnaga392@gmail.com
|
e72fb5148e9d6560555da3cb66069e5cb311d78e
|
147519505f3c47e5f10d9679e07d3719931b9fd0
|
/my_contacts/contacts/views.py
|
177a81dfd5a303c238013aa4c1cbcc9b156afbe2
|
[] |
no_license
|
grbalmeida/hello-django
|
85ed28d8d47a9a2e072f3eecd13d22fb2e977a31
|
9ef261ba5faeac3de8d36eeb7efa8974e5d1e661
|
refs/heads/master
| 2020-08-12T10:10:48.554349
| 2019-12-20T01:18:33
| 2019-12-20T01:18:33
| 214,748,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
from django.shortcuts import render, get_object_or_404, Http404, redirect
from django.core.paginator import Paginator
from django.db.models import Q, Value
from django.db.models.functions import Concat
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .models import Contact
@login_required(redirect_field_name='login')
def index(request):
    """List visible contacts, newest first, paginated two per page."""
    visible = Contact.objects.filter(show=True).order_by('-id')
    page_number = request.GET.get('p')
    page = Paginator(visible, 2).get_page(page_number)
    return render(request, 'contacts/index.html', {'contacts': page})
@login_required(redirect_field_name='login')
def see_contact(request, contact_id):
    """Render a single contact's detail page; hidden contacts 404."""
    contact = get_object_or_404(Contact, id=contact_id)
    if contact.show:
        return render(request, 'contacts/see_contact.html',
                      {'contact': contact})
    raise Http404()
@login_required(redirect_field_name='login')
def search(request):
    """Search contacts by concatenated full name or phone, paginated."""
    term = request.GET.get('term')
    # Missing or empty term: warn and bounce back to the index.
    if not term:
        messages.add_message(
            request,
            messages.WARNING,
            'Term field cannot be empty'
        )
        return redirect('index')
    full_name_expr = Concat('name', Value(' '), 'last_name')
    matches = Contact.objects.annotate(full_name=full_name_expr).filter(
        Q(full_name__icontains=term) | Q(phone__icontains=term)
    )
    page_number = request.GET.get('p')
    page = Paginator(matches, 2).get_page(page_number)
    return render(request, 'contacts/search.html', {'contacts': page})
|
[
"g.r.almeida@live.com"
] |
g.r.almeida@live.com
|
8dc96ae6d44f834bc6be387acb6a7d8ae7d3e972
|
a9eed4d7b8d5256af9f33363761683bba32f106f
|
/apps/organization/migrations/0006_auto_20180620_2140.py
|
98e71397529b5d6e0e4d6500af697f01abd731dc
|
[] |
no_license
|
cannon-liu/mkonline
|
12735d4761663ba42fdd6fe781a2658a5db1b383
|
2a1c64c10ae67abe58c1bfcd77c564fd53957067
|
refs/heads/master
| 2020-03-28T22:19:08.747770
| 2018-09-18T06:17:50
| 2018-09-18T06:17:50
| 149,223,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
# Generated by Django 2.0.6 on 2018-06-20 21:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters Teacher.image to upload under teacher/%Y/%m."""

    dependencies = [
        ('organization', '0005_auto_20180620_1655'),
    ]
    operations = [
        migrations.AlterField(
            model_name='teacher',
            name='image',
            # verbose_name is Chinese for "teacher image".
            field=models.ImageField(upload_to='teacher/%Y/%m', verbose_name='教师图片'),
        ),
    ]
|
[
"woliuliwen@163.com"
] |
woliuliwen@163.com
|
50d3fa769119f65fde8c60106790dd20765218bf
|
effce116340b7d937bd285e43b49e1ef83d56156
|
/data_files/profiler.py
|
721d79980232dad6801fb4dd8236482b83610596
|
[] |
no_license
|
DL2021Spring/CourseProject
|
a7c7ef57d69bc1b21e3303e737abb27bee3bd585
|
108cdd906e705e9d4d05640af32d34bfc8b124da
|
refs/heads/master
| 2023-04-11T18:52:30.562103
| 2021-05-18T09:59:59
| 2021-05-18T09:59:59
| 365,733,976
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
from cProfile import Profile
from pstats import Stats
__author__ = 'Daniel'


def demo():
    """Profile a trivial call with cProfile and print summary statistics.

    Prints stripped, cumulative-time-sorted stats plus caller/callee
    tables to stdout. Returns None.
    """
    identity = lambda x: x
    profiler = Profile()
    # BUG FIX: the original called runcall(identity) with no argument,
    # which raised TypeError since the lambda requires one positional arg.
    profiler.runcall(identity, 42)
    stats = Stats(profiler)
    stats.strip_dirs()
    stats.sort_stats('cumulative')
    stats.print_stats()
    stats.print_callers()
    stats.print_callees()
|
[
"1042448815@qq.com"
] |
1042448815@qq.com
|
045e91eefbb6784e11a0d581027f7438c82d7ee4
|
211874c8c72ad0ff1e4d30b29f2e179161a36195
|
/lingvo/tasks/milan/params/dual_encoder_recipe.py
|
34d43f560fae603a1f930703f68e0a0e586a149f
|
[
"Apache-2.0"
] |
permissive
|
sailfish009/lingvo
|
d3308260d2365477e38c4b1b61bdaa4405172b1e
|
432e1b0918459c28fcfbed0e6d1a2f48a962a80f
|
refs/heads/master
| 2023-04-19T03:15:51.420821
| 2021-04-27T22:52:45
| 2021-04-27T22:53:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,743
|
py
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers for defining Milan dual-encoder models."""
import functools
from lingvo.core import base_model_params
from lingvo.core import layers as lingvo_layers
from lingvo.core import optimizer
from lingvo.core import schedule
from lingvo.tasks.milan import constants
from lingvo.tasks.milan import dataset_spec
from lingvo.tasks.milan import dual_encoder
from lingvo.tasks.milan import input_generator
class RecipeError(Exception):
  """Raised when a dual-encoder recipe is configured inconsistently."""
class DualEncoderRecipe(base_model_params.SingleTaskModelParams):
  """Base class that simplifies configuration of Milan dual encoder models.

  `DualEncoderRecipe` is a `SingleTaskModelParams` with extra builder-like
  methods for configuring the dual encoder (the `Task()` params) and input
  generators (`Train()`, `Dev()`, `Test()`).

  In typical usage, model definitions subclass `DualEncoderRecipe`, call helper
  methods in the constructor to configure the dual encoder, and specify a
  `default_dataset` for the model to run on. For example::

    @model_registry.RegisterSingleTaskModel
    class MyExperiment(DualEncoderRecipe):

      def __init__(self):
        super().__init__()
        self.AddModality(
            'TEXT',
            input_feature='text_feature',
            id_feature='text_id',
            encoder=MyTextEncoder.Params(),
            encoder_output_dim=42)
        # Preprocess the raw 'image_feature' input prior to encoding.
        self.AddPreprocessor('image_feature', ImagePreprocessor.Params())
        self.AddModality(
            'IMAGE',
            input_feature='image_feature',
            id_feature='image_id',
            encoder=MyImageEncoder.Params(),
            encoder_output_dim=67)

      @property
      def default_dataset(self) -> DatasetSpec:
        # Point to your dataset of choice
        ...
  """

  def __init__(self):
    """Sets up default input, optimization, and checkpointing params."""
    # Define these members here to make pytype happy.
    self.dataset = None
    self.input_params = None
    self.task_params = None
    self.dataset = self._ChooseDatasetSpec()
    # Base input params, shared by both train and eval sets.
    self.input_params = input_generator.MilanInputGenerator.Params().Set(
        batch_size=64,
        # Run input pipeline on each TPU host (vs. one for all hosts) to
        # avoid input-boundedness.
        use_per_host_infeed=True)
    # Default optimization and checkpointer settings.
    self.task_params = dual_encoder.MilanTask.Params()
    self.task_params.train.Set(
        clip_gradient_norm_to_value=1.0,
        grad_norm_tracker=lingvo_layers.GradNormTracker.Params().Set(
            name='grad_norm_tracker',
            # Don't clip if the grad norm is already smaller than this.
            grad_norm_clip_cap_min=0.1),
        save_max_to_keep=2000,
        save_keep_checkpoint_every_n_hours=0.1667,  # At most every 10 min.
        optimizer=optimizer.Adam.Params().Set(
            beta1=0.9, beta2=0.999, epsilon=1e-8),
        learning_rate=0.0001,
        lr_schedule=schedule.StepwiseExponentialSchedule.Params().Set(
            decay=0.999, num_steps_per_decay=1000),
        tpu_steps_per_loop=100,
        max_steps=40000)

  def _ChooseDatasetSpec(self):
    """Returns the `DatasetSpec` to be used by the recipe."""
    return self.default_dataset

  @property
  def default_dataset(self) -> dataset_spec.DatasetSpec:
    """Returns a default dataset for the recipe to use.

    Subclasses should override this method to specify a dataset, or add logic
    (elsewhere) to choose the dataset at runtime, falling back to this one
    as the default.
    """
    raise NotImplementedError()

  @property
  def encoder_configs(self):
    """Returns the dict of per-modality encoder configs (mutable)."""
    return self.task_params.dual_encoder.encoder_configs

  def AddModality(self, name: str, **kwargs):
    """Registers modality `name` and returns its `EncoderConfig`."""
    config = dual_encoder.EncoderConfig().Set(**kwargs)
    self.encoder_configs[name] = config
    return config

  def AddPreprocessor(self, input_feature, preprocessor):
    """Installs a copy of `preprocessor` to run on `input_feature`."""
    self.input_params.preprocessors[input_feature] = preprocessor.Copy()

  def StartFromCheckpoint(self, checkpoint_path: str):
    """Configures the recipe to start training from the given model checkpoint.

    This is intended to be used in fine-tuning recipes. All variables, including
    Adam accumulators, are loaded from the checkpoint except for global step
    (so that it resets to 0 in new experiment) and grad norm tracker stats
    (since gradients are likely to have different moments in the new
    experiment).

    Args:
      checkpoint_path: Path of the checkpoint to start training from.
    """
    self.task_params.train.init_from_checkpoint_rules = {
        checkpoint_path: (
            # Load every variable under its own name.
            [('(.*)', '%s')],
            # Don't load vars matching these regexes.
            ['.*grad_norm_tracker/.*', 'global_step'])
    }

  # Methods below implement the lingvo SingleTaskModelParams interface,
  # allowing the recipe to be registered with `RegisterSingleTaskModel()`.

  def Train(self):
    """Returns Params for the training dataset."""
    dataset_fn = functools.partial(
        self.dataset.Read,
        split=constants.Split.TRAIN,
        shuffle_buffer_size=1024)
    return self.input_params.Copy().Set(name='Train', dataset_fn=dataset_fn)

  def Dev(self):
    """Returns Params for the development dataset."""
    dataset_fn = functools.partial(
        self.dataset.Read, split=constants.Split.DEV, shuffle_buffer_size=0)
    return self.input_params.Copy().Set(name='Dev', dataset_fn=dataset_fn)

  def Test(self):
    """Returns Params for the test dataset."""
    dataset_fn = functools.partial(
        self.dataset.Read, split=constants.Split.TEST, shuffle_buffer_size=0)
    return self.input_params.Copy().Set(name='Test', dataset_fn=dataset_fn)

  def Task(self):
    """Returns the configured `MilanTask` params.

    Raises:
      RecipeError: If no encoder has been configured via `AddModality`.
    """
    task_params = self.task_params.Copy()
    if not task_params.dual_encoder.encoder_configs:
      raise RecipeError('Must configure at least one encoder.')
    assert task_params.dual_encoder.label_fn is None
    task_params.dual_encoder.label_fn = self.dataset.Label
    return task_params
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
ca570fc3f6bac84c77a2c7ed692f80fdf74003e1
|
d737fa49e2a7af29bdbe5a892bce2bc7807a567c
|
/software/qt_examples/src/pyqt-official/sql/cachedtable.py
|
fc5e0ccdcaaacec4422fb011786cc34c79471638
|
[
"MIT",
"CC-BY-NC-SA-4.0",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
permissive
|
TG-Techie/CASPER
|
ec47dfbfd6c3a668739ff4d707572e0b853518b4
|
2575d3d35e7dbbd7f78110864e659e582c6f3c2e
|
refs/heads/master
| 2020-12-19T12:43:53.825964
| 2020-01-23T17:24:04
| 2020-01-23T17:24:04
| 235,736,872
| 0
| 1
|
MIT
| 2020-01-23T17:09:19
| 2020-01-23T06:29:10
|
Python
|
UTF-8
|
Python
| false
| false
| 4,184
|
py
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QApplication, QDialog, QDialogButtonBox,
QHBoxLayout, QMessageBox, QPushButton, QTableView)
from PyQt5.QtSql import QSqlTableModel
import connection
class TableEditor(QDialog):
    """Dialog for editing one SQL table through a cached model.

    Edits are buffered by the QSqlTableModel (OnManualSubmit strategy) and
    written to the database only when the Submit button is pressed; Revert
    discards the cached edits.
    """

    def __init__(self, tableName, parent=None):
        super(TableEditor, self).__init__(parent)
        # OnManualSubmit caches all edits until submitAll()/revertAll().
        self.model = QSqlTableModel(self)
        self.model.setTable(tableName)
        self.model.setEditStrategy(QSqlTableModel.OnManualSubmit)
        self.model.select()
        self.model.setHeaderData(0, Qt.Horizontal, "ID")
        self.model.setHeaderData(1, Qt.Horizontal, "First name")
        self.model.setHeaderData(2, Qt.Horizontal, "Last name")
        view = QTableView()
        view.setModel(self.model)
        submitButton = QPushButton("Submit")
        submitButton.setDefault(True)
        revertButton = QPushButton("&Revert")
        quitButton = QPushButton("Quit")
        # Vertical button column to the right of the table view.
        buttonBox = QDialogButtonBox(Qt.Vertical)
        buttonBox.addButton(submitButton, QDialogButtonBox.ActionRole)
        buttonBox.addButton(revertButton, QDialogButtonBox.ActionRole)
        buttonBox.addButton(quitButton, QDialogButtonBox.RejectRole)
        submitButton.clicked.connect(self.submit)
        revertButton.clicked.connect(self.model.revertAll)
        quitButton.clicked.connect(self.close)
        mainLayout = QHBoxLayout()
        mainLayout.addWidget(view)
        mainLayout.addWidget(buttonBox)
        self.setLayout(mainLayout)
        self.setWindowTitle("Cached Table")

    def submit(self):
        """Write cached edits inside a transaction; roll back and warn on failure."""
        self.model.database().transaction()
        if self.model.submitAll():
            self.model.database().commit()
        else:
            self.model.database().rollback()
            QMessageBox.warning(self, "Cached Table",
                    "The database reported an error: %s" % self.model.lastError().text())
if __name__ == '__main__':
    import sys

    app = QApplication(sys.argv)
    # connection.createConnection() presumably opens the demo database
    # used by the PyQt SQL examples; abort if it fails — confirm in
    # connection.py.
    if not connection.createConnection():
        sys.exit(1)
    editor = TableEditor('person')
    editor.show()
    sys.exit(editor.exec_())
|
[
"TGTechie01@gmail.com"
] |
TGTechie01@gmail.com
|
9fc60961ec8cdf589ac40c7a9a1ed86cf073e0f3
|
2c95e0f7bb3f977306f479d5c99601ab1d5c61f2
|
/olive/rpc/farmer_rpc_api.py
|
2544c04855463194e9428596f772ea7d75e8b7b9
|
[
"Apache-2.0"
] |
permissive
|
Olive-blockchain/Olive-blockchain-CLI
|
d62444f8456467f8105531178d2ae53d6e92087d
|
8c4a9a382d68fc1d71c5b6c1da858922a8bb8808
|
refs/heads/main
| 2023-07-19T03:51:08.700834
| 2021-09-19T16:05:10
| 2021-09-19T16:05:10
| 406,045,499
| 0
| 0
|
Apache-2.0
| 2021-09-19T16:05:10
| 2021-09-13T16:20:38
|
Python
|
UTF-8
|
Python
| false
| false
| 5,569
|
py
|
from typing import Callable, Dict, List, Optional
from olive.farmer.farmer import Farmer
from olive.types.blockchain_format.sized_bytes import bytes32
from olive.util.byte_types import hexstr_to_bytes
from olive.util.ws_message import WsRpcMessage, create_payload_dict
class FarmerRpcApi:
    """RPC API surface for the farmer service.

    Maps RPC route paths to handler coroutines and converts farmer state
    changes into WebSocket push messages aimed at the wallet UI.
    """

    def __init__(self, farmer: Farmer):
        # Underlying farmer instance; every handler delegates to it.
        self.service = farmer
        self.service_name = "olive_farmer"

    def get_routes(self) -> Dict[str, Callable]:
        """Returns the mapping from RPC route path to handler coroutine."""
        return {
            "/get_signage_point": self.get_signage_point,
            "/get_signage_points": self.get_signage_points,
            "/get_reward_targets": self.get_reward_targets,
            "/set_reward_targets": self.set_reward_targets,
            "/get_pool_state": self.get_pool_state,
            "/set_payout_instructions": self.set_payout_instructions,
            "/get_harvesters": self.get_harvesters,
            "/get_pool_login_link": self.get_pool_login_link,
        }

    async def _state_changed(self, change: str, change_data: Dict) -> List[WsRpcMessage]:
        """Translates a farmer state change into wallet-UI push payloads.

        Unrecognized change types yield no messages.
        """
        if change == "new_signage_point":
            sp_hash = change_data["sp_hash"]
            # Push the full signage point record, not just the hash.
            data = await self.get_signage_point({"sp_hash": sp_hash.hex()})
            return [
                create_payload_dict(
                    "new_signage_point",
                    data,
                    self.service_name,
                    "wallet_ui",
                )
            ]
        elif change == "new_farming_info":
            return [
                create_payload_dict(
                    "new_farming_info",
                    change_data,
                    self.service_name,
                    "wallet_ui",
                )
            ]
        elif change == "new_plots":
            return [
                create_payload_dict(
                    "get_harvesters",
                    change_data,
                    self.service_name,
                    "wallet_ui",
                )
            ]
        return []

    async def get_signage_point(self, request: Dict) -> Dict:
        """Returns the stored signage point whose challenge-chain hash matches
        request["sp_hash"], together with its proofs of space.

        Raises:
            ValueError: if no stored signage point has that hash.
        """
        sp_hash = hexstr_to_bytes(request["sp_hash"])
        for _, sps in self.service.sps.items():
            for sp in sps:
                if sp.challenge_chain_sp == sp_hash:
                    pospaces = self.service.proofs_of_space.get(sp.challenge_chain_sp, [])
                    return {
                        "signage_point": {
                            "challenge_hash": sp.challenge_hash,
                            "challenge_chain_sp": sp.challenge_chain_sp,
                            "reward_chain_sp": sp.reward_chain_sp,
                            "difficulty": sp.difficulty,
                            "sub_slot_iters": sp.sub_slot_iters,
                            "signage_point_index": sp.signage_point_index,
                        },
                        "proofs": pospaces,
                    }
        raise ValueError(f"Signage point {sp_hash.hex()} not found")

    async def get_signage_points(self, _: Dict) -> Dict:
        """Returns every stored signage point with its proofs of space."""
        result: List = []
        for _, sps in self.service.sps.items():
            for sp in sps:
                pospaces = self.service.proofs_of_space.get(sp.challenge_chain_sp, [])
                result.append(
                    {
                        "signage_point": {
                            "challenge_hash": sp.challenge_hash,
                            "challenge_chain_sp": sp.challenge_chain_sp,
                            "reward_chain_sp": sp.reward_chain_sp,
                            "difficulty": sp.difficulty,
                            "sub_slot_iters": sp.sub_slot_iters,
                            "signage_point_index": sp.signage_point_index,
                        },
                        "proofs": pospaces,
                    }
                )
        return {"signage_points": result}

    async def get_reward_targets(self, request: Dict) -> Dict:
        """Returns the farmer's reward targets (delegates to the service)."""
        search_for_private_key = request["search_for_private_key"]
        return self.service.get_reward_targets(search_for_private_key)

    async def set_reward_targets(self, request: Dict) -> Dict:
        """Updates farmer and/or pool reward targets; absent keys are left unchanged."""
        farmer_target, pool_target = None, None
        if "farmer_target" in request:
            farmer_target = request["farmer_target"]
        if "pool_target" in request:
            pool_target = request["pool_target"]
        self.service.set_reward_targets(farmer_target, pool_target)
        return {}

    async def get_pool_state(self, _: Dict) -> Dict:
        """Returns per-pool state dicts with puzzle hashes hex-encoded."""
        pools_list = []
        for p2_singleton_puzzle_hash, pool_dict in self.service.pool_state.items():
            # Copy so the hex conversion doesn't mutate the service's state.
            pool_state = pool_dict.copy()
            pool_state["p2_singleton_puzzle_hash"] = p2_singleton_puzzle_hash.hex()
            pools_list.append(pool_state)
        return {"pool_state": pools_list}

    async def set_payout_instructions(self, request: Dict) -> Dict:
        """Sets payout instructions for the given (hex) launcher id."""
        launcher_id: bytes32 = hexstr_to_bytes(request["launcher_id"])
        await self.service.set_payout_instructions(launcher_id, request["payout_instructions"])
        return {}

    async def get_harvesters(self, _: Dict):
        """Returns the harvester summary from the service."""
        return await self.service.get_harvesters()

    async def get_pool_login_link(self, request: Dict) -> Dict:
        """Generates a pool login link for the given (hex) launcher id.

        Raises:
            ValueError: if the service fails to produce a link.
        """
        launcher_id: bytes32 = bytes32(hexstr_to_bytes(request["launcher_id"]))
        login_link: Optional[str] = await self.service.generate_login_link(launcher_id)
        if login_link is None:
            raise ValueError(f"Failed to generate login link for {launcher_id.hex()}")
        return {"login_link": login_link}
|
[
"87711356+Olive-blockchain@users.noreply.github.com"
] |
87711356+Olive-blockchain@users.noreply.github.com
|
aaa6aa548821da963e638937b213dc378966b3c7
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/55/usersdata/88/23890/submittedfiles/av2_p3_civil.py
|
ed8eac935d52a83bb78809e7cbded4971043205d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def slinha(a, x):
    """Return the sum of row x of the 2-D array a.

    Vectorized replacement for the original element-by-element loop;
    same result for any 2-D numpy array.
    """
    return a[x, :].sum()
def scoluna(a, y):
    """Return the sum of column y of the 2-D array a.

    Vectorized replacement for the original element-by-element loop;
    same result for any 2-D numpy array.
    """
    return a[:, y].sum()
def somatorio(a, x, y):
    """Return the sum of row x plus column y of a, excluding a[x, y].

    a[x, y] appears in both the row sum and the column sum, so 2*a[x, y]
    is subtracted — identical to the original slinha+scoluna formula,
    but computed with vectorized numpy sums.
    """
    return (a[x, :].sum() + a[:, y].sum()) - 2 * a[x, y]
# Read the matrix dimension and target coordinates, fill the matrix from
# user input, and print the row+column sum around (x, y).
# The original relied on Python 2's input(), which eval'd the typed text
# into numbers; explicit int()/float() conversion behaves the same there
# and also works on Python 3.
n = int(input('Dê a dimensão da matriz: '))
x = int(input('Digite a coordenada da linha: '))
y = int(input('Digite a coordenada da coluna: '))
a = np.zeros((n, n))
for i in range(0, a.shape[0], 1):
    for j in range(0, a.shape[1], 1):
        a[i, j] = float(input('Digite um elemento da matriz: '))
somatotal = somatorio(a, x, y)
print('%d' % somatotal)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
3951a840c8f398942a0c28d4e5f40a8e9f9f69a1
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/fip_collab/2015_03_17_plastic_polycrystal/calibration.py
|
f385c0c6a6ac236b52df962dea3b4c3b32f84600
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693
| 2017-04-24T19:24:35
| 2017-04-24T19:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,830
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 23 14:25:50 2014
This script performs the MKS calibration given the microstructure function
and the FIP response, both in frequency space.
@author: nhpnp3
"""
import time
import numpy as np
import functions as rr
from functools import partial
import tables as tb
def calibration_procedure(el, H, ns, set_id, step, comp, wrt_file):
    """Calibrate MKS influence coefficients for one response component.

    Reads the microstructure function and the FIP response (both in
    frequency space) from an HDF5 file, computes the influence
    coefficients, and writes them to a second HDF5 file.

    Args:
        el: elements per side of the cubic microstructure (el**3 total).
        H: number of local states in the microstructure function.
        ns: number of samples in the calibration set.
        set_id: identifier string of the calibration dataset.
        step: deformation-step identifier used in the file names.
        comp: FIP component being calibrated.
        wrt_file: log file handle/name passed to rr.WP.
    """
    # open HDF5 file
    base = tb.open_file("D_%s%s_s%s.h5" % (ns, set_id, step), mode="r")
    # retrieve data from HDF5 file
    resp = base.get_node('/', 'r%s' % comp)
    r_fft = resp.r_fft[...]
    M = base.root.msf.M[...]
    # close the HDF5 file
    base.close()
    start = time.time()
    specinfc = np.zeros((H, el**3), dtype='complex64')
    # here we perform the calibration for the scalar FIP
    specinfc[:, 0] = rr.calib(0, M, r_fft, 0, H, el, ns)
    [specinfc[:, 1], p] = rr.calib(1, M, r_fft, 0, H, el, ns)
    # calib_red is simply calib with some default arguments
    calib_red = partial(rr.calib, M=M, r_fft=r_fft,
                        p=p, H=H, el=el, ns=ns)
    # BUG FIX: on Python 3, map() returns a lazy iterator which
    # np.asarray wraps as a 0-d object array; list() restores the
    # original Python 2 behavior and is a no-op change on Python 2.
    specinfc[:, 2:(el**3)] = np.asarray(
        list(map(calib_red, range(2, el**3)))).swapaxes(0, 1)
    # open HDF5 file
    base = tb.open_file("infl_%s%s_s%s.h5" % (ns, set_id, step), mode="a")
    # create a group one level below root called infl[comp]
    group = base.create_group('/',
                              'infl%s' % comp,
                              'influence function for component %s' % comp)
    base.create_array(group,
                      'infl_coef',
                      specinfc,
                      'array of influence coefficients')
    # close the HDF5 file
    base.close()
    end = time.time()
    timeE = np.round((end - start), 3)
    msg = 'Calibration, component %s: %s seconds' % (comp, timeE)
    rr.WP(msg, wrt_file)
|
[
"noahhpaulson@gmail.com"
] |
noahhpaulson@gmail.com
|
c3844394a1d734f67a9d8879ca813c80bfbe37eb
|
80f56878dbceb714266abca85519ebbfa131404e
|
/app/main.py
|
9266f39af6e62cd635ea47fef07f21720c4cb42c
|
[] |
no_license
|
z-sector/async-fastapi-sqlalchemy
|
1b944173972bc8487a2f9c638810ba0ffffbbbf5
|
9d622677c56d6d8495f3c87522216f289d52e2f7
|
refs/heads/main
| 2023-08-15T03:05:10.260060
| 2021-09-12T00:55:02
| 2021-09-12T00:55:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from app.api.main import router as api_router
from app.settings import Settings
# Application-wide settings and the FastAPI instance; all API routes from
# app.api.main are mounted under the /api prefix.
settings = Settings()
app = FastAPI(title="async-fastapi-sqlalchemy")
app.include_router(api_router, prefix="/api")
@app.get("/", include_in_schema=False)
async def health() -> JSONResponse:
    """Health-check endpoint at the root; hidden from the OpenAPI schema."""
    payload = {"message": "It worked!!"}
    return JSONResponse(payload)
if __name__ == "__main__":
    # Development entry point: serve the app directly with uvicorn.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
|
[
"rhoboro@gmail.com"
] |
rhoboro@gmail.com
|
443ad2b069ebe801ccdc2108f6045a11a4f817f6
|
c2e93b806bf439136d7ff651c14601af405eddc5
|
/play_input.py
|
362bf3b9fe6bed561365d670f1af67ed564a0782
|
[] |
no_license
|
mehulchopradev/divya-python-core
|
11bdd09072b81a7f4c46ee84170119655f9d7273
|
0d10fd5697686c3fb46ab1f9b42c0b7d2fb771b8
|
refs/heads/master
| 2020-08-23T20:35:12.946154
| 2019-11-05T03:00:07
| 2019-11-05T03:00:07
| 216,702,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
# Read a value from the user and report whether it is odd or even,
# rejecting non-integer input with a friendly message.
print('Program starts')
raw = input('Enter n : ')
try:
    number = int(raw)
except ValueError:
    print('Please enter integer value')
else:
    # Reached only when int() succeeded.
    if number % 2:
        print('Odd')
    else:
        print('Even')
print('Program ends')
|
[
"Mehul.Chopra@avalara.com"
] |
Mehul.Chopra@avalara.com
|
6b152beccb5eaa5fe80526c70aa33082e6c766ef
|
3a28b1a12d0710c06f6360381ad8be6cf3707907
|
/modular_model/triHPC/triHPCThermo/HPCAllTrays4CstmVapO2_px_N2.py
|
2fd80c1ca4fc8994a818eb65c208cb1c144cf3b0
|
[] |
no_license
|
WheatZhang/DynamicModelling
|
6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02
|
ea099245135fe73e8c9590502b9c8b87768cb165
|
refs/heads/master
| 2020-06-15T14:12:50.373047
| 2019-07-05T01:37:06
| 2019-07-05T01:37:06
| 195,319,788
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
def VapO2_px_N2(P, T, x_N2):
    """Generated surrogate for vapour-phase O2 from P, T and x_N2.

    The fitted polynomial is degree zero, so the normalized inputs do not
    influence the result; the normalizations are kept for interface
    compatibility with the generated model family.
    """
    # Normalize each input about its fitting centre and scale.
    p_norm = (P - 5.62017561e+02) / 2.47804900e-01
    t_norm = (T - -1.74950614e+02) / 6.71933000e-02
    n_norm = (x_N2 - 7.23608844e-01) / 7.27108322e-03
    # Constant term of the fit, then de-normalize the output.
    fitted = 1 * 1.91797051e+00
    return fitted * 1.00000000e+00 + 0.00000000e+00
|
[
"1052632241@qq.com"
] |
1052632241@qq.com
|
59b39957186f3c76e740d3bac8084fb63519bf5e
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/3066.py
|
fce606fdb7b3bf7e4ebcb4d8aa5331d6907dbeba
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
# Contest-style I/O: read all test cases from 'in', write answers to 'out'.
fin = open('in', 'r')
fout = open('out', 'w')
numberOfCases = int(fin.readline())
def findChosenRow():
    # Reads one 4x4 grid block from the global input file: first the
    # 1-based index of the chosen row, then four rows of cards.
    # Returns the chosen row as a list of ints (the remaining rows of the
    # block are consumed but discarded only once the match is found, so
    # trailing rows after the chosen one are left unread by design of the
    # caller calling this once per grid).
    answer = int(fin.readline())
    for rowNum in range (1,5):
        row = fin.readline()
        if rowNum == answer:
            cosenRow = row.split()
            cosenRow = [int(string) for string in cosenRow]
            return cosenRow
def findCommonCard(firstRow, secondRow):
    """Return the unique common card, 0 if several candidates, -1 if none.

    Counts matching pairs exactly like the original nested loops, so rows
    containing duplicate values are treated the same way.
    """
    matches = [c1 for c1 in firstRow for c2 in secondRow if c1 == c2]
    if len(matches) == 1:
        return matches[0]
    if len(matches) > 1:
        return 0
    return -1
# For each test case: read the two chosen rows, find the card common to
# both, and write the verdict in Code Jam output format.
for case in range(1,numberOfCases + 1):
    firstRow = findChosenRow()
    secondRow = findChosenRow()
    answer = findCommonCard(firstRow, secondRow)
    if answer > 0:
        fout.write('case #' + str(case) + ': ' + str(answer) + '\n')
    elif answer == 0:
        fout.write('case #' + str(case) + ': Bad magician!\n')
    elif answer == -1:
        fout.write('case #' + str(case) + ': Volunteer cheated!\n')

# NOTE(review): unused placeholder, never called anywhere in this file.
def method():
    pass

fin.close()
fout.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
4cb9216fe42a1d68811c6513183c40488acaff47
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/models_restruct/deepxde/tools/start.py
|
7e9ecf8ec9ecf1528bb5f166d1ce332103f5b5aa
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 6,862
|
py
|
"""
start before model running
"""
import os
import sys
import json
import shutil
import urllib
import logging
import wget
logger = logging.getLogger("ce")
class DeepXDE_Start(object):
    """
    Custom environment preparation for DeepXDE CE jobs: reads the job
    configuration from environment variables and rewrites example scripts
    to use the paddle backend.
    """
    def __init__(self):
        """
        Read job parameters from environment variables and derive the
        model name from the last '^'-separated part of the QA yaml name.
        """
        self.qa_yaml_name = os.environ["qa_yaml_name"]
        self.rd_yaml_path = os.environ["rd_yaml_path"]
        logger.info("###self.qa_yaml_name: {}".format(self.qa_yaml_name))
        self.reponame = os.environ["reponame"]
        self.system = os.environ["system"]
        self.step = os.environ["step"]
        logger.info("###self.step: {}".format(self.step))
        self.paddle_whl = os.environ["paddle_whl"]
        self.mode = os.environ["mode"]  # function or precision
        self.REPO_PATH = os.path.join(os.getcwd(), self.reponame)
        self.env_dict = {}
        # Model name is the final '^'-separated component of the yaml name.
        self.model = self.qa_yaml_name.split("^")[-1]
        logger.info("###self.model_name: {}".format(self.model))
        self.env_dict["model"] = self.model
        os.environ["model"] = self.model
    def prepare_gpu_env(self):
        """
        Choose gpu or cpu execution based on the target system string.
        """
        if "cpu" in self.system or "mac" in self.system:
            self.env_dict["set_cuda_flag"] = "cpu"  # decided from the system string
        else:
            self.env_dict["set_cuda_flag"] = "gpu"  # decided from the system string
        return 0
    def add_paddle_to_pythonpath(self):
        """
        Prepend the checked-out deepxde directory to PYTHONPATH and select
        the paddle backend for DeepXDE via DDE_BACKEND.
        """
        cwd = os.getcwd()
        paddle_path = os.path.join(cwd, "deepxde")
        old_pythonpath = os.environ.get("PYTHONPATH", "")
        new_pythonpath = f"{paddle_path}:{old_pythonpath}"
        os.environ["PYTHONPATH"] = new_pythonpath
        os.environ["DDE_BACKEND"] = "paddle"
        return 0
    def alter(self, file, old_str, new_str, flag=True, except_str="model.train(0"):
        """
        Replace *old_str* with *new_str* on every line of *file*.

        When *flag* is True the replacement is skipped on lines that
        already contain *new_str* or that contain *except_str*; when False
        every occurrence is replaced unconditionally.
        """
        file_data = ""
        with open(file, "r", encoding="utf-8") as f:
            for line in f:
                if flag:
                    if old_str in line and new_str not in line and except_str not in line:
                        line = line.replace(old_str, new_str)
                else:
                    if old_str in line:
                        line = line.replace(old_str, new_str)
                file_data += line
        with open(file, "w", encoding="utf-8") as f:
            f.write(file_data)
        return 0
    def add_seed(self, file, old_str, new_str):
        """
        Append *new_str* after every line of *file* containing *old_str*
        (used to inject seeding/imports after the deepxde import).

        Lines matching "L-BFGS" without leading spaces also set the global
        flag_LBFGS marker.
        """
        file_data = ""
        with open(file, "r", encoding="utf-8") as f:
            for line in f:
                if old_str in line:
                    if old_str == "L-BFGS":
                        if " " not in line:
                            # Top-level L-BFGS usage: remember it globally.
                            global flag_LBFGS
                            flag_LBFGS = True
                            line += new_str
                    else:
                        line += new_str
                        # line += "paddle.seed(1)\n"
                        # line += "np.random.seed(1)\n"
                file_data += line
        with open(file, "w", encoding="utf-8") as f:
            f.write(file_data)
        return 0
    def change_backend(self, file, backend, flag):
        """
        Toggle backend-specific sections in an example script.

        After a line containing *backend*: with flag True, strip the
        leading "# " from following comment lines (uncomment that
        backend's code); with flag False, prefix following lines with
        "# " (comment the section out) until a "Backend paddle" marker.
        """
        file_data = ""
        if flag is True:
            index = False
            with open(file, "r", encoding="utf-8") as f:
                for line in f:
                    if index is True:
                        if "# " in line and "Backend jax" not in line:
                            line = line.replace("# ", "")
                        else:
                            index = False
                    if backend in line:
                        index = True
                    file_data += line
            with open(file, "w", encoding="utf-8") as f:
                f.write(file_data)
        else:
            index = False
            with open(file, "r", encoding="utf-8") as f:
                for line in f:
                    if index is True:
                        if "Backend paddle" not in line:
                            line = "# " + line
                        else:
                            index = False
                    if backend in line:
                        index = True
                    file_data += line
            with open(file, "w", encoding="utf-8") as f:
                f.write(file_data)
        return 0
    def get_example_dir(self):
        """
        Map the QA yaml name to the example script path inside deepxde.
        """
        example_dir = self.qa_yaml_name.replace("^", "/")
        # NOTE(review): substring tests — "rd" matches anywhere in the
        # path, not just as a prefix; verify against the yaml naming scheme.
        if "lulu" in example_dir:
            example_dir = "deepxde" + example_dir[4:] + ".py"
        elif "rd" in example_dir:
            example_dir = "deepxde" + example_dir[2:] + ".py"
        return example_dir
    def get_deepxde_data(self):
        """
        Copy the bundled example dataset directory into the working dir.
        """
        os.system("cp -r deepxde/examples/dataset/ ./")
        return 0
    def build_prepare(self):
        """
        Run the preparation steps and export env_dict as JSON into the
        environment variable named after the repo.
        """
        ret = 0
        ret = self.prepare_gpu_env()
        if ret:
            logger.info("build prepare_gpu_env failed")
            return ret
        os.environ[self.reponame] = json.dumps(self.env_dict)
        return ret
    def download_datasets(self):
        """
        Download the datasets tarball from BOS and unpack it into deepxde/.
        """
        url = "https://paddle-qa.bj.bcebos.com/deepxde/datasets.tar.gz"
        file_name = "datasets.tar.gz"
        urllib.request.urlretrieve(url, file_name)
        os.system("tar -zxvf " + file_name + " -C deepxde/")
        return 0
def run():
    """
    Entry point: prepare the environment, rewrite the selected example
    script to the paddle backend, inject seeding, and fetch datasets
    when the model needs them.
    """
    model = DeepXDE_Start()
    model.build_prepare()
    model.add_paddle_to_pythonpath()
    model.get_deepxde_data()
    filedir = model.get_example_dir()
    # Switch the example from the tf backend to paddle.
    model.alter(filedir, "tf", "paddle")
    model.change_backend(filedir, "Backend paddle", True)
    model.change_backend(filedir, "Backend tensorflow.compat.v1", False)
    # Force per-step logging and capture the training history.
    model.alter(filedir, "model.train(", "model.train(display_every=1,", True, "model.train(0")
    model.alter(filedir, "model.train(", "losshistory, train_state = model.train(")
    model.alter(filedir, "display_every=1000,", " ", False)
    model.alter(filedir, "display_every=1000", " ", False)
    model.alter(filedir, "display_every=500", " ", False)
    # Inject imports and a fixed random seed right after the deepxde import.
    model.add_seed(filedir, "import deepxde", "import paddle\n")
    # add_seed(filedir, "import paddle", "paddle.seed(1)\n")
    model.add_seed(filedir, "import deepxde", "import numpy as np\n")
    model.add_seed(filedir, "import deepxde", "dde.config.set_random_seed(1)\n")
    if "antiderivative" in model.qa_yaml_name:
        model.download_datasets()
    return 0
if __name__ == "__main__":
run()
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
2a3afbad100efcb1edda22e3475a09ff6d227fab
|
7949f96ee7feeaa163608dbd256b0b76d1b89258
|
/toontown/ai/DistributedPhaseEventMgr.py
|
2a79a55ca8df3ac10529506eb7476344ed65df63
|
[] |
no_license
|
xxdecryptionxx/ToontownOnline
|
414619744b4c40588f9a86c8e01cb951ffe53e2d
|
e6c20e6ce56f2320217f2ddde8f632a63848bd6b
|
refs/heads/master
| 2021-01-11T03:08:59.934044
| 2018-07-27T01:26:21
| 2018-07-27T01:26:21
| 71,086,644
| 8
| 10
| null | 2018-06-01T00:13:34
| 2016-10-17T00:39:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
# File: t (Python 2.4)
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
import datetime
class DistributedPhaseEventMgr(DistributedObject.DistributedObject):
    """Client-side mirror of the phase-event manager distributed object.

    Holds the running flag, phase counters, and the holiday date list
    pushed from the AI via the set* update methods.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhaseEventMgr')

    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)
        # Filled in by setDates() with datetime objects, one per holiday.
        self.holidayDates = []

    def setIsRunning(self, isRunning):
        self.isRunning = isRunning

    def getIsRunning(self):
        return self.isRunning

    def setNumPhases(self, numPhases):
        self.numPhases = numPhases

    def getNumPhases(self):
        return self.numPhases

    def setCurPhase(self, curPhase):
        self.curPhase = curPhase

    def getCurPhase(self):
        return self.curPhase

    def setDates(self, holidayDates):
        # Each entry is a (year, month, day, hour, minute, second) tuple.
        self.holidayDates.extend(
            datetime.datetime(*entry[:6]) for entry in holidayDates)
|
[
"fr1tzanatore@aol.com"
] |
fr1tzanatore@aol.com
|
0f20585a844977b4362a9860a036f47b28823b97
|
ecf1ce6f8b592f76c7b7c253608c1264ae0676a3
|
/days/day017/list_comprehensions_and_generators.py
|
78d9123b75c5cf83388f77dff5985392cf955d59
|
[] |
permissive
|
alex-vegan/100daysofcode-with-python-course
|
94e99880a50ac412e398ad209ed53796f253641f
|
b6c12316abe18274b7963371b8f0ed2fd549ef07
|
refs/heads/master
| 2021-07-20T23:05:59.721661
| 2019-01-21T16:18:25
| 2019-01-21T16:18:25
| 150,115,516
| 0
| 0
|
MIT
| 2018-09-24T14:28:16
| 2018-09-24T14:28:15
| null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
from random import sample
from itertools import islice
from pprint import pprint as pp
NAMES = ['arnold schwarzenegger', 'alec baldwin', 'bob belderbos',
         'julian sequeira', 'sandra bullock', 'keanu reeves',
         'julbob pybites', 'bob belderbos', 'julian sequeira',
         'al pacino', 'brad pitt', 'matt damon', 'brad pitt']


def convert_title_case_names(names=NAMES):
    """Return the names with each word title-cased."""
    return list(map(str.title, names))


def reverse_first_last_names(names=NAMES):
    """Return each name with its words in reverse order ('last first')."""
    return [" ".join(reversed(full_name.split())) for full_name in names]


def gen_pairs(names=NAMES):
    """Yield an endless stream of random 'X teams up with Y' pairings."""
    while True:
        first, second = sample(names, 2)
        yield "{} teams up with {}".format(
            first.split()[0].title(), second.split()[0].title())
'''
if __name__ == "__main__":
print(convert_title_case_names())
print('-'*101)
print(reverse_first_last_names())
print('-'*101)
pairs = gen_pairs()
for _ in range(10):
print(next(pairs))
print('-'*101)
pp(list(islice(pairs, 10)))
'''
|
[
"alex-vegan@outlook.com"
] |
alex-vegan@outlook.com
|
83ef4a6d7e5cdbfb45c05ea36208a409740e1e33
|
2280e309df300fe1d4cd684799b9aeeb3495c6cc
|
/core/inbound.py
|
c6ecbcc422d08468584f3ea64b30969da4f41629
|
[] |
no_license
|
cming091/psshutlle-toots
|
471fe1a9505116b6d9571259e9de04b3d7404f98
|
1445c2efd024fe33743c09bac799ed9f4a3f15cb
|
refs/heads/master
| 2023-05-30T12:34:39.364337
| 2021-06-21T07:23:23
| 2021-06-21T07:23:23
| 378,834,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,856
|
py
|
import time
import os
from .base import Base
from config import CONF
from utils import *
from psgrpc import wholeInboundBcr
logger = LogHandler(__name__)
class Inbound(Base):
    """Drives one whole-inbound flow: register the frame (when simulating),
    create the work order, create aggregates, trigger the BCR scan, and
    verify the resulting stock movement."""
    def __init__(self, data):
        super(Inbound,self).__init__(data)
    def singleProcess(self):
        """Run one inbound cycle for this container.

        Raises on frame-registration failure; otherwise records a status
        code (403 addWorkOrder failed, 404 creatAggs failed) and returns
        the container code.
        """
        # Reset stock/assign tables before the run (wes database only).
        self.common.tearDownStockAndAssignTables(self.db,self.data, defaultdbs=['wes'])
        if self.data['isSimulate']:
            status = self.registerFrame()
            if not status:
                self.statusCode = 402
                raise Exception('[{} registerFrame error]'.format(self.data['sName']))
            else:
                logger.info('[{} registerFrame succ]'.format(self.data['sName']))
        time.sleep(CONF['delay'])
        if self.addWorkOrder():
            logger.info('[{} addWorkOrder succ]'.format(self.data['sName']))
            time.sleep(CONF['delay'])
            # NOTE(review): 'creatAggs' is the project helper's spelling.
            if self.creatAggs():
                logger.info('[{} creatAggs succ]'.format(self.data['sName']))
                time.sleep(CONF['delay'])
                self.triggerBcr()
                self.checkAggOpStatus()
                self.checkBound()
            else:
                self.statusCode = 404
        else:
            self.statusCode = 403
        return self.data['containerCode']
    def registerFrame(self):
        """Register the frame at its start node via the TES HTTP API.

        Returns True when the API reports success (return code 0).
        """
        # Clear any pod already occupying the start node first.
        self.sqlRmStartNodePods()
        data = {
            "warehouseID": self.data['warehouseID'],
            "frameID": self.data['frameID'],
            "nodeID": self.data['nodeID'],
            "dir": 1
        }
        url = '{}/tes/api/frame/registerFrame'.format(CONF['baseUrl'])
        # NOTE(review): 'sendReuest' is the utils helper's spelling.
        res = RequestApi.sendReuest('registerFrame', 'POST', url, data).json()
        logger.info('[{} registerFrame: res:{}]'.format(self.data['sName'],res))
        if res.get(self.data['returnCode'], None) == 0:
            return True
        return False
    def init(self):
        """Remove all pods so the scenario starts from a clean state."""
        logger.info('[{} init ]'.format(self.data['sName']))
        self.sqlRmAllPods()
    def triggerBcr(self):
        """Fire the whole-inbound BCR scan for this container via gRPC."""
        info = wholeInboundBcr(self.data['ip'],self.data['warehouseCode'],self.data['containerCode'],self.data['warehouseID'])
        logger.info('[{} bcr res:{}]'.format(self.data['sName'],info))
    def addWorkOrder(self):
        """Create the inbound work order via the invtransaction HTTP API.

        Returns True when the API reports success (return code 0).
        """
        url = '{}/invtransaction/api/workorder/inbound/add'.format(CONF['baseUrl'])
        data ={
            "woNo": self.data['no'],
            "warehouseCode": self.data['warehouseCode'],
            "regionCode": self.data['regionCode'],
            "waveNo": self.data['no'],
            "inBoundNo": self.data['no'],
            "originStation": "PS-IN-001",
            "priority": 0,
            "transportUnit": self.data['containerCode'],
            "containerCode": self.data['containerCode'],
            "skuCode": self.data['skuCode'],
            "skuName": self.data['skuName'],
            "lot": "",
            "grade": 0,
            "quantity": self.data['quantity'],
            "boxQuantity": 1,
            "bizType": 1,
            "transType": self.data['transType'],
            "bizDate": 1594292882000,
            "destination": "309843433806102535",
            "rely_wo_no": "",
            "extension": "",
            "user": "user",
            'palletModel':0,
            }
        res = RequestApi.sendReuest('addWorkOrder', 'POST', url, data, headers=self.headers).json()
        logger.info('[{} addWorkOrder: res:{}]'.format(self.data['sName'],res))
        if res.get(self.data['returnCode'],None) == 0:
            return True
        return False
    def sqlRmStartNodePods(self):
        """Delete any active frame row sitting on the start node (tes DB)."""
        sql = 'delete from tes.frame where status=1 and node=\'{}\';'.format(self.data['startNodeId'])
        self.db.get_connection('tes')
        res = self.db.execute('tes', sql)
        logger.info('[{} sqlRmStartNodePods tes res:{}]'.format(self.data['sName'],res))
|
[
"349152234@qq.com"
] |
349152234@qq.com
|
dcc7adaa49fada352d2eb346c9e51df6ed8c9dd4
|
0a5c468cee07b79ddb5368aa7b0fe118f4b11e72
|
/lazy_slides/download.py
|
72beca49e25d6ab0de60cb57f0674c4ab1b133c7
|
[] |
no_license
|
abingham/lazy_slides
|
c36e451571c14e53cbc2817d4f72475fa5c400ba
|
ca8eb4618415df6eaa9fb3c3f721cb168708f52b
|
refs/heads/master
| 2020-05-19T16:34:20.286129
| 2013-06-18T17:58:05
| 2013-06-18T17:58:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import contextlib
import logging
import os
import urllib2
import urlparse
import uuid
log = logging.getLogger(__name__)
def download(url, directory):
    '''Download a file specified by a URL to a local file.

    A unique local name is built from the URL's basename plus a random
    UUID, so repeated downloads never collide.

    :param url: The URL to download.
    :param directory: The directory into which to save the file.
    :return: The path of the file that was written.
    '''
    basename = os.path.split(urlparse.urlparse(url).path)[1]
    stem, ext = os.path.splitext(basename)
    # <original-stem>_<uuid><original-extension> inside the target directory.
    target = os.path.join(
        directory,
        '{}_{}{}'.format(stem, uuid.uuid4(), ext))

    log.info('Downloading {} to {}'.format(url, target))

    # Stream the URL payload into the new file, closing the connection
    # even if the write fails.
    with contextlib.closing(urllib2.urlopen(url)) as infile:
        with open(target, 'wb') as outfile:
            outfile.write(infile.read())

    return target
|
[
"austin.bingham@gmail.com"
] |
austin.bingham@gmail.com
|
80cd8baa4841a770e7eb7696c77d6f7a99d12ad2
|
23130cd12e38dbce8db8102810edaad70b240ae2
|
/lintcode/235.py
|
e2c5a50114f99694f5bfed245e493ea6148b0de9
|
[
"MIT"
] |
permissive
|
kangli-bionic/algorithm
|
ee6687c82101088db20f10fb958b4e45e97d3d31
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
refs/heads/master
| 2023-01-05T09:29:33.204253
| 2020-10-25T17:29:38
| 2020-10-25T17:29:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
"""
235. Prime Factorization
https://www.lintcode.com/problem/prime-factorization/
"""
import math
class Solution:
    """
    @param num: An integer
    @return: an integer array
    """
    def primeFactorization(self, num):
        # Trial division by every candidate up to sqrt(num); whatever
        # remains greater than 1 afterwards is itself prime.
        factors = []
        limit = math.sqrt(num)
        divisor = 2
        while divisor <= limit and num > 1:
            while num % divisor == 0:
                factors.append(divisor)
                num //= divisor
            divisor += 1
        if num > 1:
            factors.append(num)
        return factors
|
[
"hipaulshi@gmail.com"
] |
hipaulshi@gmail.com
|
8879d084898863cce23dedb47389a370ebb7adcf
|
11a1e1140fe869e83e337518ca99162cca8780dd
|
/BHScripts_8TeV_postICHEP_Final_WithRun2012C_NewFitRange/histograms/DataAnalysis_FitRanges/Styles.py
|
ec5775c7f79b34278912b0d67309b9cfba720b4c
|
[] |
no_license
|
jhakala/BHMacros
|
6bdd1ac855df8a803f39f06e7e218b24b2eb76b1
|
bc3cf2e3c1d3570a9e042c865214035e60d20021
|
refs/heads/master
| 2021-01-19T04:52:27.624800
| 2015-04-09T12:14:21
| 2015-04-09T12:14:21
| 33,666,386
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,718
|
py
|
pattle = [862, 814, 797, 899, 614, 921]
marker = [20, 21, 22, 25, 24, 26]
from ROOT import gStyle
from ROOT import gROOT
from ROOT import TStyle
gStyle.SetPadTopMargin(0.05)
gStyle.SetPadRightMargin(0.05)
def formatST(h):
    """Style the S_T histogram *h*: dark markers/line and S_T axis titles."""
    h.SetMarkerStyle(20)
    h.SetMarkerColor(923)
    h.SetLineColor(923)
    h.SetXTitle("S_{T} (GeV)")
    # Bin width goes into the y-axis label, e.g. "Events / 100 GeV".
    h.SetYTitle("Events / %d GeV" % h.GetBinWidth(1))
    h.GetYaxis().SetTitleOffset(1.2)
def formatTemplate(f, N, iformula):
    """Style template function *f*: colour from the pattle palette by
    formula index; solid line for N=2, dashed for N=3."""
    f.SetLineWidth(2)
    f.SetLineColor(pattle[iformula])
    if N == 2:
        f.SetLineStyle(1)
    elif N == 3:
        f.SetLineStyle(2)
def formatUncertainty(g):
    """Style the background-uncertainty band graph *g* and set axis titles."""
    g.SetLineWidth(2)
    # NOTE(review): fill colour is set twice (862 then 33); the final
    # values in effect are line colour 862 and fill colour 33.
    g.SetFillColor(862)
    #g.SetLineColor(33)
    g.SetLineColor(862)
    g.SetFillColor(33)
    #g.SetFillStyle()
    g.GetXaxis().SetTitle("S_{T} (GeV)")
    g.GetYaxis().SetTitle("Events / 100 GeV")
    g.GetYaxis().SetTitleOffset(1.2)
def formatCL(g, type, width=4):
    """Style a limit graph: blue solid for observed ("CL95"), red dashed
    for expected ("CLA"); sets the sigma-times-acceptance axis titles."""
    g.SetLineWidth(width)
    g.GetXaxis().SetTitle("S_{T}^{ min} (GeV)")
    g.GetXaxis().SetNdivisions(5,5,0)
    g.GetYaxis().SetTitle("#sigma(S_{T} > S_{T}^{ min}) #times A (pb)")
    g.GetYaxis().SetTitleOffset(1.2)
    if type == "CL95":
        g.SetLineColor(862)
        g.SetFillColor(862)
    elif type == "CLA":
        g.SetLineColor(899)
        g.SetFillColor(899)
        g.SetLineStyle(2)
def formatXsecCL(g, icolor, line_style=1):
    """Style a cross-section limit graph *g*: palette colour *icolor*,
    given line style, and M_BH^min vs sigma axis titles."""
    g.SetLineWidth(2)
    g.SetLineColor(pattle[icolor])
    g.SetLineStyle(line_style)
    g.SetMarkerColor(pattle[icolor])
    g.SetMarkerSize(1)
    g.GetXaxis().SetTitle("M_{BH}^{ min} (TeV)")
    g.GetYaxis().SetTitle("#sigma (pb)")
    g.GetYaxis().SetTitleOffset(1.2)
def formatExcludedMass(g, name = ""):
g.GetXaxis().SetTitle("M_{D} (TeV)")
g.GetYaxis().SetTitle("Excluded M_{BH}^{ min} (TeV)")
g.GetYaxis().SetTitleOffset(1.2)
if not name == "":
g.SetLineWidth(3)
g.SetMarkerSize(1)
if "BH1_BM" in name or "BH4_CH" in name:
color = 922
marker_style = 20
line_style = 1
if "BH2_BM" in name or "BH2_CH" in name:
color = 862
marker_style = 21
line_style = 2
if "BH8_CH" in name:
color = 899
marker_style = 22
line_style = 3
if "BH6_CH" in name or "BH5_BM" in name:
color = 797
marker_style = 20
line_style = 1
if "BH10_CH" in name:
color = 2
marker_style = 23
line_style = 2
if "BH9_CH" in name:
color = 4
marker_style = 24
line_style = 3
g.SetLineColor(color)
g.SetLineStyle(line_style)
g.SetMarkerStyle(marker_style)
g.SetMarkerSize(1)
g.SetMarkerColor(color)
def formatRatio(h, icolor):
    """Style a ratio histogram *h* with palette colour *icolor*."""
    h.SetMarkerColor(pattle[icolor])
    #h.SetMarkerStyle(marker[icolor])
    h.SetLineColor(pattle[icolor])
|
[
"john_hakala@brown.edu"
] |
john_hakala@brown.edu
|
f3a5e46acb64711021bf454c7e8f5af682764ebf
|
48f10cc3520ba8cfa5f3478e4b021766e4d5f29b
|
/openpyexcel/drawing/tests/test_text.py
|
82b747c27e487d5e6e9267a416b1ef8698b4401b
|
[
"MIT"
] |
permissive
|
sciris/openpyexcel
|
bef5094d193e62806164c77777fe8c741511aaec
|
1fde667a1adc2f4988279fd73a2ac2660706b5ce
|
refs/heads/master
| 2022-01-20T14:04:52.196385
| 2019-02-06T22:48:38
| 2019-02-06T22:48:38
| 168,293,752
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,372
|
py
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
import pytest
from openpyexcel.xml.functions import fromstring, tostring
from openpyexcel.tests.helper import compare_xml
@pytest.fixture
def Paragraph():
    """Provide the Paragraph class, imported lazily inside the fixture."""
    from ..text import Paragraph
    return Paragraph
class TestParagraph:
    """Serialisation round-trips for drawingML <p> paragraph elements."""
    def test_ctor(self, Paragraph):
        # A default Paragraph serialises with a single empty run.
        text = Paragraph()
        xml = tostring(text.to_tree())
        expected = """
        <p xmlns="http://schemas.openxmlformats.org/drawingml/2006/main">
        <r>
        <t/>
        </r>
        </p>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff
    def test_from_xml(self, Paragraph):
        # An empty <p> parses to a default Paragraph.
        src = """
        <p />
        """
        node = fromstring(src)
        text = Paragraph.from_tree(node)
        assert text == Paragraph()
    def test_multiline(self, Paragraph):
        # Each <r> run becomes one entry of Paragraph.text.
        src = """
        <p>
        <r>
        <t>Adjusted Absorbance vs.</t>
        </r>
        <r>
        <t> Concentration</t>
        </r>
        </p>
        """
        node = fromstring(src)
        para = Paragraph.from_tree(node)
        assert len(para.text) == 2
@pytest.fixture
def ParagraphProperties():
    """Provide the ParagraphProperties class, imported lazily."""
    from ..text import ParagraphProperties
    return ParagraphProperties
class TestParagraphProperties:
    """Serialisation round-trips for drawingML <pPr> elements."""
    def test_ctor(self, ParagraphProperties):
        # Default properties serialise to an empty <pPr/>.
        text = ParagraphProperties()
        xml = tostring(text.to_tree())
        expected = """
        <pPr xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" />
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff
    def test_from_xml(self, ParagraphProperties):
        # An empty <pPr> parses to the default instance.
        src = """
        <pPr />
        """
        node = fromstring(src)
        text = ParagraphProperties.from_tree(node)
        assert text == ParagraphProperties()
from ..spreadsheet_drawing import SpreadsheetDrawing
class TestTextBox:
    """Parsing a complete spreadsheet drawing containing a text box."""
    def test_from_xml(self, datadir):
        datadir.chdir()
        with open("text_box_drawing.xml") as src:
            xml = src.read()
        node = fromstring(xml)
        drawing = SpreadsheetDrawing.from_tree(node)
        # Navigate anchor -> shape -> text body and check both paragraphs
        # survive the round trip.
        anchor = drawing.twoCellAnchor[0]
        box = anchor.sp
        meta = box.nvSpPr
        graphic = box.graphicalProperties
        text = box.txBody
        assert len(text.p) == 2
@pytest.fixture
def CharacterProperties():
    """Provide the CharacterProperties class, imported lazily."""
    from ..text import CharacterProperties
    return CharacterProperties
class TestCharacterProperties:
    """Serialisation round-trips for drawingML <defRPr> run properties."""
    def test_ctor(self, CharacterProperties):
        from ..text import Font
        # Font, size, bold flag and solid fill all appear in the output.
        normal_font = Font(typeface='Arial')
        text = CharacterProperties(latin=normal_font, sz=900, b=False, solidFill='FFC000')
        xml = tostring(text.to_tree())
        expected = ("""
        <a:defRPr xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"
        b="0" sz="900">
        <a:solidFill>
        <a:srgbClr val="FFC000"/>
        </a:solidFill>
        <a:latin typeface="Arial"/>
        </a:defRPr>
        """)
        diff = compare_xml(xml, expected)
        assert diff is None, diff
    def test_from_xml(self, CharacterProperties):
        # Only the attributes present in the XML are set on parsing.
        src = """
        <defRPr sz="110"/>
        """
        node = fromstring(src)
        text = CharacterProperties.from_tree(node)
        assert text == CharacterProperties(sz=110)
@pytest.fixture
def Font():
    """Provide the drawing-text Font class, imported lazily."""
    from ..text import Font
    return Font
class TestFont:
    """Serialisation round-trips for drawingML <latin> font elements."""
    def test_ctor(self, Font):
        fut = Font("Arial")
        xml = tostring(fut.to_tree())
        expected = """
        <latin typeface="Arial"
        xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" />
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff
    def test_from_xml(self, Font):
        # Extra attributes such as pitchFamily are parsed too.
        src = """
        <latin typeface="Arial" pitchFamily="40"
        xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" />
        """
        node = fromstring(src)
        fut = Font.from_tree(node)
        assert fut == Font(typeface="Arial", pitchFamily=40)
@pytest.fixture
def Hyperlink():
    """Provide the Hyperlink class, imported lazily."""
    from ..text import Hyperlink
    return Hyperlink
class TestHyperlink:
    """Serialisation round-trips for drawingML <hlinkClick> elements."""
    def test_ctor(self, Hyperlink):
        # A default Hyperlink serialises to an empty element.
        link = Hyperlink()
        xml = tostring(link.to_tree())
        expected = """
        <hlinkClick xmlns="http://schemas.openxmlformats.org/drawingml/2006/main"/>
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff
    def test_from_xml(self, Hyperlink):
        # The tooltip attribute is preserved on parsing.
        src = """
        <hlinkClick tooltip="Select/de-select all"/>
        """
        node = fromstring(src)
        link = Hyperlink.from_tree(node)
        assert link == Hyperlink(tooltip="Select/de-select all")
@pytest.fixture
def LineBreak():
    """Provide the LineBreak class, imported lazily."""
    from ..text import LineBreak
    return LineBreak
class TestLineBreak:
    """Serialisation round-trips for drawingML <br> elements."""
    def test_ctor(self, LineBreak):
        fut = LineBreak()
        xml = tostring(fut.to_tree())
        expected = """ <br xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" /> """
        diff = compare_xml(xml, expected)
        assert diff is None, diff
    def test_from_xml(self, LineBreak):
        src = """
        <br />
        """
        node = fromstring(src)
        fut = LineBreak.from_tree(node)
        assert fut == LineBreak()
|
[
"questionably@gmail.com"
] |
questionably@gmail.com
|
41e9e39d9234f668e5bdebd3c69be5fac6a52ed8
|
bc074a145c83c53c24288a62806e9806f4bf992f
|
/lib/bp_utils/filt.py
|
8ef6443344a1f4016b9beb9ad690d9e0634a3618
|
[] |
no_license
|
Genomon-Project/GenomonBreakPoint
|
4b9f44751894d67d8e19a0170f162ab15ce6b237
|
0eed3922c483edcc8a181af042fcce86ad9d9203
|
refs/heads/master
| 2021-06-09T06:36:31.676564
| 2016-11-20T13:26:36
| 2016-11-20T13:26:36
| 73,768,508
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,154
|
py
|
#! /usr/bin/env python
import sys, gzip, math, numpy
import pysam
from scipy import stats
def filter_by_control(tumor_bp_file, output_file, matched_control_bp_file, merged_control_file,
                      min_support_num, min_median_mapq, min_max_clip_size, max_control_num_thres):
    """Filter tumor breakpoint candidates against control samples (Python 2).

    Keeps a candidate when it has enough supporting reads, sufficient
    median mapping quality and clip size, is absent from the merged
    (panel-of-normals) control, and has at most max_control_num_thres
    supporting reads in the matched control.  Writes the surviving
    candidates plus read counts to *output_file*.
    """
    use_matched_control = True if matched_control_bp_file != "" else False
    if use_matched_control: matched_control_db = pysam.TabixFile(matched_control_bp_file)
    use_merged_control = True if merged_control_file != "" else False
    if use_merged_control: merged_control_db = pysam.TabixFile(merged_control_file)
    hout = open(output_file, 'w')
    with gzip.open(tumor_bp_file, 'r') as hin:
        for line in hin:
            # F: chrom, pos, dir, junc seq, mapqs (';'-sep), clip sizes (';'-sep)
            F = line.rstrip('\n').split('\t')
            mapqs = [int(x) for x in F[5].split(';')]
            clip_sizes = [int(x) for x in F[6].split(';')]
            # Basic evidence thresholds.
            if len(mapqs) < min_support_num: continue
            if numpy.median(mapqs) < min_median_mapq: continue
            if max(clip_sizes) < min_max_clip_size: continue
            # filtering using merged control file
            merged_control_filt_flag = False
            if use_merged_control:
                tabixErrorFlag = 0
                try:
                    records = merged_control_db.fetch(F[0], int(F[1]) - 1, int(F[1]) + 1)
                except Exception as inst:
                    # Tabix fetch can fail e.g. for contigs absent from the index.
                    print >> sys.stderr, "%s: %s" % (type(inst), inst.args)
                    tabixErrorMsg = str(inst.args)
                    tabixErrorFlag = 1
                if tabixErrorFlag == 0:
                    for record_line in records:
                        record = record_line.split('\t')
                        # Exact match on chrom, pos, dir and junction sequence.
                        if record[0] == F[0] and record[1] == F[1] and record[2] == F[2] and record[3] == F[3]:
                            merged_control_filt_flag = True
            if merged_control_filt_flag: continue
            # get readnum from matched control file
            if use_matched_control:
                num_matched_control = 0
                tabixErrorFlag = 0
                try:
                    records = matched_control_db.fetch(F[0], int(F[1]) - 1, int(F[1]) + 1)
                except Exception as inst:
                    print >> sys.stderr, "%s: %s" % (type(inst), inst.args)
                    tabixErrorMsg = str(inst.args)
                    tabixErrorFlag = 1
                if tabixErrorFlag == 0:
                    for record_line in records:
                        record = record_line.split('\t')
                        if record[0] == F[0] and record[1] == F[1] and record[2] == F[2] and record[3] == F[3]:
                            num_matched_control = len(record[5].split(';'))
            else:
                num_matched_control = "---"
            if use_matched_control and num_matched_control > max_control_num_thres: continue
            print >> hout, '\t'.join(F[:4]) + '\t' + str(len(mapqs)) + '\t' + str(num_matched_control)
    hout.close()
def filter_by_allele_freq(input_file, output_file, tumor_bam, matched_control_bam, tumor_AF_thres, control_AF_thres, max_fisher_pvalue):
    """Filter breakpoint candidates by allele frequency and Fisher test (Python 2).

    For each candidate: computes tumor and control allele frequencies from
    the BAM depths at the breakpoint, drops candidates below the tumor AF
    threshold or above the control AF threshold, and requires the one-sided
    Fisher exact p-value to pass *max_fisher_pvalue*.  Writes a header plus
    one annotated row per surviving candidate.
    """
    hout = open(output_file, 'w')
    print >> hout, '\t'.join(["Chr", "Pos", "Dir", "Junc_Seq",
                              "Num_Tumor_Total_Read", "Num_Tumor_Var_Read", "Num_Control_Total_Read", "Num_Control_Var_Read",
                              "Minus_Log_Fisher_P_value"])
    with open(input_file, 'r') as hin:
        for line in hin:
            F = line.rstrip('\n').split('\t')
            tumor_num = int(F[4])
            control_num = int(F[5])
            region = F[0] + ':' + F[1] + '-' + F[1]
            # Total tumor depth at the breakpoint position.
            depth_tumor_info = pysam.depth(tumor_bam, "-r", region)
            depth_tumor = int(depth_tumor_info.rstrip('\n').split('\t')[2])
            AF_tumor = float(tumor_num) / depth_tumor
            if AF_tumor < tumor_AF_thres: continue
            # print '\t'.join(F)
            if matched_control_bam != "":
                depth_control_info = pysam.depth(matched_control_bam, "-r", region)
                # Zero-coverage control defaults to AF 1.0 (conservative).
                depth_control = int(depth_control_info.rstrip('\n').split('\t')[2]) if depth_control_info != "" else 0
                control_AF = float(control_num) / depth_control if depth_control > 0 else 1.0
            else:
                depth_control = "---"
                control_AF = "---"
            if control_AF != "---" and control_AF > control_AF_thres: continue
            lpvalue = "---"
            if control_AF != "":
                # One-sided Fisher exact test, reported as -log10(p), capped at 100.
                oddsratio, pvalue = stats.fisher_exact([[depth_tumor - tumor_num, tumor_num], [depth_control - control_num, control_num]], 'less')
                if pvalue < 1e-100: pvalue = 1e-100
                lpvalue = (- math.log(pvalue, 10) if pvalue < 1 else 0)
                lpvalue = round(lpvalue, 4)
                if 10**(-lpvalue) > float(max_fisher_pvalue): continue
            print >> hout, '\t'.join(F[:4]) + '\t' + str(depth_tumor) + '\t' + str(tumor_num) + '\t' + \
                           str(depth_control) + '\t' + str(control_num) + '\t' + str(lpvalue)
    hout.close()
|
[
"friend1ws@gmail.com"
] |
friend1ws@gmail.com
|
38e297f2ecdcdafc8a850489ec195d720ca6a99a
|
fff5eeff850258b5208f41d4f6c3027044f5374a
|
/blog/tests/test_urls.py
|
e384ffffc42e1bbf5c436fcd0981a200d3649038
|
[] |
no_license
|
khabdrick/django-pytest
|
3f4300f875ed4c6ad9d4fa1bb3bf0902c3e420e7
|
5ce5f5cd1973885dfa2d476b1817d00644e9b10c
|
refs/heads/main
| 2023-04-01T17:10:22.220605
| 2021-04-20T17:27:43
| 2021-04-20T17:27:43
| 345,196,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
from django.urls import reverse, resolve
class TestUrls:
    """URL-configuration tests for the blog app."""
    def test_post_content_url(self):
        # reverse() builds the detail path; resolve() must map it back
        # to the same named view.
        path = reverse("content", kwargs={'pk':1})
        assert resolve(path).view_name == "content"
|
[
"muhamzyali@gmail.com"
] |
muhamzyali@gmail.com
|
ec2f321127e4a1f870d4e4c9b178002ea220402a
|
d74ccf6290b7acb0011fd9b9132cd8beac0bd9d3
|
/back/movies/migrations/0003_movie_like_users.py
|
13f3f2abdab2d74e4da72d3a07d59fe254a85fc1
|
[] |
no_license
|
gaberani/final_netflix
|
a0687c9cec9157712c9fe2a8627d3624e5fe00b6
|
637016fd6a0c589f1ff96ed5e9225deffc8f18cb
|
refs/heads/master
| 2022-11-09T10:42:22.460795
| 2020-06-21T00:30:21
| 2020-06-21T00:30:21
| 272,981,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
# Generated by Django 2.1.15 on 2020-06-15 11:38
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Movie.like_users many-to-many relation to the user model
    (reverse accessor: user.like_movies)."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('movies', '0002_comment'),
    ]
    operations = [
        migrations.AddField(
            model_name='movie',
            name='like_users',
            field=models.ManyToManyField(related_name='like_movies', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"khs0783@naver.com"
] |
khs0783@naver.com
|
517d75eb080fc570f9f2944db0205779a06920c9
|
6ac0bba8c1851e71529269c0d9d89a7c8fa507f2
|
/Medium/18.py
|
5808c521f0c1350c3c957493c5fcc72c735dcfcf
|
[] |
no_license
|
Hellofafar/Leetcode
|
e81dc85689cd6f9e6e9756beba070cb11e7b192e
|
7a459e9742958e63be8886874904e5ab2489411a
|
refs/heads/master
| 2021-05-16T07:07:19.823953
| 2020-02-17T03:00:09
| 2020-02-17T03:00:09
| 103,690,780
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,265
|
py
|
# ------------------------------
# 18. 4Sum
#
# Description:
# Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.
#
# Note: The solution set must not contain duplicate quadruplets.
#
# Note: The solution set must not contain duplicate triplets.
# For example, given array S = [-1, 0, 1, 2, -1, -4],
# A solution set is:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
#
# Version: 1.0
# 10/17/17 by Jianfa
# ------------------------------
class Solution(object):
    def fourSum(self, nums, target):
        """
        Return all unique quadruplets in *nums* summing to *target*.

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        quadruplets = []
        nums.sort()
        for first in range(len(nums) - 3):
            # Skip duplicate anchors so no quadruplet is emitted twice.
            if first > 0 and nums[first] == nums[first - 1]:
                continue
            # Reduce to 3Sum on the tail with the adjusted target.
            for triple in self.threeSum(nums[first + 1:], target - nums[first]):
                quadruplets.append([nums[first]] + triple)
        return quadruplets

    def threeSum(self, nums, target):
        """Return all unique triplets in *nums* summing to *target*."""
        triplets = []
        nums.sort()
        for anchor in range(0, len(nums) - 2):
            if anchor > 0 and nums[anchor] == nums[anchor - 1]:
                continue
            lo, hi = anchor + 1, len(nums) - 1
            while lo < hi:
                total = nums[anchor] + nums[lo] + nums[hi]
                if total > target:
                    hi -= 1
                elif total < target:
                    lo += 1
                else:
                    triplets.append([nums[anchor], nums[lo], nums[hi]])
                    # Step both pointers past any duplicates of the
                    # triplet just recorded.
                    while lo < hi and nums[lo] == nums[lo + 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi - 1]:
                        hi -= 1
                    lo += 1
                    hi -= 1
        return triplets
# Used for test
if __name__ == "__main__":
test = Solution()
nums = [1,0,-1,0,-2,2]
target = 0
print(test.fourSum(nums, target))
# Summary
# Leverage the idea of 3Sum. Check integer one by one and check 3Sum for the rest.
|
[
"buptljf@gmail.com"
] |
buptljf@gmail.com
|
68616c0dbcebfbf9c42b5566168c88f7aa8c9404
|
7c2e677d931a8eb7d7cffc6d54713411abbe83e4
|
/AppBuilder9000/AppBuilder9000/NflApp/migrations/0001_initial.py
|
c6c706f8bd214fbbea2270eca679fe35fce7be36
|
[] |
no_license
|
r3bunker/Python_Live_Project
|
19e367b3cf74c2279c287fcd3a8a44a27f24041a
|
d3e06150d7daea6326cc1a4155309d99e4ff6244
|
refs/heads/main
| 2023-06-12T23:01:50.440371
| 2021-06-16T20:21:03
| 2021-06-16T20:21:03
| 344,883,966
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
# Generated by Django 2.2.5 on 2020-11-06 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial NflApp migration: creates the PlayerProfile table.

    BUG FIX: ``max_length`` is not a meaningful option on
    ``PositiveIntegerField`` — Django's system check W122 reports that it
    is ignored on integer fields — so it has been dropped from ``height``
    and ``weight``.  The generated database schema is unchanged.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='PlayerProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.CharField(choices=[('TE', 'TE'), ('QB', 'QB'), ('OL', 'OL'), ('DB', 'DB'), ('LB', 'LB'), ('WR', 'WR'), ('DL', 'DL'), ('RB', 'RB')], max_length=2)),
                ('name', models.CharField(default='', max_length=60)),
                # max_length removed: ignored on integer fields (check W122).
                ('height', models.PositiveIntegerField()),
                ('weight', models.PositiveIntegerField()),
                ('team', models.CharField(default='', max_length=30)),
            ],
        ),
    ]
|
[
"r3bunker@gmail.com"
] |
r3bunker@gmail.com
|
76e31ee753accb6937d8800000f3bbc5a28dabe6
|
8a9f0a0924706ded24ab4214aa42ab07f201e38b
|
/LeetCode_Python/Linked_List/Swap_Nodes_In_Pairs.py
|
030136ef60b1879da3ce6eb6cdd836e2dfdd49ae
|
[] |
no_license
|
gitzx/Data-Structure-Algorithm
|
687162565729b12551cb660aa55a94f1d382014c
|
d6af7dfdc4d3d139fd939687a45dd36e327c914c
|
refs/heads/master
| 2021-06-03T21:27:17.750464
| 2019-06-27T10:50:48
| 2019-06-27T10:50:48
| 14,443,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
'''
Given a linked list, swap every two adjacent nodes and return its head.
For example,
Given 1->2->3->4, you should return the list as 2->1->4->3.
Your algorithm should use only constant space. You may not modify the values in the list, only nodes itself can be changed.
'''
'''
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
'''
class Solution(object):
    def swapPairs(self, head):
        """Swap every two adjacent nodes of a singly linked list.

        Only the links are rewired; node values are never copied. Returns
        the new head (the original second node when at least two exist).
        """
        # Fewer than two nodes: nothing to swap.
        if head is None or head.next is None:
            return head
        # Sentinel in front of the list makes the head pair a regular case.
        sentinel = ListNode(0)
        sentinel.next = head
        prev = sentinel
        while prev.next is not None and prev.next.next is not None:
            first, second = prev.next, prev.next.next
            # Rewire prev -> second -> first -> (rest of list).
            first.next = second.next
            second.next = first
            prev.next = second
            # The pair's former first node is now the tail of the swapped pair.
            prev = first
        return sentinel.next
|
[
"emailzhx@126.com"
] |
emailzhx@126.com
|
b8760e44c9b37c312a00f01f06b0f1d1992247d0
|
28b405b8a538187367e019e45dd7fff3c5f4f296
|
/src/rocks-pylib/rocks/commands/set/host/interface/vlan/__init__.py
|
6224fb0a1b94d913a4014f8e6961bc95b0bc6627
|
[] |
no_license
|
rocksclusters/core
|
95c84cbe4d9f998eea123177e43b25fa0475c823
|
7fb7208aa4a532e64db83e04759d941be9b96d91
|
refs/heads/master
| 2023-04-08T16:30:45.931720
| 2023-03-23T17:18:54
| 2023-03-23T17:18:54
| 58,084,820
| 21
| 11
| null | 2019-08-22T21:17:23
| 2016-05-04T21:21:17
|
Python
|
UTF-8
|
Python
| false
| false
| 5,142
|
py
|
# $Id: __init__.py,v 1.10 2012/11/27 00:48:28 phil Exp $
#
# @Copyright@
#
# Rocks(r)
# www.rocksclusters.org
# version 6.2 (SideWinder)
# version 7.0 (Manzanita)
#
# Copyright (c) 2000 - 2017 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice unmodified and in its entirety, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. All advertising and press materials, printed or electronic, mentioning
# features or use of this software must display the following acknowledgement:
#
# "This product includes software developed by the Rocks(r)
# Cluster Group at the San Diego Supercomputer Center at the
# University of California, San Diego and its contributors."
#
# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,
# neither the name or logo of this software nor the names of its
# authors may be used to endorse or promote products derived from this
# software without specific prior written permission. The name of the
# software includes the following terms, and any derivatives thereof:
# "Rocks", "Rocks Clusters", and "Avalanche Installer". For licensing of
# the associated name, interested parties should contact Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @Copyright@
#
# $Log: __init__.py,v $
# Revision 1.10 2012/11/27 00:48:28 phil
# Copyright Storm for Emerald Boa
#
# Revision 1.9 2012/08/23 16:42:07 clem
# set host interface vlan and set host interface subnet did not accept properly
# MAC addresses for their iface input argument
#
# Revision 1.8 2012/05/06 05:48:35 phil
# Copyright Storm for Mamba
#
# Revision 1.7 2011/07/23 02:30:38 phil
# Viper Copyright
#
# Revision 1.6 2010/09/07 23:53:01 bruno
# star power for gb
#
# Revision 1.5 2009/07/28 17:52:20 bruno
# be consistent -- all references to 'vlanid' should be 'vlan'
#
# Revision 1.4 2009/05/01 19:07:03 mjk
# chimi con queso
#
# Revision 1.3 2009/01/08 01:20:57 bruno
# for anoop
#
# Revision 1.2 2008/10/18 00:55:57 mjk
# copyright 5.1
#
# Revision 1.1 2008/07/22 00:34:41 bruno
# first whack at vlan support
#
#
#
import rocks.commands
class Command(rocks.commands.set.host.command):
	"""
	Sets the VLAN ID for an interface on one of more hosts.
	<arg type='string' name='host' repeat='1'>
	One or more named hosts.
	</arg>
	<arg type='string' name='iface'>
	Interface that should be updated. This may be a logical interface or
	the mac address of the interface.
	</arg>
	<arg type='string' name='vlan'>
	The VLAN ID that should be updated. This must be an integer and the
	pair 'subnet/vlan' must be defined in the VLANs table.
	</arg>
	<param type='string' name='iface'>
	Can be used in place of the iface argument.
	</param>
	<param type='string' name='vlan'>
	Can be used in place of the vlan argument.
	</param>
	<example cmd='set host interface vlan compute-0-0-0 eth0 3'>
	Sets compute-0-0-0's private interface to VLAN ID 3.
	</example>
	<example cmd='set host interface vlan compute-0-0-0 subnet=eth0 vlan=3
	'>
	Same as above.
	</example>
	<related>add host</related>
	"""
	def run(self, params, args):
		# Positional arguments may alternatively be given as params
		# (iface=..., vlan=...); fillPositionalArgs merges both forms.
		(args, iface, vid) = self.fillPositionalArgs(
			('iface', 'vlan'))
		if not len(args):
			self.abort('must supply host')
		if not iface:
			self.abort('must supply iface')
		if not vid:
			self.abort('must supply vlan')
		else:
			# The VLAN ID must be an integer; anything else aborts.
			try:
				vlanid = int(vid)
			except:
				self.abort('vlan "%s" must be an integer' %
					(vid))
		for host in self.getHostnames(args):
			# A vlan of 0 clears the assignment (stored as SQL NULL).
			# iface may match either the logical device name or the MAC.
			# NOTE(review): values are spliced into SQL via % formatting —
			# acceptable for trusted admin CLI input, but parameterized
			# queries would be safer.
			self.db.execute("""update networks net, nodes n
				set net.vlanid = IF(%d = 0, NULL, %d)
				where (net.device = '%s' or net.mac='%s') and
				n.name = '%s' and net.node = n.id""" %
				(vlanid, vlanid, iface, iface, host))
|
[
"ppapadopoulos@ucsd.edu"
] |
ppapadopoulos@ucsd.edu
|
9896ed4a15946204d46a0faecec93ee19b1562de
|
15373eaa353e8aece47a26741b7fb27795268bf6
|
/easy/674_longest_continuous_increasing_subsequence.py
|
ef6d6b79c989164a5d0abafb804820ca0af2c060
|
[] |
no_license
|
esddse/leetcode
|
e1a9bacf04c68a8d642a1e53c90e6c2dda2c1980
|
0ceccdb262149f7916cb30fa5f3dae93aef9e9cd
|
refs/heads/master
| 2021-06-08T19:15:14.346584
| 2020-01-09T01:41:23
| 2020-01-09T01:41:23
| 109,675,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
class Solution:
    def findLengthOfLCIS(self, nums):
        """
        Length of the longest strictly increasing contiguous run in nums.

        :type nums: List[int]
        :rtype: int

        The original kept an O(n) dp list where dp[i] was the run length
        ending at i; since only the previous value is ever read, a running
        counter gives the same answer in O(1) extra space.
        """
        if not nums:
            return 0
        best = 1      # longest run seen so far
        streak = 1    # length of the increasing run ending at the current element
        for prev, cur in zip(nums, nums[1:]):
            # Extend the run on a strict increase, otherwise restart it.
            streak = streak + 1 if cur > prev else 1
            if streak > best:
                best = streak
        return best
|
[
"tjz427@sina.cn"
] |
tjz427@sina.cn
|
c78fca675d5676273ac2feefb58558b427a6339b
|
74e53273dc5aa71293a385512b3d239971099738
|
/Data_structures_and_Algorithms/linked_list/odd_even_linked_list.py
|
23a4c71690de5d036acb1edf0b4d3ec4ea4b1b76
|
[] |
no_license
|
BJV-git/Data_structures_and_Algorithms
|
3b240bf699e7091453f3a1459b06da1af050c415
|
393c504b2bb17b19e76f6d9d9cce948b4c12dbb2
|
refs/heads/master
| 2020-04-23T22:32:22.525542
| 2019-02-19T16:09:51
| 2019-02-19T16:09:51
| 171,504,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# just about the positional ordering
# just can go by the next.next and set the odd last to even head
def odd_even(head):
    """Regroup a linked list so odd-position nodes come first, then even ones.

    Positions are 1-based node indices (not values); relative order within
    each group is preserved. Links are rewired in place; returns the head.
    """
    if not head or not head.next:
        return head
    odd_tail = head
    even_tail = even_head = head.next
    # Invariant at loop top: odd_tail.next is even_tail.
    while even_tail and even_tail.next:
        odd_tail.next = even_tail.next
        odd_tail = odd_tail.next
        even_tail.next = odd_tail.next
        even_tail = even_tail.next
    # Append the even chain after the last odd node.
    odd_tail.next = even_head
    return head
|
[
"noreply@github.com"
] |
BJV-git.noreply@github.com
|
7b3118281a9061e8454ba7aa096b0e07c96dd9f7
|
72d95f3b8c4de3e76c1125b2e6573539cadbe66a
|
/robot/logging.py
|
bf0f763b3df4200826a487da9e41562cb5cc3fba
|
[
"MIT"
] |
permissive
|
skyzhishui/wukong-robot
|
4b2c4255fffa61705b735fed57fd0c90c4d40d7c
|
8395cd4030d340459edd7862b186a6f9395925ff
|
refs/heads/master
| 2020-12-23T03:37:03.320598
| 2020-01-29T16:03:04
| 2020-01-29T16:03:04
| 237,020,512
| 2
| 0
|
MIT
| 2020-01-29T15:48:47
| 2020-01-29T15:48:46
| null |
UTF-8
|
Python
| false
| false
| 2,197
|
py
|
import logging
import os
from robot import constants
from logging.handlers import RotatingFileHandler
PAGE = 4096
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
def tail(filepath, n=10):
    """Pure-Python ``tail -n``: return the last *n* lines of *filepath*.

    Reads the file backwards in PAGE-sized windows so large logs are not
    loaded whole; returns the lines concatenated, decoded as UTF-8.
    """
    res = ""
    with open(filepath, 'rb') as f:
        f_len = f.seek(0, 2)  # seek to EOF; the return value is the file size
        rem = f_len % PAGE
        page_n = f_len // PAGE  # NOTE(review): decremented below but otherwise unused
        r_len = rem if rem else PAGE  # initial window: the partial trailing page
        while True:
            # Window already covers the whole file: read everything and stop.
            if r_len >= f_len:
                f.seek(0)
                lines = f.readlines()[::-1]
                break
            f.seek(-r_len, 2)
            # print('f_len: {}, rem: {}, page_n: {}, r_len: {}'.format(f_len, rem, page_n, r_len))
            lines = f.readlines()[::-1]
            count = len(lines) -1 # the window's first line may be cut off, so don't count it
            if count >= n:  # enough complete lines collected
                break
            else:  # not enough lines yet: widen the window by one page
                r_len += PAGE
                page_n -= 1
        # lines is newest-first; emit the n newest in file order.
        for line in lines[:n][::-1]:
            res += line.decode('utf-8')
    return res
def getLogger(name):
    """
    Same contract as the standard logging.getLogger(name), but with the
    wukong rotating file handler attached.

    :param name: logger name (usually the calling module's __name__)
    :returns: logger writing DEBUG+ records to <TEMP_PATH>/wukong.log
              (rotated at 1 MB, 5 backups)
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # logging.getLogger caches one instance per name, so only attach the
    # handler once: the original added a fresh RotatingFileHandler on every
    # call, duplicating each record in the file after repeated getLogger().
    if not logger.handlers:
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(filename)s - %(funcName)s - line %(lineno)s - %(levelname)s - %(message)s')
        # FileHandler
        file_handler = RotatingFileHandler(os.path.join(constants.TEMP_PATH, 'wukong.log'), maxBytes=1024*1024,backupCount=5)
        file_handler.setLevel(level=logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
def readLog(lines=200):
    """Return up to the newest *lines* lines of the wukong log.

    :param lines: maximum number of lines to return
    :returns: the log tail as a string, or '' when no log file exists yet
    """
    log_path = os.path.join(constants.TEMP_PATH, 'wukong.log')
    # Guard clause: nothing has been logged yet.
    if not os.path.exists(log_path):
        return ''
    return tail(log_path, lines)
|
[
"m@hahack.com"
] |
m@hahack.com
|
ffc8618fc8a2d869759cb4bd9356f6df5ecafba8
|
14efc9ec42d4fe9beecad1cff66bba77e68b8b74
|
/vinod/Scripts/easy_install-3.7-script.py
|
7c2e1061a952cf3de73b1ff4da86706252c40275
|
[] |
no_license
|
vinodkumar96/ModelForms_ClassBased
|
b515386554e6241af749742319d46d317755d48f
|
c1435b7145ac9eb9dff45618ad4c3446667cdc5a
|
refs/heads/master
| 2020-06-14T19:15:55.312078
| 2019-07-03T17:38:07
| 2019-07-03T17:38:07
| 195,100,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
#!C:\PyCharm\PycharmProjects\new_projects\ModelForms_ClassBased\vinod\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
# Declare the distribution pkg_resources must resolve before imports below.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" wrapper suffix so argv[0] shows the
    # bare command name in help/usage output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Look up and invoke the easy_install-3.7 console entry point; its return
    # value becomes the process exit status.
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
|
[
"vinodkumaryv96@gmail.com"
] |
vinodkumaryv96@gmail.com
|
e884df3f64553ec1d649d28e8ef4317ea6af6c01
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/organizations_write_f/delegated-administrator_deregister.py
|
92bfd542b0cbd45339078af1faf66f98e2e5cf32
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
# Make the repository root importable so the `common` package resolves when
# this file is run directly as a script.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    list-delegated-administrators : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/organizations/list-delegated-administrators.html
    register-delegated-administrator : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/organizations/register-delegated-administrator.html
    """
    # Collect parameters and run `aws organizations
    # deregister-delegated-administrator` (behavior inferred from
    # write_parameter's name — confirm in common.execute_command).
    write_parameter("organizations", "deregister-delegated-administrator")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
7b72d2a794746c8ae3ce5d3b0062853fb3515f96
|
02f3fcbd495c2282694ad0612a66640b4731582f
|
/helloworld/helloworld.py
|
610c9184d9177bfd6ddccf2c45d3cd1da7bb4e73
|
[
"MIT"
] |
permissive
|
plter/LearnKivy2021
|
8f39f99bf3123d23230e047e853dfeba983329f8
|
04bf9c19c707528f4be8ca666192363aae95fb9f
|
refs/heads/main
| 2023-06-04T04:03:41.829403
| 2021-06-24T04:06:31
| 2021-06-24T04:06:31
| 379,203,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
import os
# Select the PIL image provider; set before any kivy import below so the
# environment variable takes effect when kivy initializes.
os.environ['KIVY_IMAGE'] = "pil"
from kivy.app import App
from kivy.core.window import Window
# Fixed demo window size as (width, height).
Window.size = (400, 300)
class MyApp(App):
    """Minimal app with no overrides; relies entirely on App defaults."""
    pass
if __name__ == '__main__':
    MyApp().run()
|
[
"xtiqin@163.com"
] |
xtiqin@163.com
|
1eca7ec2e36e7cae8c2c4ef9e80e9f1268d6c6e8
|
7f9396be71cdf243930d77638f59aa76c135c9c8
|
/virtual/bin/sqlformat
|
451f4a53a61bc6a3280c8e6bb71755bdd4a53720
|
[] |
no_license
|
CollinsMuiruri/Collins
|
accb894d620104e49de6660127a6a0074cf7f57e
|
9d94d528d94821983681fa8d1e5801f16878b464
|
refs/heads/main
| 2023-01-23T01:14:07.844954
| 2020-12-09T01:10:38
| 2020-12-09T01:10:38
| 316,529,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
#!/home/collins/Documents/me/Collins/Collins/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" wrapper suffix so argv[0] shows the
    # bare command name; then delegate entirely to sqlparse's CLI main().
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"wanyekicollins@gmail.com"
] |
wanyekicollins@gmail.com
|
|
dfe50521c8beadbcedb0a1ea16159e0c00737c40
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03945/s610658846.py
|
ffb8861904f17b983b639980e4783fc5358be706
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
import collections
def main():
    """Read one line and print how many times adjacent characters differ."""
    s = input()
    # Count positions where a character differs from its predecessor.
    transitions = sum(1 for prev, cur in zip(s, s[1:]) if prev != cur)
    # Preserve the original sentinel behaviour: empty input prints -1
    # (the original started its counter against an empty sentinel and
    # printed cnt - 1).
    print(transitions if s else -1)
if __name__ == "__main__":
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
15bd6f29ed5617bbd897682594f52dd64bb410e2
|
6fa554aff1f9507eca282ebd1352fb6689ad4842
|
/부록02/helloworld-gae/main.py
|
99283f2a5469666030a99944aa94a37db682866f
|
[] |
no_license
|
wikibook/flask
|
88260843b56c3bde2f811515d34b3561e9bcd612
|
b3c9a4e4c2a88ffb4ada3c0aabe781b590016762
|
refs/heads/master
| 2022-12-12T21:27:26.202369
| 2021-08-10T05:35:42
| 2021-08-10T05:35:42
| 15,384,305
| 79
| 85
| null | 2022-12-09T05:35:03
| 2013-12-23T00:08:26
|
Python
|
UTF-8
|
Python
| false
| false
| 129
|
py
|
# -*- coding: utf-8 -*-
from google.appengine.ext.webapp.util import run_wsgi_app
from helloworld import app
# Hand the imported WSGI app to App Engine's request dispatcher; this runs
# once per request under the legacy (CGI-style) Python runtime.
run_wsgi_app(app)
|
[
"dylee@wikibook.co.kr"
] |
dylee@wikibook.co.kr
|
3dfffc0c816b2afd57cd7b4250788052eb8bb807
|
a6719f4815ff41d3a1f09e9a63a64c4582d03702
|
/function_and_scope/func_call.py
|
224a49fd81ba7ad18b41c7b075f32fa82a7f7447
|
[
"MIT"
] |
permissive
|
thanh-vt/python-basic-programming
|
8136007b8435dae6339ae33015fe536e21b19d1d
|
5fe817986fbef2649b4b03955f07b59d2a2035d8
|
refs/heads/main
| 2023-01-30T12:57:36.819687
| 2020-12-13T17:27:05
| 2020-12-13T17:27:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# Demonstrates the various ways of calling functions defined in func_declare.
from func_declare import *
# plain call, no arguments
hello_func()
# one positional argument
intro_func('Thanh')
# several positional arguments
intro_func_2('Thanh', 'Tat', 'Vu')
# variable number of positional arguments (*args)
fruit_intro_func('banana', 'orange', 'watermelon')
# keyword arguments
child_intro_func(child1='Emil', child2='Tobias', child3='Linus')
# arbitrary keyword arguments (**kwargs)
child_intro_func_2(fname="Tobias", lname='Refsnes')
# argument falling back to its default value
country_intro_func()
# passing a whole collection (here a set) as a single argument
fruits = {'apple', 'banana', 'cherry'}
fruit_list_func(fruits)
# capturing a return value
x = multiply_by_5_func(2)
print(x)
# function with an empty (pass) body
empty_func()
# recursion
y = factorial_func(5)
print(y)
|
[
"thanhvt@vissoft.vn"
] |
thanhvt@vissoft.vn
|
611c1256e6469b7ceef7838061cd304f5e2f09ec
|
dcebc9eba57f252874a336b4b396c1dea328e850
|
/py/dd_match_fields.py
|
0f91450376fbfa3b376a9c2a4b765fef32abf897
|
[
"Apache-2.0"
] |
permissive
|
bcgov/diputils
|
d7408ceb7d02c1583bba75e515cb3f93e2e07a09
|
caf510c81f7f43372d4a8e18f77eaa86cdede6a5
|
refs/heads/master
| 2022-05-15T01:02:13.289995
| 2022-05-08T22:02:54
| 2022-05-08T22:02:54
| 231,476,522
| 5
| 1
|
Apache-2.0
| 2022-05-08T22:02:55
| 2020-01-02T23:31:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,121
|
py
|
# Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#20190214 dd_match.py: match dat file with data dictionary (csv2)
# NOTE(review): Python 2 source (print statements; text written in 'wb'
# mode) — must run under a Python 2 interpreter, on cygwin (uses cygpath).
import os
import sys
'''
grep -n DE.HLTH_PROD_LABEL ./dd/*.csv2
data_dictionary_pharmanet-january-1-1996-onwards.xlsx_dsp_rpt.A
dd_fields.exe data_dictionary_pharmanet-january-1-1996-onwards.xlsx_dsp_rpt.A
'''
# Map: data-dictionary filename -> list of lowercased field labels
# (taken from the 4th comma-separated column of each line).
labels_for_file = {}
files = os.popen("ls -1 ~/dd/*.csv2").read().strip().split('\n')
for i in range(0, len(files)):
    files[i] = files[i].strip()
    # cygpath -d converts the POSIX path into a DOS path this build can open.
    f = open(os.popen('cygpath -d ' + files[i]).read().strip())
    lines = f.read().strip().split("\n")
    w = lines[0].strip().split(',')
    # Only index dictionaries whose first header cell is 'start'.
    if w[0].lower() == 'start':
        # print w
        labels = []
        for line in lines:
            line = line.strip()
            w = line.split(',')
            labels.append(w[3].lower())
        labels_for_file[files[i]] = labels # store the labels from this file, according to filename
# field names for extract (no data here):
lines = ["Ft_schlstud.A.dat STUDYID SPECIAL_NEED_CODE_THIS_COLL",
"DAD STUDYID DIAGX1 DSC ADDATE",
"MSP STUDYID SPEC ICD9 ICD9_1 ICD9_2 ICD9_3 ICD9_4 ICD9_5 SERVCODE servdate",
"DES_REP.A DE.STUDYID DE.HLTH_PROD_LABEL DE.DSPD_QTY DE.SRV_DATE",
"HLTH_REP.A HP.DIN_PIN HP.GEN_DRUG"]
o_f = open("extract_me.csv", "wb")
o_f.write('\n'.join(lines))
o_f.close()
# now attempt to match the labelsets from the file, with the above:
lines = open("extract_me.csv").read().strip().split('\n')
dd_matches = []
for i in range(0, len(lines)):
    # First token is the dataset filename; the rest are field names to match.
    line = lines[i].strip().lower().split()
    line = line[1:]
    print ",".join(line)
    # Track the best-scoring dictionary file(s) for this field set.
    max_score, max_f = 0, []
    matched = []
    for f in labels_for_file:
        labels = labels_for_file[f]
        score = 0
        for label_to_match in line:
            # if print "\t", label_to_match
            if label_to_match in labels:
                score += 1
                if label_to_match not in matched:
                    matched.append(label_to_match)
        # make sure to handle multiple matches for the same thing
        if score == max_score:
            max_f.append(f)
        if score > max_score:
            max_f = [f]
            max_score = score
    print "\n\t", max_score, "/", len(line), line, "\n\t-------> ", "MATCH" if max_score==len(line) -1 else ""
    print "\tvarmatch", matched
    for f in max_f:
        print "\t\t", f
        dd_matches.append(f.strip()) # list all dd we want to use to extract
f = open("dd_match_fields_selected_dd.txt", "wb")
f.write('\n'.join(dd_matches))
f.close()
|
[
"richardson.ashlin@gmail.com"
] |
richardson.ashlin@gmail.com
|
2591fab72417dd22098a2d648b9a0eb3d6d035d2
|
0a40a0d63c8fce17f4a686e69073a4b18657b160
|
/test/functional/feature_uacomment.py
|
75b5a9c538e3ca66a92d254faac84d250742b090
|
[
"MIT"
] |
permissive
|
MotoAcidic/Cerebellum
|
23f1b8bd4f2170c1ed930eafb3f2dfff07df1c24
|
6aec42007c5b59069048b27db5a8ea1a31ae4085
|
refs/heads/main
| 2023-05-13T06:31:23.481786
| 2021-06-09T15:28:28
| 2021-06-09T15:28:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,492
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
from test_framework.test_framework import CerebellumTestFramework
from test_framework.util import assert_equal
class UacommentTest(CerebellumTestFramework):
    """Functional test of the -uacomment option: comments must appear in the
    node's advertised subversion string and be rejected when too long or
    containing unsafe characters."""
    def set_test_params(self):
        # A single node on a fresh chain is sufficient; no mining needed.
        self.num_nodes = 1
        self.setup_clean_chain = True
    def run_test(self):
        self.log.info("test multiple -uacomment")
        # subversion ends with "(testnode0)"; slice off just that comment.
        test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
        assert_equal(test_uacomment, "(testnode0)")
        self.restart_node(0, ["-uacomment=foo"])
        # Extra comments are appended inside the same parentheses.
        foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
        assert_equal(foo_uacomment, "(testnode0; foo)")
        self.log.info("test -uacomment max length")
        self.stop_node(0)
        # Startup must fail when the comment exceeds 256 characters...
        expected = "exceeds maximum length (256). Reduce the number or size of uacomments."
        self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected)
        self.log.info("test -uacomment unsafe characters")
        # ...and when it contains characters that would break the format.
        for unsafe_char in ['/', ':', '(', ')']:
            expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters"
            self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected)
if __name__ == '__main__':
UacommentTest().main()
|
[
"travisfinch01@gmail.com"
] |
travisfinch01@gmail.com
|
6c9178f13779fa4d7a1c76768de12543cd42748f
|
be9d82f263466b397d354798fcf260ceee7f8419
|
/scripts/docs.py
|
02ecae1eeb67800e7d5b17c1c4238c78b8fb1b18
|
[
"MIT",
"LicenseRef-scancode-generic-export-compliance"
] |
permissive
|
pradeepbhadani/dffml
|
0af135045a6d0bbde9b6f2d539839107bc0b5181
|
35bc31be462685efe78ede981dbef8fd5577882b
|
refs/heads/master
| 2020-06-24T17:05:37.415285
| 2019-07-26T15:19:22
| 2019-07-26T15:19:22
| 199,024,888
| 0
| 0
|
MIT
| 2019-07-26T15:19:23
| 2019-07-26T13:50:26
|
Python
|
UTF-8
|
Python
| false
| false
| 5,883
|
py
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2019 Intel Corporation
import os
import getpass
import inspect
import argparse
import pkg_resources
from typing import List
def traverse_get_config(target, *args):
    """Walk nested ``{level: {"config": {...}}}`` dicts along *args*.

    Returns the "config" mapping reached after following every path
    component; with no components, returns *target* unchanged.
    """
    node = target
    for level in args:
        node = node[level]["config"]
    return node
# reST snippet rendered once per plugin: the plugin name, a '~' underline of
# equal length, the maintenance flag in italics, then the cleaned docstring.
TEMPLATE = """{name}
{underline}
*{maintenance}*
{help}"""
def data_type_string(data_type, nargs=None):
    """Human-readable name for an argparse ``type`` callable.

    When *nargs* is given the value is a list, so the element type name
    is lowercased and pluralized ("List of strings", ...). Unknown types
    fall back to their ``__qualname__``.
    """
    if nargs is not None:
        return "List of %ss" % (data_type_string(data_type).lower(),)
    builtin_names = {str: "String", int: "Integer", bool: "Boolean"}
    try:
        return builtin_names[data_type]
    except (KeyError, TypeError):
        return data_type.__qualname__
def sanitize_default(default):
    """Render an argparse default for the docs, masking the builder's username."""
    if isinstance(default, str):
        # Don't leak the local username into generated documentation.
        return default.replace(getpass.getuser(), "user")
    return str(default)
def build_args(config):
    """Render the reST **Args** section for one plugin config dict.

    Entries whose 'arg' is None are skipped. Returns False when nothing is
    documentable so the caller can omit the section entirely.
    """
    rendered = []
    for key, value in config.items():
        arg = value["arg"]
        if arg is None:
            continue
        entry = "- %s: %s\n" % (
            key,
            data_type_string(arg.get("type", str), arg.get("nargs", None)),
        )
        # A blank line separates the heading from its detail sub-bullets.
        if "default" in arg or "help" in arg:
            entry += "\n"
        if "default" in arg:
            entry += "  - default: %s\n" % (sanitize_default(arg["default"]),)
        if "help" in arg:
            entry += "  - %s\n" % (arg["help"],)
        rendered.append(entry.rstrip())
    if not rendered:
        return False
    return "**Args**\n\n" + "\n\n".join(rendered)
def type_name(value):
    """Return a class's ``__qualname__``; pass anything else through unchanged."""
    return value.__qualname__ if inspect.isclass(value) else value
def format_op_definitions(definitions):
    """Yield one reST bullet per operation definition.

    Each bullet reads "- key: Name(type: primitive)". When a definition
    carries a ``spec`` callable, its signature parameters are appended as
    indented sub-bullets showing annotation and, if present, default.
    """
    for key, definition in definitions.items():
        item = "- %s: %s(type: %s)" % (
            key,
            definition.name,
            definition.primitive,
        )
        if definition.spec is not None:
            # Blank line, then one sub-bullet per spec parameter.
            item += "\n\n"
            item += "\n".join(
                [
                    "  - %s: %s%s"
                    % (
                        name,
                        type_name(param.annotation),
                        "(default: %s)" % (param.default,)
                        if param.default is not inspect.Parameter.empty
                        else "",
                    )
                    for name, param in inspect.signature(
                        definition.spec
                    ).parameters.items()
                ]
            )
        yield item
def format_op(op):
    """Render an operation's stage, inputs, outputs and conditions as reST."""
    build = []
    build.append("**Stage: %s**\n\n" % (op.stage.value))
    if op.inputs:
        build.append(
            "**Inputs**\n\n" + "\n".join(format_op_definitions(op.inputs))
        )
    if op.outputs:
        build.append(
            "**Outputs**\n\n" + "\n".join(format_op_definitions(op.outputs))
        )
    if op.conditions:
        # Conditions are a plain list of definitions (not a dict), so they
        # are rendered inline rather than via format_op_definitions.
        build.append(
            "**Conditions**\n\n"
            + "\n".join(
                [
                    "- %s: %s" % (definition.name, definition.primitive)
                    for definition in op.conditions
                ]
            )
        )
    return "\n\n".join(build)
def gen_docs(entrypoint: str, modules: List[str], maintenance: str = "Core"):
    """Render reST documentation for every plugin registered under *entrypoint*.

    Only entry points whose top-level package appears in *modules* are
    included. Returns one section per module: the module name, a dashed
    underline, then each plugin rendered via TEMPLATE plus op/arg details.
    Modules with no matching plugins are omitted.
    """
    per_module = {name: [] for name in modules}
    for i in pkg_resources.iter_entry_points(entrypoint):
        cls = i.load()
        if i.module_name.split(".")[0] not in modules:
            continue
        doc = cls.__doc__
        if doc is None:
            doc = "No description"
        else:
            doc = inspect.cleandoc(doc)
        formatting = {
            "name": i.name,
            "underline": "~" * len(i.name),
            "maintenance": maintenance,
            "help": doc,
        }
        formatted = TEMPLATE.format(**formatting)
        # Operation plugins expose an `op` attribute with their IO definitions.
        if getattr(cls, "op", False):
            formatted += "\n\n" + format_op(cls.op)
        # Plugins with configurable args get an **Args** section.
        defaults = cls.args({})
        if defaults:
            config = traverse_get_config(defaults, *cls.add_orig_label())
            formatted += "\n\n" + build_args(config)
        per_module[i.module_name.split(".")[0]].append(formatted)
    return "\n\n".join(
        [
            name + "\n" + "-" * len(name) + "\n\n" + "\n\n".join(docs)
            for name, docs in per_module.items()
            if docs
        ]
    )
def main():
    """CLI entry point for the docs generator.

    Two modes: with --entrypoint and --modules, print that entrypoint's docs
    to stdout; otherwise read the --care spec file (one "entrypoint pkg pkg…"
    per line) and write docs/plugins/<entrypoint>.rst for each line, appending
    the generated docs to the corresponding template file.
    """
    parser = argparse.ArgumentParser(description="Generate plugin docs")
    parser.add_argument("--entrypoint", help="Entrypoint to document")
    parser.add_argument("--modules", help="Modules to care about")
    parser.add_argument(
        "--maintenance",
        default="Core",
        help="Maintained as a part of DFFML or community managed",
    )
    parser.add_argument(
        "--care",
        default="scripts/docs/care",
        help="File with each line being: entrypoint package_name package_name...",
    )
    args = parser.parse_args()
    # Single-entrypoint mode: print and exit.
    if getattr(args, "entrypoint", False) and getattr(args, "modules", False):
        print(gen_docs(args.entrypoint, args.modules, args.maintenance))
        return
    # Spec-file mode: one output file per line of the care file.
    with open(args.care, "rb") as genspec:
        for line in genspec:
            entrypoint, modules = line.decode("utf-8").split(maxsplit=1)
            modules = modules.split()
            # e.g. "dffml.model" -> "dffml_model.rst"
            template = entrypoint.replace(".", "_") + ".rst"
            output = os.path.join("docs", "plugins", template)
            template = os.path.join("scripts", "docs", "templates", template)
            with open(template, "rb") as template_fd, open(
                output, "wb"
            ) as output_fd:
                output_fd.write(
                    (
                        template_fd.read().decode("utf-8")
                        + gen_docs(entrypoint, modules)
                    ).encode("utf-8")
                )
|
[
"johnandersenpdx@gmail.com"
] |
johnandersenpdx@gmail.com
|
019a2296e2c2a44d1f6bad58af35702346c4199e
|
2ae8fe4fccac95d98dffe77d5a948e64c3cb91b5
|
/2404.py
|
ede38319f93e8799fd73115eae1a663d4d2fbf38
|
[] |
no_license
|
lilaboc/leetcode
|
e61362592f87d094fe57635af1d7d2b93284bfe8
|
d4c5329a9d6ce945b965fd9d811757a80934dd36
|
refs/heads/master
| 2023-07-07T07:30:29.516698
| 2023-06-24T03:16:16
| 2023-06-24T03:16:16
| 25,621,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# https://leetcode.com/problems/most-frequent-even-element/description/
from collections import Counter
from typing import List
class Solution:
    def mostFrequentEven(self, nums: List[int]) -> int:
        """Return the smallest even value occurring most often in nums,
        or -1 when nums contains no even number."""
        counts = Counter(x for x in nums if x % 2 == 0)
        if not counts:
            return -1
        top = max(counts.values())
        # Ties on frequency are broken by the smaller value.
        return min(value for value, freq in counts.items() if freq == top)
# Ad-hoc smoke test; expected output: 2 (both 2 and 4 appear twice, 2 is smaller).
print(Solution().mostFrequentEven([0,1,2,2,4,4,1]))
|
[
"lilaboc.cn@gmail.com"
] |
lilaboc.cn@gmail.com
|
7c4472d1c378a26fb67ec9911be21421a4e0f8e4
|
b980c0bae0cff8533253c135449beb6e09759dca
|
/Grader_Exercise/04_Loop/04_Loop_002.py
|
1edaec5e838baacb6893af99ef8e5cf7b8e6e326
|
[] |
no_license
|
manhanton/COM-PROG
|
1f76985b3f3fea54057a0da1d3911dc91998c5be
|
7a4f2c62ecd6677ec1f818a5d115aa0fb182b3a2
|
refs/heads/main
| 2023-06-18T10:25:26.448133
| 2021-07-16T07:46:45
| 2021-07-16T07:46:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
# Read space-separated integers; the last element is used as the pivot.
d = [int(i) for i in input().split(' ')]
p = d[-1]
i = -1
j = 0
n = len(d)
# Lomuto-style partition pass (a single pass, not a full sort): every element
# <= pivot is swapped into the front segment ending at index i.
while j < n-1 :
    if d[j] <= p :
        i += 1
        d[i],d[j] = d[j],d[i]
    j += 1
# Place the pivot between the two segments and show the partitioned list.
d[-1],d[i+1] = d[i+1],d[-1]
print(d)
|
[
"meen2545@gmail.com"
] |
meen2545@gmail.com
|
6d06c44ef0584637a7e5e9645ae9ac066be5356e
|
610349599d32d7fc5ddae5dcb202836ca8be50aa
|
/blog/migrations/0012_auto_20200916_1140.py
|
6fec12b0c70e4c5506fc56f580196f8e254756e6
|
[] |
no_license
|
reetjakhar09/blogs
|
e3d9d14c01096e4a50474b5a7f562bea7b655a76
|
d0e17a8dd3761aaa08a59c466820040e05dc300a
|
refs/heads/master
| 2022-12-20T05:03:50.350408
| 2020-09-29T16:40:17
| 2020-09-29T16:40:17
| 299,676,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# Generated by Django 2.2.16 on 2020-09-16 11:40
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Switch Post.slug to an editable AutoSlugField populated from the title."""
    dependencies = [
        ('blog', '0011_auto_20200916_1139'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=True, populate_from='title'),
        ),
    ]
|
[
"test@gmail.com"
] |
test@gmail.com
|
aa8131480478b18d37db2f7289886a67b1ce0d30
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/meta/IngameDetailsHelpWindowMeta.py
|
dbfa7dc67745ed3fc5561ecd88202c1b71e07635
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 652
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/IngameDetailsHelpWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class IngameDetailsHelpWindowMeta(AbstractWindowView):
    """DAAPI meta bindings for the in-game details help window.

    The ``as_*S`` methods forward to the Flash object only once DAAPI is
    initialized; before that they return None.
    """

    def requestPageData(self, index):
        # Expected to be overridden on the Python side; report if it isn't.
        self._printOverrideError('requestPageData')

    def as_setPaginatorDataS(self, pages):
        if not self._isDAAPIInited():
            return None
        return self.flashObject.as_setPaginatorData(pages)

    def as_setPageDataS(self, data):
        if not self._isDAAPIInited():
            return None
        return self.flashObject.as_setPageData(data)
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
0e6c2ac755ecf45fc4a0cfe0470f37f85a5c9859
|
ca66a4283c5137f835377c3ed9a37128fcaed037
|
/Lib/site-packages/sklearn/manifold/__init__.py
|
60e17b6bfd918a11c0a1c25a5a6062760e777a68
|
[] |
no_license
|
NamithaKonda09/majorProject
|
f377f7a77d40939a659a3e59f5f1b771d88889ad
|
4eff4ff18fa828c6278b00244ff2e66522e0cd51
|
refs/heads/master
| 2023-06-04T20:25:38.450271
| 2021-06-24T19:03:46
| 2021-06-24T19:03:46
| 370,240,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
"""
The :mod:`sklearn.manifold` module implements data embedding techniques.
"""
from .locally_linear import locally_linear_embedding, LocallyLinearEmbedding
from .isomap import Isomap
from .mds import MDS, smacof
from .spectral_embedding_ import SpectralEmbedding, spectral_embedding
from .t_sne import TSNE
__all__ = ['locally_linear_embedding', 'LocallyLinearEmbedding', 'Isomap',
'MDS', 'smacof', 'SpectralEmbedding', 'spectral_embedding', "TSNE"]
|
[
"namithakonda09@gmail.com"
] |
namithakonda09@gmail.com
|
cf30bd306af294595dce71b6a3e2f4cd9bbe10a6
|
3ba975cb0b4b12510f05b9970f955b98ce576124
|
/setup.py
|
cd9ddfb83ebe7ed71de9d5e82960499e761b8f73
|
[
"MIT"
] |
permissive
|
vidrafeed/terminaltables
|
44d3b85e775138fa16f36f65be7a1bc0e89c04be
|
da4531bf0002051b3bcd97098acbe09c22ee7736
|
refs/heads/master
| 2020-12-30T20:04:31.298279
| 2015-03-22T22:34:43
| 2015-03-22T22:34:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,889
|
py
|
#!/usr/bin/env python
import atexit
from codecs import open
from distutils.spawn import find_executable
import os
import re
import sys
import subprocess
import setuptools.command.sdist
from setuptools.command.test import test
def _JOIN(*p):
    """Join path components onto the project root directory ``HERE``."""
    return os.path.join(HERE, *p)


def _PACKAGES():
    """List sub-package directories of the project, skipping __pycache__."""
    return [os.path.join(r, s) for r, d, _ in os.walk(NAME_FILE) for s in d if s != '__pycache__']


def _REQUIRES(p):
    """Read requirement lines from *p* (relative to HERE).

    Drops pip option lines starting with '-'; returns [] if the file is
    absent. Uses a context manager so the handle is always closed (the
    original lambda leaked it).
    """
    if not os.path.exists(_JOIN(p)):
        return []
    with open(_JOIN(p), encoding='utf-8') as handle:
        return [i for i in handle if i[0] != '-']


def _SAFE_READ(f, l):
    """Return up to *l* characters of file *f*, or '' if it does not exist."""
    if not os.path.exists(_JOIN(f)):
        return ''
    with open(_JOIN(f), encoding='utf-8') as handle:
        return handle.read(l)


# Matches dunder metadata assignments (__version__/__author__/__license__)
# in the module source so setup metadata stays defined in one place.
_VERSION_RE = re.compile(r"^__(version|author|license)__ = '([\w\.@]+)'$", re.MULTILINE)
CLASSIFIERS = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries',
'Topic :: Terminals',
'Topic :: Text Processing :: Markup',
)
DESCRIPTION = 'Generate simple tables in terminals from a nested list of strings.'
HERE = os.path.abspath(os.path.dirname(__file__))
KEYWORDS = 'Shell Bash ANSI ASCII terminal tables'
NAME = 'terminaltables'
NAME_FILE = NAME
PACKAGE = False
VERSION_FILE = os.path.join(NAME_FILE, '__init__.py') if PACKAGE else '{0}.py'.format(NAME_FILE)
class PyTest(test):
    """``setup.py test``: run the py.test suite with terminal coverage."""

    description = 'Run all tests.'
    user_options = []
    # Command name as typed on the setup.py command line; subclasses
    # override CMD/TEST_ARGS to change the pytest invocation.
    CMD = 'test'
    TEST_ARGS = ['--cov-report', 'term-missing', '--cov', NAME_FILE, 'tests']

    def finalize_options(self):
        # Everything typed after the command name is forwarded to pytest.
        overflow_args = sys.argv[sys.argv.index(self.CMD) + 1:]
        test.finalize_options(self)
        setattr(self, 'test_args', self.TEST_ARGS + overflow_args)
        setattr(self, 'test_suite', True)

    def run_tests(self):
        # Import here, cause outside the eggs aren't loaded.
        pytest = __import__('pytest')
        err_no = pytest.main(self.test_args)
        sys.exit(err_no)
class PyTestPdb(PyTest):
    """``setup.py testpdb``: same suite, but drop into ipdb on failure."""

    description = 'Run all tests, drops to ipdb upon unhandled exception.'
    CMD = 'testpdb'
    TEST_ARGS = ['--ipdb', 'tests']
class PyTestCovWeb(PyTest):
    """``setup.py testcovweb``: run tests, then open the HTML coverage report."""

    description = 'Generates HTML report on test coverage.'
    CMD = 'testcovweb'
    TEST_ARGS = ['--cov-report', 'html', '--cov', NAME_FILE, 'tests']

    def run_tests(self):
        # Open the generated report after pytest finishes; an atexit hook is
        # needed because PyTest.run_tests ends with sys.exit(). 'open' is the
        # macOS opener, so this is a no-op elsewhere.
        if find_executable('open'):
            atexit.register(lambda: subprocess.call(['open', _JOIN('htmlcov', 'index.html')]))
        PyTest.run_tests(self)
# Static setup() keyword arguments; dynamic fields (version/author/license
# and py_modules-vs-packages) are merged in below.
ALL_DATA = dict(
    author_email='robpol86@gmail.com',
    classifiers=CLASSIFIERS,
    cmdclass={PyTest.CMD: PyTest, PyTestPdb.CMD: PyTestPdb, PyTestCovWeb.CMD: PyTestCovWeb},
    description=DESCRIPTION,
    install_requires=_REQUIRES('requirements.txt'),
    keywords=KEYWORDS,
    long_description=_SAFE_READ('README.rst', 15000),
    name=NAME,
    tests_require=_REQUIRES('requirements-test.txt'),
    url='https://github.com/Robpol86/{0}'.format(NAME),
    zip_safe=True,
)
# Pull __version__/__author__/__license__ out of the module source itself.
# noinspection PyTypeChecker
ALL_DATA.update(dict(_VERSION_RE.findall(_SAFE_READ(VERSION_FILE, 1500).replace('\r\n', '\n'))))
# Single-module project ships as py_modules; package projects list packages.
ALL_DATA.update(dict(py_modules=[NAME_FILE]) if not PACKAGE else dict(packages=[NAME_FILE] + _PACKAGES()))

if __name__ == '__main__':
    if not all((ALL_DATA['author'], ALL_DATA['license'], ALL_DATA['version'])):
        raise ValueError('Failed to obtain metadata from package/module.')
    setuptools.setup(**ALL_DATA)
|
[
"robpol86@gmail.com"
] |
robpol86@gmail.com
|
4d3ad13d97346c629af9713a9ec75583d934cf79
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/web/v20181101/list_web_app_sync_function_triggers.py
|
f1ecce2911774db5bc639f91d0d5aee728445211
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,108
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListWebAppSyncFunctionTriggersResult',
'AwaitableListWebAppSyncFunctionTriggersResult',
'list_web_app_sync_function_triggers',
]
@pulumi.output_type
class ListWebAppSyncFunctionTriggersResult:
    """
    Function secrets.
    """
    # Generated boilerplate: validate each field is a str, then store it via
    # pulumi.set so the @pulumi.getter properties below can read it back.
    def __init__(__self__, id=None, key=None, kind=None, name=None, trigger_url=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if key and not isinstance(key, str):
            raise TypeError("Expected argument 'key' to be a str")
        pulumi.set(__self__, "key", key)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if trigger_url and not isinstance(trigger_url, str):
            raise TypeError("Expected argument 'trigger_url' to be a str")
        pulumi.set(__self__, "trigger_url", trigger_url)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        """
        Secret key.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="triggerUrl")
    def trigger_url(self) -> Optional[str]:
        """
        Trigger URL.
        """
        return pulumi.get(self, "trigger_url")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableListWebAppSyncFunctionTriggersResult(ListWebAppSyncFunctionTriggersResult):
    """Awaitable wrapper: lets callers ``await`` an already-resolved result.

    The generator-based __await__ never actually suspends (the ``if False``
    yield only marks it as a generator) and immediately returns a plain
    result object.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListWebAppSyncFunctionTriggersResult(
            id=self.id,
            key=self.key,
            kind=self.kind,
            name=self.name,
            trigger_url=self.trigger_url,
            type=self.type)
def list_web_app_sync_function_triggers(name: Optional[str] = None,
                                        resource_group_name: Optional[str] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppSyncFunctionTriggersResult:
    """
    Function secrets.

    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param pulumi.InvokeOptions opts: Optional invoke options; the SDK's own
        version is filled in when the caller did not pin one.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function and unwrap (.value) into the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20181101:listWebAppSyncFunctionTriggers', __args__, opts=opts, typ=ListWebAppSyncFunctionTriggersResult).value

    return AwaitableListWebAppSyncFunctionTriggersResult(
        id=__ret__.id,
        key=__ret__.key,
        kind=__ret__.kind,
        name=__ret__.name,
        trigger_url=__ret__.trigger_url,
        type=__ret__.type)
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
8bc0e253bcff105a4ede5e931ee1a5b8c496b7a1
|
5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8
|
/buildout-cache/eggs/plone.app.versioningbehavior-1.2.0-py2.7.egg/plone/app/versioningbehavior/testing.py
|
b6817a3b3f16fd765884b5ed7e997bd55a8acf8f
|
[] |
no_license
|
renansfs/Plone_SP
|
27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a
|
8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5
|
refs/heads/master
| 2021-01-15T15:32:43.138965
| 2016-08-24T15:30:19
| 2016-08-24T15:30:19
| 65,313,812
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,740
|
py
|
# -*- coding: utf-8 -*-
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.dexterity.fti import DexterityFTI
from Products.CMFCore.utils import getToolByName
from Products.CMFDiffTool.TextDiff import TextDiff
from Products.PloneTestCase.layer import onteardown
from zope.configuration import xmlconfig
# Make it work with plone.protect < 3.0.0 where the `auto` module is not available.
# This is necessary for Plone 4.3.x compatibility.
try:
from plone.protect import auto as protect_auto
except ImportError:
class DummyAuto(object):
CSRF_DISABLED = True
protect_auto = DummyAuto()
def fix_plonetestcase_mess():
    """Register a Products.PloneTestCase cleanup.

    It is a nested teardown so that we can make sure that it is executed
    as the last teardown function.
    """
    def reset_zope2():
        """Testing.ZopeTestCase.layer.ZopeLite does not support tearing down.

        This results in a partially torn down Zope2 instance. This function
        resets the Zope2 initialization state so that we can initialize
        another Zope2 instance with p.a.testing.
        """
        import Zope2
        Zope2._began_startup = 0
    onteardown(reset_zope2)()


# Nest the fix itself in a teardown so it runs after all other teardowns.
onteardown(fix_plonetestcase_mess)()
TEST_CONTENT_TYPE_ID = 'TestContentType'
DEFAULT_POLICIES = ('at_edit_autoversion', 'version_on_revert',)
class VersioningLayer(PloneSandboxLayer):
    """Test layer: loads plone.app.versioningbehavior and registers a
    versionable Dexterity test content type with diff/policy wiring.
    """

    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        # Load this package's ZCML so its behaviors are registered.
        import plone.app.versioningbehavior
        xmlconfig.file('configure.zcml', plone.app.versioningbehavior,
                       context=configurationContext)

    def setUpPloneSite(self, portal):
        applyProfile(portal, 'plone.app.versioningbehavior:default')
        self.registerVersionedDocumentFTI(portal)

    def registerVersionedDocumentFTI(self, portal):
        """Create the test FTI and hook it into diff and repository tools."""
        types_tool = getToolByName(portal, 'portal_types')
        # Minimal schema with a single optional text field; the IVersionable
        # behavior makes instances versionable.
        fti = DexterityFTI(
            TEST_CONTENT_TYPE_ID,
            global_allow=True,
            behaviors=(
                'plone.app.versioningbehavior.behaviors.IVersionable',
                'plone.app.dexterity.behaviors.metadata.IBasic',
            ),
            model_source="""
                <model xmlns="http://namespaces.plone.org/supermodel/schema">
                    <schema>
                        <field name="text" type="zope.schema.Text">
                            <title>Text</title>
                            <required>False</required>
                        </field>
                    </schema>
                </model>
            """)
        types_tool._setObject(TEST_CONTENT_TYPE_ID, fti)

        # Diff the text field as plain text so version comparisons work.
        diff_tool = getToolByName(portal, 'portal_diff')
        diff_tool.setDiffForPortalType(
            TEST_CONTENT_TYPE_ID, {'text': TextDiff.meta_type})

        # Mark the type versionable and attach the default policies.
        portal_repository = getToolByName(portal, 'portal_repository')
        portal_repository.setVersionableContentTypes(
            list(portal_repository.getVersionableContentTypes()) +
            [TEST_CONTENT_TYPE_ID])
        for policy_id in DEFAULT_POLICIES:
            portal_repository.addPolicyForContentType(
                TEST_CONTENT_TYPE_ID, policy_id)

    def testSetUp(self):
        # Disable CSRF protection per test, restoring it in testTearDown.
        self.CSRF_DISABLED_ORIGINAL = protect_auto.CSRF_DISABLED
        protect_auto.CSRF_DISABLED = True

    def testTearDown(self):
        protect_auto.CSRF_DISABLED = self.CSRF_DISABLED_ORIGINAL
VERSIONING_FIXTURE = VersioningLayer()
VERSIONING_FUNCTIONAL_TESTING = FunctionalTesting(
bases=(VERSIONING_FIXTURE,),
name='plone.app.versioningbehavior:functional')
|
[
"renansfs@gmail.com"
] |
renansfs@gmail.com
|
92c0e09476c89293544c2680a6d1bb74a148fe62
|
27691e5ef8e49fb29189b01dd76a1dc3720e7ae8
|
/AC/ABC-TDD/057/c.py
|
33fa121771e3149b137159983a0a5ae01e6fbf1c
|
[] |
no_license
|
oshou/procon
|
61e5f5bc819e0fe5ab29749fc2f894fe6f3b1d07
|
3d000c64b5917c65b51ed7da5b90cb79892d5909
|
refs/heads/master
| 2023-05-10T23:56:50.861468
| 2021-09-23T06:07:29
| 2021-09-23T06:07:29
| 116,886,484
| 1
| 0
| null | 2023-05-05T02:28:41
| 2018-01-10T00:21:38
|
Go
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
import math
def f(a, b: int) -> int:
    """Return the decimal digit count of whichever of *a*, *b* is longer."""
    return max(len(str(a)), len(str(b)))
# AtCoder ABC057 C: over all factor pairs (i, n // i), minimize F — the
# larger decimal digit count of the pair. Only divisors up to sqrt(n) need
# checking since each pair is seen once with i <= n // i.
n = int(input())
limit = math.isqrt(n)  # exact integer sqrt; int(math.sqrt(n)) can be off for large n
best = len(str(n))     # upper bound from the trivial pair (1, n)
for i in range(1, limit + 1):
    if n % i == 0:
        # n // i keeps the cofactor exact (int(n / i) loses precision for
        # large n); `best`/`limit` also stop shadowing builtins.
        best = min(best, f(i, n // i))
print(best)
|
[
"adf1985adf@gmail.com"
] |
adf1985adf@gmail.com
|
4aa5de904d5db17eee1ffaeb1ac77ed83ca86551
|
3eae9c14c119ee2d6a7d02ef1ba5d61420959e3c
|
/modules/core/mgmt/rwuagent/test/utframework/testtasklet.py
|
e8a7ca66d397981331bc610e8777f2b7cffb1434
|
[
"Apache-2.0"
] |
permissive
|
RIFTIO/RIFT.ware
|
94d3a34836a04546ea02ec0576dae78d566dabb3
|
4ade66a5bccbeb4c5ed5b56fed8841e46e2639b0
|
refs/heads/RIFT.ware-4.4.1
| 2020-05-21T14:07:31.092287
| 2017-06-05T16:02:48
| 2017-06-05T16:02:48
| 52,545,688
| 9
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(BEGIN)
# Creation Date: 3/20/2016
# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END)
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
# install the plugin implementation in site-packages and then
# import it from the actual plugin.
import rift.tasklets.uagenttbed
class Tasklet(rift.tasklets.uagenttbed.TestTasklet):
    """Plugin entry point: thin alias for the real TestTasklet.

    The implementation lives in site-packages (rift.tasklets.uagenttbed)
    because of the rpmbuild/python2 packaging workaround documented above;
    this subclass only re-exports it under the plugin's expected name.
    """
    pass
|
[
"Leslie.Giles@riftio.com"
] |
Leslie.Giles@riftio.com
|
b9459e9ff5bdd141c5f22653c1f854167a3a9ddc
|
6be1990abf99c85ef886b49dcea1824aabb648d3
|
/weixinofneolocal/weixinofneolocal/zinnia/management/commands/zinnia2wp.py
|
ed19b9f8955b229141d7afe8a566d52eb21ec5cb
|
[] |
no_license
|
neoguojing/cloudServer
|
b53ae205efe52cf0aea28dbb9e6c16c20caf991f
|
7c19101789b0c46474269e4c8fe00e92203e9cd7
|
refs/heads/master
| 2020-12-04T23:02:23.551479
| 2017-09-22T03:08:35
| 2017-09-22T03:08:35
| 67,382,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
"""Zinnia to WordPress command module"""
from django.conf import settings
from django.utils.encoding import smart_str
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
from django.core.management.base import NoArgsCommand
from utils.tagging.models import Tag
from zinnia import __version__
from zinnia.settings import PROTOCOL
from zinnia.models.entry import Entry
from zinnia.models.category import Category
class Command(NoArgsCommand):
    """Command object for exporting a Zinnia blog
    into WordPress via a WordPress eXtended RSS (WXR) file."""
    help = 'Export Zinnia to WXR file.'

    def handle_noargs(self, **options):
        """Render the whole blog (entries, categories, tags) through the
        ``zinnia/wxr.xml`` template and write the WXR XML to stdout."""
        site = Site.objects.get_current()
        # Context consumed by the WXR template; `site_url` is rebuilt from
        # the configured protocol since Site stores only the domain.
        blog_context = {'entries': Entry.objects.all(),
                        'categories': Category.objects.all(),
                        'tags': Tag.objects.usage_for_model(Entry),
                        'version': __version__,
                        'language': settings.LANGUAGE_CODE,
                        'site': site,
                        'site_url': '%s://%s' % (PROTOCOL, site.domain)}
        export = render_to_string('zinnia/wxr.xml', blog_context)
        print(smart_str(export))
|
[
"guojing_neo@163.com"
] |
guojing_neo@163.com
|
a6492367edc20853e84511db1b05e10bf5cfc989
|
82aee3211216f55392d5a757eb57f02c859e9a28
|
/Easy/172_factorailTrailingZeroes.py
|
05ea5253c0407b74dbb6ecfb30dd4f8afb340ace
|
[] |
no_license
|
Yucheng7713/CodingPracticeByYuch
|
505d18095d4b9a35c1f3b23632a90a76d811b64a
|
1461b10b8910fa90a311939c6df9082a8526f9b1
|
refs/heads/master
| 2022-05-01T11:51:00.612603
| 2022-04-18T09:46:55
| 2022-04-18T09:46:55
| 198,961,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
class Solution:
    def trailingZeroes(self, n):
        """Count trailing zeros of n! iteratively.

        Each multiple of 5, 25, 125, ... in 1..n contributes a factor of 5
        (factors of 2 are always plentiful), so the answer is
        n // 5 + n // 25 + n // 125 + ...
        """
        zeros = 0
        power = 5
        while power <= n:
            zeros += n // power
            power *= 5
        return zeros
|
[
"yuchengh@usc.edu"
] |
yuchengh@usc.edu
|
bc9d969532d16be769e237943030facdb30ca407
|
731c17913b5ff61190f938909e1a74bae18285c9
|
/tf_agents/agents/sac/tanh_normal_projection_network_test.py
|
7e0a439c969d84cc822e3458cdbe3dff3e9e3f54
|
[
"Apache-2.0"
] |
permissive
|
isabella232/agents
|
d3055ca0a4d593e2251801264354fb7193c8f99f
|
b2ed02d20c43a4b789a4711f4653e8421f8ba526
|
refs/heads/master
| 2023-03-10T16:14:46.426288
| 2020-12-29T20:32:45
| 2020-12-29T20:33:06
| 326,205,625
| 0
| 0
|
Apache-2.0
| 2021-02-24T00:52:40
| 2021-01-02T14:57:47
| null |
UTF-8
|
Python
| false
| false
| 2,584
|
py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.networks.normal_projection_network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.specs import tensor_spec
def _get_inputs(batch_size, num_input_dims):
    # Random network input of shape (batch_size, num_input_dims), uniform
    # in [0, 1) — tf.random.uniform's default range.
    return tf.random.uniform([batch_size, num_input_dims])
class TanhNormalProjectionNetworkTest(tf.test.TestCase):
    """Shape and variable-count tests for TanhNormalProjectionNetwork."""

    def testBuild(self):
        # 2-dim bounded action spec; the network projects a 5-dim input into
        # a distribution whose underlying input_distribution is a
        # MultivariateNormalDiag (presumably then squashed by tanh, per the
        # module name — not asserted here).
        output_spec = tensor_spec.BoundedTensorSpec([2], tf.float32, 0, 1)
        network = tanh_normal_projection_network.TanhNormalProjectionNetwork(
            output_spec)
        inputs = _get_inputs(batch_size=3, num_input_dims=5)
        distribution, _ = network(inputs, outer_rank=1)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.assertEqual(tfp.distributions.MultivariateNormalDiag,
                         type(distribution.input_distribution))
        means = distribution.input_distribution.loc
        stds = distribution.input_distribution.scale
        # loc is (batch, action_dim); scale is checked as (batch, 2, 2).
        self.assertAllEqual(means.shape.as_list(),
                            [3] + output_spec.shape.as_list())
        self.assertAllEqual(stds.shape.as_list(),
                            [3] + output_spec.shape.as_list()*2)

    def testTrainableVariables(self):
        output_spec = tensor_spec.BoundedTensorSpec([2], tf.float32, 0, 1)
        network = tanh_normal_projection_network.TanhNormalProjectionNetwork(
            output_spec)
        inputs = _get_inputs(batch_size=3, num_input_dims=5)
        network(inputs, outer_rank=1)
        self.evaluate(tf.compat.v1.global_variables_initializer())
        # Dense kernel and bias.
        # 5 inputs -> 4 outputs (2 means + 2 stddev params for a 2-dim action).
        self.assertEqual(2, len(network.trainable_variables))
        self.assertEqual((5, 4), network.trainable_variables[0].shape)
        self.assertEqual((4,), network.trainable_variables[1].shape)
if __name__ == '__main__':
tf.test.main()
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
f258a219f8446de388d4977228e418e4f63b53f1
|
031b24455b953907a0f98778931ee8a03c3c4b6c
|
/pacman103/front/common/delay_projection_subedge.py
|
20ba4ca1e9194c2bc870a136e70f2f2169b9f2a1
|
[] |
no_license
|
BRML/HBP-spinnaker-cerebellum
|
7e5f69c05d0e51f79442635df58815768f20e6bc
|
7fc3eb5c486df66720d227e0e422cbab65c08885
|
refs/heads/master
| 2020-12-25T23:47:09.416213
| 2015-06-26T09:45:31
| 2015-06-26T09:45:31
| 38,686,607
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,612
|
py
|
from pacman103.front.common.projection_subedge import ProjectionSubedge
from pacman103.front.common.synaptic_list import SynapticList
from pacman103.front.common.synapse_row_info import SynapseRowInfo
import logging
logger = logging.getLogger(__name__)
class DelayProjectionSubedge(ProjectionSubedge):
    """Projection subedge feeding a delay-extension vertex.

    Synaptic rows are regrouped into one bank per delay stage; every bank
    except the last is padded to exactly 256 rows so a row's position
    encodes (stage, presynaptic neuron).
    """

    def __init__(self, edge, presubvertex, postsubvertex):
        super(DelayProjectionSubedge, self).__init__(edge, presubvertex,
                                                     postsubvertex)
        # Cache built lazily by get_synapse_sublist; synapse_delay_rows is
        # the total row count of the built list.
        self.synapse_sublist = None
        self.synapse_delay_rows = None

    def get_synapse_sublist(self):
        """
        Gets the synapse list for this subedge
        """
        if self.synapse_sublist is None:
            # Restrict the edge's full synapse list to this subedge's
            # pre/post atom ranges.
            synapse_sublist = self.edge.synapse_list.create_atom_sublist(
                self.presubvertex.lo_atom, self.presubvertex.hi_atom,
                self.postsubvertex.lo_atom, self.postsubvertex.hi_atom)
            # if logger.isEnabledFor("debug"):
            #     logger.debug("Original Synapse List rows:")
            #     orig_list = synapse_sublist.get_rows()
            #     for i in range(len(orig_list)):
            #         logger.debug("{}: {}".format(i, orig_list[i]))

            # The 256-row bank layout caps the number of incoming neurons.
            if synapse_sublist.get_n_rows() > 256:
                raise Exception(
                    "Delay sub-vertices can only support up to"
                    + " 256 incoming neurons!")

            # Split rows into per-stage delay windows and concatenate the
            # stages, padding each non-final stage to a full 256-row bank.
            full_delay_list = list()
            for i in range(0, self.edge.num_delay_stages):
                min_delay = (i * self.edge.max_delay_per_neuron)
                max_delay = min_delay + self.edge.max_delay_per_neuron
                delay_list = synapse_sublist.get_delay_sublist(min_delay,
                                                               max_delay)
                # if logger.isEnabledFor("debug"):
                #     logger.debug("    Rows for delays {} - {}:".format(
                #         min_delay, max_delay))
                #     for i in range(len(delay_list)):
                #         logger.debug("{}: {}".format(i, delay_list[i]))
                full_delay_list.extend(delay_list)

                # Add extra rows for the "missing" items, up to 256
                if (i + 1) < self.edge.num_delay_stages:
                    for _ in range(0, 256 - len(delay_list)):
                        full_delay_list.append(SynapseRowInfo([], [], [], []))
            self.synapse_sublist = SynapticList(full_delay_list)
            self.synapse_delay_rows = len(full_delay_list)
        return self.synapse_sublist

    def get_synaptic_data(self, controller, delay_offset):
        """Reassemble per-presynaptic-neuron rows from the banked layout.

        Inverse of get_synapse_sublist's grouping: row (stage * 256 + atom)
        is appended back onto atom's row with the stage's minimum delay
        (shifted by *delay_offset*) re-applied.
        """
        delay_list = self.postsubvertex.vertex.get_synaptic_data(controller,
            self.presubvertex, self.synapse_delay_rows, self.postsubvertex,
            self.edge.synapse_row_io).get_rows()
        rows = list()
        for pre_atom in range(0, self.presubvertex.n_atoms):
            rows.append(SynapseRowInfo([], [], [], []))
        for i in range(0, self.edge.num_delay_stages):
            min_delay = (i * self.edge.max_delay_per_neuron) + delay_offset
            list_offset = i * 256
            for pre_atom in range(0, self.presubvertex.n_atoms):
                row = delay_list[list_offset + pre_atom]
                rows[pre_atom].append(row, min_delay=min_delay)
        return SynapticList(rows)

    def free_sublist(self):
        """
        Indicates that the list will not be needed again
        """
        self.synapse_sublist = None
|
[
"dr.christoph.richter@gmail.com"
] |
dr.christoph.richter@gmail.com
|
504dcd6b031b80b87d353d6fb8c1fe63157987f6
|
f4a0b5a834b47bfee2f89c318e97b9f4ae11a968
|
/lib/collision_detector.py
|
47b488f9603004391d92b4af2a3d8ddc8ce1554c
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Marcus-Peterson/ros
|
5f0937dcd28198cd9be1025144d0bcc3cf69da31
|
77b1361e78f68f00ba2d3e3db908bb5ce0f973f5
|
refs/heads/master
| 2023-07-16T03:15:51.412658
| 2021-06-10T10:53:09
| 2021-06-10T10:53:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,551
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: altheim
# created: 2020-03-31
# modified: 2021-02-28
#
from colorama import init, Fore, Style
init()
try:
from gpiozero import DigitalInputDevice
print('successfully imported gpiozero.')
except Exception:
print('unable to import gpiozero.')
from lib.logger import Logger, Level
from lib.event import Event
from lib.message import Message
# ..............................................................................
class CollisionDetector():
    '''
    Uses an 15cm infrared sensor to scan for an imminent collision at the front
    of the robot, particularly with the mast. This class may be extended to
    include multiple sensors that send the same COLLISION_DETECT message.

    :param config:           application configuration; reads the GPIO pin
                             from ros.collision_detect.pin
    :param message_factory:  creates COLLISION_DETECT messages
    :param message_bus:      bus the messages are published on
    :param level:            log level (note: the logger is currently
                             created at Level.INFO regardless)
    '''
    def __init__(self, config, message_factory, message_bus, level):
        self._log = Logger('collision', Level.INFO)
        if config is None:
            raise ValueError('no configuration provided.')
        self._message_factory = message_factory
        self._message_bus = message_bus
        _config = config['ros'].get('collision_detect')
        _pin = _config.get('pin')
        # Debounced digital input; gpiozero fires the callbacks below on
        # rising/falling edges.
        self._sensor = DigitalInputDevice(_pin, bounce_time=0.2, pull_up=False)
        self._sensor.when_activated   = self._activated
        self._sensor.when_deactivated = self._deactivated
        self._disabling = False
        self._enabled = False
        self._closed = False
        # arm behaviour
        # NOTE(review): these arm attributes are unused in this class —
        # presumably consumed by a subclass or copied from a sibling; verify.
        self._arm_movement_degree_step = 5.0
        self._arm_up_delay             = 0.09
        self._arm_down_delay           = 0.04
        self._log.info('ready.')

    # ..........................................................................
    def _activated(self):
        '''
        The default function called when the sensor is activated.
        Publishes a COLLISION_DETECT message when enabled.
        '''
        if self._enabled:
            self._log.info(Fore.YELLOW + 'detected mast sensor!')
            self._message_bus.handle(self._message_factory.get_message(Event.COLLISION_DETECT, True))
        else:
            self._log.info('collision detection not enabled.')

    # ..........................................................................
    def _deactivated(self):
        '''
        The default function called when the sensor is deactivated.
        Log-only; no message is published on deactivation.
        '''
        if self._enabled:
            self._log.info('deactivated collision detection.')
        else:
            self._log.debug('collision detection not enabled.')

    # ..........................................................................
    def enable(self):
        self._log.debug('enabling...')
        if self._closed:
            # Once closed the detector cannot be re-enabled.
            self._log.warning('cannot enable: closed.')
            return
        self._enabled = True
        self._log.debug('enabled.')

    # ..........................................................................
    def disable(self):
        if self._disabling:
            self._log.warning('already disabling.')
        else:
            self._disabling = True
            self._enabled = False
            self._log.debug('disabling...')
            self._disabling = False
            self._log.debug('disabled.')

    # ..........................................................................
    def close(self):
        # Permanently disable; see enable() guard above.
        self.disable()
        self._closed = True
#EOF
|
[
"ichiro.furusato@gmail.com"
] |
ichiro.furusato@gmail.com
|
35c8bbadea7912a45f80c67b0bda1e2a9b39bce0
|
9ba30f939b79df5bc8ea8ab97196693e10d45605
|
/airflow/contrib/operators/dataflow_operator.py
|
8f61e18ab5d6a61a18b1dfc296650253e833f727
|
[
"Apache-2.0"
] |
permissive
|
suchenzang/incubator-airflow
|
5d09f1d1dfaf3f668408bd162bc275e1c112fbb7
|
5d90d132af4b5a455c6f3bb43817f0e46195cf72
|
refs/heads/master
| 2021-01-18T07:06:06.616698
| 2016-07-19T06:33:47
| 2016-07-19T06:33:47
| 63,733,649
| 2
| 0
| null | 2016-07-19T23:07:06
| 2016-07-19T23:07:06
| null |
UTF-8
|
Python
| false
| false
| 3,680
|
py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataFlowJavaOperator(BaseOperator):
    """
    Start a Java Cloud DataFlow batch job. The parameters of the operation
    will be passed to the job.

    It's a good practice to define dataflow_* parameters in the default_args of the dag
    like the project, zone and staging location.

    ```
    default_args = {
        'dataflow_default_options': {
            'project': 'my-gcp-project',
            'zone': 'europe-west1-d',
            'stagingLocation': 'gs://my-staging-bucket/staging/'
        }
    }
    ```

    You need to pass the path to your dataflow as a file reference with the ``jar``
    parameter, the jar needs to be a self executing jar. Use ``options`` to pass on
    options to your job.

    ```
    t1 = DataFlowJavaOperator(
        task_id='datapflow_example',
        jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
        options={
            'autoscalingAlgorithm': 'BASIC',
            'maxNumWorkers': '50',
            'start': '{{ds}}',
            'partitionType': 'DAY'
        },
        dag=my-dag)
    ```

    Both ``jar`` and ``options`` are templated so you can use variables in them.
    """
    template_fields = ['options', 'jar']
    ui_color = '#0273d4'

    @apply_defaults
    def __init__(
            self,
            jar,
            dataflow_default_options=None,
            options=None,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        Create a new DataFlowJavaOperator.

        For more detail on about job submission have a look at the reference:

        https://cloud.google.com/dataflow/pipelines/specifying-exec-params

        :param jar: The reference to a self executing DataFlow jar.
        :type jar: string
        :param dataflow_default_options: Map of default job options.
        :type dataflow_default_options: dict
        :param options: Map of job specific options.
        :type options: dict
        :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request must have domain-wide
            delegation enabled.
        :type delegate_to: string
        """
        super(DataFlowJavaOperator, self).__init__(*args, **kwargs)
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.jar = jar
        # Use None (not {}) as the signature default to avoid the shared
        # mutable-default-argument pitfall, then normalize here so the rest
        # of the operator can assume real dicts.
        self.dataflow_default_options = dataflow_default_options or {}
        self.options = options or {}

    def execute(self, context):
        """Merge default and job-specific options (job options win) and
        launch the jar via the DataFlow hook."""
        hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)

        dataflow_options = copy.copy(self.dataflow_default_options)
        dataflow_options.update(self.options)

        hook.start_java_dataflow(self.task_id, dataflow_options, self.jar)
|
[
"bolke@xs4all.nl"
] |
bolke@xs4all.nl
|
14ff0b9eaaa24fab20c4b9e88038e39cee101426
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/2374.py
|
3bfb9aefbafc0afcf5a26e222d920922d38daec3
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
# Google Codejam - Cookie Clicker Alpha
def ShouldInvest(C, F, X, Gradient):
    """Return True while buying one more farm is worthwhile.

    A farm costs ``C`` cookies and adds ``F`` cookies/s. It pays off as
    long as the current rate ``Gradient`` is below ``F * (X - C) / C``,
    i.e. the time spent saving for the farm is recouped before reaching
    the ``X``-cookie goal.
    """
    return Gradient < (F * (X - C) / C)  # mathematics ;P


def MinimumTime(C, F, X):
    """Return the minimum time (seconds) to accumulate ``X`` cookies.

    Greedy strategy: starting at 2 cookies/s, keep buying farms while
    ``ShouldInvest`` says the next one still pays off, then simply wait
    for ``X`` cookies at the final rate. (The original version had an
    unreachable ``return -1`` after an infinite ``while 1``; the loop now
    states its condition directly.)
    """
    rate = 2      # current production in cookies per second
    elapsed = 0
    while ShouldInvest(C, F, X, rate):
        elapsed += C / rate  # save up the farm's price...
        rate += F            # ...and buy it immediately
    # No further farm pays off: wait out the goal at the current rate.
    return elapsed + X / rate
def main():
    """Read T test cases of "C F X" from cookie.in and print each answer.

    Output follows the Code Jam format: ``Case #k: <time>`` with seven
    decimals. Returns 0 on success, 1 if the input file cannot be opened.
    """
    try:
        f = open('cookie.in', 'r')
    except OSError:
        # The original bare `except:` fell through and then crashed with a
        # NameError on the unbound `f`; report the problem and bail out.
        print('cannot open input file!')
        return 1
    with f:  # guarantees the handle is closed even if parsing raises
        T = int(f.readline())
        for t in range(1, T + 1):
            print('Case #', t, ': ', end='', sep='')
            Line = f.readline()
            Line = Line.split(' ')
            C = float(Line[0])
            F = float(Line[1])
            X = float(Line[2])
            print('{0:.7f}'.format(MinimumTime(C, F, X)))
    return 0


if __name__ == '__main__':
    main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
550c0de84f2fb3a9f49462534f551540f2a83251
|
f47edebe28f491f94b4fd30c2e4d8b29e1abc2a1
|
/src/models/configs/model_operation.py
|
f56635621bd7c1d91bc7c576904309f5abe043dd
|
[
"MIT"
] |
permissive
|
Nardri/rbac-service
|
4a2053a38597315b4ef1ce5b877cf031e64eae12
|
c5cf6baf60e95a7790156c85e37c76c697efd585
|
refs/heads/develop
| 2022-10-04T06:17:30.766467
| 2020-01-31T11:06:03
| 2020-01-31T11:06:03
| 226,638,869
| 0
| 0
|
MIT
| 2022-09-16T18:19:46
| 2019-12-08T08:48:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
"""Common model operations"""
from .database import database as db
class ModelOperationsMixin:
    """Mixin adding common CRUD helpers to SQLAlchemy models.

    Every mutator commits immediately and returns the instance for
    chaining.
    """
    __abstract__ = True

    def save(self):
        """Add this instance to the session and commit."""
        db.session.add(self)
        db.session.commit()
        return self

    def delete(self):
        """Delete this instance from the database and commit."""
        db.session.delete(self)
        db.session.commit()
        return self

    def update_(self, **kwargs):
        """Update entries.

        Args:
            **kwargs: field-name/value pairs to set on the instance.

        Returns:
            object: Model Instance
        """
        # setattr on arbitrary kwargs: unknown names silently become plain
        # attributes rather than raising — callers must pass valid fields.
        for field, value in kwargs.items():
            setattr(self, field, value)
        db.session.commit()
        return self

    @classmethod
    def query_(cls, **kwargs):
        """Build a filtered query.

        Args:
            **kwargs: filter_by criteria; with no kwargs, all rows are
                returned ordered by creation time.

        Returns:
            object: un-executed SQLAlchemy query (call .all()/.first()).
        """
        if not kwargs:
            instance = cls.query.filter_by().order_by(cls.created_at)
        else:
            instance = cls.query.filter_by(**kwargs)
        return instance
|
[
"nwokeochavictor@gmail.com"
] |
nwokeochavictor@gmail.com
|
d9508c77ce6a5869b179fdf31d94ffeefa784cfd
|
800b5166148d4e3cd03825d7d20e2900fbc6c789
|
/report_form/migrations/0016_helperdataform_year_date.py
|
42b2c55917bfd96ca6159e3ab7b4b2c5526990c0
|
[] |
no_license
|
JiSuPiaoYi/dawufupin
|
4ffc979a93502eb576776673c98aaeb16021827e
|
57756a501436fabe9b27ebca2e80e60932da30dc
|
refs/heads/master
| 2020-04-07T11:37:35.728108
| 2018-11-20T09:09:50
| 2018-11-20T09:09:50
| 158,334,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-06-01 13:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the optional free-text `year_date` field to HelperDataForm,
    # stored in the explicit DB column `year_date` (max 10 chars).

    dependencies = [
        ('report_form', '0015_auto_20180531_1322'),
    ]
    operations = [
        migrations.AddField(
            model_name='helperdataform',
            name='year_date',
            field=models.CharField(blank=True, db_column='year_date', max_length=10),
        ),
    ]
|
[
"360510132@qq.com"
] |
360510132@qq.com
|
74aafc70a85dd29f4d4cc806cfa276e0aa7575a6
|
7ba5ec9aa9ddca3f9b3384fc4457b0a865c2a0a1
|
/src/282.py
|
407f92ee3a4c1d3a373b9457eb3d21a9ba194679
|
[] |
no_license
|
ecurtin2/Project-Euler
|
71f79ee90a9abd0943421677d78a6c087419e500
|
79479da7a45b3ae67c0c7ea24da5f7d43c6f25d3
|
refs/heads/master
| 2021-03-19T14:52:57.045443
| 2018-04-12T22:05:37
| 2018-04-12T22:05:37
| 100,059,180
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
"""
For non-negative integers m, n, the Ackermann function A(m, n) is defined as follows:
For example A(1, 0) = 2, A(2, 2) = 7 and A(3, 4) = 125.
Find A(n, n) and give your answer mod 14^8.
"""
|
[
"ecurtin2@illinois.edu"
] |
ecurtin2@illinois.edu
|
d48e851bbed4bf582803970b6cc8e21ed795bfdd
|
3cda2dc11e1b7b96641f61a77b3afde4b93ac43f
|
/nni/compression/pruning/tools/utils.py
|
cc8e4ce5d17a5451476be7062871887d3d11bed1
|
[
"MIT"
] |
permissive
|
Eurus-Holmes/nni
|
6da51c352e721f0241c7fd26fa70a8d7c99ef537
|
b84d25bec15ece54bf1703b1acb15d9f8919f656
|
refs/heads/master
| 2023-08-23T10:45:54.879054
| 2023-08-07T02:39:54
| 2023-08-07T02:39:54
| 163,079,164
| 3
| 2
|
MIT
| 2023-08-07T12:35:54
| 2018-12-25T12:04:16
|
Python
|
UTF-8
|
Python
| false
| false
| 314
|
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
from ...base.target_space import PruningTargetSpace
def is_active_target(target_space: PruningTargetSpace):
    """Return True when the target space configures any pruning goal.

    A target space counts as active as soon as either a sparse ratio or a
    sparse threshold has been set on it.
    """
    configured = (target_space.sparse_ratio, target_space.sparse_threshold)
    return any(value is not None for value in configured)
|
[
"noreply@github.com"
] |
Eurus-Holmes.noreply@github.com
|
776a45689ed9e05ab9016d1cc5eb5a45307f9c67
|
edc1134436a79ca883a0d25f3c8dfffc4235c514
|
/examples/cvae/baseline.py
|
68695c6d5ee2eb75ef59ea4b52fbb1df51e444b1
|
[
"Apache-2.0"
] |
permissive
|
pyro-ppl/pyro
|
2283d8ca528fc090c724a3a6e0f344e505ebbf77
|
0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81
|
refs/heads/dev
| 2023-08-18T00:35:28.014919
| 2023-08-06T21:01:36
| 2023-08-06T21:01:36
| 94,506,832
| 3,647
| 606
|
Apache-2.0
| 2023-09-14T13:52:14
| 2017-06-16T05:03:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,338
|
py
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import copy
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
class BaselineNet(nn.Module):
    """Two-hidden-layer MLP mapping a 784-pixel image to 784 per-pixel
    probabilities (sigmoid output); used as a deterministic baseline."""

    def __init__(self, hidden_1, hidden_2):
        super().__init__()
        self.fc1 = nn.Linear(784, hidden_1)
        self.fc2 = nn.Linear(hidden_1, hidden_2)
        self.fc3 = nn.Linear(hidden_2, 784)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Flatten `x` to (batch, 784) and return pixel probabilities."""
        flat = x.view(-1, 784)
        hidden = self.relu(self.fc2(self.relu(self.fc1(flat))))
        return torch.sigmoid(self.fc3(hidden))
class MaskedBCELoss(nn.Module):
    """Summed binary cross-entropy that ignores masked entries.

    Entries whose target equals ``masked_with`` contribute zero loss.
    """

    def __init__(self, masked_with=-1):
        super().__init__()
        self.masked_with = masked_with

    def forward(self, input, target):
        """Return the summed BCE over unmasked entries.

        Bug fix: recent PyTorch releases validate that every BCE target
        lies in [0, 1], so passing the raw target (which still contains
        the mask value, e.g. -1) raises a RuntimeError. Masked targets
        are replaced with 0 before the BCE call and their loss terms are
        zeroed afterwards, so the result is identical to the original
        computation on unmasked entries.
        """
        target = target.view(input.shape)
        mask = target == self.masked_with
        safe_target = torch.where(mask, torch.zeros_like(target), target)
        loss = F.binary_cross_entropy(input, safe_target, reduction="none")
        loss[mask] = 0
        return loss.sum()
def train(
    device,
    dataloaders,
    dataset_sizes,
    learning_rate,
    num_epochs,
    early_stop_patience,
    model_path,
):
    """Train a BaselineNet with early stopping and save the best weights.

    Args:
        device: torch device to train on.
        dataloaders: dict with "train" and "val" DataLoaders whose batches
            are dicts with "input" and "output" tensors.
        dataset_sizes: dict with the number of samples per phase.
        learning_rate: Adam learning rate.
        num_epochs: maximum number of epochs.
        early_stop_patience: epochs without validation improvement before
            training stops.
        model_path: file path where the best state_dict is saved.

    Returns:
        The trained BaselineNet loaded with the best validation weights.
    """
    # Train baseline
    baseline_net = BaselineNet(500, 500)
    baseline_net.to(device)
    optimizer = torch.optim.Adam(baseline_net.parameters(), lr=learning_rate)
    criterion = MaskedBCELoss()
    best_loss = np.inf
    # Bug fix: defined up front so `load_state_dict` below can never hit an
    # unbound name (previously possible if validation loss never improved,
    # e.g. NaN losses).
    best_model_wts = copy.deepcopy(baseline_net.state_dict())
    early_stop_count = 0
    stop_early = False
    for epoch in range(num_epochs):
        for phase in ["train", "val"]:
            if phase == "train":
                baseline_net.train()
            else:
                baseline_net.eval()
            running_loss = 0.0
            num_preds = 0
            bar = tqdm(
                dataloaders[phase], desc="NN Epoch {} {}".format(epoch, phase).ljust(20)
            )
            for i, batch in enumerate(bar):
                inputs = batch["input"].to(device)
                outputs = batch["output"].to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == "train"):
                    preds = baseline_net(inputs)
                    # Normalize by batch size so loss is comparable across batches.
                    loss = criterion(preds, outputs) / inputs.size(0)
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item()
                num_preds += 1
                if i % 10 == 0:
                    bar.set_postfix(
                        loss="{:.2f}".format(running_loss / num_preds),
                        early_stop_count=early_stop_count,
                    )
            epoch_loss = running_loss / dataset_sizes[phase]
            # deep copy the model weights whenever validation improves
            if phase == "val":
                if epoch_loss < best_loss:
                    best_loss = epoch_loss
                    best_model_wts = copy.deepcopy(baseline_net.state_dict())
                    early_stop_count = 0
                else:
                    early_stop_count += 1
                    if early_stop_count >= early_stop_patience:
                        # Bug fix: a bare `break` here only exited the inner
                        # phase loop, so training still ran all `num_epochs`.
                        # The flag propagates the stop to the epoch loop.
                        stop_early = True
                        break
        if stop_early:
            break
    baseline_net.load_state_dict(best_model_wts)
    baseline_net.eval()
    # Save model weights
    Path(model_path).parent.mkdir(parents=True, exist_ok=True)
    torch.save(baseline_net.state_dict(), model_path)
    return baseline_net
|
[
"noreply@github.com"
] |
pyro-ppl.noreply@github.com
|
0fc3b98016bf84ae5f510d5fa9774d1a025d4876
|
597b82737635e845fd5360e191f323669af1b2ae
|
/08_full_django/dojo_secrets/dojo_secrets/urls.py
|
eae83a229eb529cf3cb46e4debdd86ea411b8ac9
|
[] |
no_license
|
twknab/learning-python
|
1bd10497fbbe181a26f2070c147cb2fed6955178
|
75b76b2a607439aa2d8db675738adf8d3b8644df
|
refs/heads/master
| 2021-08-08T08:50:04.337490
| 2017-11-10T00:28:45
| 2017-11-10T00:28:45
| 89,213,845
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
"""dojo_secrets URL Configuration
Sets up URL files for all applications in this project.
Current applications:
- `secrets` - This application allows users to create and interact with secrets.
"""
from django.conf.urls import url, include
# Route every request prefix to the `secrets` app's own URLconf.
urlpatterns = [
    url(r'^', include("apps.secrets.urls")),
]
|
[
"natureminded@users.noreply.github.com"
] |
natureminded@users.noreply.github.com
|
8911835e9a12c6ffa09456d6e6024c5d6dfe5e0d
|
d2cac4cb6b39db7413d22598e7061d228cb9bc4f
|
/main/migrations/0027_auto_20200622_0034.py
|
dd141c29c08c24a23b41b2cbd8c2ab5e248d02d7
|
[] |
no_license
|
sasha00123/furniture
|
cf9dd818ab0bc27890af41d6228163b1c20dfc71
|
98dfb3e971b6d714cd8638aefbed2407bddf1529
|
refs/heads/master
| 2023-02-21T02:09:12.976005
| 2021-01-24T18:12:10
| 2021-01-24T18:12:10
| 262,677,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
# Generated by Django 3.0.6 on 2020-06-21 19:34
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Adds a `joined` registration timestamp to TelegramUser (backfilled
    # with "now" as a one-off default) and refreshes the verbose_name on
    # `is_admin`. Verbose names are user-facing and intentionally Russian.

    dependencies = [
        ('main', '0026_telegramuser_is_admin'),
    ]
    operations = [
        migrations.AddField(
            model_name='telegramuser',
            name='joined',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='Зарегистрирован'),
            # One-off default only for existing rows; not kept on the field.
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='telegramuser',
            name='is_admin',
            field=models.BooleanField(default=False, verbose_name='Администратор'),
        ),
    ]
|
[
"sasha@bashkort.org"
] |
sasha@bashkort.org
|
e1fba51ef584abc797614110e47f734a5df5f760
|
56eb5f7a8921a8e995fef89672e4e1abbf02f5d4
|
/2016/quals/a/a.py
|
df23faf51800b7be647a839d403914dce0911ca1
|
[
"MIT"
] |
permissive
|
enricobacis/google-code-jam
|
e5b41e5004918001d4278c0636c91d5a3576ab49
|
43b259d5dbb167ea46b072eb144bd351e8f4e059
|
refs/heads/master
| 2021-01-01T05:00:48.774563
| 2016-04-17T07:17:44
| 2016-04-17T07:17:44
| 56,395,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
# Google Code Jam 2016 - Qualification Round - Problem A
def single(func):
    # Read one stdin line and parse it with `func` (Python 2: raw_input).
    return func(raw_input())
def row(func):
    # Read one whitespace-separated stdin line, parsing each token with `func`.
    return map(func, raw_input().split())
def printcase(case, out, pattern='Case #%d: %s'):
    # Emit one Code Jam answer line (Python 2 print statement).
    print pattern % (case, out)
def digits(n):
    """Return the set of decimal digits of non-negative ``n`` (empty for 0)."""
    res = set()
    while n:
        n, d = divmod(n, 10)
        res.add(d)
    return res


def a(N, max=1000):
    """Return the first multiple x*N (x < ``max``) by which every decimal
    digit 0-9 has appeared across N, 2N, ..., x*N; None if never reached
    (the caller prints INSOMNIA in that case).

    Portability fix: ``range`` replaces the Python-2-only ``xrange``; both
    iterate identically here, so the function now also runs under Python 3.
    """
    seen = set()
    for x in range(1, max):
        seen |= digits(x * N)
        if len(seen) == 10:
            return x * N
# Read T test cases; for each N print the first "insomnia-ending" multiple,
# or INSOMNIA when the digit set is never completed within the search bound
# (a(N) returns None, which is falsy, so `or` picks the sentinel string).
T = single(int)
for t in xrange(1, T + 1):
    N = single(int)
    printcase(t, a(N) or 'INSOMNIA')
|
[
"enrico.bacis@gmail.com"
] |
enrico.bacis@gmail.com
|
7ea5df191482c3f5d23570452224674f639e9414
|
f329f3061e3a72b2562bb242dfe1a2ed07fe65f0
|
/plugins/edayshop_cms.py
|
edd1d718942dbcbac642c5095f742ef2fc9226ea
|
[
"MIT"
] |
permissive
|
ver007/getcms
|
58263174355eb16bae95b74f6efaff5069b4ce56
|
da03c07457abc266cacddc3ccd67126f0b03da3d
|
refs/heads/master
| 2021-01-19T07:01:51.453626
| 2016-04-13T08:28:14
| 2016-04-13T08:28:14
| 56,134,430
| 0
| 0
| null | 2016-04-13T08:27:38
| 2016-04-13T08:27:38
| null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
#!/usr/bin/env python
# encoding: utf-8
def run(whatweb, pluginname):
    # Fingerprint the "edayshop" CMS: report a match when the marker string
    # "djs1wz" appears in the site's inc/ys.css file.
    whatweb.recog_from_file(pluginname, "inc/ys.css", "djs1wz")
|
[
"hackerlq@gmail.com"
] |
hackerlq@gmail.com
|
c6cb4d2f3b19bd4743d379f707a29aae0cd5836e
|
bc233c24523f05708dd1e091dca817f9095e6bb5
|
/bitmovin_api_sdk/models/local_output.py
|
ea1587edd07eab7c4a840bd7109e17b67c5f5349
|
[
"MIT"
] |
permissive
|
bitmovin/bitmovin-api-sdk-python
|
e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd
|
b0860c0b1be7747cf22ad060985504da625255eb
|
refs/heads/main
| 2023-09-01T15:41:03.628720
| 2023-08-30T10:52:13
| 2023-08-30T10:52:13
| 175,209,828
| 13
| 14
|
MIT
| 2021-04-29T12:30:31
| 2019-03-12T12:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,965
|
py
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.output import Output
import pprint
import six
class LocalOutput(Output):
    """Output that writes encoding results to a path on local storage.

    Auto-generated OpenAPI model: adds only the ``path`` property on top
    of the generic :class:`Output` fields.
    """

    @poscheck_model
    def __init__(self,
                 id_=None,
                 name=None,
                 description=None,
                 created_at=None,
                 modified_at=None,
                 custom_data=None,
                 acl=None,
                 path=None):
        # type: (string_types, string_types, string_types, datetime, datetime, dict, list[AclEntry], string_types) -> None
        super(LocalOutput, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data, acl=acl)

        # Backing field for `path`; assigned via the validating setter below.
        self._path = None
        self.discriminator = None

        if path is not None:
            self.path = path

    @property
    def openapi_types(self):
        # Merge this model's field types into the parent Output's mapping.
        types = {}

        if hasattr(super(LocalOutput, self), 'openapi_types'):
            types = getattr(super(LocalOutput, self), 'openapi_types')

        types.update({
            'path': 'string_types'
        })

        return types

    @property
    def attribute_map(self):
        # Merge this model's JSON attribute names into the parent's mapping.
        attributes = {}

        if hasattr(super(LocalOutput, self), 'attribute_map'):
            attributes = getattr(super(LocalOutput, self), 'attribute_map')

        attributes.update({
            'path': 'path'
        })

        return attributes

    @property
    def path(self):
        # type: () -> string_types
        """Gets the path of this LocalOutput.

        Path to your local storage (required)

        :return: The path of this LocalOutput.
        :rtype: string_types
        """
        return self._path

    @path.setter
    def path(self, path):
        # type: (string_types) -> None
        """Sets the path of this LocalOutput.

        Path to your local storage (required)

        :param path: The path of this LocalOutput.
        :type: string_types
        """
        if path is not None:
            # Validate eagerly so serialization never sees a non-string path.
            if not isinstance(path, string_types):
                raise TypeError("Invalid type for `path`, type has to be `string_types`")

        self._path = path

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        if hasattr(super(LocalOutput, self), "to_dict"):
            result = super(LocalOutput, self).to_dict()
        # Serialize each attribute, unwrapping Enums and recursing into
        # nested models / lists / dicts; None and empty lists are skipped.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if value is None:
                continue
            if isinstance(value, list):
                if len(value) == 0:
                    continue
                result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
            elif hasattr(value, "to_dict"):
                result[self.attribute_map.get(attr)] = value.to_dict()
            elif isinstance(value, Enum):
                result[self.attribute_map.get(attr)] = value.value
            elif isinstance(value, dict):
                result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
            else:
                result[self.attribute_map.get(attr)] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, LocalOutput):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
70178561fb6044f424e0ea11e1209181b7647f8d
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D09A/REBORDD09AUN.py
|
bec780d969532c1a91a16b22dc253e67a96fb6d8
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD09AUN import recorddefs
# Message structure for the UN/EDIFACT D09A REBORD message: each entry maps a
# segment ID to its MIN/MAX repetition counts; LEVEL nests segment groups.
structure = [
    {ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'GEI', MIN: 1, MAX: 6},
        {ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
            {ID: 'CTA', MIN: 0, MAX: 1},
            {ID: 'COM', MIN: 0, MAX: 5},
            {ID: 'RFF', MIN: 0, MAX: 9},
        ]},
        {ID: 'DTM', MIN: 1, MAX: 6},
        {ID: 'FTX', MIN: 0, MAX: 6},
        {ID: 'ARD', MIN: 1, MAX: 999, LEVEL: [
            {ID: 'CUX', MIN: 1, MAX: 1},
            {ID: 'GEI', MIN: 0, MAX: 5},
            {ID: 'LOC', MIN: 0, MAX: 1},
            {ID: 'DTM', MIN: 0, MAX: 5},
            {ID: 'FTX', MIN: 0, MAX: 3},
            {ID: 'RFF', MIN: 1, MAX: 9},
            {ID: 'REL', MIN: 1, MAX: 999, LEVEL: [
                {ID: 'RFF', MIN: 0, MAX: 9},
                {ID: 'GEI', MIN: 0, MAX: 7},
                {ID: 'LOC', MIN: 0, MAX: 9},
                {ID: 'NAD', MIN: 0, MAX: 7},
                {ID: 'DTM', MIN: 0, MAX: 9},
                {ID: 'FTX', MIN: 0, MAX: 6},
                {ID: 'PCD', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'NAD', MIN: 0, MAX: 1},
                ]},
                {ID: 'MOA', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'GEI', MIN: 0, MAX: 2},
                    {ID: 'PCD', MIN: 0, MAX: 3},
                    {ID: 'DTM', MIN: 0, MAX: 2},
                    {ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
                        {ID: 'DTM', MIN: 0, MAX: 3},
                        {ID: 'COM', MIN: 0, MAX: 1},
                    ]},
                    {ID: 'CUX', MIN: 0, MAX: 1},
                ]},
            ]},
        ]},
        {ID: 'UNS', MIN: 1, MAX: 1},
        {ID: 'MOA', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 3},
                {ID: 'COM', MIN: 0, MAX: 1},
            ]},
            {ID: 'PCD', MIN: 0, MAX: 3},
        ]},
        {ID: 'UNT', MIN: 1, MAX: 1},
    ]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
a4cb9fbfcd0887f5b389ccf0a1c066f258b6fe94
|
3f182e860f4485e304bc9195d1c1eaa8b2de70aa
|
/benchmarks/pbs.py
|
262de7acb034729890bb7eb2db75ed80b25538aa
|
[
"MIT"
] |
permissive
|
spirali/estee
|
0feda169af35edc51bd4ac9cab2d36377561a576
|
55c0834db3d7da407b7c37d46fa41b5b563e2bbe
|
refs/heads/master
| 2020-03-28T02:22:22.218292
| 2019-04-05T13:55:16
| 2019-04-05T13:55:16
| 147,566,052
| 9
| 4
|
MIT
| 2019-04-05T12:32:29
| 2018-09-05T19:01:33
|
Python
|
UTF-8
|
Python
| false
| false
| 6,915
|
py
|
import json
import os
import socket
import subprocess
import sys
import time
import click
import pandas as pd
from benchmark import BenchmarkConfig, load_graphs, SCHEDULERS, CLUSTERS, NETMODELS, BANDWIDTHS, \
IMODES, SCHED_TIMINGS, create_instances, run_benchmark, parse_timeout, load_resultfile
BENCHMARK_DIR = os.path.dirname(os.path.abspath(__file__))
DASK_PORT = 8786
def dirpath(path):
    # Resolve `path` relative to the benchmark source directory.
    return os.path.join(BENCHMARK_DIR, path)
def filename(path):
    """Return the basename of `path` with its extension stripped."""
    base = os.path.basename(path)
    stem, _ext = os.path.splitext(base)
    return stem
def get_workdir(jobid, input_file, output):
    """Build the absolute run directory for a (job, input, output) triple."""
    run_name = "{}-{}-{}".format(jobid, filename(input_file), filename(output))
    return os.path.abspath(os.path.join("runs", run_name))
def parse_configs(definition, graph_frame):
    """Expand the JSON benchmark definition into BenchmarkConfig objects.

    `definition["groups"]` maps group names to either a single
    {"type", "values"} dict or a list of other group names (merged
    recursively); `definition["experiments"]` lists the groups to run.
    Missing keys fall back to every known value for that dimension.
    """
    groups = definition["groups"]
    experiments = definition["experiments"]
    group_cache = {}
    configs = []
    # Known values per benchmark dimension; "all" and omitted keys expand
    # to the full key set of the corresponding registry.
    keys = {
        "scheduler": SCHEDULERS,
        "cluster": CLUSTERS,
        "netmodel": NETMODELS,
        "bandwidth": BANDWIDTHS,
        "imode": IMODES,
        "sched-timing": SCHED_TIMINGS
    }
    in_progress = set()
    def get_group(name):
        # Resolve one group, detecting cycles via the `in_progress` stack.
        if name in in_progress:
            print("Recursive definition of {}".format(name))
            exit(1)
        in_progress.add(name)
        data = groups[name]
        result = {}
        if isinstance(data, dict):
            # Leaf group: one dimension with either "all" or a CSV of values
            # ("repeat" keeps its raw value).
            key = data["type"]
            value = data["values"]
            if key != "repeat":
                if value == "all":
                    value = set(keys[key].keys())
                else:
                    value = set(value.split(","))
            result[key] = value
        elif isinstance(data, list):
            # Composite group: union the members' value sets; "repeat"
            # merges by taking the maximum.
            result = {}
            for group in data:
                group_data = get_group(group)
                for key in group_data:
                    if key in result:
                        if key == "repeat":
                            result[key] = max(group_data[key], result[key])
                        else:
                            result[key].update(group_data[key])
                    else:
                        result[key] = group_data[key]
        else:
            assert False
        in_progress.remove(name)
        return result
    def get_value(data, key):
        # Default to 1 repetition, or to all known values of a dimension.
        if key == "repeat":
            return data.get(key, 1)
        return list(data.get(key, keys[key].keys()))
    for experiment in experiments:
        data = group_cache.setdefault(experiment, get_group(experiment))
        configs.append(BenchmarkConfig(
            graph_frame,
            get_value(data, "scheduler"),
            get_value(data, "cluster"),
            get_value(data, "netmodel"),
            get_value(data, "bandwidth"),
            get_value(data, "imode"),
            get_value(data, "sched-timing"),
            get_value(data, "repeat")
        ))
    return configs
def run_computation(index, input_file, definition):
    """Run one input/output pair of the benchmark definition.

    Creates a per-job work directory, snapshots the definition there,
    optionally spins up a Dask cluster, and (under PBS) redirects
    stdout/stderr into files inside the work directory.
    """
    from dask_cluster import start_cluster
    input = definition["inputs"][int(index)]
    output = definition["outputs"][int(index)]
    is_pbs = "PBS_JOBID" in os.environ
    workdir = get_workdir(os.environ.get("PBS_JOBID", "local-{}".format(index)), input_file, output)
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    # Snapshot the (index-annotated) definition next to the job's outputs
    # for reproducibility.
    with open(os.path.join(workdir, os.path.basename(input_file)), "w") as dst:
        definition["index"] = index
        json.dump(definition, dst, indent=4)
    dask_cluster = None
    if definition.get("dask"):
        start_cluster(port=DASK_PORT, path=BENCHMARK_DIR)
        dask_cluster = "{}:{}".format(socket.gethostname(), DASK_PORT)
    graph_frame = load_graphs([input])
    frame = load_resultfile(output, True)
    if is_pbs:
        # Under PBS, capture all console output into the work directory.
        with open(os.path.join(workdir, "output"), "w") as out:
            with open(os.path.join(workdir, "error"), "w") as err:
                sys.stdout = out
                sys.stderr = err
                run_benchmark(parse_configs(definition, graph_frame), frame, output, True,
                              parse_timeout(definition.get("timeout")), dask_cluster)
    else:
        run_benchmark(parse_configs(definition, graph_frame), frame, output, True,
                      parse_timeout(definition.get("timeout")), dask_cluster)
def run_pbs(input_file, definition):
    """Submit one PBS job per incomplete input/output pair via `qsub`.

    Pairs whose result file already covers all instances are skipped.
    Raises Exception when `qsub` returns a non-zero exit code.
    """
    nodes = 1
    if definition.get("dask"):
        nodes = 8
    print("Starting jobs from file {}".format(input_file))
    for i, input in enumerate(definition["inputs"]):
        input = definition["inputs"][i]
        output = definition["outputs"][i]
        graph_frame = load_graphs([input])
        configs = parse_configs(definition, graph_frame)
        if os.path.isfile(output):
            # Resume support: skip pairs with no remaining instances.
            oldframe = pd.read_csv(output)
            instances = create_instances(configs, oldframe, True, 5)
            if not instances:
                print("All instances were completed for {}".format(input))
                continue
        name = "estee-{}-{}".format(filename(input_file), filename(output))
        qsub_args = {
            "benchmark_dir": BENCHMARK_DIR,
            "name": name,
            "input": os.path.abspath(input_file),
            "index": i,
            "nodes": nodes,
            "working_directory": os.getcwd()
        }
        qsub_input = """
        #!/bin/bash
        #PBS -q qexp
        #PBS -N {name}
        #PBS -lselect={nodes}:ncpus=24
        source ~/.bashrc
        workon estee
        cd {working_directory}
        python {benchmark_dir}/pbs.py compute {input} --index {index}
        """.format(**qsub_args)
        # The generated script re-invokes this file's `compute` command on
        # the allocated node.
        pbs_script = "/tmp/{}-pbs-{}.sh".format(name, int(time.time()))
        with open(pbs_script, "w") as f:
            f.write(qsub_input)
        print("Starting job {}-{} ({})".format(filename(input_file), filename(output), pbs_script))
        result = subprocess.run(["qsub", pbs_script],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        if result.returncode != 0:
            raise Exception("Error during PBS submit: {}\n{}".format(result.stdout.decode(),
                                                                     result.stderr.decode()))
        print("Job id: {}".format(result.stdout.decode().strip()))
@click.command()
@click.argument("input_file")
@click.option("--index")
def compute(input_file, index):
    # Run benchmark computations described by INPUT_FILE. With --index only
    # that input/output pair is processed; otherwise every pair runs in turn.
    # (Comment rather than docstring on purpose: click shows docstrings as
    # CLI help text.)
    with open(input_file) as f:
        definition = json.load(f)
    if index is not None:
        run_computation(index, input_file, definition)
    else:
        for index in range(len(definition["inputs"])):
            run_computation(index, input_file, definition)
@click.command()
@click.argument("input_files", nargs=-1)
def submit(input_files):
    # Submit a PBS job batch for each given definition file.
    for input_file in input_files:
        with open(input_file) as f:
            definition = json.load(f)
        run_pbs(input_file, definition)
@click.group()
def cli():
    # Root click group; sub-commands are attached in the __main__ block.
    pass
if __name__ == "__main__":
    # Register both sub-commands and dispatch on argv.
    cli.add_command(submit)
    cli.add_command(compute)
    cli()
|
[
"stanislav.bohm@vsb.cz"
] |
stanislav.bohm@vsb.cz
|
5772bb6470b24532b7994a0214d80fef1d75cdec
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res/scripts/client/gui/scaleform/daapi/view/lobby/prb_windows/companies_dps.py
|
e082606ad29ce039f92198d5bb1043082e01fb4c
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 3,465
|
py
|
# 2015.11.10 21:27:16 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/prb_windows/companies_dps.py
from constants import PREBATTLE_COMPANY_DIVISION, PREBATTLE_COMPANY_DIVISION_NAMES
from gui.LobbyContext import g_lobbyContext
from gui.Scaleform.framework.entities.DAAPIDataProvider import DAAPIDataProvider
from gui.prb_control.formatters import getCompanyDivisionString
from gui.prb_control.settings import PREBATTLE_ROSTER
from helpers import i18n
from messenger import g_settings
from messenger.m_constants import USER_GUI_TYPE
from messenger.storage import storage_getter
def getDivisionsList(addAll = True):
    """Build drop-down items for the company divisions.

    Args:
        addAll: when True, prepend an "ALL" entry with data 0.

    Returns:
        list of dicts with 'data' (division ID) and 'label' keys.
    """
    items = []
    if addAll:
        items.append({'data': 0,
         'label': i18n.makeString('#prebattle:labels/company/division/ALL')})
    for divisionID in PREBATTLE_COMPANY_DIVISION.RANGE:
        divisionName = PREBATTLE_COMPANY_DIVISION_NAMES[divisionID]
        items.append({'data': divisionID,
         'label': getCompanyDivisionString(divisionName)})
    return items
class CompaniesDataProvider(DAAPIDataProvider):
    """Flash data provider listing joinable tank companies (decompiled code)."""

    def __init__(self):
        super(CompaniesDataProvider, self).__init__()
        # Backing collection of company rows exposed via `collection`.
        self.__list = []

    @storage_getter('users')
    def usersStorage(self):
        # Replaced by the storage_getter decorator; body is never executed.
        return None

    @property
    def collection(self):
        return self.__list

    def buildList(self, prebattles):
        """Rebuild the company rows from prebattle items; players start empty."""
        self.__list = []
        for item in prebattles:
            self.__list.append({'prbID': item.prbID,
             'creatorName': item.creator,
             'creatorClan': item.clanAbbrev,
             'creatorIgrType': item.creatorIgrType,
             'creatorRegion': g_lobbyContext.getRegionCode(item.creatorDbId),
             'comment': item.getCensoredComment(),
             'playersCount': item.playersCount,
             'division': getCompanyDivisionString(item.getDivisionName()),
             'players': []})

    def emptyItem(self):
        # Placeholder row shape used by the Flash side.
        return {'prbID': 0,
         'creatorName': '',
         'comment': '',
         'playersCount': 0,
         'division': '',
         'players': []}

    def setPlayers(self, prbID, roster):
        """Fill the player list of the row matching `prbID`; clear the rest.

        Returns the matching row index, or -1 when `prbID` is not present.
        """
        foundIdx = -1
        # Hoist the lookups used inside the loops.
        getUser = self.usersStorage.getUser
        getColor = g_settings.getColorScheme('rosters').getColor
        for idx, item in enumerate(self.__list):
            if item['prbID'] == prbID:
                players = []
                foundIdx = idx
                for info in roster:
                    # Only members assigned to team 1 are shown.
                    if info.roster is PREBATTLE_ROSTER.ASSIGNED_IN_TEAM1:
                        user = getUser(info.dbID)
                        if user is not None:
                            key = user.getGuiType()
                        else:
                            key = USER_GUI_TYPE.OTHER
                        players.append({'label': info.getFullName(),
                         'userName': info.name,
                         'clanAbbrev': info.clanAbbrev,
                         'igrType': info.igrType,
                         'region': g_lobbyContext.getRegionCode(info.dbID),
                         'color': getColor(key)})
                item['players'] = players
            else:
                item['players'] = []
        return foundIdx
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\prb_windows\companies_dps.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:27:16 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
0061de720027daadb201b017b60778a348bd49e4
|
4e96f383d4703ad8ee58869ed91a0c8432c8a051
|
/Cura/Cura/cura/Machines/ContainerNode.py
|
a8bbf0a537b1a075b08ce72a608469761fc4ce74
|
[
"LGPL-3.0-only",
"GPL-3.0-only"
] |
permissive
|
flight7788/3d-printing-with-moveo-1
|
b2dba26010c4fa31815bc1d2d0966161a8600081
|
7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0
|
refs/heads/Feature_Marlin_with_AlanBoy
| 2022-08-30T18:36:44.785058
| 2020-05-30T07:52:58
| 2020-05-30T07:52:58
| 212,583,912
| 0
| 0
|
MIT
| 2020-05-16T07:39:47
| 2019-10-03T13:13:01
|
C
|
UTF-8
|
Python
| false
| false
| 3,102
|
py
|
# Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Dict, Optional
from UM.ConfigurationErrorMessage import ConfigurationErrorMessage
from UM.Settings.ContainerRegistry import ContainerRegistry
from UM.Logger import Logger
from UM.Settings.InstanceContainer import InstanceContainer
## A node in the container tree. It represents one container.
#
# The container it represents is referenced by its container_id. During normal
# use of the tree, this container is not constructed. Only when parts of the
# tree need to get loaded in the container stack should it get constructed.
class ContainerNode:
    ## Creates a new node for the container tree.
    #   \param container_id The ID of the container that this node should
    #   represent.
    def __init__(self, container_id: str) -> None:
        self.container_id = container_id
        self._container = None # type: Optional[InstanceContainer]  # lazily constructed by the `container` property
        self.children_map = {} # type: Dict[str, ContainerNode] # Mapping from container ID to container node.

    ## Gets the metadata of the container that this node represents.
    #   Getting the metadata from the container directly is about 10x as fast.
    #   \return The metadata of the container in this node.
    def getMetadata(self) -> Dict[str, Any]:
        return ContainerRegistry.getInstance().findContainersMetadata(id = self.container_id)[0]

    ## Get an entry from the metadata of the container that this node contains.
    #
    #   This is just a convenience function.
    #   \param entry The metadata entry key to return.
    #   \param default If the metadata is not present or the container is not
    #   found, the value of this default is returned.
    #   \return The value of the metadata entry, or the default if it was not
    #   present.
    def getMetaDataEntry(self, entry: str, default: Any = None) -> Any:
        container_metadata = ContainerRegistry.getInstance().findContainersMetadata(id = self.container_id)
        if len(container_metadata) == 0:
            return default
        return container_metadata[0].get(entry, default)

    ## The container that this node's container ID refers to.
    #
    #   This can be used to finally instantiate the container in order to put it
    #   in the container stack. The instance is cached after the first lookup;
    #   a failed lookup reports a configuration error and returns None.
    #   \return A container.
    @property
    def container(self) -> Optional[InstanceContainer]:
        if not self._container:
            container_list = ContainerRegistry.getInstance().findInstanceContainers(id = self.container_id)
            if len(container_list) == 0:
                Logger.log("e", "Failed to lazy-load container [{container_id}]. Cannot find it.".format(container_id = self.container_id))
                error_message = ConfigurationErrorMessage.getInstance()
                error_message.addFaultyContainers(self.container_id)
                return None
            self._container = container_list[0]
        return self._container

    def __str__(self) -> str:
        return "%s[%s]" % (self.__class__.__name__, self.container_id)
|
[
"t106360212@ntut.org.tw"
] |
t106360212@ntut.org.tw
|
121baaf372cb1f46859c1327a05f3c81c6651dfd
|
c50e7b5a9597980f7b659fa4294f065b152ef9cf
|
/definindo_atributos_para_uma_classe.py
|
fb4e643e0bb0ac1649f7d7494c7ce7f8d669c712
|
[] |
no_license
|
crishonsou/modern_python3_bootcamp
|
aab60ede8f8bec4cb38dc81e896823795460ea5b
|
086e0e97dacaf05862fa8d73184b217cf5688114
|
refs/heads/main
| 2022-12-24T00:00:03.621358
| 2020-10-06T15:35:26
| 2020-10-06T15:35:26
| 301,769,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
class User:
    """Simple record of a person's first name, last name and age."""

    def __init__(self, first, last, age):
        """Store the given attributes on the new instance."""
        self.first = first
        self.last = last
        self.age = age
# Demo: build two users and print their attributes.
user1 = User("Leticia", "Carvalho", 35)
user2 = User("Cristiano", "Gonçalves", 44)
print(user1.first, user1.last, user1.age)
print(user2.first, user2.last, user2.age)
|
[
"noreply@github.com"
] |
crishonsou.noreply@github.com
|
7e7acef5cce5e50952cd4019ea5d94b671b6733f
|
18e35b5a45063dadd0a9b541198572907cfe8908
|
/13_4/4.py
|
6c6c76fce90a958343f4f5878d480ad3efde5566
|
[] |
no_license
|
syn7hgg/ejercicios-python
|
b516d04ee7b705e477b30e3fe09108d592ff45a2
|
ace895650219898cf9aac40dae523e18f63c2b13
|
refs/heads/main
| 2023-06-12T03:32:06.332235
| 2021-06-29T18:37:53
| 2021-06-29T18:37:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
import math
# Print pi at full precision, then rounded to three decimals in two ways.
print(math.pi)
# print a real number with 3 decimals using %-formatting (%.3f)
print("%.3f" % math.pi)
# equivalent form using str.format
s = "{:.3f}".format(math.pi)
print(s)
|
[
"funandreth97@gmail.com"
] |
funandreth97@gmail.com
|
295822b31175fdee626cbd7b2df44a18716f6f33
|
8e7ecd2b5121c66692d0b7c76aafad938906a1c4
|
/mist/action_server/helpers.py
|
1b42b1bfdf30ccf116a3ee3f1a452b476f60ea81
|
[
"Apache-2.0"
] |
permissive
|
pombredanne/mist
|
93e425078c68d63925cc33bf86724e1ff8380910
|
3821ccd089e78cefe78282ac7297a6b4882cc53c
|
refs/heads/master
| 2023-03-18T20:49:58.243258
| 2021-07-13T10:10:16
| 2021-07-13T10:10:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
from functools import wraps
from flask import request, abort, jsonify, Flask
def setup_custom_errors(_app: Flask):  # pragma: no cover
    """Register JSON error handlers on `_app`.

    Each handled HTTP status (400/401/403/404/409/422) is answered with a
    {"error": <description>} body and the matching status code instead of
    Flask's default HTML error pages.
    """
    @_app.errorhandler(400)
    def invalid_request(e):
        return jsonify(error=e.description), 400

    @_app.errorhandler(401)
    def request_error(e):
        return jsonify(error=e.description), 401

    @_app.errorhandler(403)
    def access_forbidden(e):
        return jsonify(error=e.description), 403

    @_app.errorhandler(404)
    def resource_not_found(e):
        return jsonify(error=e.description), 404

    @_app.errorhandler(409)
    def unauthorized(e):
        return jsonify(error=e.description), 409

    @_app.errorhandler(422)
    def unprocessable_entity(e):
        return jsonify(error=e.description), 422
def ensure_json(f):
    """Decorator rejecting non-JSON requests with HTTP 400 before calling `f`."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if request.is_json:
            return f(*args, **kwargs)
        # abort() raises, so this path never falls through.
        abort(400, description="JSON data required")
    return wrapper
|
[
"cr0hn@cr0hn.com"
] |
cr0hn@cr0hn.com
|
8e415f9388d7251829b968cdb8d65cffddadb507
|
99d8895888ab093b06a3ba03594f2a74cb97758f
|
/Scripts/Python_HSE/WEEK8/homework/solution10.py
|
951b789a6af19bec7d8728d47a81122f56db1823
|
[] |
no_license
|
AlekseyAbakumov/PythonLearn
|
4c35efeb0996f4a3221f228b57e30d595bb62637
|
fdd2cc9bdaa0fac3d582ddd5f2fbf9018218bda5
|
refs/heads/master
| 2023-03-23T20:12:42.603817
| 2021-03-16T18:22:48
| 2021-03-16T18:22:48
| 265,309,409
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
from functools import reduce
from operator import xor
# Read a row count, then that many rows of integers from stdin, and print
# the column-wise XOR of all rows, space-separated.
row_count = int(input())
rows = [[int(token) for token in input().split()] for _ in range(row_count)]
folded = reduce(lambda left, right: [a ^ b for a, b in zip(left, right)], rows)
print(*folded)
|
[
"a-starostin@mail.ru"
] |
a-starostin@mail.ru
|
19ef51007e955745a99630518184f72e57979c65
|
cef9e79f4776a97f3e1980f907bd531e02eb8ef6
|
/src/news_crawler.py
|
f5f2f020eb01483bebc58bce202549f4ca4ea8b7
|
[] |
no_license
|
Sapphirine/202005-11-Risk-Analysis-and-Default-Prediction-on-Taiwanese-Companies
|
afa54bc6bf3c15d791fe8970a6001beffcc85e52
|
f02df8db08a425b4cc29468df2b988fc0e977762
|
refs/heads/master
| 2022-07-22T20:30:02.984293
| 2020-05-15T20:58:11
| 2020-05-15T20:58:11
| 264,291,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,408
|
py
|
#
# This file is used to retrieve posts from Google news.
# Usage:
# python news_crawler.py ${CSV_FILE} ${DEST_FOLDER}
#
# Example:
# python news_crawler.py computer_peripherals.csv ./global_industry/
#
#
import requests
from bs4 import BeautifulSoup
from urllib.parse import quote
from urllib.parse import urlparse
import json
import os
import csv
import sys
import re
import urllib3
import certifi
# In some website, the article is not in <p>, so will be empty. Check articles
# dictionary to delete empty news
# Keywords that are section headers rather than company names; never searched.
skip_list = ['電腦及周邊設備', '上游', '下游']
# URL-encoded Google News date-range query fragments per quarter
# ("cd_min%3A<m/d/y>%2Ccd_max%3A<m/d/y>&tbm=nws").
search_period = {
    "Q1": "min%3A1%2F1%2F2019%2Ccd_max%3A3%2F31%2F2019&tbm=nws",
    "Q2": "min%3A4%2F1%2F2019%2Ccd_max%3A6%2F30%2F2019&tbm=nws",
    "Q3": "min%3A7%2F1%2F2019%2Ccd_max%3A9%2F30%2F2019&tbm=nws",
    # Bug fix: Q4 previously ended at 9/31/2019 — an invalid date that also
    # precedes the 10/1 start; the fourth quarter ends 12/31/2019.
    "Q4": "min%3A10%2F1%2F2019%2Ccd_max%3A12%2F31%2F2019&tbm=nws",
    "Q5": "min%3A1%2F1%2F2020%2Ccd_max%3A3%2F31%2F2020&tbm=nws"
}
def search(key_word="null", period=""):
    """Search Google News for *key_word* within *period* and scrape articles.

    Returns a dict mapping article URL -> concatenated <p> text.  Returns
    None when the keyword is empty or in ``skip_list``, and an empty dict
    when Google rejects the search request.
    """
    articles = {}
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    if key_word == "null" or key_word in skip_list:
        return None

    keyword = quote(key_word.encode('utf8'))
    print('duration = {0}'.format(search_period[period]))

    # Sample request in Google search:
    # https://www.google.com/search?q=...&source=lnt&tbs=cdr%3A1%2Ccd_min%3A7%2F1%2F2019%2Ccd_max%3A9%2F30%2F2019&tbm=nws
    res = requests.get("https://www.google.com/search?q=" + keyword + "&client=safari&rls=en&biw=1780&bih=946&source=lnt&tbs=cdr%3A1%2Ccd_" + search_period[period], verify=False)
    if res.status_code != 200:
        return articles

    # Collect candidate article URLs from the anchors of the result page.
    headline = []
    soup = BeautifulSoup(res.text, 'lxml')
    for anchor in soup.find_all('a'):
        href = anchor.get('href')
        try:
            m = re.search(r"(?P<url>https?://[^\s]+)", href)
            rul = m.group(0).split('&')[0]
            domain = urlparse(rul)
            # Skip Google-internal links and Wikipedia entries.
            if re.search('google.com', domain.netloc) or re.search('zh.wikipedia.org', domain.netloc):
                continue
            headline.append(rul)
        except Exception:
            # Anchor without a usable absolute URL (was a bare `except:`).
            continue

    # Only fetch the first 100 posts.
    for h in headline[0:100]:
        try:
            resp = requests.get(h, allow_redirects=False)
            soup = BeautifulSoup(resp.content, 'html.parser')
            if not soup.findAll(text=re.compile(key_word)):
                print('Keyword {0} not found in webpage {1}'.format(key_word, h))
                continue
        except requests.ConnectionError as e:
            print("OOPS!! Connection Error. Make sure you are connected to Internet. Technical Details given below.\n")
            print(str(e))
            continue
        except requests.Timeout as e:
            print("OOPS!! Timeout Error")
            print(str(e))
            continue
        except requests.RequestException as e:
            print("OOPS!! General Error")
            print(str(e))
            continue
        except KeyboardInterrupt:
            # BUG FIX: previously this printed a message and fell through to
            # use a possibly-undefined response object; propagate instead.
            print("Someone closed the program")
            raise

        # Concatenate the text of every <p> tag as the article body.
        page = BeautifulSoup(resp.content, "html.parser")
        cont = ''
        for l in page.findAll('p'):
            cont += l.text
        articles[h] = cont

    return articles
def parse_csv(file_name = "None", dest_path = "./", period=""):
    """Drive the crawl: search every keyword in the CSV and save the results.

    Each row holds up to three keywords (column 0 always searched; columns
    1-2 only for industry files).  NOTE(review): relies on sys.argv[1] to
    decide whether columns 1-2 exist — verify against the invocation.
    """
    with open(file_name, encoding='utf8') as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        for row in readCSV:
            print(row)
            # An empty row terminates processing of the file.
            if len(row) == 0:
                break
            if len(row) >= 1:
                msg0 = search(row[0], period)
                if msg0 != None:
                    save_data(msg0, row[0], dest_path, period)
                # Company-list style inputs carry only one column; all other
                # CSVs are assumed to carry two extra keyword columns.
                if sys.argv[1] != "Relationship.csv" and sys.argv[1] != "test.txt" and "company.txt" not in sys.argv[1] :
                    msg1 = search(row[1], period)
                    if msg1 != None:
                        save_data(msg1, row[1], dest_path, period)
                    msg2 = search(row[2], period)
                    if msg2 != None:
                        save_data(msg2, row[2], dest_path, period)
    return True
def save_data(articles, file_name, path, period=""):
    """Dump *articles* (url -> text) to '<path>/news_<file_name>_<period>.json'.

    Spaces in *file_name* become underscores and '/' characters are stripped
    so the result is a single valid path component.
    """
    file_name = file_name.replace(" ", "_")
    file_name = file_name.replace("/", "")
    print('file name = {0}'.format(file_name))
    # BUG FIX: plain string concatenation silently produced a sibling-file
    # prefix when `path` lacked a trailing slash; os.path.join handles both.
    dataset = os.path.join(path, "news_" + file_name + "_" + period + ".json")
    # save to json....
    with open(dataset, 'w', encoding='utf-8') as f:
        json.dump(articles, f, ensure_ascii=False, indent=4)
# Entry point: argv[1] = keyword CSV, argv[2] = destination folder.
if __name__== "__main__":
    print("This is news crawler")
    print('file = {0}, path = {1}'.format(sys.argv[1], sys.argv[2]))
    #urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    """
    for QQQ in search_period:
        print('period = {0}'.format(QQQ))
        data = parse_csv(sys.argv[1], sys.argv[2], QQQ)
    """
    # Currently only the 2020 Q1 period is crawled (the loop above covers all
    # periods and is kept for reference).
    QQQ = "Q5"
    print('period = {0}'.format(QQQ))
    data = parse_csv(sys.argv[1], sys.argv[2], QQQ)
|
[
"noreply@github.com"
] |
Sapphirine.noreply@github.com
|
b01528b02aaaeaf6601446f6bcbe39ab03432c27
|
ca41157d95d87a9899730637fd2339479ce80088
|
/gPhoton/photonpipe/__init__.py
|
e8bccb42bbd96b09fe8bd03c3656a75895354873
|
[
"BSD-3-Clause"
] |
permissive
|
MillionConcepts/gPhoton2
|
b5c2b36b68cfcc38b324f371a9677b86e51709df
|
0f1b054094bd476b2998e5b32aceb7e0a764ebda
|
refs/heads/main
| 2023-08-21T09:55:24.598275
| 2023-08-11T06:28:12
| 2023-08-11T06:28:12
| 383,023,797
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
"""
.. module:: PhotonPipe
:synopsis: A recreation / port of key functionality of the GALEX mission
    pipeline to generate calibrated and sky-projected photon-level data from
raw spacecraft and detector telemetry. Generates time-tagged photon
lists given mission-produced -raw6, -scst, and -asprta data.
"""
from .core import execute_photonpipe
|
[
"mstclair@millionconcepts.com"
] |
mstclair@millionconcepts.com
|
247972aaa39a632dc35b5983f933198777e8b5d0
|
6a1f69c2b11a1cfda8a2e63006b0efa721ed8d7b
|
/scoreboard_backend/const.py
|
f832e88117df79f44f73716114489f5a435ae415
|
[] |
no_license
|
o-o-overflow/dc2021q_scoreboard
|
54aa471daf6263225e8c45c71553f2ffb26c22c7
|
bb0e0054fec807dc3f6472d0b1fa5ee21f607b92
|
refs/heads/main
| 2023-04-21T19:44:39.168136
| 2021-05-03T07:39:26
| 2021-05-03T07:39:26
| 356,939,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
# Timing and proof-of-work configuration for the scoreboard backend.
ACCESS_TOKEN_DURATION = 3600  # Seconds
COMPETITION_END = 1_620_000_000  # Unix timestamp (seconds)
COMPETITION_START = 1_619_827_200  # Unix timestamp (seconds)
# Proof-of-work targets; presumably required hash prefixes — confirm in the
# verifying code.
REGISTRATION_PROOF_OF_WORK = "00ff00"
SUBMISSION_DELAY = 30  # Seconds
TIMESTAMP_MAX_DELTA = 600  # Seconds
TOKEN_PROOF_OF_WORK = "f00f"
|
[
"bbzbryce@gmail.com"
] |
bbzbryce@gmail.com
|
a4221a26f7a8f15d99820a04fb870d3c580e7c79
|
002f28763ed3e0b2114c1ba950ca0ddbd6be4cdc
|
/08_Django/day01/Day01/day1/news/views.py
|
05147949532d30c137cf3eaa601b9a1ed6874c3d
|
[] |
no_license
|
rogerbear/tarena_project
|
e599359b94eece6decc13672c6a920071cb65e4c
|
d8dc5e84d1a81943e94a72a62e09d44919c617c1
|
refs/heads/master
| 2020-05-28T00:50:44.248954
| 2019-12-20T07:26:58
| 2019-12-20T07:26:58
| 188,836,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index_views(request):
    """Serve the index view of the news app as a plain-text HTTP response."""
    body = '这是news应用中的index视图'
    return HttpResponse(body)
|
[
"402100940@qq.com"
] |
402100940@qq.com
|
488ddd5c60c724031615fd26d7275674b48d650f
|
c934e7c27f0e72385218a14b4e2a7e94a747a360
|
/google-cloud-sdk/lib/surface/certificate_manager/maps/update.py
|
7a2c24ca74ad26f07718becdb48850597811789b
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
PrateekKhatri/gcloud_cli
|
5f74b97494df4f61816026af9460b9c4d8e89431
|
849d09dd7863efecbdf4072a504e1554e119f6ae
|
refs/heads/master
| 2023-03-27T05:53:53.796695
| 2021-03-10T04:08:14
| 2021-03-10T04:08:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,719
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud certificate-manager maps update` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.certificate_manager import certificate_maps
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.certificate_manager import flags
from googlecloudsdk.command_lib.certificate_manager import resource_args
from googlecloudsdk.command_lib.certificate_manager import util
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Update(base.UpdateCommand):
  """Update a certificate map.

  This command updates existing certificate map.

  ## EXAMPLES

  To update a certificate map with name simple-map, run:

    $ {command} simple-map --description="desc" --update-labels="key=value"
  """

  @staticmethod
  def Args(parser):
    # Positional certificate-map resource plus the mutation flags.
    resource_args.AddCertificateMapResourceArg(parser, 'to update')
    labels_util.AddUpdateLabelsFlags(parser)
    flags.AddDescriptionFlagToParser(parser, 'certificate map')
    flags.AddAsyncFlagToParser(parser)

  def Run(self, args):
    client = certificate_maps.CertificateMapClient()
    map_ref = args.CONCEPTS.map.Parse()
    # Only send a new description if the flag was given explicitly.
    new_description = None
    if args.IsSpecified('description'):
      new_description = args.description
    # Compute the label patch; the current resource is fetched only when a
    # label update may actually apply.
    labels_update = None
    labels_diff = labels_util.Diff.FromUpdateArgs(args)
    if labels_diff.MayHaveUpdates():
      orig_resource = client.Get(map_ref)
      labels_update = labels_diff.Apply(
          client.messages.CertificateMap.LabelsValue,
          orig_resource.labels).GetOrNone()
    # Refuse a no-op patch.
    if new_description is None and labels_update is None:
      raise exceptions.Error('Nothing to update.')
    response = client.Patch(
        map_ref, labels=labels_update, description=new_description)
    # Optionally block until the LRO completes, then log the result.
    response = util.WaitForOperation(response, is_async=args.async_)
    log.UpdatedResource(map_ref.Name(), 'certificate map', is_async=args.async_)
    return response
|
[
"code@bootstraponline.com"
] |
code@bootstraponline.com
|
bffabba54a0d868677fd9c4efb7ae8979d02983b
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r8/Gen/DecFiles/options/12145431.py
|
c4e465ef175e1027d3e54926439b5bf2956f89a2
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/12145431.py generated: Fri, 27 Mar 2015 15:48:08
#
# Event Type: 12145431
#
# ASCII decay Descriptor: [B+ -> K+ (J/psi(1S) -> mu+ mu- {,gamma} {,gamma}) (eta -> pi+ pi- pi0)]cc
#
# Auto-generated Gauss options: configure signal generation via repeated
# hadronization (Pythia production, EvtGen decay) for PDG 521 (B+/B-).
from Configurables import Generation
Generation().EventType = 12145431
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_JpsietaK,mm,pipipi=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]

# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"

from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )

# Sample the gun momentum from a pre-computed (pt, eta) histogram.
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"

# Smear the production vertex with the beam-spot profile.
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12145431
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
9916fa68f4ceb29a1038f6d031fe37d34505a80f
|
d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4
|
/AtCoder/ABC-E/143probE.py
|
70f6181217ad03009e6bb24c889899e531bd2a4f
|
[] |
no_license
|
wattaihei/ProgrammingContest
|
0d34f42f60fa6693e04c933c978527ffaddceda7
|
c26de8d42790651aaee56df0956e0b206d1cceb4
|
refs/heads/master
| 2023-04-22T19:43:43.394907
| 2021-05-02T13:05:21
| 2021-05-02T13:05:21
| 264,400,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
import sys
input = sys.stdin.readline

def main():
    # ABC143 E: N towns, M roads, tank capacity L.  For each query (s, t),
    # print the minimum number of refuels needed, or -1 if unreachable.
    N, M, L = map(int, input().split())
    INF = 10**13
    # All-pairs shortest road distances (Floyd-Warshall below).
    dis = [[INF for _ in range(N)] for _ in range(N)]
    for _ in range(M):
        a, b, c = map(int, input().split())
        dis[a-1][b-1] = c
        dis[b-1][a-1] = c
    Q = int(input())
    Query = [list(map(int, input().split())) for _ in range(Q)]
    for k in range(N):
        for i in range(N):
            for j in range(N):
                dis[i][j] = min(dis[i][j], dis[i][k]+dis[k][j])
    # movable[i] = towns reachable from i on a single tank (distance <= L).
    movable = [[] for _ in range(N)]
    for i in range(N):
        for j in range(N):
            if i != j and dis[i][j] <= L:
                movable[i].append(j)
    # BFS over the "one tank" graph: level = number of refuels.
    for s, t in Query:
        s, t = s-1, t-1
        q = movable[s]
        checked = [False]*N
        ok = False
        for p in q:
            if p == t:
                ok = True
                break
            checked[p] = True
        checked[s] = True
        if ok:
            print(0)
            continue
        c = 0
        while q:
            c += 1
            qq = []
            for p in q:
                for np in movable[p]:
                    if np == t:
                        ok = True
                        break
                    if not checked[np]:
                        qq.append(np)
                        checked[np] = True
                # NOTE(review): this break only exits the p-loop; with a
                # non-empty frontier the while keeps running and c keeps
                # incrementing after the target is found — verify the printed
                # count is not inflated.
                if ok: break
            q = qq
        if ok:
            print(c)
        else:
            print(-1)

if __name__ == "__main__":
    main()
|
[
"wattaihei.rapyuta@gmail.com"
] |
wattaihei.rapyuta@gmail.com
|
7f144350d87384443247fefaa3f0ea005b1d6671
|
8da91c26d423bacbeee1163ac7e969904c7e4338
|
/pyvisdk/do/virtual_machine_memory_reservation_info.py
|
3707eed990dc6821036df0d45d5babbbba5e6e7d
|
[] |
no_license
|
pexip/os-python-infi-pyvisdk
|
5d8f3a3858cdd61fb76485574e74ae525cdc7e25
|
1aadea0afbc306d09f6ecb9af0e683dbbf961d20
|
refs/heads/master
| 2023-08-28T02:40:28.789786
| 2020-07-16T04:00:53
| 2020-07-16T04:00:53
| 10,032,240
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualMachineMemoryReservationInfo(vim, *args, **kwargs):
    '''The VirtualMachineReservationInfo data object type describes the amount of
    memory that is being reserved for virtual machines on the host, and how
    additional memory may be acquired.'''

    obj = vim.client.factory.create('{urn:vim25}VirtualMachineMemoryReservationInfo')

    # do some validation checking...
    # BUG FIX: the message used to claim "at least 5" while the check only
    # requires the 4 names listed in `required` below.
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'allocationPolicy', 'virtualMachineMax', 'virtualMachineMin',
        'virtualMachineReserved' ]
    optional = [ 'dynamicProperty', 'dynamicType' ]

    # Positional args fill required then optional attributes, in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword args must name a known attribute.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
[
"jmb@pexip.com"
] |
jmb@pexip.com
|
99dc217bc0e3c7d93969753502d481a115a55776
|
4bd4bacecee33cada173e427b5ecb1d758bafaad
|
/src/scalarizr/storage2/cloudfs/base.py
|
78b39ceaae1d9ed9189164619f4692252f4984d3
|
[] |
no_license
|
kenorb-contrib/scalarizr
|
3f2492b20910c42f6ab38749545fdbb79969473f
|
3cc8b64d5a1b39c4cf36f5057f1a6a84a9a74c83
|
refs/heads/master
| 2022-11-26T10:00:58.706301
| 2017-11-02T16:41:34
| 2017-11-02T16:41:34
| 108,550,233
| 0
| 2
| null | 2020-07-24T11:05:36
| 2017-10-27T13:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,270
|
py
|
import sys
import urlparse
import os
class DriverError(Exception):
pass
def raises(exc_class):
"""
Catches all exceptions from the underlying function, raises *exc_class*
instead.
.. code-block:: python
@raises(MyError)
def func():
raise Exception(message)
func() # raises MyError(message)
"""
def decorator(f):
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
exc = sys.exc_info()
raise exc_class, exc[1], exc[2]
return wrapper
return decorator
def decorate_public_methods(decorator):
"""
An easy way to decorate all methods of a class and it's descendants with
the same decorator. The two following examples are equal:
.. code-block:: python
class Foo(object):
__metaclass__ = decorate_public_methods(decorator)
def foo(self):
pass
class Bar(Foo):
def bar(self):
pass
.. code-block:: python
class Foo(object):
@decorator
def foo(self):
pass
class Bar(Foo):
@decorator
def bar(self):
pass
"""
class DecoratePublicMethods(type):
def __init__(self, name, bases, dic):
super(DecoratePublicMethods, self).__init__(name, bases, dic)
for key, val in dic.iteritems():
if not key.startswith('_') and callable(val):
setattr(self, key, decorator(val))
return DecoratePublicMethods
class CloudFileSystem(object):
__metaclass__ = decorate_public_methods(raises(DriverError))
schema = None
features = {
'multipart': False
}
def _parse_url(self, url):
"""
:returns: bucket, key
"""
o = urlparse.urlparse(url)
assert o.scheme == self.schema, 'Wrong schema: %s' % o.scheme
return o.netloc, o.path[1:]
def _format_url(self, bucket, key):
return '%s://%s/%s' % (self.schema, bucket, key)
def exists(self, url):
parent = os.path.dirname(url.rstrip('/'))
# NOTE: s3 & gcs driver converts bucket names to lowercase while url
# arg in this method stays uncoverted -> url with uppercase bucket
# name will never be found
return url in self.ls(parent)
def ls(self, url):
raise NotImplementedError()
def stat(self, url):
'''
size in bytes
type = dir | file | container
'''
raise NotImplementedError()
def put(self, src, url, report_to=None):
raise NotImplementedError()
def get(self, url, dst, report_to=None):
raise NotImplementedError()
def delete(self, url):
raise NotImplementedError()
def multipart_init(self, path, part_size):
'''
:returns: upload_id
'''
raise NotImplementedError()
def multipart_put(self, upload_id, src):
raise NotImplementedError()
def multipart_complete(self, upload_id):
raise NotImplementedError()
def multipart_abort(self, upload_id):
raise NotImplementedError()
|
[
"kenorb@users.noreply.github.com"
] |
kenorb@users.noreply.github.com
|
f04339670af036069b46af8492061f639973847f
|
088314e3bd6ca7ef34d15f2aa45b743b363641d9
|
/tasks/R2R/speaker/paths.py
|
974d47598f0ee0d6c3af239f2de1668c10bf89b4
|
[
"MIT"
] |
permissive
|
weituo12321/PREVALENT_R2R
|
7a27d580fcbe8f72a209697d053ca3eb2013e3a0
|
868fb53d6b7978bbb10439a59e65044c811ee5c2
|
refs/heads/master
| 2022-11-24T00:54:32.385940
| 2020-07-24T17:56:42
| 2020-07-24T17:56:42
| 248,832,547
| 8
| 7
|
MIT
| 2022-11-22T02:10:54
| 2020-03-20T19:07:08
|
Python
|
UTF-8
|
Python
| false
| false
| 637
|
py
|
# Image-feature and vocabulary file locations used by the R2R speaker model.

# Spatially-preserved (convolutional) ResNet feature stores, keyed by backbone.
convolutional_feature_store_paths = {
    'imagenet': 'img_features/imagenet_convolutional',
    'places365': 'img_features/places365_convolutional',
}
# Mean-pooled ResNet-152 feature TSVs, keyed by pre-training dataset.
mean_pooled_feature_store_paths = {
    'imagenet': 'img_features/ResNet-152-imagenet.tsv',
    'places365': 'img_features/ResNet-152-places365.tsv',
}
# Bottom-up attention features (name suggests 10-100 regions per image —
# confirm in the loader) plus their pickle cache locations.
bottom_up_feature_store_path = "img_features/bottom_up_10_100"
bottom_up_feature_cache_path = "img_features/bottom_up_10_100.pkl"
bottom_up_feature_cache_dir = "img_features/bottom_up_10_100_cache"
# Visual Genome vocabularies for bottom-up attributes and object classes.
bottom_up_attribute_path = "data/visual_genome/attributes_vocab.txt"
bottom_up_object_path = "data/visual_genome/objects_vocab.txt"
|
[
"weituo.hao@gmail.com"
] |
weituo.hao@gmail.com
|
2e9eff8dee310ca6608ca04cd53b779bdaac0063
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02570/s396702537.py
|
9fe0af1e414c1c161a8894caf8757f2e4915c3db
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
import sys; input = sys.stdin.readline
from math import ceil

# Read distance d, time limit t and speed s; print "Yes" iff d can be covered
# within t at speed s, i.e. d/t <= s.
d, t, s = map(int, input().split())
# u = ceil(d/t), l = floor(d/t).  When they agree, d is divisible by t and the
# comparison can stay in exact integer arithmetic.
u, l = ceil(d/t), d//t
if u == l:
    if u <= s: print("Yes")
    else: print("No")
else:
    if d/t <= s: print("Yes")
    else:print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
35caceecfe007534fe487f5b4ff7aa3dbc3ca320
|
a62348929ea1911e7842beef868e5fa4d64d7927
|
/api/server.py
|
8184d335c168b6db069c51ebc95d73fb154b6084
|
[
"MIT"
] |
permissive
|
RENCI/pds-server-mock
|
8bfa040421b4ffcfd0bfe7332ff39cecc1b84408
|
8d003561948b23aa4260c32d453a8f2c901e1dc7
|
refs/heads/master
| 2021-01-05T21:18:26.476560
| 2020-12-05T05:15:00
| 2020-12-05T05:15:00
| 241,140,488
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
import connexion
def create_app():
    """Create the Connexion (Flask-based) application.

    Loads the OpenAPI specification 'my_api.yaml' from the 'openapi/'
    directory relative to this module and returns the configured app.
    """
    app = connexion.FlaskApp(__name__, specification_dir='openapi/')
    app.add_api('my_api.yaml')
    return app
|
[
"xuh@cs.unc.edu"
] |
xuh@cs.unc.edu
|
c370c472aafebd84a2b87575f6dcb0e37b94e932
|
6472c4553c49a8c05103355ff53b1cbb7f025e8f
|
/pava/implementation/natives/sun/nio/ch/WindowsAsynchronousSocketChannelImpl.py
|
a42dc13374784791b67769b761b7248140f118ab
|
[
"MIT"
] |
permissive
|
laffra/pava
|
0b012e27c207a3e0f3ca772667b0c32168fe3123
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
refs/heads/master
| 2021-01-23T04:23:22.887146
| 2020-12-21T23:14:09
| 2020-12-21T23:14:09
| 86,191,143
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
def add_native_methods(clazz):
    """Attach stub implementations of the native socket-channel methods to *clazz*.

    Every stub raises NotImplementedError; the mangled names encode the
    original Java native signatures.
    """
    def initIDs____(a0):
        raise NotImplementedError()

    def connect0__long__boolean__java_net_InetAddress__int__long__(a0, a1, a2, a3, a4, a5):
        raise NotImplementedError()

    def updateConnectContext__long__(a0, a1):
        raise NotImplementedError()

    def read0__long__int__long__long__(a0, a1, a2, a3, a4):
        raise NotImplementedError()

    def write0__long__int__long__long__(a0, a1, a2, a3, a4):
        raise NotImplementedError()

    def shutdown0__long__int__(a0, a1, a2):
        raise NotImplementedError()

    def closesocket0__long__(a0, a1):
        raise NotImplementedError()

    # Bind every stub onto the class as a static method under its own name.
    for stub in (
        initIDs____,
        connect0__long__boolean__java_net_InetAddress__int__long__,
        updateConnectContext__long__,
        read0__long__int__long__long__,
        write0__long__int__long__long__,
        shutdown0__long__int__,
        closesocket0__long__,
    ):
        setattr(clazz, stub.__name__, staticmethod(stub))
|
[
"iV29VQzQVT11"
] |
iV29VQzQVT11
|
c298645d8fbccbc1048f2356f90a670ad3994bf9
|
46e9a375f0562f5dfd9282da20cbb34900f6b230
|
/Python/986.py
|
083a36ffc45f5cd0093f7a1ef29041c027ef9106
|
[] |
no_license
|
MohisinShaik/LeetCode
|
da90a65a4980d9daa1bc11581f4d0aa415ddb2f4
|
dd788a3bffc8c5121cbb83d8c2efe077bf8693db
|
refs/heads/master
| 2022-04-23T11:16:42.855063
| 2020-04-16T04:58:39
| 2020-04-16T04:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
class Solution:
    def intervalIntersection(self, A: List[List[int]], B: List[List[int]]) -> List[List[int]]:
        """Return the intersection of two sorted lists of disjoint closed intervals.

        Two-pointer sweep (LeetCode 986): the intervals A[i] and B[j] overlap
        exactly when the later start does not pass the earlier end; afterwards
        the pointer whose interval ends first is advanced.
        """
        res = []
        i = j = 0
        while i < len(A) and j < len(B):
            # Overlap (possibly a single point) iff max(starts) <= min(ends).
            start = max(A[i][0], B[j][0])
            end = min(A[i][1], B[j][1])
            if start <= end:
                res.append([start, end])
            # Drop the interval that finishes first; it cannot intersect
            # anything further along in the other list.
            if A[i][1] < B[j][1]:
                i += 1
            else:
                j += 1
        return res
|
[
"tvandcc@gmail.com"
] |
tvandcc@gmail.com
|
0183f3f7f2baf910cf0facb6ce6d58af7ec6df00
|
ebcb092d796366d36a1afe9c381cd9e4c31026f1
|
/redis/python_redis_publisher.py
|
6e0bd349be9638af02233a09bf270796fdcc99af
|
[
"MIT"
] |
permissive
|
MiracleWong/PythonBasic
|
d2e0e56c88781ebf9c6870f185ceaba6ffaa21ca
|
cb8ec59dc646842b41966ea4ea4b1ee66a342eee
|
refs/heads/master
| 2021-06-06T22:26:08.780210
| 2020-01-08T14:48:54
| 2020-01-08T14:48:54
| 96,536,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
#!/usr/local/bin/python
# -*- coding:utf-8 -*-
# Publisher demo.  NOTE(review): the original comment said "subscribe" (订阅),
# but this script publishes — confirm the intended role.
from RedisHelper import RedisHelper

obj = RedisHelper()
obj.publish('nihao')  # publish the message 'nihao' (channel defined in RedisHelper)
|
[
"cfwr1991@126.com"
] |
cfwr1991@126.com
|
62e37ebc3e6e0bfb19bb51f5101b05e188c158be
|
3c9011b549dd06b6344c6235ed22b9dd483365d1
|
/OrientacaoObjeto/aula18.py
|
1ed8f6b0cc2da4d9f90b4791c271fa308f1c8a10
|
[] |
no_license
|
joaoo-vittor/estudo-python
|
1411f4c3620bbc5f6b7c674a096cae8f90f0db8d
|
5562d823dd574d7df49fddca87a1fbd319356969
|
refs/heads/master
| 2023-05-31T17:59:16.752835
| 2021-06-25T04:54:56
| 2021-06-25T04:54:56
| 292,372,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
from contextlib import contextmanager
"""
Aula 18
Context Manager - Criando e Usando gerenciadores de contexto
"""
"""
class Arquivo:
def __init__(self, arquivo, modo):
print('__init__')
self.arquivo = open(arquivo, modo)
def __enter__(self):
print('__enter__')
return self.arquivo
def __exit__(self, exc_type, exc_val, exc_tb):
print('__exit__')
self.arquivo.close()
with Arquivo('teste.txt', 'w') as f:
f.write('Hello World!')
"""
@contextmanager
def abrir(arquivo, modo):
    """Context manager that opens *arquivo* in mode *modo* and always closes it.

    BUG FIX: the original rebound the ``arquivo`` parameter and unconditionally
    called ``.close()`` in ``finally``; when ``open()`` failed that raised
    ``AttributeError`` on the path string, masking the real error.
    """
    f = None
    try:
        print('abrindo arquivo')
        f = open(arquivo, modo)
        yield f
    finally:
        print('fechando arquivo')
        # Close only if open() actually succeeded.
        if f is not None:
            f.close()
# Exercise the custom context manager: write a greeting, then auto-close.
with abrir('teste.txt', 'w') as f:
    f.write('Ola, mundo')
|
[
"joaoo.vittor007@gmail.com"
] |
joaoo.vittor007@gmail.com
|
9c69b46842a2a97671f844c0372d9dcd3097c9b1
|
9e30a239886210dc57e6c7cb9a71ad95a840712e
|
/views/get_post_reactions/tests/__init__.py
|
c021c82af21c3a42f479640bfdc616e5ccf42f8a
|
[] |
no_license
|
sridhar562345/fb_post_v2
|
0a26d661a3f335d9a9cf129c24265d7674b3fb22
|
dfd150ab5521f05291f66944d7a8686a00477547
|
refs/heads/master
| 2022-11-08T00:32:35.752419
| 2020-06-23T15:32:02
| 2020-06-23T15:32:02
| 274,440,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
# pylint: disable=wrong-import-position

# Test-suite configuration for the "get post reactions" API operation.
APP_NAME = "fb_post_v2"
OPERATION_NAME = "get_post_reactions"
REQUEST_METHOD = "get"
URL_SUFFIX = "posts/{post_id}/reactions/v1/"

# Imported after the constants above (hence the pylint disable at the top).
from .test_case_01 import TestCase01GetPostReactionsAPITestCase

__all__ = [
    "TestCase01GetPostReactionsAPITestCase"
]
|
[
"="
] |
=
|
c370b9cb49b49d7f9bf6414b9561be8f703a7b7a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03776/s172150439.py
|
3a29529fed9c416827a4e10cbb9dd63cdab6e24e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
# Memoization cache for binomial coefficients, keyed by (n, r).
nCr = {}

def cmb(n, r):
    """Return C(n, r) via Pascal's rule, memoized in the module-level cache."""
    if r == 0 or r == n:
        return 1
    if r == 1:
        return n
    key = (n, r)
    if key not in nCr:
        nCr[key] = cmb(n - 1, r) + cmb(n - 1, r - 1)
    return nCr[key]
# Choose between A and B of the N values to maximise the mean; print the best
# mean and the number of ways it is achieved (uses cmb above).
N,A,B = map(int,input().split())
v = sorted(list(map(int,input().split())),reverse=True)
"""
if len(set(v)) == 1:
    print(1)
    print(1125899906842623)
    exit()
"""
# The maximum mean is always the average of the A largest values.
m = sum(v[:A])/A
print(m)
if len(set(v[:A]))==1:
    # All A chosen values are equal: any subset of size A..B drawn from the c
    # copies of that value attains the same mean.
    ans = 0
    c = v.count(v[0])
    for i in range(A,B+1):
        if i <= c:
            ans += cmb(c,i)
    print(ans)
    exit()
# Otherwise only the choice of which copies of the smallest chosen value are
# taken can vary: C(total copies, copies needed) ways.
mi = min(v[:A])
n = v[:A].count(mi)
m = v.count(mi)
print(cmb(m,n))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a612afd2b5fe7e22ba24750568476b126ced3165
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_151/65.py
|
72e9454d35bbe107abb5efe84efe7d59d6024fa8
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
#!/usr/bin/pypy
import itertools
def cnts(S):
    """Count the nodes of the trie built from the strings in S, root included."""
    total = 1  # the root node
    root = {}
    for word in S:
        node = root
        for ch in word:
            if ch not in node:
                # New character at this depth: allocate a fresh trie node.
                node[ch] = {}
                total += 1
            node = node[ch]
    return total
def calc(S, ost, pcnt=0):
    # Recursively partition S into `ost` groups, accumulating each group's
    # trie-node count in pcnt; at the base case update the global maximum
    # (pmax) and the number of partitions achieving it (rcnt).
    if ost==1:
        global pmax, rcnt
        cnt = cnts(S)
        if cnt + pcnt > pmax:
            pmax = cnt + pcnt
            rcnt = 1
        elif cnt + pcnt == pmax:
            rcnt += 1
        return
    res = 0  # NOTE(review): appears unused — confirm before removing.
    # Try every non-empty subset of size i as the next group (leaving at least
    # ost-1 elements for the remaining groups), then recurse on the rest.
    for i in range(1,len(S)-ost+2):
        for comb in itertools.combinations(S,i):
            l1 = comb
            l2 = [x for x in S if x not in l1]
            calc(l2,ost-1,pcnt+cnts(l1))
def solve():
    # Python 2 (raw_input): read M strings and group count N, exhaustively
    # partition via calc(), and return "max_count ways" for this case.
    M, N = map(int,raw_input().split())
    S = [raw_input().strip() for _ in range(M)]
    global pmax, rcnt
    pmax = rcnt = 0
    calc(S, N)
    return "%d %d"%(pmax, rcnt)
# Python 2 entry point: one "Case #i: <answer>" line per test case.
if __name__ == "__main__":
    T = int(raw_input())
    for t in range(1,T+1):
        print "Case #%d:"%t,solve()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
41089bdf4fb39893bf5360ec2ffff7c5a0f6d71e
|
127e99fbdc4e04f90c0afc6f4d076cc3d7fdce06
|
/2021_하반기 코테연습/boj1197.py
|
ca8be4cf43b06e8029b3a28c61c048807ef92221
|
[] |
no_license
|
holim0/Algo_Study
|
54a6f10239368c6cf230b9f1273fe42caa97401c
|
ce734dcde091fa7f29b66dd3fb86d7a6109e8d9c
|
refs/heads/master
| 2023-08-25T14:07:56.420288
| 2021-10-25T12:28:23
| 2021-10-25T12:28:23
| 276,076,057
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from heapq import *
import sys
from collections import defaultdict
input = sys.stdin.readline

# BOJ 1197: weight of a minimum spanning tree via Prim's algorithm.
answer = 0

v, e = map(int, input().split())

# Undirected adjacency list: node -> list of (neighbor, weight).
link = defaultdict(list)
for _ in range(e):
    a, b, c = map(int, input().split())
    link[a].append((b, c))
    link[b].append((a, c))

# Vertices already in the tree.  FIX: this was a list, making every
# membership test O(n) inside the heap loop; a set makes it O(1).
in_tree = set()
h = []
heappush(h, (0, 1))  # seed Prim's from vertex 1 at cost 0

while h:
    weight, cur = heappop(h)
    if cur not in in_tree:
        in_tree.add(cur)
        answer += weight
        for nxt, nw in link[cur]:
            if nxt not in in_tree:
                heappush(h, (nw, nxt))

print(answer)
|
[
"holim1226@gmail.com"
] |
holim1226@gmail.com
|
c184003f838b0b7d4dd109970baf5f2b4711a076
|
e7b665624c1134f7a6b3ab7c043cfa5ec83227bb
|
/CoGAN/impl2_tf/cogan_tf.py
|
a0037d11f6c7e06eb0738bf75acc6851b1e76fc4
|
[] |
no_license
|
zhijie-ai/GAN
|
46f896909d1f5caedb7725cf44d328e24f4ad699
|
5e64b416209058721c582c3b71a1e9ca25cf169d
|
refs/heads/master
| 2022-10-26T10:28:08.279901
| 2019-08-26T14:09:15
| 2019-08-26T14:09:15
| 204,423,289
| 1
| 3
| null | 2022-10-07T00:52:36
| 2019-08-26T07:45:08
|
Python
|
UTF-8
|
Python
| false
| false
| 5,513
|
py
|
#----------------------------------------------
# -*- encoding=utf-8 -*- #
# __author__:'xiaojie' #
# CreateTime: #
# 2019/7/4 10:01 #
# #
# 天下风云出我辈, #
# 一入江湖岁月催。 #
# 皇图霸业谈笑中, #
# 不胜人生一场醉。 #
#----------------------------------------------
# https://github.com/wiseodd/generative-models/blob/master/GAN/coupled_gan/cogan_tensorflow.py
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import scipy.ndimage.interpolation
# Load MNIST and define hyperparameters for the coupled GAN.
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mb_size = 32  # minibatch size
X_dim = mnist.train.images.shape[1]  # flattened image size (784)
y_dim = mnist.train.labels.shape[1]  # number of classes (one-hot)
z_dim = 10   # latent noise dimension
h_dim = 128  # hidden layer width
eps = 1e-8   # numerical floor inside the log-losses
lr = 1e-3    # Adam learning rate
d_steps = 3
def plot(samples):
    """Plot up to 16 flattened 28x28 grayscale samples on a 4x4 grid.

    Returns the matplotlib figure so the caller can save/close it.
    """
    fig = plt.figure(figsize=(4, 4))
    gs = gridspec.GridSpec(4, 4)
    gs.update(wspace=0.05, hspace=0.05)

    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(28, 28), cmap='Greys_r')

    return fig
def xavier_init(size):
    """Sample initial weights of shape *size* from a zero-mean normal whose
    std-dev follows the Xavier heuristic 1/sqrt(fan_in / 2)."""
    in_dim = size[0]
    xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
    return tf.random_normal(shape=size, stddev=xavier_stddev)
# Inputs: one image batch per domain plus the shared latent noise.
X1 = tf.placeholder(tf.float32, shape=[None, X_dim])
X2 = tf.placeholder(tf.float32, shape=[None, X_dim])
z = tf.placeholder(tf.float32, shape=[None, z_dim])

# Generator parameters: first layer is SHARED between the two generators,
# the output layers are per-domain (CoGAN weight sharing).
G_W1 = tf.Variable(xavier_init([z_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))

G1_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
G1_b2 = tf.Variable(tf.zeros(shape=[X_dim]))

G2_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
G2_b2 = tf.Variable(tf.zeros(shape=[X_dim]))
def G(z):
    """Coupled generators: one shared hidden layer feeds two domain heads.

    Returns a pair of generated image batches (domain 1, domain 2) in [0, 1].
    """
    shared = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
    out1 = tf.nn.sigmoid(tf.matmul(shared, G1_W2) + G1_b2)
    out2 = tf.nn.sigmoid(tf.matmul(shared, G2_W2) + G2_b2)
    return out1, out2
# Discriminator weights. Each discriminator has its own unshared input layer
# (D1_W1 / D2_W1); the output layer (D_W2 / D_b2) is SHARED between the two —
# the mirror image of the generators' weight-sharing scheme.
D1_W1 = tf.Variable(xavier_init([X_dim, h_dim]))
D1_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D2_W1 = tf.Variable(xavier_init([X_dim, h_dim]))
D2_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
def D(X1, X2):
    """Coupled discriminators: per-domain input layers, shared output layer.

    Returns the probability that each batch is real, for domains 1 and 2.
    """
    hidden1 = tf.nn.relu(tf.matmul(X1, D1_W1) + D1_b1)
    hidden2 = tf.nn.relu(tf.matmul(X2, D2_W1) + D2_b1)
    prob1 = tf.nn.sigmoid(tf.matmul(hidden1, D_W2) + D_b2)
    prob2 = tf.nn.sigmoid(tf.matmul(hidden2, D_W2) + D_b2)
    return prob1, prob2
# Parameter groups: per-domain (unshared) vs shared variables. The shared
# variables receive gradient contributions from both domains, so their
# gradients are halved below before being applied.
theta_G = [G1_W2,G2_W2,G1_b2,G2_b2]
theta_G_shared = [G_W1,G_b1]
theta_D = [D1_W1,D2_W1,D1_b1,D2_b1]
theta_D_shared = [D_W2,D_b2]
# Build the graph: generate one coupled pair, score real and fake batches.
G1_sample,G2_sample = G(z)
D1_real,D2_real = D(X1,X2)
D1_fake,D2_fake = D(G1_sample,G2_sample)
# Discriminator losses: standard GAN cross-entropy per domain (eps avoids log(0)).
D1_loss = -tf.reduce_mean(tf.log(D1_real+eps)+tf.log(1.-D1_fake+eps))
D2_loss = -tf.reduce_mean(tf.log(D2_real+eps)+tf.log(1.-D2_fake+eps))
D_loss = D1_loss + D2_loss
# Generator losses: non-saturating form, -log D(G(z)), per domain.
G1_loss = -tf.reduce_mean(tf.log(D1_fake+eps))
G2_loss = -tf.reduce_mean(tf.log(D2_fake+eps))
G_loss = G1_loss+G2_loss
# D optimizer
D_opt = tf.train.AdamOptimizer(learning_rate=lr)
# Compute the gradients separately for unshared and shared variables
D_gv = D_opt.compute_gradients(D_loss,theta_D)
D_shared_gv = D_opt.compute_gradients(D_loss,theta_D_shared)
# Average by halving the shared gradients (they accumulate from both domains)
D_shared_gv = [(0.5*x[0],x[1]) for x in D_shared_gv]
# Apply both gradient sets as a single training op
D_solver = tf.group(D_opt.apply_gradients(D_gv),D_opt.apply_gradients(D_shared_gv))
# G optimizer: same scheme as for D
G_opt = tf.train.AdamOptimizer(learning_rate=lr)
# Compute the gradients separately for unshared and shared variables
G_gv = G_opt.compute_gradients(G_loss,theta_G)
G_shared_gv = G_opt.compute_gradients(G_loss,theta_G_shared)
# Average by halving the shared gradients
G_shared_gv = [(0.5*x[0],x[1]) for x in G_shared_gv]
# Apply both gradient sets as a single training op
G_solver = tf.group(G_opt.apply_gradients(G_gv),G_opt.apply_gradients(G_shared_gv))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
X_train = mnist.train.images
half = int(X_train.shape[0]/2)
# Domain 1: first half of MNIST, unmodified images
X_train1 = X_train[:half]
print('X_train1.shape',X_train1.shape)
# Domain 2: second half of MNIST, rotated 90 degrees in the image plane
X_train2 = X_train[half:].reshape(-1,28,28)
X_train2 = scipy.ndimage.interpolation.rotate(X_train2,90,axes=(1,2))
X_train2 = X_train2.reshape(-1,28*28)
print('X_train2.shape',X_train2.shape)
# Free the full copy; only the two halves are needed from here on
del X_train
def sample_X(X, size):
    """Return a random contiguous minibatch of `size` rows from `X`.

    Picks a uniformly random window start in [0, len(X) - size]. Fix over the
    original: the upper bound of randint is exclusive, so the old
    `randint(0, X.shape[0] - size)` could never select the final window and
    raised ValueError (empty range) when X.shape[0] == size.
    """
    start_idx = np.random.randint(0, X.shape[0] - size + 1)
    return X[start_idx:start_idx + size]
def sample_z(m, n):
    """Draw an (m, n) matrix of latent noise, uniform on [-1, 1)."""
    return np.random.uniform(low=-1., high=1., size=(m, n))
# Output directory for the periodically saved sample grids.
if not os.path.exists('out/'):
    os.mkdir('out/')
i = 0  # running index used to name the saved figure files
for it in range(1000000):
    # One minibatch per domain plus a shared noise batch.
    X1_mb, X2_mb = sample_X(X_train1, mb_size), sample_X(X_train2, mb_size)
    z_mb = sample_z(mb_size, z_dim)
    # Alternate one discriminator step and one generator step per iteration.
    _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X1: X1_mb, X2: X2_mb, z: z_mb})
    _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={z: z_mb})
    if it % 1000 == 0:
        # Generate 8 coupled pairs and stack them into a 16-image grid
        # (rows 1-2: domain 1; rows 3-4: the coupled domain-2 outputs).
        sample1, sample2 = sess.run([G1_sample, G2_sample], feed_dict={z: sample_z(8, z_dim)})
        samples = np.vstack([sample1, sample2])
        print('Iter:{};D_loss:{:.4};G_loss:{:.4}'.format(it, D_loss_curr, G_loss_curr))
        fig = plot(samples)
        # Fix: the keyword was misspelled 'bbox_inchs', which matplotlib
        # silently ignores or rejects; 'bbox_inches' is the real argument.
        plt.savefig('out/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')
        i += 1
        plt.close(fig)
|
[
"15311484394@189.cn"
] |
15311484394@189.cn
|
062c3d98dcd6b0c5dddfb9ca904e99660a416d33
|
6c5daf5133656a33574dc2f5b62b9f1a1bdf1390
|
/draw-pictures/draw_ppqq.py
|
38729585225143f55f0e755f3a5d0a742a8ec286
|
[] |
no_license
|
RobinChen121/Python-Practices
|
6c10b721dce3a8d2b76e190959d0940c52f0d1cc
|
85bd9ad30c245dd62dc7ea837f964eaecbe24ed9
|
refs/heads/master
| 2023-08-31T10:08:01.613828
| 2023-08-27T14:51:46
| 2023-08-27T14:51:46
| 142,564,793
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 22 22:30:23 2020
@author: zhen chen
MIT Licence.
Python version: 3.7
Description:
"""
import scipy.stats as st
import matplotlib.pyplot as plt
import numpy as np
# Draw n samples from N(mean=5, sd=2) and sort them for plotting positions.
n = 100
samples = st.norm.rvs(loc = 5, scale = 2, size = n)
samples_sort = sorted(samples)
# PP plot: empirical plotting positions (i - 0.5)/n on x against the
# theoretical CDF evaluated at the sorted sample on y; points close to the
# line y = x indicate the sample fits the hypothesized distribution.
x_labels_p = np.arange(1/(2*n), 1, 1/n)
y_labels_p = st.norm.cdf(samples_sort, loc = 5, scale = 2)
plt.scatter(x_labels_p, y_labels_p)
plt.title('PP plot for normal distribution')
plt.show()
# QQ plot: sorted sample values (empirical quantiles) on x against the
# theoretical quantiles at the same plotting positions on y.
x_labels_q = samples_sort
y_labels_q = st.norm.ppf(x_labels_p, loc = 5, scale = 2)
plt.scatter(x_labels_q, y_labels_q)
plt.title('QQ plot for normal distribution')
plt.show()
# The same QQ plot produced by statsmodels, with a 45-degree reference line.
import statsmodels.api as sm
probplot = sm.ProbPlot(samples, dist = st.norm, loc = 5, scale = 2)
probplot.qqplot(line='45')
#res = st.probplot(samples, sparams=(5, 2), plot = plt) # without sparams, probplot standardizes the sample data by default
#plt.title('QQ plot by probplot for normal distribution')
#plt.show()
|
[
"40953071+RobinChen121@users.noreply.github.com"
] |
40953071+RobinChen121@users.noreply.github.com
|
1178707ecfe919527b34e6a1c3cdff681fb91f2e
|
0c110eb32f2eaea5c65d40bda846ddc05757ced6
|
/python_scripts/pimriscripts/mastersort/scripts_dir/p7575_run2M6.py
|
77c08e0b84c585a82c7e3b37095796f621c6ee0b
|
[] |
no_license
|
nyspisoccog/ks_scripts
|
792148a288d1a9d808e397c1d2e93deda2580ff4
|
744b5a9dfa0f958062fc66e0331613faaaee5419
|
refs/heads/master
| 2021-01-18T14:22:25.291331
| 2018-10-15T13:08:24
| 2018-10-15T13:08:24
| 46,814,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7575', 'run2M6']
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/3184/E3184_e858822/s944135_5610_2M6_s34', '/ifs/scratch/pimri/soccog/test_working/7575/run2M6')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7575/run2M6','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7575/run2M6')
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7575/run2M6'):
if 'MRDC' in f and 'gz' not in f:
old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7575/run2M6', f)
new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7575/run2M6', f + '.dcm')
os.rename(old, new)
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7575/run2M6', '7575_run2M6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7575/run2M6', '7575_run2M6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
|
[
"katherine@Katherines-MacBook-Pro.local"
] |
katherine@Katherines-MacBook-Pro.local
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.