blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
62be565d1ad0e2bc743c1f5b5682cd2bdeef76c1
|
2e9ffd88923b1eb90047fe5c6a633a6d29c111a8
|
/muddery/typeclasses/players.py
|
76d46460f1c6c62028e28ae5e66dedef392932d5
|
[
"BSD-3-Clause"
] |
permissive
|
externIE/muddery
|
4f7424abf2eac4280baef86ba5752e8d8ddee16d
|
ee4165e97e1510e06fa1e8120a35878a6c2862b7
|
refs/heads/master
| 2020-04-06T06:48:41.501309
| 2016-08-16T12:58:47
| 2016-08-16T12:58:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,369
|
py
|
"""
This is adapt from evennia/evennia/players/players.py.
The licence of Evennia can be found in evennia/LICENSE.txt.
Player
The Player represents the game "account" and each login has only one
Player object. A Player is what chats on default channels but has no
other in-game-world existence. Rather the Player puppets Objects (such
as Characters) in order to actually participate in the game world.
Guest
Guest players are simple low-level accounts that are created/deleted
on the fly and allow users to test the game without the commitment
of a full registration. Guest accounts are deactivated by default; to
activate them, add the following line to your settings file:
GUEST_ENABLED = True
You will also need to modify the connection screen to reflect the
possibility to connect with a guest account. The setting file accepts
several more options for customizing the Guest account system.
"""
import json
import logging

from evennia.utils.utils import make_iter, to_str
from evennia.players.players import DefaultPlayer, DefaultGuest
class MudderyPlayer(DefaultPlayer):
    """
    This class describes the actual OOC player (i.e. the user connecting
    to the MUD). It does NOT have visual appearance in the game world (that
    is handled by the character which is connected to this). Comm channels
    are attended/joined using this object.
    It can be useful e.g. for storing configuration options for your game, but
    should generally not hold any character-related info (that's best handled
    on the character level).
    Can be set using BASE_PLAYER_TYPECLASS.
    """
    def msg(self, text=None, from_obj=None, session=None, **kwargs):
        """
        Evennia -> User
        This is the main route for sending data back to the user from the
        server.
        Args:
            text (str, optional): text data to send
            from_obj (Object or Player, optional): Object sending. If given,
                its at_msg_send() hook will be called.
            session (Session or list, optional): Session object or a list of
                Sessions to receive this send. If given, overrules the
                default send behavior for the current
                MULTISESSION_MODE.
        Notes:
            All other keywords are passed on to the protocol.
        """
        raw = kwargs.get("raw", False)
        if not raw:
            try:
                # Muddery clients expect JSON-encoded payloads.
                text = json.dumps(text)
            except Exception as e:  # fix: "except Exception, e" is Py2-only syntax
                # Fix: error string had broken grammar; it is user-facing.
                text = json.dumps({"err": "An error occurred while outputting messages."})
                # Fix: original referenced an undefined name ``logger`` here,
                # which raised NameError inside the handler.
                logging.getLogger(__name__).exception("json.dumps failed: %s" % e)
        else:
            # Fix: ``to_str`` was not imported; it now comes from
            # evennia.utils.utils alongside make_iter.
            text = to_str(text, force_string=True) if text else ""
        # Mark the payload as pre-rendered so it is not encoded again
        # downstream.  ``kwargs`` is always a dict (it comes from **kwargs),
        # so the original if/else around this assignment was redundant.
        kwargs["raw"] = True
        if from_obj:
            # Call the sender's hook; a failing hook must not block delivery.
            try:
                from_obj.at_msg_send(text=text, to_obj=self, **kwargs)
            except Exception:
                pass
        # Relay to the explicitly given session(s), or to all of this
        # player's sessions by default.
        sessions = make_iter(session) if session else self.sessions.all()
        for session in sessions:
            session.msg(text=text, **kwargs)
class MudderyGuest(DefaultGuest):
    """
    This class is used for guest logins. Unlike Players, Guests and their
    characters are deleted after disconnection.
    """
    # No Muddery-specific guest behavior yet; everything is inherited
    # from Evennia's DefaultGuest.
    pass
|
[
"luyijun999@gmail.com"
] |
luyijun999@gmail.com
|
00e2fbc37e5d8aa5a588fc4185c7bc8bab4c4f22
|
a39ed5db6c75c9ae1f5e05118794c64102dc5f7a
|
/2020/01_1/solution.py
|
091874824ee82bf49cb18909afad5b2272562b7c
|
[
"MIT"
] |
permissive
|
budavariam/advent_of_code
|
b656d5caf5d05113b82357754eb225e61e89ac0d
|
635be485ec691f9c0cdeb83f944de190f51c1ba3
|
refs/heads/master
| 2022-12-25T18:12:00.981365
| 2022-12-20T08:20:51
| 2022-12-20T08:20:51
| 114,570,426
| 1
| 1
|
MIT
| 2022-12-09T09:29:06
| 2017-12-17T21:36:00
|
Python
|
UTF-8
|
Python
| false
| false
| 656
|
py
|
""" Advent of code 2020 day 1/1 """
import math
from os import path
def solution(data):
    """Solution to the problem: return the product of the two entries in
    *data* (newline-separated integers) that sum to 2020, or None if no
    such pair exists.

    Blank lines (e.g. a trailing newline) are skipped; the original
    crashed with ValueError on them.
    """
    values = [int(line) for line in data.split("\n") if line.strip()]
    # Record every entry's complement once.  A set replaces the original
    # dict-of-True makeshift set and the non-idiomatic ``== True`` check.
    complements = {2020 - value for value in values}
    for value in values:
        # value is in complements iff some entry equals 2020 - value.
        if value in complements:
            return value * (2020 - value)
    return None
if __name__ == "__main__":
    # Read the puzzle input stored next to this script and print the answer.
    with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:
        print(solution(input_file.read()))
|
[
"budavariam@gmail.com"
] |
budavariam@gmail.com
|
92c59a1156df87073eec8744b9a4011e1e6fd657
|
f07e66293cc41a9fe71fc44f765b432fd7a0997c
|
/selfdrive/controls/lib/cluster/SConscript
|
97eb4300d4da6618962e0430ca534fc43fb0640f
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
kegman/openpilot
|
c9ba96a72d905956f02c684e065091e023942883
|
b35291c91783657a5fc83abfff012d3bb49dd89f
|
refs/heads/kegman-ultimate
| 2022-05-22T17:07:16.656336
| 2021-10-25T13:35:28
| 2021-10-25T13:35:28
| 229,979,925
| 105
| 212
|
MIT
| 2022-03-13T05:47:51
| 2019-12-24T17:27:11
|
C
|
UTF-8
|
Python
| false
| false
| 185
|
# SCons build script: pulls the shared construction environment and builds
# the fastcluster shared library from its C++ source.
Import('env')
fc = env.SharedLibrary("fastcluster", "fastcluster.cpp")
# TODO: how do I gate on test
#env.Program("test", ["test.cpp"], LIBS=[fc])
#valgrind --leak-check=full ./test
|
[
"user@comma.ai"
] |
user@comma.ai
|
|
99d58cfffec18317f497271c87e04c101c9d5fbf
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/rdbms/azure-mgmt-rdbms/generated_samples/mysql/server_security_alerts_create_max.py
|
702f9e0bb6a8a7da00508fb08c8a992824a0c71c
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.rdbms.mysql import MySQLManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-rdbms
# USAGE
python server_security_alerts_create_max.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create/update a server security alert policy (generated sample).

    Credentials come from the environment via DefaultAzureCredential
    (AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET).
    """
    client = MySQLManagementClient(
        credential=DefaultAzureCredential(),
        # Placeholder subscription id; replace with a real subscription.
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    # Long-running operation: begin_* returns a poller and .result()
    # blocks until the policy has been created/updated.
    response = client.server_security_alert_policies.begin_create_or_update(
        resource_group_name="securityalert-4799",
        server_name="securityalert-6440",
        security_alert_policy_name="Default",
        parameters={
            "properties": {
                "disabledAlerts": ["Access_Anomaly", "Usage_Anomaly"],
                "emailAccountAdmins": True,
                "emailAddresses": ["testSecurityAlert@microsoft.com"],
                "retentionDays": 5,
                "state": "Enabled",
                "storageAccountAccessKey": "sdlfkjabc+sdlfkjsdlkfsjdfLDKFTERLKFDFKLjsdfksjdflsdkfD2342309432849328476458/3RSD==",
                "storageEndpoint": "https://mystorage.blob.core.windows.net",
            }
        },
    ).result()
    print(response)
# x-ms-original-file: specification/mysql/resource-manager/Microsoft.DBforMySQL/legacy/stable/2017-12-01/examples/ServerSecurityAlertsCreateMax.json
# Running the sample directly performs a live ARM call.
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
98d4b751487188eba562b6871a2298cb5ea68deb
|
34d5ebe9e6de9d6742c234dabfa9b38f0adb7774
|
/carriage_return/ui.py
|
6b4e0f2958482b03ef044f9c62842f0bd045a463
|
[] |
no_license
|
campagnola/return-to-carriage
|
f37acaf8e41ccf04e7089018574732a1fdcd2a64
|
eeb7f31b16e2c236c875c867a0295173fa6f4b0a
|
refs/heads/master
| 2021-08-05T14:02:49.988526
| 2021-07-31T08:38:17
| 2021-07-31T08:38:17
| 84,014,684
| 0
| 2
| null | 2021-07-30T02:48:13
| 2017-03-06T00:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,518
|
py
|
import numpy as np
import vispy.scene, vispy.app
import vispy.util.ptime as ptime
from .input import InputDispatcher, CommandInputHandler
from .graphics import TextBox
from .console import CommandInterpreter
class MainWindow:
    """Implements user interface: graphical panels, key input handling
    """
    def __init__(self):
        # Main drawing surface for the entire UI.
        self.canvas = vispy.scene.SceneCanvas()
        self.canvas.show()
        self.canvas.size = 1400, 900
        self.debug_line_of_sight = False
        self.debug_los_tex = False
        # Setup input event handling
        self.input_dispatcher = InputDispatcher(self.canvas)
        self.command_mode = False
        # setup UI
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = 'panzoom'
        self.view.camera.rect = [0, -5, 120, 60]
        self.view.camera.aspect = 0.6
        # Key presses are routed through InputDispatcher, not the view.
        self.view.events.key_press.disconnect()
        # Camera easing: _scroll_camera moves the camera toward
        # camera_target on a ~60 Hz timer (interval 0.016 s).
        self.camera_target = self.view.camera.rect
        self._last_camera_update = ptime.time()
        self.scroll_timer = vispy.app.Timer(start=True, connect=self._scroll_camera, interval=0.016)
        # Text panels: stats bar on top, info box and console below.
        self.console_grid = self.canvas.central_widget.add_grid()
        self.stats_box = TextBox((2, 160))
        self.console_grid.add_widget(self.stats_box.view, 1, 0, 1, 2)
        self.stats_box.write(
            "HP:17/33 Food:56% Water:34% Sleep:65% Weight:207(45) Level:3 Int:12 Str:9 Wis:11 Cha:2")
        self.stats_box.view.height_max = 30
        self.stats_box.view.stretch = (1, 10)
        self.info_box = TextBox((15, 80))
        self.console_grid.add_widget(self.info_box.view, 2, 0)
        self.info_box.write("There is a scroll of infinite recursion here.")
        self.info_box.view.height_max = 200
        # NOTE(review): stats_box.view.stretch was already assigned above;
        # presumably this was meant to be info_box.view.stretch — confirm.
        self.stats_box.view.stretch = (1, 1)
        self.console = TextBox((15, 80))
        self.console_grid.add_widget(self.console.view, 2, 1)
        self.console.view.stretch = (1, 10)
        # self.console.view.parent = self.canvas.scene
        self.console.view.rect = vispy.geometry.Rect(30, 620, 1350, 250)
        self.console.transform = vispy.visuals.transforms.STTransform((0, 0, -0.5))
        # self.console.view.camera.aspect = 0.6
        self.console.view.height_max = 200
        self.console.write('Hello?')
        self.console.write('Is anybody\n there?')
        # Write the printable ASCII range as a glyph-rendering smoke test.
        self.console.write(''.join([chr(i) for i in range(0x20, 128)]))
        # self.console.view.camera.rect = [-1, -1, 30, 3]
        # Command console wiring.
        self.command = CommandInterpreter(self)
        self.cmd_input_handler = CommandInputHandler(self.console, self.command)
        self._follow_entity = None
    def follow_entity(self, entity):
        """Make the camera track *entity*, replacing any previous target."""
        if self._follow_entity is not None:
            self._follow_entity.location.global_changed.disconnect(self._update_camera_target)
        self._follow_entity = entity
        entity.location.global_changed.connect(self._update_camera_target)
        self._update_camera_target()
    def toggle_command_mode(self):
        """Toggle keyboard focus between game input and the command console."""
        # todo: visual cue
        self.command_mode = not self.command_mode
        if self.command_mode:
            self.cmd_input_handler.activate()
        else:
            self.cmd_input_handler.deactivate()
    def _scroll_camera(self, ev):
        """Timer callback: exponentially ease the camera toward camera_target."""
        now = ptime.time()
        dt = now - self._last_camera_update
        self._last_camera_update = now
        cr = vispy.geometry.Rect(self.view.camera.rect)
        tr = self.camera_target
        # Pack (x, y, w, h) of the current and target rects for vector math.
        crv = np.array(cr.pos + cr.size, dtype='float32')
        trv = np.array(tr.pos + tr.size, dtype='float32')
        if not np.any(abs(trv - crv) > 1e-2):
            # Close enough to the target; skip the redraw-triggering update.
            return
        s = np.exp(-dt / 0.4)  # 400 ms settling time constant
        nrv = crv * s + trv * (1.0 - s)
        cr.pos = nrv[:2]
        cr.size = nrv[2:]
        self.view.camera.rect = cr
    def _update_camera_target(self, event=None):
        """Recompute camera_target so the followed entity stays near center."""
        location = self._follow_entity.location
        pp = np.array(location.global_location.slot)
        cr = vispy.geometry.Rect(self.view.camera.rect)
        cc = np.array(cr.center)
        cs = np.array(cr.size)
        cp = np.array(cr.pos)
        dif = pp - cc
        maxdif = 0.1 * cs  # start correcting camera at 10% width from center
        # Shift the target rect only by the amount the entity exceeds the
        # dead zone on each axis, so small movements don't pan the camera.
        for ax in (0, 1):
            if dif[ax] < -maxdif[ax]:
                cp[ax] += dif[ax] + maxdif[ax]
            elif dif[ax] > maxdif[ax]:
                cp[ax] += dif[ax] - maxdif[ax]
        cr.pos = cp
        self.camera_target = cr
    def quit(self):
        """Close the application window."""
        self.canvas.close()
|
[
"luke.campagnola@gmail.com"
] |
luke.campagnola@gmail.com
|
89a83059cc975cbb899bcbf35c4ce9000b7da5e0
|
59166105545cdd87626d15bf42e60a9ee1ef2413
|
/test/test_watermill_api.py
|
9b7b62efa75df5f6b212c5921dfa2cb31da4fd6a
|
[] |
no_license
|
mosoriob/dbpedia_api_client
|
8c594fc115ce75235315e890d55fbf6bd555fa85
|
8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc
|
refs/heads/master
| 2022-11-20T01:42:33.481024
| 2020-05-12T23:22:54
| 2020-05-12T23:22:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
# coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import dbpedia
from dbpedia.api.watermill_api import WatermillApi # noqa: E501
from dbpedia.rest import ApiException
class TestWatermillApi(unittest.TestCase):
    """WatermillApi unit test stubs"""
    # Generated by openapi-generator; the stubs below are placeholders
    # awaiting real assertions against a live/mock API.
    def setUp(self):
        # A fresh client per test; no configuration is injected here.
        self.api = dbpedia.api.watermill_api.WatermillApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_watermills_get(self):
        """Test case for watermills_get
        List all instances of Watermill # noqa: E501
        """
        pass
    def test_watermills_id_get(self):
        """Test case for watermills_id_get
        Get a single Watermill by its id # noqa: E501
        """
        pass
if __name__ == '__main__':
    # Run the generated stubs through unittest's CLI runner.
    unittest.main()
|
[
"maxiosorio@gmail.com"
] |
maxiosorio@gmail.com
|
ce66f81dd62ef4c454b93bada3202dfdabc764a2
|
adbb2b958296815f9485bab60c0d38827befeeeb
|
/build/lib.linux-i686-2.7/gdrivefs/change.py
|
394f5bedbdc47e5902688e014679cddbd2e96977
|
[
"MIT"
] |
permissive
|
gryphius/GDriveFS
|
4b4619e1eefceb562ded6ae13dcc9a2c5b4c0a1b
|
fadfbdea019cfa4c2a821f4636380edbc8be32bc
|
refs/heads/master
| 2021-01-18T14:14:32.028542
| 2013-04-24T06:17:03
| 2013-04-24T06:17:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,892
|
py
|
import logging
from threading import Lock, Timer
from gdrivefs.gdtool import AccountInfo, drive_proxy
from gdrivefs.conf import Conf
from gdrivefs.cache import PathRelations, EntryCache
from gdrivefs.timer import Timers
def _sched_check_changes():
    """Poll Google Drive for changes once, then re-arm the timer so the
    poll repeats every ``change_check_frequency_s`` seconds."""
    logging.debug("Doing scheduled check for changes.")
    get_change_manager().process_updates()
    # Schedule next invocation.
    t = Timer(Conf.get('change_check_frequency_s'), _sched_check_changes)
    t.start()
    # Registering lets the shutdown path cancel the pending timer.
    Timers.get_instance().register_timer('change', t)
class _ChangeManager(object):
    """Tracks the Google Drive change feed and applies each change record
    to the local caches.  Created once via get_change_manager()."""
    __log = None
    # Highest change-ID that has been fully applied so far.
    at_change_id = None
    def __init__(self):
        self.__log = logging.getLogger().getChild('ChangeMan')
        try:
            self.at_change_id = AccountInfo.get_instance().largest_change_id
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt;
            # it re-raises so behavior is safe, but a narrower type
            # would be clearer.
            self.__log.exception("Could not get largest change-ID.")
            raise
        self.__log.info("Latest change-ID at startup is (%d)." %
            (self.at_change_id))
    def mount_init(self):
        """Called when filesystem is first mounted."""
        self.__log.debug("Change init.")
        _sched_check_changes()
    def mount_destroy(self):
        """Called when the filesystem is unmounted."""
        self.__log.debug("Change destroy.")
    def process_updates(self):
        """Process any changes to our files. Return True if everything is up to
        date or False if we need to be run again.
        """
        # Resume immediately after the last change we applied.
        start_at_id = (self.at_change_id + 1)
        try:
            result = drive_proxy('list_changes', start_change_id=start_at_id)
        except:
            # Deliberate best-effort poll: claim success so the scheduler
            # waits for the next cycle rather than retrying immediately.
            self.__log.exception("Could not retrieve updates. Skipped.")
            return True
        (largest_change_id, next_page_token, changes) = result
        self.__log.debug("The latest reported change-ID is (%d) and we're "
                         "currently at change-ID (%d)." % (largest_change_id,
                                                           self.at_change_id))
        if largest_change_id == self.at_change_id:
            self.__log.debug("No entries have changed.")
            return True
        self.__log.info("(%d) changes will now be applied." % (len(changes)))
        # Python 2 dict iteration; ``changes`` maps change-ID -> tuple.
        for change_id, change_tuple in changes.iteritems():
            # Apply the changes. We expect to be running them from oldest to
            # newest.
            self.__log.info("Change with ID (%d) will now be applied." %
                            (change_id))
            try:
                self.__apply_change(change_id, change_tuple)
            except:
                # Stop at the first failure so ordering is preserved on retry.
                self.__log.exception("There was a problem while processing change"
                                     " with ID (%d). No more changes will be "
                                     "applied." % (change_id))
                return False
            self.at_change_id = change_id
        # A page token means more changes are pending -> request another run.
        return (next_page_token == None)
    def __apply_change(self, change_id, change_tuple):
        """Apply changes to our filesystem reported by GD. All we do is remove
        the current record components, if it's valid, and then reload it with
        what we were given. Note that since we don't necessarily know
        about the entries that have been changed, this also allows us to slowly
        increase our knowledge of the filesystem (of, obviously, only those
        things that change).
        """
        (entry_id, was_deleted, entry) = change_tuple
        is_visible = entry.is_visible if entry else None
        self.__log.info("Applying change with change-ID (%d), entry-ID [%s], and "
                        "is-visible of [%s]" % (change_id, entry_id, is_visible))
        # First, remove any current knowledge from the system.
        self.__log.debug("Removing all trace of entry with ID [%s]." % (entry_id))
        try:
            PathRelations.get_instance().remove_entry_all(entry_id)
        except:
            self.__log.exception("There was a problem remove entry with ID [%s] "
                                 "from the caches." % (entry_id))
            raise
        # If it wasn't deleted, add it back.
        self.__log.debug("Registering changed entry with ID [%s]." % (entry_id))
        if is_visible:
            path_relations = PathRelations.get_instance()
            try:
                path_relations.register_entry(entry)
            except:
                self.__log.exception("Could not register changed entry with ID "
                                     "[%s] with path-relations cache." %
                                     (entry_id))
                raise
def get_change_manager():
    """Return the process-wide _ChangeManager, creating it on first use
    (thread-safe lazy singleton)."""
    with get_change_manager.lock:
        if not get_change_manager.instance:
            get_change_manager.instance = _ChangeManager()
        return get_change_manager.instance
# Singleton state kept as attributes on the accessor function itself.
get_change_manager.instance = None
get_change_manager.lock = Lock()
|
[
"myselfasunder@gmail.com"
] |
myselfasunder@gmail.com
|
5260e5f6e9e62dff2851c2a69b0d9942a5673c04
|
ccbb7fb8fda4d936e765263f05a435058b397bd9
|
/src/guiltytargets/ppi_network_annotation/pipeline.py
|
4556892fb1e8316cdaac58aa4319506234f86649
|
[
"MIT"
] |
permissive
|
GuiltyTargets/guiltytargets
|
5a5d3ba9e45867a64c81a91529ae6689f8be447f
|
c20a5cae6c9cc71c2ca73080a862abe986bc34c0
|
refs/heads/master
| 2022-02-13T03:30:49.705239
| 2021-12-22T12:51:20
| 2021-12-22T12:51:20
| 154,318,881
| 10
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,627
|
py
|
# -*- coding: utf-8 -*-
"""Functions to easily set up the network."""
import logging
from typing import List, Optional
from .model.gene import Gene
from .model.network import Network
from .parsers import parse_csv, parse_disease_associations, parse_disease_ids, parse_excel, parse_ppi_graph
__all__ = [
'generate_ppi_network',
'parse_dge',
]
logger = logging.getLogger(__name__)
def generate_ppi_network(
    ppi_graph_path: str,
    dge_list: List[Gene],
    max_adj_p: float,
    max_log2_fold_change: float,
    min_log2_fold_change: float,
    ppi_edge_min_confidence: Optional[float] = None,
    current_disease_ids_path: Optional[str] = None,
    disease_associations_path: Optional[str] = None,
) -> Network:
    """Generate the protein-protein interaction network.
    :return Network: Protein-protein interaction network with information on differential expression.
    """
    # Load the PPI graph (e.g. HIPPIE) and collapse self-loops/duplicate edges.
    interaction_graph = parse_ppi_graph(ppi_graph_path, ppi_edge_min_confidence).simplify()

    # Disease annotations are optional: only loaded when both files are given.
    if disease_associations_path is None or current_disease_ids_path is None:
        disease_associations = None
    else:
        current_disease_ids = parse_disease_ids(current_disease_ids_path)
        disease_associations = parse_disease_associations(
            disease_associations_path,
            current_disease_ids,
        )

    # Wrap the graph with the differential-expression thresholds, then
    # annotate it with the DGE list and any disease associations.
    ppi_network = Network(
        interaction_graph,
        max_adj_p=max_adj_p,
        max_l2fc=max_log2_fold_change,
        min_l2fc=min_log2_fold_change,
    )
    ppi_network.set_up_network(dge_list, disease_associations=disease_associations)
    return ppi_network
def parse_dge(
    dge_path: str,
    entrez_id_header: str,
    log2_fold_change_header: str,
    adj_p_header: str,
    entrez_delimiter: str,
    base_mean_header: Optional[str] = None,
) -> List[Gene]:
    """Parse a differential expression file.
    :param dge_path: Path to the file.
    :param entrez_id_header: Header for the Entrez identifier column
    :param log2_fold_change_header: Header for the log2 fold change column
    :param adj_p_header: Header for the adjusted p-value column
    :param entrez_delimiter: Delimiter between Entrez ids.
    :param base_mean_header: Header for the base mean column.
    :return: A list of genes.
    :raises ValueError: If the file extension is not .xlsx, .csv, or .tsv.
    """
    # All three format parsers share the same keyword arguments; build them
    # once instead of repeating the block per extension (the original
    # triplicated it).
    common_kwargs = dict(
        entrez_id_header=entrez_id_header,
        log_fold_change_header=log2_fold_change_header,
        adjusted_p_value_header=adj_p_header,
        entrez_delimiter=entrez_delimiter,
        base_mean_header=base_mean_header,
    )
    if dge_path.endswith('.xlsx'):
        return parse_excel(dge_path, **common_kwargs)
    if dge_path.endswith('.csv'):
        return parse_csv(dge_path, **common_kwargs)
    if dge_path.endswith('.tsv'):
        # Same CSV parser, tab-separated.
        return parse_csv(dge_path, sep="\t", **common_kwargs)
    raise ValueError(f'Unsupported extension: {dge_path}')
|
[
"cthoyt@gmail.com"
] |
cthoyt@gmail.com
|
8f15048573ae6cf53c784fe29bb50ef7345fb154
|
99701affb7ae46c42c55484f3301d59f79294a10
|
/project/Examples/Examples/PP2E/Dstruct/Basic/inter2.py
|
200364cc5828b3f08ae4bba0989169e3e39861b8
|
[] |
no_license
|
inteljack/EL6183-Digital-Signal-Processing-Lab-2015-Fall
|
1050b9e9bddb335bf42b7debf2abebe51dd9f9e0
|
0f650a97d8fbaa576142e5bb1745f136b027bc73
|
refs/heads/master
| 2021-01-21T21:48:21.326372
| 2016-04-06T20:05:19
| 2016-04-06T20:05:19
| 42,902,523
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
def intersect(*args):
    """Return the items of the first sequence that appear in every other
    sequence, preserving the first sequence's order and duplicates.

    Fix: called with no arguments, the original raised IndexError on
    ``args[0]``; an empty list is returned instead.
    """
    if not args:
        return []
    res = []
    for x in args[0]:  # scan the first list
        # for/else + break replaced with the equivalent all().
        if all(x in other for other in args[1:]):
            res.append(x)  # add common items to the end
    return res
def union(*args):
    """Return the ordered union of all argument sequences: each item
    appears once, in first-seen order."""
    merged = []
    for sequence in args:          # every sequence argument in turn
        for item in sequence:      # every element of that sequence
            if item not in merged: # keep only first occurrences
                merged.append(item)
    return merged
|
[
"inteljack2008@gmail.com"
] |
inteljack2008@gmail.com
|
5582e0c04ffcb5fecce6af3812ec4c05c1be9fb2
|
6219e6536774e8eeb4cadc4a84f6f2bea376c1b0
|
/scraper/storage_spiders/thnhatrangvn.py
|
220429db599deaabf7822d301bccd557a783a259
|
[
"MIT"
] |
permissive
|
nguyenminhthai/choinho
|
109d354b410b92784a9737f020894d073bea1534
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
refs/heads/master
| 2023-05-07T16:51:46.667755
| 2019-10-22T07:53:41
| 2019-10-22T07:53:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors the generated spider uses to extract product fields.
XPATH = {
    'name' : "//div[@class='home-center']/div[@class='product-info']/div[@class='product-name']/h1",
    'price' : "//div[@class='product-right']/div[@class='product-price']/p[@class='cssPriceSpecial']/b",
    'category' : "//div[@class='wrap']/div[@class='home-content']/div[@class='category-path']/a",
    'description' : "//div[@id='pro_content_desc']/div//span",
    'images' : "//div[@id='pro_big']/a/@href",
    'canonical' : "//link[@rel='canonical']/@href",
    'base_url' : "",
    'brand' : ""
}
# Spider identity and crawl scope.
name = 'thnhatrang.vn'
allowed_domains = ['thnhatrang.vn']
start_urls = ['http://thnhatrang.vn']
tracking_url = ''
# Sitemap crawling is effectively disabled (empty URL list entries).
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Product pages (...-p<id>.html) go to parse_item; category pages
# (...-c<id>.html) are followed for more links.
rules = [
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-p\d+\.html']), 'parse_item'),
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-c\d+\.html']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
[
"nguyenchungthuy.hust@gmail.com"
] |
nguyenchungthuy.hust@gmail.com
|
9cbdd8a6c6170a9d1d5a9ca37e428a2e16bc6c22
|
309d17b81cea038713ba67bee72a41d2df4d6869
|
/Python/Python_basic/Python_OOP/OOP21_composition2.py
|
86f25cef9fcfcf5256d11e83738ff6e7e74ed70b
|
[] |
no_license
|
Bongkot-Kladklaen/Programming_tutorial_code
|
ac07e39da2bce396e670611884436b360536cdc5
|
cda7508c15c3e3d179c64b9aac163b6173ef3519
|
refs/heads/master
| 2023-06-20T13:14:17.077809
| 2021-07-18T04:41:04
| 2021-07-18T04:41:04
| 387,081,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
class Printer:
    """Printing component; composed into Aio rather than inherited."""
    def print_page(self, data):
        print("printing {}".format(data))
class Scanner:
    """Scanning component of the composition demo."""
    def scan_page(self):
        print("scanning...")
class Fax:
    """Faxing component of the composition demo."""
    def fax_page(self,number):
        print("faxing to {}".format(number))
class Aio: # All in one printer
    """Composes a Printer, Scanner and Fax into one device —
    composition-over-inheritance demo."""
    def __init__(self, p, s,f):
        # p/s/f: printer, scanner and fax components; callers use them
        # directly as attributes (a.p, a.s, a.f).
        self.p = p
        self.s = s
        self.f = f
if __name__ == '__main__':
    # Demo: exercise each component through the composite object.
    a = Aio(Printer(), Scanner(), Fax())
    a.p.print_page("hello")
    a.s.scan_page()
    a.f.fax_page("02848248")
|
[
"bongkot.klad@gmail.com"
] |
bongkot.klad@gmail.com
|
ea8ca2060f2262c3ecaf0c88506fad93bb81a001
|
eb54d732b5f14f03d9bf2988c6157605c80bbdd5
|
/bubble_sort.py
|
e599bb7065016d2e01b3e67d5e93e3dc4947d828
|
[] |
no_license
|
tngo0508/practice_coding
|
2e60519fed83a9b3c28b52c2d5ec1ee1d2a609ed
|
453c9a7b9a8aa80f37b245f9df447525a9b0a2d1
|
refs/heads/master
| 2022-03-27T01:44:56.589650
| 2020-01-05T18:58:31
| 2020-01-05T18:58:31
| 225,294,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
def bubble_sort(nums):
    """Sort *nums* in place with bubble sort and return the same list.

    Each outer pass bubbles the largest remaining element up to index i.
    Fix: removed the leftover debug ``print(nums)`` that ran every pass;
    added the standard early exit when a pass performs no swaps.
    """
    for i in range(len(nums) - 1, 0, -1):
        swapped = False
        for j in range(i):
            if nums[j] > nums[j + 1]:
                nums[j], nums[j + 1] = nums[j + 1], nums[j]
                swapped = True
        if not swapped:
            break  # no swaps -> already sorted
    return nums
print(bubble_sort([4, 1, 0, 3, 5, 1, 2, 6]))
|
[
"tngo0508@gmail.com"
] |
tngo0508@gmail.com
|
86a2d304179a0d4d021966bafce213f4365d57c2
|
84290c584128de3e872e66dc99b5b407a7a4612f
|
/Statistical Thinking in Python (Part 2)/Bootstrap confidence intervals/Visualizing bootstrap samples.py
|
325418de26f528e09ecafe5c6554c241dae959c8
|
[] |
no_license
|
BautizarCodigo/DataAnalyticEssentials
|
91eddc56dd1b457e9e3e1e3db5fbbb2a85d3b789
|
7f5f3d8936dd4945ee0fd854ef17f04a04eb7b57
|
refs/heads/main
| 2023-04-11T04:42:17.977491
| 2021-03-21T19:05:17
| 2021-03-21T19:05:17
| 349,784,608
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# DataCamp exercise snippet: ``np``, ``plt``, ``ecdf()`` and the
# ``rainfall`` array are provided by the exercise environment — this
# file is not runnable on its own.
for _ in range(50):
    # Generate bootstrap sample: bs_sample
    bs_sample = np.random.choice(rainfall, size=len(rainfall))
    # Compute and plot ECDF from bootstrap sample
    x, y = ecdf(bs_sample)
    _ = plt.plot(x, y, marker='.', linestyle='none',
                 color='gray', alpha=0.1)
# Compute and plot ECDF from original data
x, y = ecdf(rainfall)
_ = plt.plot(x, y, marker='.')
# Make margins and label axes
plt.margins(0.02)
_ = plt.xlabel('yearly rainfall (mm)')
_ = plt.ylabel('ECDF')
# Show the plot
plt.show()
|
[
"78171986+BautizarCodigo@users.noreply.github.com"
] |
78171986+BautizarCodigo@users.noreply.github.com
|
391a306f78fe5c96c880603c95534afa317eb828
|
874f8db726d5ce5da971dbd54aac58f0b3176d78
|
/aa通用的工具类或方法/一个通用的mongodb类.py
|
688e8d6c4c304fd0c6613395dc49c4fed7d13fcf
|
[] |
no_license
|
Social-Engineering-OrigData/python
|
a8442ab5b3a772ddfc568eb5e386b11074c5bf93
|
6dde78f75e2a3306bccdc0085a44751cf2b901ca
|
refs/heads/master
| 2021-09-09T12:56:09.781127
| 2018-03-16T09:34:17
| 2018-03-16T09:34:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
#!/usr/bin/env python
# encoding: utf-8
'''
@contact: wersonliugmail.com
@File : 一个通用的mongodb类.py
'''
from pymongo import MongoClient
"""
在爬虫存数据时使用,不需要事先建立数据库,直接存字典
"""
class MyMongo:
    """Small MongoDB helper for crawlers: insert dicts directly, with no
    need to create the database/collection up front (MongoDB creates
    them lazily on first write)."""
    def __init__(self, dbname, colname):
        """
        :param dbname: name of the database to use
        :param colname: name of the collection ("table") to use
        """
        # host/port are hard-coded for a local MongoDB; edit as needed.
        self.host = "127.0.0.1"
        self.port = 27017
        # self.dbname = dbname
        # self.colname = colname
        self.client = MongoClient(host=self.host, port=self.port)
        self.db = self.client[dbname]
        self.col = self.db[colname]
    def process_data(self, data):
        """Insert one document (a dict) and report success."""
        # NOTE(review): Collection.insert is deprecated in pymongo 3+;
        # insert_one would be the modern call — confirm pymongo version.
        self.col.insert(data)
        print("成功插入%s" % data)
    def close_mongo(self):
        """Release the client connection."""
        self.client.close()
# Other CRUD operations could be added to the class above.
# Demo usage (runs at import time).
my = MyMongo("wnagyi", "info")
my.process_data({"姓名": "刘伟", "工资": 1800})
my.close_mongo()
|
[
"wersonliu@gmail.com"
] |
wersonliu@gmail.com
|
35380b0997d3dc37aa77773fe400ca9768d179f3
|
9c05ec071dda2aa98ea1b12d9703dd91df19c87d
|
/quantum/hooks.py
|
2c6a587a6d593503d2bbf9fee3977197c254c5db
|
[
"Apache-2.0"
] |
permissive
|
DestinyOneSystems/quantum
|
af6ff44dd5e8cff944e53946f60adb11efb47bd5
|
d7eafd8ffa719d91108b230221ecf27531a3530d
|
refs/heads/master
| 2020-04-05T18:59:00.584768
| 2013-10-21T02:41:15
| 2013-10-21T02:41:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
def setup_hook(config):
    """Filter config parsed from a setup.cfg to inject our defaults.

    On win32, replaces the Linux-only ``pyudev`` requirement with
    ``pywin32`` and ``wmi``.  Mutates *config* in place; returns None.
    """
    metadata = config['metadata']
    if sys.platform == 'win32':
        # Fix: the original default was list(), which has no .split() —
        # a missing 'requires_dist' key raised AttributeError.  An empty
        # string keeps the subsequent split/append logic uniform.
        requires = metadata.get('requires_dist', '').split('\n')
        requires.append('pywin32')
        requires.append('wmi')
        # Fix: guard the removal; list.remove raises ValueError when
        # 'pyudev' is absent from the requirement list.
        if 'pyudev' in requires:
            requires.remove('pyudev')
        metadata['requires_dist'] = "\n".join(requires)
    config['metadata'] = metadata
|
[
"mordred@inaugust.com"
] |
mordred@inaugust.com
|
f138655f1c273477db99f1f85129ea718053c624
|
1a2cbc44bfcda1eafe4e8513de8541d8cd49bd08
|
/fts/test_t1_amend_user2.py
|
f8bebf459f343d1e016841a1993e789e179cfd24
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
DonaldMc/gdms
|
d62d34585a3914330cc933476dcb0d3ab750b7d8
|
7bfdf40d929afab2e204256c781c3700f6e24443
|
refs/heads/master
| 2021-01-18T12:38:55.798638
| 2016-05-30T18:59:55
| 2016-05-30T18:59:55
| 56,460,151
| 0
| 0
| null | 2016-05-30T20:59:22
| 2016-04-17T21:44:40
|
Python
|
UTF-8
|
Python
| false
| false
| 3,486
|
py
|
from functional_tests import FunctionalTest, ROOT, USERS
from ddt import ddt, data, unpack
from selenium.webdriver.support.ui import WebDriverWait
import time
from selenium.webdriver.support.ui import Select
# Testuser1 - stays as unspecified
# Testuser2 - specifies Africa and unspecified country and subdivision
# Testuser3 - specifies Africa and South Africa and unspecified subdivision
# Testuser4 - specifies Europe and unspecifoed country
# Testuser5 - specifies Europe and Switzerland and unspecified Subdivision
# Testuser6 - specifies North America and Unspeccified country
# Testuser7 - specifies North America, Canada and unspecified subdivision
# Testuser8 - specifies North America, Canada and Alberta
# Testuser9 - specifies North America, Canada and Saskatchewan
@ddt
class TestRegisterPage (FunctionalTest):
def setUp(self):
self.url = ROOT + '/default/user/login'
get_browser=self.browser.get(self.url)
# setup below for user7 being set twice seems stupid however for reasons that escape me the
# setting of unspecified subdivision isn't working if done in a single step hence Manitoba
# temporarily wheeled into play
@data((USERS['USER7'], USERS['PASSWORD7'], 'North America (NA)', 'Canada (NA)', 'Manitoba'),
(USERS['USER6'], USERS['PASSWORD6'], 'North America (NA)', 'Unspecified', 'Unspecified'),
(USERS['USER8'], USERS['PASSWORD8'], 'North America (NA)', 'Canada (NA)', 'Alberta'),
(USERS['USER9'], USERS['PASSWORD9'], 'North America (NA)', 'Canada (NA)', 'Saskatchewan'),
(USERS['USER7'], USERS['PASSWORD7'], 'North America (NA)', 'Canada (NA)', 'Unspecified'))
@unpack
def test_put_values_in_register_form(self, user, passwd, continent, country, subdivision):
mailstring = user + '@user.com'
email = WebDriverWait(self, 10).until(lambda self: self.browser.find_element_by_name("email"))
email.send_keys(mailstring)
password = self.browser.find_element_by_name("password")
password.send_keys(passwd)
time.sleep(1)
submit_button = self.browser.find_element_by_css_selector("#submit_record__row input")
time.sleep(1)
submit_button.click()
time.sleep(1)
self.url = ROOT + '/default/user/profile'
get_browser=self.browser.get(self.url)
time.sleep(1)
select = Select(self.browser.find_element_by_id("auth_user_continent"))
time.sleep(1)
select.select_by_visible_text(continent)
time.sleep(1)
select = Select(self.browser.find_element_by_id("countryopt"))
time.sleep(2)
select.select_by_visible_text(country)
time.sleep(3)
select = Select(self.browser.find_element_by_id("subdivopt"))
time.sleep(3)
select.select_by_visible_text(subdivision)
time.sleep(3)
self.browser.find_element_by_xpath("//input[@value='Apply changes']").click()
# TODO get this changed to changes applied after working
resultstring = 'Welcome'
time.sleep(2)
body = WebDriverWait(self, 10).until(lambda self: self.browser.find_element_by_tag_name('body'))
self.assertIn(resultstring, body.text)
#welcome_message = self.browser.find_element_by_css_selector(".flash")
#self.assertEqual(resultstring, welcome_message.text)
self.url = ROOT + '/default/user/logout'
get_browser = self.browser.get(self.url)
time.sleep(1)
|
[
"donaldm2020@gmail.com"
] |
donaldm2020@gmail.com
|
c85113890b4775751eea8a0787ac818401ea92d5
|
c660fdd49861211926a9dac0206d3856002ff2a8
|
/smbl/prog/plugins/samtools.py
|
e203b8094d9a9201ecb7919fbc2f9595a2242875
|
[
"MIT"
] |
permissive
|
hermanzhaozzzz/smbl
|
d493a8b7ecfaf961c7ca7280d94c945a3e4e3b92
|
5922fa2fc4060d86172e991361a1cceb0af51af8
|
refs/heads/master
| 2021-06-23T11:27:57.869235
| 2017-08-19T02:21:51
| 2017-08-19T02:21:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
import smbl
import snakemake
import os
from ._program import *
SAMTOOLS = get_bin_file_path("samtools")
##########################################
##########################################
class SamTools(Program):
@classmethod
def get_installation_files(cls):
return [
SAMTOOLS,
]
@classmethod
def install(cls):
gitdir_samtools=cls.git_clone("http://github.com/samtools/samtools","samtools")
gitdir_htslib=cls.git_clone("http://github.com/samtools/htslib","htslib")
smbl.prog.correct_samtools_make(os.path.join(gitdir_samtools,"Makefile"))
cls.run_make("samtools")
cls.install_file("samtools/samtools",SAMTOOLS)
@classmethod
def supported_platforms(cls):
return ["cygwin","osx","linux"]
|
[
"karel.brinda@gmail.com"
] |
karel.brinda@gmail.com
|
ee8aad80ea9fe488f536a12acb866395bcbdfc70
|
c26dc7928b1facac2c0912f6532076d35c19e835
|
/devel/lib/python2.7/dist-packages/cob_object_detection_msgs/srv/__init__.py
|
8d01ac1030bab33d482fd8bc39a91912a52446bc
|
[] |
no_license
|
mattedminster/inmoov_ros
|
33c29a2ea711f61f15ad5e2c53dd9db65ef6437f
|
e063a90b61418c3612b8df7876a633bc0dc2c428
|
refs/heads/master
| 2021-01-23T02:39:36.090746
| 2017-08-09T02:56:42
| 2017-08-09T02:56:42
| 85,995,826
| 0
| 0
| null | 2017-03-23T20:45:32
| 2017-03-23T20:45:32
| null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
from ._AcquireObjectImage import *
from ._BaTestEnvironment import *
from ._BagTrainObject import *
from ._ComputeGraspsVacuumGripper import *
from ._DetectObjects import *
from ._SaveRecordedObject import *
from ._StartObjectRecording import *
from ._StopObjectRecording import *
from ._TrainObject import *
|
[
"mattedminster@gmail.com"
] |
mattedminster@gmail.com
|
47ede935441605d7d56f33de91b7e10d1f544291
|
930309163b930559929323647b8d82238724f392
|
/sumitb2019_c.py
|
8ebf6c2adc23f64ec6e3e5122b0e1896defd65e2
|
[] |
no_license
|
GINK03/atcoder-solvers
|
874251dffc9f23b187faa77c439b445e53f8dfe1
|
b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7
|
refs/heads/master
| 2021-11-07T14:16:52.138894
| 2021-09-12T13:32:29
| 2021-09-12T13:32:29
| 11,724,396
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
import itertools
X = int(input())
ps = [100, 101, 102, 103, 104, 105]
dp=[0]*(X+1)
dp[0] = 1
for p in ps:
for i in range(len(dp)):
if i >= p:
dp[i] = max(dp[i], dp[i-p])
print(dp[X])
|
[
"gim.kobayashi@gmail.com"
] |
gim.kobayashi@gmail.com
|
56583f3316a24edddd70b4a0f9c935cbd4ceb946
|
3b79a802f8dd9f26bee0bfde4630ac0cab932803
|
/srcSegcls/getEventSegDF.py
|
b004b92f2a243f693794a4efdb8cca0d07350ef9
|
[] |
no_license
|
qolina/Twevent
|
87fc4706564088361e9db6ddc44efc10647e67fe
|
4b90b0604493b20dee90448c17e0a8e0d557165e
|
refs/heads/master
| 2021-06-24T19:06:02.022882
| 2017-08-15T05:20:09
| 2017-08-15T05:20:09
| 100,341,172
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,338
|
py
|
#! /usr/bin/env python
#coding=utf-8
import time
import re
import os
import math
import cPickle
############################
## load tweetID-usrID
def loadUsrId(filepath):
usrFile = file(filepath,"r")
tweIdToUsrIdHash = cPickle.load(usrFile)
usrFile.close()
return tweIdToUsrIdHash
############################
## load event segments from file
def loadEvtseg(filePath):
unitHash = {}#segment:segmentID(count from 0)
inFile = file(filePath)
unitID = 0
while True:
lineStr = inFile.readline()
lineStr = re.sub(r'\n', ' ', lineStr)
lineStr = lineStr.strip()
if len(lineStr) <= 0:
break
contentArr = lineStr.split("\t")
unit = contentArr[2]
unitHash[unit] = unitID
unitID += 1
inFile.close()
print "### " + str(len(unitHash)) + " event " + UNIT + "s are loaded from " + inFile.name
return unitHash
############################
## getEventSegment's df
def getEventSegmentDF(dataFilePath, toolDirPath):
fileList = os.listdir(dataFilePath)
for item in sorted(fileList):
if item.find("segged") != 0:
continue
print "### Processing " + item
seggedFile = file(dataFilePath + item)
tStr = item[len(item)-2:len(item)]
print "Time window: " + tStr
eventSegFilePath = dataFilePath + "event" + UNIT + tStr
unitHash = loadEvtseg(eventSegFilePath)
eventSegDFFile = file(dataFilePath + "event" + UNIT + "DF" + tStr, "w")
unitDFHash = {} # unit:dfhash
N_t = 0
Usr_t = 0
usrHash = {}
unitUsrHash = {}
tweToUsrFilePath = toolDirPath + "tweIdToUsrId" + tStr
tweIdToUsrIdHash = loadUsrId(tweToUsrFilePath)
while True:
lineStr = seggedFile.readline()
lineStr = re.sub(r'\n', " ", lineStr)
lineStr = lineStr.strip()
if len(lineStr) <= 0:
break
contentArr = lineStr.split("\t")
tweetIDstr = contentArr[0]
tweetText = contentArr[2]
usrIDstr = tweIdToUsrIdHash[tweetIDstr]
if len(tweetText)*len(tweetIDstr) == 0:
print "Error: empty id or text: " + tweetIDstr + "#" + tweetText
exit
N_t += 1
if usrIDstr not in usrHash:
usrHash[usrIDstr] = 1
textArr = tweetText.split("|")
for segment in textArr:
wordArr = segment.split(" ")
containslang = False
if useSegmentFlag:
unit = segment
if unit not in unitHash:
continue
# segment df
df_t_hash = {}
if unit in unitDFHash:
df_t_hash = unitDFHash[unit]
df_t_hash[tweetIDstr] = 1
unitDFHash[unit] = df_t_hash
# segment users
usr_hash = {}
if unit in unitUsrHash:
usr_hash = unitUsrHash[unit]
usr_hash[usrIDstr] = 1
unitUsrHash[unit] = usr_hash
else:
for word in wordArr:
unit = word
if unit not in unitHash:
continue
# word df
df_t_hash = {}
if unit in unitDFHash:
df_t_hash = unitDFHash[unit]
df_t_hash[tweetIDstr] = 1
unitDFHash[unit] = df_t_hash
# word users
usr_hash = {}
if unit in unitUsrHash:
usr_hash = unitUsrHash[unit]
usr_hash[usrIDstr] = 1
unitUsrHash[unit] = usr_hash
if N_t % 100000 == 0:
print "### " + str(time.asctime()) + " " + str(N_t) + " tweets are processed!"
windowHash[tStr] = N_t
Usr_t = len(usrHash)
cPickle.dump(N_t, eventSegDFFile)
cPickle.dump(Usr_t, eventSegDFFile)
cPickle.dump(unitDFHash, eventSegDFFile)
cPickle.dump(unitUsrHash, eventSegDFFile)
for unit in unitDFHash:
print unit + "\t" + str(len(unitDFHash[unit]))
print "### " + str(time.asctime()) + " " + str(len(unitHash)) + " event " + UNIT + "s DF/UsrDF are calculated and writen to " + eventSegDFFile.name
seggedFile.close()
eventSegDFFile.close()
############################
## main Function
global useSegmentFlag, UNIT
print "###program starts at " + str(time.asctime())
#dataFilePath = r"../Data_hfmon/segged_qtwe/"
dataFilePath = r"../Data_hfmon/segged_ltwe/"
#dataFilePath = r"../Data_hfmon/segged_ltwe_hash/"
# use segment or word as unit
useSegmentFlag = True
if useSegmentFlag:
UNIT = "segment"
else:
UNIT = "word"
toolDirPath = r"../Tools/"
windowHash = {} # timeSliceIdStr:tweetNum
getEventSegmentDF(dataFilePath, toolDirPath)
print "###program ends at " + str(time.asctime())
|
[
"qolina@gmail.com"
] |
qolina@gmail.com
|
b6c7bc0863d3be11b0c5fdaf4028d0651061b62a
|
3ee0418421955d01558b1c623def251932bcfc01
|
/python-examples/marble_sort/write_json.py
|
b3388c9cc682286c4a2476f1d08641cbb8ddb79c
|
[
"MIT"
] |
permissive
|
pep-dortmund/mindstorms
|
89f426930516155bb75f52b9fdd24a0b64fc0951
|
9e6be52545e21ab8ba3bca7e1b0e64ed2320366d
|
refs/heads/master
| 2021-01-01T19:19:26.508803
| 2017-04-29T11:39:35
| 2017-04-29T11:39:35
| 38,932,641
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
from argparse import ArgumentParser
import zmq
import json
parser = ArgumentParser()
parser.add_argument('outputfile')
parser.add_argument('-p', '--port', type=int, default=5000)
context = zmq.Context()
socket = context.socket(zmq.REP)
def main():
args = parser.parse_args()
socket.bind('tcp://0.0.0.0:{}'.format(args.port))
events = 0
with open(args.outputfile, 'a') as f:
while True:
data = socket.recv_pyobj()
socket.send_string('ok')
events += 1
print('Events:', events)
f.write(json.dumps(data))
f.write('\n')
if __name__ == '__main__':
main()
|
[
"maximilian.noethe@tu-dortmund.de"
] |
maximilian.noethe@tu-dortmund.de
|
6afad1eb9a9749a808aa04ff852f4ed7cf4fb72b
|
889d13d15084f12e84731f48f50c72169f4ca45f
|
/public/class03demos/class03p10.py
|
d49c82eb8a80a9c4ac35087d43a3a802aada5e9c
|
[] |
no_license
|
puneet-khatod/ml4us
|
1bb4a661f3d59d8d0b7ff9e959b2f51324c7a9c9
|
917cdac85086bfc82f03e3db3ba8e7b15f9c407b
|
refs/heads/master
| 2021-05-06T15:59:13.646649
| 2017-12-09T08:03:30
| 2017-12-09T08:03:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
"""
class03p10.py
This script should use Pandas to plot prices of GSPC for 2016.
"""
import pandas as pd
import matplotlib.pyplot as plt
csvfile = 'http://spy611.herokuapp.com/csv/allpredictions.csv'
# Goog: In pandas how to sort a dataframe?
cp_df = pd.read_csv(csvfile).sort_values(['cdate'])
# Goog: In pandas how to filter?
cp2016_sr = (cp_df.cdate > '2016') & (cp_df.cdate < '2017')
cp2016_df = cp_df[['cdate','cp']][cp2016_sr]
# I should plot
cpdate2016_df = cp2016_df.set_index(['cdate'])
# Goog: In Pandas what is an index?
# Goog: In Pandas what does set_index do?
cpdate2016_df.plot.line(title="GSPC 2016")
plt.show() # This line might be slow
'bye'
|
[
"bikle@bikle.com"
] |
bikle@bikle.com
|
cd4907ec3488eeaa4af0b6adb78c6fe463d8811d
|
4142b8c513d87361da196631f7edd82f11465abb
|
/python/round135/219A.py
|
84c2546d1739cabe735229c97479d28929b9d4e4
|
[] |
no_license
|
npkhanhh/codeforces
|
b52b66780426682ea1a3d72c66aedbe6dc71d7fe
|
107acd623b0e99ef0a635dfce3e87041347e36df
|
refs/heads/master
| 2022-02-08T17:01:01.731524
| 2022-02-07T10:29:52
| 2022-02-07T10:29:52
| 228,027,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
from collections import Counter
k = int(input())
s = input()
d = Counter(s)
res = ''
for t in d:
val = d[t]
if val % k == 0:
res += t * (val // k)
else:
res = '-1'
break
if res != '-1':
res *= k
print(res)
|
[
"npkhanh93@gmail.com"
] |
npkhanh93@gmail.com
|
9d7639d189d421797740d682aac51312abee9e92
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4011/codes/1745_1531.py
|
f673af6f93026f7831e6b2d8cc72542a9d884f67
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
from math import*
x = eval(input("radiano: "))
k = int(input("Quantidade de termos da serie: "))
n = 0
soma =
while(n < k):
n = n + 1
sinal = (x**(2 + 2*n)/factorial(2*n))
sinal = - sinal
soma = sinal + sinal
print(round(serie, 10))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
f5dd2bb68d941f22a8ece354d5ebe4a7ff628fca
|
736250d9d14552c5fa0aca25b25d9c8a28fcd1a0
|
/mtmpro/mtmapp/migrations/0001_initial.py
|
42368fcde0eff7c119ef4b9236f3139dcdb96da7
|
[] |
no_license
|
maheswatapradhan/feedback
|
57f052a2082902cb8a72b474e0b863b7a00d1c9c
|
31c7dcb113a38e29b3a56481fcb9ae2fce7d61a2
|
refs/heads/master
| 2020-09-15T23:42:32.041306
| 2019-11-23T12:54:25
| 2019-11-23T12:54:25
| 223,585,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-09-16 11:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cno', models.IntegerField()),
('cname', models.CharField(max_length=100)),
('fee', models.IntegerField()),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sno', models.IntegerField()),
('sname', models.CharField(max_length=100)),
('location', models.CharField(max_length=100)),
('marks', models.IntegerField()),
],
),
migrations.AddField(
model_name='course',
name='student',
field=models.ManyToManyField(to='mtmapp.Student'),
),
]
|
[
"test@test.com"
] |
test@test.com
|
e92bb7009b48dbf53be81f216d049bab6787cdce
|
5d61565651b7ba5fa8fade3313a5e82fca8b6686
|
/login/migrations/0003_auto_20190709_2213.py
|
58c72a12c002fd6586fd9fbdb94b2ed1aaacc6c2
|
[] |
no_license
|
lonelyxmas/ISMS
|
d597b00072bfa77907875f575b866fbb1fb53295
|
08c5e2f3518fc639cf1a1f2869f4b2f3ae58e306
|
refs/heads/master
| 2023-08-14T12:02:59.001215
| 2021-03-22T03:34:58
| 2021-03-22T03:34:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
# Generated by Django 2.1.4 on 2019-07-09 14:13
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('login', '0002_auto_20190704_0826'),
]
operations = [
migrations.AlterField(
model_name='user',
name='FID',
field=models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='user',
name='FType',
field=models.IntegerField(choices=[(0, '企业账户'), (1, '合作伙伴'), (2, '管理员')], default=0, verbose_name='用户类型'),
),
]
|
[
"11325818@qq.com"
] |
11325818@qq.com
|
db9503f8d4917677b10f97a48c4f912d05a9290a
|
acc244c97a943d8e2074339afa1bff1274ae4cfc
|
/CGATPipelines/PipelineMedip.py
|
3f12a921f960aaedb163d725a83b325930f8e7fb
|
[] |
no_license
|
eromasko/cgat
|
00114f4c95b439ba6595ddf2092d1a3307347401
|
d82d197f3913b8d65b656c0b205ca48854fdb2a6
|
refs/heads/master
| 2021-01-17T09:37:17.168278
| 2015-02-20T09:03:31
| 2015-02-20T09:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,824
|
py
|
'''
PipelineMedip.py - tasks associated with MedipSeq analysis
==========================================================
'''
import re
import os
import collections
import sqlite3
import CGAT.Experiment as E
import CGAT.Pipeline as P
import CGAT.Database as Database
import CGAT.IOTools as IOTools
from rpy2.robjects import r as R
import rpy2.robjects as ro
PARAMS = {}
def buildDMRStats(tables, method, outfile):
'''build dmr summary statistics.
Creates some diagnostic plots in
<exportdir>/<method> directory.
Tables should be labeled <tileset>_<design>_<method>.
'''
dbhandle = sqlite3.connect(PARAMS["database"])
def togeneset(tablename):
return re.match("([^_]+)_", tablename).groups()[0]
keys_status = "OK", "NOTEST", "FAIL", "NOCALL"
outf = IOTools.openFile(outfile, "w")
outf.write("\t".join(("tileset", "design", "track1", "track2", "tested",
"\t".join(["status_%s" % x for x in keys_status]),
"significant",
"up", "down",
"twofold",
"twofold_up", "twofold_down",
)) + "\n")
all_tables = set(Database.getTables(dbhandle))
outdir = os.path.join(PARAMS["exportdir"], "diff_methylation")
for tablename in tables:
prefix = P.snip(tablename, "_%s" % method)
tileset, design = prefix.split("_")
def toDict(vals, l=2):
return collections.defaultdict(int, [(tuple(x[:l]), x[l]) for x in vals])
E.info("collecting data from %s" % tablename)
tested = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
GROUP BY treatment_name,control_name""" % locals() ).fetchall() )
status = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, status, COUNT(*) FROM %(tablename)s
GROUP BY treatment_name,control_name,status""" % locals() ).fetchall(), 3 )
signif = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE significant
GROUP BY treatment_name,control_name""" % locals() ).fetchall() )
fold2 = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE (l2fold >= 1 or l2fold <= -1) AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
up = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE l2fold > 0 AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
down = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE l2fold < 0 AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
fold2up = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE l2fold > 1 AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
fold2down = toDict(Database.executewait(dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename)s
WHERE l2fold < -1 AND significant
GROUP BY treatment_name,control_name,significant""" % locals() ).fetchall() )
groups = tested.keys()
for treatment_name, control_name in groups:
k = (treatment_name, control_name)
outf.write("\t".join(map(str, (
tileset,
design,
treatment_name,
control_name,
tested[k],
"\t".join([str(status[(treatment_name, control_name, x)])
for x in keys_status]),
signif[(k)],
up[k], down[k],
fold2[k],
fold2up[k], fold2down[k]))) + "\n")
###########################################
###########################################
###########################################
# plot length versus P-Value
data = Database.executewait(dbhandle,
'''SELECT end - start, pvalue
FROM %(tablename)s
WHERE significant''' % locals() ).fetchall()
# require at least 10 datapoints - otherwise smooth scatter fails
if len(data) > 10:
data = zip(*data)
pngfile = "%(outdir)s/%(tileset)s_%(design)s_%(method)s_pvalue_vs_length.png" % locals()
R.png(pngfile)
R.smoothScatter(R.log10(ro.FloatVector(data[0])),
R.log10(ro.FloatVector(data[1])),
xlab='log10( length )',
ylab='log10( pvalue )',
log="x", pch=20, cex=.1)
R['dev.off']()
outf.close()
|
[
"andreas.heger@gmail.com"
] |
andreas.heger@gmail.com
|
7a821db6e73317f1eda8b4668d934a936b9bc173
|
efb3d0c2f9fcc5be631323e31f4b8dfcdd0ab676
|
/compiler/tests/14_replica_column_test.py
|
c8d50a539879db74ee9e9e7d09880960e2cc6270
|
[
"BSD-3-Clause"
] |
permissive
|
kanokkorn/OpenRAM
|
5f30beb35e3c161fbf0d233b59fe7d7805d3c348
|
3a9693e37fd3afbd52001839966b0f2811fb4ccd
|
refs/heads/master
| 2022-06-03T12:53:47.750245
| 2022-05-27T15:53:05
| 2022-05-27T15:53:05
| 189,780,330
| 0
| 0
|
BSD-3-Clause
| 2021-04-07T06:49:08
| 2019-06-01T21:47:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
#!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California
# All rights reserved.
#
import unittest
from testutils import *
import sys, os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
from globals import OPTS
from sram_factory import factory
import debug
class replica_column_test(openram_test):
def runTest(self):
config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
globals.init_openram(config_file)
if OPTS.tech_name == "sky130":
num_spare_rows = 1
num_spare_cols = 1
else:
num_spare_rows = 0
num_spare_cols = 0
debug.info(2, "Testing replica column for single port")
a = factory.create(module_type="replica_column",
rows=4 + num_spare_rows,
rbl=[1, 0],
replica_bit=1,
column_offset=num_spare_cols)
self.local_check(a)
globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
|
[
"mrg@ucsc.edu"
] |
mrg@ucsc.edu
|
7b205e91d3d2e6bea20b6b48b78dc7bf2b245db8
|
c908dacdc0006e247aa529dddb98bc1d67fbf7c8
|
/user.py
|
c2f9669f15bbddd02c3b88046a27e25547ba194d
|
[] |
no_license
|
TomeCirun/flask_blog
|
40e3bd041fd7ba376c181073c92e19f296aca928
|
de34ac14e2e3e2044e3f327e288eefadf34b7faf
|
refs/heads/main
| 2023-03-05T13:51:37.335673
| 2021-02-17T12:04:00
| 2021-02-17T12:04:00
| 339,709,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
class User():
def __init__(self,id,username,password):
self.id = id
self.username = username
self.password = password
def __str__(self):
return f'User Id: {self.id}'
|
[
"cirun@live.com"
] |
cirun@live.com
|
7332bb72184308f1c755b9859e825e727dc18a52
|
2205363ea412aae36aa2c5f8b7d608cd8a158a03
|
/Personal_Blog/Pb/Pb/settings.py
|
d3f8de8c66bb1455f934b84f6bb3190cd42b086b
|
[] |
no_license
|
Akanksha2403/HacktoberFest2020
|
986ef7ba5595679085e5159d35c5a30d9e91ebc5
|
789762e3a4a3ad23fd2c1ca3b6cc3bc8f39eed82
|
refs/heads/master
| 2023-08-28T04:25:07.466359
| 2021-10-20T10:16:46
| 2021-10-20T10:16:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,674
|
py
|
"""
Django settings for Pb project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-0r=r9##5pcrhvdnxxoblg4uj7#@^n$z3t%+a7&t@1_4ebckoxo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Blog.apps.BlogConfig',
'chat.apps.ChatConfig',
'resume',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Pb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR/'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Pb.wsgi.application'
ASGI_APPLICATION = 'chatty.asgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"lit2020026@gmail.com"
] |
lit2020026@gmail.com
|
38ffeefe71c4acb79a5a838efeb26765465afa7f
|
159d4ae61f4ca91d94e29e769697ff46d11ae4a4
|
/venv/bin/iptest
|
62ac6036fc5f261d69ea933bb91ed9bee7ded5ca
|
[
"MIT"
] |
permissive
|
davidycliao/bisCrawler
|
729db002afe10ae405306b9eed45b782e68eace8
|
f42281f35b866b52e5860b6a062790ae8147a4a4
|
refs/heads/main
| 2023-05-24T00:41:50.224279
| 2023-01-22T23:17:51
| 2023-01-22T23:17:51
| 411,470,732
| 8
| 0
|
MIT
| 2023-02-09T16:28:24
| 2021-09-28T23:48:13
|
Python
|
UTF-8
|
Python
| false
| false
| 269
|
#!/Users/yenchiehliao/Dropbox/bisCrawler/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from IPython.testing.iptestcontroller import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"davidycliao@gmail.com"
] |
davidycliao@gmail.com
|
|
412d94ad7ce1d9d7b92b6406d8aa5350f3f77fe9
|
2e79b8f2e4cc5ea10789de787f787fdc56137993
|
/leetcode/438.找到字符串中所有字母异味词.py
|
e1f674a878c118e39a1c3fa3bfafdb8b51fc9564
|
[] |
no_license
|
wangye707/Test
|
d486ccb0947f6a83662a73fb56554260d1445c30
|
0d5fb8ea7da79d7d168d99f7158c8aa5757a1d35
|
refs/heads/master
| 2020-06-04T05:48:46.132054
| 2020-04-28T14:53:30
| 2020-04-28T14:53:30
| 191,894,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
#!D:/workplace/python
# -*- coding: utf-8 -*-
# @File : 438.找到字符串中所有字母异味词.py
# @Author: WangYe
# @Date : 2019/9/25
# @Software: PyCharm
def findAnagrams(s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
out = []
need = {}
for i in p:
if i in need:
need[i] += 1
else:
need[i] = 1
l = len(p)
win = {}
for i in range(len(s)-l+1):
if i ==0:
win = {}
for k in s[i:i + l]:
if k in win:
win[k] += 1
else:
win[k] = 1
else:
# print(s[i-1],win)
if win[s[i-1]] >1:
win[s[i-1]] -=1
else:
del win[s[i-1]]
if s[i+l-1] in win:
win[s[i+l-1]] +=1
else:
win[s[i+l-1]] = 1
if win==need:
out.append(i)
return out
s = "cbaebabacd"
p = "abc"
print(findAnagrams(s,p))
|
[
"1119744330@qq.com"
] |
1119744330@qq.com
|
e170f688e59520f390ab02a6b3e1b52b161b747b
|
66bfac516682bc8c3c804a5b7414cfc8b3440186
|
/leads/apps/leads/serializers.py
|
5e9d220555f6c26071a166a7b386b109ee1a7eb8
|
[] |
no_license
|
Izaiasjun1Dev/leads
|
190d1bf01f1809c34cb53582e0f1020c3d704b58
|
22a209b43fd0eb60218deba731c9bf189ea9568a
|
refs/heads/master
| 2023-04-05T15:15:39.834194
| 2021-03-30T11:35:36
| 2021-03-30T11:35:36
| 352,966,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
from rest_framework import serializers
from .models import Lead
# Serializador lead
class LeadSerializer(serializers.ModelSerializer):
class Meta:
model = Lead
fields = '__all__'
|
[
"solucaoprogramer@gmail.com"
] |
solucaoprogramer@gmail.com
|
71f27e6f44fc1dfef7571b27982acccf33236218
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/ring/siren.py
|
7f1b147471d271411715ee41520529c0afef4805
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
"""Component providing HA Siren support for Ring Chimes."""
import logging
from typing import Any
from ring_doorbell.const import CHIME_TEST_SOUND_KINDS, KIND_DING
from homeassistant.components.siren import ATTR_TONE, SirenEntity, SirenEntityFeature
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN
from .entity import RingEntityMixin
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Create the sirens for the Ring devices."""
devices = hass.data[DOMAIN][config_entry.entry_id]["devices"]
sirens = []
for device in devices["chimes"]:
sirens.append(RingChimeSiren(config_entry, device))
async_add_entities(sirens)
class RingChimeSiren(RingEntityMixin, SirenEntity):
"""Creates a siren to play the test chimes of a Chime device."""
_attr_available_tones = CHIME_TEST_SOUND_KINDS
_attr_supported_features = SirenEntityFeature.TURN_ON | SirenEntityFeature.TONES
_attr_translation_key = "siren"
def __init__(self, config_entry: ConfigEntry, device) -> None:
"""Initialize a Ring Chime siren."""
super().__init__(config_entry.entry_id, device)
# Entity class attributes
self._attr_unique_id = f"{self._device.id}-siren"
def turn_on(self, **kwargs: Any) -> None:
"""Play the test sound on a Ring Chime device."""
tone = kwargs.get(ATTR_TONE) or KIND_DING
self._device.test_sound(kind=tone)
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
f9d21162737f40168c323f56d4a303bf6211ce0c
|
c6d89d2507efe02ead1802649a769e021795b2b6
|
/categories/context_processors.py
|
cb9c2687489bdc34c2746a89d05b11c34a37b16c
|
[] |
no_license
|
ikonitas/pleasuresallmine
|
b671b05d2f13428973cc19d39e58d0b56d1914f0
|
875e6067a202be801a9b1fddb27c4d313fd133f4
|
refs/heads/master
| 2021-05-29T19:50:39.812885
| 2014-11-27T21:22:22
| 2014-11-27T21:22:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
# coding=utf-8
from django.core.cache import cache
from models import Category
def list_categories(request):
categories = cache.get('list_categories')
if not categories:
categories = Category.objects.filter(
is_active=True).order_by('sort_order')
cache.set('list_categories', categories, 60)
return {'list_categories': categories}
|
[
"ikonitas@gmail.com"
] |
ikonitas@gmail.com
|
29511c1e8bcf903725d957b2e420756cc1908ad8
|
29d7ba390d4b6046666f783e682ea248108ea900
|
/cbagent/__main__.py
|
09669d3c032cc65f432c457e3e7024f81dfcc2cd
|
[
"Apache-2.0"
] |
permissive
|
pavel-paulau/cbagent
|
5f289fbaf08b997b55d270944d67f716ec1a127a
|
f905974d663e0320e55a00076d292cbf489e53d9
|
refs/heads/master
| 2020-04-26T09:55:43.761203
| 2014-07-31T12:41:18
| 2014-07-31T12:41:18
| 13,084,444
| 2
| 1
| null | 2014-06-19T02:15:22
| 2013-09-25T04:52:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,380
|
py
|
import sys
from optparse import OptionParser
from cbagent.collectors.active_tasks import ActiveTasks
from cbagent.collectors.iostat import IO
from cbagent.collectors.latency import Latency
from cbagent.collectors.observe import ObserveLatency
from cbagent.collectors.net import Net
from cbagent.collectors.ns_server import NSServer
from cbagent.collectors.ps import PS
from cbagent.collectors.sync_gateway import SyncGateway
from cbagent.collectors.xdcr_lag import XdcrLag
from cbagent.settings import Settings
def main():
parser = OptionParser(prog="cbagent")
parser.add_option("--at", action="store_true", dest="active_tasks",
help="Active tasks")
parser.add_option("--io", action="store_true", dest="iostat",
help="iostat")
parser.add_option("--l", action="store_true", dest="latency",
help="Latency")
parser.add_option("--o", action="store_true", dest="observe",
help="Observe latency")
parser.add_option("--n", action="store_true", dest="net",
help="Net")
parser.add_option("--ns", action="store_true", dest="ns_server",
help="ns_server")
parser.add_option("--ps", action="store_true", dest="ps",
help="ps CPU, RSS and VSIZE")
parser.add_option("--sg", action="store_true", dest="sync_gateway",
help="Sync Gateway")
parser.add_option("--x", action="store_true", dest="xdcr_lag",
help="XDCR lag")
options, args = parser.parse_args()
if not args:
sys.exit("No configuration provided")
if options.active_tasks:
collector = ActiveTasks
elif options.iostat:
collector = IO
elif options.latency:
collector = Latency
elif options.observe:
collector = ObserveLatency
elif options.net:
collector = Net
elif options.ns_server:
collector = NSServer
elif options.ps:
collector = PS
elif options.sync_gateway:
collector = SyncGateway
elif options.xdcr_lag:
collector = XdcrLag
else:
sys.exit("No collector selected")
settings = Settings()
settings.read_cfg(args[0])
collector = collector(settings)
collector.update_metadata()
collector.collect()
if __name__ == '__main__':
main()
|
[
"pavel.paulau@gmail.com"
] |
pavel.paulau@gmail.com
|
1fa53956af9d567b5bb6cde0572f8a7cb11d736f
|
70121257e52e0fd2f0895414fcee3c991737443a
|
/python_recipes/tfpreprocess_cifar.py
|
33aaef3fdca4998831ffa5306a3bf25f080ae646
|
[] |
no_license
|
OlgaBelitskaya/cookbooks
|
2e54208bb5e5157814deea6ff71cd7ce5b1e4972
|
216dde3e5617203371ed4c4bb7d9e8391640c588
|
refs/heads/master
| 2021-07-11T15:56:44.923442
| 2021-03-25T08:38:46
| 2021-03-25T08:38:46
| 99,447,645
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
import warnings; warnings.filterwarnings('ignore')
import tensorflow as tf,numpy as np,pandas as pd
import tensorflow_datasets as tfds
from IPython.display import display,HTML
pd.set_option('precision',3)
tf.keras.backend.set_floatx('float64')
tfds.disable_progress_bar()
img_size=32
buffer_size,batch_size=10000,64
c1,c2,f1,f2,fs1,fs2=\
'#11ff66','#6611ff','Wallpoet','Orbitron',20,10
def dhtml(string,fontcolor=c1,font=f1,fontsize=fs1):
display(HTML("""<style>
@import 'https://fonts.googleapis.com/css?family="""\
+font+"""&effect=3d-float';</style>
<h1 class='font-effect-3d-float'
style='font-family:"""+font+\
"""; color:"""+fontcolor+\
"""; font-size:"""+str(fontsize)+"""px;'>
%s</h1>"""%string))
def load_cifar():
cifar=tfds.builder('cifar10')
cifar.download_and_prepare()
ds=cifar.as_dataset(shuffle_files=False,
split=['train','test'])
cifar_train,cifar_test=ds[0],ds[1]
dhtml(cifar.info.features['image'],c2,f2,fs2)
dhtml(cifar.info.features['label'],c2,f2,fs2)
cifar_train=cifar_train.map(
lambda item:(tf.cast(item['image'],tf.float32)/255.,
tf.cast(item['label'],tf.int32)))
cifar_test=cifar_test.map(
lambda item:(tf.cast(item['image'],tf.float32)/255.,
tf.cast(item['label'],tf.int32)))
tf.random.set_seed(123)
cifar_train=cifar_train.shuffle(
buffer_size=buffer_size,
reshuffle_each_iteration=False)
cifar_valid=cifar_train.take(buffer_size).batch(batch_size)
cifar_train=cifar_train.skip(buffer_size).batch(batch_size)
return cifar_train,cifar_valid,cifar_test
|
[
"safuolga@gmail.com"
] |
safuolga@gmail.com
|
c623380ca8277769f08041e14cc66374a1963eb7
|
5be7afab3f57b7b5365053700386c01bad7031e6
|
/quotes.toscrape.com/1.2.quote_web_scraping/spiders/quotes_spider.py
|
16b47d33a8206b7bb7caf819229b34ef62e264fb
|
[] |
no_license
|
enji-coder/SCRAPY-PROJECTS
|
c0c76e1ef8697320a0cb9b3fa9155a158574a5c1
|
bd65e6f3cf83912bc082ef39aba702db6cc4465c
|
refs/heads/main
| 2023-06-20T19:11:36.764847
| 2021-08-04T04:39:08
| 2021-08-04T04:39:08
| 386,542,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
import scrapy
class ExampleSpider(scrapy.Spider):
name = 'quotes'
allowed_domains = ['example.com']
start_urls = ['http://quotes.toscrape.com']
def parse(self, response):
all_quotes = response.css('div.quote')
# retrive all quotes title , author and tag details
# note it retrive 1st page all data only
for quotes in all_quotes:
desc = quotes.css('span.text::text').extract()
author = quotes.css('.author::text').extract()
tag = quotes.css('div.tags a::text').extract()
yield{
'--->> desc': desc,
'author': author,
'tag': tag,
}
|
[
"47570231+enji-coder@users.noreply.github.com"
] |
47570231+enji-coder@users.noreply.github.com
|
e6ea0a18c418751b3458be9dd1196e1a7f5514d0
|
2d13b3206b04d663eed9c5cfe7b6d273abaab33e
|
/2.Algorithm/pycharm/SW Academy/20200309/harvest.py
|
89098f8eaff5f7281c33299f947b60d69d741907
|
[] |
no_license
|
hdp0545/TIL
|
0ba5378274f0076cd2b029581b292785a77207da
|
6d6e5e54373bd71606823e97b3a5fb2d63a2784e
|
refs/heads/master
| 2023-05-24T12:37:33.690750
| 2023-05-19T06:57:49
| 2023-05-19T06:57:49
| 235,004,133
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
for test_case in range(1, int(input())+1):
N = int(input())
matrix = [list(map(int, [n for n in input()])) for _ in range(N)]
result = 0
c = N // 2
for i in range(N):
di = (N//2) - abs(i - (N//2))
result += sum(matrix[i][c-di:c+di+1])
print('#{} {}'.format(test_case, result))
|
[
"hdp0545@gmail.com"
] |
hdp0545@gmail.com
|
569c0fe40b397c4990eb34ce4716eead233cf51f
|
e0ede722874d222a789411070f76b50026bbe3d8
|
/practice/solution/0040_combination_sum_ii.py
|
522d0209dcadd27bc9829d15d2270d94bb200cd4
|
[] |
no_license
|
kesarb/leetcode-summary-python
|
cd67456cb57bdff7ee227dab3930aaf9c2a6ad00
|
dc45210cb2cc50bfefd8c21c865e6ee2163a022a
|
refs/heads/master
| 2023-05-26T06:07:25.943854
| 2021-06-06T20:02:13
| 2021-06-06T20:02:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
class Solution(object):
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
self.res = []
self.dfs(0, sorted(candidates), target, [])
return self.res
def dfs(self, start, candidates, target, value_list):
if target < 0:
return
if not target:
self.res.append(value_list)
for i in range(start, len(candidates)):
if i > start and candidates[i] == candidates[i - 1]:
continue
self.dfs(i + 1, candidates, target - candidates[i], value_list + [candidates[i]])
|
[
"weikunhan@g.ucla.edu"
] |
weikunhan@g.ucla.edu
|
b7f7294d6eed3c6580709c80a3bbdedfde794b91
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03767/s342121711.py
|
e279603e0ad4d33f1c70bcc3c868122d20a4b586
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
n = int(input())
a = list(map(int, input().split()))
a.sort(reverse = True)
list = []
for i, j in enumerate(a):
if i % 2 == 1:
list.append(j)
answer = sum(list[0 : n])
print(answer)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
0d497de579e262500807394359bad38278397bee
|
90ea49bb872623a0fc117632df0232f26e078033
|
/redis_main.py
|
6e22c3d257bc2ca5b18745dc8e70d73601aefcc6
|
[
"MIT"
] |
permissive
|
JX-Wang/Redis-servcie-T
|
e4612967a30c8c18ba5fa51aac91482e5f4f591a
|
26005d0b15defa8628220512046aadc94765bd5b
|
refs/heads/master
| 2020-06-17T04:39:28.779495
| 2019-07-09T12:35:38
| 2019-07-09T12:35:38
| 195,799,949
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
# usr/bin/env python
# coding:utf-8
"""
redis Notes
============
Date@2019/7/9
Author@Wangjunxiong
"""
import redis
try:
r = redis.Redis(host="39.106.165.57", port=6379, db=0)
r.get("msg")
except Exception as e:
print "Connect Error as -> ", str(e)
|
[
"1411349759@qq.com"
] |
1411349759@qq.com
|
69e9bebc4513c00a473c70457e1a049832307ad5
|
8ebb138562884f01cae3d3ffaad9501a91e35611
|
/dbCruiseKeywords/insertKeywordsAMT09.py
|
df5628bef408fcbba07deedb761444ed58a7b142
|
[] |
no_license
|
simonscmap/DBIngest
|
7b92214034e90f8de88b06c17b48f83c769d8d35
|
9ae035cbf7453df375f0af5e920df3880a419107
|
refs/heads/master
| 2021-07-16T07:12:31.749027
| 2020-08-13T16:28:24
| 2020-08-13T16:28:24
| 200,295,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
import sys
import pycmap
sys.path.append('../')
import insertFunctions as iF
import config_vault as cfgv
import pandas as pd
sys.path.append('../dbCatalog/')
import catalogFunctions as cF
"""-----------------------------"""
""" AMT09 CRUISE KEYWORDS"""
"""-----------------------------"""
cruise_name = 'AMT09'
server = 'Rainier'
rawFilePath = cfgv.rep_cruise_keywords_raw
rawFileName = 'AMT09.xlsx'
keyword_col = 'cruise_keywords'
import sys
import pycmap
sys.path.append('../')
import insertFunctions as iF
import config_vault as cfgv
import pandas as pd
sys.path.append('../dbCatalog/')
import catalogFunctions as cF
"""-----------------------------"""
""" AMT09 CRUISE KEYWORDS"""
"""-----------------------------"""
cruise_name = 'AMT09'
server = 'Rainier'
rawFilePath = cfgv.rep_cruise_keywords_raw
rawFileName = 'AMT09.xlsx'
keyword_col = 'cruise_keywords'
############################
""" Reads in the keyword excel file"""
df = pd.read_excel(rawFilePath + rawFileName)
ID = cF.getCruiseID(cruise_name)
prov_df = cF.getLonghurstProv(cruise_name)
ocean_df = cF.getOceanName(cruise_name)
seasons_df = cF.getCruiseSeasons(cruise_name)
months_df = cF.getCruiseMonths(cruise_name)
years_df = cF.getCruiseYear(cruise_name)
details_df = cF.getCruiseDetails(cruise_name)
short_name_df = cF.getCruiseAssosiatedShortName(cruise_name)
# long_name_df = cF.getCruiseAssosiatedLongName(cruise_name)
short_name_syn_df = cF.getShortNameSynonyms(cruise_name)
dataset_name_df = cF.getCruiseAssosiatedDataset_Name(cruise_name)
df = cF.addDFtoKeywordDF(df, dataset_name_df)
df = cF.addDFtoKeywordDF(df, short_name_syn_df)
df = cF.addDFtoKeywordDF(df, prov_df)
df = cF.addDFtoKeywordDF(df, ocean_df)
df = cF.addDFtoKeywordDF(df, seasons_df)
df = cF.addDFtoKeywordDF(df, months_df)
df = cF.addDFtoKeywordDF(df, years_df)
df = cF.addDFtoKeywordDF(df, details_df)
df = cF.addDFtoKeywordDF(df, short_name_df)
# df = cF.addDFtoKeywordDF(df, long_name_df)
df = cF.removeDuplicates(df)
df = cF.stripWhitespace(df,keyword_col)
df = cF.removeAnyRedundantWord(df)
""" INSERTS INTO tblCruise_Keywords"""
cF.insertCruiseKeywords(ID,df,server)
|
[
"norlandrhagen@gmail.com"
] |
norlandrhagen@gmail.com
|
00fb0b2202d07d72ab8075b038f6426190d4d82e
|
de01cb554c2292b0fbb79b4d5413a2f6414ea472
|
/algorithms/Hard/1449.form-largest-integer-with-digits-that-add-up-to-target.py
|
fde6df1309dddc7154ccfbf41d760c6ba9bd1dbe
|
[] |
no_license
|
h4hany/yeet-the-leet
|
98292017eadd3dde98a079aafcd7648aa98701b4
|
563d779467ef5a7cc85cbe954eeaf3c1f5463313
|
refs/heads/master
| 2022-12-10T08:35:39.830260
| 2020-09-02T23:12:15
| 2020-09-02T23:12:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,868
|
py
|
#
# @lc app=leetcode id=1449 lang=python3
#
# [1449] Form Largest Integer With Digits That Add up to Target
#
# https://leetcode.com/problems/form-largest-integer-with-digits-that-add-up-to-target/description/
#
# algorithms
# Hard (42.08%)
# Total Accepted: 6.5K
# Total Submissions: 15.5K
# Testcase Example: '[4,3,2,5,6,7,2,5,5]\n9'
#
# Given an array of integers cost and an integer target. Return the maximum
# integer you can paint under the following rules:
#
#
# The cost of painting a digit (i+1) is given by cost[i] (0 indexed).
# The total cost used must be equal to target.
# Integer does not have digits 0.
#
#
# Since the answer may be too large, return it as string.
#
# If there is no way to paint any integer given the condition, return "0".
#
#
# Example 1:
#
#
# Input: cost = [4,3,2,5,6,7,2,5,5], target = 9
# Output: "7772"
# Explanation: The cost to paint the digit '7' is 2, and the digit '2' is 3.
# Then cost("7772") = 2*3+ 3*1 = 9. You could also paint "977", but "7772" is
# the largest number.
# Digit cost
# 1 -> 4
# 2 -> 3
# 3 -> 2
# 4 -> 5
# 5 -> 6
# 6 -> 7
# 7 -> 2
# 8 -> 5
# 9 -> 5
#
#
# Example 2:
#
#
# Input: cost = [7,6,5,5,5,6,8,7,8], target = 12
# Output: "85"
# Explanation: The cost to paint the digit '8' is 7, and the digit '5' is 5.
# Then cost("85") = 7 + 5 = 12.
#
#
# Example 3:
#
#
# Input: cost = [2,4,6,2,4,6,4,4,4], target = 5
# Output: "0"
# Explanation: It's not possible to paint any integer with total cost equal to
# target.
#
#
# Example 4:
#
#
# Input: cost = [6,10,15,40,40,40,40,40,40], target = 47
# Output: "32211"
#
#
#
# Constraints:
#
#
# cost.length == 9
# 1 <= cost[i] <= 5000
# 1 <= target <= 5000
#
#
#
class Solution:
def largestNumber(self, cost: List[int], target: int) -> str:
|
[
"kevin.wkmiao@gmail.com"
] |
kevin.wkmiao@gmail.com
|
42d77cdb15f7031c1d699412730a8035bd7e471a
|
367d2670c75d385d122bca60b9f550ca5b3888c1
|
/gem5/env/lib/python3.6/site-packages/kombu/asynchronous/http/__init__.py
|
e776977dd40d3fa99f91d5b31d93c25a7d36b580
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
Anish-Saxena/aqua_rowhammer_mitigation
|
4f060037d50fb17707338a6edcaa0ac33c39d559
|
3fef5b6aa80c006a4bd6ed4bedd726016142a81c
|
refs/heads/main
| 2023-04-13T05:35:20.872581
| 2023-01-05T21:10:39
| 2023-01-05T21:10:39
| 519,395,072
| 4
| 3
|
Unlicense
| 2023-01-05T21:10:40
| 2022-07-30T02:03:02
|
C++
|
UTF-8
|
Python
| false
| false
| 591
|
py
|
from kombu.asynchronous import get_event_loop
from .base import Request, Headers, Response
__all__ = ('Client', 'Headers', 'Response', 'Request')
def Client(hub=None, **kwargs):
"""Create new HTTP client."""
from .curl import CurlClient
return CurlClient(hub, **kwargs)
def get_client(hub=None, **kwargs):
"""Get or create HTTP client bound to the current event loop."""
hub = hub or get_event_loop()
try:
return hub._current_http_client
except AttributeError:
client = hub._current_http_client = Client(hub, **kwargs)
return client
|
[
"asaxena317@krishna-srv4.ece.gatech.edu"
] |
asaxena317@krishna-srv4.ece.gatech.edu
|
95a65891632e1c526dfe49cd5b082b05a23fb3a0
|
d80173b86be50c7b8c8dec265bfe4e9b66575f7c
|
/objects.py
|
80305cbd3829557b6a79312bc8f6e6372c6c0d8e
|
[] |
no_license
|
Pk13055/bomberman
|
11450bb673ab1ffbb827d9dddeac3583742ce7e5
|
5e4d4413f9572e520de5604174123393f4463e86
|
refs/heads/master
| 2021-01-19T06:02:39.774474
| 2017-10-20T14:08:16
| 2017-10-20T14:08:16
| 100,589,676
| 6
| 4
| null | 2017-10-20T14:08:17
| 2017-08-17T10:03:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,723
|
py
|
'''
contains the structure of each object
'''
import config
from config import x_fac, y_fac
import numpy as np
class Object:
'''# bombs, walls, bricks all will be of this type'''
def __init__(self, x, y, ch=config._empty):
'''# the x and y coords wrt top left of board'''
self._x = x
self._y = y
self.width = 4
self.height = 2
self.is_killable = False
self._ch = ch
self.structure = np.chararray((self.height, self.width))
self.structure[:, :] = self._ch
self._type = config.types[self._ch]
def get_type(self):
'''# returns whether "Bomber", "Enemy", etc'''
return self._type
def get_size(self):
'''# returns (height, willdth)'''
return self.structure.shape
def get_coords(self):
'''# returns (x, y)'''
return (self._x, self._y)
def update_location(self, board, new_x, new_y, init=False):
'''# update the location of the person'''
if board.draw_obj(type(self)(new_x, new_y)):
# if initial update, will not clear original
if not init:
board.clear_obj(self)
self._x, self._y = new_x, new_y
return True
return False
class Wall(Object):
'''# this is the repr of the wall object
it implements no methods and some data about each wall element'''
def __init__(self, n, m):
'''# preferred size = 2 x 4'''
super(Wall, self).__init__(n, m, config._wall)
self.height = int(m)
self.width = int(n)
def __repr__(self):
''' repr '''
for r in range(self.height):
print("\n")
for c in range(self.width):
try:
print(self.structure[r, c].decode(), end="")
except UnicodeDecodeError:
print(self.structure[r, c], end="")
return ""
class Bomb(Object):
'''# this class implements the bomb object'''
def __init__(self, x, y):
''' init '''
super(Bomb, self).__init__(x, y, config._bomb)
self.timer = 0
self.active = False
self.is_killable = True
self.structure[:, :] = np.matrix([['[', self._ch, self._ch, ']'],
['[', self._ch, self._ch, ']']])
self.blast_radius = [(x + 1 * x_fac, y), (x + 2 * x_fac, y),
(x - 1 * x_fac, y), (x - 2 * x_fac, y), (x,
y + 1 * y_fac), (x, y + 2 * y_fac),
(x, y - 1 * y_fac), (x, y - 2 * y_fac)]
self.owner = None
def detonate(self, time):
'''# begin detonating the bomb (happens one frame after)'''
self.active = True
self.timer = time
def countdown(self):
''' countdown the bomb when active '''
if self.active:
self.timer -= 1
self.structure[:, 1:3] = str(self.timer)
return True
if not self.timer:
self.structure[:, :] = config._expl
def __repr__(self):
''' repr '''
return "<Bomb (%d, %d) | Active : %s | %d frames left>" % \
(self._x, self._y, self.active, self.timer)
class Bricks(Object):
'''# this class implements the bricks Object'''
def __init__(self, x, y):
''' init '''
super(Bricks, self).__init__(x, y, config._bricks)
self.is_killable = True
self.structure[:, :] = self._ch
def __repr__(self):
''' repr '''
return "<Bomb (%d, %d) | Active : %s | %d frames left>" % \
(self._x, self._y, self.active, self.timer)
|
[
"pkrockstar7@gmail.com"
] |
pkrockstar7@gmail.com
|
80f796be803c6cbe9307785b3beaf103fdaf5177
|
52266a44e2aca241707984e3b138775681b3e95f
|
/一本册子/字符串.py
|
739ef5576c270d031768b4e1d83d68f15064ac44
|
[] |
no_license
|
Mr-hongji/pythonNote
|
91b1252711ce0b919fc365932276b89d85d4c16b
|
ff5eda0c8f63345de4d98cff8f0f7ab5254c77a6
|
refs/heads/master
| 2020-04-11T14:49:39.637983
| 2019-05-26T09:21:09
| 2019-05-26T09:21:09
| 161,869,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
print 'hello'
print "I'm Shihongji"
'''
\被称作转译字符,除了用来表示引号,还有比如用
\\表示字符串中的\
\n表示字符串中的换行
'''
print 'I\'m a \"good\" people'
print 'I\'m a "good" people'
print '我是良民\\'
print '我是良民\n吗'
'''
作业
输出以下文字
1、He said, "I'm yours!"
2、\\_v_//
3、Stay hungry,
stay foolish.
---Steve Jobs
4、 *
***
****
***
*
'''
print 'He said, "I\'m yours!\"'
print "\\\\_v_//"
print "Stay hunngry,\nstay foolish.\n -- Steve Jobs"
print '*\n***\n****\n***\n*'
|
[
"shihongji@xiaoneng.cn"
] |
shihongji@xiaoneng.cn
|
26f644c66a8b92892987b70efed6d22aee3270b8
|
6160586aa239eada16e735d40d57970dedbe1dfc
|
/modules/app_additional/app_custom/app_position_update_info.py
|
b2724f015a7037a4d90534964a519bb0702c5061
|
[] |
no_license
|
showgea/AIOT
|
7f9ffcd49da54836714b3342232cdba330d11e6c
|
fe8275aba1c4b5402c7c2c2987509c0ecf49f330
|
refs/heads/master
| 2020-07-23T10:19:37.478456
| 2019-09-23T12:25:59
| 2019-09-23T12:25:59
| 207,525,184
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
import requests
from config import readcfg
header_Gary = readcfg.header_Gary
header_Jenny = readcfg.header_Jenny
url = readcfg.url
def app_position_update_info(positionId, positionName=None, isDefault=None):
url_ = url + "/app/v1.0/lumi/app/position/update/info"
json_ = {
"positionId": positionId,
"positionName": positionName,
"isDefault": isDefault
}
list_ = ["positionId", "positionName", "isDefault"]
num = 0
for i in (positionId, positionName, isDefault):
if i is None:
json_.pop(list_[num])
num += 1
proxies = {'http': 'http://127.0.0.1:8888', 'https': 'http://127.0.0.1:8888'}
print("请求数据:%s" % json_)
r = requests.post(url=url_, json=json_, headers=header_Gary, proxies=proxies, verify=False)
return r
if __name__ == '__main__':
result_main = app_position_update_info("real2.615945282455937024")
print(result_main.text)
|
[
"tangguobing2011@163.com"
] |
tangguobing2011@163.com
|
16e1c2a3227a5d0baee604734564e9d99490428f
|
bc441bb06b8948288f110af63feda4e798f30225
|
/monitor_sdk/model/notify/operation_log_with_meta_pb2.pyi
|
4eb4b81a00e7c4fbc85a45c47c2b95b1eeda8653
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,093
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from monitor_sdk.model.notify.operation_log_pb2 import (
OperationLog as monitor_sdk___model___notify___operation_log_pb2___OperationLog,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class OperationLogWithMeta(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
system = ... # type: typing___Text
topic = ... # type: typing___Text
@property
def data(self) -> monitor_sdk___model___notify___operation_log_pb2___OperationLog: ...
def __init__(self,
*,
system : typing___Optional[typing___Text] = None,
topic : typing___Optional[typing___Text] = None,
data : typing___Optional[monitor_sdk___model___notify___operation_log_pb2___OperationLog] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> OperationLogWithMeta: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> OperationLogWithMeta: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"data",b"data",u"system",b"system",u"topic",b"topic"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
202431c6183a6dcff01d28a468d59da31fa8c7b1
|
cb9f5db2cdaa5c85a4c5950e34fa22d931da445e
|
/seed.py
|
d94c6e63d50668962053785917432aba4eb825c1
|
[] |
no_license
|
rmmistry/movie-ratings-
|
248fdb36a7392cebc8cfc9686cae61a3b0c516c4
|
89050e4da2dc998ab99fca8537d8df75a650e845
|
refs/heads/master
| 2021-01-10T05:13:17.863638
| 2015-10-23T00:58:23
| 2015-10-23T00:58:23
| 44,561,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,133
|
py
|
"""Utility file to seed ratings database from MovieLens data in seed_data/"""
from model import User, Movie, Rating
# from model import Rating
# from model import Movie
from model import connect_to_db, db
from server import app
from datetime import datetime
def load_users():
"""Load users from u.user into database."""
print "Users"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
User.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.user"):
row = row.rstrip()
user_id, age, gender, occupation, zipcode = row.split("|")
user = User(user_id=user_id,
age=age,
zipcode=zipcode)
# We need to add to the session or it won't ever be stored
db.session.add(user)
# Once we're done, we should commit our work
db.session.commit()
def load_movies():
"""Load movies from u.item into database."""
print "Movies"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Movie.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.item"):
row = row.rstrip()
row_splitted = row.split("|")
##throwing out rows with no release date or title is unknown
movie_id = row_splitted[0]
title = row_splitted[1]
released_at = row_splitted[2]
imdb_url = row_splitted[4]
## FIX LATER: optionally, rstrip('(') - why didn't it work?
title = title[:-7]
print title
if released_at != (''):
released_at_ob = datetime.strptime(released_at, '%d-%b-%Y')
else:
pass
movie = Movie(movie_id=movie_id,
title=title,
released_at=released_at_ob,
imdb_url=imdb_url)
# We need to add to the session or it won't ever be stored
db.session.add(movie)
# Once we're done, we should commit our work
db.session.commit()
def load_ratings():
"""Load ratings from u.data into database."""
print "Ratings"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Rating.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.data"):
row = row.rstrip()
row_splitted=row.split()
user_id = row_splitted[0]
movie_id = row_splitted[1]
score = row_splitted[2]
rating = Rating(movie_id=movie_id,
user_id=user_id,
score=score)
# We need to add to the session or it won't ever be stored
db.session.add(rating)
# Once we're done, we should commit our work
db.session.commit()
if __name__ == "__main__":
connect_to_db(app)
# In case tables haven't been created, create them
db.create_all()
# Import different types of data
load_users()
load_movies()
load_ratings()
|
[
"info@hackbrightacademy.com"
] |
info@hackbrightacademy.com
|
1ad194458a4f64f614b9ac861a9e7623c7eaa041
|
29345337bf86edc938f3b5652702d551bfc3f11a
|
/python/src/main/python/pyalink/alink/tests/examples/from_docs/test_alsusersperitemrecommbatchop.py
|
21104be85c65e675c3b2d8099853b1de16f0fc5b
|
[
"Apache-2.0"
] |
permissive
|
vacaly/Alink
|
32b71ac4572ae3509d343e3d1ff31a4da2321b6d
|
edb543ee05260a1dd314b11384d918fa1622d9c1
|
refs/heads/master
| 2023-07-21T03:29:07.612507
| 2023-07-12T12:41:31
| 2023-07-12T12:41:31
| 283,079,072
| 0
| 0
|
Apache-2.0
| 2020-07-28T02:46:14
| 2020-07-28T02:46:13
| null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestAlsUsersPerItemRecommBatchOp(unittest.TestCase):
def test_alsusersperitemrecommbatchop(self):
df_data = pd.DataFrame([
[1, 1, 0.6],
[2, 2, 0.8],
[2, 3, 0.6],
[4, 1, 0.6],
[4, 2, 0.3],
[4, 3, 0.4],
])
data = BatchOperator.fromDataframe(df_data, schemaStr='user bigint, item bigint, rating double')
als = AlsTrainBatchOp().setUserCol("user").setItemCol("item").setRateCol("rating") \
.setNumIter(10).setRank(10).setLambda(0.01)
model = als.linkFrom(data)
predictor = AlsUsersPerItemRecommBatchOp() \
.setItemCol("item").setRecommCol("rec").setK(1).setReservedCols(["item"])
predictor.linkFrom(model, data).print();
pass
|
[
"shaomeng.wang.w@gmail.com"
] |
shaomeng.wang.w@gmail.com
|
81a39a0d1720fe639ac2b59e7861b623c6118af5
|
2324dea2cb3003c8ab7e8fd80588d44973eb8c77
|
/Euler_1_17a.py
|
9a350a6d333f32263cf6731390cfab23de618e79
|
[] |
no_license
|
MikeOcc/MyProjectEulerFiles
|
5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56
|
4d066d52380aade215636953589bf56d6b88f745
|
refs/heads/master
| 2021-01-16T18:45:44.133229
| 2015-05-27T18:28:43
| 2015-05-27T18:28:43
| 5,876,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
def p17():
def lowest_digit(n):
return n/10, n%10
def words(n):
if n > 999:
raise ValueError, "Number too big."
digits = [None, 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
'eight', 'nine']
teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
'sixteen', 'seventeen', 'eighteen', 'nineteen']
tens = [None, None, 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
'seventy', 'eighty', 'ninety']
n, o = lowest_digit(n)
n, t = lowest_digit(n)
n, h = lowest_digit(n)
result = []
if t == 1:
result.append(teens[o])
else:
if o:
result.append(digits[o])
if t:
result.append(tens[t])
if h:
if t or o:
result.append('and')
result.append('hundred')
result.append(digits[h])
#return ''.join(reversed(result))
return ''.join(result)
c = 0
for i in range(1,1000):
c += len(words(i))
c+=len('onethousand')
print c
p17()
|
[
"mike.occhipinti@mlsassistant.com"
] |
mike.occhipinti@mlsassistant.com
|
77a600b8a161271244c70a072a2ad68e0c19c0f9
|
3712a929d1124f514ea7af1ac0d4a1de03bb6773
|
/开班笔记/个人项目/weather/venv/Scripts/pip3-script.py
|
a6ac6cc88412f3e6968662a23c89959c23f69bbe
|
[] |
no_license
|
jiyabing/learning
|
abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9
|
6059006b0f86aee9a74cfc116d2284eb44173f41
|
refs/heads/master
| 2020-04-02T20:47:33.025331
| 2018-10-26T05:46:10
| 2018-10-26T05:46:10
| 154,779,387
| 0
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 446
|
py
|
#!E:\学习文件\python学习资料\开班笔记\个人项目\weather\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"yabing_ji@163.com"
] |
yabing_ji@163.com
|
410d7498c362b982e00c1371ea8e80ffedc787f5
|
2ecfe0e10d10513917e4f2770e0a56075404c5d8
|
/oldnumba/tests/test_exceptions.py
|
80cbe4e56325c6d8248dd39bfb2723c2511aeeb1
|
[
"BSD-2-Clause"
] |
permissive
|
laserson/numba
|
84ab7615ea0177b496a63e2a86319f0b12992cd2
|
35546517b27764a9120f6dfcd82eba7f4dd858cb
|
refs/heads/master
| 2020-05-20T23:13:23.011971
| 2014-12-08T20:16:20
| 2014-12-08T20:16:20
| 16,754,385
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
"""
>>> boom()
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'boom'
>>> boom2()
Traceback (most recent call last):
...
TypeError: 'object' object is not callable
>>> boom3()
Traceback (most recent call last):
...
TypeError: 'object' object is not callable
"""
import sys
import ctypes
from numba import *
import numpy as np
@autojit(backend='ast')
def boom():
    # int() on a non-numeric literal raises ValueError (pinned by the module doctest).
    return int('boom')
@jit(int_())
def boom2():
    # Calling a plain object() raises TypeError (pinned by the module doctest).
    return object()('boom')
@jit(complex128())
def boom3():
    # Same TypeError as boom2, but with a complex return signature.
    return object()('boom')
if __name__ == "__main__":
    # Run the doctests at the top of this module through numba's test harness.
    import numba
    numba.testing.testmod()
|
[
"markflorisson88@gmail.com"
] |
markflorisson88@gmail.com
|
d315787bb6b8a33384f02df4fd9358fc7f3ae68e
|
f359c953ef823cc44f7d87a3736c3e4fb1817c0b
|
/EDBRCommon/python/simulation/RunIIDR74X50ns/TTbar/TTaw.py
|
71536ff1fd213b3a0b0ae79234018df0b109d56f
|
[] |
no_license
|
jruizvar/ExoDiBosonResonancesRun2
|
aa613200725cf6cd825d7bcbde60d2e39ba84e39
|
b407ab36504d0e04e6bddba4e57856f9f8c0ec66
|
refs/heads/Analysis76X
| 2021-01-18T20:00:57.358494
| 2016-05-30T21:30:19
| 2016-05-30T21:30:19
| 23,619,682
| 1
| 1
| null | 2016-04-22T18:38:45
| 2014-09-03T12:41:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
import FWCore.ParameterSet.Config as cms

# Process every event in the input files (-1 = no limit).
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Primary and secondary input file lists for the PoolSource below.
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)

# RunIISpring15DR74 TTbar (powheg+pythia8) MINIAODSIM files, 50ns, MCRUN2_74_V9A-v4.
readFiles.extend( [
    '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FAB076ED-590F-E511-B784-0CC47A4DEEBA.root',
    '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FC007331-5E0F-E511-8D0C-0025904B1424.root',
    '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FC9BEF1E-540F-E511-8740-002590E39F36.root',
    '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FCD4075D-6A0F-E511-AA8B-00259073E410.root',
    '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FEC4769D-6E0F-E511-8A65-0025907277E8.root',
    '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FECA6F36-360F-E511-8BA1-0CC47A13D09C.root',
    '/store/mc/RunIISpring15DR74/TT_TuneCUETP8M1_13TeV-powheg-pythia8/MINIAODSIM/Asympt50ns_MCRUN2_74_V9A-v4/60000/FED5EE4E-C910-E511-91E8-AC853D9DAC41.root' ] );
|
[
"jruizvar@cern.ch"
] |
jruizvar@cern.ch
|
e06d790514e028de8404d51db547b5b990b4f864
|
4a5d9f129d5129b34c55171c99f83f0893ae5c11
|
/archives/migrations/0006_categorie_lien.py
|
1d61623e1a6f57d121b4c3b2cf399d28cc058f6f
|
[
"MIT"
] |
permissive
|
fromdanut/syndicat-riviere
|
ec097cf9bf9aec8829069a2a93d4750a36d87a39
|
0fd099524a2a79d0932dbf8b87f8232d470308ad
|
refs/heads/master
| 2018-09-04T19:14:40.490656
| 2018-06-04T10:52:21
| 2018-06-04T10:52:21
| 103,665,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-15 06:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Re-adds the unique `lien` (link) field that migration 0005 removed.

    dependencies = [
        ('archives', '0005_remove_categorie_lien'),
    ]

    operations = [
        migrations.AddField(
            model_name='categorie',
            name='lien',
            field=models.CharField(default='default_link', max_length=30, unique=True),
            # preserve_default=False: 'default_link' only backfills existing rows
            # during this migration; it is not kept as a model default.
            preserve_default=False,
        ),
    ]
|
[
"remidelannoy@hotmail.com"
] |
remidelannoy@hotmail.com
|
e174afa38ec2ea5f548eadf2273ad23fbf7cb7e9
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_105/324.py
|
9ddec5819097ba9f1a61905d441b8271fd8d44f7
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,472
|
py
|
class Item(object):
    """A node in the dependency DAG built by the driver below.

    `parents` are the items this one references; `childs` are the items
    referencing it. An item is a "source" with more than one parent and a
    "dest" with more than one child.
    """

    def __init__(self, index=0):
        self.index = index
        self.parents = []
        self.childs = []

    def is_source(self):
        """True when this item has at least two parents."""
        return 1 < len(self.parents)

    def is_dest(self):
        """True when this item has at least two children."""
        return 1 < len(self.childs)

    def get_dests(self):
        """Return all parentless ancestors reachable from this item.

        An ancestor appears once per distinct path leading to it, so
        duplicates in the result signal multiple paths to the same root.
        """
        if not self.parents:
            return [self]
        roots = []
        for ancestor in self.parents:
            roots.extend(ancestor.get_dests())
        return roots
if __name__ == '__main__':
    # Google Code Jam driver. Python 2 only: raw_input/xrange/print statement.
    T = int(raw_input())
    for test_index in xrange(1, T+1):
        N = int(raw_input())
        # items[0] is an unused placeholder so indices stay 1-based.
        items = [Item(_) for _ in xrange(N+1)]
        for index in xrange(1, N+1):
            nums = map(int, raw_input().split())
            # Mi = count of references (unused below), Ii = referenced indices.
            Mi,Ii = nums[0], nums[1:]
            for ii in Ii:
                items[index].parents.append(items[ii])
                items[ii].childs.append(items[index])
        src_items = filter(lambda item: item.is_source(), items)
        # NOTE(review): dst_items is computed but never used.
        dst_items = filter(lambda item: item.is_dest(), items)
        def check_item(item):
            # True when some root ancestor is reachable via two or more paths
            # (i.e. it occurs more than once in the multiset of roots).
            dests = item.get_dests()
            for dest in set(dests):
                if dests.count(dest) > 1:
                    return True
            return False
        result = False
        for src_item in src_items:
            if check_item(src_item):
                result = True
                break
        print 'Case #%d: %s' % (test_index, 'Yes' if result else 'No')
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
109a875760f5fc39260fd4abcf0b9b11c346051b
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/explore/2020/september/Evaluate_Division.1.py
|
65d4246ab245ebe5ad135c0ae57a97572fd70b22
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
'''
Floyd
You are here!
Your runtime beats 27.33 % of python submissions.
'''
class Solution(object):
    # Floyd-Warshall over a ratio graph: graph[i][j] holds the value of i / j,
    # or +inf when no conversion path is known.
    # NOTE(review): Python 2 only -- relies on itertools.izip; `collections`
    # and `itertools` are presumably imported elsewhere in this file.
    def calcEquation(self, edges, weights, pairs):
        graph = collections.defaultdict(lambda: collections.defaultdict(lambda: float('inf')))
        # Seed direct ratios: i/i = 1, i/j = w, j/i = 1/w, j/j = 1.
        for (i, j), weight in itertools.izip(edges, weights):
            graph[i][i], graph[i][j], graph[j][i], graph[j][j] = 1., weight, 1. / weight, 1.
        # Relax through every intermediate variable `mid`. min() simply replaces
        # inf with any finite path product when one exists.
        for mid in graph:
            for i in graph[mid]:
                for j in graph[mid]:
                    graph[i][j] = min(graph[i][j], graph[i][mid] * graph[mid][j])
        # Queries that remain unreachable (still inf) map to -1.0.
        return [graph[i][j] if graph[i][j] < float('inf') else -1. for i, j in pairs]
|
[
"838255715@qq.com"
] |
838255715@qq.com
|
91bb39e87b153c78a084acbdc38998fcc5de7e04
|
5a01774b1815a3d9a5b02b26ca4d6ba9ecf41662
|
/Module 2/Chapter03/django-myproject-03/quotes/models.py
|
1659b30889e4e5de96390dfb7a8897a216d15bfe
|
[
"MIT"
] |
permissive
|
PacktPublishing/Django-Web-Development-with-Python
|
bf08075ff0a85df41980cb5e272877e01177fd07
|
9f619f56553b5f0bca9b5ee2ae32953e142df1b2
|
refs/heads/master
| 2023-04-27T22:36:07.610076
| 2023-01-30T08:35:11
| 2023-01-30T08:35:11
| 66,646,080
| 39
| 41
|
MIT
| 2023-04-17T10:45:45
| 2016-08-26T12:30:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,578
|
py
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import os
from PIL import Image
from django.db import models
from django.utils.timezone import now as timezone_now
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.urlresolvers import NoReverseMatch
from django.core.files.storage import default_storage as storage
from utils.models import UrlMixin
# Thumbnail (width, height) in pixels; overridable via settings.QUOTES_THUMBNAIL_SIZE.
THUMBNAIL_SIZE = getattr(settings, "QUOTES_THUMBNAIL_SIZE", (50, 50))
def upload_to(instance, filename):
    """Build a timestamped upload path like "quotes/2016/07/20160715065900.png".

    The original base name is discarded (only its lower-cased extension is
    kept), which avoids collisions and unsafe user-supplied filenames.
    """
    now = timezone_now()
    # filename_base is intentionally unused -- only the extension matters.
    filename_base, filename_ext = os.path.splitext(filename)
    return "quotes/%s%s" % (
        now.strftime("%Y/%m/%Y%m%d%H%M%S"),
        filename_ext.lower(),
    )
@python_2_unicode_compatible
class InspirationalQuote(UrlMixin):
    """A quote with author, text, optional picture and 2-letter language code.

    Saving the model also (re)generates a square JPEG thumbnail of the
    picture, stored next to the original as "<name>_thumbnail.jpg"
    (see create_thumbnail).
    """
    author = models.CharField(_("Author"), max_length=200)
    quote = models.TextField(_("Quote"))
    picture = models.ImageField(_("Picture"), upload_to=upload_to, blank=True, null=True)
    language = models.CharField(_("Language"), max_length=2, blank=True, choices=settings.LANGUAGES)

    class Meta:
        verbose_name = _("Inspirational Quote")
        verbose_name_plural = _("Inspirational Quotes")

    def __str__(self):
        return self.quote

    def get_url_path(self):
        # Canonical URL for UrlMixin; empty string when no "quote_detail"
        # route is registered (e.g. before the view is wired up).
        try:
            return reverse("quote_detail", kwargs={"id": self.pk})
        except NoReverseMatch:
            return ""

    def save(self, *args, **kwargs):
        super(InspirationalQuote, self).save(*args, **kwargs)
        # Regenerate the thumbnail after every save; the picture must already
        # be persisted so storage.open can read it back.
        self.create_thumbnail()

    def create_thumbnail(self):
        """Create a center-cropped THUMBNAIL_SIZE JPEG next to the picture.

        Returns a status string -- "" (no picture), "exists", "success" or
        "error". The result is informational only; save() ignores it.
        """
        if not self.picture:
            return ""
        file_path = self.picture.name
        filename_base, filename_ext = os.path.splitext(file_path)
        thumbnail_file_path = "%s_thumbnail.jpg" % filename_base
        if storage.exists(thumbnail_file_path):
            # Thumbnail already generated; nothing to do.
            return "exists"
        try:
            # NOTE(review): opened with mode 'r' (text); PIL generally needs a
            # binary stream ('rb') -- confirm the storage backend tolerates this.
            f = storage.open(file_path, 'r')
            image = Image.open(f)
            width, height = image.size
            # Center-crop to a square so the resize keeps the central region's
            # aspect ratio instead of distorting the whole image.
            if width > height:
                delta = width - height
                left = int(delta/2)
                upper = 0
                right = height + left
                lower = height
            else:
                delta = height - width
                left = 0
                upper = int(delta/2)
                right = width
                lower = width + upper
            image = image.crop((left, upper, right, lower))
            image = image.resize(THUMBNAIL_SIZE, Image.ANTIALIAS)
            f_mob = storage.open(thumbnail_file_path, "w")
            image.save(f_mob, "JPEG")
            f_mob.close()
            return "success"
        except:
            # NOTE(review): bare except maps any failure -- including
            # programming errors -- to "error"; consider narrowing/logging.
            return "error"

    def get_thumbnail_picture_url(self):
        """URL of the thumbnail if it exists, else the original picture URL."""
        if not self.picture:
            return ""
        file_path = self.picture.name
        filename_base, filename_ext = os.path.splitext(file_path)
        thumbnail_file_path = "%s_thumbnail.jpg" % filename_base
        if storage.exists(thumbnail_file_path):
            return storage.url(thumbnail_file_path)
        # Fall back to the original picture when no thumbnail was generated.
        return self.picture.url

    def title(self):
        # Alias of the quote text; presumably consumed by UrlMixin or
        # templates expecting a `title` -- verify against callers.
        return self.quote
|
[
"bhavinsavalia@packtpub.com"
] |
bhavinsavalia@packtpub.com
|
2b062e03f669e6aaead91edb14be24e5af00d892
|
0d76013f6e1ee69713690d6d6e65ce05a3c94de1
|
/account/urls.py
|
e37b608b6be1c0e5f060818a1a26f890b42c089d
|
[] |
no_license
|
rafiulgits/law
|
8f8576980a47dc27ef744a9c32447e69630d3eca
|
42e6e6ac79229b648e023b3ae9c3252919045453
|
refs/heads/master
| 2023-03-05T22:05:25.854131
| 2021-02-20T04:02:52
| 2021-02-20T04:02:52
| 177,262,688
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
from account.views import auth, manage
from django.urls import path
from django.contrib.auth import views as resetviews
from rest_framework_simplejwt.views import TokenRefreshView

# Account API routes: signup/signin, JWT access-token refresh, profile
# management, email verification and the three-step password-reset flow.
# NOTE(review): `resetviews` is imported but unused in this module -- confirm
# before removing.
urlpatterns = [
    path('signup/', auth.SignUp.as_view()),
    path('signin/', auth.SignIn.as_view()),
    path('access-renew/', TokenRefreshView.as_view()),
    path('profile/', manage.Profile.as_view()),
    path('update/', auth.AccountUpdate.as_view()),
    path('password-change/', auth.PasswordChange.as_view()),
    path('verify/', auth.VerifyEmail.as_view()),
    path('password-reset/request/', auth.PasswordResetRequest.as_view()),
    path('password-reset/verify/', auth.VerifyPasswordRequest.as_view()),
    path('password-reset/', auth.PasswordResetView.as_view()),
]
|
[
"avoidcloud@gmail.com"
] |
avoidcloud@gmail.com
|
46d66199b07078ad113d2244608aa0f3dcff80bb
|
ed8cdcce521b8cab33c66f716c0886e17f035d21
|
/.history/script/get_cpu_mem_info_20191222122843.py
|
8d68937cd4d25850ae1c036ceb08d000a04b8098
|
[] |
no_license
|
deancsdfy/AndroidPerformanceTool_windows
|
8ac35729bc651c3af551f090d6788b6ee3f17eb5
|
c4906aa9347e8e5eca68dbb7cf2d66a327c70d1f
|
refs/heads/master
| 2020-11-27T20:38:55.014228
| 2020-01-09T15:55:52
| 2020-01-09T15:55:52
| 229,593,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,591
|
py
|
#! python3
#coding=utf-8
# Collects CPU/memory usage of the current foreground app by running shell
# commands (top/getprop/dumpsys) -- presumably on Android over adb; verify.
import sys,os,re
print(sys.path)
sys.path.append('.')  # allow `from public import ...` when run from this directory
from public import publicfunction as util
PATH = lambda p: os.path.abspath(p)
# Package name of the app currently under test.
package_name = util.get_current_packagename()
# Get the app's memory/CPU usage via a one-shot `top` sample.
def top():
    """Sample the app's CPU%% and MEM%% once from `top` and return (cpu, mem).

    NOTE(review): when grep matches several rows, the last row's values win;
    when it matches none, `cpu`/`mem` are unbound and the return raises.
    """
    print('Starting get mem cpu information...')
    pid=get_pid()
    print(pid)
    top_info = util.shell("top -n 1 | grep %d" %(int(pid))).stdout.readlines()
    for x in top_info:
        temp_list = x.split()
        # Column 8 is %CPU and column 9 is %MEM in this `top` output.
        # NOTE(review): column layout differs across OS versions -- confirm.
        cpu=float(temp_list[8])
        mem=float(temp_list[9])
    print(cpu)
    print(mem)
    return (cpu,mem)
def getCpuNums():
    """Return the number of CPU cores reported by /proc/cpuinfo."""
    processor_lines = util.shell('cat /proc/cpuinfo|grep processor').stdout.readlines()
    return len(processor_lines)
def getCpuInfo():
    """Return the app's CPU usage normalized by core count, rounded to 2 dp.

    Returns 0.0 when `top` produces no matching row. On SDK 23 the %CPU field
    is in column 2 (with surrounding characters stripped); otherwise column 8.
    """
    pid = get_pid()
    cpunums=getCpuNums()
    top_info = util.shell('top -n 1 | grep %d' % (int(pid))).stdout.readlines()
    if(len(top_info)!=0):
        for x in top_info:
            temp_list = x.split()
            if getSDKVersion() == '23':
                # NOTE(review): [2:-2] strips decoration around the value on
                # SDK 23's top format -- confirm against a device.
                cpu = round(float(str(temp_list[2])[2:-2])/cpunums,2)
                print(cpu)
            elif (temp_list[8]!=" "):
                print(float(temp_list[8]))
                cpu = round(float(temp_list[8])/cpunums,2)
            else:
                cpu = 0.0
        # Last matching row wins when grep returns several.
        return cpu
    else:
        return 0.0
def getMemInfo():
    """Return the app's memory usage in MB (value/1024, rounded to 1 dp).

    On SDK 23 the value is taken from `top` column 6; otherwise from the
    TOTAL row of `dumpsys meminfo`.
    """
    pid=get_pid()
    if getSDKVersion() == '23':
        temp_list = util.shell('top -n 1 | grep %d' % (int(pid))).stdout.readlines()
        print(temp_list[6])
        # NOTE(review): indexes the 7th *line* of top output here, not a
        # column of a split row -- confirm this is intended.
        mem=round(float(temp_list[6])/1024,1)
    else:
        mem_info = util.shell('dumpsys meminfo %d |grep TOTAL:' %(int(pid))).stdout.readlines()
        for x in mem_info:
            temp_list = x.split()
            mem=round(float(temp_list[1])/1024,1)
    print(mem)
    return mem
# Device model name.
def getDevicesName():
    """Return the device model string via `getprop ro.product.model`."""
    model = util.shell('getprop ro.product.model').stdout.read()
    return str(model)
# System (SDK) version.
def getSDKVersion():
    """Return the Android SDK version, sliced out of the raw getprop bytes repr."""
    raw = str(util.shell('getprop ro.build.version.sdk').stdout.read())
    return raw[2:-7]
# Get the pid of the foreground activity's process.
def get_pid():
    """Return the pid of the top activity, parsed from `dumpsys activity top`.

    Matches name=value pairs and takes the value of the last one -- presumably
    the "pid=<n>" entry of the ACTIVITY line; verify on the target OS version.
    """
    pattern = re.compile(r"[a-zA-Z0-9\.]+=.[0-9\.]+")
    package = util.shell('dumpsys activity top| grep ACTIVITY').stdout.read()
    pid = pattern.findall(package.decode())[-1].split('=')[1]
    return pid
# Get the app's uid.
def get_uid():
    """Return the app's uid parsed from the 7th line of /proc/<pid>/status."""
    status_cmd = 'cat /proc/' + get_pid() + '/status'
    status_lines = util.shell(status_cmd).stdout.readlines()
    uid_value = status_lines[6].split()[1]
    print('uid为:%s' %(uid_value))
    return str(uid_value)
# Upload traffic -- currently unusable; another way to read tx bytes is needed.
def get_flow_send():
    """Dump per-uid traffic stats from xt_qtaguid (known-broken; debug prints only)."""
    stats_cmd = '"cat proc/net/xt_qtaguid/stats|grep %s"' % get_uid()
    print(stats_cmd)
    traffic = util.shell(stats_cmd).stdout.readlines()
    print(traffic)
if __name__ == "__main__":
    # Smoke-run the collectors once; return values are discarded, each
    # function prints its own result.
    print("Starting get top information...")
    getSDKVersion()
    getCpuInfo()
    getMemInfo()
|
[
"denacsdfy@gmail.com"
] |
denacsdfy@gmail.com
|
da1c27f4df3f3d42ec1025d9f87a1ffc36a10f25
|
d61d05748a59a1a73bbf3c39dd2c1a52d649d6e3
|
/chromium/content/test/gpu/gpu_tests/gpu_integration_test_unittest.py
|
7abe56fc3e5829005d6262afc304c84092b965a5
|
[
"BSD-3-Clause"
] |
permissive
|
Csineneo/Vivaldi
|
4eaad20fc0ff306ca60b400cd5fad930a9082087
|
d92465f71fb8e4345e27bd889532339204b26f1e
|
refs/heads/master
| 2022-11-23T17:11:50.714160
| 2019-05-25T11:45:11
| 2019-05-25T11:45:11
| 144,489,531
| 5
| 4
|
BSD-3-Clause
| 2022-11-04T05:55:33
| 2018-08-12T18:04:37
| null |
UTF-8
|
Python
| false
| false
| 6,491
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import tempfile
import unittest
import mock
from telemetry.testing import browser_test_runner
from gpu_tests import path_util
from gpu_tests import gpu_integration_test
path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'tools', 'perf')
from chrome_telemetry_build import chromium_config
class GpuIntegrationTestUnittest(unittest.TestCase):
  """Exercises GpuIntegrationTest's restart/retry machinery end to end.

  Each case runs a fixture suite from unittest_data through
  browser_test_runner, then asserts on the JSON results and on the
  side-channel counters (browser starts/crashes, test run counts) the
  fixtures record via --test-state-json-path.
  """

  def setUp(self):
    # Populated by _RunIntegrationTest from the test-state JSON file.
    self._test_state = {}

  def testSimpleIntegrationTest(self):
    self._RunIntegrationTest(
        'simple_integration_unittest',
        ['unittest_data.integration_tests.SimpleTest.unexpected_error',
         'unittest_data.integration_tests.SimpleTest.unexpected_failure'],
        ['unittest_data.integration_tests.SimpleTest.expected_flaky',
         'unittest_data.integration_tests.SimpleTest.expected_failure'],
        ['unittest_data.integration_tests.SimpleTest.expected_skip'],
        [])
    # It might be nice to be more precise about the order of operations
    # with these browser restarts, but this is at least a start.
    self.assertEquals(self._test_state['num_browser_starts'], 6)

  def testIntegrationTesttWithBrowserFailure(self):
    # NOTE(review): method name contains a typo ("Testt"); left unchanged in
    # case external test filters reference it by name.
    self._RunIntegrationTest(
        'browser_start_failure_integration_unittest', [],
        ['unittest_data.integration_tests.BrowserStartFailureTest.restart'],
        [], [])
    self.assertEquals(self._test_state['num_browser_crashes'], 2)
    self.assertEquals(self._test_state['num_browser_starts'], 3)

  def testIntegrationTestWithBrowserCrashUponStart(self):
    self._RunIntegrationTest(
        'browser_crash_after_start_integration_unittest', [],
        [('unittest_data.integration_tests.BrowserCrashAfterStartTest.restart')],
        [], [])
    self.assertEquals(self._test_state['num_browser_crashes'], 2)
    self.assertEquals(self._test_state['num_browser_starts'], 3)

  def testRetryLimit(self):
    self._RunIntegrationTest(
        'test_retry_limit',
        ['unittest_data.integration_tests.TestRetryLimit.unexpected_failure'],
        [],
        [],
        ['--retry-limit=2'])
    # The number of attempted runs is 1 + the retry limit.
    self.assertEquals(self._test_state['num_test_runs'], 3)

  def testRepeat(self):
    self._RunIntegrationTest(
        'test_repeat',
        [],
        ['unittest_data.integration_tests.TestRepeat.success'],
        [],
        ['--repeat=3'])
    self.assertEquals(self._test_state['num_test_runs'], 3)

  def testAlsoRunDisabledTests(self):
    self._RunIntegrationTest(
        'test_also_run_disabled_tests',
        ['unittest_data.integration_tests.TestAlsoRunDisabledTests.skip',
         'unittest_data.integration_tests.TestAlsoRunDisabledTests.flaky'],
        # Tests that are expected to fail and do fail are treated as test passes
        [('unittest_data.integration_tests.'
          'TestAlsoRunDisabledTests.expected_failure')],
        [],
        ['--also-run-disabled-tests'])
    self.assertEquals(self._test_state['num_flaky_test_runs'], 4)
    self.assertEquals(self._test_state['num_test_runs'], 6)

  def testStartBrowser_Retries(self):
    # StartBrowser must retry _START_BROWSER_RETRIES times, stopping the
    # browser between attempts, before letting the exception escape.
    class TestException(Exception):
      pass

    def SetBrowserAndRaiseTestException():
      gpu_integration_test.GpuIntegrationTest.browser = (
          mock.MagicMock())
      raise TestException

    gpu_integration_test.GpuIntegrationTest.browser = None
    gpu_integration_test.GpuIntegrationTest.platform = None
    with mock.patch.object(
        gpu_integration_test.serially_executed_browser_test_case.\
        SeriallyExecutedBrowserTestCase,
        'StartBrowser',
        side_effect=SetBrowserAndRaiseTestException) as mock_start_browser:
      with mock.patch.object(
          gpu_integration_test.GpuIntegrationTest,
          'StopBrowser') as mock_stop_browser:
        with self.assertRaises(TestException):
          gpu_integration_test.GpuIntegrationTest.StartBrowser()
        self.assertEqual(mock_start_browser.call_count,
                         gpu_integration_test._START_BROWSER_RETRIES)
        self.assertEqual(mock_stop_browser.call_count,
                         gpu_integration_test._START_BROWSER_RETRIES)

  def _RunIntegrationTest(self, test_name, failures, successes, skips,
                          additional_args):
    """Runs the named fixture suite and checks its recorded outcomes.

    Args:
      test_name: fixture module name under unittest_data.
      failures, successes, skips: exact expected sets of full test names.
      additional_args: extra command-line args for browser_test_runner.
    """
    config = chromium_config.ChromiumConfig(
        top_level_dir=path_util.GetGpuTestDir(),
        benchmark_dirs=[
            os.path.join(path_util.GetGpuTestDir(), 'unittest_data')])
    temp_dir = tempfile.mkdtemp()
    test_results_path = os.path.join(temp_dir, 'test_results.json')
    test_state_path = os.path.join(temp_dir, 'test_state.json')
    try:
      browser_test_runner.Run(
          config,
          [test_name,
           '--write-full-results-to=%s' % test_results_path,
           '--test-state-json-path=%s' % test_state_path] + additional_args)
      with open(test_results_path) as f:
        test_result = json.load(f)
      with open(test_state_path) as f:
        self._test_state = json.load(f)
      actual_successes, actual_failures, actual_skips = (
          self._ExtractTestResults(test_result))
      self.assertEquals(set(actual_failures), set(failures))
      self.assertEquals(set(actual_successes), set(successes))
      self.assertEquals(set(actual_skips), set(skips))
    finally:
      shutil.rmtree(temp_dir)

  def _ExtractTestResults(self, test_result):
    """Flattens trie-shaped JSON results into (successes, failures, skips).

    NOTE(review): uses `basestring`, so this file is Python 2 only.
    """
    delimiter = test_result['path_delimiter']
    failures = []
    successes = []
    skips = []

    def _IsLeafNode(node):
      # Leaf nodes carry an 'expected' string; interior nodes map name
      # components to child dicts.
      test_dict = node[1]
      return ('expected' in test_dict and
              isinstance(test_dict['expected'], basestring))

    node_queues = []
    for t in test_result['tests']:
      node_queues.append((t, test_result['tests'][t]))
    while node_queues:
      node = node_queues.pop()
      full_test_name, test_dict = node
      if _IsLeafNode(node):
        # A test failed when none of its actual results was expected.
        if all(res not in test_dict['expected'].split() for res in
               test_dict['actual'].split()):
          failures.append(full_test_name)
        elif test_dict['expected'] == test_dict['actual'] == 'SKIP':
          skips.append(full_test_name)
        else:
          successes.append(full_test_name)
      else:
        # Interior node: enqueue children under the delimiter-joined name.
        for k in test_dict:
          node_queues.append(
              ('%s%s%s' % (full_test_name, delimiter, k),
               test_dict[k]))
    return successes, failures, skips
|
[
"csineneo@gmail.com"
] |
csineneo@gmail.com
|
96ebd867811570532d8fc6a0934d0475f42f77e1
|
db903a5e99712d1f45e1d45c4d77537f811ae569
|
/src/python/pants/option/global_options_test.py
|
ede2086b69991da2a0ecc2330dd8015392456304
|
[
"Apache-2.0"
] |
permissive
|
Hirni-Meshram2/pants
|
777db8ea67c1fc66de46f0ab374ba4fff8597357
|
e802d62cc68176aa66947a939c771b01f47d5425
|
refs/heads/main
| 2023-05-01T09:23:10.973766
| 2021-05-19T08:24:50
| 2021-05-19T08:24:50
| 366,021,656
| 0
| 2
|
Apache-2.0
| 2021-05-10T11:38:07
| 2021-05-10T11:38:06
| null |
UTF-8
|
Python
| false
| false
| 6,764
|
py
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import sys
from pathlib import Path
from textwrap import dedent
import pytest
from pants.base.build_environment import get_buildroot
from pants.engine.environment import CompleteEnvironment
from pants.engine.internals.scheduler import ExecutionError
from pants.init.options_initializer import OptionsInitializer
from pants.option.global_options import (
DynamicRemoteExecutionOptions,
ExecutionOptions,
GlobalOptions,
)
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.testutil.option_util import create_options_bootstrapper
from pants.util.contextutil import temporary_dir
def create_dynamic_execution_options(
    *,
    initial_headers: dict[str, str],
    token_path: str | None = None,
    plugin: str | None = None,
    local_only: bool = False,
) -> DynamicRemoteExecutionOptions:
    """Bootstrap Pants options with remoting enabled and derive the dynamic options.

    Args:
        initial_headers: seed value for both --remote-store-headers and
            --remote-execution-headers.
        token_path: optional value for --remote-oauth-bearer-token-path.
        plugin: optional "module:func" value for --remote-auth-plugin.
        local_only: forwarded to DynamicRemoteExecutionOptions.from_options
            to force-disable remote execution.
    """
    args = [
        "--remote-cache-read",
        "--remote-execution-address=grpc://fake.url:10",
        "--remote-store-address=grpc://fake.url:10",
        f"--remote-store-headers={initial_headers}",
        f"--remote-execution-headers={initial_headers}",
        "--remote-instance-name=main",
    ]
    if token_path:
        args.append(f"--remote-oauth-bearer-token-path={token_path}")
    if plugin:
        args.append(f"--remote-auth-plugin={plugin}")
    ob = create_options_bootstrapper(args)
    env = CompleteEnvironment({})
    # raise_=False: tolerate the fake addresses/values used by these tests.
    _build_config, options = OptionsInitializer(ob).build_config_and_options(ob, env, raise_=False)
    return DynamicRemoteExecutionOptions.from_options(options, env, local_only=local_only)
def test_dynamic_execution_options_remote_oauth_bearer_token_path() -> None:
    """The token file's contents become a Bearer `authorization` header,
    merged into both the store and execution header sets."""
    with temporary_dir() as tempdir:
        token_path = Path(tempdir, "token.txt")
        token_path.touch()
        token_path.write_text("my-token")
        exec_options = create_dynamic_execution_options(
            initial_headers={"foo": "bar"}, token_path=str(token_path)
        )
    assert exec_options.remote_store_headers == {"authorization": "Bearer my-token", "foo": "bar"}
    assert exec_options.remote_execution_headers == {
        "authorization": "Bearer my-token",
        "foo": "bar",
    }
def test_dynamic_execution_options_local_only() -> None:
    """local_only=True must yield the fully-disabled options value."""
    # Test that local_only properly disables remote execution.
    assert (
        create_dynamic_execution_options(initial_headers={}, local_only=True)
        == DynamicRemoteExecutionOptions.disabled()
    )
def test_dynamic_execution_options_auth_plugin() -> None:
    """An auth plugin can rewrite headers and the instance name, and its
    returned state toggles remote caching on (OK) or off (UNAVAILABLE)."""

    def compute_exec_options(state: str) -> DynamicRemoteExecutionOptions:
        with temporary_dir() as tempdir:
            # NB: For an unknown reason, if we use the same file name for multiple runs, the plugin
            # result gets memoized. So, we use a distinct file name.
            plugin_path = Path(tempdir, f"auth_plugin_{state}.py")
            plugin_path.touch()
            plugin_path.write_text(
                dedent(
                    f"""\
                    from pants.option.global_options import AuthPluginState, AuthPluginResult

                    def auth_func(initial_execution_headers, initial_store_headers, options, **kwargs):
                        return AuthPluginResult(
                            state=AuthPluginState.{state},
                            execution_headers={{
                                **{{k: "baz" for k in initial_execution_headers}},
                                "exec": "xyz",
                            }},
                            store_headers={{
                                **{{k: "baz" for k in initial_store_headers}},
                                "store": "abc",
                                "store_url": options.for_global_scope().remote_store_address,
                            }},
                            instance_name="custom_instance",
                        )
                    """
                )
            )
            # Make the generated plugin importable for the duration of the run.
            sys.path.append(tempdir)
            result = create_dynamic_execution_options(
                initial_headers={"foo": "bar"}, plugin=f"auth_plugin_{state}:auth_func"
            )
            sys.path.pop()
            return result

    exec_options = compute_exec_options("OK")
    assert exec_options.remote_store_headers == {
        "store": "abc",
        "foo": "baz",
        "store_url": "grpc://fake.url:10",
    }
    assert exec_options.remote_execution_headers == {"exec": "xyz", "foo": "baz"}
    assert exec_options.remote_cache_read is True
    assert exec_options.remote_instance_name == "custom_instance"

    # UNAVAILABLE disables caching and leaves the original instance name.
    exec_options = compute_exec_options("UNAVAILABLE")
    assert exec_options.remote_cache_read is False
    assert exec_options.remote_instance_name == "main"
def test_execution_options_remote_addresses() -> None:
    """grpc/grpcs schemes normalize to http/https; invalid schemes raise."""
    # Test that we properly validate and normalize the scheme.
    def create_exec_options(
        remote_store_address: str, remote_execution_address: str
    ) -> ExecutionOptions:
        ob = create_options_bootstrapper(
            [
                f"--remote-store-address={remote_store_address}",
                f"--remote-execution-address={remote_execution_address}",
            ]
        )
        _build_config, options = OptionsInitializer(ob).build_config_and_options(
            ob, CompleteEnvironment({}), raise_=False
        )
        return ExecutionOptions.from_options(
            options.for_global_scope(), DynamicRemoteExecutionOptions.disabled()
        )

    # The host deliberately contains "http" to catch naive string replacement.
    host = "fake-with-http-in-url.com:10"
    exec_options = create_exec_options(f"grpc://{host}", f"grpc://{host}")
    assert exec_options.remote_execution_address == f"http://{host}"
    assert exec_options.remote_store_address == f"http://{host}"

    exec_options = create_exec_options(f"grpcs://{host}", f"grpcs://{host}")
    assert exec_options.remote_execution_address == f"https://{host}"
    assert exec_options.remote_store_address == f"https://{host}"

    with pytest.raises(ExecutionError):
        create_exec_options(f"http://{host}", f"grpc://{host}")
    with pytest.raises(ExecutionError):
        # "https::" is malformed input -- presumably intentional; confirm.
        create_exec_options(f"grpc://{host}", f"https:://{host}")
def test_invalidation_globs() -> None:
    """Un-normalized relative pythonpath entries must not leak into pantsd's
    invalidation globs."""
    # Confirm that an un-normalized relative path in the pythonpath is filtered out.
    suffix = "something-ridiculous"
    ob = OptionsBootstrapper.create(env={}, args=[f"--pythonpath=../{suffix}"], allow_pantsrc=False)
    globs = GlobalOptions.compute_pantsd_invalidation_globs(
        get_buildroot(), ob.bootstrap_options.for_global_scope()
    )
    for glob in globs:
        assert suffix not in glob
|
[
"noreply@github.com"
] |
Hirni-Meshram2.noreply@github.com
|
eed94a047c8ceace0d5f1642db2ffe1c7eb3bf0e
|
f8ad6963bfc851657ea50c6a036cfad29cdd7f60
|
/Study/Keras/Chapter_03_Catching_Layer_Concept/sub_03_image_augmentation.py
|
5af584ec17aef04328d39886bb785271c2918441
|
[] |
no_license
|
foru120/PythonRepository
|
e1ab0265c0f50ef2e9acdf7447237c913560692b
|
db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98
|
refs/heads/master
| 2021-01-01T06:53:11.728109
| 2019-04-25T13:52:50
| 2019-04-25T13:52:50
| 97,541,222
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
import numpy as np
# Fix the random seed for reproducible augmentation.
np.random.seed(5)
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
# Augmentation generator: rescale to [0,1], random rotation/shift/shear/zoom
# and horizontal+vertical flips, filling revealed pixels with nearest values.
data_aug_gen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.5,
    zoom_range=[0.8, 2.0],
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='nearest'
)
img = load_img(path='./dataset/handwriting_shape/train/triangle/triangle001.png')
x = img_to_array(img)
# Prepend a batch axis of size 1, as flow() expects a batch of images.
x = x.reshape((1,) + x.shape)
i = 0
# Save augmented variants of the single source image to the preview directory,
# stopping after 31 iterations (i runs 1..31 before the break fires).
for batch in data_aug_gen.flow(x, batch_size=1, save_to_dir='./dataset/handwriting_shape/preview', save_prefix='tri',
                               save_format='png'):
    i += 1
    if i > 30:
        break
|
[
"broodsky1122@hanmail.net"
] |
broodsky1122@hanmail.net
|
63aece5376d78fe1adf90813932e843283448f09
|
2b28f749fef34e566b685d520be7ed50f28b7bff
|
/bondhon_docx/convert_bangla.py
|
eec2df4268d0d8e6e6bd40811d001112db6fa54b
|
[
"MIT"
] |
permissive
|
banglakit/bondhon-docx
|
cc58fea46fd9a50b4559ed26ba2142a5d708423e
|
a8f6a58995392f420d48f5fc8ec7a25dadeca30a
|
refs/heads/master
| 2020-04-28T12:00:15.608727
| 2019-03-12T18:23:15
| 2019-03-12T18:23:15
| 175,262,079
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
#!/usr/bin/env python
import argparse
import os
from docx import Document
from bondhon_docx import conversion
def main():
    """CLI entry point: convert a Bengali .docx between encodings.

    Reads from_enc/to/path from the command line, converts the document
    in memory, and writes `<path>.converted.docx` next to the input.
    """
    arg_parser = argparse.ArgumentParser(description='Convert Bengali Documents between encodings.')
    for arg_name, help_text in (
        ('from_enc', 'Original Encoding of File'),
        ('to', 'The Encoding you want to convert to'),
        ('path', 'The path of the file'),
    ):
        arg_parser.add_argument(arg_name, help=help_text)
    parsed = arg_parser.parse_args()

    doc = Document(parsed.path)
    conversion.convert_document(parsed.from_enc, parsed.to, doc)
    base_path, _ext = os.path.splitext(parsed.path)
    doc.save(base_path + '.converted.docx')
|
[
"aniruddha@adhikary.net"
] |
aniruddha@adhikary.net
|
995c7fb086f0b3ce3be2766dfa862208c3486b28
|
d52f71cac1c10a8641a18b2b30e789744f3b3ef7
|
/Experiments/Yellow_submarine/2019_01_30_ml_approach/src/qmlt/numerical/__init__.py
|
a1f226b035bbee8776fb49ed53650e6768d1eceb
|
[] |
no_license
|
BOHRTECHNOLOGY/public_research
|
89c67e583b2283f6c67ab33c7303c23bf18467df
|
d9209f20073d075ae7150250cb1a369f8cb215b7
|
refs/heads/master
| 2022-12-10T16:47:54.319350
| 2020-01-09T12:51:04
| 2020-01-09T12:51:04
| 143,842,978
| 17
| 5
| null | 2022-12-08T01:40:31
| 2018-08-07T08:26:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,833
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Numerical Quantum Circuit Learner
========================================================
**Module name:** :mod:`qmlt.numerical`
.. currentmodule:: qmlt.numerical
.. codeauthor:: Maria Schuld <maria@xanadu.ai>
This module contains a class to train models for machine learning and optimization based on variational quantum circuits.
The optimization is executed by scipy's numerical optimisation library. The user defines a function that computes
the outputs of the variational circuit, as well as the training objective, and specifies the model and training
hyperparameters.
There are three basic functionalities. The circuit can be trained, run with the current parameters, and scored.
The numerical learner module has been designed for the training of continuous-variable circuits written in StrawberryFields or
BlackBird (using any backend), but is in principle able to train any user-provided model coded in python.
.. note::
Numerical differentiation is not robust, which means that some models fail to be trained. For example, the approximations
of gradients for gradient-based methods are not precise enough to find the steepest descent in plateaus of the
optimization landscape. This can sometimes be rectified by choosing good hyperparameters, but ultimately poses a limit
to training quantum circuits with numerical methods.
CircuitLearner class
---------------------
.. currentmodule:: qmlt.numerical.CircuitLearner
.. autosummary::
train_circuit
run_circuit
score_circuit
get_circuit_parameters
Helper methods
--------------
.. currentmodule:: qmlt.numerical
.. autosummary::
check
check_X
check_Y
check_steps
check_batch_size
check_logs
Code details
------------
"""
from .learner import (CircuitLearner,
_check as check,
_check_X as check_X,
_check_Y as check_Y,
_check_steps as check_steps,
_check_batch_size as check_batch_size,
_check_logs as check_logs)
__all__ = ['CircuitLearner', 'check', 'check_X', 'check_Y', 'check_steps', 'check_batch_size', 'check_logs']
|
[
"michal.stechly@gmail.com"
] |
michal.stechly@gmail.com
|
786e15a926f9ea9ba51dff0e7cfd6b90ea532743
|
cf14b6ee602bff94d3fc2d7e712b06458540eed7
|
/gs24/enroll/urls.py
|
c58191feb75d1b077f6411cb53f93548cd76ff79
|
[] |
no_license
|
ManishShah120/Learning-Django
|
8b0d7bfe7e7c13dcb71bb3d0dcdf3ebe7c36db27
|
8fe70723d18884e103359c745fb0de5498b8d594
|
refs/heads/master
| 2023-03-29T09:49:47.694123
| 2021-03-28T16:04:34
| 2021-03-28T16:04:34
| 328,925,596
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
from django.urls import path
from .views import studentinfo
# URL routes for the enroll app: /stu/ is served by the studentinfo view.
urlpatterns = [
    path('stu/', studentinfo, name='studetails'),
]
|
[
"mkshah141@gmail.com"
] |
mkshah141@gmail.com
|
e3af4d6ab9808460198837d63b6e0f5553b57bbb
|
04b1803adb6653ecb7cb827c4f4aa616afacf629
|
/content/browser/frame_host/DEPS
|
3da57e57e31e41791a995a6a1205c9dbf9929048
|
[
"BSD-3-Clause"
] |
permissive
|
Samsung/Castanets
|
240d9338e097b75b3f669604315b06f7cf129d64
|
4896f732fc747dfdcfcbac3d442f2d2d42df264a
|
refs/heads/castanets_76_dev
| 2023-08-31T09:01:04.744346
| 2021-07-30T04:56:25
| 2021-08-11T05:45:21
| 125,484,161
| 58
| 49
|
BSD-3-Clause
| 2022-10-16T19:31:26
| 2018-03-16T08:07:37
| null |
UTF-8
|
Python
| false
| false
| 902
|
include_rules = [
# The frame_host files should only call upwards in the layering via the
# delegate interfaces.
"-content/browser/web_contents",
"-content/public/browser/web_contents.h",
"-content/public/browser/web_contents_delegate.h",
"-content/public/browser/web_contents_view.h",
]
specific_include_rules = {
".*_(unit|browser)test\.cc": [
"+content/browser/web_contents",
"+content/public/browser/web_contents.h",
"+content/public/browser/web_contents_delegate.h",
],
".*interstitial_page_impl\.cc": [
# TODO(nasko): This should be removed once we remove
# WebContentsObserver as the method of telling interstitial pages to
# clean themselves up.
"+content/browser/web_contents",
"+content/public/browser/web_contents_delegate.h",
],
"popup_menu_helper_mac.mm": [
"+content/app_shim_remote_cocoa/render_widget_host_view_cocoa.h",
]
}
|
[
"sunny.nam@samsung.com"
] |
sunny.nam@samsung.com
|
|
8c677a448294359eddc72929c681abd438b90e80
|
385ed58325dd0cc75bdb9fd3e61c5e005f7a4f28
|
/source/difang/src/difang/majiang2/table_state/state_xueliu.py
|
8aa6d33479d6ef5c2163185c743230768621fe2e
|
[] |
no_license
|
csirui/hall37
|
17dfa4e4f1f8bf719d0c11ac7738fa4c14fd06db
|
5c4eb4b2bf57bbbee4731470c830d8d81915d603
|
refs/heads/master
| 2021-09-04T03:55:12.460035
| 2018-01-15T15:12:30
| 2018-01-15T15:12:30
| 117,560,615
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# -*- coding=utf-8
'''
Created on 2016年9月23日
@author: zhaol
'''
from difang.majiang2.table_state.state import MTableState
class MTableStateXueliu(MTableState):
    """Table state definition for the Xueliu (blood-flows-on) mahjong mode."""

    def __init__(self):
        """Enable every player action allowed at a Xueliu table."""
        super(MTableStateXueliu, self).__init__()
        # Actions available in this play mode: discard, pong, kong,
        # declaring a void suit, winning, and continuing play after a
        # win (the xueliu-chenghe rule).
        enabled_states = (
            MTableState.TABLE_STATE_DROP,
            MTableState.TABLE_STATE_PENG,
            MTableState.TABLE_STATE_GANG,
            MTableState.TABLE_STATE_ABSENCE,
            MTableState.TABLE_STATE_HU,
            MTableState.TABLE_STATE_XUELIU,
        )
        for state in enabled_states:
            self.setState(state)
|
[
"cg@ibenxi.com"
] |
cg@ibenxi.com
|
292ffd198700cdc76c0bcbe232ae0cb3ca792a13
|
07b751896b5e8c029a1808f5587a9bb30090b0b4
|
/tensorflow/python/data/experimental/kernel_tests/restructured_dataset_test.py
|
3b0d23d6e11ee17a3fe6ac5cf9cce767232c559a
|
[
"Apache-2.0"
] |
permissive
|
danfischetti/tensorflow
|
c5326578bac35c6f9a47444d8f91e03097fc2506
|
f3d4bf4345a442f605a45b1fbf74ea9656fa72ed
|
refs/heads/master
| 2020-04-11T10:07:21.324395
| 2018-12-13T22:46:13
| 2018-12-13T22:46:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,105
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_RestructuredDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager coverage
class RestructuredDatasetTest(test_base.DatasetTestBase):
  """Unit tests for the private `batching._RestructuredDataset` transform."""

  @test_util.run_deprecated_v1
  def testRestructureDataset(self):
    """Valid restructurings are applied; incompatible ones raise ValueError."""
    # Element structure: (int32 scalar, (int32 vector, int32 [20, 30] matrix)).
    components = (array_ops.placeholder(dtypes.int32),
                  (array_ops.placeholder(dtypes.int32, shape=[None]),
                   array_ops.placeholder(dtypes.int32, shape=[20, 30])))
    dataset = dataset_ops.Dataset.from_tensors(components)
    i32 = dtypes.int32
    # (new_types, new_shape_lists) pairs compatible with the original
    # structure; a None shape list means "leave shapes unchanged".
    test_cases = [((i32, i32, i32), None),
                  (((i32, i32), i32), None),
                  ((i32, i32, i32), (None, None, None)),
                  ((i32, i32, i32), ([17], [17], [20, 30]))]
    for new_types, new_shape_lists in test_cases:
      # pylint: disable=protected-access
      new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
      # pylint: enable=protected-access
      self.assertEqual(new_types, new.output_types)
      if new_shape_lists is not None:
        for expected_shape_list, shape in zip(
            nest.flatten(new_shape_lists), nest.flatten(new.output_shapes)):
          if expected_shape_list is None:
            # None entry: the restructured shape must be fully unknown.
            self.assertIs(None, shape.ndims)
          else:
            self.assertEqual(expected_shape_list, shape.as_list())
    # Structures whose arity, dtypes, or shapes conflict with the
    # original dataset must be rejected.
    fail_cases = [((i32, dtypes.int64, i32), None),
                  ((i32, i32, i32, i32), None),
                  ((i32, i32, i32), ((None, None), None)),
                  ((i32, i32, i32), (None, None, None, None)),
                  ((i32, i32, i32), (None, [None], [21, 30]))]
    for new_types, new_shape_lists in fail_cases:
      with self.assertRaises(ValueError):
        # pylint: disable=protected-access
        new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
        # pylint: enable=protected-access
if __name__ == "__main__":
test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
07d557b67c5f57d0bc58e144628ef21653545f9f
|
ff8db86ce558e57f7b24f8f6d890a3154f6d948f
|
/neutron_plugin_contrail/plugins/opencontrail/loadbalancer/v2/loadbalancer_member.py
|
bcc4781fbd29f19c81389f17ff651e751bc75193
|
[
"Apache-2.0"
] |
permissive
|
lungdear/tf-neutron-plugin
|
143740d1cafb93f4cbe672e53a609c4771be6833
|
d19e758673e1e28bf8b270b8e934857014a46cdf
|
refs/heads/master
| 2022-12-04T21:18:39.869684
| 2020-08-08T13:32:59
| 2020-08-11T20:06:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,424
|
py
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import uuid
from neutron_lbaas.extensions import loadbalancerv2
try:
from neutron.openstack.common import uuidutils
except ImportError:
from oslo_utils import uuidutils
try:
from neutron.common.exceptions import NotAuthorized
except ImportError:
from neutron_lib.exceptions import NotAuthorized
from vnc_api.vnc_api import IdPermsType, NoIdError
from vnc_api.vnc_api import LoadbalancerMember, LoadbalancerMemberType
from .. resource_manager import ResourceManager
class LoadbalancerMemberManager(ResourceManager):
    """Maps neutron LBaaS v2 member resources onto Contrail
    ``LoadbalancerMember`` API objects.

    NOTE(review): uses ``dict.iteritems()``, so this class is
    Python-2-only as written.
    """

    # Contrail property name (key) -> neutron field name (value).
    _loadbalancer_member_type_mapping = {
        'admin_state': 'admin_state_up',
        'status': 'status',
        'protocol_port': 'protocol_port',
        'weight': 'weight',
        'address': 'address',
        'subnet_id': 'subnet_id',
    }

    @property
    def property_type_mapping(self):
        return self._loadbalancer_member_type_mapping

    def make_properties(self, member):
        """Build a LoadbalancerMemberType from a neutron member dict."""
        props = LoadbalancerMemberType()
        for key, mapping in self._loadbalancer_member_type_mapping.iteritems():
            if mapping in member:
                setattr(props, key, member[mapping])
        return props

    def _get_member_pool_id(self, member):
        # A member's parent object in the Contrail tree is its pool.
        pool_uuid = member.parent_uuid
        return pool_uuid

    def make_dict(self, member, fields=None):
        """Convert a Contrail member object into a neutron-style dict."""
        res = {'id': member.uuid,
               'name': member.name,
               'pool_id': member.parent_uuid,
               'status': self._get_object_status(member)}
        try:
            pool = self._api.loadbalancer_pool_read(id=member.parent_uuid)
            # The tenant id is the pool's parent project uuid, de-hyphenated.
            res['tenant_id'] = pool.parent_uuid.replace('-', '')
        except NoIdError:
            pass
        props = member.get_loadbalancer_member_properties()
        for key, mapping in self._loadbalancer_member_type_mapping.iteritems():
            value = getattr(props, key, None)
            if value is not None:
                res[mapping] = value
        return self._fields(res, fields)

    def resource_read(self, id):
        return self._api.loadbalancer_member_read(id=id)

    def resource_list(self, tenant_id=None):
        """Retrieve all members; for a specific tenant this iterates
        through all of the tenant's pools (members live under pools).
        """
        if tenant_id is None:
            return self._api.loadbalancer_members_list()
        pool_list = self._api.loadbalancer_pools_list(tenant_id)
        if 'loadbalancer-pools' not in pool_list:
            return {}
        member_list = []
        for pool in pool_list['loadbalancer-pools']:
            pool_members = self._api.loadbalancer_members_list(
                parent_id=pool['uuid'])
            if 'loadbalancer-members' in pool_members:
                member_list.extend(pool_members['loadbalancer-members'])
        response = {'loadbalancer-members': member_list}
        return response

    def get_resource(self, context, id, pool_id, fields=None):
        res = super(LoadbalancerMemberManager, self).get_resource(context, id)
        # The member must actually belong to the pool named in the request.
        if res and res['pool_id'] != pool_id:
            raise loadbalancerv2.MemberNotFoundForPool(member_id=res['id'],
                                                       pool_id=res['pool_id'])
        return self._fields(res, fields)

    def get_collection(self, context, pool_id, filters=None, fields=None):
        """Optimized listing of the members of a single pool."""
        member_list = []
        pool_members = self._api.loadbalancer_members_list(
            parent_id=pool_id)
        if 'loadbalancer-members' in pool_members:
            member_list.extend(pool_members['loadbalancer-members'])
        response = []
        for m in member_list:
            res = self._get_resource_dict(m['uuid'], filters, fields)
            if res is not None and self._is_authorized(context, res):
                response.append(res)
        return response

    def resource_update(self, obj):
        return self._api.loadbalancer_member_update(obj)

    def resource_delete(self, id):
        return self._api.loadbalancer_member_delete(id=id)

    def get_exception_notfound(self, id=None):
        return loadbalancerv2.EntityNotFound(name=self.neutron_name, id=id)

    def get_exception_inuse(self, id=None):
        # Members have no "in use" state; nothing to raise.
        pass

    @property
    def neutron_name(self):
        return "member"

    @property
    def resource_name_plural(self):
        return "loadbalancer-members"

    def create(self, context, pool_id, member):
        """
        Create a loadbalancer_member object.

        Raises EntityNotFound when the pool does not exist and
        NotAuthorized when the caller's tenant does not own the pool.
        """
        m = member['member']
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancerv2.EntityNotFound(name='Pool', id=pool_id)
        tenant_id = self._get_tenant_id_for_create(context, m)
        # Members may only be created in pools owned by the same tenant.
        if str(uuid.UUID(tenant_id)) != pool.parent_uuid:
            raise NotAuthorized()
        obj_uuid = uuidutils.generate_uuid()
        props = self.make_properties(m)
        id_perms = IdPermsType(enable=True)
        member_db = LoadbalancerMember(
            obj_uuid, pool, loadbalancer_member_properties=props,
            id_perms=id_perms)
        member_db.uuid = obj_uuid
        self._api.loadbalancer_member_create(member_db)
        return self.make_dict(member_db)

    def update_properties(self, member_db, id, m):
        # Returns True when something changed and the object must be saved.
        props = member_db.get_loadbalancer_member_properties()
        if self.update_properties_subr(props, m):
            member_db.set_loadbalancer_member_properties(props)
            return True
        return False

    def delete(self, context, id, pool_id):
        try:
            _ = self._api.loadbalancer_member_read(id=id)
        except NoIdError:
            raise loadbalancerv2.EntityNotFound(name=self.neutron_name, id=id)
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancerv2.EntityNotFound(name='Pool',
                                                id=pool_id)
        # Guard against deleting a member through the wrong pool URL.
        if id not in [member['uuid'] for member in
                      pool.get_loadbalancer_members() or []]:
            raise loadbalancerv2.MemberNotFoundForPool(member_id=id,
                                                       pool_id=pool_id)
        super(LoadbalancerMemberManager, self).delete(context, id)

    def update_object(self, member_db, id, m):
        """Reject an update that would duplicate another member's
        address:port pair within the same pool."""
        pool_id = member_db.parent_uuid
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancerv2.EntityNotFound(name='Pool',
                                                id=pool_id)
        db_props = member_db.get_loadbalancer_member_properties()
        members = pool.get_loadbalancer_members()
        for member in members or []:
            if id == member['uuid']:
                continue
            member_obj = self._api.loadbalancer_member_read(id=member['uuid'])
            props = member_obj.get_loadbalancer_member_properties()
            if (props.get_address() == db_props.get_address() and
                props.get_protocol_port() == db_props.get_protocol_port()):
                raise loadbalancerv2.MemberExists(
                    address=props.get_address(),
                    port=props.get_protocol_port(),
                    pool=pool_id)
        return True
|
[
"andrey-mp@yandex.ru"
] |
andrey-mp@yandex.ru
|
33aac62c06dca320ef84cbca693af39b9e8b6757
|
ee6caf788762d7e297aed4c291b20012ed681410
|
/92. Codeforces/R73-C.py
|
25419f28edf76623870b575c939e5b06d5e7ad59
|
[] |
no_license
|
dmlimgo/Problem-Solving
|
61ea51f1737f572714bc5030470a73a6e0339336
|
c265ccac046b3e87c34d014876fde11f33a15ed9
|
refs/heads/master
| 2020-08-30T15:45:08.895947
| 2020-02-02T14:45:28
| 2020-02-02T14:45:28
| 218,424,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
# Competitive-programming solution: build teams of three from c coders,
# m mathematicians and x universal members, each team needing at least one
# coder and one mathematician (looks like Codeforces 1221C "Perfect Team"
# — TODO confirm).  Answer per query is the maximum number of teams.
Q = int(input())
for q in range(Q):
    c, m, x = map(int, input().split())
    # No team can be formed without both a coder and a mathematician.
    if c == 0 or m == 0:
        print(0)
        continue
    # Universal members are scarcest: pair one with each (coder, mathematician)
    # pair first, then split the leftover specialists three per team, capped
    # by the smaller specialist group.
    if min(c,m,x) == x:
        c -= x
        m -= x
        b = max(c, m)
        s = min(c, m)
        if (s+b)//3 > s:
            print(s+x)
        else:
            print((s+b)//3+x)
        continue
    # Otherwise one specialist group is the bottleneck.
    if min(c,m,x) == c or min(c,m,x) == m:
        print(min(c,m,x))
        continue
|
[
"dongmyeong.lim@gmail.com"
] |
dongmyeong.lim@gmail.com
|
6b75e66b7182ecc217fcf6cf12e24451b43ad307
|
aa9647e01ace505d9c70e5247af0bce6749bdc45
|
/src/db.py
|
b34ffc4a33947da398cf2efb32ceeecdd3a2e601
|
[
"MIT"
] |
permissive
|
cgDeepLearn/pyserver
|
83853875dc33173eb3ae72b2e70c7db2c9ba3404
|
5a5e23ccafcc203b2d70eef289ec618ff9da0481
|
refs/heads/main
| 2023-01-29T05:45:52.110262
| 2020-12-09T09:03:33
| 2020-12-09T09:03:33
| 311,908,364
| 0
| 0
|
MIT
| 2020-11-16T08:39:29
| 2020-11-11T08:29:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,075
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : db.py
# @Author : cgDeepLearn
# @Create Date : 2020/11/16-3:30 下午
import redis
from conf import config
import pymysql
from DBUtils.PooledDB import PooledDB
from utils.log import logger
class RedisOps(object):
    """Holds a Redis client (``self.rd``) backed by a connection pool."""

    # Status constants for callers; their usage lives outside this class.
    FIELD_EXIST = 0
    NEW_FIELD = 1

    def __init__(self, host, port, password, db):
        """Connect to the given Redis database through a pooled client."""
        pool = redis.ConnectionPool(
            host=host, port=port, password=password, db=db)
        self.rd = redis.Redis(connection_pool=pool)
class MysqlOps(object):
    """Sharded MySQL access layer backed by a DBUtils connection pool.

    Tables ``user_apply``, ``user_base`` and ``flows`` are split into
    numbered shards (``user_apply_1``, ...); shard 0 keeps the bare
    table name.
    """

    def __init__(self, host, port, user, passwd, db):
        # Pool keeps 10-30 idle connections; maxconnections=0 means no cap.
        self.pool = PooledDB(
            pymysql,
            mincached=10,
            maxcached=30,
            maxconnections=0,
            host=host,
            user=user,
            passwd=passwd,
            db=db,
            port=port,
            charset='utf8')
        # Names of the sharded tables rewritten by _replace().
        self.user_apply = 'user_apply'
        self.user_base = 'user_base'
        self.flows = 'flows'
        # Lazily-filled list of shard numbers (see _get_table_list).
        self.table_list = list()

    def _execute(self, sql, values):
        '''Run *sql* on a fresh connection taken from the pool, commit,
        close the connection, and return the cursor.
        (Original note: a new pooled connection is used on every call.)
        '''
        conn = self.pool.connection()
        cur = conn.cursor()
        cur.execute(sql, values)
        conn.commit()
        conn.close()
        return cur

    def _check_parameter(self, sql, values):
        # When the SQL expects %s placeholders, reject any falsy bound
        # value.  NOTE(review): this also rejects legitimate falsy values
        # such as 0 or '' — confirm that is intended.
        count = sql.count('%s')
        if count > 0:
            for elem in values:
                if not elem:
                    return False
        return True

    def _get_table_list(self):
        # Shard count comes from data_split_info; cached after first call.
        if len(self.table_list) == 0:
            sql = '''SELECT COUNT(id) FROM data_split_info'''
            table_num = list(self.select(sql))[0][0]
            self.table_list = [num for num in range(0, table_num)]

    def _replace(self, sql, table, num):
        # Shard 0 keeps the plain table name but additionally filters out
        # soft-deleted rows; other shards get the "_<num>" suffix.
        if num == 0:
            if table in sql:
                string = ' AND %s.deleted_at is null' % table
                sql = sql + string
        else:
            pattern = '%s' % table
            string = '%s_%d' % (table, num)
            sql = sql.replace(pattern, string)
        return sql

    def _mulselect(self, apply_id, sql, values):
        # Fan the query out over every shard and concatenate the rows.
        self._get_table_list()
        mulcur = list()
        for num in self.table_list:
            temp_c = 0
            sql_tmp = sql
            sql_tmp = self._replace(sql_tmp, self.user_apply, num)
            sql_tmp = self._replace(sql_tmp, self.user_base, num)
            sql_tmp = self._replace(sql_tmp, self.flows, num)
            cur = self._execute(sql_tmp, values)
            for row in cur:
                temp_c = temp_c + 1
                mulcur.append(row)
            logger.info('apply_id:%d _mulselect sql:%s, values:%s, result:%s',
                        apply_id, sql_tmp, values, temp_c)
        return mulcur

    def mulselect(self, sql, values=[], apply_id=0, check=False, log=True):
        '''Multi-shard query interface.

        Supports basic MySQL SELECTs only; aggregate functions, grouping
        and ordering are not supported (they cannot be merged across
        shards).  Yields rows from all shards.
        '''
        sql = sql.replace('\n', '')
        if check and not self._check_parameter(sql, values):
            return
        if log:
            logger.info('apply_id:%d mulselect sql:%s, values:%s', apply_id,
                        sql, values)
        cur = self._mulselect(apply_id, sql, values)
        for row in cur:
            yield row

    def sinselect(self, sql, values=[], apply_id=0, check=False, log=True):
        # Single-shard (shard 0) SELECT; yields rows.
        sql = sql.replace('\n', '')
        if check and not self._check_parameter(sql, values):
            return
        # Transition period: also filter on deleted_at for shard-0 tables.
        sql = self._replace(sql, self.user_apply, num=0)
        sql = self._replace(sql, self.user_base, num=0)
        sql = self._replace(sql, self.flows, num=0)
        if log:
            logger.info('apply_id:%d sinselect sql:%s, values:%s', apply_id,
                        sql, values)
        cur = self._execute(sql, values)
        for row in cur:
            yield row

    def select(self, sql, values=[], apply_id=0, check=False, log=True):
        # Raw SELECT with no shard rewriting; yields rows.
        sql = sql.replace('\n', '')
        if check and not self._check_parameter(sql, values):
            return
        if log:
            logger.info('apply_id:%d select sql:%s, values:%s', apply_id, sql,
                        values)
        cur = self._execute(sql, values)
        for row in cur:
            yield row

    def execute(self, sql, values=[], apply_id=0, check=False, log=True):
        # Write statement (INSERT/UPDATE/DELETE); committed inside _execute.
        sql = sql.replace('\n', '')
        if check and not self._check_parameter(sql, values):
            return
        if log:
            logger.info('apply_id:%d execute sql:%s, values:%s', apply_id, sql,
                        values)
        cur = self._execute(sql, values)
redis_op = RedisOps(
host=config.redis_host, port=config.redis_port, password=config.redis_pwd, db=config.redis_db)
mysql_op = MysqlOps(
host=config.mysql_host,
port=config.mysql_port,
user=config.mysql_user,
passwd=config.mysql_pwd,
db=config.mysql_db)
if __name__ == '__main__':
print(dir(redis_op))
print(dir(mysql_op))
|
[
"cglearningnow@163.com"
] |
cglearningnow@163.com
|
4f66898e78968d145cadffd50f0fbaa0bc24e6f1
|
3b1daac7c1f72b985da899770d98e5f0e8fb835c
|
/Configurations/VBS/2017CR_v7/plot.py
|
98e0a0b236687fec6d81492a000ee0a41787e122
|
[] |
no_license
|
freejiebao/PlotsConfigurations
|
7e10aa45aa3bf742f30d1e21dc565d59d2a025d8
|
cdfd3aff38d1ece9599a699997753bc8ba01b9b1
|
refs/heads/master
| 2020-06-18T19:22:00.561542
| 2019-09-02T12:52:28
| 2019-09-02T12:52:28
| 186,931,874
| 0
| 0
| null | 2019-05-16T01:58:07
| 2019-05-16T01:58:07
| null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
# plot configuration
# groupPlot = {}
#
# Groups of samples to improve the plots (merge different sample during plot).
# If not defined, normal plots is used
#
Red=632; Violet=880; Green=416; Orange=800; Yellow=400; Azure=860
groupPlot['non-prompt'] = {
'nameHR' : 'non-Prompt',
'isSignal' : 0,
'color': Yellow, # kYellow
'samples' : ['Fake_lep']
}
##Fake and prompt substraction
plot['Fake_lep'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
##Data
plot['DATA'] = {
'nameHR' : 'Data',
'color': 1 ,
'isSignal' : 0,
'isData' : 1 ,
'isBlind' : 1 ,
'scale' : 1.0
}
# additional options
legend['lumi'] = 'L = 41.5/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
|
[
"jiexiao@pku.edu.cn"
] |
jiexiao@pku.edu.cn
|
d01e1db1a3d1d0bce24766f0e241c2a7a9923a0f
|
665b89f2472f5cf7eb441609eb112109b7381884
|
/weblatex/migrations/0003_song_attribution.py
|
98e7a851e1a47ea4155fcbc38063165cc4d344cb
|
[] |
no_license
|
Mortal/weblatex
|
5807bf25ea0d6a371e9fc6f0094f7e7375645b6c
|
9c841f9ec226e99f38b6e0c4f12e03535d2c06de
|
refs/heads/master
| 2020-05-14T11:53:08.299274
| 2016-12-23T12:03:50
| 2016-12-23T13:43:52
| 24,682,829
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-31 09:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the required ``attribution`` CharField to the ``Song`` model."""

    dependencies = [
        ('weblatex', '0002_auto_20151227_1835'),
    ]

    operations = [
        migrations.AddField(
            model_name='song',
            name='attribution',
            # Existing rows are backfilled with '' once; the default is
            # then dropped (preserve_default=False), so new rows must
            # supply a value explicitly.
            field=models.CharField(default='', max_length=200),
            preserve_default=False,
        ),
    ]
|
[
"rav@cs.au.dk"
] |
rav@cs.au.dk
|
77d2fa83d35599a5b053874fa4654b5d4fae6602
|
7e72c17745625a1dd4d04f1787c1d2b7bd90642f
|
/htmlgen/attribute.pyi
|
7d17093d5f6cc7d37287a665c5b87a2b0710bba8
|
[
"MIT"
] |
permissive
|
ra2003/python-htmlgen
|
27de75b94ad3b635caf11d26fa64f4a19e543668
|
cbe74d89acd655b78ffe12773b16ef2036502514
|
refs/heads/master
| 2022-04-08T10:37:36.265349
| 2020-03-11T13:46:53
| 2020-03-11T13:46:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,239
|
pyi
|
import datetime
from typing import Optional, List, Iterable
from htmlgen.element import Element
# Descriptor stubs for htmlgen attribute helpers.  Consistency fix: all
# stub default values now use ``...`` (the .pyi convention for "has a
# default"); previously time_html_attribute and data_attribute used
# ``None`` while every other class used ``...``.
class html_attribute(object):
    """String-valued HTML attribute descriptor with an optional default."""
    def __init__(
        self, attribute_name: str, default: Optional[str] = ...
    ) -> None: ...
    def __get__(
        self, obj: Element, type: Optional[type] = ...
    ) -> Optional[str]: ...
    def __set__(self, obj: Element, value: Optional[str]) -> None: ...

class boolean_html_attribute(object):
    def __init__(self, attribute_name: str) -> None: ...
    def __get__(self, obj: Element, type_: Optional[type] = ...) -> bool: ...
    def __set__(self, obj: Element, value: bool) -> None: ...

class int_html_attribute(object):
    def __init__(
        self, attribute_name: str, default: Optional[int] = ...
    ) -> None: ...
    def __get__(
        self, obj: Element, type_: Optional[type] = ...
    ) -> Optional[int]: ...
    def __set__(self, obj: Element, value: Optional[int]) -> None: ...

class float_html_attribute(object):
    def __init__(
        self, attribute_name: str, default: Optional[float] = ...
    ) -> None: ...
    def __get__(
        self, obj: Element, type_: Optional[type] = ...
    ) -> Optional[float]: ...
    def __set__(self, obj: Element, value: Optional[float]) -> None: ...

class time_html_attribute(object):
    def __init__(
        self, attribute_name: str, default: Optional[datetime.time] = ...
    ) -> None: ...
    def __get__(
        self, obj: Element, type_: Optional[type] = ...
    ) -> Optional[datetime.time]: ...
    def __set__(
        self, obj: Element, value: Optional[datetime.time]
    ) -> None: ...

class list_html_attribute(object):
    def __init__(self, attribute_name: str) -> None: ...
    def __get__(
        self, obj: Element, type_: Optional[type] = ...
    ) -> List[str]: ...
    def __set__(self, obj: Element, value: Iterable[str]) -> None: ...

class data_attribute(html_attribute):
    def __init__(
        self, data_name: str, default: Optional[str] = ...
    ) -> None: ...

class css_class_attribute(object):
    """Boolean descriptor that toggles a fixed CSS class on the element."""
    def __init__(self, css_class: str) -> None: ...
    def __get__(self, obj: Element, type_: Optional[type] = ...) -> bool: ...
    def __set__(self, obj: Element, value: bool) -> None: ...
|
[
"srittau@rittau.biz"
] |
srittau@rittau.biz
|
b31a19f61f75d84e9c43cae789ca4a9fafb8dfc3
|
3cae667175b2d6aac6d7f3d8189e9a02c38ea1cf
|
/AOJ/ITP1/python/ITP1_3_B_Print_Test_Cases.py
|
01ada1baf19ee14e9ca3f502aaf3c19915bc6f52
|
[] |
no_license
|
kokorinosoba/contests
|
3ee14acf729eda872ebec9ec7fe3431f50ae23c2
|
6e0dcd7c8ee086650d89fc65616981361b9b20b9
|
refs/heads/master
| 2022-08-04T13:45:29.722075
| 2022-07-24T08:50:11
| 2022-07-24T08:50:11
| 149,092,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
for i,e in enumerate(list(open(0))[:-1],1):print(f'Case {i}:',e,end='')
"""
i=1
while 1:
n=input()
if n=="0": break
print(f"Case {i}: {n}")
i+=1
"""
"""
import sys
for i,x in enumerate(sys.stdin,1):
if x=="0\n":break
print(f"Case {i}: {x}",end="")
"""
|
[
"34607448+kokorinosoba@users.noreply.github.com"
] |
34607448+kokorinosoba@users.noreply.github.com
|
e894dd2c0042e872525cb05a134c54ed4c900387
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/5010/317005010.py
|
bae484c5693cbe1c4f44c01024c8ae9c43673514
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
from bots.botsconfig import *
from records005010 import recorddefs

# Bots EDI grammar for X12 5010 transaction set 317 (functional group 'SO').
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'SO',
}

# Segment tree: MIN/MAX are repeat counts, LEVEL nests child segments.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'N1', MIN: 1, MAX: 10, LEVEL: [
            {ID: 'N2', MIN: 0, MAX: 1},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'G61', MIN: 0, MAX: 1},
            {ID: 'N9', MIN: 0, MAX: 9},
        ]},
        {ID: 'G62', MIN: 1, MAX: 1},
        {ID: 'N9', MIN: 1, MAX: 9},
        {ID: 'TD5', MIN: 1, MAX: 1},
        {ID: 'L0', MIN: 1, MAX: 9999, LEVEL: [
            {ID: 'L5', MIN: 0, MAX: 999},
            {ID: 'H1', MIN: 0, MAX: 1},
        ]},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
de44f671db344112f3455fc9a68fd630b9fa685c
|
a16feb303b7599afac19a89945fc2a9603ae2477
|
/Simple_Python/standard/exception/exception_3.py
|
c238bdfaf994db7ca61ad080adc0958a24b2cca5
|
[] |
no_license
|
yafeile/Simple_Study
|
d75874745ce388b3d0f9acfa9ebc5606a5745d78
|
c3c554f14b378b487c632e11f22e5e3118be940c
|
refs/heads/master
| 2021-01-10T22:08:34.636123
| 2015-06-10T11:58:59
| 2015-06-10T11:58:59
| 24,746,770
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
#! /usr/bin/env/python
# -*- coding:utf-8 -*-
class MyClass(object):
    # __slots__ limits instances to exactly this attribute and removes
    # the per-instance __dict__.
    __slots__ = ('attribute',)
o = MyClass()
o.attribute = 'known attribute'
# Not listed in __slots__, so this line raises AttributeError — that is
# the behaviour this example demonstrates.
o.not_a_slot = 'new attribute'
|
[
"zhuzhulang@126.com"
] |
zhuzhulang@126.com
|
1c738ef73bdc0768137d85581d244067c1e3ef73
|
f9d7036649ff5d64660c33bc295ddf97e316d082
|
/blog/settings.py
|
b9c6c73355a9da6b8c57b7e16e0b4b08e72fe807
|
[] |
no_license
|
jocsakesley/blog-jocsa-kesley
|
1ebd6c11ad45c98a6b396ddfe58675da5cd113ec
|
d106a0870636542c08ee7791d971d77a948b3e0a
|
refs/heads/main
| 2023-03-16T00:08:23.688040
| 2021-03-12T15:36:57
| 2021-03-12T15:36:57
| 322,912,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,687
|
py
|
"""
Django settings for blog project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from decouple import config, Csv
from dj_database_url import parse as dburl
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config("DEBUG", default=False, cast=bool)
ALLOWED_HOSTS = config("ALLOWED_HOSTS", default=[], cast=Csv())
# Application definition
INSTALLED_APPS = [
'blog.posts',
'blog.comentarios',
'blog.categorias',
'blog.sobre',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'widget_tweaks',
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = {
'default': config('DATABASE_URL', default=default_dburl, cast=dburl)
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'pt-BR'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'templates/static'),)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
INSTALLED_APPS += ('django_summernote', )
X_FRAME_OPTIONS = 'SAMEORIGIN'
|
[
"jocsadm@gmail.com"
] |
jocsadm@gmail.com
|
0e0b02856e4b9275bbad24a7461c2c793b231d87
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_208/81.py
|
5eb844629edbf0f9bad243963bf552da90da0e7c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
# Code Jam solution.  Per test case: n entities with [max reach, speed]
# rows in hs, adjacent gaps in ds; tc[i] is the best time for entity i to
# reach the last position by jumping to a later reachable position.
# NOTE(review): the two bare input() calls skip two unused input lines.
for t in range(int(input())):
    n, q = (int(i) for i in input().split())
    hs = [[int(i) for i in input().split()] for j in range(n)]
    # Keep only the gap to the next position from each distance row.
    ds = [[int(i) for i in input().split()][j + 1] for j in range(n - 1)]
    input()
    input()
    tc = [0] * n
    tc[n - 1] = 0
    # Dynamic programming backwards from the destination; -1 = unreachable.
    for i in range(n - 2, -1, -1):
        min = -1  # NOTE(review): shadows the builtin `min` inside this loop
        sd = 0
        for j in range(1, n - i):
            sd += ds[i + j - 1]
            # Beyond this entity's maximum reach: stop extending.
            if sd > hs[i][0]:
                break
            if tc[i + j] == -1:
                continue
            tm = tc[i + j] + sd / hs[i][1]
            if min == -1 or tm < min:
                min = tm
        tc[i] = min
    print("Case #%d: %f" % (t + 1, tc[0]))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
c536b9fd5c1e73cc295090ed7b3acb50d109db16
|
fec863b67ec1ae65da7111bd8c77d0ab2ef1f6ce
|
/movie recommendation system/.history/moviemodel_20210503171215.py
|
065bab6744cb5a59f9f2bcad99cc217a20cecea4
|
[] |
no_license
|
kannan768/movie-recommendation-system
|
e6cf71620e25a0185fed3b37896137f1f39b0801
|
7460d440d44e77390e459ab10c535b6971c9c3ab
|
refs/heads/main
| 2023-05-14T02:21:50.930672
| 2021-06-09T05:02:30
| 2021-06-09T05:02:30
| 375,225,316
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,004
|
py
|
import pandas as pd
import numpy as np
from zipfile import ZipFile
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from pathlib import Path
import matplotlib.pyplot as plt
"""##Dataset"""
# Load the MovieLens-style ratings table.
df = pd.read_csv('ratings.csv', sep=',', encoding='latin-1', usecols=['userId','movieId','rating','timestamp'])
movie_df=df
# Build dense integer encodings for users and movies (embedding indices).
user_ids = df["userId"].unique().tolist()
user2user_encoded = {x: i for i, x in enumerate(user_ids)}
userencoded2user = {i: x for i, x in enumerate(user_ids)}
movie_ids = df["movieId"].unique().tolist()
movie2movie_encoded = {x: i for i, x in enumerate(movie_ids)}
movie_encoded2movie = {i: x for i, x in enumerate(movie_ids)}
df["user"] = df["userId"].map(user2user_encoded)
df["movie"] = df["movieId"].map(movie2movie_encoded)
num_users = len(user2user_encoded)
num_movies = len(movie_encoded2movie)
df["rating"] = df["rating"].values.astype(np.float32)
min_rating = min(df["rating"])
max_rating = max(df["rating"])
# print(
#     "Number of users: {}, Number of Movies: {}, Min rating: {}, Max rating: {}".format(
#         num_users, num_movies, min_rating, max_rating
#     )
# )
# Shuffle once (fixed seed), min-max normalise ratings to [0, 1] targets,
# then split 90/10 into train/validation.
df = df.sample(frac=1, random_state=42)
x = df[["user", "movie"]].values
y = df["rating"].apply(lambda x: (x - min_rating) / (max_rating - min_rating)).values
train_indices = int(0.9 * df.shape[0])
x_train, x_val, y_train, y_val = (
    x[:train_indices],
    x[train_indices:],
    y[:train_indices],
    y[train_indices:],
)
EMBEDDING_SIZE = 50
class RecommenderNet(keras.Model):
    """Matrix-factorisation recommender: sigmoid(user . movie + biases).

    Users and movies are embedded in a shared ``embedding_size``-dim space;
    the dot product plus per-user and per-movie biases is squashed to (0, 1),
    matching the min-max normalised rating targets built above.
    """
    def __init__(self, num_users, num_movies, embedding_size, **kwargs):
        super(RecommenderNet, self).__init__(**kwargs)
        self.num_users = num_users
        self.num_movies = num_movies
        self.embedding_size = embedding_size
        # L2-regularised embeddings keep the latent factors small.
        self.user_embedding = layers.Embedding(
            num_users,
            embedding_size,
            embeddings_initializer="he_normal",
            embeddings_regularizer=keras.regularizers.l2(1e-6),
        )
        self.user_bias = layers.Embedding(num_users, 1)
        self.movie_embedding = layers.Embedding(
            num_movies,
            embedding_size,
            embeddings_initializer="he_normal",
            embeddings_regularizer=keras.regularizers.l2(1e-6),
        )
        self.movie_bias = layers.Embedding(num_movies, 1)
    def call(self, inputs):
        # inputs[:, 0] holds encoded user indices, inputs[:, 1] movie indices.
        user_vector = self.user_embedding(inputs[:, 0])
        user_bias = self.user_bias(inputs[:, 0])
        movie_vector = self.movie_embedding(inputs[:, 1])
        movie_bias = self.movie_bias(inputs[:, 1])
        dot_user_movie = tf.tensordot(user_vector, movie_vector, 2)
        # Add all the components (including bias)
        x = dot_user_movie + user_bias + movie_bias
        return tf.nn.sigmoid(x)
# Train the recommender; targets are normalised ratings, so binary
# cross-entropy against the sigmoid output is used as the loss.
model = RecommenderNet(num_users, num_movies, EMBEDDING_SIZE)
model.compile(
    # NOTE(review): `lr=` is a deprecated alias in newer Keras releases;
    # `learning_rate=` is the current parameter name — confirm TF version.
    loss=tf.keras.losses.BinaryCrossentropy(), optimizer=keras.optimizers.Adam(lr=0.001)
)
history = model.fit(
    x=x_train,
    y=y_train,
    batch_size=64,
    epochs=5,
    verbose=1,
    validation_data=(x_val, y_val),
)
# plt.plot(history.history["loss"])
# plt.plot(history.history["val_loss"])
# plt.title("model loss")
# plt.ylabel("loss")
# plt.xlabel("epoch")
# plt.legend(["train", "test"], loc="upper left")
# plt.show()
# Movie metadata used to print titles/genres for recommended ids.
movie_df = pd.read_csv('movies.csv', sep=',', encoding='latin-1', usecols=['movieId','title','genres'])
def Display(User_id):
    """Print the top-10 movie recommendations for the given user id.

    Relies on module-level globals built above: ``df`` (encoded ratings),
    ``movie_df`` (movie metadata), ``model`` (the trained RecommenderNet)
    and the id<->index encoding dicts.
    """
    # BUG FIX: the original ignored its argument and recommended for a
    # randomly sampled user (df.userId.sample(1)); honour the request.
    user_id = User_id
    movies_watched_by_user = df[df.userId == user_id]
    # Candidate set: movies the user has not rated AND that the model
    # has an embedding for.
    movies_not_watched = movie_df[
        ~movie_df["movieId"].isin(movies_watched_by_user.movieId.values)
    ]["movieId"]
    movies_not_watched = list(
        set(movies_not_watched).intersection(set(movie2movie_encoded.keys())))
    movies_not_watched = [[movie2movie_encoded.get(x)] for x in movies_not_watched]
    user_encoder = user2user_encoded.get(user_id)
    # One (user, movie) row per candidate, scored in a single batch predict.
    user_movie_array = np.hstack(
        ([[user_encoder]] * len(movies_not_watched), movies_not_watched))
    ratings = model.predict(user_movie_array).flatten()
    # Indices of the ten highest predicted ratings, best first.
    top_ratings_indices = ratings.argsort()[-10:][::-1]
    recommended_movie_ids = [
        movie_encoded2movie.get(movies_not_watched[x][0]) for x in top_ratings_indices]
    print("----" * 8)
    print("Top 10 movie recommendations")
    print("----" * 8)
    recommended_movies = movie_df[movie_df["movieId"].isin(recommended_movie_ids)]
    # BUG FIX: this output loop was commented out, leaving a no-op bare
    # `print` — re-enabled so the function actually displays results.
    for row in recommended_movies.itertuples():
        print(row.title, ":", row.genres)


# BUG FIX: ``sys`` was used below without ever being imported.
import sys

# Usage: python moviemodel.py <user_id>
user_id = int(sys.argv[1])
Display(user_id)
|
[
"kannanbsk1609080@gmail.com"
] |
kannanbsk1609080@gmail.com
|
4c592d51f61cf481cc775b42cd08c2ac8509d63a
|
d2f50124ff3bec70b9b3139ecb063b06e526781d
|
/biable/migrations/0063_auto_20170209_1210.py
|
b1f585b6e2133d7294f9972748a301e53108e589
|
[] |
no_license
|
odecsarrollo/odecopack-componentes
|
e8d993f089bf53bbf3c53d1265e70ac5c06b59b8
|
b583a115fb30205d358d97644c38d66636b573ff
|
refs/heads/master
| 2022-12-12T00:33:02.874268
| 2020-08-13T18:45:01
| 2020-08-13T18:45:01
| 189,262,705
| 0
| 0
| null | 2022-12-08T11:23:46
| 2019-05-29T16:37:21
|
Python
|
UTF-8
|
Python
| false
| false
| 470
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-09 17:10
# Auto-generated migration: makes GrupoCliente.nombre unique.
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('biable', '0062_auto_20170209_1032'),
    ]
    operations = [
        # Enforce uniqueness on the group name at the database level.
        migrations.AlterField(
            model_name='grupocliente',
            name='nombre',
            field=models.CharField(max_length=120, unique=True),
        ),
    ]
|
[
"fabio.garcia.sanchez@gmail.com"
] |
fabio.garcia.sanchez@gmail.com
|
f562bc0096ec80473c16957f03b4c070b782bab7
|
99280ee4672420b43bdcedb9c6f5c93a5fe182f0
|
/API/backend_3/todo_project/todo_project/settings.py
|
297321c3ae1b7a167c333d4af61b2cc4b333d714
|
[] |
no_license
|
kamral/test_1
|
f8674a075d51fc94630df7d6a5cf55b11d086db0
|
a10ce3337463d1cb9b56876d0566798740c0b42f
|
refs/heads/master
| 2023-08-06T23:50:45.519935
| 2020-06-07T09:27:43
| 2020-06-07T09:27:43
| 265,688,683
| 0
| 0
| null | 2021-09-22T19:23:15
| 2020-05-20T21:21:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,293
|
py
|
"""
Django settings for todo_project project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=_jwj$8oi08uu8m)5170xe#@o_aqjjpyhy(5d-fq=^k-^!f9ui'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#3d party
'rest_framework',
#local
'todos.apps.TodosConfig',
]
REST_FRAMEWORK={
'DEFAULT_PERMISSION_CLASSES':[
'rest_framework.permissions.AllowAny',
]
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"kamral010101@gmail.com"
] |
kamral010101@gmail.com
|
e7d10f8320db3c2f560b7875b1bb254593aca879
|
5ffa05429f1278455cd02e759cc64f376813ce20
|
/html_form_builder/__openerp__.py
|
1e8592471c11867f3ba1a29645d05d25c8cae4e7
|
[] |
no_license
|
tonihr/Odoo9
|
217f483993c4a49d5c14ad93ec2594e0a46bef5d
|
93e0d3de55714e34229cb5273400a6ebc1f6e3e0
|
refs/heads/9.0
| 2021-01-19T04:02:57.407271
| 2017-03-08T05:35:47
| 2017-03-08T05:35:47
| 84,426,868
| 0
| 0
| null | 2017-03-09T10:02:45
| 2017-03-09T10:02:45
| null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
# Odoo module manifest for the HTML Form Builder addon.
{
    'name': "HTML Form Builder",
    'version': "1.8.9",
    'author': "Sythil Tech",
    'category': "Tools",
    'support': "steven@sythiltech.com.au",
    'summary': "Manage both internal and external forms",
    'description': "Manage both internal and external forms",
    'license':'LGPL-3',
    # Views, seed data and ACLs loaded on install/upgrade (order matters).
    'data': [
        'views/html_form.xml',
        'views/html_form_builder_templates.xml',
        'data/html.form.captcha.csv',
        'data/html.form.field.type.csv',
        'data/html.form.action.type.csv',
        'security/ir.model.access.csv',
    ],
    'demo': [],
    'images':[
        'static/description/1.jpg',
    ],
    'depends': [],
    'installable': True,
}
|
[
"steven@sythiltech.com"
] |
steven@sythiltech.com
|
9abfdc5a2c0729518fddf65bbefeae6317b8b9a0
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/eXe/rev2283-2366/right-branch-2366/twisted/internet/tksupport.py
|
19dcf48b56a21fe81e5d2e00d290099a36bdac51
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
"""This module integrates Tkinter with twisted.internet's mainloop.
API Stability: semi-stable
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
To use, do::
| tksupport.install(rootWidget)
and then run your reactor as usual - do *not* call Tk's mainloop(),
use Twisted's regular mechanism for running the event loop.
Likewise, to stop your program you will need to stop Twisted's
event loop. For example, if you want closing your root widget to
stop Twisted::
| root.protocol('WM_DELETE_WINDOW', reactor.stop)
"""
import Tkinter, tkSimpleDialog, tkMessageBox
from twisted.python import log
from twisted.internet import task
_task = None
def install(widget, ms=10, reactor=None):
    """Install a Tkinter.Tk() object into the reactor.

    Drives the Tk event loop by calling ``widget.update`` every ``ms``
    milliseconds from a Twisted LoopingCall (stored in module-global
    ``_task`` so `uninstall` can stop it).
    """
    installTkFunctions()
    global _task
    _task = task.LoopingCall(widget.update)
    # LoopingCall takes seconds; False = don't call immediately.
    _task.start(ms / 1000.0, False)
def uninstall():
    """Remove the root Tk widget from the reactor.
    Call this before destroy()ing the root widget.
    """
    # Stop the periodic widget.update loop started by install().
    global _task
    _task.stop()
    _task = None
def installTkFunctions():
    """Replace twisted.python.util.getPassword with the Tk dialog version."""
    import twisted.python.util
    twisted.python.util.getPassword = getPassword
def getPassword(prompt = '', confirm = 0):
    """Prompt for a password via a Tk dialog.

    With ``confirm`` true, ask twice and keep retrying until both entries
    match; otherwise return the first entry immediately.
    """
    while True:
        password = tkSimpleDialog.askstring('Password Dialog', prompt, show='*')
        if not confirm:
            return password
        verification = tkSimpleDialog.askstring('Password Dialog', 'Confirm Password', show='*')
        if password == verification:
            return password
        tkMessageBox.showerror('Password Mismatch', 'Passwords did not match, starting over')
__all__ = ["install", "uninstall"]
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
d1ca2a52b83d8def8c1aa10f303e6cad817df346
|
41a20700b5bb351d20562ac23ec4db06bc96f0d7
|
/src/fg/tv_metrics.py
|
f38e38ae3fce02c994c9be7c9605523073f0d3f0
|
[] |
no_license
|
kedz/noiseylg
|
ee0c54634767e8d3789b4ffb93727988c29c6979
|
17266e1a41e33aecb95dc1c3aca68f6bccee86d5
|
refs/heads/master
| 2020-07-30T11:22:08.351759
| 2019-10-30T21:33:11
| 2019-10-30T21:33:11
| 210,212,253
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,987
|
py
|
from plum.types import register, PlumModule, HP, props
from subprocess import check_output
from queue import Queue
from threading import Thread
from pathlib import Path
from tempfile import NamedTemporaryFile
import json
import d2t.preprocessing.tvs as preproc
@register("metrics.tv_metrics")
class TVMetrics(PlumModule):
path = HP(type=props.EXISTING_PATH)
search_fields = HP()
references_fields = HP()
def __pluminit__(self):
self._cache = None
self._queue = Queue(maxsize=0)
self._thread = None
self._thread = Thread(target=self._process_result)
self._thread.setDaemon(True)
self._thread.start()
self._hyp_fp = NamedTemporaryFile("w")
self._ref_fp = NamedTemporaryFile("w")
def postprocess(self, tokens, mr):
# TODO right now this is specific to the e2e dataset. Need to
# generalize how to do post processing.
tokens = [t for t in tokens if t[0] != "<" and t[-1] != ">"]
text = " ".join(tokens)
return preproc.lexicalize(text, mr)
def _process_result(self):
while True:
hyp, refs, mr = self._queue.get()
print(self.postprocess(hyp, mr), file=self._hyp_fp)
#print(" ".join(hyp), file=self._hyp_fp)
if isinstance(refs, (list, tuple)):
refs = "\n".join(refs)
print(refs, file=self._ref_fp, end="\n\n")
self._queue.task_done()
def reset(self):
self._cache = None
while not self._queue.empty():
self._queue.get()
self._queue.task_done()
self._hyp_fp = NamedTemporaryFile("w")
self._ref_fp = NamedTemporaryFile("w")
def apply_fields(self, fields, obj):
if not isinstance(fields, (list, tuple)):
fields = [fields]
for field in fields:
if hasattr(field, "__call__"):
obj = field(obj)
else:
obj = obj[field]
return obj
def forward(self, forward_state, batch):
search = self.apply_fields(self.search_fields, forward_state)
hypotheses = search.output()
reference_sets = self.apply_fields(self.references_fields, batch)
for i, (hyp, refs) in enumerate(zip(hypotheses, reference_sets)):
self._queue.put([hyp, refs, batch["mr"][i]])
def run_script(self):
self._queue.join()
self._ref_fp.flush()
self._hyp_fp.flush()
script_path = Path(self.path).resolve()
result_bytes = check_output(
[str(script_path), self._hyp_fp.name, self._ref_fp.name])
result = json.loads(result_bytes.decode("utf8"))
self._cache = result
self._ref_fp = None
self._hyp_fp = None
def compute(self):
if self._cache is None:
self.run_script()
return self._cache
def pretty_result(self):
return str(self.compute())
|
[
"kedzie@cs.columbia.edu"
] |
kedzie@cs.columbia.edu
|
b48cbc34229e604e32f551d252f74916fe277a3e
|
b789bf78ffe684782da7eed9df9d88a62d13ad82
|
/pyannote/database/protocol/__init__.py
|
d9f270782593bbe36e7e6fabe7d6039e4a1d5979
|
[
"MIT"
] |
permissive
|
yinruiqing/pyannote-database
|
8d77678efec06ffb797716e28b4673f1d5ec6453
|
731593b57082e675e0f661f6211f2dd261807561
|
refs/heads/master
| 2020-12-02T06:45:29.029202
| 2017-06-28T13:12:26
| 2017-06-28T13:12:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from .speaker_diarization import SpeakerDiarizationProtocol
from .speaker_recognition import SpeakerRecognitionProtocol
|
[
"bredin@limsi.fr"
] |
bredin@limsi.fr
|
0a101df3b11fa31f2f9270e4eb622a88f96554f3
|
41c605bf3a002a757cb2344cff526d7a7ae56ea9
|
/plotly/validators/scattercarpet/selected/marker/__init__.py
|
67542f2ea7f75af48003f76f0d057af6429e1e4c
|
[
"MIT"
] |
permissive
|
Jonathan-MW/plotly.py
|
9674b90b5de11fd9089e6afefd04b57bc4587829
|
7528c00772f44dee24c0df7e15d70a4852f171a8
|
refs/heads/master
| 2020-05-30T06:04:13.621478
| 2019-05-31T10:34:15
| 2019-05-31T10:34:15
| 189,571,988
| 2
| 0
|
MIT
| 2019-05-31T09:59:53
| 2019-05-31T09:59:53
| null |
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validates scattercarpet.selected.marker.size: non-negative number."""
    def __init__(
        self,
        plotly_name='size',
        parent_name='scattercarpet.selected.marker',
        **kwargs
    ):
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # kwargs.pop lets callers override the defaults below.
            edit_type=kwargs.pop('edit_type', 'style'),
            min=kwargs.pop('min', 0),
            role=kwargs.pop('role', 'style'),
            **kwargs
        )
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validates scattercarpet.selected.marker.opacity: number in [0, 1]."""
    def __init__(
        self,
        plotly_name='opacity',
        parent_name='scattercarpet.selected.marker',
        **kwargs
    ):
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'style'),
            max=kwargs.pop('max', 1),
            min=kwargs.pop('min', 0),
            role=kwargs.pop('role', 'style'),
            **kwargs
        )
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validates scattercarpet.selected.marker.color: any CSS color value."""
    def __init__(
        self,
        plotly_name='color',
        parent_name='scattercarpet.selected.marker',
        **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'style'),
            role=kwargs.pop('role', 'style'),
            **kwargs
        )
|
[
"noreply@github.com"
] |
Jonathan-MW.noreply@github.com
|
9d7d16c4a73674e00426099c87f36ac5e20d778f
|
60a4f0fa5c8239edbb4cd4390e3b4a7d70c919de
|
/user/migrations/0006_auto_20190805_2145.py
|
54fac07db74e7b1545406f8ec51ded054071913c
|
[] |
no_license
|
DuncanMoyo/Developer-Portfolio
|
cca6cbe29e13bddbf56584e400cbd169a515c047
|
9aa8dcef123b3144d9bf2c34a19f4c65c193ac98
|
refs/heads/master
| 2022-12-09T17:14:42.865413
| 2019-08-09T03:55:21
| 2019-08-09T03:55:21
| 200,691,837
| 0
| 0
| null | 2022-12-08T05:59:41
| 2019-08-05T16:31:39
|
CSS
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
# Generated by Django 2.2.4 on 2019-08-05 19:45
# Auto-generated migration: moves skill_level from UserProfile to Skill.
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('user', '0005_auto_20190805_2144'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='skill_level',
        ),
        migrations.AddField(
            model_name='skill',
            name='skill_level',
            field=models.IntegerField(default=0),
        ),
    ]
|
[
"duncanfmoyo@gmail.com"
] |
duncanfmoyo@gmail.com
|
35ff7c7b0b2608a161283aad1158714f840e4261
|
bf21cd0ef7a94fa106ccd9f91a4bbfdcda7f94ed
|
/python-basic/chapter06/ex02_1.py
|
b89200405d271e475c79d5066eb693b18a584a1a
|
[] |
no_license
|
juneglee/Deep_Learning
|
fdf8cae1b962aaa0ce557cb53f78a22b6d5ae1e8
|
17a448cf6a7c5b61b967dd78af3d328d63378205
|
refs/heads/master
| 2023-07-15T03:02:55.739619
| 2021-08-19T14:04:55
| 2021-08-19T14:04:55
| 273,253,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
# Advanced exception handling
# The exception object
# try:
#     statements that may raise an exception
# except <exception type> as <variable bound to the exception object>:
#     statements run when the exception occurs
# Exception objects
try:
    number_input_a = int(input("정수 입력> "))
    print("원의 반지름:", number_input_a)
    print("원의 둘레:", 2 * 3.14 * number_input_a)
    print("원의 넓이:", 3.14 * number_input_a * number_input_a)
except Exception as exception:
    print("type(exception):", type(exception))
    print("exception:", exception) # Exception is the parent class of all exceptions
# Distinguishing exceptions
# Code where several different exceptions can occur:
# Error 1: input that cannot be converted to an int, e.g. "yes!!" -> ValueError
# Error 2: an index beyond the list length, e.g. 100 -> IndexError
list_number = [52, 273, 32, 72, 100]
try:
    number_input = int(input("정수 입력> "))
    print("{}번째 요소: {}".format(number_input, list_number[number_input]))
except Exception as exception:
    print("type(exception):", type(exception))
    print("exception:", exception)
# Distinguishing exceptions by type
# try:
#     statements that may raise an exception
# except <exception type A>:
#     statements run when exception A occurs
# except <exception type B>:
#     statements run when exception B occurs
# except <exception type C>:
#     statements run when exception C occurs
list_number = [52, 273, 32, 72, 100]
try:
    number_input = int(input("정수 입력> "))
    print("{}번째 요소: {}".format(number_input, list_number[number_input]))
except ValueError:
    # Runs when a ValueError occurs
    print("정수를 입력해 주세요!")
except IndexError:
    # Runs when an IndexError occurs
    print("리스트의 인덱스를 벗어났어요!")
# Typed except clauses combined with the exception object,
# bound using the `as` keyword.
list_number = [52, 273, 32, 72, 100]
try:
    number_input = int(input("정수 입력> "))
    print("{}번째 요소: {}".format(number_input, list_number[number_input]))
except ValueError as exception:
    print("정수를 입력해 주세요!")
    print("exception:", exception)
except IndexError as exception:
    print("리스트의 인덱스를 벗어났어요!")
    print("exception:", exception)
|
[
"klcpop1@gmail.com"
] |
klcpop1@gmail.com
|
a8d1c3855133be357e3ac72d35616e8b7fc0d18b
|
ce07ccf78739a768971f393222fdca4a56315241
|
/employee_management/employee_management/doctype/ord/ord.py
|
5cfedfcf1c3f5f11200b1042214ecfbf25a91a73
|
[
"MIT"
] |
permissive
|
Gdinesh03/Frappe
|
563e0ddbe925be536f65f925787ed321a6098c0d
|
efd2d1568b6f5b8a4e0ff31e06a415c717a3d32a
|
refs/heads/master
| 2023-08-27T19:24:12.024442
| 2021-09-14T07:04:27
| 2021-09-14T07:04:27
| 406,260,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Gopi and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Ord(Document):
    """Order doctype: keeps total_amount in sync with its product rows."""
    def validate(self):
        # Sum the integer price of every row in the product_details
        # child table; an empty table yields a total of 0.
        self.total_amount = sum(
            int(row.product_price) for row in self.get('product_details')
        )
# self.total = mow
# @frappe.whitelist()
# def get_pro(orderb):
# source = frappe.db.sql(''' select * from `tabOrderb` where name = %s''',orderb,as_dict=1)
# for i in source:
# # frappe.log_error(i,"kk")
# sam = frappe.db.sql(''' select product_total from `tabProductdetb` where parent = %s''',i.name,as_dict=1)
# for d in sam:
# mow = sum(float(d.product_total) for d in sam)
# return mow
|
[
"vivekananthan112599@gmail.com"
] |
vivekananthan112599@gmail.com
|
9e94751b6f70c73ed790cef4cef4bfb8083f9ffd
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_traipsed.py
|
f59c7ae5d2434f5d2f1133296a72f7b2307b4aa4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#calss header
class _TRAIPSED():
def __init__(self,):
self.name = "TRAIPSED"
self.definitions = traipse
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['traipse']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
e8cff7405331705ecde8b0a9722786a9a9e6d615
|
11ff14c118240e87c4804d0373e4656d0683d479
|
/RatToolAgent/test/firefox_test.py
|
63c7ccf8fd97890cb406cd2616cc6efaffa93c1d
|
[] |
no_license
|
wxmmavis/OS3.1
|
e3028d9c79d5a1a17449fea6380fcdda902bdec7
|
26d954344207a82d2298821c3c4f01302393dc7e
|
refs/heads/master
| 2020-03-25T20:07:11.225493
| 2018-08-13T03:20:57
| 2018-08-13T03:20:57
| 144,115,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
# Python 2 test script: drives the RatToolAgent to download a file
# through a browser and verifies nothing raises along the way.
import sys
sys.path += ['../../RatToolAgent']
import RatToolAgent as rta
# NOTE(review): `id` shadows the builtin; kept for compatibility.
id = rta.init_and_start_browser()
# Download parameters; remaining keys (page_title) are passed as **kwargs.
conf = {
    'validation_url': "http://172.16.10.252/authenticated/",
    'download_loc': r"//a[@id='logo']",
    'file_name': "logo.zip",
    'page_title': "Ruckus Automation Test",
}
try:
    rta.download_file_on_web_server(id, conf.pop('validation_url'),
                                    conf.pop('download_loc'),
                                    conf.pop('file_name'),
                                    **conf
                                    )
except Exception, e:
    # Python 2 except syntax — this file is not Python 3 compatible.
    print '........................................'
    print 'Raise:' + e.message
rta.close_browser(id)
|
[
"1475806321@qq.com"
] |
1475806321@qq.com
|
8eb0ddd533b6242fa21b29701e10215b497fcd90
|
d93901e7ff019c7c929594c17b9ed0c575dd1165
|
/NumPyNet/box.py
|
506948ebbb806413bf3c0380425a8914f0f69669
|
[
"MIT"
] |
permissive
|
Nico-Curti/NumPyNet
|
0e673ad3da4120cd761a5b1f4c1f0c429cfd20a9
|
c5e217751e28f0812282333b83964b7fee217cfb
|
refs/heads/master
| 2022-05-04T04:51:50.076629
| 2022-03-28T10:02:15
| 2022-03-28T10:02:15
| 199,490,280
| 57
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,109
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import operator
from functools import wraps
__author__ = ['Mattia Ceccarelli', 'Nico Curti']
__email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
class Box (object):
  '''
  Detection box class

  Stores a rectangle as center coordinates plus size: (x, y, w, h).

  Parameters
  ----------
    coords : tuple (default=None)
      Box Coordinates as (x, y, w, h)

  Example
  -------
  >>> b1 = Box((.5, .3, .2, .1))
  >>> b2 = Box((.4, .5, .2, .5))
  >>> iou = b1.iou(b2)
  '''

  def __init__ (self, coords=None):

    if coords is not None:
      try:
        self.x, self.y, self.w, self.h = coords
      except ValueError:
        class_name = self.__class__.__name__
        raise ValueError('{0}: inconsistent input shape. Expected a 4D (x, y, w, h) shapes and given {1}'.format(class_name, coords))
    else:
      self.x, self.y, self.w, self.h = (None, None, None, None)

  def _is_box (func):
    '''
    Decorator function to check if the input variable is a Box object
    '''
    @wraps(func)
    def _ (self, b):
      if isinstance(b, self.__class__):
        return func(self, b)
      else:
        raise ValueError('Box functions can be applied only on other Box objects')
    return _

  @property
  def box(self):
    '''
    Get the box coordinates

    Returns
    -------
      coords : tuple
        Box coordinates as (x, y, w, h)
    '''
    return (self.x, self.y, self.w, self.h)

  def __iter__ (self):
    '''
    Iter over coordinates as (x, y, w, h)
    '''
    yield self.x
    yield self.y
    yield self.w
    yield self.h

  def __eq__ (self, other):
    '''
    Check if the box coordinates are equal
    '''
    return isinstance(other, Box) and tuple(self) == tuple(other)

  def __ne__ (self, other):
    '''
    Check if the box coordinates are NOT equal
    '''
    return not (self == other)

  def __repr__ (self):
    '''
    Object representation
    '''
    return type(self).__name__ + repr(tuple(self))

  def _overlap (self, x1, w1, x2, w2):
    '''
    Compute the 1D overlap of two center+width intervals.

    Parameters
    ----------
      x1 : float
        Center of the first interval
      w1 : float
        Width of the first interval
      x2 : float
        Center of the second interval
      w2 : float
        Width of the second interval

    Returns
    -------
      overlap : float
        Length of the intersection (negative when disjoint)
    '''
    half_w1, half_w2 = w1 * .5, w2 * .5
    l1, l2 = x1 - half_w1, x2 - half_w2
    r1, r2 = x1 + half_w1, x2 + half_w2
    return min(r1, r2) - max(l1, l2)

  @_is_box
  def intersection (self, other):
    '''
    Common area between boxes

    Parameters
    ----------
      other : Box
        2nd term of the evaluation

    Returns
    -------
      intersection : float
        Intersection area of two boxes
    '''
    w = self._overlap(self.x, self.w, other.x, other.w)
    h = self._overlap(self.y, self.h, other.y, other.h)
    # Clamp negative overlaps (disjoint boxes) to zero area.
    w = w if w > 0. else 0.
    h = h if h > 0. else 0.
    return w * h

  __and__ = intersection

  @_is_box
  def union (self, other):
    '''
    Full area without intersection

    Parameters
    ----------
      other : Box
        2nd term of the evaluation

    Returns
    -------
      union : float
        Union area of the two boxes
    '''
    return self.area + other.area - self.intersection(other)

  __add__ = union

  @_is_box
  def iou (self, other):
    '''
    Intersection over union

    Parameters
    ----------
      other : Box
        2nd term of the evaluation

    Returns
    -------
      iou : float
        Intersection over union between boxes (NaN when union is zero)
    '''
    union = self.union(other)
    return self.intersection(other) / union if union != 0. else float('nan')

  __sub__ = iou

  @_is_box
  def rmse (self, other):
    '''
    Root mean square error of the boxes

    Parameters
    ----------
      other : Box
        2nd term of the evaluation

    Returns
    -------
      rmse : float
        Euclidean distance between the two (x, y, w, h) tuples
    '''
    diffs = tuple(map(operator.sub, self, other))
    dot = sum(map(operator.mul, diffs, diffs))
    return dot**(.5)

  @property
  def center(self):
    '''
    In the current storage the x,y are the center of the box

    Returns
    -------
      center : tuple
        Center of the current box.
    '''
    # BUG FIX: the original read `self._object.box`, but no `_object`
    # attribute exists anywhere in this class -> AttributeError.
    x, y, _, _ = self.box
    return (x, y)

  @property
  def dimensions(self):
    '''
    In the current storage the w,h are the dimensions of the rectangular box

    Returns
    -------
      dims : tuple
        Dimensions of the current box as (width, height).
    '''
    # BUG FIX: same `self._object` error as in `center`.
    _, _, w, h = self.box
    return (w, h)

  @property
  def area(self):
    '''
    Compute the are of the box

    Returns
    -------
      area : float
        Area of the current box.
    '''
    return self.w * self.h

  @property
  def coords(self):
    '''
    Return box coordinates in clock order (left, top, right, bottom)

    Returns
    -------
      coords : tuple
        Coordinates as (left, top, right, bottom)
    '''
    x, y, w, h = self.box
    half_w, half_h = w * .5, h * .5
    return (x - half_w, y - half_h, x + half_w, y + half_h)

  def __str__(self):
    '''
    Printer
    '''
    fmt = '(left={0:.3f}, bottom={1:.3f}, right={2:.3f}, top={3:.3f})'.format(*self.coords)
    return fmt
if __name__ == '__main__':
  # Demo: build two boxes, print their pairwise metrics, then draw them.
  import pylab as plt
  from matplotlib.patches import Rectangle
  b1 = Box((.5, .3, .2, .1))
  x_1, y_1, w_1, h_1 = b1.box
  left_1, top_1, right_1, bottom_1 = b1.coords
  print('Box1: {}'.format(b1))
  b2 = Box((.4, .5, .2, .5))
  x_2, y_2, w_2, h_2 = b2.box
  left_2, top_2, right_2, bottom_2 = b2.coords
  print('Box2: {}'.format(b2))
  print('Intersection: {:.3f}'.format(b1.intersection(b2)))
  print('Union: {:.3f}'.format(b1.union(b2)))
  print('IOU: {:.3f}'.format(b1.iou(b2)))
  print('rmse: {:.3f}'.format(b1.rmse(b2)))
  plt.figure()
  axis = plt.gca()
  # Rectangle wants the (left, top) corner plus width/height.
  axis.add_patch(Rectangle(xy=(left_1, top_1), width=w_1, height=h_1, alpha=.5, linewidth=2, color='blue'))
  axis.add_patch(Rectangle(xy=(left_2, top_2), width=w_2, height=h_2, alpha=.5, linewidth=2, color='red'))
  plt.show()
|
[
"nico.curti2@unibo.it"
] |
nico.curti2@unibo.it
|
4bad0a9d74fdc33c1b08594b16c3ae6ae2d4ad36
|
26b6a35e2415d94fbc1c9fc43814309a5d6f443b
|
/tests/test_openapi_basic.py
|
f18074c73970570a97135bc4faab94c39ee95a93
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
BigRLab/apiflask
|
57e0c036aa5d284da5340dcecd49108eea651bcd
|
d6dd5595009be5de6a7741a5a887276c3ac011bf
|
refs/heads/main
| 2023-05-30T21:30:17.930046
| 2021-07-11T04:07:15
| 2021-07-11T04:07:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,700
|
py
|
import json
import pytest
from openapi_spec_validator import validate_spec
from .schemas import BarSchema
from .schemas import BazSchema
from .schemas import FooSchema
from apiflask import doc
from apiflask import input
from apiflask import output
from apiflask import Schema as BaseSchema
from apiflask.fields import Integer
def test_spec(app):
    """The generated spec exists and declares an OpenAPI version."""
    spec = app.spec
    assert spec
    assert 'openapi' in spec
def test_spec_processor(app, client):
    """A registered spec processor can rewrite the spec before serving."""

    @app.spec_processor
    def edit_spec(spec):
        # Check the defaults first, then rewrite both fields.
        assert spec['openapi'] == '3.0.3'
        assert app.title == 'APIFlask'
        assert spec['info']['title'] == 'APIFlask'
        spec['openapi'] = '3.0.2'
        spec['info']['title'] = 'Foo'
        return spec

    rv = client.get('/openapi.json')
    assert rv.status_code == 200
    validate_spec(rv.json)
    # The served document reflects the processor's edits.
    assert rv.json['openapi'] == '3.0.2'
    assert rv.json['info']['title'] == 'Foo'
@pytest.mark.parametrize('spec_format', ['json', 'yaml', 'yml'])
def test_get_spec(app, spec_format):
    """_get_spec returns a dict for JSON and a YAML string otherwise."""
    spec = app._get_spec(spec_format)
    if spec_format != 'json':
        assert 'title: APIFlask' in spec
    else:
        assert isinstance(spec, dict)
def test_get_spec_force_update(app):
    """_get_spec() caches its result; force_update regenerates it."""
    app._get_spec()

    @app.route('/foo')
    @output(FooSchema)
    def foo():
        pass

    # The cached spec predates the /foo route, so it is absent...
    spec = app._get_spec()
    assert '/foo' not in spec['paths']
    # ...until a rebuild is forced.
    new_spec = app._get_spec(force_update=True)
    assert '/foo' in new_spec['paths']
def test_spec_attribute(app):
    """The ``spec`` property reflects routes added after a manual build."""
    spec = app._get_spec()

    @app.route('/foo')
    @output(FooSchema)
    def foo():
        pass

    # The earlier snapshot is stale, while app.spec is rebuilt on access.
    assert '/foo' not in spec['paths']
    assert '/foo' in app.spec['paths']
def test_spec_schemas(app):
    """Each output schema gets one entry in components/schemas, named from
    the class with any ``Schema`` suffix stripped (partial schemas gain an
    ``Update`` suffix, as asserted below)."""

    @app.route('/foo')
    @output(FooSchema(partial=True))
    def foo():
        pass

    @app.route('/bar')
    @output(BarSchema(many=True))
    def bar():
        pass

    @app.route('/baz')
    @output(BazSchema)
    def baz():
        pass

    class Spam(BaseSchema):
        id = Integer()

    @app.route('/spam')
    @output(Spam)
    def spam():
        pass

    # A class literally named "Schema" keeps its name in the components.
    class Schema(BaseSchema):
        id = Integer()

    @app.route('/schema')
    @output(Schema)
    def schema():
        pass

    with app.app_context():
        spec = app.spec
    assert len(spec['components']['schemas']) == 5
    # FooSchema(partial=True) is registered as "FooUpdate".
    assert 'FooUpdate' in spec['components']['schemas']
    assert 'Bar' in spec['components']['schemas']
    assert 'Baz' in spec['components']['schemas']
    assert 'Spam' in spec['components']['schemas']
    assert 'Schema' in spec['components']['schemas']
def test_servers_and_externaldocs(app):
    """Setting ``external_docs`` and ``servers`` propagates into the spec."""
    assert app.external_docs is None
    assert app.servers is None

    external_docs = {
        'description': 'Find more info here',
        'url': 'https://docs.example.com/'
    }
    servers = [
        {
            'url': 'http://localhost:5000/',
            'description': 'Development server'
        },
        {
            'url': 'https://api.example.com/',
            'description': 'Production server'
        }
    ]
    app.external_docs = external_docs
    app.servers = servers

    rv = app.test_client().get('/openapi.json')
    assert rv.status_code == 200
    validate_spec(rv.json)
    # Both attributes are echoed verbatim in the served document.
    assert rv.json['externalDocs'] == external_docs
    assert rv.json['servers'] == servers
def test_auto_200_response(app, client):
    """A bare / input-only / doc-only view gets an automatic 200 response,
    while declaring any other status code suppresses it."""

    @app.get('/foo')
    def bare():
        pass

    @app.get('/bar')
    @input(FooSchema)
    def only_input():
        pass

    @app.get('/baz')
    @doc(summary='some summary')
    def only_doc():
        pass

    @app.get('/eggs')
    @output(FooSchema, 204)
    def output_204():
        pass

    @app.get('/spam')
    @doc(responses={204: 'empty'})
    def doc_responses():
        pass

    rv = client.get('/openapi.json')
    assert rv.status_code == 200
    validate_spec(rv.json)
    # Auto-added 200 responses for views with no explicit status:
    assert '200' in rv.json['paths']['/foo']['get']['responses']
    assert '200' in rv.json['paths']['/bar']['get']['responses']
    assert '200' in rv.json['paths']['/baz']['get']['responses']
    # Suppressed when another status code is declared:
    assert '200' not in rv.json['paths']['/eggs']['get']['responses']
    assert '200' not in rv.json['paths']['/spam']['get']['responses']
    assert rv.json['paths']['/spam']['get']['responses'][
        '204']['description'] == 'empty'
def test_sync_local_json_spec(app, client, tmp_path):
    """With SYNC_LOCAL_SPEC on, serving the spec writes a JSON copy to disk."""
    local_spec_path = tmp_path / 'openapi.json'
    app.config['SYNC_LOCAL_SPEC'] = True
    app.config['LOCAL_SPEC_PATH'] = local_spec_path
    app.config['SPEC_FORMAT'] = 'json'

    rv = client.get('/openapi.json')
    assert rv.status_code == 200
    validate_spec(rv.json)

    # The on-disk copy matches the in-memory spec and is pretty-printed.
    spec_content = local_spec_path.read_text()
    assert json.loads(spec_content) == app.spec
    assert '{\n "info": {' in spec_content
    assert '"title": "APIFlask",' in spec_content
def test_sync_local_yaml_spec(app, client, tmp_path):
    """With SYNC_LOCAL_SPEC on and YAML format, the YAML text is synced."""
    local_spec_path = tmp_path / 'openapi.json'
    app.config['SYNC_LOCAL_SPEC'] = True
    app.config['LOCAL_SPEC_PATH'] = local_spec_path
    app.config['SPEC_FORMAT'] = 'yaml'

    rv = client.get('/openapi.json')
    assert rv.status_code == 200

    # The written file is exactly the YAML rendering of the spec object.
    spec_content = local_spec_path.read_text()
    assert spec_content == str(app.spec)
    assert 'title: APIFlask' in spec_content
def test_sync_local_spec_no_path(app):
    """Enabling SYNC_LOCAL_SPEC without LOCAL_SPEC_PATH raises TypeError."""
    app.config['SYNC_LOCAL_SPEC'] = True
    with pytest.raises(TypeError):
        app.spec  # accessing the spec triggers the local-sync attempt
|
[
"withlihui@gmail.com"
] |
withlihui@gmail.com
|
abcd9cf3a6a72e23d78bf410cfbdac852343d238
|
eb40dce4039d528b9cd06dbeda75da09d09d7fc5
|
/need_install/Django-1.8.17/tests/basic/models.py
|
0ebe3e0b4af812d92177a78a86fa007380fb0e16
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MulticsYin/MulticsSH
|
39b62189446787c7f0f037b1640c9c780bd1dddd
|
5837a0bff0e7da0e8535e4e0b31ef6baf24274b4
|
refs/heads/master
| 2021-08-28T07:53:51.759679
| 2017-12-11T15:31:03
| 2017-12-11T15:31:03
| 82,428,902
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# -*- coding: utf-8 -*-
"""
Bare-bones model
This is a basic model with only two non-primary-key fields.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
    # Minimal two-field model used throughout the basic-model tests.
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()

    class Meta:
        # Deterministic default ordering for queryset assertions.
        ordering = ('pub_date', 'headline')

    def __str__(self):
        return self.headline
class ArticleSelectOnSave(Article):
    # Proxy variant of Article that opts into Meta.select_on_save
    # (SELECT before saving instead of the optimistic UPDATE path).
    class Meta:
        proxy = True
        select_on_save = True
@python_2_unicode_compatible
class SelfRef(models.Model):
    # Nullable self-reference; related_name='+' disables the reverse accessor.
    selfref = models.ForeignKey('self', null=True, blank=True,
            related_name='+')
    # Nullable FK to Article, cleared (not cascaded) when the Article goes.
    article = models.ForeignKey(Article, on_delete=models.SET_NULL, null=True, blank=True)

    def __str__(self):
        # This method intentionally doesn't work for all cases - part
        # of the test for ticket #20278
        return SelfRef.objects.get(selfref=self).pk
|
[
"multics_luo@163.com"
] |
multics_luo@163.com
|
0ecb406dc4b005795c6d37aaa895fd106844ac7f
|
b1e7481f8b5bf40c2547c95b1863e25b11b8ef78
|
/Kai/crab/NANOv7_NoveCampaign/2017/crab_script_2017_Mu_C.py
|
a8cd9f368837fbf5bec45d00d8e189ee53cc12fe
|
[
"Apache-2.0"
] |
permissive
|
NJManganelli/FourTopNAOD
|
3df39fd62c0546cdbb1886b23e35ebdc1d3598ad
|
c86181ae02b1933be59d563c94e76d39b83e0c52
|
refs/heads/master
| 2022-12-22T22:33:58.697162
| 2022-12-17T01:19:36
| 2022-12-17T01:19:36
| 143,607,743
| 1
| 1
|
Apache-2.0
| 2022-06-04T23:11:42
| 2018-08-05T11:40:42
|
Python
|
UTF-8
|
Python
| false
| false
| 6,794
|
py
|
#!/usr/bin/env python
import os, time, collections, copy, json, multiprocessing
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import *
from PhysicsTools.NanoAODTools.postprocessing.framework.crabhelper import inputFiles,runsAndLumis
from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.btv.btagSFProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetmetHelperRun2 import *
from FourTopNAOD.Kai.modules.LeptonSkimmer import *
from FourTopNAOD.Kai.modules.JetMETSkimmer import *
# ---------------------------------------------------------------------------
# Static job configuration: NanoAOD post-processing of 2017 Run C data in the
# single-muon trigger channel.  MC-only bookkeeping fields are left as None.
# ---------------------------------------------------------------------------
isData = True
isUltraLegacy = False
era = "2017"
subera = "C"             # run period; only meaningful for data
thePreselection = None   # no preselection cut string applied
crossSection = None      # MC only
equivLumi = 41.53        # 2017 integrated luminosity — presumably in /fb, confirm
nEventsPositive = None   # MC only
nEventsNegative = None   # MC only
sumWeights = None        # MC only
TriggerChannel = "Mu"
JESUnc = "Merged" # options: "All", "Merged", "Total"
theFiles = inputFiles()

# Certified-lumi ("Golden") JSON files per era, for both the EOY ("non-UL")
# and Ultra-Legacy ("UL") reconstruction campaigns.
GoldenJSON = {"2016": {"non-UL": "Cert_271036-284044_13TeV_ReReco_07Aug2017_Collisions16_JSON.txt",
                       "UL": "Cert_271036-284044_13TeV_Legacy2016_Collisions16_JSON.txt"
                       },
              "2017": {"non-UL": "Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON_v1.txt",
                       "UL": "Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt"
                       },
              "2018": {"non-UL": "Cert_314472-325175_13TeV_17SeptEarlyReReco2018ABC_PromptEraD_Collisions18_JSON.txt",
                       "UL": "Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt"
                       }
              }

if isData:
    # Resolve the Golden JSON path inside the CMSSW area; depending on the
    # build, package data lives under python/ or under src/.../python.
    theLumis = os.path.join(os.environ["CMSSW_BASE"], "python/FourTopNAOD/Kai/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
    print("Loading Golden Json: {}".format(theLumis))
    if not os.path.isfile(theLumis):
        theLumis = os.path.join(os.environ["CMSSW_BASE"], "src/FourTopNAOD/Kai/python/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
        if not os.path.isfile(theLumis):
            raise RuntimeError("Valid GoldenJSON file not found, if running on CRAB try a new scram build before resubmitting")
else:
    # MC is not filtered on certified lumi sections.
    theLumis = None

# ---------------------------------------------------------------------------
# Assemble the chain of post-processing modules.
# ---------------------------------------------------------------------------
moduleCache = []
if not isData:
    # NOTE(review): indentation was reconstructed — the pileup and b-tag SF
    # modules below are assumed to be MC-only; confirm against the repository.
    if era == "2016":
        moduleCache.append(puWeight_2016())
    elif era == "2017":
        moduleCache.append(puWeight_2017())
    elif era == "2018":
        moduleCache.append(puWeight_2018())
    else:
        raise RuntimeError("Unexpected era identifier {}".format(era))
    if JESUnc in ["All", "Merged"]: #btag POG provides all JEC unc sources, except for RelativeSample
        btagjes_sources = ['jes', 'jesAbsoluteMPFBias', 'jesAbsoluteScale', 'jesAbsoluteStat', 'jesFlavorQCD', 'jesFragmentation', 'jesPileUpDataMC', 'jesPileUpPtBB', 'jesPileUpPtEC1', 'jesPileUpPtEC2', 'jesPileUpPtHF', 'jesPileUpPtRef', 'jesRelativeBal', 'jesRelativeFSR', 'jesRelativeJEREC1', 'jesRelativeJEREC2', 'jesRelativeJERHF', 'jesRelativePtBB', 'jesRelativePtEC1', 'jesRelativePtEC2', 'jesRelativePtHF', 'jesRelativeStatEC', 'jesRelativeStatFSR', 'jesRelativeStatHF', 'jesSinglePionECAL', 'jesSinglePionHCAL', 'jesTimePtEta']
        # if JESUnc == "Merged": #no btag shape unc for regrouped JEC available, so use the total one ("jes") and the remaining single ones that are not grouped (see also: https://docs.google.com/spreadsheets/d/1Feuj1n0MdotcPq19Mht7SUIgvkXkA4hiB0BxEuBShLw/edit#gid=1345121349)
        #     btagjes_sources = ['jes', 'jesFlavorQCD','jesPileUpPtEC2', 'jesRelativeBal']
    else:
        btagjes_sources = ['jes']
    # b-tag scale factors for both taggers, medium WP plus shape correction.
    moduleCache.append(btagSFProducer(era,
                                      algo="deepjet",
                                      selectedWPs=['M', 'shape_corr'],
                                      sfFileName=None, #Automatically deduced
                                      verbose=0,
                                      jesSystsForShape=btagjes_sources
                                      )
                       )
    moduleCache.append(btagSFProducer(era,
                                      algo="deepcsv",
                                      selectedWPs=['M', 'shape_corr'],
                                      sfFileName=None, #Automatically deduced
                                      verbose=0,
                                      jesSystsForShape=btagjes_sources
                                      )
                       )

# JME corrections run on both data (with run period) and MC (with smearing).
#Need to make it into a function, so extra () pair...
jmeModule = createJMECorrector(isMC=(not isData),
                               dataYear=int(era),
                               runPeriod=subera if isData else None,
                               jesUncert=JESUnc,
                               jetType="AK4PFchs",
                               noGroom=False,
                               metBranchName="METFixEE2017" if era == "2017" else "MET",
                               applySmearing=True,
                               isFastSim=False,
                               applyHEMfix=True if era == "2018" and isUltraLegacy else False,
                               splitJER=False,
                               saveMETUncs=['T1', 'T1Smear']
                               )
moduleCache.append(jmeModule())

# Event skims: trigger + lepton flags, then a jet/MET preselection.
moduleCache.append(TriggerAndLeptonSkimmer('baseline',
                                           era=era,
                                           subera=subera,
                                           isData=isData,
                                           TriggerChannel=TriggerChannel,
                                           fillHists=False,
                                           mode="Flag",
                                           )
                   )
moduleCache.append(JetMETSkimmer(jetMinPt=20.0,
                                 jetMaxEta=2.4 if era == "2016" else 2.5,
                                 jetMinID=0b010,  # jet ID bitmask — confirm meaning against JetMETSkimmer
                                 jetMinCount=4,
                                 minPseudoHT=350,
                                 fillHists=False
                                 )
                   )

# Run the post-processor over the CRAB input files, filtering on the Golden
# JSON, producing the framework job report CRAB expects, and merging the
# outputs into tree.root / hist.root.
p=PostProcessor(".",
                theFiles,
                modules=moduleCache,
                cut=thePreselection,
                provenance=True,
                fwkJobReport=True,
                jsonInput=theLumis,
                histFileName="hist.root",
                histDirName="plots",
                branchsel=None,
                outputbranchsel=None,
                compression="LZMA:9",
                friend=False,
                postfix=None,
                noOut=False,
                justcount=False,
                haddFileName="tree.root",
                maxEntries=None,
                firstEntry=0,
                prefetch=True,
                longTermCache=False
                )
p.run()
|
[
"nicholas.james.manganelli@cern.ch"
] |
nicholas.james.manganelli@cern.ch
|
0cbc26a7c531c9e66e72aff03e1ef1e05d090406
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2542/60761/235001.py
|
0f6cce935b31eb1a6dc6d3e0854022eb80c48159
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
import math
def longest_consecutive_run(nums):
    """Return the length of the longest run of consecutive integers in *nums*.

    Duplicates are ignored (e.g. [1, 1, 2] has a run of length 2) and an
    empty input yields 0.
    """
    values = sorted(set(nums))  # dedupe: duplicates must not break a run
    if not values:
        return 0
    best = 1
    current = 1
    for previous, value in zip(values, values[1:]):
        if value == previous + 1:
            current += 1
            # Update the best length immediately: the original only updated
            # it when a run was broken, so a run reaching the end of the
            # list was never counted (input "[1,2,3]" printed 1).
            best = max(best, current)
        else:
            current = 1
    return best


if __name__ == '__main__':
    # Input looks like "[1,2,3]" (brackets optional); parse to ints.
    raw = input("")
    raw = raw.replace("[", "").replace("]", "")
    nums = list(map(int, raw.split(",")))
    print(longest_consecutive_run(nums))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
7c658b02af1216d35936435030ac30caedbcf48f
|
e79888cd68177e7ec5125270cdc52f888e211e78
|
/hirao/chapter01/knock04.py
|
de4c0c4219da8267d76dd51e2e4cbcf9b31ea0fd
|
[] |
no_license
|
cafenoctua/100knock2019
|
ec259bee27936bdacfe0097d42f23cc7500f0a07
|
88717a78c4290101a021fbe8b4f054f76c9d3fa6
|
refs/heads/master
| 2022-06-22T04:42:03.939373
| 2019-09-03T11:05:19
| 2019-09-03T11:05:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
# NLP 100 knock 04: map element-symbol-like prefixes to word positions.
s = "Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can."
drop = ",."
print(s)

# Strip punctuation, then split the sentence into words.
for c in drop:
    s = s.replace(c, "")
s = s.split()

# Words at these 1-based positions contribute one letter; all others two.
display_list = [1, 5, 6, 7, 8, 9, 15, 16, 19]
ans_dict = {}
for position, word in enumerate(s, start=1):
    symbol = word[0] if position in display_list else word[:2]
    ans_dict[symbol] = position
print(ans_dict)
|
[
"reohirao116@gmail.com"
] |
reohirao116@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.