blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a6002b838c345fcf81971450b2d2349ee5589150 | d6ff55dd9e2c9992b2db4c613cd5b8f18a6f5be1 | /takeyourmeds/reminders/reminders_messages/views.py | c86f499691792b07c8052e2975178dbf97b957e0 | [
"MIT"
] | permissive | RickIsWright/takeyourmeds-web | f2f8d6da7e2ad1adfe67cbebe0a20bc7c08f45eb | edf24188f26948902cfb69793b4d5aa3cf8b6dea | refs/heads/master | 2023-05-08T20:34:58.640415 | 2016-02-07T10:50:55 | 2016-02-07T10:50:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | import datetime
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from .enums import StateEnum
from .models import Message
@csrf_exempt
@require_POST
def status_callback(request, ident):
    """Twilio delivery-status webhook for an outgoing SMS.

    https://www.twilio.com/help/faq/sms/what-do-the-sms-statuses-mean

    Example POST fields:

        ApiVersion: 2010-04-01
        AccountSid: AC7d6b676d2a17527a71a2bb41301b5e6f
        SmsSid: SMaff4a74e3fe241b3b35f84ccf5130d50
        From: TakeYourMed
        MessageSid: SMaff4a74e3fe241b3b35f84ccf5130d50
        SmsStatus: sent
        MessageStatus: sent
        To: +447490416163

    Looks up the message by its ident, maps Twilio's textual status onto
    our ``StateEnum`` (falling back to ``unknown`` for anything
    unrecognised or missing) and records when the state was updated.
    """
    message = get_object_or_404(Message, ident=ident)

    # Twilio status string -> internal state. Any status not listed here
    # (or an absent MessageStatus field) maps to StateEnum.unknown.
    transitions = {
        'accepted': StateEnum.sending,
        'queued': StateEnum.sending,
        'sending': StateEnum.sending,
        'sent': StateEnum.sent,
        'delivered': StateEnum.delivered,
        'failed': StateEnum.failed,
        'undelivered': StateEnum.failed,
    }
    message.state = transitions.get(
        request.POST.get('MessageStatus'), StateEnum.unknown)

    message.state_updated = datetime.datetime.utcnow()
    message.save(update_fields=('state', 'state_updated'))

    # Twilio only needs a 200; the body is irrelevant.
    return HttpResponse('')
| [
"chris@chris-lamb.co.uk"
] | chris@chris-lamb.co.uk |
84d64df5377f89c6129e1845a08475f98bba9834 | e8199f1d424592affe19b50fd96a02815067d1b1 | /Trees/114. Flatten Binary Tree to Linked List.py | c0b48b6ebddd9397ba740c2c767d12c38017c08f | [] | no_license | srajsonu/LeetCode-Solutions-Python | 39a809e4c6d555a3a3055ce03d59cfa40b93a287 | 8ec31c8df2885f3da533424ba13060b7d3e3af78 | refs/heads/master | 2023-03-19T10:05:42.578615 | 2021-03-13T17:21:36 | 2021-03-13T17:21:36 | 280,716,200 | 0 | 1 | null | 2020-10-06T09:54:02 | 2020-07-18T18:32:04 | Python | UTF-8 | Python | false | false | 324 | py | class Solution:
def flatten(self, root) -> None:
if not root:
return
self.flatten(root.left)
self.flatten(root.right)
tmp = root.right
root.right = root.left
root.left = None
while root.right:
root = root.right
root.right = tmp
| [
"srajsonu02@gmail.com"
] | srajsonu02@gmail.com |
e62f5aed9913a6893e12bc1c232e29e82e090b74 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dc/huaweicloudsdkdc/v3/model/list_project_tags_response.py | a613ada4478b95c5198278565770aa8e70e54d72 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,055 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListProjectTagsResponse(SdkResponse):
    """Response model for the "list project tags" API call.

    Attributes:
        openapi_types (dict): maps an attribute name to its declared type.
        attribute_map (dict): maps an attribute name to its JSON key.
    """

    sensitive_list = []

    openapi_types = {
        'tags': 'list[Tag]',
        'request_id': 'str'
    }

    attribute_map = {
        'tags': 'tags',
        'request_id': 'request_id'
    }

    def __init__(self, tags=None, request_id=None):
        """Build a ListProjectTagsResponse as defined in the huaweicloud SDK.

        :param tags: the list of tags returned by the service
        :type tags: list[:class:`huaweicloudsdkdc.v3.Tag`]
        :param request_id: identifier of the originating request
        :type request_id: str
        """
        super(ListProjectTagsResponse, self).__init__()

        self._tags = None
        self._request_id = None
        self.discriminator = None

        # Only assign through the property setters when a value was given,
        # leaving unset attributes as None.
        if tags is not None:
            self.tags = tags
        if request_id is not None:
            self.request_id = request_id

    @property
    def tags(self):
        """The list of tags returned by the service.

        :rtype: list[:class:`huaweicloudsdkdc.v3.Tag`]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Set the list of tags.

        :type tags: list[:class:`huaweicloudsdkdc.v3.Tag`]
        """
        self._tags = tags

    @property
    def request_id(self):
        """Identifier of the originating request.

        :rtype: str
        """
        return self._request_id

    @request_id.setter
    def request_id(self, request_id):
        """Set the request identifier.

        :type request_id: str
        """
        self._request_id = request_id

    def to_dict(self):
        """Serialize the model into a plain dict.

        Nested models are converted via their own ``to_dict``; attributes
        listed in ``sensitive_list`` are masked as ``"****"``.
        """
        def _convert(item):
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                converted = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                converted = value.to_dict()
            elif isinstance(value, dict):
                converted = {key: _convert(val) for key, val in value.items()}
            elif attr in self.sensitive_list:
                converted = "****"
            else:
                converted = value
            result[attr] = converted
        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Two responses are equal when they are the same type and all
        their attributes compare equal."""
        return (isinstance(other, ListProjectTagsResponse)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
eb686b7fd91f8a800ac24739cab4e201a963a750 | 4b94ea665c5423e6097b65ccaa6045bc0c84879f | /service_catalog/models/tower_server.py | cb7061a987ecc37859b758485b3bbb51c018e682 | [
"Apache-2.0"
] | permissive | jeffkight/squest | 65051af133584f02b76b9b3476a2e1edae7a3e85 | fb5568af7aa174d34dfe1f24b4a2508d321e44ac | refs/heads/master | 2023-04-26T17:41:00.642466 | 2021-05-19T16:24:28 | 2021-05-19T16:33:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | from django.db import models
from towerlib import Tower
class TowerServer(models.Model):
    """An Ansible Tower / AWX endpoint and the credentials used to reach it."""

    name = models.CharField(max_length=100)
    host = models.CharField(max_length=200, unique=True)
    token = models.CharField(max_length=200)
    secure = models.BooleanField(default=True)
    ssl_verify = models.BooleanField(default=False)

    def sync(self):
        """Mirror every job template of this Tower server locally.

        Templates already known locally get their survey refreshed;
        unknown ones are created. In both cases every Operation bound to
        the template is updated afterwards.
        """
        # Imported lazily to avoid circular model imports.
        from .job_templates import JobTemplate as JobTemplateLocal
        from service_catalog.models import Operation

        tower = self.get_tower_instance()
        for remote_template in tower.job_templates:
            try:
                local_template = JobTemplateLocal.objects.get(
                    tower_id=remote_template.id)
            except JobTemplateLocal.DoesNotExist:
                local_template = JobTemplateLocal.objects.create(
                    name=remote_template.name,
                    tower_id=remote_template.id,
                    survey=remote_template.survey_spec,
                    tower_server=self)
            else:
                # Known template: refresh its survey definition.
                local_template.survey = remote_template.survey_spec
                local_template.save()
            # Propagate the (possibly new) survey to dependent operations.
            Operation.update_survey_after_job_template_update(local_template)

    def get_tower_instance(self):
        """Return a ``towerlib.Tower`` client configured from this row."""
        return Tower(self.host, None, None, secure=self.secure,
                     ssl_verify=self.ssl_verify, token=self.token)
| [
"nico.marcq@gmail.com"
] | nico.marcq@gmail.com |
bca88e88c8f3f3c0e45b8e2d5553a78f53445208 | caaf56727714f8c03be38710bc7d0434c3ec5b11 | /homeassistant/components/nexia/entity.py | 7820ebb62165395095f1cd0c4e67e852db799e77 | [
"Apache-2.0"
] | permissive | tchellomello/home-assistant | c8db86880619d7467901fd145f27e0f2f1a79acc | ed4ab403deaed9e8c95e0db728477fcb012bf4fa | refs/heads/dev | 2023-01-27T23:48:17.550374 | 2020-09-18T01:18:55 | 2020-09-18T01:18:55 | 62,690,461 | 8 | 1 | Apache-2.0 | 2023-01-13T06:02:03 | 2016-07-06T04:13:49 | Python | UTF-8 | Python | false | false | 3,157 | py | """The nexia integration base entity."""
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTRIBUTION,
DOMAIN,
MANUFACTURER,
SIGNAL_THERMOSTAT_UPDATE,
SIGNAL_ZONE_UPDATE,
)
class NexiaEntity(CoordinatorEntity):
    """Base class for nexia entities."""

    def __init__(self, coordinator, name, unique_id):
        """Initialize the entity with its display name and unique id."""
        super().__init__(coordinator)
        self._name = name
        self._unique_id = unique_id

    @property
    def unique_id(self):
        """Return the unique id of this entity."""
        return self._unique_id

    @property
    def name(self):
        """Return the display name of this entity."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
        return attributes
class NexiaThermostatEntity(NexiaEntity):
    """Base class for nexia devices attached to a thermostat."""

    def __init__(self, coordinator, thermostat, name, unique_id):
        """Initialize the entity and remember its thermostat."""
        super().__init__(coordinator, name, unique_id)
        self._thermostat = thermostat

    @property
    def device_info(self):
        """Return the device registry info for the thermostat."""
        thermostat = self._thermostat
        return {
            "identifiers": {(DOMAIN, thermostat.thermostat_id)},
            "name": thermostat.get_name(),
            "model": thermostat.get_model(),
            "sw_version": thermostat.get_firmware(),
            "manufacturer": MANUFACTURER,
        }

    async def async_added_to_hass(self):
        """Subscribe to per-thermostat update signals."""
        await super().async_added_to_hass()
        signal = f"{SIGNAL_THERMOSTAT_UPDATE}-{self._thermostat.thermostat_id}"
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, signal, self.async_write_ha_state
            )
        )
class NexiaThermostatZoneEntity(NexiaThermostatEntity):
    """Base class for nexia devices attached to a thermostat zone."""

    def __init__(self, coordinator, zone, name, unique_id):
        """Initialize the entity and remember its zone."""
        super().__init__(coordinator, zone.thermostat, name, unique_id)
        self._zone = zone

    @property
    def device_info(self):
        """Return the device registry info for the zone.

        Starts from the thermostat's info and overrides the identity
        fields, linking back to the thermostat via ``via_device``.
        """
        data = super().device_info
        data["identifiers"] = {(DOMAIN, self._zone.zone_id)}
        data["name"] = self._zone.get_name()
        data["via_device"] = (DOMAIN, self._zone.thermostat.thermostat_id)
        return data

    async def async_added_to_hass(self):
        """Subscribe to per-zone update signals."""
        await super().async_added_to_hass()
        signal = f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}"
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass, signal, self.async_write_ha_state
            )
        )
| [
"noreply@github.com"
] | tchellomello.noreply@github.com |
babd17417d73f4e425c109124c145c5c4a8c31cf | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.11.16/7/1569574648.py | 22b71b371b1aff43c1caf4bf9829301a2e09cc0f | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def divisors(n: int) -> list:
    """Return the positive divisors of *n* in ascending order.

    >>> divisors(20)
    [1, 2, 4, 5, 10, 20]

    For n <= 0 an empty list is returned.
    """
    # The original looped ``for i, j in range(m)`` (a TypeError: range
    # yields single ints), divided without defining the divisor, and
    # appended to the ``list`` *type* instead of an instance -- and never
    # returned anything. Build and return a real list instead.
    return [i for i in range(1, n + 1) if n % i == 0]
######################################################################
## Solution part 2. (tests)
# Smoke test: for n = 20 the expected output is [1, 2, 4, 5, 10, 20].
print(divisors(20))
######################################################################
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
add59155a5083acfc89ed306683adfa443936eb5 | 4dd811c2595a990cb21327afe7a2dfd54ba9b52f | /Open-CV_Basics/contour_V2.py | 2496a3a89b30c938309ed899350f63146a740774 | [] | no_license | mitesh55/Deep-Learning | 40842af8b8f1ea3c041fa4de2e76d068f554c1a4 | 0a7c0305edb027acfaad38e7abe52808260cede9 | refs/heads/master | 2023-07-25T14:31:14.285489 | 2021-09-09T13:46:31 | 2021-09-09T13:46:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | import cv2
import matplotlib.pyplot as plt
import numpy as np
#let's start with contours
# Contour basics: find contours, draw them, compute centroids, and draw
# upright and rotated bounding boxes. Requires 'Contour.png' on disk.
image = cv2.imread('Contour.png')
imageCopy = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Find ALL the contours (full list, compressed point chains).
contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
print("Number of contours found = {}".format(len(contours)))
print("\nHierarchy : \n{}".format(hierarchy))

# Draw every contour in green.
cv2.drawContours(image, contours, -1, (0, 255, 0), 2)
plt.imshow(image[:, :, ::-1])
# plt.show()

# Now let's only find EXTERNAL contours.
contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print("Number of contours found = {}".format(len(contours)))
image = imageCopy.copy()
cv2.drawContours(image, contours, -1, (0, 255, 255), 2)
plt.imshow(image[:, :, ::-1])
# plt.show()

# Draw only a single contour (index 3 of the external set).
image = imageCopy.copy()
cv2.drawContours(image, contours[3], -1, (0, 255, 255), 4)
plt.imshow(image[:, :, ::-1])
# plt.show()

# Finding contours is only the basic part; the same information feeds
# object detection and recognition. For now, locate each contour's
# centroid from its image moments: cx = m10/m00, cy = m01/m00.
image = imageCopy.copy()
contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image, contours, -1, (0, 255, 0), 2)
for cnt in contours:
    M = cv2.moments(cnt)
    x = int(round(M["m10"] / M["m00"]))
    y = int(round(M["m01"] / M["m00"]))
    # Mark the center
    cv2.circle(image, (x, y), 10, (0, 0, 255), -1)
plt.imshow(image[:, :, ::-1])
# plt.show()

# Upright (axis-aligned) bounding boxes; neighbouring boxes may overlap.
image = imageCopy.copy()
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 130, 255), 2)
plt.imshow(image[:, :, ::-1])
# plt.show()

# Rotated (minimum-area) bounding boxes.
image = imageCopy.copy()
for cnt in contours:
    # BUG FIX: the original computed
    #   cv2.minAreaRect(cnt) + cv2.minAreaRect([0.5, 0.5, 0.5, 0.5])
    # minAreaRect() does not accept a plain float list, and even if it
    # did, "+" would concatenate the two result tuples and hand
    # boxPoints() a malformed 6-tuple. Use the rect of the contour as-is.
    box = cv2.minAreaRect(cnt)
    boxPts = np.int0(cv2.boxPoints(box))
    cv2.drawContours(image, [boxPts], -1, (0, 0, 255), 2)
plt.imshow(image[:, :, ::-1])
plt.show()
"pavankuchalapk@gmail.com"
] | pavankuchalapk@gmail.com |
434401c549c15b2c22271538e4d76c0fd5d89c2b | 05ba1957e63510fd8f4f9a3430ec6875d9ecb1cd | /.history/fh/update_readme_20200816200808.py | aeb869169550907d44f3df820a1c8087ca39b324 | [] | no_license | cod-lab/try | 906b55dd76e77dbb052603f0a1c03ab433e2d4d1 | 3bc7e4ca482459a65b37dda12f24c0e3c71e88b6 | refs/heads/master | 2021-11-02T15:18:24.058888 | 2020-10-07T07:21:15 | 2020-10-07T07:21:15 | 245,672,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | # f=open('a.md','r')
# print(f.name)
# print(f.)
# f.close()
# WIP editor-history snapshot: experiments with reading 'a.md' line by
# line via a context manager. Several statements are known-broken and
# annotated below; left byte-for-byte as found.
print("\n")

# Method 1 : Context Manager
with open('a.md','r') as f:
    # print("len f:", len(f))
    for i,line in enumerate(f):
        # Only lines with 0-based index 38..42 are inspected.
        if 37<i<43:
            # for i in range(38,43):
            print(i+1, line, end='')
            print("type of line: ",type(line))
            print("len of line: ",len(line), "\n")
            for j in range(len(line)):
                # NOTE(review): this raises at runtime -- the file is
                # opened in 'r' mode (write() unsupported) and write()
                # takes a single string argument, not two.
                f.write('j: ',j)
    # if f.tell() ==
    # for lines in f:
    #     print(lines, end='')
    # for lines in f:
    #     if f.tell() == 4:
    #         print(lines)
print("\n")
| [
"arihant806@gmail.com"
] | arihant806@gmail.com |
5f67093d42f2b9712d3d49d5a939b99bd9d7919a | 2b23e732ae616f6a3c07866906e14a1e3883a693 | /Numpy Practice/Array and scalars/scalars.py | 0b4cb92726368f2786aa36663be58a1912b3f5ed | [] | no_license | JitenKumar/Data-Analysis-Visualization-With-Python | 7d045e49b16ad30fb5e2b558a3e4f69457bf4697 | d53d3738c10efc6882757692215d381321ee20a3 | refs/heads/master | 2020-03-18T12:28:40.146060 | 2018-05-28T18:28:12 | 2018-05-28T18:28:12 | 134,728,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | import numpy as np
# BUG FIX: the original call was
#   np.array([1, 2, 4, 3453], [1, 3446, 4])
# The second positional argument of np.array() is ``dtype``, so passing a
# second list raises TypeError (and the two lists have different lengths,
# so a 2-D array was not possible either). Build a single 1-D array and
# demonstrate element-wise scalar arithmetic on it.
array1 = np.array([1, 2, 4, 3453])
print(array1)
print(array1 * array1)   # element-wise square
print(array1 - array1)   # all zeros
print(1 / array1)        # element-wise reciprocal (floats)
| [
"jitenderpalsra@gmail.com"
] | jitenderpalsra@gmail.com |
e7e17ccfa54df98c658a78e4da139d1dc07cb7d3 | 373035950bdc8956cc0b74675aea2d1857263129 | /scripts-config/ta3/remote-runner-scripts/arg_config.py | fe22b61b9a74acb99479f21cd220e13114d12c54 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | limkokholefork/SPARTA | 5d122cd2e920775d61a5404688aabbafa164f22e | 6eeb28b2dd147088b6e851876b36eeba3e700f16 | refs/heads/master | 2021-11-11T21:09:38.366985 | 2017-06-02T16:21:48 | 2017-06-02T16:21:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,187 | py | # *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: ni24039
# Description: Config file to be used with remote_runner.py
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 10 Jan 2013 ni24039 Original Version
# *****************************************************************
import copy

# =============================================================================
# NOTE Put any convenience variables you may need in this area.
# =============================================================================

# NOTE(review): ``Component``, ``util`` and ``config`` are not imported
# here -- presumably remote_runner.py injects them into this file's
# namespace when it exec's the config; verify against remote_runner.py.

# =============================================================================
# Base component
# NOTE Put any component settings here that will be common to all components.
base_component = Component()
base_component.start_index = 1
base_component.executable = "/usr/bin/java"
base_component.args = ["-jar"]
# Ship the shared data directory and the Java keystore with every component.
base_component.files.update(util.recursive_files_dict( \
    "/home/lincoln/argon/data", "data"))
base_component.files["/home/lincoln/argon/sparstore.jks"] = "sparstore.jks"
# =============================================================================

# =============================================================================
# Third party component
# NOTE Include as many third party components as needed. The configuration for
# each one should look like the following. The name must start with
# 'third-party-'.
tp = copy.deepcopy(base_component)
tp.name = "third-party-broker"
tp.host = "10.10.99.219"
# brokerProcess listens on port 9876 for publisher/subscriber connections.
tp.args.extend(["brokerProcess.jar", "9876"])
tp.files["/home/lincoln/argon/brokerProcess.jar"] = "brokerProcess.jar"
tp.num_cores = 10
config.add_component(tp)
# =============================================================================

# =============================================================================
# Publisher component
server = copy.deepcopy(base_component)
server.name = "server"
# NOTE Everything below should be updated as needed per SUT requirements.
#
# server.host should be updated as desired as the testing environment
# dictates. The muddler file that is run with this config file will reference a
# particular 'host info' file located in scripts-config/common/config/. The
# host info file lists each workstation's IP address, number of cores, model
# name, and whether the system is hyperthreaded; server.host must be one of
# those IP addresses.
server.host = "10.10.99.221"
server.start_index = 2
# Publisher connects to the broker (tp.host:9876) and serves on 5432.
server.args.extend(["publisherProcess.jar", tp.host, "9876", "5432"])
server.files["/home/lincoln/argon/publisherProcess.jar"] = "publisherProcess.jar"
server.num_cores = 10
# =============================================================================
config.add_component(server)

# =============================================================================
# Subscriber component
client = copy.deepcopy(base_component)
client.name = "client"
# NOTE client.host will be automatically populated by muddler. Whatever you
# set this to won't matter: it is chosen from the host info file based on
# client.num_cpus, the number of clients requested, and which model names may
# run client SUTs.
client.host = "TBD"
# NOTE If "%n" is present anywhere in client.args, it will be replaced with a
# unique integer representing the client SUT's ID; otherwise each SUT gets the
# same arguments. client.args should NOT contain any semi-colons or double
# quotes.
client.start_index = 3
# Subscriber connects to the broker and the publisher, serving on 4928.
client.args.extend(["subscriberProcess.jar", tp.host, "9876", server.host,
    "5432", "4928"])
client.files["/home/lincoln/argon/subscriberProcess.jar"] = "subscriberProcess.jar"
client.num_cores = 1
# =============================================================================
config.add_component(client)
| [
"mitchelljd@ll.mit.edu"
] | mitchelljd@ll.mit.edu |
3cef2a2f60b5ef01b713815a030a54fce72004bd | 21b632797ed6257b13574c341cdd14e6534728a9 | /ryu/app/simple_switch_lacp_13.py | ffba55722414cd3014fe4341e006bccac1ed15e4 | [
"Apache-2.0"
] | permissive | MrCocoaCat/ryu | 0473f04e2a840e027e9002f8a6af81745eaf7094 | 9e9571991a73380099b7ba7c6f37e0e587080a6a | refs/heads/master | 2021-06-19T18:09:52.833590 | 2020-05-12T08:17:21 | 2020-05-12T08:17:21 | 163,072,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,983 | py | # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import lacplib
from ryu.lib.dpid import str_to_dpid
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.app import simple_switch_13
class SimpleSwitchLacp13(simple_switch_13.SimpleSwitch13):
    """A learning switch (OpenFlow 1.3) that aggregates ports 1 and 2 of
    datapath 0x1 into a LACP link via ryu's lacplib."""

    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    _CONTEXTS = {'lacplib': lacplib.LacpLib}

    def __init__(self, *args, **kwargs):
        super(SimpleSwitchLacp13, self).__init__(*args, **kwargs)
        self.mac_to_port = {}
        self._lacp = kwargs['lacplib']
        # Bond ports 1 and 2 of switch 0000000000000001 into one LAG.
        self._lacp.add(
            dpid=str_to_dpid('0000000000000001'), ports=[1, 2])

    def del_flow(self, datapath, match):
        """Remove every flow entry matching *match* from *datapath*."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        flow_mod = parser.OFPFlowMod(datapath=datapath,
                                     command=ofproto.OFPFC_DELETE,
                                     out_port=ofproto.OFPP_ANY,
                                     out_group=ofproto.OFPG_ANY,
                                     match=match)
        datapath.send_msg(flow_mod)

    @set_ev_cls(lacplib.EventPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Standard MAC-learning forwarding for packets lacplib passes up."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        eth = packet.Packet(msg.data).get_protocols(ethernet.ethernet)[0]
        src = eth.src
        dst = eth.dst

        dpid = datapath.id
        table = self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # Learn the source MAC so we can avoid flooding next time.
        table[src] = in_port

        out_port = table.get(dst, ofproto.OFPP_FLOOD)
        actions = [parser.OFPActionOutput(out_port)]

        # Install a flow so known destinations bypass the controller.
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
            self.add_flow(datapath, 1, match, actions)

        data = msg.data if msg.buffer_id == ofproto.OFP_NO_BUFFER else None
        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)

    @set_ev_cls(lacplib.EventSlaveStateChanged, MAIN_DISPATCHER)
    def _slave_state_changed_handler(self, ev):
        """Flush learned MACs when a LAG member port changes state, so
        traffic is re-learned over the surviving topology."""
        datapath = ev.datapath
        dpid = datapath.id
        self.logger.info("slave state changed port: %d enabled: %s",
                         ev.port, ev.enabled)

        if dpid in self.mac_to_port:
            for mac in self.mac_to_port[dpid]:
                match = datapath.ofproto_parser.OFPMatch(eth_dst=mac)
                self.del_flow(datapath, match)
            del self.mac_to_port[dpid]
        self.mac_to_port.setdefault(dpid, {})
| [
"MrCocoaCat@aliyun.com"
] | MrCocoaCat@aliyun.com |
df4d48c6073d9ddfee702a38b9ce6af2ca95a9be | 896b5a6aab6cb6c1e3ee2e59aad0128226471871 | /weblayer/PRESUBMIT.py | 5ba183d5d99fafd020961cdb80a583d28e5935cd | [
"BSD-3-Clause"
] | permissive | bkueppers/chromium | 86f09d32b7cb418f431b3b01a00ffe018e24de32 | d160b8b58d58120a9b2331671d0bda228d469482 | refs/heads/master | 2023-03-14T10:41:52.563439 | 2019-11-08T13:33:40 | 2019-11-08T13:33:40 | 219,389,734 | 0 | 0 | BSD-3-Clause | 2019-11-04T01:05:37 | 2019-11-04T01:05:37 | null | UTF-8 | Python | false | false | 2,484 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit tests for weblayer.
Runs various style checks before upload.
"""
import re
# Path of the Java constant that must be bumped whenever any AIDL
# interface changes (see _CheckAIDLVersionBump below).
WEBLAYER_VERSION_PATH = (
    'weblayer/browser/java/org/chromium/weblayer_private/aidl/' +
    'WebLayerVersion.java')
def CheckChangeOnUpload(input_api, output_api):
  """Entry point run by the presubmit framework on `git cl upload`."""
  results = []
  results += _CheckAIDLVersionBump(input_api, output_api)
  return results
def _Canonicalize(lines):
"""Strip comments, and convert all whitespace to single spaces."""
def _CanonicalizeLine(line):
line = re.sub(r'//,*', '', line)
line = re.sub(r'\s+', ' ', line)
return line
return re.sub(r'\s*/\*.*?\*/\s*', ' ', ''.join(map(_CanonicalizeLine, lines)))
def _CheckAIDLVersionBump(input_api, output_api):
  """Any change to an AIDL file must be accompanied by a version code bump.

  Returns a (possibly empty) list of PresubmitPromptWarning results.
  """
  def AIDLFiles(affected_file):
    # Restrict the affected-file scan to *.aidl sources.
    return input_api.FilterSourceFile(affected_file, white_list=(r'.*\.aidl$',))
  # Collect AIDL files whose *canonicalized* contents changed, i.e. edits
  # that are not purely comments/whitespace.
  aidl_changes = []
  for f in input_api.AffectedSourceFiles(AIDLFiles):
    old_contents = _Canonicalize(f.OldContents())
    new_contents = _Canonicalize(f.NewContents())
    if old_contents != new_contents:
      aidl_changes.append((f.LocalPath(), f.Action()))
  if not aidl_changes:
    return []
  # Human-readable "ACTION path" lines for the warning message.
  aidl_changes = '\n'.join(
      '  {1} {0}'.format(path, action) for path, action in aidl_changes)
  def VersionFile(affected_file):
    # Matches only the WebLayerVersion.java file that holds the version.
    return input_api.FilterSourceFile(
        affected_file, white_list=(WEBLAYER_VERSION_PATH,))
  changed_version_file = list(input_api.AffectedSourceFiles(VersionFile))
  if not changed_version_file:
    return [
        output_api.PresubmitPromptWarning(
            'Commit contains AIDL changes,' +
            ' but does not change WebLayerVersion.java\n' + aidl_changes)
    ]
  # The whitelist names exactly one path, so at most one file can match.
  assert len(changed_version_file) == 1
  old_contents = _Canonicalize(changed_version_file[0].OldContents())
  new_contents = _Canonicalize(changed_version_file[0].NewContents())
  # Warn when the file was touched but sVersionNumber kept its old value.
  m_old = re.search(r'sVersionNumber\s*=\s*(.*?);', old_contents)
  m_new = re.search(r'sVersionNumber\s*=\s*(.*?);', new_contents)
  if m_old and m_new and m_old.group(1) == m_new.group(1):
    return [
        output_api.PresubmitPromptWarning(
            'Commit contains AIDL changes,' +
            ' but does not change WebLayerVersion.sVersionNumber\n' +
            aidl_changes)
    ]
  return []
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
b9bd49e00b68e4bc266f6679db1a0924ddabe70b | 9140ba97a4ff6e9ef9f4e49d67ab238b669a3597 | /register/views.py | ce4dc74c1ed39b4c84b73fc40f5d5f9c41734f83 | [] | no_license | poojapauskar/foodromeoproject-api | 877ada5d72db0ac364e735a1ad7af0f46ad02bcc | 2007ed7ae12a3f5d1d227faaccaf7e6bd93f760d | refs/heads/master | 2021-01-21T13:30:31.838067 | 2016-05-09T06:41:10 | 2016-05-09T06:41:10 | 54,105,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,708 | py | from register.models import Register
from register.serializers import RegisterSerializer
from rest_framework import generics
import random
from random import randint
class StatusCode(object):
    """Numeric HTTP status codes used by the JSON helpers in this module."""
    OK = 200
    NOT_FOUND = 404
    # add more status code according to your need
import json
from django.http import HttpResponse
def JSONResponse(data = None, status = StatusCode.OK):
    """Build an ``HttpResponse`` carrying *data* serialized as JSON.

    :param data: dict to serialize, or None for an empty response body.
    :param status: HTTP status code for the response.
    :returns: an HttpResponse; any non-dict *data* yields a 404.
    """
    if data is None:
        # BUG FIX: the status must go to the ``status`` keyword; the old
        # positional call HttpResponse(status) made the status code the
        # response *body* (with a default 200 status).
        return HttpResponse(status = status)
    if isinstance(data, dict):
        # BUG FIX: was ``if data and type(data) is dict`` -- an empty dict
        # is valid JSON ("{}") and should not fall through to a 404.
        return HttpResponse(json.dumps(data, indent = 4, encoding = 'utf-8', sort_keys = True), \
            mimetype = 'application/json', status = status)
    # Anything else is an unsupported payload type.
    return HttpResponse(status = StatusCode.NOT_FOUND)
class RegisterList(generics.ListCreateAPIView):
    """Registration / password-reset endpoint (Python 2 era code).

    All user fields arrive as custom HTTP request headers (HTTP_* keys of
    request.META) rather than in the request body; responses are small
    JSON status dicts.
    """
    def get(self, request, *args, **kwargs):
        """Handle both flows, selected by the IS_SET_PW header:

        '1'  -> sign-up: create the Register row (if the email is new) and
                mail a confirmation/set-password link.
        else -> password reset: mail a reset link if the email exists.

        In both flows a fresh 6-digit activation key plus its creation
        time is stored on the Register row.
        """
        # 6-digit one-time activation key embedded in the mailed link.
        activation_key = str(random.randint(100000, 999999))
        import datetime
        activation_key_time = datetime.datetime.now()
        # from django.core.mail import send_mail
        # send_mail('FoodRomeo: Confirm your Account.','Click on the link to confirm your account and set a password http://localhost/set-password/?activation_key='+activation_key+' The link expires in 48 hours.', 'poojapauskar22@gmail.com', [validated_data.get('email')], fail_silently=False)
        objects=[]
        is_pw= request.META.get('HTTP_IS_SET_PW')
        import sys
        # Python-2 debug print of the flow selector header.
        print >> sys.stderr, is_pw
        if(request.META.get('HTTP_IS_SET_PW') == '1'):
            # --- sign-up flow ---
            if(Register.objects.filter(email=request.META.get('HTTP_EMAIL')).exists()):
                objects.append(
                    {
                        'status':400,
                        'message':'User already exists',
                    }
                )
                from django.http import JsonResponse
                return JsonResponse(objects[0],safe=False)
            else:
                from django.core.mail import send_mail
                send_mail('FoodRomeo: Confirm your Account.','Click on the link to confirm your account and set a password http://localhost/set-password/?activation_key='+activation_key+' The link expires in 48 hours.', 'poojapauskar22@gmail.com', [request.META.get('HTTP_EMAIL')], fail_silently=False)
                # NOTE(review): the password header appears to be stored
                # verbatim -- presumably plaintext; verify hashing happens
                # elsewhere before trusting this.
                objects1 =Register.objects.create(email=request.META.get('HTTP_EMAIL'),firstname=request.META.get('HTTP_FIRSTNAME'),lastname=request.META.get('HTTP_LASTNAME'),phone=request.META.get('HTTP_PHONE'),address_line_1=request.META.get('HTTP_ADDRESS_LINE_1'),address_line_2=request.META.get('HTTP_ADDRESS_LINE_2'),city=request.META.get('HTTP_CITY'),pin_code=request.META.get('HTTP_PIN_CODE'),photo=request.META.get('HTTP_PHOTO'),password=request.META.get('HTTP_PASSWORD'),access_token=request.META.get('HTTP_ACCESS_TOKEN'),fb_id=request.META.get('HTTP_FB_ID'),fb_access_token=request.META.get('HTTP_FB_ACCESS_TOKEN'),google_id=request.META.get('HTTP_GOOGLE_ID'),google_access_token=request.META.get('HTTP_GOOGLE_ACCESS_TOKEN'),activation_key=request.META.get('HTTP_ACTIVATION_KEY'),activation_key_time=request.META.get('HTTP_ACTIVATION_KEY_TIME'),is_set_pw=request.META.get('HTTP_IS_SET_PW'))
                import sys
                # NOTE(review): missing '>>' -- these print a tuple to
                # stdout instead of writing to stderr.
                print sys.stderr, "objects1"
                print sys.stderr, objects1
                # Overwrite the header-supplied key with the freshly
                # generated one.
                Register.objects.filter(email=request.META.get('HTTP_EMAIL')).update(activation_key=activation_key,activation_key_time=activation_key_time)
                objects.append(
                    {
                        'status':200,
                        'message':'Confirmation mail is sent to your email address',
                    }
                )
                from django.http import JsonResponse
                return JsonResponse(objects[0],safe=False)
        else:
            # --- password-reset flow ---
            if(Register.objects.filter(email=request.META.get('HTTP_EMAIL')).exists()):
                from django.core.mail import send_mail
                send_mail('FoodRomeo: Reset your password.','Click on the link to set a password http://localhost/set-password/?activation_key='+activation_key+' The link expires in 48 hours.', 'poojapauskar22@gmail.com', [request.META.get('HTTP_EMAIL')], fail_silently=False)
                Register.objects.filter(email=request.META.get('HTTP_EMAIL')).update(activation_key=activation_key,activation_key_time=activation_key_time)
                objects.append(
                    {
                        'status':200,
                        'message':'Reset mail is sent to your email address',
                    }
                )
                from django.http import JsonResponse
                return JsonResponse(objects[0],safe=False)
            else:
                objects.append(
                    {
                        'status':400,
                        'message':'User does not exists',
                    }
                )
                from django.http import JsonResponse
                return JsonResponse(objects[0],safe=False)
class RegisterDetail(generics.ListAPIView):
    """Read-only DRF endpoint that lists every Register row."""
    queryset = Register.objects.all()
    serializer_class = RegisterSerializer
"git.poojapauskar@gmail.com"
] | git.poojapauskar@gmail.com |
2b00aad1ef824b5cba38e65d566d93dc45ae6ab3 | 53abcba37ef0fd69bd90453b175f936edcca842c | /Uber/8.py | f537178aa43efd112098a6c9f0a9295f5a9c857f | [] | no_license | cloi1994/session1 | 44db8fa6d523d4f8ffe6046969f395e8bbde9e40 | 9a79fd854e9842050da07f9c9b0ce5cadc94be89 | refs/heads/master | 2020-03-09T05:15:51.510027 | 2018-06-13T04:41:43 | 2018-06-13T04:41:43 | 128,608,752 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | class Solution(object):
def myAtoi(self, s):
"""
:type str: str
:rtype: int
"""
if not s:
return 0
s = s.replace(" ", "")
if s[0].isalpha():
return 0
ans = 0
sign = 1
if s[0] == '-':
sign = -1
for i in range(len(s)):
if s[i] == '.':
break
if s[i].isdigit():
ans = ans * 10 + int(s[i])
if ans >= 2**31:
return 2147483648 * sign
return ans * sign
| [
"noreply@github.com"
] | cloi1994.noreply@github.com |
0c480fcde577bb9d3095be66a2a5181bfad6dda9 | 4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422 | /_0617_Merge_Two_Binary_Trees.py | 4d60139bc71d75a360ee7d1fc0b0c79c5ec5c300 | [] | no_license | mingweihe/leetcode | a2cfee0e004627b817a3c0321bb9c74128f8c1a7 | edff905f63ab95cdd40447b27a9c449c9cefec37 | refs/heads/master | 2021-06-19T07:46:46.897952 | 2021-05-02T05:13:17 | 2021-05-02T05:13:17 | 205,740,338 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | # Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        # Children start out absent; callers attach them afterwards.
        self.val, self.left, self.right = x, None, None
class Solution(object):
    def mergeTrees(self, t1, t2):
        """Merge two binary trees node-by-node, summing overlapping values.

        The merge happens in place on *t1*: wherever both trees have a
        node, t1's node absorbs t2's value; wherever only one tree has a
        node, that node is reused unchanged.

        :type t1: TreeNode
        :type t2: TreeNode
        :rtype: TreeNode
        """
        # With at most one tree present there is nothing to sum --
        # hand back whichever tree exists (or None).
        if t1 is None or t2 is None:
            return t1 or t2
        t1.val = t1.val + t2.val
        t1.left = self.mergeTrees(t1.left, t2.left)
        t1.right = self.mergeTrees(t1.right, t2.right)
        return t1
| [
"10962421@qq.com"
] | 10962421@qq.com |
d6fa8c447ecd43e49cd350a3dc8823d77552f944 | bcc4fdd2882291c392b553761b0d398dfd786b8f | /Functions.Templates/Templates-v2/CosmosDbTrigger-Python/function_app.py | 46798ed9e2444961a5045260c5a3062f341813c3 | [
"MIT"
] | permissive | Azure/azure-functions-templates | d462182def66ef0d80bf878c23c8234ab64c5831 | aa21e49fe96441ce1e3e07e9325feac5c0d92bb3 | refs/heads/dev | 2023-08-28T14:18:56.244381 | 2023-08-25T22:36:10 | 2023-08-25T22:36:10 | 52,470,131 | 264 | 156 | MIT | 2023-09-14T16:15:37 | 2016-02-24T19:53:25 | C# | UTF-8 | Python | false | false | 377 | py | import azure.functions as func
import logging
app = func.FunctionApp()
# The $(...) tokens below are template placeholders that the Azure Functions
# tooling substitutes when a function is generated from this template.
@app.cosmos_db_trigger(arg_name="azcosmosdb", container_name="$(CONTAINER_NAME_INPUT)",
                        database_name="$(DB_NAME_INPUT)", connection="$(CONNECTION_STRING_INPUT)")
def $(FUNCTION_NAME_INPUT)(azcosmosdb: func.DocumentList):
    logging.info('Python CosmosDB triggered.')
| [
"noreply@github.com"
] | Azure.noreply@github.com |
82810559840be07c5da1a6071c2c380f60b58cea | 0e99d2efff685a66869d5a7cd4a68de8955f498c | /newoffer/suning/lab1.py | 08a457dde4b899c6105e160f319cbf8bfb0485b4 | [] | no_license | supercp3/code_leetcode | f303109c70ccdd0baa711cf606d402158b212525 | 1dc6260e229a012111ec4d5e60071c2458ce5002 | refs/heads/master | 2020-03-26T11:33:28.741405 | 2018-10-15T02:18:24 | 2018-10-15T02:18:24 | 144,848,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | import sys
class Tree:
    """A binary-tree node; children may be supplied at construction time."""
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    """Iterative (stack-based, non-recursive) binary-tree traversals."""

    def preOrder(self, root):
        """Return node values in pre-order (root, left, right)."""
        if not root:
            return []
        stack = []
        vallist = []
        while root is not None or len(stack) != 0:
            if root is not None:
                vallist.append(root.val)  # visit before descending left
                stack.append(root)
                root = root.left
            else:
                root = stack.pop()
                root = root.right
        return vallist

    def midOrder(self, root):
        """Return node values in in-order (left, root, right)."""
        if not root:
            return []
        stack = []
        vallist = []
        while root or len(stack) != 0:
            if root is not None:
                stack.append(root)
                root = root.left
            else:
                root = stack.pop()
                vallist.append(root.val)  # visit between the subtrees
                root = root.right
        return vallist

    def lastOrder(self, root):
        """Print and return node values in post-order (left, right, root).

        Two-stack technique: the second stack ends up holding the nodes in
        reverse post-order. Bug fix: the original declared ``vallist`` but
        never filled or returned it (so it returned None, and the caller
        printed "None"); the traversal is now collected and returned while
        the original space-separated printed output is preserved.
        """
        if not root:
            return []
        stack = [root]
        stack2 = []
        vallist = []
        while len(stack) > 0:
            node = stack.pop()
            stack2.append(node)
            if node.left is not None:
                stack.append(node.left)
            if node.right is not None:
                stack.append(node.right)
        while len(stack2) > 0:
            node = stack2.pop()
            print(node.val, end=" ")
            vallist.append(node.val)
        return vallist
if __name__=="__main__":
    # Sample tree:       1
    #                  2   3
    #                 4 5 6 7
    root=Tree(1,Tree(2,Tree(4),Tree(5)),Tree(3,Tree(6),Tree(7)))
    s=Solution()
    #presOrder=s.preOrder(root)
    #midOrder=s.midOrder(root)
    # NOTE(review): lastOrder prints its traversal as a side effect; the
    # value bound here is whatever it returns.
    lastOrder=s.lastOrder(root)
    print(lastOrder)
| [
"13281099@bjtu.edu.cn"
] | 13281099@bjtu.edu.cn |
0c35213774dbd23f49ea1ba7431bcc7ad7dd97c7 | 926621c29eb55046f9f59750db09bdb24ed3078e | /lib/googlecloudsdk/third_party/apis/dataproc/v1/__init__.py | 095dc4c02f9ac6029206e546e6166984b09379f5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/SDK | 525d9b29fb2e901aa79697c9dcdf5ddd852859ab | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | refs/heads/master | 2022-11-22T18:24:13.464605 | 2016-05-18T16:53:30 | 2016-05-18T16:53:30 | 282,322,505 | 0 | 0 | NOASSERTION | 2020-07-24T21:52:25 | 2020-07-24T21:52:24 | null | UTF-8 | Python | false | false | 376 | py | """Common imports for generated dataproc client library."""
# pylint:disable=wildcard-import
import pkgutil
from googlecloudsdk.third_party.apitools.base.py import *
from googlecloudsdk.third_party.apis.dataproc.v1.dataproc_v1_client import *
from googlecloudsdk.third_party.apis.dataproc.v1.dataproc_v1_messages import *
# Extend this package's __path__ so it can span multiple directories
# (namespace-package style), per pkgutil.extend_path.
__path__ = pkgutil.extend_path(__path__, __name__)
| [
"richarddewalhalla@gmail.com"
] | richarddewalhalla@gmail.com |
d21b3fb24785b53ed1bc671b6763bc5903a78dad | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_shrivels.py | 00eeb99420423720d1b02d8b00448f4f115e226c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
# class header
class _SHRIVELS():
    """Word-base entry for "shrivels" (an inflected form of "shrivel")."""
    def __init__(self,):
        self.name = "SHRIVELS"
        # NOTE(review): `shrivel` is not defined anywhere in this module, so
        # constructing _SHRIVELS raises NameError -- likely meant the string
        # 'shrivel' or a definitions list; confirm against the generator.
        self.definitions = shrivel
        self.parents = []
        self.childen = []  # NOTE(review): attribute name looks like a typo for 'children'
        self.properties = []
        self.jsondata = {}
        self.basic = ['shrivel']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e415d03e37a2ad73851371010023bd62642188fa | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/tenth/rank_1efn_S.py | 50231db57e8bfee2b95790432af4d32474320c59 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '1efn.csv'
identifier = 'S'
thresholdCoef = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenth/ranks/'
pdbID = testFile[:4]
# Load the trained classifier for this coefficient-subset identifier.
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
    clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
# Load the decoy-set descriptors; after transposing, rows are complexes and
# columns are (sorted) feature names.
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
# Standard scaler fitted on the training data; reused for each comparison.
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
    scaler = pickle.load(g)
for i in range(len(df1)):
    # subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
    df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
    df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
    df2 = scaler.transform(df2)
    # Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
    predictions = clf.predict(df2)
    score = sum(predictions)
    scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
    result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
1c15a36efd9cb73b2bd04e796c0a860e39753c4e | 7b00c3a416fff53e472a8ebb4e5ff710cbad4e5f | /interpreter/parser.py | f6d3044296c95de91cae043dcf7779dddc0f950e | [
"MIT",
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | hthompson6/Sous | 8700ac52096273d279f6a3415d089a8bc0d4420f | 79dd10078773a59625037a47e1c2e849e32cf75e | refs/heads/master | 2020-03-19T04:50:10.530596 | 2018-06-02T23:23:06 | 2018-06-02T23:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,189 | py | # Copyright (c) 2018 Hunter Thompson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import re
import var_gen as gen
import sous_ops as ops
# Global registry of parsed preps: one {title: body_text} dict per prep.
func_hash = []
def check_prep(line):
    """Check whether *line* is a prep-block title line.

    A title line ends with a period and contains "Prep." (e.g.
    "My Recipe Prep.").

    return --- the title with the trailing " Prep." stripped, or None
    when the line is not a prep title.
    """
    if not line:
        return None
    if line[-1] != '.':
        return None
    # Raw string avoids the invalid '\.' escape warning of the original.
    if re.search(r'(?<=Prep)\.', line):
        return line[:-6]  # drop the trailing " Prep."
    return None
def check_ing(line):
    """Checks if ingredient block has been reached."""
    return re.match(r'Ingredients\.', line) is not None
def ing_parser(line):
    """Initial processing of variable (ingredient).
    return -- dict as {var_name: var_val} or None
    """
    if not line:
        return None
    tokens = line.split(' ')
    # Dispatch on token count; four or more tokens go to the multi handler.
    dispatch = {1: gen.one_token, 2: gen.two_token, 3: gen.three_token}
    handler = dispatch.get(len(tokens), gen.multi_token)
    return handler(tokens)
def fetch(instruct, dirname="."):
    """Imports preps from other files.
    The parser is being run from a directory different
    than the actual code it is interpreting. Therefore,
    it must step into directory of the file being parsed,
    and from there using relative paths to find the file
    which is desired to be imported.
    No return value is needed as the global func_hash
    variable is directly modified by the driver function.
    """
    import os
    root = None
    name = None
    ret_dir = None
    func_ret = None  # NOTE(review): never used -- dead variable?
    # Step into the directory of the file being parsed, remembering where
    # to return afterwards.
    if os.getcwd() != dirname:
        ret_dir = os.getcwd()
        os.chdir(dirname)
    # Choose the search root and the target file name from the phrasing.
    if re.search('from the counter', instruct):
        root = '..'
        name = instruct[10:-17]+".sf"
        name = name.replace(' ', '_')
    elif re.search('from the pantry', instruct):
        root = './pantry'
        name = instruct[10:-12]+".sf"
        name = name.replace(' ', '_')
    elif re.match('Fetch the ([^.=*&1-9A-Z]+)', instruct):
        root = '.'
        name = instruct[10:]+".sf"
        name = name.replace(' ', '_')
    if root and name:
        # Walk the chosen root and register every file whose name matches.
        for root, dirs, files in os.walk(root):
            for filename in files:
                if filename.split('/')[-1] == name:
                    driver(filename)
    # TODO: else throw error
    # Return to the directory of the parser
    if ret_dir:
        os.chdir(ret_dir)
def run_instruction(command, instruct, dirname, mixing_bowls):
    """Dispatch one instruction; may return replacement mixing bowls.

    A falsy return (None) tells the caller the bowls were mutated in place.
    """
    if command == "Fetch":
        fetch(instruct, dirname)
    elif command == "Prep":
        bowl_pile = parse_func(instruct[5:], dirname)
        if len(mixing_bowls) > 1:
            # NOTE(review): this branch replaces the freshly parsed
            # bowl_pile with a merge of the EXISTING bowls, and dict.update
            # is fed a list -- confirm this path is reachable/intended.
            temp_bowl = {}
            for bowl in mixing_bowls:
                temp_bowl.update(bowl)
            bowl_pile = [temp_bowl]
        # This is temp only until add bowls is impl
        # list.append returns None, so the caller sees a falsy result and
        # keeps using the (now mutated) mixing_bowls.
        return mixing_bowls[0].append(bowl_pile[0][0])
    else:
        command_list = {
            "Add": ops.add,
            "Remove": ops.sub,
            "Combine": ops.multi,
            "Divide": ops.div,
            "Taste": ops.prnt,
        }
        return command_list[command](instruct, mixing_bowls)
def exec_parser(line, dirname, mixing_bowls):
    """Run every '.'-terminated instruction in *line* against the bowls."""
    if not line:
        return mixing_bowls
    # Normalise '. ' to '.' so splitting on '.' yields clean instructions;
    # the trailing empty fragment produced by split is discarded.
    steps = line.replace('. ', '.').split('.')[:-1]
    for step in steps:
        verb = step.split(' ')[0]
        outcome = run_instruction(verb, step, dirname, mixing_bowls)
        if outcome:
            mixing_bowls = outcome
    return mixing_bowls
def parse_func(func_name, dirname):
    """Execute the prep named *func_name* and return its mixing bowls.

    The prep body is looked up in the global func_hash, then consumed in
    phases: the header up to "Ingredients.", the ingredient declarations,
    and finally the instruction lines.
    """
    INGFLAG = False
    found = False
    func_line = ""
    mixing_bowls = [[]]
    # Find the body text recorded for this prep (last match wins).
    for x in range(0, len(func_hash)):
        if [*func_hash[x]][0] == func_name:
            func_line = func_hash[x][func_name]
    if func_line:
        found = True
        cnt = 0
        for line in func_line.split('\n'):
            if INGFLAG and line:
                # Ingredient block: parse declarations until a
                # non-ingredient line ends the block.
                ing = ing_parser(line)
                if ing:
                    mixing_bowls[0].append(ing)
                else:
                    INGFLAG = False
            elif cnt == 2:
                # Instruction phase: execute the line against the bowls.
                mix = exec_parser(line, dirname, mixing_bowls)
                if mix:
                    mixing_bowls = mix
            elif not INGFLAG and cnt <= 1 and line:
                INGFLAG = check_ing(line)
            else:
                # Phase separator (typically a blank line).
                if cnt == 1:
                    INGFLAG = False
                cnt += 1
    # if not found:
    #     raise MethodNotFoundException()
    return mixing_bowls
def driver(filename_fetch=None):
    """Parse a Sous source file into func_hash; execute it when top-level.

    filename_fetch -- when set, only register the file's preps (used by
    fetch for imports); when None, read the filename from argv, parse it,
    run the first prep and return its flattened ingredients.
    """
    if not filename_fetch:
        parser = argparse.ArgumentParser()
        parser.add_argument("filename")
        args = parser.parse_args()
        filename = args.filename
    else:
        filename = filename_fetch
    # Splitting results in removal of delim. Have to rebuild
    dirname = ''.join(['/' + token for token in filename.split('/')[1:-1]])
    prep_title = ''
    with open(filename, 'r') as f:
        for line in f:
            line2 = line.strip('\n')
            if prep_title and line:
                # Inside a prep: either a new title opens a new entry or
                # the line is appended to the current body.
                faux_title = check_prep(line2)
                if not faux_title:
                    func_hash[len(func_hash)-1][prep_title] += line
                elif faux_title:
                    prep_title = faux_title
                    func_hash.append({prep_title: ""})
            elif line:
                prep_title = check_prep(line2)
                if prep_title:
                    func_hash.append({prep_title: ""})
    if not filename_fetch:
        mixing_bowls = parse_func([*func_hash[0]][0], dirname)
        ret_bowls = []
        for bowls in mixing_bowls:
            for ing in bowls:
                ret_bowls.append(ing)
        return ret_bowls
# Module entry point: parse and execute the file named on the command line.
driver()
| [
"thompson.grey.hunter@gmail.com"
] | thompson.grey.hunter@gmail.com |
ea2af8f1b461e8f96bcd8bd4c778ccfbb9e30cf4 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_11_01/aio/operations/_usages_operations.py | ba330d5a4006df31f4be7606be380043e45a5d56 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 6,249 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._usages_operations import build_list_by_location_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Optional per-call hook: receives the pipeline response, the deserialized
# body and response headers, and returns the value handed back to the caller.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.storage.v2018_11_01.aio.StorageManagementClient`'s
        :attr:`usages` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Injected by the service client: pipeline client, configuration,
        # serializer and deserializer (positional or keyword).
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list_by_location(self, location: str, **kwargs: Any) -> AsyncIterable["_models.Usage"]:
        """Gets the current usage count and the limit for the resources of the location under the
        subscription.

        :param location: The location of the Azure Storage resource. Required.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Usage or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2018_11_01.models.Usage]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2018-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2018-11-01"))
        cls: ClsType[_models.UsageListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # Build the HTTP request for either the first page or a next-link page.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_location_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_location.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        # Deserialize one page into (continuation token, items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("UsageListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, AsyncList(list_of_elem)

        # Fetch one page, raising on any non-200 status.
        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list_by_location.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages"
    }
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
ab59111a97a5ad62860fa5487921405e0a3e743d | ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0 | /02_algorithm/sw_expert_academy/code_problem/all_problem/4047.py | e3cefbc6d9833e6af40370d78a06e70694cf51ae | [] | no_license | wally-wally/TIL | 93fc1d0e3bc7d030341ed54155294c68c48b4c7d | 936783bc86f563646c0398c24e2fcaa707f0ed23 | refs/heads/master | 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 | Python | UTF-8 | Python | false | false | 1,204 | py | import sys
sys.stdin = open('sample_input.txt', 'r')
def card_count(x, y):
global card_kind, card_set, card_num
card_set[x][int(card_num)-1] += 1
if card_set[x][int(card_num)-1] >= 2:
print('#{} ERROR'.format(y + 1))
return 1
card_kind, card_num = elem, ''
T = int(input())
for a in range(T):
card_set = [[0] * 13, [0] * 13, [0] * 13, [0] * 13]
card_kind = card_num = ''
data = input() + ' '
for elem in data:
if not elem.isdigit():
if not card_num:
card_kind = elem
else:
if card_kind == 'S':
m = card_count(0, a)
if m: break
elif card_kind == 'D':
n = card_count(1, a)
if n: break
elif card_kind == 'H':
p = card_count(2, a)
if p: break
elif card_kind == 'C':
q = card_count(3, a)
if q: break
else:
card_num += elem
else:
print('#{} {} {} {} {}'.format(a + 1, card_set[0].count(0), card_set[1].count(0), card_set[2].count(0), card_set[3].count(0))) | [
"wallys0213@gmail.com"
] | wallys0213@gmail.com |
045ff32ed004db574df9188ef7e597240ba6e2df | fc1e9aceafe96b69a7e50f5d56f5bdec55b0f13e | /images/migrations/0006_auto_20180507_1730.py | 9ef7f80df3850c4e4eb5b24fc5accd0373c36ee1 | [
"MIT"
] | permissive | markmurimi/Gallery-app | c6f8085f6f8ba65616980abaf4cde1967b7b4417 | 7f167a497775fe6c508f80046bc1b86874675c7d | refs/heads/master | 2020-03-15T12:56:34.179920 | 2018-05-09T14:55:51 | 2018-05-09T14:55:51 | 132,155,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-07 14:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11: creates the Location model and links
    # each Post to a Location via a new foreign key.
    dependencies = [
        ('images', '0005_remove_post_tags'),
    ]
    operations = [
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        migrations.AddField(
            model_name='post',
            name='location_taken',
            # NOTE(review): default=2 assumes a Location with pk 2 exists
            # when this migration runs -- confirm on fresh databases.
            field=models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, to='images.Location'),
            preserve_default=False,
        ),
    ]
| [
"murimimg180@gmail.com"
] | murimimg180@gmail.com |
d89e07cc305d11ca79c8805eaa418bd79f19e003 | 63a676f3ae166d89054c13fba67ad9b6e3134c54 | /django_stuff/first_project/first_app/views.py | 7784424d7b7077df6d0bbc44fdcad5fdac1c263e | [] | no_license | Rabbi50/django_bootcamp | 917b9435e1c766b5590260a59da3b041c2cfb147 | 912c9ac9c6dc88d6b6d18a724b0265e91858ee66 | refs/heads/master | 2022-12-16T09:53:57.569656 | 2019-12-29T18:09:12 | 2019-12-29T18:09:12 | 226,795,049 | 0 | 0 | null | 2022-12-08T03:21:58 | 2019-12-09T05:52:22 | Python | UTF-8 | Python | false | false | 418 | py | from django.shortcuts import render
from django.http import HttpResponse
from first_app.models import Topic,WebPage,AccessRecord
# Create your views here.
def index(request):
    """Render the home page listing every AccessRecord ordered by date."""
    records = AccessRecord.objects.order_by('date')
    context = {'access_record': records}
    return render(request, 'first_app/index.html', context=context)
| [
"jasrabbi50@gmail.com"
] | jasrabbi50@gmail.com |
7692f7723e1dd805bb16a39a812731df9736cebb | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/digitaltwins/azure-mgmt-digitaltwins/azure/mgmt/digitaltwins/v2020_10_31/__init__.py | b41e620d04ac82f368c0a16c525c6722c1f777f1 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 756 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._azure_digital_twins_management_client import AzureDigitalTwinsManagementClient
from ._version import VERSION
__version__ = VERSION
__all__ = ['AzureDigitalTwinsManagementClient']

# Apply hand-written customizations when an optional _patch module ships
# alongside the generated code; absence of the module is not an error.
try:
    from ._patch import patch_sdk  # type: ignore
    patch_sdk()
except ImportError:
    pass
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
baa37bc63151072fd6ab77cf7c167941449b6d13 | b6e59e2097e30347bb831ac17f52bf5a50f23c4f | /misc/utils.py | c9eb3637aa479305352c0b3357272027fd9b6008 | [
"MIT"
] | permissive | ramonpereira/pyplanners | 8dc32f5670cec9ee1452f6ec7191cf95efe42905 | 73209a5a9d1ce6936d80b52bd4f3e146a355760b | refs/heads/master | 2020-04-07T15:14:25.529743 | 2017-10-24T23:33:04 | 2017-10-24T23:33:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | from inspect import currentframe, getargvalues, getargspec, getfile
from numerical import *
from functions import *
from io import *
from generators import *
from objects import *
import importlib
import abc
#https://docs.python.org/2/glossary.html#term-generator
# Horizontal rule (85 dashes) used to separate sections in printed output.
SEPARATOR = '\n'+85*'-'+'\n'
# NOTE - frame = inspect.currentframe()
def arg_info(frame, ignore=('self',)):
    """Return a mapping of *frame*'s argument names to their current values.

    frame -- a frame object (e.g. from inspect.currentframe())
    ignore -- argument names to leave out of the result (default skips
    'self'; a tuple replaces the original mutable list default)
    """
    #frame_info = inspect.getframeinfo(frame)
    info = getargvalues(frame)  # renamed: the original shadowed the function's own name
    return {arg: info.locals[arg] for arg in info.args if arg not in ignore}
def function_name(stack): # NOTE - stack = inspect.stack()
    """Return the function name recorded in the top entry of *stack*."""
    top_record = stack[0]
    return top_record[3]
def get_path(frame):
    """Return the absolute path of the source file that defines *frame*."""
    # os.path.realpath(__file__)
    source_file = getfile(frame)
    return os.path.abspath(source_file)
def get_directory(abs_path):
    """Return the directory portion of *abs_path*."""
    head, _tail = os.path.split(abs_path)
    return head
def global_fn(name):
    """Look up *name* in this module's global namespace."""
    # sys.modules[__name__]
    module_globals = globals()
    return module_globals[name]  # locals()
# TODO - method to reload all functions
def refresh(module_name):
    """Import (if necessary) and reload *module_name*; return the module.

    Bug fix: the original called the bare builtin ``reload``, which only
    exists on Python 2 and raises NameError on Python 3; importlib.reload
    is used when available, falling back to the Python 2 builtin.
    """
    module = importlib.import_module(module_name)  # module = __import__(module_name, fromlist=[''])
    reload_fn = getattr(importlib, 'reload', None)
    if reload_fn is None:  # Python 2: ``reload`` is a builtin
        reload_fn = reload
    reload_fn(module)
    return module
| [
"caelan@mit.edu"
] | caelan@mit.edu |
224a769a743cd42c842828056094fb4470bbaf78 | b7ddcba90214e32407f66321f2df62d405828461 | /cms/tinymce/compressor.py | d12e444b501dc86ec28f040f4a55615b1447d3a9 | [] | no_license | bart3k1/CMS | 62e6c1696ae4d60478206b63772babb9bbb1ba1c | 2849903cb9d12fd514aef574c6a5a4eaa776b853 | refs/heads/master | 2022-08-16T15:54:08.408722 | 2020-03-05T18:01:58 | 2020-03-05T18:01:58 | 127,267,720 | 0 | 0 | null | 2022-06-27T16:10:37 | 2018-03-29T09:18:41 | TSQL | UTF-8 | Python | false | false | 5,241 | py | """
Based on "TinyMCE Compressor PHP" from MoxieCode.
http://tinymce.moxiecode.com/
Copyright (c) 2008 Jason Davies
Licensed under the terms of the MIT License (see LICENSE.txt)
"""
import json
import os
import re
from datetime import datetime
import tinymce.settings
from django.conf import settings
from django.core.cache import cache
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.cache import patch_response_headers, patch_vary_headers
from django.utils.encoding import smart_text
from django.utils.http import http_date
from django.utils.text import compress_string
# Accepts names that start with a letter and contain only letters, digits,
# underscores, slashes or dashes (used to vet requested resource names).
safe_filename_re = re.compile('^[a-zA-Z][a-zA-Z0-9_/-]*$')
def get_file_contents(filename):
    """Return the text of a TinyMCE resource file, or '' if unavailable.

    The file is located through django.contrib.staticfiles when that app
    is installed, otherwise directly under tinymce.settings.JS_ROOT.
    """
    if 'staticfiles' in settings.INSTALLED_APPS or \
            'django.contrib.staticfiles' in settings.INSTALLED_APPS:
        from django.contrib.staticfiles import finders
        file_path = finders.find(os.path.join('tiny_mce', filename))
    else:
        file_path = os.path.join(tinymce.settings.JS_ROOT, filename)
    try:
        # ``with`` guarantees the handle is closed (replaces the manual
        # try/finally close); TypeError covers finders.find() returning
        # None for an unknown file.
        with open(file_path) as f:
            return f.read()
    except (IOError, TypeError):
        return ''
def split_commas(str):
    """Split a comma-separated string; the empty string yields []."""
    return [] if str == '' else str.split(',')
def gzip_compressor(request):
plugins = split_commas(request.GET.get('plugins', ''))
languages = split_commas(request.GET.get('languages', ''))
themes = split_commas(request.GET.get('themes', ''))
isJS = request.GET.get('js', '') == 'true'
compress = request.GET.get('compress', 'true') == 'true'
suffix = request.GET.get('suffix', '') == '_src' and '_src' or ''
content = []
response = HttpResponse()
response['Content-Type'] = 'text/javascript'
if not isJS:
response.write(render_to_string('tinymce/tiny_mce_gzip.js', {
'base_url': tinymce.settings.JS_BASE_URL,
}))
return response
patch_vary_headers(response, ['Accept-Encoding'])
now = datetime.utcnow()
response['Date'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')
cacheKey = '|'.join(plugins + languages + themes)
cacheData = cache.get(cacheKey)
if cacheData is not None:
if 'ETag' in cacheData:
if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
if if_none_match == cacheData['ETag']:
response.status_code = 304
response.content = ''
response['Content-Length'] = '0'
return response
if 'Last-Modified' in cacheData:
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if if_modified_since == cacheData['Last-Modified']:
response.status_code = 304
response.content = ''
response['Content-Length'] = '0'
return response
tinyMCEPreInit = {
'base': tinymce.settings.JS_BASE_URL,
'suffix': '',
}
content.append('var tinyMCEPreInit={!s};'.format(
json.dumps(tinyMCEPreInit)
))
# Add core
files = ['tiny_mce']
# Add core languages
for lang in languages:
files.append('langs/{!s}'.format(lang))
# Add plugins
for plugin in plugins:
files.append('plugins/{!s}/editor_plugin{!s}'.format(plugin, suffix))
for lang in languages:
files.append('plugins/{!s}/langs/{!s}'.format(plugin, lang))
# Add themes
for theme in themes:
files.append('themes/{!s}/editor_template{!s}'.format(theme, suffix))
for lang in languages:
files.append('themes/{!s}/langs/{!s}'.format(theme, lang))
for f in files:
# Check for unsafe characters
if not safe_filename_re.match(f):
continue
content.append(get_file_contents('{!s}.js'.format(f)))
# Restore loading functions
content.append('tinymce.each("{!s}".split(","), function(f){{'
'tinymce.ScriptLoader.markDone(tinyMCE.baseURL+'
'"/"+f+".js");}});'.format(','.join(files)))
unicode_content = []
for i, c in enumerate(content):
try:
unicode_content.append(c.decode('latin-1'))
except AttributeError:
# python 3 way
unicode_content.append(smart_text(c))
except UnicodeDecodeError:
try:
unicode_content.append(c.decode('utf-8'))
except Exception:
print('{!s} is nor latin-1 nor utf-8.'.format(files[i]))
raise
# Compress
if compress:
content = compress_string(b''.join([c.encode('utf-8')
for c in unicode_content]))
response['Content-Encoding'] = 'gzip'
response['Content-Length'] = str(len(content))
response.write(content)
timeout = 3600 * 24 * 10
patch_response_headers(response, timeout)
if not response.has_header('Last-Modified'):
# Last-Modified not set since Django 1.11
response['Last-Modified'] = http_date()
cache.set(cacheKey, {
'Last-Modified': response['Last-Modified'],
'ETag': response.get('ETag', ''),
})
return response
| [
"bart3k1@gmail.com"
] | bart3k1@gmail.com |
cfd5599af1194de14dc87510ec8936eb301fcb54 | 6f8041080c5896f7ccf38759c5e03719aa7e0ef7 | /project/urls.py | b3c31c448c06dd0a2da07b5ebcdbf9d2ef03bb0e | [] | no_license | cnkyrpsgl/recipe | 5258ec3885e0ebe4703e4b30c5b18541e285a466 | 3b1da89ce3619cafee7688037da35dcecd0bca90 | refs/heads/master | 2020-06-04T09:24:55.768529 | 2019-06-14T15:21:24 | 2019-06-14T15:21:24 | 191,824,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,059 | py | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from recipes import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home, name='home'),
path('accounts/', include('accounts.urls')),
path('recipes/', include('recipes.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"cenkay.arapsagolu@gmail.com"
] | cenkay.arapsagolu@gmail.com |
5052c3800c8b8cc06007bed9af95f9de818ca78f | ac41c81ba8597cbca0d2189cda993b74bc7fe2bb | /plot.py | 91881d45289329313daced0ebfb69c7e25a932b3 | [] | no_license | jsg921019/label_smoothing | 2ed38618d734ae9834bc33c1b328269c97a5af44 | afabc638bd755082e8956345f707c3cdc2597f65 | refs/heads/main | 2023-08-28T04:19:17.535494 | 2021-10-21T22:29:19 | 2021-10-21T22:29:19 | 405,281,003 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,016 | py | import os
import copy
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.models import AlexNet
from torchvision.datasets import CIFAR10
parser = argparse.ArgumentParser(description='Plot distribution')
parser.add_argument('data_path', type=str)
parser.add_argument('weight_path', type=str)
parser.add_argument('--img_name', type=str, default='output')
parser.add_argument('--classes', type=int, nargs=3, default=[0, 1, 2])
args = parser.parse_args()
print(args)
class Projector:
def __init__(self, model, classes):
self.model = copy.deepcopy(model)
self.last_layer = list(self.model.modules())[-1]
self.model.classifier = torch.nn.Sequential(*list(self.model.classifier)[:-1])
self.classes = classes
weight = self.last_layer.weight.detach().cpu().numpy()
bias = self.last_layer.bias.detach().cpu().numpy()
self.template = np.concatenate([weight, bias.reshape(-1, 1)], axis=-1)
self.r0 = self.template[classes].mean(axis=0)
basis = self.template[classes[1:]] - self.r0
self.orthonormal_basis, _ = np.linalg.qr(basis.T)
def plot(self, dataloader, n_data = 300):
device = next(self.model.parameters()).device
cnt = {class_:0 for class_ in self.classes}
projection = {class_:[] for class_ in self.classes}
self.model.eval()
for imgs, label in dataloader:
imgs, label = imgs.to(device), label.to(device)
with torch.no_grad():
outputs = self.model(imgs)
for class_ in self.classes:
if cnt[class_] < n_data:
class_embeddings = outputs[label == class_].cpu().numpy()
class_embeddings = np.concatenate([class_embeddings, np.ones((len(class_embeddings), 1))], axis=-1)
if len(class_embeddings):
cnt[class_] += len(class_embeddings)
projection[class_].append((class_embeddings - self.r0) @ self.orthonormal_basis)
if all(cnt[class_] >= n_data for class_ in self.classes):
#self.model.classifier = torch.nn.Sequential(*list(self.model.classifier), self.last_layer)
return {class_:np.concatenate(projection[class_], axis=0) for class_ in self.classes}
raise ValueError(f'Not enough datas (must include at least {n_data} datas)')
transform = transforms.Compose([transforms.Resize(227),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
batch_size = 128
trainset = CIFAR10(root=args.data_path, train=True, download=True, transform=transform)
testset = CIFAR10(root=args.data_path, train=False, download=True, transform=transform)
trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
model = AlexNet().cuda()
model.classifier[6] = torch.nn.Linear(4096, 10).cuda()
model.load_state_dict(torch.load(args.weight_path))
p = Projector(model, args.classes)
for loader, title in zip([trainloader, testloader], ['Training', 'Validation']):
ret = p.plot(loader)
fig, ax = plt.subplots(figsize=(7,7), subplot_kw={'aspect':1})
for c in [0,1,2]:
ax.scatter(ret[c][:,1], ret[c][:,0], s=5, alpha=0.8)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lim = max(abs(xmin), abs(ymin), abs(xmax), abs(ymax))
ax.set_xlim(xmin=-lim, xmax=lim)
ax.set_ylim(ymin=-lim, ymax=lim)
ax.set_title(title + (' w/ LS' if 'smooth' in args.weight_path else ' w/o LS'))
plt.savefig(args.img_name + '_' + title + '.png') | [
"jsg921019@gmail.com"
] | jsg921019@gmail.com |
8f7ab8bb24e05c54bd2b593592c57817e130f6e0 | 95f9a365cce91c584fd615ca403215f36aa3fc57 | /lesson_7_new/leroy_pars/spiders/leroymerlin.py | 5381a527ee77930f09c1b8fdeec1500038093569 | [] | no_license | AShipkov/Methods-for-data-from-the-Internet | 7cc39cf9a1eac82205dba26fa9c05bdfc85589d9 | aa915da40c2afcfdae80c92f54fd3316ed6a13b5 | refs/heads/master | 2022-11-26T17:08:02.170474 | 2020-08-11T18:06:35 | 2020-08-11T18:06:35 | 280,082,278 | 0 | 0 | null | 2020-08-11T18:06:39 | 2020-07-16T07:15:58 | Python | UTF-8 | Python | false | false | 1,209 | py | import scrapy
import scrapy
from scrapy.http import HtmlResponse
from scrapy.loader import ItemLoader
from leroy_pars.items import Leroy_parsItem
class LeroymerlinSpider(scrapy.Spider):
name = 'leroymerlin'
allowed_domains = ['leroymerlin.ru']
def __init__(self, search):
super().__init__()
self.start_urls = [f'http://leroymerlin.ru/search/?q={search}']
def parse(self, response: HtmlResponse):
itm_links = response.xpath("//uc-plp-item-new/@href")
for link in itm_links:
yield response.follow(link, callback=self.item_pars)
def item_pars(self,response:HtmlResponse):
loader=ItemLoader(item=Leroy_parsItem(), response=response)
loader.add_xpath('name', "//h1/text()")
loader.add_xpath('photos', "//source[@media=' only screen and (min-width: 1024px)']/@srcset")
loader.add_xpath('_id', "//span[@slot='article']/@content")
loader.add_xpath('price', "//uc-pdp-price-view[@slot='primary-price']/meta[@itemprop='price']/@content")
loader.add_xpath('info', "//div[@class='def-list__group']")
# loader.add_css()
loader.add_value('url', response.url)
yield loader.load_item() | [
"shipkov2007@ya.ru"
] | shipkov2007@ya.ru |
23f0a5b1c830acd508c6d38617e0e2c8e9f6c83c | 0326f06f68fb0d919f8467f4744dfd60a654836a | /eggs/Django-1.6.5-py2.7.egg/django/contrib/sessions/serializers.py | 0ded8502434733fde4e8c2648fe134d4c602df90 | [] | no_license | ethirajit/onlinepos | 67de6023241339ae08c3b88a9e7b62b837ec17a3 | 186ba6585d0b29f96a5c210462764515cccb3b47 | refs/heads/master | 2021-01-17T13:23:36.490727 | 2014-07-01T10:30:17 | 2014-07-01T10:30:17 | 34,388,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from django.core.signing import JSONSerializer as BaseJSONSerializer
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
class PickleSerializer(object):
"""
Simple wrapper around pickle to be used in signing.dumps and
signing.loads.
"""
def dumps(self, obj):
return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
def loads(self, data):
return pickle.loads(data)
JSONSerializer = BaseJSONSerializer
| [
"root@server.onlinepos.co.in"
] | root@server.onlinepos.co.in |
8bdfa7763c1f7dea7a5a524664582b26f1032154 | 9481772b5eefeaae71d8d2d668dda611735ab5a7 | /braindecode/online/dummy_ui_server.py | 4f159137325ef03edbe2c379b89e432c50f86b73 | [] | no_license | Mahelita/braindevel | f8cd77803e0860764dee5822dd00fc8a2c8c3a6c | 21f58aa74fdd2a3b03830c950b7ab14d44979045 | refs/heads/master | 2020-03-23T15:03:13.140736 | 2017-09-11T14:42:06 | 2017-09-11T14:42:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import gevent.server
import signal
def handle(socket, address):
print ("new connection")
# using a makefile because we want to use readline()
socket_file = socket.makefile()
while True:
i_sample = socket_file.readline()
preds = socket_file.readline()
print i_sample
print preds
gevent.signal(signal.SIGQUIT, gevent.kill)
hostname = ''
port = 30000
server = gevent.server.StreamServer((hostname, port), handle)
print("Starting server on port {:d}".format(port))
server.start()
print("Started server")
server.serve_forever() | [
"robintibor@gmail.com"
] | robintibor@gmail.com |
921396e2c499a33277b130c998781aa2b3939dca | f4e9721bd529541f2402472f201bb6fde66fea53 | /trailing zeroes.py | ccc47c47fa113b1f63571096cff21a9bdd62b88f | [] | no_license | wimpywarlord/hacker_earth_and_hacker_rank_solutions | 23d973778bceca5a395dd98b0b7252db49d02366 | 1277ba97e2744a7dab62f1e1319aac77f8ec6a28 | refs/heads/master | 2021-07-01T19:48:07.501021 | 2019-12-28T05:55:39 | 2019-12-28T05:55:39 | 172,307,339 | 10 | 3 | null | 2020-10-01T06:43:03 | 2019-02-24T07:08:29 | Python | UTF-8 | Python | false | false | 220 | py | n=int(input())
fact=1
for i in range(1,n+1):
fact*=i
print(fact)
count=0
num=[]
while fact>0:
y=fact%10
num.append(y)
fact//=10
print(num)
for i in num:
if i!=0:
break
count+=1
print(count)
| [
"wimpywarlord@gmail.com"
] | wimpywarlord@gmail.com |
afd642a8c815e027b36a2ac9ceca8be953180e13 | 619a80cd7160fc42ce8203255c6420bf9e22dc19 | /app.py | 0adecaa40968aefb1a88783b2fc98f32829704e5 | [
"BSD-3-Clause"
] | permissive | andersosthus/LazyBlacksmith | c8a6615afed42d9de90575a52c0ee0e89f5f7f60 | d1689dba0052dcaac9de10598c1e91e620869852 | refs/heads/master | 2021-01-23T07:43:56.137153 | 2017-03-09T23:50:05 | 2017-03-09T23:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # -*- encoding: utf-8 -*-
import config
import logging
from lazyblacksmith.app import create_app
app = create_app(config)
if __name__ == '__main__':
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
logger = logging.getLogger('lb.utils')
logger.addHandler(console)
logger.setLevel(logging.DEBUG)
logger = logging.getLogger('lb.ajax')
logger.addHandler(console)
logger.setLevel(logging.DEBUG)
app.run(port=config.PORT, host=config.HOST)
| [
"anakhon@gmail.com"
] | anakhon@gmail.com |
5cf7411ded7b5dcab29660807618ce57dfbaf889 | 07542e367da9b9d106e00c5764d2c3639cf4958e | /classification/training/update/WindowClassificationTrainUpdateAnalysisModelListVisualize.py | fda442f4d16d01ff50c1d847bbb0003eb2d26e6a | [
"Apache-2.0"
] | permissive | iamavailable/Monk_Gui | 6991308f237ee13c5bb619e766bedbe2c388e43a | 5d768240a9244629da2f68f6194d91c98e1b0ccb | refs/heads/master | 2023-01-02T12:35:53.898469 | 2020-10-22T10:29:07 | 2020-10-22T10:29:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,963 | py | import os
import sys
import json
from PIL import Image
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class WindowClassificationTrainUpdateAnalysisModelListVisualize(QtWidgets.QWidget):
backward_model_param = QtCore.pyqtSignal();
backward_analyse = QtCore.pyqtSignal();
def __init__(self):
super().__init__()
self.cfg_setup()
self.title = 'Experiment {} - Visualize Analysis of Model List Hyperparam tuning'.format(self.system["experiment"])
self.left = 10
self.top = 10
self.width = 900
self.height = 600
self.initUI()
def cfg_setup(self):
with open('base_classification.json') as json_file:
self.system = json.load(json_file)
self.comparison_name = "Comparison_" + self.system["analysis"]["model_list"]["analysis_name"]
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height);
# Backward
self.b1 = QPushButton('Back To Analysis', self)
self.b1.move(400,550)
self.b1.clicked.connect(self.backward1)
# Forward
self.b2 = QPushButton('Back to Update model param', self)
self.b2.move(550,550)
self.b2.clicked.connect(self.backward2)
# Quit
self.b3 = QPushButton('Quit', self)
self.b3.move(800,550)
self.b3.clicked.connect(self.close)
self.createLayout_Container();
def createLayout_group(self, label, img_file):
sgroupbox = QGroupBox("Graph - {}:".format(label), self)
layout_groupbox = QVBoxLayout(sgroupbox)
l1 = QLabel(self)
l1.resize(700, 450)
layout_groupbox.addWidget(l1);
if(os.path.isfile(img_file)):
img = Image.open(img_file);
img = img.resize((700, 450));
img_file = img_file.split(".")[0] + "_.png";
img.save(img_file)
pixmap = QPixmap(img_file)
l1.setPixmap(pixmap)
return sgroupbox
def createLayout_Container(self):
self.scrollarea = QScrollArea(self)
self.scrollarea.setFixedSize(700, 480)
self.scrollarea.setWidgetResizable(True)
widget = QWidget()
self.scrollarea.setWidget(widget)
self.layout_SArea = QVBoxLayout(widget)
label_list = ["Train Accuracy", "Train Loss", "Validation Accuracy",
"Validation Loss", "Training Time", "Gpu Usage",
"Best validation accuracy"]
image_list = ["workspace/comparison/" + self.comparison_name + "/train_accuracy.png",
"workspace/comparison/" + self.comparison_name + "/train_loss.png",
"workspace/comparison/" + self.comparison_name + "/val_accuracy.png",
"workspace/comparison/" + self.comparison_name + "/val_loss.png",
"workspace/comparison/" + self.comparison_name + "/stats_training_time.png",
"workspace/comparison/" + self.comparison_name + "/stats_max_gpu_usage.png",
"workspace/comparison/" + self.comparison_name + "/stats_best_val_acc.png"
]
for i in range(len(image_list)):
self.layout_SArea.addWidget(self.createLayout_group(label_list[i], image_list[i]))
self.layout_SArea.addStretch(1)
self.scrollarea.move(10, 10)
def backward1(self):
with open('base_classification.json', 'w') as outfile:
json.dump(self.system, outfile)
self.backward_analyse.emit();
def backward2(self):
with open('base_classification.json', 'w') as outfile:
json.dump(self.system, outfile)
self.backward_model_param.emit();
'''
app = QApplication(sys.argv)
screen = WindowClassificationTrainUpdateAnalysisModelListVisualize()
screen.show()
sys.exit(app.exec_())
''' | [
"abhishek4273@gmail.com"
] | abhishek4273@gmail.com |
2a2a7fab392691ad141fac7ea0228cd9abb2beab | 3524cd098502204a7bca7c2a2dc7757f50d324d8 | /authenticate/forms.py | ce97b4729d27e12d7c8877113ab166c9daeab0ff | [] | no_license | dm1tro69/John_Elder_auth | 91594a4c960b7d955d559f1a128210625e87c58a | c5e20afbae3600083866b6b7cb013f51e38e2751 | refs/heads/master | 2020-11-29T14:06:48.055281 | 2019-12-26T19:57:22 | 2019-12-26T19:57:22 | 230,132,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.models import User
class EditProfileForm(UserChangeForm):
password = forms.EmailField(label='', widget=forms.TextInput(attrs={'type': 'hidden'}))
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password')
class SignUpForm(UserCreationForm):
email = forms.EmailField(label='', widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Email Addres'}))
first_name = forms.CharField(label='', max_length=100, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'First Name'}))
last_name = forms.CharField(label='', max_length=100, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Last Name'}))
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2',)
def __init__(self, *args, **kwargs):
super(SignUpForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs['class'] = 'form-control'
self.fields['username'].widget.attrs['placeholder'] = 'Username'
self.fields['username'].label = ''
self.fields['password1'].widget.attrs['class'] = 'form-control'
self.fields['password1'].widget.attrs['placeholder'] = 'Password'
self.fields['password1'].label = ''
self.fields['password2'].widget.attrs['class'] = 'form-control'
self.fields['password2'].widget.attrs['placeholder'] = 'Confirm Password'
self.fields['password2'].label = '' | [
"dimolg22@gmail.com"
] | dimolg22@gmail.com |
d7d33f599dc291b15dbc5bfaed6d9c9df268358d | c2f4afee3ec4faef7231da2e48c8fef3d309b3e3 | /org/netsetos/python/String_Tasks/first_non_hash.py | 9557d647a2991efdaca5cfcd4d91d9cc00d6a28e | [] | no_license | tanu312000/pyChapter | a723f99754ff2b21e694a9da3cb2c6ca0cd10fce | 2fd28aefcbfaf0f6c34db90fdf0d77f9aea142ce | refs/heads/master | 2020-05-03T15:51:34.334806 | 2019-03-31T16:17:45 | 2019-03-31T16:17:45 | 178,712,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | def first_non_repeating(str):
n=len(str)
for i in range(0,n):
count=1
for j in range(0,n):
if(str[i]==str[j]):
count=count+1
if(count==2):
print(str[i])
break
str="SARTHAK"
p=first_non_repeating(str)
print(p)
| [
"tanurocks90@gmail.com"
] | tanurocks90@gmail.com |
16ef2e50c075663384a8d6a44082ac09cf7827ad | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/VBF_HToTauTau_M-140_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467448/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_3/run_cfg.py | 79d6c5b5ebda54c86474a1006cdba57a2a9043b3 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/VBF_HToTauTau_M-140_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467448/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-140_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_18_1_8tu.root',
'/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-140_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_19_1_5nI.root',
'/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-140_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_1_1_1HY.root',
'/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-140_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_20_1_9LT.root',
'/store/cmst3/group/cmgtools/CMG/VBF_HToTauTau_M-140_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_21_1_6Mn.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
32637808d13e39b529be631a6f05a7b3aad5a081 | 3db2fcd1a34ae7b22225029587369f49424457dd | /classifier_alignment/test_simulator.py | 7ca5c84abe3b1facf186ef78b7ef14dd80c87880 | [] | no_license | pombredanne/realigner | 7f0fdfdf42f757fead45cdeb5ea2901c4965e944 | b0c32cace20dd720c7609f009d86846d9ecb750f | refs/heads/master | 2021-01-18T03:57:30.977009 | 2014-05-06T09:35:46 | 2014-05-06T09:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,240 | py | #!/usr/bin/python
from alignment import Fasta
from classifier_alignment.AnnotationConfig import Annotations
from tools.file_wrapper import Open
import json
import os
import random
import sys
import track
from classifier_alignment.simulator import *
P_START_GENE = 0.01
P_STOP_GENE = 0.01
P_START_DELETE = 0.01
P_STOP_DELETE = 0.1
P_NOT_MUTATE_GENE = 1.0
P_MUTATE_DNA_11 = 0.8
P_MUTATE_DNA_1 = 0.65
P_MUTATE_DNA_00 = 0.6
def create_dna_mutation_coin(s):
"""
set up DNA mutation coin
"""
p = [P_MUTATE_DNA_00, P_MUTATE_DNA_1, P_MUTATE_DNA_11]
return BiasedCoin(p[s])
def mutate(b, g):
return 3-b if g > 0 else b
def main(n, datadir='data/train_sequences/', fname='simulated_alignment'):
s1name = "sequence1"
s2name = "sequence2"
s3name = "sequence3"
annotation_name = 'gene'
alignment_extension = ".fa"
annotations_extension = ".bed"
config_extension = ".js"
if len(sys.argv) > 1:
n = int(sys.argv[1])
if len(sys.argv) > 2:
fname = sys.argv[2]
master_gene_sequence = MarkovChain(P_START_GENE, P_STOP_GENE)
human_delete_sequence = MarkovChain(P_START_DELETE, P_STOP_DELETE)
mouse_delete_sequence = MarkovChain(P_START_DELETE, P_STOP_DELETE)
horse_delete_sequence = MarkovChain(P_START_DELETE, P_STOP_DELETE)
mutator_coin = BiasedCoin(P_NOT_MUTATE_GENE)
master_gene = list()
human_gene = list()
mouse_gene = list()
horse_gene = list()
human_dna = list()
mouse_dna = list()
horse_dna = list()
for i in range(n):
# create master_gene item
g = g2 = g3 = g4 = master_gene_sequence.get_state()
# mutate master_gene item
if g:
g2 = mutator_coin.flip()
g3 = mutator_coin.flip()
g4 = mutator_coin.flip()
dna_mutation_coin = create_dna_mutation_coin(g2 + g3)
dna_mutation_coin2 = create_dna_mutation_coin(g2 + g4)
# create DNA item
c = c2 = c3 = random.randint(0, 3)
c2 = mutate(c2, g2+g3)
c, c2, c3 = [DNA_CHARS[i] for i in (c, c2, c3)]
if not dna_mutation_coin.flip():
char_index = random.randint(0, 2)
if DNA_CHARS[char_index] == c2:
char_index = 3
c2 = DNA_CHARS[char_index]
if not dna_mutation_coin2.flip():
char_index = random.randint(0, 2)
if DNA_CHARS[char_index] == c3:
char_index = 3
c3 = DNA_CHARS[char_index]
# delete DNA item
if human_delete_sequence.get_state():
c = '-'
if mouse_delete_sequence.get_state():
c2 = '-'
if horse_delete_sequence.get_state():
c3 = '-'
# add items to sequence
master_gene.append(g)
human_gene.append(g2)
mouse_gene.append(g3)
horse_gene.append(g4)
human_dna.append(c)
mouse_dna.append(c2)
horse_dna.append(c3)
# output
s1fname = os.path.join(
datadir, fname+'_'+s1name+'_'+annotation_name+annotations_extension
)
if os.path.isfile(s1fname):
os.remove(s1fname)
s2fname = os.path.join(
datadir, fname+'_'+s2name+'_'+annotation_name+annotations_extension
)
if os.path.isfile(s2fname):
os.remove(s2fname)
s3fname = os.path.join(
datadir, fname+'_'+s3name+'_'+annotation_name+annotations_extension
)
if os.path.isfile(s3fname):
os.remove(s3fname)
intervals1 = sequence_to_intervals(
get_sequence(human_gene, human_dna), annotation_name
)
intervals2 = sequence_to_intervals(
get_sequence(mouse_gene, mouse_dna), annotation_name
)
intervals3 = sequence_to_intervals(
get_sequence(horse_gene, horse_dna), annotation_name
)
annotations = Annotations()
annotations.setAnnotations([annotation_name])
annotations.addSequences([s1name, s2name, s3name])
annotations.addAnnotationFile(s1name, annotation_name, s1fname)
annotations.addAnnotationFile(s2name, annotation_name, s2fname)
# annotations.addAnnotationFile(s3name, annotation_name, s3fname)
Fasta.save(
[
(s1name, ''.join(human_dna)),
(s2name, ''.join(mouse_dna)),
# (s3name, ''.join(horse_dna))
],
os.path.join(datadir, fname+alignment_extension)
)
with track.new(s1fname, 'bed') as t:
t.fields = ['start', 'end', 'name']
t.write("chr1", intervals1)
with track.new(s2fname, 'bed') as t:
t.fields = ['start', 'end', 'name']
t.write("chr1", intervals2)
# with track.new(s3fname, 'bed') as t:
# t.fields = ['start', 'end', 'name']
# t.write("chr1", intervals3)
with Open(os.path.join(datadir, fname+config_extension), "w") as f:
json.dump(annotations.toJSON(), f)
if __name__ == "__main__":
main(10000, 'data/sequences/model_train_seq/test')
main(1000, 'data/sequences/test')
for i in range(5):
simulate(1000, 'data/sequences/test', fname='simulated_alignment{}'.format(i))
for i in range(20):
main(
10000, 'data/sequences/train_test',
fname='simulated_alignment{}'.format(i)
)
| [
"mhozza@gmail.com"
] | mhozza@gmail.com |
8a5a3795eafcb5780fc6e69a9795ed0466e687f6 | afa59644777f5cd8ae59d0b4b29f91c45d1264a9 | /web_chat/urls.py | c1f8e0b2dcef9f9e4f359d39e8ef138c834254a4 | [
"Apache-2.0"
] | permissive | yezimai/oldboyProject | aa76beca46be29e164f65b45dda35924d8fa5bbb | 889eebc2e6158b07ac0964b25eb01df743ad0117 | refs/heads/master | 2021-05-13T17:52:45.623762 | 2018-01-09T16:03:56 | 2018-01-09T16:03:57 | 116,824,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | """oldboyProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
import views
urlpatterns = [
url(r'dashboard/', views.dashboard,name='chat'),
url(r'load_contact_list/', views.load_contact_list,name='load_contact_list'),
url(r'send_msg/', views.send_msg,name='send_msg'),
url(r'get_msg/', views.send_msg,name='get_msg'),
url(r'file_upload/', views.file_upload,name='file_upload'),
url(r'file_upload_progess/', views.file_upload_progess,name='file_upload_progess'),
url(r'DelFileCache/', views.DelFileCache,name='DelFileCache'),
]
| [
"41815224@qq.com"
] | 41815224@qq.com |
6c5cad32452e5af7b0acdef23e6ad9b4fb0db1ef | 06200840fb159fcb30775fcf053dcdc54a53ed6c | /milksets/seeds/seeds.py | 1c20e2b9d978d2cefa4759e073bc6bcba0db3547 | [
"MIT"
] | permissive | luispedro/milksets | 936c3ab1be48e7c2fcfef1c96f1c0506b7a03e13 | 84fc8cba4d4a87acf573ce562cd065b0ee37fadd | refs/heads/master | 2020-04-16T09:29:54.355230 | 2015-03-18T17:20:40 | 2015-03-18T17:20:40 | 394,729 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Luis Pedro Coelho <luis@luispedro.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
import numpy as np
from os.path import dirname
from ..vtypes import continuous
from ..utils import standard_properties, standard_classification_loader
__all__ = ['load'] + standard_properties
name = 'seeds'
short_name = 'Seeds'
long_name = 'Seeds Flower Data Set'
reference = '''\
M. Charytanowicz, J. Niewczas, P. Kulczycki, P.A. Kowalski, S. Lukasik, S. Zak,
'A Complete Gradient Clustering Algorithm for Features Analysis of X-ray
Images', in: Information Technologies in Biomedicine, Ewa Pietka, Jacek Kawa
(eds.), Springer-Verlag, Berlin-Heidelberg, 2010, pp. 15-24.
'''
url = 'http://archive.ics.uci.edu/ml/datasets/seeds'
data_source = 'UCI'
label_names = ['Kama', 'Rosa', 'Canadian']
missing_values = False
value_types = [
continuous('area'),
continuous('perimeter'),
continuous('compactness'),
continuous('length of kernel'),
continuous('width of kernel'),
continuous('asymmetry coefficien'),
continuous('length of kernel groove'),
]
@standard_classification_loader(name)
def load(force_contiguous=True):
    """Load the seeds dataset.

    Parameters
    ----------
    force_contiguous : bool, optional
        If True (default), copy features and labels so both are contiguous
        arrays owned by the caller.

    Returns
    -------
    features : ndarray of shape (n_samples, 7)
        The seven continuous kernel measurements (see ``value_types``).
    labels : ndarray of int, shape (n_samples,)
        Class labels in {0, 1, 2}; the source file stores them as 1..3.
    """
    # (Removed an unused `from bz2 import BZ2File` — np.loadtxt handles the
    # gzipped text file directly.)
    base = dirname(__file__) + '/data/'
    data = np.loadtxt(base + 'seeds_dataset.txt.gz')
    features = data[:, :-1]
    labels = data[:, -1]
    labels -= 1  # shift 1-based class ids to 0-based
    labels = labels.astype(int)
    if force_contiguous:
        features = features.copy()
        labels = labels.copy()
    return features, labels
| [
"luis@luispedro.org"
] | luis@luispedro.org |
451e401a87b9f6da9415c5da86f0ec15a4ba4c57 | 1dbc955c3d717476fa75a48cc87a05e2eceb0002 | /daily_challenges/merge_two_lists.py | e9da11979e8f8e04f9950a6a677b80ceff4a60fa | [] | no_license | gregorysimpson13/leetcode | e68eaee2ba38a1edff119eda1ccdeacc0c400d26 | ae88b9f9979a5643497cb2dfeb90d19a1bcdb137 | refs/heads/master | 2023-03-31T23:58:58.940234 | 2021-04-11T14:37:31 | 2021-04-11T14:37:31 | 258,632,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Runtime: O(n); beats 91.67%
# Space: O(1)
class Solution:
    """Merge two sorted linked lists into a single sorted list (LeetCode 21)."""

    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Splice nodes from l1 and l2 in nondecreasing order; return the head."""
        sentinel = ListNode()
        tail = sentinel
        while l1 and l2:
            if l1.val < l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        # At most one list still has nodes; attach the remainder unchanged.
        tail.next = l1 if l1 else l2
        return sentinel.next
"gregorysimpson13@gmail.com"
] | gregorysimpson13@gmail.com |
12f9704a22c0030bd9d3de600224bee0149ba740 | 505f1c36d931d4388a0a4f8c57fbd8bd9ab4d821 | /ImageAnalysis/ImageAnalysis/python/references/bead-designer-test/gui/BeadDesignDesigner.py | 01953da0f43ff4fa81649da28a39c21b093b56eb | [
"MIT"
] | permissive | mikebourbeauart/perler-printer | 9e43a51b82cb9b08d35c81e680ea7ef2624fda2e | 8c5023de6bb9b3cbe2bc28c1c823030dfd708db4 | refs/heads/master | 2022-12-01T18:46:37.632443 | 2020-05-04T00:41:11 | 2020-05-04T00:41:11 | 98,070,537 | 0 | 1 | MIT | 2022-11-22T05:58:34 | 2017-07-23T02:49:35 | Python | UTF-8 | Python | false | false | 680 | py | """Subclass of Designer, which is generated by wxFormBuilder."""
import wx
import beadgui
# Implementing Designer
class BeadDesignDesigner( beadgui.Designer ):
def __init__( self, parent ):
beadgui.Designer.__init__( self, parent )
# Handlers for Designer events.
def onGenerate( self, event ):
# TODO: Implement onGenerate
pass
def onImage( self, event ):
# TODO: Implement onImage
pass
def onView( self, event ):
# TODO: Implement onView
pass
def onLoadImage( self, event ):
# TODO: Implement onLoadImage
pass
def onExit( self, event ):
# TODO: Implement onExit
pass
def onAbout( self, event ):
# TODO: Implement onAbout
pass
| [
"borbs727@gmail.com"
] | borbs727@gmail.com |
fd6c1aed9ec2bae63b58209b2b31577252f7d687 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/GluGluToHToTauTau_M-125_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467487/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_2/run_cfg.py | ccc76fd5d757844555f7d822aea28d28b5a0adf8 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/GluGluToHToTauTau_M-125_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467487/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
# Override the input source from base_cfg: five CMG tuple files of the
# GluGluToHToTauTau M-125 Summer12 sample from the CMS group storage area.
process.source = cms.Source("PoolSource",
    # Events need not be delivered in run/lumi-sorted order.
    noEventSort = cms.untracked.bool(True),
    # Keep every product except the structured PF jets collection.
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    # Inputs are treated as duplicate-free, so skip the duplicate check.
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-125_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_15_1_Ht8.root',
        '/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-125_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_16_2_QWj.root',
        '/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-125_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_17_1_jtd.root',
        '/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-125_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_18_1_zkc.root',
        '/store/cmst3/group/cmgtools/CMG/GluGluToHToTauTau_M-125_8TeV-powheg-pythia6/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_19_1_HVl.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
d5079851f208d0e4515d0bcb2767a6517585deab | 1e177ebdcb470f738c058606ac0f86a36085f661 | /BlockHeat41/coldStart03.py | 8a73d964096b40a88330d5c5ff78071f81df5640 | [] | no_license | robingreig/raspi-git | 5cbdd295c1048a0571aa2c2f8576438269439f07 | 7373bf94557d7a88c8f343362ba64f9cd19c8ce7 | refs/heads/master | 2023-08-31T03:16:17.286700 | 2023-08-26T11:54:23 | 2023-08-26T11:54:23 | 16,873,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | #!/usr/bin/env python3
import time
import datetime as dt
import os
# if debug > 0 then outputs will be turned OFF (dry-run: print instead of act)
debug = 0

# Outside temperature, stored in this file by the blockheat program that
# runs every 10 minutes.  Context managers ensure the handle is closed even
# if float() raises.
with open("/home/robin/outsideTemp", 'r') as f:
    lines1 = f.readlines()
lines2 = float(lines1[0])
if debug > 0:
    print("Lines2 = ", lines2)

# Current time reduced to an 'HH:MM' string so it can be compared
# lexicographically against the schedule window below.
x = dt.datetime.now()
if debug > 0:
    print(x)
y = x.strftime("%H:%M")
if debug > 0:
    print(y)
z = dt.datetime.strptime(y, '%H:%M')
if debug > 0:
    print("z should be in datetime format = ", z)
z0 = dt.datetime.strftime(z, '%H:%M')
if debug > 0:
    print("z0 should be only hours & mins = ", z0)

# Only run the block heaters if the time is between 3:00am & 3:20am.
if '03:00' <= z0 <= '03:20':
    # if debug is OFF and temp is COLD enough, turn on both outputs
    if debug == 0 and lines2 <= -18:
        os.system("/home/robin/raspi-git/BlockHeat41/BH-right23-on-mqtt.py")
        os.system("/home/robin/raspi-git/BlockHeat41/BH-left24-on-mqtt.py")
    else:  # if debug is ON or temp is not COLD enough, just print
        print("debug > 0, or temp is :", lines2)
    # Record the temperature seen during the overnight check.
    # NOTE(review): kept inside the 03:00-03:20 window per the original
    # layout — confirm it should not run on every invocation.
    with open("/home/robin/lastNightTemp", "w") as cht:
        cht.write(str(lines2))
| [
"robin.greig@calalta.com"
] | robin.greig@calalta.com |
0fd4d2a9c455e6f7e2152c9a99c0e76d250b7ab7 | 370f334038da2c6b37ba1d60d71e35db2ea61df8 | /2019/day12/part1.py | 5855bdaffd68749e2dfb2bf1c817a029e3c3d669 | [
"MIT"
] | permissive | fourjr/advent-of-code | 94ff31ab06af20401db2153954770bc2e0eb7960 | 4204f936acacd6566a74d07353259f468966d538 | refs/heads/master | 2023-04-03T16:06:25.473834 | 2023-03-31T16:14:16 | 2023-03-31T16:14:16 | 159,935,359 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | from dataclasses import dataclass, field
with open('input.txt') as f:
inp = f.read().splitlines()
@dataclass
class Coordinates:
    """A mutable 3-D integer vector, used for both position and velocity."""
    # Axis components; default to the origin / zero velocity.
    x: int = 0
    y: int = 0
    z: int = 0
@dataclass
class Moon:
    """One moon: a position and a velocity, both 3-D integer vectors."""
    position: Coordinates
    velocity: Coordinates = field(default_factory=Coordinates)

    def apply_gravity(self, others=None):
        """Pull this moon's velocity one step toward every moon in *others*.

        On each axis the velocity changes by +/-1 toward each other moon's
        position; a moon compared with itself contributes 0, so including
        self in *others* is harmless.  Defaults to the module-level
        ``moons`` list for backward compatibility with the original
        global-based implementation.
        """
        if others is None:
            others = moons
        for other in others:
            # Identical logic for every axis, so loop instead of repeating.
            for axis in ('x', 'y', 'z'):
                mine = getattr(self.position, axis)
                theirs = getattr(other.position, axis)
                if mine > theirs:
                    setattr(self.velocity, axis, getattr(self.velocity, axis) - 1)
                elif mine < theirs:
                    setattr(self.velocity, axis, getattr(self.velocity, axis) + 1)

    def apply_velocity(self):
        """Advance the position by one time step of the current velocity."""
        self.position.x += self.velocity.x
        self.position.y += self.velocity.y
        self.position.z += self.velocity.z

    @property
    def potential_energy(self):
        # Sum of absolute position components.
        return abs(self.position.x) + abs(self.position.y) + abs(self.position.z)

    @property
    def kinetic_energy(self):
        # Sum of absolute velocity components.
        return abs(self.velocity.x) + abs(self.velocity.y) + abs(self.velocity.z)

    @property
    def total_energy(self):
        return self.potential_energy * self.kinetic_energy

    def __repr__(self):
        return 'Moon(position={0.position}, velocity={0.velocity}, potential={0.potential_energy}, kinetic={0.kinetic_energy}, total={0.total_energy})'.format(self)
moons = []
# Parse lines shaped like "<x=-1, y=0, z=2>": for each comma-separated
# chunk, take everything after '=' and strip a trailing '>'.
for i in inp:
    values = i.split(', ')
    new_values = []
    for v in values:
        # NOTE: the inner loop rebinds `i` to each character of the chunk,
        # shadowing the outer line variable (which is not used afterwards).
        for n, i in enumerate(v):
            if i == '=':
                new_val = v[n + 1:]
                if new_val.endswith('>'):
                    new_val = new_val[:-1]
                new_values.append(int(new_val))
    moons.append(Moon(Coordinates(*new_values)))
# Simulate 1000 time steps: update every velocity from gravity first,
# then move every moon by its updated velocity.
for _ in range(1000):
    for m in moons:
        m.apply_gravity()
    for m in moons:
        m.apply_velocity()
# Part 1 answer: total energy of the system after 1000 steps.
sum_of_total_energy = 0
for m in moons:
    sum_of_total_energy += m.total_energy
print(sum_of_total_energy)
| [
"28086837+fourjr@users.noreply.github.com"
] | 28086837+fourjr@users.noreply.github.com |
6645b7165fcf9af30c2a8d51e9f757db369891ef | b501a5eae1018c1c26caa96793c6ee17865ebb2d | /Mathematics/math/math_isclose.py | ada6e0ff230f6ec9d228a40940b3013e77b3fbb0 | [] | no_license | jincurry/standard_Library_Learn | 12b02f9e86d31ca574bb6863aefc95d63cc558fc | 6c7197f12747456e0f1f3efd09667682a2d1a567 | refs/heads/master | 2022-10-26T07:28:36.545847 | 2018-05-04T12:54:50 | 2018-05-04T12:54:50 | 125,447,397 | 0 | 1 | null | 2022-10-02T17:21:50 | 2018-03-16T01:32:50 | Python | UTF-8 | Python | false | false | 608 | py | import math
# Demonstrate math.isclose() with a relative tolerance: print one table row
# per (a, b, rel_tol) case showing the absolute difference, the allowed
# tolerance, and the resulting verdict.
INPUTS = [
    (1000, 900, 0.1),
    (100, 90, 0.1),
    (10, 9, 0.1),
    (1, 0.9, 0.1),
    (0.1, 0.09, 0.1),
]

# Header row and dashed separator.
print('{:^8} {:^8} {:^8} {:^8} {:^8} {:^8}'.format('a', 'b', 'rel_tol', 'abs(a-b)', 'tolerance', 'close'))
print('{:-^8} {:-^8} {:-^8} {:-^8} {:-^8} {:-^8}'.format('-', '-', '-', '-', '-', '-'))

fmt = '{:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {!s:>8}'
for left, right, tol in INPUTS:
    gap = abs(left - right)
    allowed = tol * max(abs(left), abs(right))
    verdict = math.isclose(left, right, rel_tol=tol)
    print(fmt.format(left, right, tol, gap, allowed, verdict))
| [
"jintao422516@gmail.com"
] | jintao422516@gmail.com |
2af3d30ab7952fb3b667c402629bb6083d31d3a3 | ee96dd041c00b7db4e6a2d401603db638be83ecd | /build_NN_classification.py | 8724ec820877cd9346b3b3cf8fd3d27edc00dcd2 | [] | no_license | huqinwei/tensorflow_demo | b8711b239638370b8190bdfe9d472633b55ca21a | 3b38e4a7e5770cfe607174b7b75b46f26e34b48b | refs/heads/master | 2020-03-29T02:10:51.441802 | 2019-03-05T07:23:54 | 2019-03-05T07:23:54 | 149,423,901 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py | #import add_layer
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data',one_hot=True)
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    """Add one fully connected layer to the graph and return its output tensor.

    Parameters:
        inputs: input tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        n_layer: integer used to build the TensorBoard scope/summary names.
        activation_function: optional elementwise activation; identity if None.
    """
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):  # fixed scope-name typo (was 'weightss')
            Weights = tf.Variable(tf.random_normal([in_size,out_size]),name='W')
            tf.summary.histogram(layer_name+'/weights', Weights)
        with tf.name_scope('biases'):
            # Small positive bias init avoids dead units with ReLU-like activations.
            biases = tf.Variable(tf.zeros([1,out_size]) + 0.1,name='b')
            tf.summary.histogram(layer_name+'/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.add(tf.matmul(inputs,Weights), biases)
            tf.summary.histogram(layer_name+'/Wx_plus_b', Wx_plus_b)
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name+'/outputs', outputs)
    return outputs
def compute_accuracy(v_xs, v_ys):
    """Return classification accuracy of the global `prediction` on (v_xs, v_ys).

    Relies on module-level graph objects: `prediction`, the `xs`/`ys`
    placeholders, and the active session `sess`.
    """
    global prediction
    y_pre = sess.run(prediction,{xs:v_xs})
    # A sample is correct when the argmax of the predicted distribution
    # matches the argmax of the one-hot label.
    correct_prediction = tf.equal(tf.argmax(y_pre,1),tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    result = sess.run(accuracy,{xs:v_xs,ys:v_ys})
    return result
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 784], name='x_input')  # flattened 28x28 images
    ys = tf.placeholder(tf.float32, [None, 10], name='y_input')   # one-hot digit labels

# Single softmax layer: 784 inputs -> 10 class probabilities.
# Fixed: the original used out_size=1, which cannot represent 10 classes.
prediction = add_layer(xs, 784, 10, n_layer=1, activation_function=tf.nn.softmax)

with tf.name_scope('loss'):
    # Cross-entropy between one-hot labels and predicted probabilities.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys*tf.log(prediction),reduction_indices=[1]))
    tf.summary.scalar('cross_entropy', cross_entropy)

with tf.name_scope('train'):
    # Fixed: the original minimized an undefined name `loss`.
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

init = tf.initialize_all_variables()

with tf.Session() as sess:
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("logs/", sess.graph)
    sess.run(init)
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, {xs: batch_xs, ys: batch_ys})
        if i % 50 == 0:
            # Fixed: the original fed undefined names `x_data`/`y_data` here.
            result = sess.run(merged, {xs: batch_xs, ys: batch_ys})
            writer.add_summary(result, i)
            print(compute_accuracy(mnist.test.images, mnist.test.labels))
| [
"qw072117@foxmail.com"
] | qw072117@foxmail.com |
21a8c8342612bd3b7458dbde9285992c3ed7430b | 4ee5e68bc23fb87a6362147fbe39da9a7da6a75f | /supra/templatetags/filters.py | 057c6e4e615a907fbea352adaf0c3631b62288df | [] | no_license | luismoralesp/supra | 0e835bbd8c582c347549938b194d89716e8121ed | c28a8ec3bfd09e91cfbb0a97caee768b924c8e5e | refs/heads/test | 2020-04-06T07:03:37.674998 | 2016-10-28T19:04:05 | 2016-10-28T19:04:05 | 45,873,850 | 8 | 3 | null | 2016-02-25T03:37:48 | 2015-11-09T23:27:41 | Python | UTF-8 | Python | false | false | 283 | py | from django import template
import json
register = template.Library()
@register.filter
def get_type(value):
    """Template filter: return the name of *value*'s type, e.g. 'dict'."""
    return type(value).__name__
#end def
@register.filter
def strip(dic):
    """Template filter: JSON-encode *dic*, shortening output longer than
    53 characters to its first 50 characters plus '...'."""
    text = json.dumps(dic)
    if len(text) <= 53:
        return text
    return text[:50] + "..."
#end def
"luismiguel.mopa@gmail.com"
] | luismiguel.mopa@gmail.com |
61ef75f1d48b5f63edbac404380693192963773d | 9951134436c1f98e3048caec9727fd33b6c57da1 | /New_stock_iterate.py | fa4b415f3984a6c91b3e37d94ab5af8ddd2e8ff9 | [] | no_license | buhijs/Udemy | 0bd33deb95cb3e212a948a3ebe5337756702e408 | 906bfc6cae849706009b172f1a424b53dc958118 | refs/heads/master | 2021-05-30T21:18:01.233869 | 2016-04-14T12:59:14 | 2016-04-14T12:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | __author__ = 'lyndsay.beaver'
stockList = ["ALR", "BTS", "CRK"]
priceList = [10, 20, 30]
numStocks = len(stockList)


def listStocks():
    """Print each stock with its current price.

    NOTE: changePrices() runs inside the loop (as in the original), so
    later stocks are listed at already-increased prices.
    """
    for i in range(numStocks):
        print("The stock is {} and it's price is {}".format(stockList[i], priceList[i]))
        changePrices(priceList)


def changePrices(priceList):
    """Raise every price in *priceList* by 3 (in place) and print the list."""
    for i in range(numStocks):
        priceList[i] += 3
    # Fixed: the original did print(priceList()) — a list is not callable,
    # which raised TypeError on the first call.
    print(priceList)


print(listStocks())
#changePrices()
| [
"lrbeaver@gmail.com"
] | lrbeaver@gmail.com |
64db0f593b6c14847cbf89ee2b73e28ec2b00cba | 32997e6a8607358765254ea81d2f867269ae2b35 | /01-algorithm-design-and-techniques/5_dynamic_programming/placing_parentheses.py | b1a78d984626fb7c3988af4ea4818d5d2bb989c8 | [
"MIT"
] | permissive | aman-singh7/training.computerscience.algorithms-datastructures | 0ace578ebcec13c5293b4d4dccdaa7634788604d | a4e1d1973b091589690fd2efc5dcb3c1a4df6c4c | refs/heads/master | 2023-06-09T12:27:55.569254 | 2021-06-29T20:16:37 | 2021-06-29T20:16:37 | 401,133,325 | 1 | 0 | MIT | 2021-08-29T20:12:50 | 2021-08-29T20:12:49 | null | UTF-8 | Python | false | false | 2,001 | py | def evalt(a, b, op):
if op == '+':
return a + b
elif op == '-':
return a - b
elif op == '*':
return a * b
else:
assert False
def get_maximum_value(dataset):
    """Return the maximum value obtainable by fully parenthesizing *dataset*.

    *dataset* is an arithmetic expression over single-digit operands and
    the operators '+', '-' and '*' (e.g. "5-8+7*4-8+9").  Classic interval
    dynamic programming: Max[r][c] and Min[r][c] hold the extreme values
    achievable for the sub-expression spanning operands r..c; both are
    tracked because '-' and '*' can turn a minimum into a maximum.
    """
    # Self-contained operator table: each candidate is evaluated exactly
    # once (the original called evalt twice per combination).
    apply_op = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
    }
    # Even string positions are operands, odd positions are operators.
    operands = [int(ch) for ch in dataset[::2]]
    operators = list(dataset[1::2])
    n = len(operands)
    Max = [[0] * n for _ in range(n)]
    Min = [[0] * n for _ in range(n)]
    for c in range(n):
        Max[c][c] = operands[c]
        Min[c][c] = operands[c]
    # i is the interval length minus one; r is the left endpoint.
    for i in range(1, n):
        for r in range(n - i):
            best_hi = None
            best_lo = None
            for j in range(i):
                # Split after operand r+j and combine every extreme of the
                # two halves.
                op = apply_op[operators[r + j]]
                candidates = (
                    op(Max[r][r + j], Max[r + j + 1][r + i]),
                    op(Max[r][r + j], Min[r + j + 1][r + i]),
                    op(Min[r][r + j], Min[r + j + 1][r + i]),
                    op(Min[r][r + j], Max[r + j + 1][r + i]),
                )
                hi = max(candidates)
                lo = min(candidates)
                if best_hi is None or hi > best_hi:
                    best_hi = hi
                if best_lo is None or lo < best_lo:
                    best_lo = lo
            Max[r][r + i] = best_hi
            Min[r][r + i] = best_lo
    return Max[0][n - 1]
if __name__ == "__main__":
print(get_maximum_value(input()))
#python3 placing_parentheses.py <<< "1+5"
#python3 placing_parentheses.py <<< "5-8+7*4-8+9" | [
"mamid1706@hotmail.fr"
] | mamid1706@hotmail.fr |
502477a4e0edcc05db5f0f5251d85d7836b889a3 | ee4db47ccecd23559b3b6f3fce1822c9e5982a56 | /Machine Learning/RecursiveTreeBuild.py | 163e4da49ba3fbd9d26ab9d27499332c5392538e | [] | no_license | meoclark/Data-Science-DropBox | d51e5da75569626affc89fdcca1975bed15422fd | 5f365cedc8d0a780abeb4e595cd0d90113a75d9d | refs/heads/master | 2022-10-30T08:43:22.502408 | 2020-06-16T19:45:05 | 2020-06-16T19:45:05 | 265,558,242 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | from tree import *
car_data = [['med', 'low', '3', '4', 'med', 'med'], ['med', 'vhigh', '4', 'more', 'small', 'high'], ['high', 'med', '3', '2', 'med', 'low'], ['med', 'low', '4', '4', 'med', 'low'], ['med', 'low', '5more', '2', 'big', 'med'], ['med', 'med', '2', 'more', 'big', 'high'], ['med', 'med', '2', 'more', 'med', 'med'], ['vhigh', 'vhigh', '2', '2', 'med', 'low'], ['high', 'med', '4', '2', 'big', 'low'], ['low', 'low', '2', '4', 'big', 'med']]
car_labels = ['acc', 'acc', 'unacc', 'unacc', 'unacc', 'vgood', 'acc', 'unacc', 'unacc', 'good']
def find_best_split(dataset, labels):
    """Return (best_feature_index, best_information_gain) for *dataset*.

    Tries splitting on every feature column (via the imported ``split``
    helper) and keeps the feature with the highest information gain.
    Returns gain 0 when no split improves on the current labels.
    """
    best_gain = 0
    best_feature = 0
    for feature in range(len(dataset[0])):
        data_subsets, label_subsets = split(dataset, labels, feature)
        gain = information_gain(labels, label_subsets)
        if gain > best_gain:
            best_gain, best_feature = gain, feature
    return best_feature, best_gain
def build_tree(data, labels):
    """Recursively build an ID3-style decision tree.

    Returns a Counter of labels at a leaf (no split yields information
    gain), otherwise a list of sub-branches, one per value of the best
    splitting feature.
    """
    best_feature, best_gain = find_best_split(data, labels)
    if best_gain == 0:
        # No informative split remains: this node is a leaf.
        return Counter(labels)
    data_subsets, label_subsets = split(data, labels, best_feature)
    branches = []
    for i in range(len(data_subsets)):
        branch = build_tree(data_subsets[i], label_subsets[i])
        branches.append(branch)
    return branches
tree = build_tree(car_data, car_labels)
print_tree(tree) | [
"oluchukwuegbo@gmail.com"
] | oluchukwuegbo@gmail.com |
69b6a59b11956fb42d05d33a988cd428f2e0450c | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-sddp/aliyunsdksddp/request/v20190103/ModifyRuleRequest.py | 5b55af75e7b290d917a5b0f766d261f402ebe205 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 2,387 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyRuleRequest(RpcRequest):
	"""RPC request for the SDDP ``ModifyRule`` action (API version 2019-01-03).

	Each get_*/set_* pair below wraps one query parameter of the request,
	following the generated Aliyun SDK accessor convention.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'Sddp', '2019-01-03', 'ModifyRule','sddp')

	def get_SourceIp(self):
		return self.get_query_params().get('SourceIp')

	def set_SourceIp(self,SourceIp):
		self.add_query_param('SourceIp',SourceIp)

	def get_FeatureType(self):
		return self.get_query_params().get('FeatureType')

	def set_FeatureType(self,FeatureType):
		self.add_query_param('FeatureType',FeatureType)

	def get_Name(self):
		return self.get_query_params().get('Name')

	def set_Name(self,Name):
		self.add_query_param('Name',Name)

	def get_Id(self):
		return self.get_query_params().get('Id')

	def set_Id(self,Id):
		self.add_query_param('Id',Id)

	def get_RiskLevelId(self):
		return self.get_query_params().get('RiskLevelId')

	def set_RiskLevelId(self,RiskLevelId):
		self.add_query_param('RiskLevelId',RiskLevelId)

	def get_Lang(self):
		return self.get_query_params().get('Lang')

	def set_Lang(self,Lang):
		self.add_query_param('Lang',Lang)

	def get_CustomType(self):
		return self.get_query_params().get('CustomType')

	def set_CustomType(self,CustomType):
		self.add_query_param('CustomType',CustomType)

	def get_Category(self):
		return self.get_query_params().get('Category')

	def set_Category(self,Category):
		self.add_query_param('Category',Category)

	def get_Content(self):
		return self.get_query_params().get('Content')

	def set_Content(self,Content):
		self.add_query_param('Content',Content)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
744f5b286ec4b2c4a600f5e1080b3bc897f0bcdb | d0f6474efe0372966d6469a194449fab0c405450 | /nextstrain/cli/__init__.py | ebe57f3ec63c3b9cefbb2ae6a2278a3505cffc3a | [
"MIT",
"LicenseRef-scancode-free-unknown"
] | permissive | ttung/cli | 5323b866c8b4392d1d456bc2f936619fce15abd3 | 799f1e8cffeb580a4178cca3576369c031ee97da | refs/heads/master | 2022-07-20T10:35:00.362131 | 2020-05-19T19:13:55 | 2020-05-19T19:16:37 | 265,400,087 | 0 | 0 | MIT | 2020-05-20T00:11:26 | 2020-05-20T00:11:25 | null | UTF-8 | Python | false | false | 2,162 | py | """
Nextstrain command-line tool
"""
import sys
import argparse
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, RawDescriptionHelpFormatter
from types import SimpleNamespace
from .argparse import register_commands, register_default_command
from .command import build, view, deploy, remote, shell, update, check_setup, version
from .errors import NextstrainCliError
from .util import warn
from .__version__ import __version__
class HelpFormatter(ArgumentDefaultsHelpFormatter, RawDescriptionHelpFormatter):
    """Argparse formatter that shows defaults and keeps raw description text."""
    pass
def run(args):
    """
    Command-line entrypoint to the nextstrain-cli package, called by the
    `nextstrain` program.

    Takes the argument list (without the program name), dispatches to the
    selected subcommand, and returns its exit status; returns 1 when a
    NextstrainCliError is raised.
    """
    parser = ArgumentParser(
        prog            = "nextstrain",
        description     = __doc__,
        formatter_class = HelpFormatter,
    )

    # Maintain these manually for now while the list is very small.  If we need
    # to support pluggable commands or command discovery, we can switch to
    # using the "entry points" system:
    #    https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins
    #
    commands = [
        build,
        view,
        deploy,
        remote,
        shell,
        update,
        check_setup,
        version,
    ]

    register_default_command(parser)
    register_commands(parser, commands)
    # Make `nextstrain --version` behave like the "version" subcommand.
    register_version_alias(parser)

    opts = parser.parse_args(args)

    try:
        return opts.__command__.run(opts)

    except NextstrainCliError as error:
        warn(error)
        return 1
def register_version_alias(parser):
    """
    Add --version as a (hidden) alias for the version command.

    It's not uncommon to blindly run a command with --version as the sole
    argument, so its useful to make that Just Work.
    """
    class run_version_command(argparse.Action):
        def __call__(self, *args, **kwargs):
            # Run the version subcommand and exit immediately, mirroring
            # argparse's built-in "version" action behaviour.
            opts = SimpleNamespace(verbose = False)
            sys.exit( version.run(opts) )

    parser.add_argument(
        "--version",
        nargs  = 0,
        help   = argparse.SUPPRESS,  # keep the alias out of --help output
        action = run_version_command)
"tsibley@fredhutch.org"
] | tsibley@fredhutch.org |
2f62b0f9e7462fc48a8c3bb3121616478a93a383 | b48efb658fe8db0568142149ef1a71df882e9328 | /_unittests/ut_ipythonhelper/test_ipythonhelper.py | 3b52ebcaba1a2a70e4253cd1352a42da7adf2aa6 | [] | no_license | ped4747/pyquickhelper | c76547ee74866bd5090e5e431c20c8788c331db4 | a682f545ae1c33599b1fe21e623965c4e1043f8d | refs/heads/master | 2021-01-22T04:28:11.222844 | 2014-12-09T18:20:32 | 2014-12-09T18:20:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | """
@brief test log(time=2s)
"""
import sys, os, unittest, re
try :
import src
except ImportError :
path = os.path.normpath(os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", "..")))
if path not in sys.path : sys.path.append (path)
import src
from src.pyquickhelper import AutoCompletion, fLOG, AutoCompletionFile
class TestAutoCompletion (unittest.TestCase):
    """Unit tests for the AutoCompletion tree and the AutoCompletionFile helper."""

    def test_completion(self) :
        # Build a small completion tree (2 names, 3 methods) and verify its
        # string rendering and node count.
        fLOG (__file__, self._testMethodName, OutputPrint = __name__ == "__main__")
        root = AutoCompletion()
        cl = root._add("name", "TestAutoCompletion")
        cl._add("method", "test_completion")
        cl._add("method2", "test_completion")
        cl = root._add("name2", "TestAutoCompletion2")
        cl._add("method3", "test_completion")
        s = str(root)
        fLOG("\n"+s)
        assert " | |- method2" in s
        l = len(root)
        fLOG("l=",l)
        # root + 2 names + 3 methods = 6 nodes
        assert l == 6
        fLOG(root._)

    def test_completion_file(self):
        # The parent of this test directory should expand to more than 30
        # completion entries.
        fLOG (__file__, self._testMethodName, OutputPrint = __name__ == "__main__")
        fold = os.path.abspath(os.path.split(__file__)[0])
        fold = os.path.join(fold, "..")
        this = AutoCompletionFile(fold)
        l = len(this)
        assert l > 30
if __name__ == "__main__" :
unittest.main ()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
ebfb5cb12cc58679ddcbb8367d48caa94a8d039e | a64b8fc6c9e81d433878009249fe9c9a109a602c | /sa/profiles/Eltex/MES/get_metrics.py | b0edfc256941a35f09eadd7e97286d65fd08a9fc | [
"BSD-3-Clause"
] | permissive | ewwwcha/noc | d1de6fe1d556e0f14a0dd31c600844cf43c96728 | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | refs/heads/master | 2020-07-29T10:10:30.862660 | 2019-09-20T07:54:52 | 2019-09-20T07:54:52 | 209,755,887 | 1 | 0 | NOASSERTION | 2019-09-20T09:36:22 | 2019-09-20T09:36:22 | null | UTF-8 | Python | false | false | 494 | py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Eltex.MES.get_metrics
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript
class Script(GetMetricsScript):
    """Eltex MES metric-collection script.

    Inherits all collection behaviour unchanged from the generic
    ``get_metrics`` script; only the profile-qualified name is set here.
    """
    name = "Eltex.MES.get_metrics"
| [
"aversanta@gmail.com"
] | aversanta@gmail.com |
3dcc4de9bc3dbfbb1fb1d138ca14e8ac2d8e1185 | 66e6360325b781ed0791868765f1fd8a6303726f | /TB2009/WorkDirectory/5215 Shower Cluster/Clustering_108533.py | 35234a68ee0cd2199e2e07aee60354a4abdafb1d | [] | no_license | alintulu/FHead2011PhysicsProject | c969639b212d569198d8fce2f424ce866dcfa881 | 2568633d349810574354ad61b0abab24a40e510e | refs/heads/master | 2022-04-28T14:19:30.534282 | 2020-04-23T17:17:32 | 2020-04-23T17:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,753 | py | import FWCore.ParameterSet.Config as cms
# CMSSW job for HCAL test-beam run 108533: unpack the raw data, apply the
# event-selection filters, then build and histogram shower clusters.
process = cms.Process("Clustering")
# Process every event in the input file (-1 = no limit).
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
process.source = cms.Source("HcalTBSource",
    fileNames = cms.untracked.vstring("file:/tmp/chenyi/HTB_108533.root"),
    streams = cms.untracked.vstring('Chunk699', 'HCAL_Trigger', 'HCAL_SlowData', 'HCAL_QADCTDC', 'HCAL_DCC021')
)
# Unpack test-beam objects from the listed FEDs.
process.tbunpack = cms.EDFilter("HcalTBObjectUnpacker",
    #IncludeUnmatchedHits = cms.untracked.bool(False),
    HcalTriggerFED = cms.untracked.int32(1),
    HcalVLSBFED = cms.untracked.int32(699),
    HcalTDCFED = cms.untracked.int32(8),
    HcalQADCFED = cms.untracked.int32(8),
    HcalSlowDataFED = cms.untracked.int32(3),
    ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)
# VLSB sample window, pedestal baseline and MIP calibration; only
# motherboard 0 is used for this run.
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
    minSample = cms.untracked.uint32(0),
    maxSample = cms.untracked.uint32(31),
    baselineSamples = cms.untracked.uint32(2),
    mip = cms.untracked.string("MIP_EarlyRejection.txt"),
    beamEnergy = cms.untracked.double(0),
    useMotherBoard0 = cms.untracked.bool(True),
    useMotherBoard1 = cms.untracked.bool(False),
    useMotherBoard2 = cms.untracked.bool(False),
    useMotherBoard3 = cms.untracked.bool(False),
    usePedestalMean = cms.untracked.bool(False),
    adcMap = cms.untracked.string("FinalAdcMapping_All.txt")
)
process.ABCcut = cms.EDFilter("SingleTowerParticleFilter")
# Throttle framework logging to one report every 100 events.
process.MessageLogger = cms.Service("MessageLogger",
    default = cms.untracked.PSet(
        reportEvery = cms.untracked.int32(100)
    )
)
process.producecluster = cms.EDProducer("ShowerClusterProducer",
    threshold = cms.untracked.double(0.2)
)
process.fillcluster = cms.EDAnalyzer("FillShowerClusterDistributions",
    ntuple = cms.untracked.bool(True),
    output = cms.untracked.string('ClusterInfo_108533.root')
)
process.averagecharge = cms.EDAnalyzer("FillAverageChargeLayerAnalyzer",
    output = cms.untracked.string("TotalEnergy_ABC_MinEnergy_108533.root"),
    textOutput = cms.untracked.bool(True),
    interpolate = cms.untracked.bool(False)
)
process.runinfo = cms.EDProducer("RunInformationProducer",
    beamEnergy = cms.untracked.double(0)
)
process.hitcut = cms.EDFilter("HitXFilter",
    maximum = cms.untracked.double(-5)
)
process.muonveto = cms.EDFilter("MuonVetoFilter")
process.timecut = cms.EDFilter("HighestSampleTimeFilter",
    minimum = cms.untracked.double(7.5),
    threshold = cms.untracked.double(100)
)
# Full path: unpack -> selection cuts -> clustering -> analyzers.
process.p = cms.Path(
    process.tbunpack *
    process.ABCcut *
    process.muonveto *
    process.vlsbinfo *
    process.runinfo *
    process.hitcut *
    process.timecut *
    process.producecluster *
    process.fillcluster *
    process.averagecharge
)
| [
"yichen@positron01.hep.caltech.edu"
] | yichen@positron01.hep.caltech.edu |
0393653b9a1a13e7813717400b57724b668f6fdf | b50ed116481285025f8d8179397ec5b6e5b1471b | /setup.py | b53cc1b40c9ec8db1b70a569322d61c9f8ac81ef | [
"MIT"
] | permissive | dannguyen/csvmedkit | 97b2103dedd2c06c68086f5cb36aa1919e29b61d | d7e01210bc2d1cf55edfee79d2c742c73d426a50 | refs/heads/main | 2023-02-02T20:12:58.720677 | 2020-12-24T00:21:56 | 2020-12-24T00:21:56 | 300,771,409 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | #!/usr/bin/env python
import os
import sys
from setuptools import setup
HERE_PATH = os.path.abspath(os.path.dirname(__file__))

# Load package metadata (title, version, author, ...) from __about__.py
# without importing the package itself.
ABOUT = {}
with open(os.path.join(HERE_PATH, "csvmedkit", "__about__.py"), "r") as f:
    exec(f.read(), ABOUT)

# The README doubles as the PyPI long description.
with open("README.rst", "r") as f:
    README = f.read()

# Runtime dependencies.
install_requires = [
    "csvkit",
    "python-slugify>=4.0",
    "regex>=2020.7.14",
]

# Extras for development, testing and docs (pip install .[dev]).
dev_requires = [
    "coverage>=4.4.2",
    "nose>=1.1.2",
    "parameterized",
    "sphinx>=1.0.7",
    "sphinx_rtd_theme",
    "tox>=3.1.0",
]

setup(
    name=ABOUT["__title__"],
    version=ABOUT["__version__"],
    description=ABOUT["__description__"],
    author=ABOUT["__author__"],
    author_email=ABOUT["__author_email__"],
    url=ABOUT["__url__"],
    long_description=README,
    long_description_content_type="text/x-rst",
    project_urls={
        "Documentation": "https://csvmedkit.readthedocs.io/en/latest/",
    },
    license="MIT",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: End Users/Desktop",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities",
    ],
    packages=[
        "csvmedkit",
    ],
    # One console command per csvmedkit utility.
    entry_points={
        "console_scripts": [
            "csvflatten = csvmedkit.utils.csvflatten:launch_new_instance",
            "csvheader = csvmedkit.utils.csvheader:launch_new_instance",
            "csvnorm = csvmedkit.utils.csvnorm:launch_new_instance",
            "csvpivot = csvmedkit.utils.csvpivot:launch_new_instance",
            "csvsed = csvmedkit.utils.csvsed:launch_new_instance",
            "csvslice = csvmedkit.utils.csvslice:launch_new_instance",
        ]
    },
    install_requires=install_requires,
    extras_require={"dev": dev_requires},
)
| [
"dansonguyen@gmail.com"
] | dansonguyen@gmail.com |
89f3427eae1e65abf7426e232dfbcbcded54d17e | 05bccf429f42877e29a8e454ce837f94af4e2b57 | /dog/ext/monitoring/abalbots.py | a0166fc53eb4acb1c197f7e62177c3fd0b836fa0 | [
"MIT"
] | permissive | lun-4/dogbot | 599b87a817ce6f479966421bc63cf51d37638ca6 | d11b00882fae5763d775f10bdaf107a0d7b2c96c | refs/heads/master | 2021-06-20T16:02:11.651312 | 2017-07-27T19:33:37 | 2017-07-27T19:33:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | """
bots.discord.pw reporting for Dogbot.
"""
import asyncio
import logging
from dog import Cog
logger = logging.getLogger(__name__)
class Abalbots(Cog):
    """Cog that periodically reports the bot's guild count to bots.discord.pw.

    NOTE(review): this was originally declared with ``def`` instead of
    ``class``, so ``Abalbots(bot)`` returned ``None`` and ``bot.add_cog``
    received nothing usable; fixed to a proper class. ``asyncio`` was also
    used below without ever being imported.
    """

    def __init__(self, bot):
        super().__init__(bot)
        # Seconds between reports (10 minutes).
        self.reporting_interval = 60 * 10
        # Background task that runs for the lifetime of the cog.
        self.reporting_task = bot.loop.create_task(self.report())

    def __unload(self):
        # Called when the cog is unloaded; stop the background reporter.
        logger.debug('Cancelling abal bot reporter task.')
        self.reporting_task.cancel()

    async def report(self):
        """POST the current guild count to the stats endpoint forever."""
        logger.debug('Abal bot reporter task started.')
        endpoint = f'https://bots.discord.pw/api/bots/{self.bot.user.id}/stats'
        headers = {'Authorization': self.bot.cfg['monitoring']['discordpw_token']}
        while True:
            logger.info('POSTing guild count to abal\'s website...')
            guilds = len(self.bot.guilds)
            # HTTP POST to the endpoint
            async with self.bot.session.post(endpoint, json={'server_count': guilds}, headers=headers) as resp:
                if resp.status != 200:
                    # Non-200 responses are common here; log and keep going.
                    logger.warning('Failed to post guild count, ignoring. (HTTP %d)', resp.status)
                else:
                    logger.info('Posted guild count successfully! (%d guilds)', guilds)
            await asyncio.sleep(self.reporting_interval)
def setup(bot):
    """discord.py extension entry point: attach the Abalbots cog to *bot*.

    Registration is skipped when no ``discordpw_token`` is present under the
    ``monitoring`` section of the bot config.
    """
    if 'discordpw_token' not in bot.cfg['monitoring']:
        logger.warning('Not going to submit guild count to Abal\'s website, not configured.')
        return
    bot.add_cog(Abalbots(bot))
| [
"cheesy.fried.bacon@gmail.com"
] | cheesy.fried.bacon@gmail.com |
c788d7bb860941601a440e2c16d2a898909c2fbc | 0dcf78e319956f2cb2327c5cb47bd6d65e59a51b | /Python3/Array/ContainerWithMostWater/TwoPointer2_011.py | b8b1b9cea0d235dbee78a99774355591781bdd9e | [] | no_license | daviddwlee84/LeetCode | 70edd09a64a6f61492aa06d927e1ec3ab6a8fbc6 | da1774fd07b7326e66d9478b3d2619e0499ac2b7 | refs/heads/master | 2023-05-11T03:16:32.568625 | 2023-05-08T05:11:57 | 2023-05-09T05:11:57 | 134,676,851 | 14 | 4 | null | 2018-05-29T14:50:22 | 2018-05-24T07:18:31 | Python | UTF-8 | Python | false | false | 948 | py | from typing import List
class Solution:
    def maxArea(self, height: List[int]) -> int:
        """Two-pointer scan for the largest water container.

        The area between two lines is bounded by the shorter one, so moving
        the pointer at the shorter line inward can never discard the optimum;
        each step therefore shrinks the window while tracking the best area.
        """
        best = 0
        lo, hi = 0, len(height) - 1
        while lo < hi:
            width = hi - lo
            if height[lo] < height[hi]:
                best = max(best, width * height[lo])
                lo += 1
            else:
                best = max(best, width * height[hi])
                hi -= 1
        return best
# Runtime: 156 ms, faster than 95.65% of Python3 online submissions for Container With Most Water.
# Memory Usage: 16.4 MB, less than 90.84% of Python3 online submissions for Container With Most Water.
| [
"daviddwlee84@gmail.com"
] | daviddwlee84@gmail.com |
8e14429861b08ec17fc66d3811935f63d1462738 | ae67b9d90db114c1e15ce63ee0d27942d999a83b | /ask-smapi-model/ask_smapi_model/v1/skill/publication/skill_publication_response.py | 85c9e4d4ae28462fff783a4710c7c7f73598f020 | [
"Apache-2.0"
] | permissive | Birds-Awesome-Org/alexa-apis-for-python | ecb2e351b5cb1b341dda5c3ebc38927fa6d66a93 | d22c1712cb53a442b72f830f53d97ef66075750b | refs/heads/master | 2022-12-30T04:37:51.214040 | 2020-10-09T21:41:03 | 2020-10-09T21:41:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,005 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_smapi_model.v1.skill.publication.skill_publication_status import SkillPublicationStatus as Publication_SkillPublicationStatusV1
class SkillPublicationResponse(object):
    """Response model describing a skill publication request.

    :param publishes_at_date: Used to determine when the skill Publishing should start.
    :type publishes_at_date: (optional) datetime
    :param status:
    :type status: (optional) ask_smapi_model.v1.skill.publication.skill_publication_status.SkillPublicationStatus
    """
    # SDK convention: attribute name -> type string used during deserialization.
    deserialized_types = {
        'publishes_at_date': 'datetime',
        'status': 'ask_smapi_model.v1.skill.publication.skill_publication_status.SkillPublicationStatus'
    } # type: Dict
    # SDK convention: Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'publishes_at_date': 'publishesAtDate',
        'status': 'status'
    } # type: Dict
    supports_multiple_types = False
    def __init__(self, publishes_at_date=None, status=None):
        # type: (Optional[datetime], Optional[Publication_SkillPublicationStatusV1]) -> None
        """
        :param publishes_at_date: Used to determine when the skill Publishing should start.
        :type publishes_at_date: (optional) datetime
        :param status:
        :type status: (optional) ask_smapi_model.v1.skill.publication.skill_publication_status.SkillPublicationStatus
        """
        self.__discriminator_value = None # type: str
        self.publishes_at_date = publishes_at_date
        self.status = status
    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {} # type: Dict
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert element-wise: nested models via to_dict(), enums via .value.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Convert dict values the same way, keeping the original keys.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, SkillPublicationResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| [
"ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com"
] | ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com |
9c57eff0b1b90178bbe6823152f3fe492af6b0ed | 35ffeb303a9a14dab74e49882160a480d62d83e1 | /aoc/day_02_pt2.py | eee9d5f7e3ec7fb6f92e0e7aab953671e0d0e0f3 | [] | no_license | fbidu/aoc-2020 | a8fea2e26d1c9c01f2b580f668d5893b4f116e63 | 44fed66314d803cbb52191dc3e1a032d63cf5f0d | refs/heads/main | 2023-04-07T14:26:51.826217 | 2023-03-28T17:55:53 | 2023-03-28T17:55:53 | 317,533,642 | 1 | 0 | null | 2021-01-28T11:20:50 | 2020-12-01T12:26:37 | Python | UTF-8 | Python | false | false | 1,883 | py | """
Day 02
- Given a list of passwords like
```
1-3 a: abcde
1-3 b: cdefg
2-9 c: ccccccccc
.
.
.
x-y char: password
```
- Returns how many of them are valid
A valid password is:
- Has `char` at EITHER position `x-1` OR `y-1`
- That is `1-3 a: abcde` is valid because `a` is in char 0 but not on 2
- `2-9 c: ccccccccc` is invalid because `c` appears at both 1 and 8
"""
import click
def parse(line):
    """Split a puzzle line into its policy part and its password part.

    >>> parse("1-3 a: abcde")
    ['1-3 a', 'abcde']
    """
    policy_and_password = line.split(": ")
    return policy_and_password
def brute_force(lines):
    """Count how many puzzle lines satisfy the positional password policy.

    A line ``x-y c: password`` is valid when the character ``c`` appears at
    exactly one of the (1-based) positions ``x`` and ``y``.

    >>> brute_force(["1-3 a: abcde", "1-3 b: cdefg", "2-9 c: ccccccccc"])
    1
    """

    def is_valid(line):
        # Split "x-y c: password" into its policy and password parts.
        pattern, password = line.split(": ")
        x_y, target_char = pattern.split()
        x, y = [int(z) - 1 for z in x_y.split("-")]
        # XOR via addition: the sum is 1 only when exactly one position matches.
        return (password[x] == target_char) + (password[y] == target_char) == 1

    return sum(is_valid(line) for line in lines)
@click.command()
@click.option("--filename", default="input.txt")
def main(filename):
    """
    Read puzzle lines from *filename* and print how many passwords are valid.
    """
    with open(filename) as f:
        number_list = f.read()
    number_list = number_list.split("\n")
    print(brute_force(number_list))
if __name__ == "__main__":
    # NOTE(review): click commands build their arguments from sys.argv, so the
    # explicit None here is presumably ignored -- confirm against click's docs.
    main(None)
| [
"felipe@felipevr.com"
] | felipe@felipevr.com |
653ec766bca7edf105c483e3453d4c1bd03e2271 | 43ceea54c8b02ad28292dd72fd57ee32329d6595 | /TestEnron.py | a1a9836ac39d8c9bf72e5175f9d740cc316eda5c | [] | no_license | HeYuanjiao/MLDF | 5a91759e1e6ab4161667cf8994350feb8cc2a9e4 | fc498d3fee99fe549c76d1087ceaf72e59e6c524 | refs/heads/main | 2023-01-28T17:17:44.046035 | 2020-12-09T03:09:21 | 2020-12-09T03:09:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,447 | py | # -*- coding=utf-8 -*-
import csv
import numpy as np
from sklearn.utils import shuffle
from learner.cascade import Cascade
from learner.measure import *
# Randomly permute the sample indices and split them into train/test halves.
def shuffle_index(num_samples):
    # Index sequence 0..num_samples-1 (the original dataset had 502 samples).
    a = range(0, num_samples)
    # sklearn.utils.shuffle returns the elements of `a` in a random order.
    a = shuffle(a)
    # Half of the samples, rounded up.
    length = int((num_samples + 1) / 2)
    # The first half becomes the training set...
    train_index = a[:length]
    # ...and the remaining indices become the test set.
    test_index = a[length:]
    return [train_index, test_index]
# Load the feature and label matrices and split them into train/test halves.
def load_csv(data_csv=r'D:\Pycharm2020.1.3\WorkSpace\MLDF\dataset\enron_data.csv',
             label_csv=r'D:\Pycharm2020.1.3\WorkSpace\MLDF\dataset\enron_label.csv'):
    """Read the dataset from CSV files and return a shuffled train/test split.

    The previously hard-coded absolute paths are now overridable parameters
    (the old paths remain the defaults, so existing ``load_csv()`` calls are
    unaffected).

    :param data_csv: path of the feature CSV, one row per sample.
    :param label_csv: path of the label CSV, one row per sample.
    :return: [train_data, train_label, test_data, test_label]
    """
    with open(data_csv, encoding='utf-8') as f:
        data = np.loadtxt(f, str, delimiter=",")
    with open(label_csv, encoding='utf-8') as f:
        label = np.loadtxt(f, str, delimiter=",")
    # Values load as strings; cast the labels to integers (astype copies).
    label = label.astype("int")
    # Number of rows == number of samples.
    num_samples = len(data)
    # shuffle_index yields two disjoint random index lists covering all rows;
    # using the same lists for data and label keeps samples aligned with
    # their label rows.
    train_index, test_index = shuffle_index(num_samples)
    train_data = data[train_index]
    train_label = label[train_index]
    test_data = data[test_index]
    test_label = label[test_index]
    print("加载yeast数据集完成!!!")
    return [train_data, train_label, test_data, test_label]
if __name__ == '__main__':
    dataset = "yeast"
    # Load the train/test data and labels from the CSV files.
    train_data, train_label, test_data, test_label = load_csv()
    # Build the cascade: 2 forests per layer, at most 20 layers,
    # 6-fold cross validation, growth step of 3.
    model = Cascade(dataset, max_layer=20, num_forests=2, n_fold=6, step=3)
    # Train against the hamming-loss measure with 40 trees per forest.
    model.train(train_data, train_label, "hamming loss", n_estimators=40)
    test_prob = model.predict(test_data, "hamming loss")
    value = do_metric(test_prob, test_label, 0.5)
    meatures = ["hamming loss", "one-error", "coverage", "ranking loss", "average precision", "macro-auc"]
    res = zip(meatures, value)
    for item in res:
        print(item)
| [
"15097686925@163.com"
] | 15097686925@163.com |
edf216cf0a8e8f4f5dc73287e1c571cf3225d426 | 9b695bda060d67d3b9d00e566daa7c6dacf6b806 | /Chapter 38 Classes/38_2_Bound_unbound_and_static_methods.py | 61e2e5bcfb6b41fbd9b6d3421f8488ee2578a013 | [] | no_license | erauner12/python-for-professionals | cc6409348e27d894e2cde8e75c14a5d57aa3f852 | d50a14806cd0ad0727ed28facb8423bf714c4383 | refs/heads/main | 2023-08-05T03:49:18.890747 | 2021-09-22T23:53:19 | 2021-09-22T23:53:19 | 406,197,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | import inspect
class A(object):
    def f(self, x):
        # A plain function stored on the class; doubles whatever it is given.
        return 2 * x
print(A.f)
print(inspect.isfunction(A.f))
# True
print(inspect.ismethod(A.f))
# False
print(A.f(1, 7))
# Out : 14
a = A()
print(A.f(a, 20))
# Out: 40
a = A()
print(a.f)
# <bound method A.f of <__main__.A object at ...>>
print(a.f is a.f) # False
# <bound method A.f of <__main__.A object at ...>>
print(a.f(2))
# 4
a.f = a.f
print(a.f is a.f) # True
print("---")
# class methods and static methods – special kinds of methods.
class D(object):
    # Class-level factor; instances may shadow it with their own value.
    multiplier = 2

    @classmethod
    def f(cls, x):
        """Scale *x* by the multiplier looked up on the class object *cls*."""
        factor = cls.multiplier
        return factor * x

    @staticmethod
    def g(name):
        """Print a greeting for *name*."""
        greeting = "Hello, %s" % name
        print(greeting)
# It is worth noting that at the lowest level, functions, methods, staticmethods, etc. are actually descriptors that
# invoke __get__, __set__ and optionally __del__ special methods. For more details on classmethods and
# staticmethods:
print(D.f)
# <bound method type.f of <class '__main__.D'>>
print(D.f(12))
# 24
print(D.g)
# <function D.g at ...>
D.g("world")
# Hello, world
d = D()
d.multiplier = 1337 # print the multiplier after changing it for this instance
print((D.multiplier, d.multiplier))
# (2, 1337)
print(d.f) # <bound method D.f of <class '__main__.D'>>
print(d.f(10)) # Here we passing in a value to be multiplied by the class multiplier
# 20
| [
"erauner@medallia.com"
] | erauner@medallia.com |
95401a208e85c00b0c4519afca50543237ab22a6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_flab.py | 6d1e9d7646f7098ee30ab0ff599f434e7b0908b7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py |
# class header
class _FLAB():
def __init__(self,):
self.name = "FLAB"
self.definitions = [u"soft, loose flesh on someone's body: "]
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8d2238fd33fe5f1aede11eefb952cc8bb0d717ca | 8be083e9fbf15606201217d6c4b87c929e418065 | /trunk/build/dependencies.py | 135b53e7963783b34f8bc1dbfd60275835535532 | [
"Apache-2.0"
] | permissive | BGCX067/faint-graphics-editor-svn-to-git | 430768d441f3e9b353fbc128e132f7406ee48c0e | dad252f820d29ab336bcfa57138625dae6dfed60 | refs/heads/master | 2021-01-13T00:56:26.685520 | 2015-12-28T14:22:44 | 2015-12-28T14:22:44 | 48,752,914 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import sys
import faint_info
import subprocess
build_dir = os.path.split(os.path.realpath(__file__))[0]
os.chdir(build_dir) # Fixme: Don't change dir, use absolute paths.
root_dir = os.path.split(build_dir)[0]
sys.path.append(os.path.join(root_dir, "build-sys"))
import build_sys.dependencies as dependencies
import build_sys.gv as gv
def unflat_header_dependencies(root, count_only, headers_only):
    """Print each header together with the files that include it directly.

    Headers are listed most-included first. With *count_only* only the
    dependent counts are printed; with *headers_only* the per-header listing
    is restricted to .hh files.
    """
    deps = dependencies.find_header_dependencies_all(root, faint_info.get_src_folders())
    print("Header->direct dependents!")
    print()
    # Most-included headers first.
    ordered = sorted(deps.keys(), key=lambda header: -len(deps[header]))
    if count_only:
        for header in ordered:
            print(header, len(deps[header]))
    else:
        for header in ordered:
            print("%s (%d):" % (header.replace(root, ""), len(deps[header])))
            for dependent in sorted(deps[header]):
                if not headers_only or dependent.endswith(".hh"):
                    print("    ", dependent.replace(root, ""))
            print()
def flat_header_dependencies(root, count_only):
    """Print each header together with every file that (transitively) sees it.

    Headers with the most dependents come first; with *count_only* the
    per-header file listing is suppressed.
    """
    deps = dependencies.get_flat_header_dependencies(root, faint_info.get_src_folders())
    print("Header->dependents (recursive)")
    print()
    for header in sorted(deps, key=lambda h: -len(deps[h])):
        print("%s (%d):" % (header.replace(root, ""), len(deps[header])))
        if count_only:
            continue
        for dependent in sorted(deps[header]):
            print("    ", dependent.replace(root, ""))
def who_includes_graph(root, name):
    """Render a PNG include-graph for header *name* via Graphviz.

    Writes <basename>.dot with the "who includes this" graph, then runs
    ``dot -Tpng`` with stdout redirected into <basename>.png.
    """
    include_dict = dependencies.find_header_dependencies_all(root,
      faint_info.get_src_folders())
    dot_name = os.path.basename(name).replace(".hh", ".dot")
    png_name = dot_name.replace(".dot", ".png")
    with open(dot_name, 'w') as f:
        f.write(gv.who_includes(include_dict, name))
    # Review fixes: the PNG handle was opened but never closed (leaked on
    # every call), and the command was passed as a single string with
    # shell=False, which is only interpreted correctly on Windows. Use a
    # context manager and an argument list (portable and quoting-safe).
    with open(png_name, 'wb') as out:
        subprocess.call(["dot", "-Tpng", dot_name], stdout=out)
if __name__ == '__main__':
    root = faint_info.FAINT_ROOT
    # The report functions strip `root` from paths, so ensure it ends in "/".
    if not root.endswith("/"):
        root += "/"
    count_only = '--count' in sys.argv
    # --flat: recursive dependents; --gv: render a graph for one header;
    # default: direct dependents (optionally restricted to headers via --hh).
    if '--flat' in sys.argv:
        flat_header_dependencies(root, count_only)
    elif '--gv' in sys.argv:
        who_includes_graph(root, sys.argv[1])
    else:
        headers_only = '--hh' in sys.argv
        unflat_header_dependencies(root, count_only, headers_only)
| [
"you@example.com"
] | you@example.com |
01bdb13b8cd56e0277a2c5d93a87183adc3bd6c6 | caa6dced49f2d03d4fed4c0e9c6f09d0d374e7ce | /Python/15657.py | 0cfc7a6e9c4e8803a4ccf30ddcf6d6069828a3c8 | [] | no_license | hooong/baekjoon | 72e70cea5769467609150416be6f075f9d4c136a | 541f67859de0871ecfa134f8e5c6a8f399d17a1e | refs/heads/master | 2022-04-06T09:21:26.761766 | 2022-03-02T12:57:21 | 2022-03-02T12:57:21 | 180,535,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # 15657번 N과 M (8)
import sys
# Print one candidate sequence, space separated, on its own line.
def printS(s):
    for i in s:
        print(i, end=' ')
    print()
# Depth-first search over non-decreasing sequences. `cur` is the smallest
# index allowed next (indices may repeat, so the recursion passes `i`, not
# `i + 1`); `cnt` is the current length; `s` is the sequence built so far.
# Reads the module-level arr/n/m set below (m is read without appearing in
# the global statement, which is fine because it is never assigned here).
def dfs(cur, cnt, s):
    global arr, n
    if cnt == m:
        printS(s)
        return
    for i in range(cur, n):
        s.append(arr[i])
        dfs(i, cnt+1, s)
        s.pop()
# main: read n and m, then the n numbers; sort and launch a DFS at each index.
n, m = map(int, input().split())
arr = [int(x) for x in sys.stdin.readline().split()]
arr.sort()
for i in range(n):
    dfs(i,1,[arr[i]])
| [
"tjrwns0529@gmail.com"
] | tjrwns0529@gmail.com |
5b362c67ac7d3d2992114954d0f450f5cb679a15 | e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7 | /flask_api/venv/lib/python3.7/site-packages/vsts/task_agent/v4_1/models/azure_subscription_query_result.py | 4ebfad9ff9a2df06462ffbba86a993c478dfb19f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | u-blavins/secret_sasquatch_society | c36993c738ab29a6a4879bfbeb78a5803f4f2a57 | 0214eadcdfa9b40254e331a6617c50b422212f4c | refs/heads/master | 2020-08-14T00:39:52.948272 | 2020-01-22T13:54:58 | 2020-01-22T13:54:58 | 215,058,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AzureSubscriptionQueryResult(Model):
    """AzureSubscriptionQueryResult.
    :param error_message:
    :type error_message: str
    :param value:
    :type value: list of :class:`AzureSubscription <task-agent.v4_1.models.AzureSubscription>`
    """
    # msrest serialization map: attribute name -> wire key and type string.
    _attribute_map = {
        'error_message': {'key': 'errorMessage', 'type': 'str'},
        'value': {'key': 'value', 'type': '[AzureSubscription]'}
    }
    def __init__(self, error_message=None, value=None):
        super(AzureSubscriptionQueryResult, self).__init__()
        self.error_message = error_message
        self.value = value
| [
"usama.blavins1@gmail.com"
] | usama.blavins1@gmail.com |
dc7b4234417b4f328be6debd9d5c2191c8e165ff | 39b0d9c6df77671f540c619aff170441f953202a | /PYTHON LIBRARY/SUB_1/functools_lru_cache.py | 285397a8f7f0e5598257d78b8e4a0319ede5f20c | [] | no_license | yeboahd24/Python201 | e7d65333f343d9978efff6bf86ce0447d3a40d70 | 484e66a52d4e706b8478473347732e23998c93c5 | refs/heads/main | 2023-02-06T10:24:25.429718 | 2020-12-26T01:08:04 | 2020-12-26T01:08:04 | 306,487,550 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | #!usr/bin/env/python3
# The lru_cache() decorator wraps a function in a “least recently used” cache. Arguments to
# the function are used to build a hash key, which is then mapped to the result. Subsequent
# calls with the same arguments will fetch the value from the cache instead of calling the
# function. The decorator also adds methods to the function to examine the state of the
# cache (cache_info()) and empty the cache (cache_clear()).
import functools
@functools.lru_cache()
def expensive(a, b):
    """Multiply *a* by *b*; results are memoized per argument pair.

    The print only fires on a cache miss, which is what the surrounding
    demo code uses to make the caching visible.
    """
    message = 'expensive({}, {})'.format(a, b)
    print(message)
    product = a * b
    return product
MAX = 2
# print('First set of calls:')
# for i in range(MAX):
# for j in range(MAX):
# expensive(i, j)
# print(expensive.cache_info())
# print('\nSecond set of calls:')
# for i in range(MAX + 1):
# for j in range(MAX + 1):
# expensive(i, j)
# print(expensive.cache_info())
# print('\nClearing cache:')
# expensive.cache_clear()
# print(expensive.cache_info())
# print('\nThird set of calls:')
# for i in range(MAX):
# for j in range(MAX):
# expensive(i, j)
# print(expensive.cache_info())
call1 = expensive(2, 2)
call2 = expensive(1, 2)
print(call1)
print(call2)
print(call1) | [
"noreply@github.com"
] | yeboahd24.noreply@github.com |
5eebf01189a74dd5e2e83fd2153cfd3133b4fb10 | 24f2696aab87f1632705a7c8b2d3b866e26aa3ee | /product_array_except_self_238.py | 49f524714ed803ae251420e8f3626e3850cdae71 | [] | no_license | adiggo/leetcode_py | 44a77a0b029f4d92bd0d8e24cad21ceea52e7794 | 4aa3a3a0da8b911e140446352debb9b567b6d78b | refs/heads/master | 2020-04-06T07:05:21.770518 | 2016-07-01T16:00:40 | 2016-07-01T16:00:40 | 30,397,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | class Solution(object):
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
helper1 = [1] * len(nums)
helper2 = [1] * len(nums)
for i in xrange(1, len(nums)):
helper1[i] = nums[i - 1] * helper1[i - 1]
for j in xrange(len(nums) - 2, -1, -1):
helper2[j] = nums[j + 1] * helper2[j + 1]
for i in xrange(len(nums)):
nums[i] = helper1[i] * helper2[i]
return nums
# Better approach: only the output list plus one scalar of extra state.
class Solution2(object):
    def productExceptSelf(self, nums):
        """Return a list whose entry i is the product of all nums except nums[i].

        The forward pass fills *helper* with prefix products; the backward
        pass folds in the running suffix product using a single scalar, so
        no second array is needed. The input list is left unmodified.

        :type nums: List[int]
        :rtype: List[int]
        """
        helper = [1] * len(nums)
        # range() replaces Python-2-only xrange(); iteration is identical here.
        for i in range(1, len(nums)):
            helper[i] = nums[i - 1] * helper[i - 1]
        tmp = 1  # running product of nums[j + 1:]
        for j in range(len(nums) - 2, -1, -1):
            helper[j] = nums[j + 1] * tmp * helper[j]
            tmp = nums[j + 1] * tmp
        return helper
| [
"adiggo@gmail.com"
] | adiggo@gmail.com |
14c85b88fb9533979537167aa4569a09a1e6bf74 | 74f902dedade999b9b6d9567c87f80d5975d6813 | /day2/tuples.py | ca44b2b3351f2aefa8f9842677bf09823a2c34f9 | [
"MIT"
] | permissive | britneh/CS35_IntroPython_GP | ed315daad2b06eeef9ec7d1040e3c5a874dbaf0d | e0a3441766973a833341b3f1f16f33d6c9f5c0d3 | refs/heads/main | 2022-12-05T09:29:54.053705 | 2020-09-02T21:13:19 | 2020-09-02T21:13:19 | 291,771,959 | 0 | 0 | MIT | 2020-09-02T21:12:59 | 2020-08-31T16:48:36 | null | UTF-8 | Python | false | false | 161 | py | l1 = [1, 3, 5, 7, 9] # list mutable (read write)
t1 = (1, 3, 5, 7, 9) # tuple imutable (read only)
def f(x):
x.append(29)
f(l1)
print(l1)
f(t1)
print(t1) | [
"tomtarpeydev@gmail.com"
] | tomtarpeydev@gmail.com |
573634b81d90dfc1247320dcaf317ec636e8aa10 | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res/scripts/client/gui/scaleform/framework/managers/textmanager.py | ceb73a75fefd838e111c64767d74bf99d3f318fa | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,092 | py | # 2015.11.18 11:55:27 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/framework/managers/TextManager.py
from debug_utils import LOG_ERROR
from gui.Scaleform.framework.entities.abstract.TextManagerMeta import TextManagerMeta
from gui.Scaleform.genConsts.TEXT_MANAGER_STYLES import TEXT_MANAGER_STYLES as _TMS
from gui.shared.formatters import text_styles
class TextManager(TextManagerMeta):
    """Resolves TEXT_MANAGER_STYLES constant values to raw text styles."""
    def __init__(self):
        super(TextManager, self).__init__()
        # Collect every public constant value declared on TEXT_MANAGER_STYLES.
        self.__styles = text_styles.getRawStyles([ v for k, v in _TMS.__dict__.iteritems() if not k.startswith('_') ])
    def getTextStyle(self, style):
        """Return the raw style for *style*, or '' (logging an error) if unknown."""
        if style in self.__styles:
            result = self.__styles[style]
        else:
            LOG_ERROR('Style is not found', style)
            result = ''
        return result
    def _dispose(self):
        # Release the style table before delegating teardown to the base class.
        self.__styles.clear()
        super(TextManager, self)._dispose()
class TextIcons:
    """String constants naming text icons; ICONS collects the full set."""
    CHECKMARK_ICON = 'checkmark'
    NUT_ICON = 'nut'
    PERCENT_ICON = 'percent'
    ALERT_ICON = 'alert'
    INFO_ICON = 'info'
    PREMIUM_IGR_SMALL = 'premiumIgrSmall'
    PREMIUM_IGR_BIG = 'premiumIgrBig'
    ORDER_IN_PROGRESS_ICON = 'order_in_progress'
    CLOCK_ICON = 'clock'
    NOT_AVAILABLE = 'notAvailable'
    LEVEL_5 = 'level5'
    LEVEL_10 = 'level10'
    SWORDS = 'swords'
    HUMANS = 'humans'
    CREDITS = 'credits'
    GOLD = 'gold'
    XP = 'xp'
    FREE_XP = 'freeXP'
    ARROW_BUTTON = 'arrowButton'
    NO_SEASON = 'noSeason'
    ICONS = (NUT_ICON,
     PERCENT_ICON,
     ALERT_ICON,
     INFO_ICON,
     PREMIUM_IGR_SMALL,
     PREMIUM_IGR_BIG,
     ORDER_IN_PROGRESS_ICON,
     CLOCK_ICON,
     CHECKMARK_ICON,
     NOT_AVAILABLE,
     LEVEL_5,
     LEVEL_10,
     SWORDS,
     HUMANS,
     CREDITS,
     GOLD,
     XP,
     FREE_XP,
     ARROW_BUTTON,
     NO_SEASON)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\framework\managers\textmanager.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:55:27 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
e277aaaa7153abbbce1d4bf5b9680ab6ffaec947 | 34a043e6961639657e36e7ac9fd459ad5b1f6de1 | /openpathsampling/experimental/simstore/test_custom_json.py | 84d5858ed3fc2690d2f37875129014def11c313e | [
"MIT"
] | permissive | dwhswenson/openpathsampling | edaddc91e443e7ffc518e3a06c99fc920ad9d053 | 3d02df4ccdeb6d62030a28e371a6b4ea9aaee5fe | refs/heads/master | 2023-02-04T12:31:17.381582 | 2023-01-30T21:17:01 | 2023-01-30T21:17:01 | 23,991,437 | 3 | 1 | MIT | 2022-08-12T17:48:04 | 2014-09-13T10:15:43 | Python | UTF-8 | Python | false | false | 4,275 | py | from .custom_json import *
import json
import pytest
import numpy as np
from numpy import testing as npt
from . import test_utils
class TestJSONSerializerDeserializer(object):
    """Checks that codecs can be added to a serializer after construction."""
    def test_add_codec(self):
        # without bytes codec, can't serialize numpy
        serialization = JSONSerializerDeserializer([numpy_codec])
        obj = np.array([[1.0, 0.0], [2.0, 3.2]])
        with pytest.raises(TypeError):
            serialization.serializer(obj)
        # add the codec and it will work
        serialization.add_codec(bytes_codec)
        serialized = serialization.serializer(obj)
        assert len(serialization.codecs) == 2
        reconstructed = serialization.deserializer(serialized)
        npt.assert_equal(obj, reconstructed)
class CustomJSONCodingTest(object):
    """Shared test mixin; subclasses fill self.codec/self.objs/self.dcts in setup."""
    def test_default(self):
        # Encoding: codec.default turns each object into its dict form.
        for (obj, dct) in zip(self.objs, self.dcts):
            assert self.codec.default(obj) == dct
    def test_object_hook(self):
        # Decoding: object_hook turns each dict back into the object.
        for (obj, dct) in zip(self.objs, self.dcts):
            assert self.codec.object_hook(dct) == obj
    def _test_round_trip(self, encoder, decoder):
        # dumps -> loads must reproduce the object, and re-dumping is stable.
        for (obj, dct) in zip(self.objs, self.dcts):
            json_str = json.dumps(obj, cls=encoder)
            reconstructed = json.loads(json_str, cls=decoder)
            assert reconstructed == obj
            json_str_2 = json.dumps(obj, cls=encoder)
            assert json_str == json_str_2
    def test_round_trip(self):
        encoder, decoder = custom_json_factory([self.codec])
        self._test_round_trip(encoder, decoder)
    def test_not_mine(self):
        # test that the default behavior is obeyed
        obj = {'test': 5}
        json_str = '{"test": 5}'
        encoder, decoder = custom_json_factory([self.codec])
        assert json.dumps(obj, cls=encoder) == json_str
        assert json.loads(json_str, cls=decoder) == obj
class TestNumpyCoding(CustomJSONCodingTest):
def setup_method(self):
self.codec = numpy_codec
self.objs = [np.array([[1.0, 0.0], [2.0, 3.2]]),
np.array([1, 0])]
shapes = [(2, 2), (2,)]
dtypes = [str(arr.dtype) for arr in self.objs] # may change by system?
string_reps = [arr.tobytes() for arr in self.objs]
self.dcts = [
{
'__class__': 'ndarray',
'__module__': 'numpy',
'shape': shape,
'dtype': dtype,
'string': string_rep
}
for shape, dtype, string_rep in zip(shapes, dtypes, string_reps)
]
def test_object_hook(self):
# to get custom equality testing for numpy
for (obj, dct) in zip(self.objs, self.dcts):
reconstructed = self.codec.object_hook(dct)
npt.assert_array_equal(reconstructed, obj)
def test_round_trip(self):
encoder, decoder = custom_json_factory([self.codec, bytes_codec])
for (obj, dct) in zip(self.objs, self.dcts):
json_str = json.dumps(obj, cls=encoder)
reconstructed = json.loads(json_str, cls=decoder)
npt.assert_array_equal(reconstructed, obj)
json_str_2 = json.dumps(obj, cls=encoder)
assert json_str == json_str_2
class TestUUIDCoding(object):
def setup_method(self):
self.codec = uuid_object_codec
all_objs = test_utils.all_objects
self.objs = [all_objs['int'], all_objs['str']]
updates = [{'normal_attr': 5, 'name': 'int'},
{'normal_attr': 'foo', 'name': 'str'}]
module = str(test_utils)
self.dcts = [
{
'__class__': 'MockUUIDObject',
'__module__': test_utils.__name__,
'normal_attr': None,
'obj_attr': None,
'list_attr': None,
'dict_attr': None,
'lazy_attr': None,
}
for _ in self.objs
]
for dct, update in zip(self.dcts, updates):
dct.update(update)
test_default = CustomJSONCodingTest.test_default
test_not_mine = CustomJSONCodingTest.test_not_mine
def test_object_hook(self):
for (obj, dct) in zip(self.objs, self.dcts):
assert self.codec.object_hook(dct) == dct
| [
"dwhs@hyperblazer.net"
] | dwhs@hyperblazer.net |
11ee6d575b465139a25da977604528df4b86f995 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/139/usersdata/237/61913/submittedfiles/diagonaldominante.py | 00c769a428617b962e36722463af9d2a38182937 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | # -*- coding: utf-8 -*-
import numpy as np
a=int(input("Digite o numero de linhas: "))
b=int(input("Digite o numero de colunas: "))
n=np.zeros((a,b))
for i in range(0,n.shape[0],1):
for j in range(0,n.shape[1],1):
n[i,j]=float(input("Digite o termo: "))
for i in range(0,shape[0],1):
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8c76e72e25df0c80cd16d788bd40107a3341a2f6 | 6ae5400d117613ec162c41ed297915bad2936c24 | /Chapter09/flip_image.py | 179d06a803d00cae9a60ee59502adf439081b590 | [
"MIT"
] | permissive | PacktPublishing/Automate-it | 83390b81a299a08cd01871c5f389758d4720a486 | 203bcf984ae61049e880d420ab77a50332f8d6f9 | refs/heads/master | 2022-11-01T23:08:40.033898 | 2022-10-31T06:46:33 | 2022-10-31T06:46:33 | 80,894,455 | 30 | 21 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from PIL import Image
#Get the image object
img = Image.open('sunset.jpg')
#Rotate the image by 180 deg and save a copy (the original file is untouched)
img.rotate(180).save('sunset180deg.jpg')
#Flip the image horizontally and vertically, saving each result as a new PNG
img.transpose(Image.FLIP_LEFT_RIGHT).save('sunset_horizontal_flip.png')
img.transpose(Image.FLIP_TOP_BOTTOM).save('sunset_vertical_flip.png')
| [
"noreply@github.com"
] | PacktPublishing.noreply@github.com |
a34446385faf4891261d8eed7a2c1d18b95e45fe | 5873213f0615c13d26c389d8e6aff0291e639d51 | /migrations/versions/1d067cc24a5a_add_track_id_in_invitation.py | 697953b4bf3ce4cfb56eae23d68a7eae75a57664 | [
"MIT"
] | permissive | conferency/conf-panda | 15d9645d5834b78ea27560c58d15a0fe628749ab | d69094174e880b771cd1a5cad981f65374008359 | refs/heads/master | 2020-05-18T16:03:56.716017 | 2019-05-18T04:03:55 | 2019-05-18T04:03:55 | 184,514,509 | 0 | 2 | MIT | 2019-05-18T04:03:57 | 2019-05-02T03:14:46 | JavaScript | UTF-8 | Python | false | false | 933 | py | """add track_id in invitation
Revision ID: 1d067cc24a5a
Revises: 4c4af1936ba4
Create Date: 2016-08-02 17:42:13.459631
"""
# revision identifiers, used by Alembic.
revision = '1d067cc24a5a'
down_revision = '4c4af1936ba4'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: add invitations.track_id with an FK to tracks."""
    ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('invitations', schema=None) as batch_op:
        # Nullable column so existing invitation rows remain valid.
        batch_op.add_column(sa.Column('track_id', sa.Integer(), nullable=True))
        # Named constraint so downgrade() can drop it explicitly.
        batch_op.create_foreign_key('fk_track+invitation', 'tracks', ['track_id'], ['id'])
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the FK constraint, then the column."""
    ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('invitations', schema=None) as batch_op:
        # Drop the named FK first; the column it covers goes second.
        batch_op.drop_constraint('fk_track+invitation', type_='foreignkey')
        batch_op.drop_column('track_id')
    ### end Alembic commands ###
| [
"harryjwang@gmail.com"
] | harryjwang@gmail.com |
41cc0fd22a2dd5f4544423c7d5e5d3736e31e8b9 | 9099ed0407521ac40b88f3b92872307f66c57bf9 | /codes/py/test_ws/test_ws_server.py | ab474cbdebac6c13b384a84bd1eea74c8c7a0d65 | [] | no_license | jiluhu/dirtysalt.github.io | 0cea3f52d2c4adf2bbf5c23b74f4cb1070025816 | c026f2969c784827fac702b34b07a9268b70b62a | refs/heads/master | 2020-08-31T09:32:05.273168 | 2019-10-29T01:53:45 | 2019-10-29T01:53:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,729 | py | #!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt
import asyncio
import logging
import time
from autobahn.asyncio.websocket import WebSocketServerFactory, WebSocketServerProtocol
from redis_queue import RedisQueue
# # 这个地方似乎有bug, 如果使用uvloop的话,整个代码会hang住
# import uvloop
#
# asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
logger = logging.getLogger()
DEFAULT_LOGGING_FORMAT = '[%(asctime)s][%(levelname)s]%(filename)s@%(lineno)d: %(msg)s'
logging.basicConfig(level=logging.WARN, format=DEFAULT_LOGGING_FORMAT)
class ConnectionManeger:
    """Registry of live websocket connections, keyed by peer address.

    (The misspelled class name is kept as-is: other code in this module
    refers to ``ConnectionManeger``.)
    """

    def __init__(self):
        # peer identifier -> protocol instance
        self.conns = {}

    def add(self, peer, conn):
        """Register (or replace) the connection for *peer*."""
        self.conns[peer] = conn

    def remove(self, peer):
        """Forget *peer*'s connection; a no-op if it is not registered."""
        # pop() with a default replaces the membership-test-then-delete dance.
        self.conns.pop(peer, None)

    def getall(self):
        """Yield every registered connection object."""
        yield from self.conns.values()

    def count(self):
        """Return the number of registered connections."""
        return len(self.conns)
connection_manager = ConnectionManeger()
connection_channel = asyncio.Queue()
async def listen_command():
    """Forever forward each message from the channel to every live connection."""
    global connection_manager, connection_channel
    while True:
        payload = await connection_channel.get()
        logger.info('got message from channel. value = {}'.format(payload))
        for connection in connection_manager.getall():
            connection.sendMessage(payload)
command_queue = RedisQueue('command')
# 决定是否从redis queue里面收取消息然后广播到所有连接上.
async def read_command():
    """Poll the redis-backed queue and broadcast a timestamp per item."""
    evloop = asyncio.get_event_loop()
    while True:
        # The blocking queue read runs in the default thread-pool executor
        # so it does not stall the event loop.
        # NOTE(review): the tuple (30,) is passed as ONE positional argument
        # to RedisQueue.get -- presumably meant as a timeout; confirm the
        # queue's expected signature.
        item = await evloop.run_in_executor(None, command_queue.get, (30,))
        if item is None:
            continue
        # The broadcast payload is the current unix timestamp, not the
        # queue item itself (the item is only logged).
        ts = int(time.time())
        msg = str(ts).encode('utf8')
        logger.info('get from queue {}, send msg {}'.format(item, msg))
        await connection_channel.put(msg)
async def write_command():
    """Push a synthetic 'command #N' onto the redis queue every 5 seconds."""
    evloop = asyncio.get_event_loop()
    idx = 0
    while True:
        msg = 'command #%d' % idx
        # NOTE(review): (msg,) is passed as ONE positional argument to
        # RedisQueue.put, mirroring the (30,) oddity in read_command --
        # confirm whether the queue API really expects a tuple.
        await evloop.run_in_executor(None, command_queue.put, (msg,))
        logger.info('put queue. value = {}'.format(msg))
        idx += 1
        await asyncio.sleep(5)
async def print_stats():
    """Log the live-connection count every five seconds, forever."""
    while True:
        total = connection_manager.count()
        logger.warning('total connections = {}'.format(total))
        await asyncio.sleep(5)
class MyServerProtocol(WebSocketServerProtocol):
    """Websocket handler that tracks peers in the module-level
    connection_manager (so broadcasts can reach them) and answers every
    inbound message with a fixed b'pong' frame.
    """
    async def onMessage(self, payload, isBinary):
        # Log whatever arrived and reply with a constant pong frame.
        logger.info('onMessage. {}, {}'.format(payload, isBinary))
        self.sendMessage(b'pong')
        # self.sendMessage(payload, isBinary)
    async def onConnect(self, request):
        # Remember the peer id and register this connection for broadcasts.
        logger.info('onConnect {}'.format(request))
        self.peer = request.peer
        connection_manager.add(self.peer, self)
        pass
    async def onClose(self, wasClean, code, reason):
        # Drop the connection from the registry once it closes.
        logger.info('onClose {}, {}, {}'.format(wasClean, code, reason))
        connection_manager.remove(self.peer)
        pass
    async def onOpen(self):
        logger.info('onOpen ...')
        pass
def main():
    """Start the websocket server plus the background broadcast tasks."""
    factory = WebSocketServerFactory()
    factory.protocol = MyServerProtocol
    loop = asyncio.get_event_loop()
    coro = loop.create_server(factory, '127.0.0.1', 8765)
    server = loop.run_until_complete(coro)
    # All gathered coroutines loop forever, so this call never returns in
    # normal operation.
    loop.run_until_complete(asyncio.gather(*[listen_command(),
                                             read_command(),
                                             # write_command(),
                                             print_stats()]))
    # NOTE(review): because of that, the lines below are effectively
    # unreachable -- and closing the loop before the server looks inverted.
    loop.run_forever()
    loop.close()
    server.close()
if __name__ == '__main__':
main()
| [
"dirtysalt1987@gmail.com"
] | dirtysalt1987@gmail.com |
9a3662c1bc6ec6e4f634b89a058657cab892da26 | d2e69d4d3d1e11a87f5a377e4a423422fe0a7058 | /pandas_code/essential_functionalty/indexing_selection_and_filtering.py | 81501debdcb7dca972d99a3dd422ebe4bcdeec75 | [] | no_license | oJacker/_python | 6f30dd4a60c1593d27c00ac485163fc0ba77dd8c | 8086d0cd78e156abfff9819a56384149dd431c56 | refs/heads/master | 2021-05-06T03:13:29.167281 | 2018-02-01T09:41:42 | 2018-02-01T09:41:42 | 114,827,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | # -*- coding: utf-8 -*-
'''
Indexing into a Series (obj[...]) works much like indexing into a NumPy
array, except that Series index values need not be integers.

Points demonstrated below:
* Label-based slicing differs from ordinary Python slicing: the endpoint
  is inclusive.
* Indexing a DataFrame with a label (or list of labels) selects columns.
* Label indexing on DataFrame rows uses .loc / .iloc (the long-deprecated
  and now-removed .ix accessor has been replaced throughout).
'''
import numpy as np
from pandas import Series, DataFrame

print('Series的索引,默认数字索引可以工作。')
obj = Series(np.arange(4.), index = ['a', 'b', 'c', 'd'])
print(obj['b'])
# Positional access goes through .iloc now that the index is label-based.
print(obj.iloc[3])
print(obj.iloc[[1, 3]])
print(obj[obj < 2])
print('Series的数组切片')
# Label slices include both endpoints.
print(obj['b':'c'])
obj['b':'c'] = 5
print(obj)
print('DataFrame的索引')
data = DataFrame(np.arange(16).reshape((4,4)),
                 index= ['Ohio', 'Colorado', 'Utah', 'New York'],
                 columns =['one', 'two', 'three', 'four']
                 )
print( data)
print(data['two'])  # select one column
print(data[['three','one']])
print(data[:2])
# Row/column selection by label: .loc replaces the removed .ix accessor.
print(data.loc['Colorado',['two','three']])
# Label rows mixed with positional columns: resolve the positions first.
print(data.loc[['Colorado', 'Utah'], data.columns[[3, 0, 1]]])
print(data.iloc[2])  # third row, selected by position
print(data.loc[:'Utah', 'two'])
print('根据条件选择')
print(data[data.three > 5])
print(data < 5 )  # boolean mask frame
data[data<5] = 0
print(data)
| [
"623657285@qq.com"
] | 623657285@qq.com |
5bb6e469ebd7235391d2aab65a0d6343bcab59a4 | c06d18ac5b87b3b82fc486454c422b119d6c1ee9 | /src/preprocess/text/sogou.py | 30ae24bd466ca6fff2531855c09348c7a9a164c0 | [] | no_license | tangermi/nlp | b3a4c9612e6049463bf12bc9abb7aff06a084ace | aa36b8b20e8c91807be73a252ff7799789514302 | refs/heads/master | 2022-12-09T12:33:15.009413 | 2020-04-03T04:03:24 | 2020-04-03T04:03:24 | 252,056,010 | 0 | 0 | null | 2022-12-08T07:26:55 | 2020-04-01T02:55:05 | Jupyter Notebook | UTF-8 | Python | false | false | 2,982 | py | # -*- coding:utf-8 -*-
from ..base import Base
import os
import pandas as pd
import random
'''
# sogou新闻mini数据集 http://www.sogou.com/labs/resource/cs.php
读取文件夹树的txt文件
输出为csv格式的文件
In: 我要吃饭了
Out: ['我', '要', '吃饭', '了']
'''
# 数据预处理
class Sogou(Base):
    """Preprocessor for the Sogou mini news corpus.

    Walks a directory tree of per-category .txt files, shuffles the
    samples, splits them roughly 80/20 into train/test, and writes both
    splits as CSV files with `text` and `class` columns.
    """

    # NOTE(review): mutable default arguments are shared across calls;
    # callers presumably always pass fresh dicts -- confirm.
    def __init__(self, dic_config={}, dic_engine={}):
        self.dic_engine = dic_engine
        Base.__init__(self, dic_config)
        self.logger.info(dic_engine)

    def init(self):
        # Resolve input/output locations from the engine configuration.
        self.data_dir = self.dic_engine['_in']  # training-data directory
        self.train_path = os.path.join(self.dic_engine['_out'], self.dic_engine['train_file'])
        self.test_path = os.path.join(self.dic_engine['_out'], self.dic_engine['test_file'])

    # Read one .txt sample and record its text and category label.
    def read_txt(self, new_data_dir, file, folder):
        # NOTE(review): 'rb+' opens read/write; plain 'rb' would suffice.
        with open(os.path.join(new_data_dir, file), 'rb+') as f:
            raw = f.read()
        raw = raw.decode('utf8')
        self.data_list.append(raw)  # sample text
        self.class_list.append(folder)  # sample category (= folder name)

    def read(self):
        """Load up to 400 documents from each category sub-folder."""
        folder_list = os.listdir(self.data_dir)  # entries under data_dir
        self.data_list = []  # sample texts
        self.class_list = []  # sample categories
        # Walk every category sub-folder; skip plain files.
        for folder in folder_list:
            new_data_dir = os.path.join(self.data_dir, folder)  # sub-folder path
            if os.path.isdir(new_data_dir):
                self.logger.info('load folder: ' + str(folder))
                files = os.listdir(new_data_dir)  # .txt files inside it
            else:
                continue
            j = 1
            # Visit each file, capped at 400 per category.
            for file in files:
                if j > 400:
                    break
                self.read_txt(new_data_dir, file, folder)
                j += 1

    # Shuffle and split the corpus into train and test sets.
    def process(self):
        data_class_list = list(zip(self.data_list, self.class_list))  # pair text with label
        random.shuffle(data_class_list)  # randomise sample order
        test_size = 0.2
        index = int(len(data_class_list) * test_size) + 1  # split point
        train_list = data_class_list[index:]  # training split
        test_list = data_class_list[:index]  # test split
        self.train_df = pd.DataFrame(train_list, columns=['text', 'class'])
        self.test_df = pd.DataFrame(test_list, columns=['text', 'class'])
        # self.logger.info(train_df.head())
        # self.logger.info(train_df.info())

    def dump(self):
        # Write both splits out as CSV.
        self.train_df.to_csv(self.train_path, sep=',', encoding='utf-8')
        self.test_df.to_csv(self.test_path, sep=',', encoding='utf-8')

    def run(self):
        self.init()
        self.read()
        self.process()
        self.dump()
| [
"n10057862@qut.edu.au"
] | n10057862@qut.edu.au |
e0b751fc59d2240ee869ee7941511ebd560f22ca | 997f4d09a04e163fd749c69b0a65614da6faada6 | /Product_Info.py | 80c8476eac68934fef1ab2e1c858dc68c7716dd5 | [] | no_license | viralsir/ECDemo | 410f442536dd418c144e51b0839dddb710880af5 | 53ba8f27dade0b5b80cd098f8c3f1a9b8cedacdc | refs/heads/master | 2023-03-21T07:57:55.158407 | 2021-03-23T12:54:59 | 2021-03-23T12:54:59 | 350,690,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | from util_lib import is_nagetive
class Product:
    """Console-entry product record: id, name, quantity, unit rate and a
    derived total price (qty * rate).
    """
    # Class-level defaults; setProduct() assigns per-instance values.
    id=0
    name=""
    qty=0
    rate=0
    price=0
    def setProduct(self):
        # is_nagetive (sic) presumably validates/rejects negative numeric
        # input -- confirm its behaviour against util_lib.
        self.id=is_nagetive(int(input("Enter Product Id :")),title="Id :")
        self.name=input("Enter Product Name :")
        self.qty=is_nagetive(int(input("Enter Product Qty :")),title="Qty :")
        self.rate=is_nagetive(float(input("Enter Product Rate:")),title="Rate:")
        # Derived line-item total.
        self.price=self.qty*self.rate
    def getProduct(self):
        # One fixed-width (10-character) column per field.
        print(str(self.id).ljust(10)
              ,self.name.ljust(10)
              ,str(self.qty).ljust(10),
              str(self.rate).ljust(10)
              ,str(self.price).ljust(10))
| [
"viralsir2018@gmail.com"
] | viralsir2018@gmail.com |
fe0ca77ec8a4eeac17ef18c28e3792ad07980656 | 1770b125d1684c09091d8694b0295de16102990f | /cltk/utils/file_operations.py | e0540566a6feaec3a0f2db8727f62062ca3e17f2 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | coderbhupendra/cltk | 79410ec7eec9727c99c08dc83b4b5f43a0fe36de | 7f6ab523d157fb874c42928e2cdfb0c6ddf4e4b5 | refs/heads/master | 2021-01-22T16:13:24.844596 | 2016-04-16T21:01:28 | 2016-04-16T21:01:28 | 52,906,095 | 1 | 0 | null | 2016-04-16T23:14:15 | 2016-03-01T20:05:00 | Python | UTF-8 | Python | false | false | 895 | py | """Miscellaneous file operations used by various parts of the CLTK."""
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
from cltk.utils.cltk_logger import logger
import pickle
def open_pickle(path: str):
    """Load and return the object stored in the pickle file at *path*.

    Every failure mode (missing file, I/O problem, or a corrupt or
    truncated pickle) is logged and then re-raised unchanged.
    """
    try:
        with open(path, 'rb') as pickle_file:
            try:
                return pickle.load(pickle_file)
            except Exception as load_error:
                # Anything raised while unpickling: log, then propagate.
                logger.error(load_error)
                raise
    except (FileNotFoundError, IOError, EOFError) as file_error:
        # File-level failures get identical treatment: log and propagate.
        logger.error(file_error)
        raise
| [
"kyle@kyle-p-johnson.com"
] | kyle@kyle-p-johnson.com |
2d012fd08bf234e4fcecc8c1240aa00d6ca7546d | 4331b28f22a2efb12d462ae2a8270a9f666b0df1 | /.history/dvdstore/webapp/views_20190913182252.py | cb42187fff617202e0416046cd182fd43957dac1 | [] | no_license | ZiyaadLakay/csc312.group.project | ba772a905e0841b17478eae7e14e43d8b078a95d | 9cdd9068b5e24980c59a53595a5d513c2e738a5e | refs/heads/master | 2020-07-26T23:30:22.542450 | 2019-09-16T11:46:41 | 2019-09-16T11:46:41 | 200,703,160 | 0 | 0 | null | 2019-08-05T17:52:37 | 2019-08-05T17:52:37 | null | UTF-8 | Python | false | false | 6,179 | py | from django.shortcuts import render
from .models import DVD, Transaction, Customer
from django.core.paginator import EmptyPage,PageNotAnInteger, Paginator
from django.db.models import Q
from django.contrib.auth.models import User, auth
from django.shortcuts import render, redirect
from django.contrib import messages
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.decorators import login_required, permission_required
from .form import DocumentForm
import datetime
#This is the homepage for the User
def home(request):
    """Public catalogue page: searchable, genre-filterable, paginated DVDs."""
    dvds = DVD.objects.all()  # full catalogue from the database
    query = request.GET.get("query")
    gen = request.GET.get("gen")
    if query:
        # Free-text search by title.
        dvds = DVD.objects.filter(Q(Title__icontains=query))
    elif gen:
        # Genre filter.
        dvds = DVD.objects.filter(Q(genre__icontains=gen))
    paginator = Paginator(dvds, 3)  # show 3 dvds per page
    page = request.GET.get('page')
    dvds = paginator.get_page(page)
    genre = {'Action', 'Comedy', 'Drama', 'Family', 'Romance'}
    # Bug fix: render() takes a single context dict; the old call passed
    # {'genre': genre} as the *content_type* argument, so the template
    # never received the genre set.
    return render(request, 'home.html', {'dvds': dvds, 'genre': genre})
#This is the page for clerks
@login_required
def clerk(request):
    """Clerk dashboard: searchable, paginated DVD list plus related data."""
    dvds = DVD.objects.all()  # full catalogue
    trans = Transaction.objects.all()  # all rental transactions
    users = User.objects.all()  # all accounts
    customer = Customer.objects.all()  # all customers
    query = request.GET.get("query")
    if query:
        # Title search narrows the DVD list.
        dvds = DVD.objects.filter(Q(Title__icontains=query))
    paginator = Paginator(dvds, 6)  # show 6 dvds per page
    page = request.GET.get('page')
    dvds = paginator.get_page(page)
    form=DocumentForm()
    context_dict = { 'dvds':dvds ,'form': form, 'trans':trans, 'users':users, 'customer':customer}
    return render(request, 'clerk.html',context_dict)
@login_required
def userstbl(request):
    """Clerk view of user accounts, searchable by username."""
    dvds = DVD.objects.all()  # still consumed by the shared template
    trans = Transaction.objects.all()
    users = User.objects.all()
    customer = Customer.objects.all()
    query = request.GET.get("query")
    if query:
        # Username search narrows the user list.
        users = User.objects.filter(Q(username__icontains=query))
    # NOTE(review): pagination is applied to `dvds`, not to the filtered
    # `users` this page is about -- confirm whether that is intended.
    paginator = Paginator(dvds, 6)
    page = request.GET.get('page')
    dvds = paginator.get_page(page)
    form=DocumentForm()
    context_dict = { 'dvds':dvds ,'form': form, 'trans':trans, 'users':users, 'customer':customer}
    return render(request, 'userstbl.html',context_dict)
@login_required
def transactions(request):
    """Clerk view of rental transactions, searchable by transaction number."""
    dvds = DVD.objects.all()  # still consumed by the shared template
    trans = Transaction.objects.all()
    users = User.objects.all()
    customer = Customer.objects.all()
    query = request.GET.get("query")
    if query:
        # Transaction-number search narrows the transaction list.
        trans = Transaction.objects.filter(Q(TransactionNumber__icontains=query))
    # NOTE(review): pagination is applied to `dvds`, not to the
    # transactions this page displays -- confirm intent.
    paginator = Paginator(dvds, 6)
    page = request.GET.get('page')
    dvds = paginator.get_page(page)
    form=DocumentForm()
    context_dict = { 'dvds':dvds ,'form': form, 'trans':trans, 'users':users, 'customer':customer}
    return render(request, 'transactions.html',context_dict)
def register2(request):
    """Create a clerk-registered user account from POSTed form fields.

    The initial password is derived from the name (first initial +
    last name), matching the original behaviour. Redirects back to the
    clerk page in every case.
    """
    if request.method == 'POST':
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        username = request.POST['username']
        email = request.POST['email']
        password1 = first_name[0] + last_name
        if User.objects.filter(username=username).exists():
            messages.info(request, 'Username Taken')
            return redirect('clerk')
        elif User.objects.filter(email=email).exists():
            messages.info(request, 'Email Taken')
            # Bug fix: previously this branch fell through and created
            # the account anyway despite the duplicate email.
            return redirect('clerk')
        user = User.objects.create_user(username=username, password=password1, email=email, first_name=first_name, last_name=last_name)
        user.save()
        messages.info(request, 'User Created')
    return redirect('/clerk')
def model_form_upload(request):
    """Handle the clerk's document upload form.

    Always redirects back to the clerk page; an invalid submission (or a
    plain GET) is simply dropped instead of returning None, which Django
    would reject with a 500.
    """
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
    return redirect('/clerk')
def booking(request):
    """Record that the POSTed username has booked the given DVD for pickup."""
    booker = request.POST['username']
    dvd_id = request.POST['dvdID']
    DVD.objects.filter(id=dvd_id).update(BookingPickup=booker)
    return redirect('home')
def checkout(request):
    """Rent a DVD out: mark it off-stock and record the transaction."""
    dvdID= request.POST['dvdID']
    numOfDays=request.POST['numDaysBooked']
    dvdPrice=request.POST['dvdPrice']
    users_ID=request.POST['user_ID']
    MovieTitle=request.POST['MovieTitle']
    payment=request.POST['payment']
    # Total charge. NOTE(review): int() truncates, so a fractional
    # dvdPrice would raise / lose cents -- confirm prices are integral.
    bill=int(numOfDays)*int(dvdPrice)
    DVD.objects.filter(id=dvdID).update(NumDaysBooked=numOfDays,InStock=False)
    RentDate= datetime.date.today()
    DueDate=RentDate+datetime.timedelta(days=int(numOfDays))
    t = datetime.datetime.now().strftime("%H%M%S")
    # Transaction number = payment method + year + day-of-month + HHMMSS,
    # built from the ISO date string's character positions.
    TransactionNumber=payment+str(RentDate)[0:4]+str(RentDate)[8:10]+t
    #Amount is stored as the display string "R<total>".
    trans = Transaction(users_ID=users_ID, TransactionNumber=TransactionNumber, RentDate=RentDate, DueDate=DueDate, MovieTitle=MovieTitle, Payment_Method=payment,Amount="R"+str(bill),dvdID=dvdID)
    trans.save()
    return redirect('/clerk')
def checkin(request):
    """Return a DVD to stock and clear its booking state."""
    returned_id = request.POST['dvdID']
    DVD.objects.filter(id=returned_id).update(BookingPickup='None',InStock=True,NumDaysBooked=0)
    return redirect('/clerk')
def deleteMovie(request):
    """Remove the DVD named in the POST data from the catalogue."""
    doomed_id = request.POST['dvdID']
    DVD.objects.filter(id=doomed_id).delete()
    return redirect('/clerk')
def deleteTransaction(request):
    """Remove the transaction identified in the POST data."""
    target = request.POST['transID']
    Transaction.objects.filter(id=target).delete()
    return redirect('/transactions')
def deleteUser(request):
    # NOTE(review): despite its name, this view deletes a *Transaction*,
    # exactly like deleteTransaction above -- almost certainly a
    # copy-paste bug. It should presumably delete a User; confirm which
    # POST field the template actually sends before fixing.
    transID= request.POST['transID']
    Transaction.objects.filter(id=transID).delete()
    return redirect('/transactions')
| [
"uzairjoneswolf@gmail.com"
] | uzairjoneswolf@gmail.com |
ec182f5612596a7039c0908019cab024ef85b5b8 | 7c16a9f999f966060c064ae5bd4bddaf8f4e1dd0 | /time_pallindrome.py | 28d1eb6ad0da5ecc4682074559829912b8fe91bc | [] | no_license | sbd2309/Adv.Python | fd5ed698b14c75484903006da7753a155cf11b47 | f7ef906cd78114643ffaaaaca6d4cb0ccfb34f62 | refs/heads/master | 2021-10-25T01:48:29.420102 | 2021-10-17T06:20:11 | 2021-10-17T06:20:11 | 232,631,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | def time_pallindrome(n):
    # Split the "HH:MM"-style input around the first ':' one character
    # at a time: s1 collects the hour digits, s2 the minute digits.
    x = 0
    s1 = ''
    s2 = ''
    for i in n:
        if i == ':':
            x = 1
        elif x == 0:
            s1 = s1 + i
        elif x == 1:
            s2 = s2 + i
    # Count forward minute by minute until the minute value, reversed,
    # equals the hour string -- i.e. the clock reads as a palindrome.
    # NOTE(review): n2 is never wrapped modulo 60, so the printed wait
    # can exceed a real clock's minute range -- confirm intended rules.
    n2 = int(s2)
    while True:
        xy = str(n2)
        yx = xy[::-1]
        if s1 == yx:
            # Minutes remaining until the palindromic reading.
            print(n2-int(s2))
            break
        else:
            n2 = n2 + 1
# Script entry: read one time string and report the wait.
n = input()
time_pallindrome(n)
| [
"noreply@github.com"
] | sbd2309.noreply@github.com |
f9746b84c22258991234cf314ab1ee76520e56cb | a3d1e8a67ed43e1bea59180cc51c49f25a961a49 | /scripts/lib/python/ld/edseg/exp/paula_export.py | 0477c0f5b5c5b754c865dc56f15353ab46931e54 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | WladimirSidorenko/TextNormalization | 38b076d88a2de40dae72dc8b4096e354b774f2f4 | ac645fb41260b86491b17fbc50e5ea3300dc28b7 | refs/heads/master | 2020-04-14T16:48:42.541883 | 2019-09-29T23:38:28 | 2019-09-29T23:38:28 | 163,962,092 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class PaulaExporter(object):
    '''EDSeg PAULA (Potsdamer Austauschformat für linguistische
    Annotation) exporter
    '''
    # Naming constants used to build layer / markable identifiers.
    EDSEG = 'edseg'
    SDS = 'sds'
    EDS = 'eds'
    # Feature layers created per EDS markable.
    EDS_FEATS = ('type', 'parent')

    def __init__(self, project, path=None):
        # `project` is the PAULA project wrapper; `path` is the optional
        # export destination used by on_finish().
        self.project = project
        self.path = path
        self._model = project.model
        # First (and presumably only) token layer of the model -- confirm.
        self._tok_layer = self._model.getTokenLayer()[0]
        # Layers and running ids are created lazily in on_start().
        self._eds_layer = None
        self._eds_id = None
        self._sds_layer = None
        self._sds_id = None
        self._feat_layers = None

    def on_start(self):
        '''Create all segmentation-related markable and feature layers
        and reset SDS and EDS counters.
        '''
        self._eds_layer = self._model.createMarkLayer(
            '{0}.{1}'.format(self.EDSEG, self.EDS))
        self._eds_id = 0
        self._sds_layer = self._model.createMarkLayer(
            '{0}.{1}'.format(self.EDSEG, self.SDS))
        self._sds_id = 0
        create_feat = self._model.createFeatLayer
        # One feature layer per EDS feature name, e.g. "edseg.eds_type".
        self._feat_layers = dict((feat,
                                  create_feat('{0}.{1}_{2}'.format(self.EDSEG,
                                                                   self.EDS,
                                                                   feat)))
                                 for feat in self.EDS_FEATS)

    def on_finish(self):
        '''Write all segmentation-related layers and export the project
        to the given path.
        '''
        self._eds_layer.write()
        self._sds_layer.write()
        # NOTE: itervalues()/iteritems() below mark this file as Python 2.
        for layer in self._feat_layers.itervalues():
            layer.write()
        if self.path is not None:
            self.project.export(self.path)

    def on_sds(self, sent_no, sds):
        '''Add new SDS and EDS markables as well as the corresponding
        EDS features.
        '''
        sds_tokens = []
        # Every EDS inside the sentence becomes its own markable; the SDS
        # markable spans the union of their tokens.
        for eds in sds.iter('EDS'):
            tokens = [self._tok_layer[tok['pid']]
                      for tok in list(eds.iter_terminals())]
            # if not tokens:
            #     continue
            self._add_eds(tokens, eds)
            sds_tokens.extend(tokens)
        self._add_sds(sds_tokens)

    def _add_eds(self, tokens, node):
        '''Create one EDS markable over *tokens* and attach its features.'''
        pid = '{0}_{1}'.format(self.EDS, self._eds_id)
        self._eds_layer.createMark(pid, tokens)
        mark = self._eds_layer[pid]
        for name, value in node.feats.iteritems():
            if name == 'embedded':
                # An "embedded" flag is rewritten as a `parent` feature
                # pointing at the previously created EDS markable.
                name = 'parent'
                value = '{0}_{1}'.format(self.EDS, self._eds_id - 1)
            layer = self._feat_layers[name]
            layer.addEntry(mark, None, value)
        self._eds_id += 1

    def _add_sds(self, tokens):
        '''Create one SDS markable spanning *tokens*.'''
        self._sds_layer.createMark('{0}_{1}'.format(self.SDS, self._sds_id),
                                   tokens)
        self._sds_id += 1
| [
"wlsidorenko@gmail.com"
] | wlsidorenko@gmail.com |
c658e2a34100833982bf9ec68969972ef4a33de2 | 2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac | /Python/Core/Lib/new.py | c1ccf4e5e3837ce0f1ac30066a7c459f5021be4d | [] | no_license | FingerLeakers/DanderSpritz_docs | f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364 | d96b6a71c039b329f9f81544f645857c75360e7f | refs/heads/master | 2021-01-25T13:05:51.732149 | 2018-03-08T01:22:49 | 2018-03-08T01:22:49 | 123,527,268 | 2 | 0 | null | 2018-03-02T03:48:31 | 2018-03-02T03:48:30 | null | UTF-8 | Python | false | false | 805 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: new.py
"""Create new objects of various types. Deprecated.
This module is no longer required except for backward compatibility.
Objects of most types can now be created by calling the type object.
"""
from warnings import warnpy3k
warnpy3k("The 'new' module has been removed in Python 3.0; use the 'types' module instead.", stacklevel=2)
del warnpy3k
from types import ClassType as classobj
from types import FunctionType as function
from types import InstanceType as instance
from types import MethodType as instancemethod
from types import ModuleType as module
from types import CodeType as code | [
"francisck@protonmail.ch"
] | francisck@protonmail.ch |
22b8133b627fb154a59167a3606b30dba808f8ff | 7196fa6e5bef1c2714dc49bb25eb11cfd531c07d | /setup.py | 4fe5e05474b981cd135b64e479da89895ab20934 | [
"MIT"
] | permissive | dinarior/daft | 0da226bc951195c6c1c92b8cdff87bc303793f60 | c982155f5fc27d1da1617ebf8a687af388cd0efa | refs/heads/master | 2020-05-09T13:40:50.913111 | 2019-04-16T12:42:16 | 2019-04-16T12:42:16 | 181,163,124 | 2 | 2 | MIT | 2019-07-02T06:06:11 | 2019-04-13T11:49:00 | Python | UTF-8 | Python | false | false | 1,246 | py | #!/usr/bin/env python
try:
    from setuptools import setup, Extension
    setup, Extension  # reference to silence "unused import" linters
except ImportError:
    from distutils.core import setup
    from distutils.extension import Extension
    setup, Extension

import os
import re
import sys

# `python setup.py publish` shortcut: build an sdist and upload it.
if sys.argv[-1] == "publish":
    os.system("python setup.py sdist upload")
    sys.exit()

# Pull __version__ out of daft.py without importing it (importing would
# require the dependencies to already be installed). Context managers
# replace the previous bare open(...).read() calls, which leaked the
# file handles.
vre = re.compile("__version__ = \"(.*?)\"")
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                       "daft.py")) as version_file:
    m = version_file.read()
version = vre.findall(m)[0]

with open("README.rst") as readme_file:
    long_description = readme_file.read()

setup(
    name="daft",
    version=version,
    description="PGM rendering at its finest.",
    long_description=long_description,
    author="David W. Hogg & Daniel Foreman-Mackey",
    author_email="danfm@nyu.edu",
    url="http://daft-pgm.org",
    py_modules=["daft"],
    package_data={"": ["LICENSE.rst"]},
    include_package_data=True,
    install_requires=[
        "numpy",
        "matplotlib"
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
    ],
)
| [
"danfm@nyu.edu"
] | danfm@nyu.edu |
c1858d8a5334c24b9fa6802424d3e36b8b1618bc | cf5b54b2c84437d9e72575589812b88921d4430c | /server/message/route.py | 3fa6446bb680b052142382440f5499ae04a5073f | [] | no_license | hw233/gameserver3 | 90fc3b8b8e8f809b7c3197fc97145fb264166b93 | d86ef3b3313ef51df5ba01bc700877827b0a81cd | refs/heads/master | 2022-01-13T07:40:38.211070 | 2019-05-11T02:15:14 | 2019-05-11T02:15:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,903 | py | #coding: utf-8
from message.base import *
import random
class SimpleRoute:
    """Stateless service router: picks a service id for a command.

    Resolved command -> service-id lists are memoised in ``self.cache``.
    """

    def __init__(self):
        # command -> list of candidate service ids
        self.cache = {}

    def get_any_service(self, server, header):
        """Return one service id for ``header.command``, chosen at random.

        An explicit non-negative ``header.route`` short-circuits the
        lookup. Returns None when the command is unknown.
        """
        if header.route >= 0:
            return header.route
        ids = self.cache.get(header.command, None)
        if ids is None:  # identity check replaces `== None`
            cmd = MessageMapping.get_message_def(header.command)
            if cmd is None:
                return None
            services = server.conf.services[cmd.service]
            ids = self.cache[header.command] = [svc.id for svc in services]
        if len(ids) == 1:
            return ids[0]
        # random.choice replaces the manual randint/index dance.
        return random.choice(ids)

    def get_all_service(self, server, header):
        """Return every service id registered for ``header.command``.

        Returns None when the command is unknown or carries no service.
        """
        ids = self.cache.get(header.command, None)
        if ids is None:
            cmd = MessageMapping.get_message_def(header.command)
            if cmd is None or cmd.service is None:
                return None
            services = server.conf.services[cmd.service]
            ids = self.cache[header.command] = [svc.id for svc in services]
        return ids
class UserRoute:
    """User-affine router: prefers the service co-located with the cache
    server that owns ``header.user``.
    """

    def __init__(self):
        # Host names of the cache servers, resolved lazily on first use.
        self.cache_servers = []
        # command -> {service id: host name}
        self.cache = {}

    def get_any_service(self, server, header):
        """Return a service id for ``header.command``, preferring one on
        the same host as the user's cache server; random otherwise.
        """
        if len(self.cache_servers) == 0:
            cache_services = server.conf.services["CacheService"]
            self.cache_servers = [svc.host.name for svc in cache_services]
        better_server = self.cache_servers[header.user % len(self.cache_servers)]
        ids = self.cache.get(header.command, None)
        if ids is None:
            cmd = MessageMapping.get_message(header.command)
            services = server.conf.services[cmd.service]
            ids = self.cache[header.command] = {}
            for svc in services:
                ids[svc.id] = svc.host.name
        if len(ids) == 1:
            # Python 3 fix: dict views are not indexable (`keys()[0]`);
            # take the sole key via the iterator instead.
            return next(iter(ids))
        for id, name in ids.items():
            if better_server == name:
                return id
        # Python 3 fix: materialise the keys before random selection.
        return random.choice(list(ids))

    def get_all_service(self, server, header):
        """Return every service id registered for ``header.command``."""
        ids = self.cache.get(header.command, None)
        if ids is None:
            cmd = MessageMapping.get_message_def(header.command)
            services = server.conf.services[cmd.service]
            ids = self.cache[header.command] = {}
            for svc in services:
                ids[svc.id] = svc.host.name
        # Python 3 fix: return a real list as the Python 2 keys() call did.
        return list(ids)
ROUTE = SimpleRoute()
#ROUTE = UserRoute() | [
"87399497@qq.com"
] | 87399497@qq.com |
dd1b2907c83b59b45f29cd3e2ced242096ff6fab | 14d4c64c2a926c013fa40d42c73318c44fb3c4e8 | /scripts/dirichlet_3d_triangle_plot.py | e612d899597d49377839e90e16bd005ffc738914 | [
"MIT"
] | permissive | qiuhuachuan/pyprobml | 7e70a733cbae42ddaef12563594b50833a02b9bf | 8c83f8d565b8979f4757764d1bb3bd284c238a32 | refs/heads/master | 2023-07-11T22:01:33.473460 | 2021-08-16T03:06:30 | 2021-08-16T03:06:30 | 332,373,785 | 0 | 0 | MIT | 2021-05-18T07:24:25 | 2021-01-24T05:23:57 | null | UTF-8 | Python | false | false | 1,802 | py | import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
import scipy.spatial
import matplotlib.tri as mtri
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
#This class comes from http://stackoverflow.com/questions/22867620/putting-arrowheads-on-vectors-in-matplotlibs-3d-plot
class Arrow3D(FancyArrowPatch):
    """FancyArrowPatch whose endpoints live in 3-D data space.

    The 2-D positions are recomputed from the stored 3-D vertices on
    every draw, so the arrow tracks the axes' current projection.
    """
    def __init__(self, xs, ys, zs, *args, **kwargs):
        # Placeholder 2-D endpoints; the real ones are set in draw().
        FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
        self._verts3d = xs, ys, zs
    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        # Project the stored 3-D endpoints into 2-D display coordinates.
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        #xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.axes.M)
        self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
        FancyArrowPatch.draw(self, renderer)
# Vertices of the 2-simplex embedded in 3-D (the figure is saved as
# "dirichletSimplex.pdf" and labels each axis theta_i).
x = [1, 0, 0]
y = [0, 1, 0]
z = [0, 0, 1]
# Triangulate the (x, y) projection of the simplex face.
pts = np.vstack([x,y]).T
tess = scipy.spatial.Delaunay(pts)
# NOTE(review): Delaunay.vertices is a legacy alias of .simplices in
# recent SciPy -- confirm it still exists on the targeted version.
tri = tess.vertices
triang = mtri.Triangulation(x=pts[:, 0],y=pts[:,1], triangles=tri)
fig = plt.figure()
#ax = fig.gca(projection='3d')
ax = plt.axes(projection="3d")
ax.plot_trisurf(triang, z, alpha = .3, color = 'red', edgecolors = 'blue')
ax.set_axis_off()
# Draw one coordinate-axis arrow plus a theta label per dimension.
for i in range(3):
    EndPs = [[0,0],[0,0],[0,0]]
    EndPs[i][1] = 1.4
    art = Arrow3D(EndPs[0], EndPs[1], EndPs[2], mutation_scale=20, lw=3, arrowstyle="-|>", color="black")
    ax.add_artist(art)
    # NOTE(review): `theta` is unused, and '$\theta_' contains a literal
    # tab escape (\t); the label actually drawn below uses a raw string.
    theta = '$\theta_' + str(i) + '$'
    EndPs = [[0,0],[0,0],[0,0]]
    if i == 0:
        EndPs[i][1] = 1
        EndPs[2][1] = -.2
    else:
        EndPs[i][1] = 1
    ax.text(EndPs[0][1], EndPs[1][1], EndPs[2][1], r'$\theta_%s$' % (i + 1),size=20)
ax.view_init(elev=30, azim=20)
ax.dist = 15
plt.draw()
plt.tight_layout()
plt.show()
pml.savefig('dirichletSimplex.pdf')
| [
"murphyk@gmail.com"
] | murphyk@gmail.com |
513280a0bf114c9875c8169c50851985dfbd6ab5 | a39fbe6568e2df509d3889b4b43aab27c5ad04aa | /__unported__/project_cost_plan_sale/wizard/__init__.py | 006580d522bcd3b04d2d3cc58a09e152ebd363d9 | [] | no_license | OpusVL/odoo-project-management | 796793d1fa7558ad2b676748b23d5c0311c42aa2 | 97ac8dbfddee85d06a4ae284c04afbed5349d82e | refs/heads/8.0 | 2021-01-13T02:26:26.700928 | 2015-08-24T12:32:29 | 2015-08-24T12:32:29 | 24,458,491 | 6 | 14 | null | 2015-08-23T06:19:58 | 2014-09-25T13:12:36 | Python | UTF-8 | Python | false | false | 1,104 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Eficent (<http://www.eficent.com/>)
# Jordi Ballester Alomar <jordi.ballester@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_sale_plan_make_sale
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"jordi.ballester@eficent.com"
] | jordi.ballester@eficent.com |
85bca0f366f5e8d13ce2ae4dd91010eed0762f51 | 130a98632d2ab4c171503b79e455b7aa27a1dda4 | /models/official/vision/detection/modeling/base_model.py | e9dd5c6d11c801f8d7c9b296c6b4ae8d749d46fe | [
"MIT",
"Apache-2.0"
] | permissive | aboerzel/German_License_Plate_Recognition | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 | refs/heads/master | 2023-01-30T18:08:37.339542 | 2023-01-07T07:41:36 | 2023-01-07T07:41:36 | 245,586,430 | 34 | 12 | MIT | 2023-01-07T07:41:37 | 2020-03-07T07:16:51 | Python | UTF-8 | Python | false | false | 4,631 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import re
import tensorflow as tf
from official.vision.detection.modeling import checkpoint_utils
from official.vision.detection.modeling import learning_rates
from official.vision.detection.modeling import optimizers
def _make_filter_trainable_variables_fn(frozen_variable_prefix):
"""Creates a function for filtering trainable varialbes."""
def _filter_trainable_variables(variables):
"""Filters trainable varialbes.
Args:
variables: a list of tf.Variable to be filtered.
Returns:
filtered_variables: a list of tf.Variable filtered out the frozen ones.
"""
# frozen_variable_prefix: a regex string specifing the prefix pattern of
# the frozen variables' names.
filtered_variables = [
v for v in variables if not frozen_variable_prefix or
not re.match(frozen_variable_prefix, v.name)
]
return filtered_variables
return _filter_trainable_variables
class Model(object):
  """Base class for model function.

  Holds optimizer/learning-rate construction, variable freezing and
  regularization configuration, and checkpoint-restoration plumbing shared by
  concrete detection models.  Subclasses implement the abstract build_* hooks.
  """

  # NOTE(review): `__metaclass__` is the Python 2 syntax and is ignored in
  # Python 3, so @abc.abstractmethod is not actually enforced there — confirm.
  __metaclass__ = abc.ABCMeta

  def __init__(self, params):
    # Mixed precision: enable the bfloat16 Keras policy when requested.
    self._use_bfloat16 = params.architecture.use_bfloat16
    if params.architecture.use_bfloat16:
      policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
          'mixed_bfloat16')
      tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
    # Optimization.
    self._optimizer_fn = optimizers.OptimizerFactory(params.train.optimizer)
    self._learning_rate = learning_rates.learning_rate_generator(
        params.train.total_steps, params.train.learning_rate)
    # Regex prefix of variables excluded from training (frozen).
    self._frozen_variable_prefix = params.train.frozen_variable_prefix
    # Regex selecting which variables receive L2 weight decay.
    self._regularization_var_regex = params.train.regularization_variable_regex
    self._l2_weight_decay = params.train.l2_weight_decay
    # Checkpoint restoration.
    self._checkpoint = params.train.checkpoint.as_dict()
    # Summary.
    self._enable_summary = params.enable_summary
    self._model_dir = params.model_dir

  @abc.abstractmethod
  def build_outputs(self, inputs, mode):
    """Build the graph of the forward path."""
    pass

  @abc.abstractmethod
  def build_model(self, params, mode):
    """Build the model object."""
    pass

  @abc.abstractmethod
  def build_loss_fn(self):
    """Build the loss function."""
    pass

  def post_processing(self, labels, outputs):
    """Post-processing function; identity by default, subclasses may override."""
    return labels, outputs

  def model_outputs(self, inputs, mode):
    """Build the model outputs (thin wrapper over build_outputs)."""
    return self.build_outputs(inputs, mode)

  def build_optimizer(self):
    """Returns train_op to optimize total loss."""
    # Sets up the optimizer with the configured learning-rate schedule.
    return self._optimizer_fn(self._learning_rate)

  def make_filter_trainable_variables_fn(self):
    """Creates a function for filtering trainable variables."""
    return _make_filter_trainable_variables_fn(self._frozen_variable_prefix)

  def weight_decay_loss(self, trainable_variables):
    """Returns the L2 weight-decay loss over the matching trainable variables.

    When no regularization regex is configured, all variables are included.
    """
    reg_variables = [
        v for v in trainable_variables
        if self._regularization_var_regex is None or
        re.match(self._regularization_var_regex, v.name)
    ]
    return self._l2_weight_decay * tf.add_n(
        [tf.nn.l2_loss(v) for v in reg_variables])

  def make_restore_checkpoint_fn(self):
    """Returns scaffold function to restore parameters from v1 checkpoint."""
    # 'skip_checkpoint_variables' is optional in the checkpoint config.
    if 'skip_checkpoint_variables' in self._checkpoint:
      skip_regex = self._checkpoint['skip_checkpoint_variables']
    else:
      skip_regex = None
    return checkpoint_utils.make_restore_checkpoint_fn(
        self._checkpoint['path'],
        prefix=self._checkpoint['prefix'],
        skip_regex=skip_regex)

  def eval_metrics(self):
    """Returns tuple of metric function and its inputs for evaluation."""
    raise NotImplementedError('Unimplemented eval_metrics')
| [
"andreas.boerzel@gmx.de"
] | andreas.boerzel@gmx.de |
c5c7f1387c04c1dcf82c8fca038f2a525e9e0f32 | 616d502a84d123a06cf847141b0e553f5db66f3b | /scripts/hmmer3/extract_hits_from_hmm_output.py | 36c656b5591a343a7a986ab1a8c74662e092cc98 | [
"MIT"
] | permissive | wangpanqiao/MAVR | a95b225484a80fdea490c0ad909a32b7ebfef3bb | c1f86a408ca2e369b2fac874240190bc08994d7b | refs/heads/master | 2020-05-23T11:52:08.964020 | 2019-05-10T08:21:26 | 2019-05-10T08:21:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,152 | py | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import os
import sys
import argparse
from multiprocessing import Pool
from Bio import SearchIO
from RouToolPa.Tools.LinuxTools import CGAS
from RouToolPa.Routines import FileRoutines # make_list_of_path_to_files, split_filename, check_path, save_mkdir
def make_list_of_path_to_files_from_comma_sep_string(string):
    """Split a comma-separated string and expand each entry to file paths."""
    entries = string.split(",")
    return FileRoutines.make_list_of_path_to_files(entries)
# Command-line interface for extracting significant hits from HMMER output.
parser = argparse.ArgumentParser()

parser.add_argument("-i", "--input", action="store", dest="input", required=True,
                    type=make_list_of_path_to_files_from_comma_sep_string,
                    help="Comma-separated list of files with hmm output")
parser.add_argument("-f", "--format", action="store", dest="format", required=True,
                    help="Format of input hmm file.")
parser.add_argument("-o", "--output_file", action="store", dest="output", default="stdout",
                    help="Output file")
parser.add_argument("-t", "--threads", action="store", dest="threads", default=1, type=int,
                    help="Number of threads to handle input")
parser.add_argument("-d", "--top_hits_dir", action="store", dest="top_hits_dir", default="top_hits_dir/",
                    type=FileRoutines.check_path,
                    help="Directory to write intermediate(splited) output")
parser.add_argument("-r", "--retain_splited_output", action="store_true", dest="retain",
                    help="Retain splited output")

args = parser.parse_args()

# Ensure the directory for per-file intermediate results exists.
FileRoutines.safe_mkdir(args.top_hits_dir)
def handle_input(filename):
    """Extract the included (significant) hits from one HMMER report file.

    Writes a tab-separated table (query, hit, evalue, bitscore) either to
    stdout or to <top_hits_dir>/<prefix>.top_hits, depending on --output_file.
    Runs in a worker process; reads the global `args`.
    """
    sys.stdout.write("Handling %s\n" % filename)
    prefix = FileRoutines.split_filename(filename)[1]
    index_file = "%s.tmp.idx" % prefix
    # Index the (potentially large) report on disk instead of parsing it
    # wholly into memory; the index file is removed afterwards.
    hmm_dict = SearchIO.index_db(index_file, filename, args.format)
    if args.output == "stdout":
        out_fd = sys.stdout
    else:
        out_fd = open("%s%s.top_hits" % (args.top_hits_dir, prefix), "w")
        out_fd.write("#query\thit\tevalue\tbitscore\n")
    for family in hmm_dict:
        for hit in hmm_dict[family]:
            # `is_included` marks hits passing the inclusion thresholds.
            if hit.is_included:
                out_fd.write("%s\t%s\t%s\t%s\n" % (family, hit.id, hit.evalue, hit.bitscore))
    if args.output != "stdout":
        out_fd.close()
    os.remove(index_file)
# Write a single header when streaming everything to stdout (file outputs get
# their own header inside handle_input).
if args.output == "stdout":
    sys.stdout.write("#query\thit\tevalue\tbitscore\n")

# Process the input files in parallel worker processes.
process_pool = Pool(args.threads)
process_pool.map(handle_input, args.input)

if args.output != "stdout":
    # Concatenate the per-file tables into the final output file.
    CGAS.cat(["%s%s" % (args.top_hits_dir, filename) for filename in os.listdir(args.top_hits_dir)], output=args.output)
    if not args.retain:
        # Fix: os.remove() cannot delete a directory (raises OSError);
        # the intermediate directory and its contents need shutil.rmtree().
        import shutil
        shutil.rmtree(args.top_hits_dir)
"""
hmm_dict = SearchIO.index_db("temp.idx", args.input, args.format)
out_fd = sys.stdout if args.output == "stdout" else open(args.output, "w")
out_fd.write("#query\thit\tevalue\tbitscore\n")
for family in hmm_dict:
#print hmm_dict[key]
for hit in hmm_dict[family]:
if hit.is_included:
out_fd.write("%s\t%s\t%s\t%s\n" % (family, hit.id, hit.evalue, hit.bitscore))
if args.output != "stdout":
out_fd.close()
os.remove("temp.idx")
""" | [
"mahajrod@gmail.com"
] | mahajrod@gmail.com |
74e546fb390f9ac11660ae33d81aa7781da0a00d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_displayed.py | a9bca61a20de2d55bacd0b3a1bdb3ccdc1662730 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py |
from xai.brain.wordbase.verbs._display import _DISPLAY
# Class header: past-tense form of the verb "display".
class _DISPLAYED(_DISPLAY, ):

    def __init__(self,):
        # Initialise the base verb entry first, then set identity fields.
        _DISPLAY.__init__(self)
        self.jsondata = {}
        self.basic = "display"
        self.specie = 'verbs'
        self.name = "DISPLAYED"
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
aa218e7a99f15018fdfc44e6ac4d289c6d715c49 | 4f2f71beee2fb016550598996e100ce176100dcb | /python/dominoes/dominoes.py | 7a9f528e9cbca8856438a259a7fb79280b681eaf | [] | no_license | charles-wangkai/exercism | d2723bd160573b2d3ee9051ff63972e5be900d87 | c283a5078e3d0f05ff3d86b2c208ae086d3896a4 | refs/heads/master | 2023-05-11T13:11:23.776323 | 2023-04-30T17:40:56 | 2023-04-30T17:40:56 | 102,832,444 | 2 | 4 | null | 2020-03-14T15:49:13 | 2017-09-08T07:31:36 | C++ | UTF-8 | Python | false | false | 727 | py | import itertools
def can_chain(dominoes):
    """Return a valid closed chain of the stones, [] for no stones, or None.

    Brute force: try every ordering and attempt to orient it into a chain.
    """
    if not dominoes:
        return []
    for ordering in itertools.permutations(dominoes):
        chain = check(ordering)
        if chain is not None:
            return chain
    return None


def check(permutation):
    """Try to orient one ordering of stones into a closed chain.

    Returns the oriented chain (flipping stones as needed) or None when the
    ordering cannot be linked end to end.
    """
    first_value, open_end = permutation[0]
    chain = [permutation[0]]
    for stone in permutation[1:]:
        left, right = stone
        if left == open_end:
            open_end = right
            chain.append(stone)
        elif right == open_end:
            open_end = left
            chain.append((right, left))
        else:
            return None
    # A valid chain must close back onto the first stone's starting value.
    return chain if open_end == first_value else None
| [
"charles.wangkai@gmail.com"
] | charles.wangkai@gmail.com |
1cab58bc78a9b65958cbd4c9fe897f7e4d7e85ba | ec53949dafa4b6ad675d679b05ed7c83fef2c69a | /DataStructuresAndAlgo/Graphs/Prims.py | 93d5e2128cbcc2ea050a6b0ca10ae33a99821214 | [] | no_license | tpotjj/Python | 9a5a20a53cd7a6ec14386c1db8ce155e0fc9ab8a | ca73c116ada4d05c0c565508163557744c86fc76 | refs/heads/master | 2023-07-11T16:37:10.039522 | 2021-08-14T11:17:55 | 2021-08-14T11:17:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | import sys
class Graph:
def __init__(self, vertexNum, edges, nodes):
self.edges = edges
self.nodes = nodes
self.vertexNum = vertexNum
self.MST = []
def printSolution(self):
print("Edge : Weight")
for s, d, w in self.MST:
print("%s - %s: %s" % (s, d, w))
def primsAlgo(self):
visited = [0] * self.vertexNum
edgeNum = 0
visited[0] = True
while edgeNum < self.vertexNum -1:
min = sys.maxsize
for i in range(self.vertexNum):
if visited[i]:
for j in range(self.vertexNum):
if ((not visited[j] and self.edges[i][j])):
if min > self.edges[i][j]:
min = self.edges[i][j]
s = i
d = j
self.MST.append([self.nodes[s], self.nodes[d], self.edges[s][d]])
visited[d] = True
edgeNum += 1
self.printSolution()
# Demo: 5-vertex graph as an adjacency matrix (0 = no edge).
edges = [[0, 10, 20, 0, 0],
         [10, 0, 30, 5, 0],
         [20, 30, 0, 15, 6],
         [0, 5, 15, 0, 8],
         [0, 0, 6, 8, 0]]
nodes = ["A","B","C","D","E"]
g = Graph(5, edges, nodes)
# Build the minimum spanning tree from vertex "A" and print it.
g.primsAlgo()
| [
"joris97jansen@gmail.com"
] | joris97jansen@gmail.com |
3fa1007bbaf5e3c8fa066101231c98c47a3b3e13 | 6015cb9ca97544e145556a64d8984cb2d763baf9 | /scripts/addons/uv_toolkit/operators/unwrap_selected.py | c6f538ec58c880b859dd5d72f8d9d1e541a25698 | [
"MIT"
] | permissive | belzecue/blender-custom_config | 97b02e5cc2af60ee2c344fe98ba48aabab87570d | 04b671bf373f54356a8bf21dd5045eab1e7e0563 | refs/heads/master | 2022-12-04T17:20:35.490089 | 2020-08-24T17:11:00 | 2020-08-24T17:11:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | import bpy
from bpy.props import BoolProperty
class UnwrapSelected(bpy.types.Operator):
    """Blender operator: unwrap only the selected UVs, leaving others fixed."""
    bl_idname = "uv.toolkit_unwrap_selected"
    bl_label = "Unwrap Selected (UVToolkit)"
    bl_description = "Unwrap selected"
    bl_options = {'REGISTER', 'UNDO'}

    # Whether to re-mark seams from island borders before unwrapping.
    update_seams: BoolProperty(name="Update seams", default=True)

    @classmethod
    def poll(cls, context):
        # Only available while editing a mesh.
        return context.mode == 'EDIT_MESH'

    def execute(self, context):
        """Pin the non-selected UVs, unwrap, then remove the temporary pins.

        NOTE(review): the invert/pin sequence appears to pin everything
        outside the user's selection so only the selection moves — confirm
        in Blender before changing the call order; it is behavior-critical.
        """
        tool_settings = context.scene.tool_settings
        if tool_settings.use_uv_select_sync:
            # The pin/selection trick below does not work with UV sync on.
            self.report({'INFO'}, 'Need to disable UV Sync')
            return {'CANCELLED'}
        if self.update_seams:
            bpy.ops.uv.seams_from_islands(mark_seams=True, mark_sharp=False)
        bpy.ops.uv.pin(clear=True)
        bpy.ops.uv.select_all(action='INVERT')
        bpy.ops.uv.pin(clear=False)
        bpy.ops.uv.unwrap(method='ANGLE_BASED', fill_holes=True, correct_aspect=True, margin=0)
        bpy.ops.uv.pin(clear=True)
        bpy.ops.uv.select_all(action='INVERT')
        return {'FINISHED'}
| [
"tilapiatsu@hotmail.fr"
] | tilapiatsu@hotmail.fr |
c06eecabeec08e3522e78f9fa9c089509e0556f2 | abf3ea33a5fa7457d1cd735310700df9c784d1ae | /CST100/Chapter_4/Chapter_4/Ch_4_Solutions/Ch_4_Projects/4.4/arrays.py | 005cf635a5c03d3e8dd00b8ec4ca4baa73865477 | [] | no_license | hieugomeister/ASU | 57b8a2f604a27ce339675f40d3b042ccf57efb86 | 3e9254cebeaeb1c57ae912d6e5a02af7531128e8 | refs/heads/master | 2020-12-30T16:59:17.801581 | 2017-05-12T22:44:44 | 2017-05-12T22:44:44 | 91,046,525 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,229 | py | """
File: arrays.py
Project 4.4
Adds methods insert and remove to insert or remove an item
at a given position in the array.
An Array is a restricted list whose clients can use
only [], len, iter, and str.
To instantiate, use
<variable> = array(<capacity>, <optional fill value>)
The fill value is None by default.
"""
class Array(object):
    """A restricted-list array supporting [], len, iter, str, insert, pop.

    The physical capacity doubles when full and halves when three quarters
    empty, but never drops below the capacity given at construction.
    ``len(a)`` reports the physical capacity; ``a.size()`` the item count.
    """

    def __init__(self, capacity, fillValue=None):
        """Capacity is the initial physical size of the array.
        fillValue is placed at each position."""
        self._items = list()
        self._logicalSize = 0
        # Remember the original capacity and fill value for grow/shrink.
        self._capacity = capacity
        self._fillValue = fillValue
        for count in range(capacity):
            self._items.append(fillValue)

    def __len__(self):
        """-> The physical capacity of the array (not the item count)."""
        return len(self._items)

    def __str__(self):
        """-> The string representation of the array."""
        return str(self._items)

    def __iter__(self):
        """Supports traversal with a for loop (over all physical cells)."""
        return iter(self._items)

    def __getitem__(self, index):
        """Subscript operator for access at index.
        Precondition: 0 <= index < size()"""
        if index < 0 or index >= self.size():
            raise IndexError("Array index out of bounds")
        return self._items[index]

    def __setitem__(self, index, newItem):
        """Subscript operator for replacement at index.
        Precondition: 0 <= index < size()"""
        if index < 0 or index >= self.size():
            raise IndexError("Array index out of bounds")
        self._items[index] = newItem

    def size(self):
        """-> The number of items (logical size) in the array."""
        return self._logicalSize

    def grow(self):
        """Doubles the physical size of the array."""
        # Append fill values for the new cells in the underlying list.
        for count in range(len(self)):
            self._items.append(self._fillValue)

    def shrink(self):
        """Halves the physical size, but never below the default capacity."""
        newSize = max(self._capacity, len(self) // 2)
        for count in range(len(self) - newSize):
            self._items.pop()

    def insert(self, index, newItem):
        """Inserts item at index in the array.

        Indexes at or past the end append; negative indexes insert at 0.
        """
        if self.size() == len(self):
            self.grow()
        if index >= self.size():
            # Append past the current logical end.
            self._items[self.size()] = newItem
        else:
            index = max(index, 0)
            # Shift items down by one position to open a hole at index.
            for i in range(self.size(), index, -1):
                self._items[i] = self._items[i - 1]
            self._items[index] = newItem
        # The logical size must grow on BOTH branches (append and shift).
        self._logicalSize += 1

    def pop(self, index):
        """Removes and returns item at index in the array.
        Precondition: 0 <= index < size()"""
        if index < 0 or index >= self.size():
            raise IndexError("Array index out of bounds")
        itemToReturn = self._items[index]
        # Shift items up by one position to close the gap.
        for i in range(index, self.size() - 1):
            self._items[i] = self._items[i + 1]
        # Reset the vacated slot to the fill value.
        self._items[self.size() - 1] = self._fillValue
        self._logicalSize -= 1
        # Shrink when only a quarter of an enlarged array is in use.
        if self.size() <= len(self) // 4 and len(self) > self._capacity:
            self.shrink()
        return itemToReturn
def main():
    """Test code for modified Array class."""
    a = Array(5)
    print ("Physical size:", len(a))
    print ("Logical size:", a.size())
    print ("Items:", a)
    # Insert 0..3, each at the front.
    for item in range(4):
        a.insert(0, item)
    print ("Items:", a)
    a.insert(1, 77)
    print ("Items:", a)
    # Index past the end: appends (and triggers a grow at capacity).
    a.insert(10, 10)
    print ("Items:", a)
    print(a.pop(3))
    print ("Items:", a)
    # NOTE(review): the loop plus the final call pop 7 items while only 5
    # remain at this point — expect an IndexError; confirm intended counts.
    for count in range(6):
        print(a.pop(0), end = " ")
    print (a.pop(0))

if __name__ == "__main__":
    main()
| [
"hieupham45@gmail.com"
] | hieupham45@gmail.com |
62ebbdcb052e719e3f4d425a3c45a1963e8338de | bcca2caf10c223c143d276e1ce84457ca97bb9f8 | /courses/api/permissions.py | 91c019daf07f3a898f44d5b69f8775686d72132b | [] | no_license | sadakchap/e-learning | 40d3a78c80dd705206f49e9a4070bc06b5020184 | 8e78d9cfe2443646d314ecc4b7554e16396ed526 | refs/heads/master | 2022-12-10T06:35:30.589661 | 2021-03-19T00:34:57 | 2021-03-19T00:34:57 | 192,658,464 | 0 | 2 | null | 2022-12-08T05:49:19 | 2019-06-19T04:29:37 | Python | UTF-8 | Python | false | false | 210 | py | from rest_framework.permissions import BasePermission
class IsEnrolled(BasePermission):
    """Grant object-level access only to users enrolled in the course."""

    def has_object_permission(self, request, view, obj):
        enrolled = obj.students.filter(id=request.user.id)
        return enrolled.exists()
| [
"aliceprerna@gmail.com"
] | aliceprerna@gmail.com |
dfd90361b45ad4b6db3ff30a994e304127794772 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /eSxvC27HnDtM6MCCb_5.py | 93804ad681efbdde9d5d52315522b54f74f6d717 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,297 | py | """
You are given a `base` (int), a list of `values` (list), and a `num` (int) to
be converted.
You are to use the values to translate the number into the base. Return
`False` if there aren't enough/too little values in the value list (it should
have the same length as the `base`). The values in value list starts with
elements representing values from zero to base - 1. Return the converted
number in string type.
### Examples
base_n(10, [0, 1, 3, 2, 4, 5, 6, 7, 8, 9], 32) ➞ "23"
base_n(8, ["zero", "one", "two", "three", "four", "five", "six", "seven"], 128 ) ➞ "twozerozero"
base_n(2, [1, 0], 8) ➞ "0111"
base_n(10, list("q*CYj#r-3a"), 1234567890) ➞ "*CYj#r-3aq"
### Notes
The number to be translated is always in BASE-10, non-negative and an integer.
"""
def base_n(base, values, num):
    """Convert non-negative base-10 integer `num` to `base`, spelling each
    digit with values[digit], and return the result as a string.

    Returns False when len(values) != base.  Leading zero-digits are
    stripped whole-token (so multi-character digit values are safe);
    num == 0 yields the zero digit itself.
    """
    values = [str(v) for v in values]
    if len(values) != base:
        return False
    # Find the highest power of base needed.  Using <= (not <) so exact
    # powers of the base (e.g. 8 in base 2) get enough digit positions;
    # the original '<' made the docstring example base_n(2, [1, 0], 8) crash.
    n = 1
    while base ** (n + 1) <= num:
        n += 1
    digits = []
    for i in range(n, 0, -1):
        amp, num = divmod(num, base ** i)
        digits.append(values[amp])
    digits.append(values[num])
    # Drop leading zero digits token-by-token.  (str.lstrip strips by
    # character *set* and would corrupt multi-character digit values.)
    lead = 0
    while lead < len(digits) - 1 and digits[lead] == values[0]:
        lead += 1
    return ''.join(digits[lead:])
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
781048fd9551104132e5c8d912930134933692d4 | de8ac44e9c6e14d0205fd1b73dd23bab2655761b | /confuse/main.py | 5432360b4caf41df31eaeb088863aed70584395c | [
"MIT"
] | permissive | kyokley/confuse | 8dabb52f83347f045f0bfe06e4d227a86da43f23 | 507e85894d380faa715983aa131cedeef60993c9 | refs/heads/master | 2021-01-10T08:05:22.853975 | 2017-08-12T23:43:11 | 2017-08-12T23:43:11 | 45,745,776 | 1 | 0 | null | 2017-01-12T02:08:39 | 2015-11-07T17:07:09 | Python | UTF-8 | Python | false | false | 1,846 | py | import re
import polib
import sys
from .alphabet import confusablesDict
# Spans that must never be "confused": printf-style substitutions (named and
# positional), str.format placeholders, and HTML tags.
NAMED_SUB_STR_REGEX = r'%(\S+)[sdiouxXeEfFgGcr]'
UNNAMED_SUB_STR_REGEX = r'%[sdiouxXeEfFgGcr]'
FORMATTER_REGEX = r'{\S?}'
HTML_TAG_REGEX = r'''</?\w+((\s+\w+(\s*=\s*(?:".*?"|'.*?'|[^'">\s]+))?)+\s*|\s*)/?>'''

# Tried in order at each scan position; first match is copied through verbatim.
regexes = [NAMED_SUB_STR_REGEX,
           UNNAMED_SUB_STR_REGEX,
           FORMATTER_REGEX,
           HTML_TAG_REGEX,
           ]

# True when running under Python 2 (str/unicode handling differs below).
PY2 = sys.version_info[0] == 2
def get_stdin():
    """Return all lines currently available on standard input."""
    return sys.stdin.readlines()
def confuse(string, encoding='utf-8'):
    """Return `string` with each character replaced by a Unicode confusable.

    Scans the string position by position.  Any span matching one of the
    module-level `regexes` (format placeholders, HTML tags) is copied through
    unchanged; every other character is replaced by its first lookalike from
    `confusablesDict` when one exists.  Always returns a unicode string.
    """
    output = []
    pos = 0
    while pos < len(string):
        for regex in regexes:
            pattern = re.compile(regex)
            match = pattern.match(string, pos)
            if match:
                # Protected span: copy it through verbatim and jump past it.
                pos = match.end(0)
                output.extend(match.group(0))
                break
        else:
            # for-else: runs only when no protecting regex matched here.
            char = string[pos]
            if char in confusablesDict:
                output.append(confusablesDict[char][0])
            else:
                if PY2 and isinstance(char, str):
                    # Python 2 bytes input: decode so the join stays unicode.
                    output.append(char.decode(encoding))
                else:
                    output.append(char)
            pos += 1
    return u''.join(output)
def confusePO(filename):
    """Replace every msgstr in a .po file with a confused copy of its msgid.

    The file is modified in place.
    """
    po = polib.pofile(filename)
    for entry in po:
        entry.msgstr = confuse(entry.msgid)
    po.save()
def main():
    """CLI entry point: confuse argv words, or stdin lines when no args.

    Output is always UTF-8 encoded; on Python 3 it is written to the raw
    stdout buffer to bypass the text layer's own encoding.
    """
    if len(sys.argv) > 1:
        if PY2:
            print(' '.join([confuse(x).encode('utf-8') for x in sys.argv[1:]]))
        else:
            sys.stdout.buffer.write(b' '.join([confuse(x).encode('utf-8') for x in sys.argv[1:]]))
    else:
        for line in get_stdin():
            if PY2:
                print(confuse(line).encode('utf-8'))
            else:
                sys.stdout.buffer.write(confuse(line).encode('utf-8'))

if __name__ == '__main__':
    main()
| [
"kyokley2@gmail.com"
] | kyokley2@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.