Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
b8a196d6aa3611a6fbcce7ad132fcd437d7f6bf3
|
b1b86d8528df27d99ed56ed16f1ba15b5ae78661
|
/build_isolated/waterplus_map_tools/cmake/waterplus_map_tools-genmsg-context.py
|
13af763d2d6d2c9932b8be57f4bfc5f85289af4d
|
[] |
no_license
|
gychen-n/match
|
8754ac128b43f81e00faf3ab2af160af70a1d4a3
|
ec91f19d104aa4a827c9f66d362f94fe44739cad
|
refs/heads/main
| 2023-04-09T19:56:55.507118
| 2021-04-15T13:39:02
| 2021-04-15T13:39:02
| 358,268,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/gyc/match_ws/src/tools/waterplus_map_tools/msg/Waypoint.msg"
services_str = "/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/SaveWaypoints.srv;/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/AddNewWaypoint.srv;/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/GetNumOfWaypoints.srv;/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/GetWaypointByIndex.srv;/home/gyc/match_ws/src/tools/waterplus_map_tools/srv/GetWaypointByName.srv"
pkg_name = "waterplus_map_tools"
dependencies_str = "std_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "waterplus_map_tools;/home/gyc/match_ws/src/tools/waterplus_map_tools/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"gyc@autolabor-host.autolabor-domain"
] |
gyc@autolabor-host.autolabor-domain
|
ab104e594bbf8454e09b791cefc01091331f1e51
|
a90077635aeac846965381e0b07591a1df011afe
|
/care/facility/summarisation/facility_capacity.py
|
17669b7b24a61209378ee70cec2dbb3a812a2584
|
[
"MIT"
] |
permissive
|
Basharckr/care
|
f873ca140ae8607846d9b9500e3c21e9bfa15800
|
c86ae2614ea9ba80b140a2eb21ad64fdbb47ad7e
|
refs/heads/master
| 2023-06-17T21:26:48.936321
| 2021-07-12T06:03:52
| 2021-07-12T06:03:52
| 386,884,450
| 1
| 0
|
MIT
| 2021-07-17T08:41:09
| 2021-07-17T08:41:09
| null |
UTF-8
|
Python
| false
| false
| 8,112
|
py
|
from celery.decorators import periodic_task
from celery.schedules import crontab
from django.db.models import Sum
from django.utils.decorators import method_decorator
from django.utils.timezone import localtime, now
from django.views.decorators.cache import cache_page
from django_filters import rest_framework as filters
from rest_framework import serializers
from rest_framework.mixins import ListModelMixin
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.viewsets import GenericViewSet
from care.facility.api.serializers.facility import FacilitySerializer
from care.facility.api.serializers.facility_capacity import FacilityCapacitySerializer
from care.facility.models import Facility, FacilityCapacity, FacilityRelatedSummary, PatientRegistration
from care.facility.models.inventory import FacilityInventoryBurnRate, FacilityInventoryLog, FacilityInventorySummary
class FacilitySummarySerializer(serializers.ModelSerializer):
facility = FacilitySerializer()
class Meta:
model = FacilityRelatedSummary
exclude = (
"id",
"s_type",
)
class FacilitySummaryFilter(filters.FilterSet):
start_date = filters.DateFilter(field_name="created_date", lookup_expr="gte")
end_date = filters.DateFilter(field_name="created_date", lookup_expr="lte")
facility = filters.UUIDFilter(field_name="facility__external_id")
district = filters.NumberFilter(field_name="facility__district__id")
local_body = filters.NumberFilter(field_name="facility__local_body__id")
state = filters.NumberFilter(field_name="facility__state__id")
class FacilityCapacitySummaryViewSet(
ListModelMixin, GenericViewSet,
):
lookup_field = "external_id"
queryset = (
FacilityRelatedSummary.objects.filter(s_type="FacilityCapacity")
.order_by("-created_date")
.select_related("facility", "facility__state", "facility__district", "facility__local_body")
)
permission_classes = (IsAuthenticatedOrReadOnly,)
serializer_class = FacilitySummarySerializer
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = FacilitySummaryFilter
@method_decorator(cache_page(60 * 10))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
# def get_queryset(self):
# user = self.request.user
# queryset = self.queryset
# if user.is_superuser:
# return queryset
# elif self.request.user.user_type >= User.TYPE_VALUE_MAP["DistrictReadOnlyAdmin"]:
# return queryset.filter(facility__district=user.district)
# elif self.request.user.user_type >= User.TYPE_VALUE_MAP["StateReadOnlyAdmin"]:
# return queryset.filter(facility__state=user.state)
# return queryset.filter(facility__users__id__exact=user.id)
def FacilityCapacitySummary():
capacity_objects = FacilityCapacity.objects.all().select_related(
"facility", "facility__state", "facility__district", "facility__local_body"
)
capacity_summary = {}
current_date = localtime(now()).replace(hour=0, minute=0, second=0, microsecond=0)
for facility_obj in Facility.objects.all():
# Calculate Actual Patients Discharged and Live in this Facility
patients_in_facility = PatientRegistration.objects.filter(facility_id=facility_obj.id).select_related(
"state", "district", "local_body"
)
capacity_summary[facility_obj.id] = FacilitySerializer(facility_obj).data
capacity_summary[facility_obj.id]["actual_live_patients"] = patients_in_facility.filter(is_active=True).count()
discharge_patients = patients_in_facility.filter(is_active=False)
capacity_summary[facility_obj.id]["actual_discharged_patients"] = discharge_patients.count()
capacity_summary[facility_obj.id]["availability"] = []
temp_inventory_summary_obj = {}
summary_objs = FacilityInventorySummary.objects.filter(facility_id=facility_obj.id)
for summary_obj in summary_objs:
burn_rate = FacilityInventoryBurnRate.objects.filter(
facility_id=facility_obj.id, item_id=summary_obj.item.id
).first()
log_query = FacilityInventoryLog.objects.filter(
facility_id=facility_obj.id,
item_id=summary_obj.item.id,
created_date__gte=current_date,
probable_accident=False,
)
# start_log = log_query.order_by("created_date").first()
end_log = log_query.order_by("-created_date").first()
# start_stock = summary_obj.quantity_in_default_unit
# if start_log:
# if start_log.is_incoming: # Add current value to current stock to get correct stock
# start_stock = start_log.current_stock + start_log.quantity_in_default_unit
# else:
# start_stock = start_log.current_stock - start_log.quantity_in_default_unit
end_stock = summary_obj.quantity
if end_log:
end_stock = end_log.current_stock
total_consumed = 0
temp1 = log_query.filter(is_incoming=False).aggregate(Sum("quantity_in_default_unit"))
if temp1:
total_consumed = temp1.get("quantity_in_default_unit__sum", 0)
if not total_consumed:
total_consumed = 0
total_added = 0
temp2 = log_query.filter(is_incoming=True).aggregate(Sum("quantity_in_default_unit"))
if temp2:
total_added = temp2.get("quantity_in_default_unit__sum", 0)
if not total_added:
total_added = 0
# Calculate Start Stock as
# end_stock = start_stock - consumption + addition
# start_stock = end_stock - addition + consumption
# This way the start stock will never veer off course
start_stock = end_stock - total_added + total_consumed
if burn_rate:
burn_rate = burn_rate.burn_rate
temp_inventory_summary_obj[summary_obj.item.id] = {
"item_name": summary_obj.item.name,
"stock": summary_obj.quantity,
"unit": summary_obj.item.default_unit.name,
"is_low": summary_obj.is_low,
"burn_rate": burn_rate,
"start_stock": start_stock,
"end_stock": end_stock,
"total_consumed": total_consumed,
"total_added": total_added,
"modified_date": summary_obj.modified_date.astimezone().isoformat(),
}
capacity_summary[facility_obj.id]["inventory"] = temp_inventory_summary_obj
for capacity_object in capacity_objects:
facility_id = capacity_object.facility.id
if facility_id not in capacity_summary:
capacity_summary[facility_id] = FacilitySerializer(capacity_object.facility).data
if "availability" not in capacity_summary[facility_id]:
capacity_summary[facility_id]["availability"] = []
capacity_summary[facility_id]["availability"].append(FacilityCapacitySerializer(capacity_object).data)
for i in capacity_summary:
facility_summary_obj = None
if FacilityRelatedSummary.objects.filter(
s_type="FacilityCapacity", facility_id=i, created_date__gte=current_date
).exists():
facility_summary_obj = FacilityRelatedSummary.objects.get(
s_type="FacilityCapacity", facility_id=i, created_date__gte=current_date
)
else:
facility_summary_obj = FacilityRelatedSummary(s_type="FacilityCapacity", facility_id=i)
facility_summary_obj.data = capacity_summary[i]
facility_summary_obj.save()
return True
@periodic_task(run_every=crontab(minute="*/5"))
def run_midnight():
FacilityCapacitySummary()
print("Summarised Capacities")
|
[
"vichuhari100@gmail.com"
] |
vichuhari100@gmail.com
|
d3d10a4755e4599dfc81f7fea2fea1d344dc0b4b
|
82bdb812582e7ad42db922023f3eb84b4fb80f72
|
/networks.py
|
4a8973311254d1c7786d7381d47672b7ffe20ffd
|
[] |
no_license
|
hzaskywalker/AWR-Python
|
cda43594248f3db563456f67c677db4508f80a5c
|
4fb00f3691b980c93734b11fab6002737a369b31
|
refs/heads/master
| 2022-10-11T09:31:37.742771
| 2020-06-11T17:18:10
| 2020-06-11T17:18:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,135
|
py
|
import torch
import numpy as np
class Value:
def __init__(self, policy):
self.policy = policy
def __call__(self, state, params):
state = torch.cat((state, params), dim=-1)
act = self.policy.actor_target(state)
cri1, cri2 = self.policy.critic_target(state,act)
return torch.min(cri1, cri2)
#return min(min (cri1.numpy(), cri2.numpy())[0][0])
def get_td3_value(env_name):
if env_name == "DartWalker2dPT-v1":
state_dim = 25
action_dim = 6
max_action = 1.0
elif env_name == "DartHopperPT-v1":
state_dim = 16
action_dim = 3
max_action = 1.0
import utils
import policy_transfer.uposi.TD3.utils
from policy_transfer.uposi.TD3.TD3 import TD3
import policy_transfer.uposi.TD3.OurDDPG
import policy_transfer.uposi.TD3.DDPG
policy = TD3(state_dim = state_dim, action_dim = action_dim, max_action = max_action)
policy.load("/home/hza/policy_transfer/PT/policy_transfer/uposi/TD3/models/TD3_" + env_name + "_1000")
#policy.actor_target.to(torch.device("cpu"))
#policy.critic_target.to(torch.device("cpu"))
policy.actor_target.to(torch.device("cuda"))
policy.critic_target.to(torch.device("cuda"))
return Value(policy)
class UP:
def __init__(self, actor_critic, ob_rms):
self.actor_critic = actor_critic
self.ob_rms = ob_rms
self.device = 'cuda:0'
self.params = None
def reset(self):
self.hidden = torch.zeros(
1, self.actor_critic.recurrent_hidden_state_size, device=self.device)
self.mask = torch.zeros(1, 1, device=self.device)
def set_params(self, params):
self.params = params
def __call__(self, ob):
assert self.params is not None
ob = np.concatenate((ob, self.params))
ob = torch.tensor([np.clip((ob - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + 1e-08), -10.0, 10.0)], dtype=torch.float32, device=self.device)
_, action, _, self.hidden_state = self.actor_critic.act(ob, self.hidden, self.mask, deterministic=True)
return action.detach().cpu().numpy()[0]
def get_up_network(env_name, num):
import sys
import os
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'PT/policy_transfer/uposi'))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'PT/baselines'))
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.algo import gail
from a2c_ppo_acktr.arguments import get_args
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
env_name = env_name[:-5]
if 'Dart' in env_name:
path = f"/home/hza/policy_transfer/PT/trained_models/ppo/UP_{env_name}_{num}.pt"
else:
path = f"/home/hza/policy_transfer/PT/trained_models/ppo/UP_{env_name}_{num}.pt"
result = torch.load(path, map_location=lambda a, b:torch.Storage().cuda())
actor_critic = result[0]
actor_critic.cuda()
ob_rms = result[1]
return UP(actor_critic, ob_rms)
class UP2(UP):
def __init__(self, agent):
self.agent = agent
self.params = None
def set_params(self, params):
self.params = params
def __call__(self, ob):
if len(self.params.shape) == 1:
ob = np.concatenate((ob, self.params), axis=0)[None,:]
else:
ob = np.concatenate((np.tile(ob,(len(self.params), 1)), self.params), axis=1)
action = self.agent.act(ob, mode='test')
return action.mean(axis=0)
def reset(self):
pass
def get_awr_network(env_name, num):
import torch
import sys
sys.path.append('awr2')
path = f'awr2/models/{env_name}'
agent = torch.load(path)
return UP2(agent)
def get_finetune_network(env_name, num, num_iter=21, num_proc=10):
import torch
import sys
from finetune import Finetune
sys.path.append('awr2')
path = f'awr2/models/{env_name}'
agent = torch.load(path)
return Finetune(env_name, num, agent, num_iter, num_proc=num_proc)
|
[
"hzaskywalker@gmail.com"
] |
hzaskywalker@gmail.com
|
d97fa27fd1ef8dab53b15a077aea13385d7f7afd
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Quantization/trend_MovingMedian/cycle_12/ar_/test_artificial_128_Quantization_MovingMedian_12__100.py
|
64695ec2235c61ebbe172764ee17c38a3ffdbfd8
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 272
|
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
b87981a578e9f0a4fa7bd52c97f63d833cf5ea5c
|
3d7e1a506d65c23c84b7430fa46623cb98de8c64
|
/median.py
|
dc0584831963d0d6f87700eddb7c8140d94fb9f2
|
[] |
no_license
|
crakama/UdacityIntrotoComputerScience
|
cb6ac8a9084f078eaf245a52adc43541c35dc3f4
|
416b82b85ff70c48eabae6bb9d7b43354a158d9a
|
refs/heads/master
| 2021-01-09T20:39:15.974791
| 2016-07-18T20:59:09
| 2016-07-18T20:59:09
| 60,571,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Define a procedure, median, that takes three
# numbers as its inputs, and returns the median
# of the three numbers.
# Make sure your procedure has a return statement.
def bigger(a,b):
if a > b:
return a
else:
return b
def biggest(a,b,c):
return bigger(a,bigger(b,c))
def smaller(a,b):
    if a < b:
        return a
    else:
        return b
def smallest(a,b,c):
    return smaller(a,smaller(b,c))
def median(a, b, c):
    # the sum minus the biggest and the smallest leaves the middle value
    return a + b + c - biggest(a,b,c) - smallest(a,b,c)
|
[
"crakama89@gmail.com"
] |
crakama89@gmail.com
|
1ee30cd5b57a8912f7804703d5709be5b9d229d5
|
77ca708f981b3d4127568ff416e3c0a3dc1fff94
|
/util/cli_parser.py
|
be02dbf7e3bb509d0d10d208c7954fafe85a7c4c
|
[
"Apache-2.0"
] |
permissive
|
inksong/bidDB_downloader
|
df7046cadc392872d4ed58bd730474771e83aaf0
|
1550b6006d1d21bab726bbe5a10c8c2d7aa94bbc
|
refs/heads/master
| 2022-03-06T19:10:03.333813
| 2019-09-28T19:41:15
| 2019-09-28T19:41:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,575
|
py
|
import argparse
import sys
class CLIParser:
# -- Public methods
# CLIParser Constructor
def __init__(self):
super(CLIParser, self).__init__()
self.parser = argparse.ArgumentParser(prog='bidDB_downloader.py', description='BugTraq database downloader.')
self.parser.add_argument('-w','--workers', type=int, default=100, help='number of workers for execution. By '
'default, the workers number is set '
'to 100')
self.parser.add_argument('-f', '--first', type=int, default=1, help='your download will start from this '
'BugTraq Id. By default, the first BugTraq '
'Id is set to 1')
self.parser.add_argument('-l', '--last', type=int, default=100000, help='your download will finish in this last'
' BugTraq Id. By default, the last '
'BugTraq Id is set to 100000')
self.parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1.0',
help='show the version message and exit')
self.args = self.parser.parse_args()
self.__verify_args()
# -- Getters
# Gets workers
def get_workers(self):
return self.args.workers
# Gets the first bid
def get_first_bid(self):
return self.args.first
# Gets the last bid
def get_last_bid(self):
return self.args.last
# -- Private methods
# Verify command line arguments
def __verify_args(self):
if self.args.first <= 0 or self.args.last <= 0 or self.args.workers <= 0:
print(self.parser.prog + ': error: all arguments must be greater than zero.', file=sys.stderr)
exit(2)
elif self.args.first > self.args.last:
print(self.parser.prog + ': error: argument -l/--last: this argument must be greater than -f/--first '
'argument.', file=sys.stderr)
exit(2)
elif self.args.workers > 500:
print(self.parser.prog + ': warning: argument -w/--workers: your system may be unstable with values '
'greater than 500.', file=sys.stderr)
|
[
"eliasgr89@gmail.com"
] |
eliasgr89@gmail.com
|
bd1e701a5e902eca16077631d3424e1691ebb4f5
|
77900cdd9a815caf1cd04705321ca93f5072179f
|
/Project2/Project2/.history/blog/models_20211115152413.py
|
443d1fa03512e592b13bcd760279c43dbb1f7c43
|
[] |
no_license
|
Bom19990111/helloword_python
|
717799d994223d65de5adaeabecf396ff2bc1fb7
|
2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7
|
refs/heads/master
| 2023-09-06T04:17:02.057628
| 2021-11-21T20:00:46
| 2021-11-21T20:00:46
| 407,063,273
| 0
| 1
| null | 2021-11-21T20:00:47
| 2021-09-16T07:18:35
|
Python
|
UTF-8
|
Python
| false
| false
| 778
|
py
|
from django.db import models
from django.contrib.auth.models import User
# from ckeditor  (import left unfinished in this file-history snapshot; unused below)
# Create your models here.
STATUS = ((0, "Draft"), (1, "Published"))
USE_TZ = False
class Blog(models.Model):
title = models.CharField('Tiêu đề', max_length=250, blank=True)
slug = models.SlugField(max_length=250, blank=True)
author = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='blog_posts')
created_on = models.DateTimeField('Giờ tạo',
auto_now_add=True)
update_on = models.DateTimeField('Giờ cập nhật', auto_now=True)
content = models.TextField()
status = models.IntegerField('Trạng thái', choices=STATUS, default=0)
    class Meta:  # must be capitalized as "Meta" for Django to apply the ordering option
ordering = ['-created_on']
def __str__(self):
return self.title
|
[
"phanthituyngoc1995@gmail.com"
] |
phanthituyngoc1995@gmail.com
|
31b60af5a9eaecb9ec2663c0f1867332d0f02a28
|
4f328184e3e4c7ac84792f38127b4538abc1be01
|
/python/re-split/main.py
|
fd853268133194acce224d22a6d46a05d5308ea7
|
[
"Apache-2.0"
] |
permissive
|
shollingsworth/HackerRank
|
30cd45960af5983ed697c0aaf9a6e4268dc75ef7
|
2f0e048044e643d6aa9d07c1898f3b00adf489b0
|
refs/heads/master
| 2021-08-17T09:08:44.532111
| 2017-11-21T02:04:09
| 2017-11-21T02:04:09
| 103,992,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import __future__
import sys
import json
def banner():
ban = '====' * 30
print("{}\nSAMPLE INP:\n{}\n{}".format(ban,ban,open(ip, 'r').read()))
print("{}\nSAMPLE OUT:\n{}\n{}".format(ban,ban,open(op, 'r').read()))
print("{}\nSTART:\n{}".format(ban,ban))
sys.stdin = open(ip, 'r')
cnt = -1
def comp(inp,ln):
outl = output_arr[ln]
if str(inp) != outl:
raise Exception("Error input output: line {}, file: {}\ngot: {} expected: {}".format(ln,op,inp,outl))
ip = "./challenge_sample_input"
op = "./challenge_sample_output"
ip = "./input01.txt"
op = "./output01.txt"
output_arr = map(str,open(op,'r').read().split('\n'))
banner()
# https://www.hackerrank.com/challenges/re-split/problem
import re
print("\n".join([i for i in re.split('[,\.]+', raw_input()) if len(i) > 0 ]))
|
[
"shollingsworth@barracuda.com"
] |
shollingsworth@barracuda.com
|
10e2710b765ceac6d9a48e440c623599ef107024
|
98dae6deaf31bcacc078eeb1bdbdb8bd3ac3784f
|
/dace/transformation/dataflow/copy_to_device.py
|
625179c8445725cf5df002835c54eae68478799a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cpenny42/dace
|
da9b241ea0808f1798645ab917e1484c45a3a748
|
2c7814b4f02a6870bb25ae08113c0cc3791e1178
|
refs/heads/master
| 2020-06-24T09:06:23.091624
| 2019-05-10T11:11:14
| 2019-05-10T11:11:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,767
|
py
|
""" Contains classes and functions that implement copying a nested SDFG
and its dependencies to a given device. """
import dace
from copy import deepcopy as dcpy
from dace import data, properties, symbolic, types, subsets
from dace.graph import edges, graph, nodes, nxutil
from dace.transformation import pattern_matching
from math import ceil
import sympy
import networkx as nx
def change_storage(sdfg, storage):
for state in sdfg.nodes():
for node in state.nodes():
if isinstance(node, nodes.AccessNode):
node.desc(sdfg).storage = storage
if isinstance(node, nodes.NestedSDFG):
change_storage(node.sdfg, storage)
@properties.make_properties
class CopyToDevice(pattern_matching.Transformation):
""" Implements the copy-to-device transformation, which copies a nested
SDFG and its dependencies to a given device.
The transformation changes all data storage types of a nested SDFG to
the given `storage` property, and creates new arrays and copies around
the nested SDFG to that storage.
"""
_nested_sdfg = nodes.NestedSDFG("", graph.OrderedDiGraph(), set(), set())
storage = properties.Property(
dtype=types.StorageType,
desc="Nested SDFG storage",
enum=types.StorageType,
from_string=lambda x: types.StorageType[x],
default=types.StorageType.Default)
@staticmethod
def annotates_memlets():
return True
@staticmethod
def expressions():
return [nxutil.node_path_graph(CopyToDevice._nested_sdfg)]
@staticmethod
def can_be_applied(graph, candidate, expr_index, sdfg, strict=False):
return True
@staticmethod
def match_to_str(graph, candidate):
nested_sdfg = graph.nodes()[candidate[CopyToDevice._nested_sdfg]]
return nested_sdfg.label
def apply(self, sdfg):
state = sdfg.nodes()[self.state_id]
nested_sdfg = state.nodes()[self.subgraph[CopyToDevice._nested_sdfg]]
storage = self.storage
for _, edge in enumerate(state.in_edges(nested_sdfg)):
src, src_conn, dst, dst_conn, memlet = edge
dataname = memlet.data
memdata = sdfg.arrays[dataname]
if isinstance(memdata, data.Array):
new_data = sdfg.add_array(
'device_' + dataname + '_in',
memdata.dtype, [
symbolic.overapproximate(r)
for r in memlet.bounding_box_size()
],
transient=True,
storage=storage)
elif isinstance(memdata, data.Scalar):
new_data = sdfg.add_scalar(
'device_' + dataname + '_in',
memdata.dtype,
transient=True,
storage=storage)
else:
raise NotImplementedError
data_node = nodes.AccessNode('device_' + dataname + '_in')
to_data_mm = dcpy(memlet)
from_data_mm = dcpy(memlet)
from_data_mm.data = 'device_' + dataname + '_in'
offset = []
for ind, r in enumerate(memlet.subset):
offset.append(r[0])
if isinstance(memlet.subset[ind], tuple):
begin = memlet.subset[ind][0] - r[0]
end = memlet.subset[ind][1] - r[0]
step = memlet.subset[ind][2]
from_data_mm.subset[ind] = (begin, end, step)
else:
from_data_mm.subset[ind] -= r[0]
state.remove_edge(edge)
state.add_edge(src, src_conn, data_node, None, to_data_mm)
state.add_edge(data_node, None, dst, dst_conn, from_data_mm)
for _, edge in enumerate(state.out_edges(nested_sdfg)):
src, src_conn, dst, dst_conn, memlet = edge
dataname = memlet.data
memdata = sdfg.arrays[dataname]
if isinstance(memdata, data.Array):
new_data = data.Array(
'device_' + dataname + '_out',
memdata.dtype, [
symbolic.overapproximate(r)
for r in memlet.bounding_box_size()
],
transient=True,
storage=storage)
elif isinstance(memdata, data.Scalar):
new_data = sdfg.add_scalar(
'device_' + dataname + '_out',
memdata.dtype,
transient=True,
storage=storage)
else:
raise NotImplementedError
data_node = nodes.AccessNode('device_' + dataname + '_out')
to_data_mm = dcpy(memlet)
from_data_mm = dcpy(memlet)
to_data_mm.data = 'device_' + dataname + '_out'
offset = []
for ind, r in enumerate(memlet.subset):
offset.append(r[0])
if isinstance(memlet.subset[ind], tuple):
begin = memlet.subset[ind][0] - r[0]
end = memlet.subset[ind][1] - r[0]
step = memlet.subset[ind][2]
to_data_mm.subset[ind] = (begin, end, step)
else:
to_data_mm.subset[ind] -= r[0]
state.remove_edge(edge)
state.add_edge(src, src_conn, data_node, None, to_data_mm)
state.add_edge(data_node, None, dst, dst_conn, from_data_mm)
# Change storage for all data inside nested SDFG to device.
change_storage(nested_sdfg.sdfg, storage)
pattern_matching.Transformation.register_pattern(CopyToDevice)
|
[
"talbn@inf.ethz.ch"
] |
talbn@inf.ethz.ch
|
a77bf7cae50058808c751ebac3a9f47a4889aeef
|
c51c92dc1ba350b821899715cd16ba0b8d67653c
|
/dlmPython/dlm_mod.py
|
35250f347c78335a3bad42287d6e1e41452c3dfd
|
[
"MIT"
] |
permissive
|
luiarthur/dlmPython
|
a0c2196d9753e010d4417afa6a9e439966c2bb8d
|
c3d6328b1260c795759637cd8d26d3f79febd950
|
refs/heads/master
| 2021-01-19T21:40:43.987505
| 2019-11-14T16:50:00
| 2019-11-14T16:50:00
| 88,685,692
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
import numpy as np
from .lego import E, J, join
from .dlm_uni import dlm_uni
def arma(ar=[], ma=[], tau2=1, V=None):
"""
ARMA component for DLM
- ar: list of ar coefficients
- ma: list of ma coefficients
- tau2: variance for evolution matrix
- V: variance for observations
Note that there is no discount option here because W is assumed to be
a matrix of zeros but with W_{1,1} = sig2
see West & Prado (p.75)
"""
    assert ar or ma, "You must specify at least one of 'ar' or 'ma'!"  # 'ar is []' compared identity and never failed
p = len(ar)
q = len(ma)
m = max(p, q+1)
phi = join(np.array(ar), np.zeros(m-p))
rest = np.vstack( (np.eye(m-1), np.zeros(m-1)) )
G = np.column_stack( (phi, rest) )
psi = join(np.array(ma), np.zeros(m-1-q))
omega = np.asmatrix(join(1, psi)).transpose()
W = tau2 * omega * omega.transpose()
return dlm_uni(F=E(m), G=G, V=V, W=W)
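# Illustrative call (hypothetical values, not from the source): an ARMA(1,1)
# component, so m = max(p, q+1) = 2, F = E(2), and W = tau2 * omega * omega'
# is a 2x2 rank-one matrix.
# mod = arma(ar=[0.9], ma=[0.4], tau2=1.0, V=1.0)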
def poly(order=1, V=None, W=None, discount=None):
"""
Polynomial trend component for DLM
- order: order 0 polynomial => mean forecast function (random walk model)
order 1 polynomial => linear forecast function
order 2 polynomial => quadratic forecast function
"""
    assert order >= 0, "order needs to be >= 0"
p = order + 1
return dlm_uni(F=E(p), G=J(p), V=V, W=W, discount=discount)
def seasonal():
return NotImplemented
def reg():
"""
Creates dlm model for regression
"""
return NotImplemented
|
[
"luiarthur@gmail.com"
] |
luiarthur@gmail.com
|
f07b402cb9a7603154f115821368167dcb9a18cc
|
4472e40c53ca3e1df4f9e477a6268133309b7597
|
/_unittests/ut_module/test_r.py
|
d4017e655d6fc616855c64c993b23fc27ae187d5
|
[
"MIT"
] |
permissive
|
amoussoubaruch/ensae_teaching_cs
|
289729742608da064f07a79b10cf6cce48de1b51
|
313a6ccb8756dbaa4c52724839b69af8a5f4476e
|
refs/heads/master
| 2021-01-16T19:31:49.734583
| 2016-09-09T08:29:58
| 2016-09-09T08:29:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,250
|
py
|
"""
@brief test log(time=0s)
"""
import sys
import os
import unittest
import warnings
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from src.ensae_teaching_cs.faq.faq_jupyter import r_and_notebook
class TestR (unittest.TestCase):
def test_r(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if "R_HOME" not in os.environ:
warnings.warn("R is not installed")
return
assert r_and_notebook()
if __name__ == "__main__":
unittest.main()
|
[
"xavier.dupre@ensae.fr"
] |
xavier.dupre@ensae.fr
|
016650fdb1063f4afb72aea3f846462aea5c8cf0
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2254/60636/258712.py
|
5d40a74827de64da87dac87a1dc8edd1768d7ecf
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
f_r=input().split(" ")
f=int(f_r[0])
r=int(f_r[1])
sources=[]
for i in range(f):
source=[]
for j in range(f):
source.append("0")
sources.append(source)
for i in range(r):
x=input().split(" ")
sources[int(x[0])-1][int(x[1])-1]="1"
sources[int(x[1])-1][int(x[0])-1]="1"
count_1=0
count_odd=0
for i in range(len(sources)):
count=0
for j in sources[i]:
if j=="1":
count=count+1
if count==1:
count_1+=1
elif count%2==1:
count_odd+=1
if count_1!=0:
if(count_1%2==0):
print(int(count_1/2))
else:
if(count_odd%2==0):
print(int(count_1/2)+int((count_odd)/2)+1)
else:
print(int(count_1/2))
else:
print(int((count_odd+1)/2))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
c8712f660e267eac3ed0a4a151298d3036d8637c
|
696dec6a8d1eba189d36049afedec36da47c08f3
|
/models/_model_core_utils/__init__.py
|
fe191e1f22cc8de28fd6c2603d2190a44aeef2ff
|
[] |
no_license
|
JoelRaymann/polyp-segmentation
|
d99079f56bb3ae0886fb4c610c4abcc420137781
|
38da6c8bf47df2d2382d31f04faf63649b7d8ab0
|
refs/heads/master
| 2023-04-10T06:17:34.720237
| 2021-04-14T22:04:36
| 2021-04-14T22:04:36
| 358,053,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
from ._unet import UNet
from ._unet_dice import UNetDice
from ._resunet import ResUNet
from ._deeplabv3 import Deeplabv3
from ._fcn import FCN8
from ._segnet import SegNet
from ._unet_attn import UNetAttn
from ._gar_net import GARNet
from ._resunet_plus_plus import ResUNetPlusPlus
from ._dilated_resfcn import DilatedResFCN
from ._se_unet import SEUNet
from ._dilated_unet import DilatedUNet
from ._gar_net_exp import GARNetExperimental
|
[
"joelraymann@gmail.com"
] |
joelraymann@gmail.com
|
89339d45b4d3abc2eac1659c9dae145f5a8846f8
|
50dd2a43daa8316fc11e0c176b5872738fcc5dde
|
/Learning/130_Fluent_Python/fp2-utf8/freeinteractive/freeinteractive 192.py
|
67edeb4bbefbcf40a50db41edc107e2f2a0bb953
|
[] |
no_license
|
FrenchBear/Python
|
58204d368e3e72071eef298ff00d06ff51bd7914
|
b41ab4b6a59ee9e145ef2cd887a5fe306973962b
|
refs/heads/master
| 2023-08-31T18:43:37.792427
| 2023-08-26T15:53:20
| 2023-08-26T15:53:20
| 124,466,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
>>> from domainlib import multi_probe
>>> names = 'python.org rust-lang.org golang.org nolang.invalid'.split()
>>> gen_found = (name async for name, found in multi_probe(names) if found)
>>> gen_found
<async_generator object <genexpr> at 0x10a8f9700>
>>> async for name in gen_found:
... print(name)
...
golang.org
python.org
rust-lang.org
|
[
"FrenchBear38@outlook.com"
] |
FrenchBear38@outlook.com
|
6808c8c41ce560a5d7aabbbe7ab1d3bcd280de35
|
da2d53e8021b539db006fa31f02d1c2ae46bed3b
|
/Test/test.py
|
af89a96f0b23218a82cfcd3e536f4901d5f99677
|
[] |
no_license
|
srajsonu/CodeChef
|
0723ee4975808e2f4d101d2034771d868ae3b7f7
|
a39cd5886a5f108dcd46f70922d5637dd29849ce
|
refs/heads/main
| 2023-04-22T08:33:06.376698
| 2021-05-16T05:48:17
| 2021-05-16T05:48:17
| 327,030,437
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,929
|
py
|
"""
1 : [2, 3]
7 <-> 6 (1)
6 <-> 5 (2)
2 <-> 8 (2)
0 <-> 1 (4)
2 <-> 5 (4)
6 <-> 8 (6)
2 <-> 3 (7)
7 <-> 8 (7)
3 <-> 4 (9)
5 <-> 4 (10)
1 <-> 7 (11)
3 <-> 5 (14)
7 -> 6 -> 5 -> 4
|
2 -> 8
"""
from collections import defaultdict
class Solution:
def find_root(self, A, parent):
if parent[A] == A:
return A
return self.find_root(parent[A], parent)
def union(self, A, B, height, parent):
C = self.find_root(A, parent)
D = self.find_root(B, parent)
if C == D:
return
if height[C] < height[D]:
parent[C] = D
elif height[C] > height[D]:
parent[D] = C
else:
parent[C] = D
height[C] += 1
def mst(self, A):
    # Completion sketch: the original stopped mid-function here. Kruskal's MST
    # using the union/find helpers above -- sort edges by weight and keep each
    # edge whose endpoints are still in different components.
    edges = sorted(A, key=lambda e: e[2])
    nodes = {i for e in edges for i in e[:2]}
    parent = {i: i for i in nodes}
    height = {i: 0 for i in nodes}
    picked = []
    for u, v, w in edges:
        if self.find_root(u, parent) != self.find_root(v, parent):
            self.union(u, v, height, parent)
            picked.append((u, v, w))
    return picked
if __name__ == '__main__':
    A = [[0, 1, 4],
         [0, 7, 8],
         [1, 7, 11],
         [1, 2, 8],
         [2, 8, 2],
         [2, 5, 4],
         [3, 2, 7],
         [3, 5, 14]]
    print(Solution().mst(A))
"""
class Solution:
def dp(self, n, dp):
if dp[n]:
return dp[n]
if n <= 1:
return 1
count = 0
for i in range(1, n+1):
count += self.dp(i-1, dp) * self.dp(n-i, dp)
dp[n] = count
return count
def uniqueBST(self, A):
dp = [0 for _ in range(A+1)]
return self.dp(A, dp)
def solve(self, n):
dp = [0 for _ in range(n+1)]
dp[0] = 1
for i in range(1, n+1):
for j in range(1, i+1):
dp[i] += dp[j-1] * dp[i - j]
return dp[-1]
if __name__ == '__main__':
A = 9
B = Solution()
print(B.uniqueBST(A))
print(B.solve(A))
"""
class Solution:
def solve(self, arr):
m = len(arr)
n = len(arr[0])
for i in range(m):
for j in range(n):
if arr[i][j] == 0:
for k in range(m):
if arr[k][j] == 0:
continue
arr[k][j] = -1
for l in range(n):
if arr[i][l] == 0:
continue
arr[i][l] = -1
for i in range(m):
for j in range(n):
if arr[i][j] == -1:
arr[i][j] = 0
# while q:
# i, j = q.pop(0) #(1, 1)
#
# for k in range(n):
# arr[i][k] = 0
#
# for k in range(m):
# arr[k][j] = 0
return arr
if __name__ == '__main__':
arr = [[1, 1, 1],
[1, 0, 1],
[1, 1, 1]]
S = Solution()
print(S.solve(arr))
"""
1 0 1
0 0 0
1 0 1
"""
|
[
"srajsonu02@gmail.com"
] |
srajsonu02@gmail.com
|
146a0a6e3d8f7b969b3b61b74b99b083cfd95fc1
|
d05a59feee839a4af352b7ed2fd6cf10a288a3cb
|
/examples/tutorial2.py
|
3a952ba678b6f688c3b4fe342830ac8cf6ac0bc4
|
[
"BSD-2-Clause-Views"
] |
permissive
|
elessarelfstone/XlsxWriter
|
0d958afd593643f990373bd4d8a32bafc0966534
|
bb7b7881c7a93c89d6eaac25f12dda08d58d3046
|
refs/heads/master
| 2020-09-24T06:17:20.840848
| 2019-11-24T23:43:01
| 2019-11-24T23:43:01
| 225,685,272
| 1
| 0
|
NOASSERTION
| 2019-12-03T18:09:06
| 2019-12-03T18:09:05
| null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
##############################################################################
#
# A simple program to write some data to an Excel file using the XlsxWriter
# Python module.
#
# This program is shown, with explanations, in Tutorial 2 of the XlsxWriter
# documentation.
#
# Copyright 2013-2019, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook('Expenses02.xlsx')
worksheet = workbook.add_worksheet()
# Add a bold format to use to highlight cells.
bold = workbook.add_format({'bold': True})
# Add a number format for cells with money.
money = workbook.add_format({'num_format': '$#,##0'})
# Write some data header.
worksheet.write('A1', 'Item', bold)
worksheet.write('B1', 'Cost', bold)
# Some data we want to write to the worksheet.
expenses = (
['Rent', 1000],
['Gas', 100],
['Food', 300],
['Gym', 50],
)
# Start from the first cell below the headers.
row = 1
col = 0
# Iterate over the data and write it out row by row.
for item, cost in (expenses):
worksheet.write(row, col, item)
worksheet.write(row, col + 1, cost, money)
row += 1
# Write a total using a formula.
worksheet.write(row, 0, 'Total', bold)
worksheet.write(row, 1, '=SUM(B2:B5)', money)
workbook.close()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
ee98f3883988b0a75df2d59495dab5b03aae7026
|
b4afb834fc3a3e2c128b1bf825700031e3df519a
|
/examples/cobalt-preproc/Arctic/fix_pole.py
|
7e8aedf62b4f44becced7e92fdac0761988e3965
|
[
"BSD-3-Clause"
] |
permissive
|
ESMG/pyroms
|
e4e5e9d70d66907a992846b06d61db31afcd24f3
|
5ea501ef904b01036dd2a0909b7bdc61a56e7eff
|
refs/heads/python3
| 2023-03-19T11:11:09.143443
| 2023-03-10T00:22:13
| 2023-03-10T00:22:13
| 1,012,779
| 102
| 63
|
NOASSERTION
| 2023-03-10T00:23:20
| 2010-10-21T17:22:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,787
|
py
|
import subprocess
import os
import sys
import numpy as np
import netCDF4 as nc
dst_dir='./'
ic_file = dst_dir + 'ARCTIC4_ic_bio_GFDL-APR.nc'
fidic = nc.Dataset(ic_file,'a')
Cs_r = fidic.variables['Cs_r']
nz = Cs_r.shape[0]
# define all tracer stuff
list_tracer = ['alk', 'cadet_arag', 'cadet_calc', 'dic', 'fed', 'fedet', 'fedi', 'felg', 'fesm', 'ldon', 'ldop', 'lith', 'lithdet', 'nbact', 'ndet', 'ndi', 'nlg', 'nsm', 'nh4', 'no3', 'o2', 'pdet', 'po4', 'srdon', 'srdop', 'sldon', 'sldop', 'sidet', 'silg', 'sio4', 'nsmz', 'nmdz', 'nlgz','cased','chl','irr_mem','htotal','co3_ion']
print('\nFixing a north pole problem')
for tr in list_tracer:
print('for variable', tr)
tracer = fidic.variables[tr][:]
mysum = np.zeros((nz))
count = 0
for j in range(753,768):
for i in range(271,287):
if tracer[0,0,j,i] != 0:
count += 1
mysum += tracer[0,:,j,i]
print('count', count)
mysum = mysum/count
print('mysum', mysum)
for j in range(753,768):
for i in range(271,287):
if tracer[0,0,j,i] == 0:
tracer[0,:,j,i] = mysum
fidic.variables[tr][:] = tracer
# These two tracers contain zeros, leading to nans.
tracer = fidic.variables['cased'][:]
mysum = 0.25*(tracer[0,:,752,279] + tracer[0,:,768,279] + tracer[0,:,760,270] + tracer[0,:,602,287])
for j in range(753,768):
for i in range(271,287):
tracer[0,:,j,i] = mysum
fidic.variables['cased'][:] = tracer
tracer = fidic.variables['irr_mem'][:]
mysum = 0.25*(tracer[0,:,752,279] + tracer[0,:,768,279] + tracer[0,:,760,270] + tracer[0,:,602,287])
for j in range(753,768):
for i in range(271,287):
tracer[0,:,j,i] = mysum
fidic.variables['irr_mem'][:] = tracer
fidic.close()
|
[
"kshedstrom@alaska.edu"
] |
kshedstrom@alaska.edu
|
4357bfbaea73a6e8726ec1d1643b71f701b1c489
|
091e97bcfe5acc0635bd601aa8497e377b74d41a
|
/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/openshift_health_checker/test/etcd_traffic_test.py
|
583c4c8dd86b2cf5270083b912f639bb34d4f855
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
openshift/openshift-tools
|
d59b63778f25cb8fb3c7a0253afe22a173e72f9d
|
e342f6659a4ef1a188ff403e2fc6b06ac6d119c7
|
refs/heads/prod
| 2023-08-30T01:52:04.108978
| 2022-03-23T21:07:28
| 2022-03-23T21:07:28
| 36,827,699
| 170
| 254
|
Apache-2.0
| 2022-06-16T12:11:51
| 2015-06-03T20:09:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,219
|
py
|
import pytest
from openshift_checks.etcd_traffic import EtcdTraffic
@pytest.mark.parametrize('group_names,version,is_active', [
(['oo_masters_to_config'], "3.5", False),
(['oo_masters_to_config'], "3.6", False),
(['oo_nodes_to_config'], "3.4", False),
(['oo_etcd_to_config'], "3.4", True),
(['oo_etcd_to_config'], "1.5", True),
(['oo_etcd_to_config'], "3.1", False),
(['oo_masters_to_config', 'oo_nodes_to_config'], "3.5", False),
(['oo_masters_to_config', 'oo_etcd_to_config'], "3.5", True),
([], "3.4", False),
])
def test_is_active(group_names, version, is_active):
task_vars = dict(
group_names=group_names,
openshift_image_tag=version,
)
assert EtcdTraffic(task_vars=task_vars).is_active() == is_active
@pytest.mark.parametrize('group_names,matched,failed,extra_words', [
(["oo_masters_to_config"], True, True, ["Higher than normal", "traffic"]),
(["oo_masters_to_config", "oo_etcd_to_config"], False, False, []),
(["oo_etcd_to_config"], False, False, []),
])
def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
def execute_module(module_name, *_):
return {
"matched": matched,
"failed": failed,
}
task_vars = dict(
group_names=group_names,
openshift_is_containerized=False,
openshift_service_type="origin"
)
result = EtcdTraffic(execute_module, task_vars).run()
for word in extra_words:
assert word in result.get("msg", "")
assert result.get("failed", False) == failed
@pytest.mark.parametrize('openshift_is_containerized,expected_unit_value', [
(False, "etcd"),
(True, "etcd_container"),
])
def test_systemd_unit_matches_deployment_type(openshift_is_containerized, expected_unit_value):
task_vars = dict(
openshift_is_containerized=openshift_is_containerized
)
def execute_module(module_name, args, *_):
assert module_name == "search_journalctl"
matchers = args["log_matchers"]
for matcher in matchers:
assert matcher["unit"] == expected_unit_value
return {"failed": False}
EtcdTraffic(execute_module, task_vars).run()
|
[
"mwoodson@redhat.com"
] |
mwoodson@redhat.com
|
8aa14bff98269133382561e641d8d76226b5b446
|
8383211ad5eb9bb91fef7642e43a8f148530b8fc
|
/iga/package.py
|
80c06890c846b23037d75e3903d72737c83c77b2
|
[
"MIT"
] |
permissive
|
clchiou/iga
|
599f1ba653323a49d9a26f66c072a9b4a2f0dcd7
|
5958f77410d63b712d13db142bfd5ecfbf4ce821
|
refs/heads/master
| 2021-01-23T19:37:15.670514
| 2015-04-25T07:28:17
| 2015-04-25T07:28:17
| 31,150,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,334
|
py
|
__all__ = [
'get_outputs',
'get_rule',
]
import itertools
import logging
import iga.context
import iga.precond
from iga.build_rules import build_rules
from iga.core import WriteOnceDict
from iga.error import IgaError
from iga.path import Glob
from iga.rule import Rule
from iga.rule import RuleFunc
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
# Packages that have been loaded (no BUILD file should be executed twice).
_LOADED_PACKAGES = set()
# Map a rule's outputs to that rule.
_OUTPUT_TO_RULE = WriteOnceDict()
def get_outputs():
return frozenset(_OUTPUT_TO_RULE)
def get_rule(label, *, raises=False):
"""Return Rule object or raise IgaError (if required, else return
None) if label does not refer to a rule or an output file.
"""
if label.package not in _LOADED_PACKAGES:
_load_rules(label.package)
_LOADED_PACKAGES.add(label.package)
rule_label = _OUTPUT_TO_RULE.get(label, label)
try:
return Rule.get_object(rule_label)
except KeyError:
if raises:
raise IgaError('%s does not refer to a rule or an output file' %
(label,))
return None
def _load_rules(package):
"""Load rules from a BUILD file."""
buildfile_path = iga.context.current()['source'] / package / 'BUILD'
LOG.info('load %s', buildfile_path)
with buildfile_path.open() as buildfile:
code = buildfile.read()
code = compile(code, str(buildfile_path), 'exec')
rule_data = []
with iga.context.create() as cxt:
cxt['package'] = package
cxt['rule_data'] = rule_data
exec(code, _make_buildfile_globals())
for rule in build_rules(package, rule_data):
Rule.register(rule)
for output in itertools.chain.from_iterable(rule.outputs.values()):
_OUTPUT_TO_RULE[output] = rule.name
def _make_buildfile_globals():
varz = WriteOnceDict()
varz.update(
glob=glob,
package=_do_nothing('package'),
)
varz.update(RuleFunc.get_all_objects())
return dict(varz)
def glob(string):
iga.precond.check_type(string, str)
return Glob(string)
def _do_nothing(func_name):
def func(**kwargs):
if kwargs:
LOG.debug('%s() ignores %r', func_name, sorted(kwargs))
return func
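# Hypothetical BUILD file that _load_rules would exec (rule funcs come from
# RuleFunc.get_all_objects(); the rule name below is illustrative only):
# package()
# cc_library(name='foo', srcs=glob('*.c'))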
|
[
"clchiou@gmail.com"
] |
clchiou@gmail.com
|
7e9e65b6dd1786e6e0c861077cf5c2096ec0ec71
|
e262e64415335060868e9f7f73ab8701e3be2f7b
|
/.history/pytest_test_20201123175355.py
|
e93f7595e48e3c38b47a185afc239ff87f50977e
|
[] |
no_license
|
Allison001/developer_test
|
6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63
|
b8e04b4b248b0c10a35e93128a5323165990052c
|
refs/heads/master
| 2023-06-18T08:46:40.202383
| 2021-07-23T03:31:54
| 2021-07-23T03:31:54
| 322,807,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
import pytest
import yaml
class Test_A:
# @pytest.mark.parametrize('a,b',[(10,20),(5,5)])
    # The original line was truncated at "yaml.safe_load."; a plausible completion
    # loads the (a, b) pairs from a YAML file (filename assumed for illustration):
    @pytest.mark.parametrize('a,b', yaml.safe_load(open('data.yaml')))
def test_data1(self,a,b):
print(a + b)
def test_data2(self):
a = 5
b = 5
print(a+b)
if __name__ == '__main__':
pytest.main(['pytest_test.py::Test_A::test_data2','-v'])
|
[
"zhangyingxbba@gmail.com"
] |
zhangyingxbba@gmail.com
|
0d30703c7f61070278fc2ff6e705ad1ea92f81b0
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_restrictions.py
|
c41ad3763392207c8a6b6d3285a039f0e0d179bf
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
from xai.brain.wordbase.nouns._restriction import _RESTRICTION
# class header
class _RESTRICTIONS(_RESTRICTION, ):
def __init__(self,):
_RESTRICTION.__init__(self)
self.name = "RESTRICTIONS"
self.specie = 'nouns'
self.basic = "restriction"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
7007e09c94950e2c259af5e00524b68cd8a2997b
|
8f0524fc0171e27a15f4cf5fb3fe48ef2053b40e
|
/hacker_rank/unique email addresses.py
|
bd763c3bf92789740bab9fbd0e863e314a5bfcfc
|
[] |
no_license
|
MohammedAlewi/competitive-programming
|
51514fa04ba03d14f8e00031ee413d6d74df971f
|
960da78bfa956cb1cf79a0cd19553af97a2aa0f3
|
refs/heads/master
| 2023-02-08T20:25:58.279241
| 2023-02-02T00:11:23
| 2023-02-02T00:11:23
| 222,710,225
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
def format_email(email):
email=list(email)
remove=False
for i in range(len(email)):
if email[i]=='@':
break
if email[i]=='+':
remove=True
if email[i]=='.':
email[i]=''
if remove:
email[i]=''
return "".join(email)
def unique_email_address(emails):
    unique = set()
    for email in emails:
        em = format_email(email)
        unique.add(em)
    return len(unique)
print( unique_email_address(["test.email+alex@leetcode.com","test.e.mail+bob.cathy@leetcode.com","testemail+david@lee.tcode.com"]) )
|
[
"rofyalewi@gmail.com"
] |
rofyalewi@gmail.com
|
6d83260ea939da933135941f5c115787ed5d1ba1
|
0f15175752b462b29725b459b46752facad35642
|
/tests/test_inputunit.py
|
f591e232e28cc0c42f917d722cd371938446aea1
|
[
"BSD-2-Clause-Views"
] |
permissive
|
Shadiesna/ooni-probe
|
f36d69fa2786e97df68cf0808da43190d4ab1daa
|
01d80a2abc235fedbd2944500e259e537fd46c45
|
refs/heads/master
| 2021-01-16T20:03:18.727535
| 2012-11-22T22:23:07
| 2012-11-22T22:23:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
import unittest
from ooni.inputunit import InputUnit, InputUnitFactory
class TestInputUnit(unittest.TestCase):
def test_input_unit_factory(self):
inputs = range(100)
inputUnit = InputUnitFactory(inputs)
for i in inputUnit:
self.assertEqual(len(list(i)), inputUnit.inputUnitSize)
def test_input_unit(self):
inputs = range(100)
inputUnit = InputUnit(inputs)
idx = 0
for i in inputUnit:
idx += 1
self.assertEqual(idx, 100)
|
[
"arturo@filasto.net"
] |
arturo@filasto.net
|
11007dbad5d04425e7f5781917716b9c536e4900
|
26452a6f63cf22f938498799db9f8e1997641774
|
/Flask/blog_flask/migrations/versions/c4a89bd2a384_.py
|
78958337a2948528512fe9e84cfb519c9345e57e
|
[] |
no_license
|
zarkaltair/Python-frameworks
|
9829e4b5130dd67a513c9e1426775cd761b96258
|
632ee4da8e008a6b0c27198dc4722b5aa3c464b8
|
refs/heads/master
| 2021-06-14T14:59:23.881043
| 2020-08-10T15:01:02
| 2020-08-10T15:01:02
| 160,838,202
| 0
| 0
| null | 2021-03-20T00:30:43
| 2018-12-07T15:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
"""empty message
Revision ID: c4a89bd2a384
Revises: da529ec88f6e
Create Date: 2019-02-04 23:41:02.786665
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c4a89bd2a384'
down_revision = 'da529ec88f6e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=100), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_table('roles_users',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles_users')
op.drop_table('user')
op.drop_table('role')
# ### end Alembic commands ###
|
[
"zarkaltair@gmail.com"
] |
zarkaltair@gmail.com
|
7aeb5411bc3db40e176c74f2f877e1c693ec71ba
|
fcdce57c1bd0cc4f52679fd0f3f82532550083fa
|
/214/countdown.py
|
aae0037e13d043346509cc187fbc2036bd61c9db
|
[] |
no_license
|
nishanthegde/bitesofpy
|
a16a8b5fb99ab18dc1566e606170464a4df3ace0
|
c28aa88e1366ab65f031695959d7cd0b3d08be6b
|
refs/heads/master
| 2023-08-08T16:53:17.107905
| 2023-07-22T19:07:51
| 2023-07-22T19:07:51
| 183,959,400
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
from itertools import islice
def countdown():
"""Write a generator that counts from 100 to 1"""
for i in range(100, 0, -1):
yield i
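# Usage sketch (not in the original), tying in the islice import above:
# list(islice(countdown(), 3)) evaluates the generator lazily to [100, 99, 98]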
def simple_generator_function():
yield 1
yield 2
yield 3
# def main():
# # print('thank you for my life...')
# our_generator = simple_generator_function()
# # print(next(our_generator))
# # print(next(our_generator))
# # print(next(our_generator))
# cd = countdown()
# for _ in range(101):
# print(next(cd))
# if __name__ == '__main__':
# main()
|
[
"nhegde@netflix.com"
] |
nhegde@netflix.com
|
0a4805eb9f3b7986ac7670f7a74595f7d72db420
|
a992d10d89a4aea6bc67cf36f2b4db18e542cf0c
|
/NineChapters/FindConnectedComponentInDirectedGraph.py
|
a13f4c45fb34cdb034e70798a1d08752a0161c01
|
[] |
no_license
|
KeleiAzz/LeetCode
|
81f5ac3f0c722bbabbcce4f29809722c191a6850
|
3d82e6c402711057a95a6435fc29fbfcf2ee9c8f
|
refs/heads/master
| 2021-01-17T10:34:42.443543
| 2016-04-05T18:17:54
| 2016-04-05T18:17:54
| 41,068,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
# Definition for a undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
# @param {UndirectedGraphNode[]} nodes a array of undirected graph node
# @return {int[][]} a connected set of a undirected graph
def connectedSet(self, nodes):
# Write your code here
if not nodes:
return []
res = []
visited = set()
for node in nodes:
if node not in visited:
tmp = []
queue = [node]
visited.add(node)
while queue:
n = queue.pop(0)
tmp.append(n)
# visited.add(n)
for neighbor in n.neighbors:
if neighbor not in visited:
queue.append(neighbor)
visited.add(neighbor)
res.append(tmp)
for i in range(len(res)):
res[i] = [node.label for node in res[i]]
res[i].sort()
return res
|
[
"kgong@ncsu.edu"
] |
kgong@ncsu.edu
|
552c7fe21a247d235ef5c79fbb2b8bebcd54438e
|
54fa8e9d460e8aa0b64fe26056e2760d87e7bbcf
|
/baseball_utils/get_today.py
|
bbd8371d94130c19dd9e53c4cec17d0dc8abf2af
|
[
"MIT"
] |
permissive
|
mvwicky/baseball-utils
|
eb0050076d64e238353d2a86fdaa7c52bc861dbc
|
124e315a8310e32acec716c25cb6615feac02b5c
|
refs/heads/master
| 2020-03-22T22:19:47.907792
| 2018-07-13T17:36:58
| 2018-07-13T17:36:58
| 140,746,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
from datetime import datetime
import attr
from requests import Session
from baseball_utils.gameday import GamedayData
from baseball_utils.savant import Savant
from baseball_utils.util import SESSION, default_attrs
def get_today():
td = Today(SESSION)
return td.probables()
@default_attrs()
class Today(object):
session: Session = attr.ib()
gd: GamedayData = attr.ib(init=False)
def __attrs_post_init__(self):
self.gd = GamedayData(self.session, Savant(self.session))
def probables(self):
pass
if __name__ == '__main__':
pass
|
[
"mvanwickle@gmail.com"
] |
mvanwickle@gmail.com
|
d7fd680264a075e0008e66128f731daba3352906
|
c81d7dfef424b088bf2509a1baf406a80384ea5a
|
/venv/Lib/site-packages/twilio/twiml/fax_response.py
|
86f0d47e9326dbf2c53d110fb4668e8cb531ea2c
|
[] |
no_license
|
Goutham2591/OMK_PART2
|
111210d78fc4845481ed55c852b8f2f938918f4a
|
cb54fb21ebf472bffc6ee4f634bf1e68303e113d
|
refs/heads/master
| 2022-12-10T01:43:08.213010
| 2018-04-05T02:09:41
| 2018-04-05T02:09:41
| 124,828,094
| 0
| 1
| null | 2022-12-07T23:43:03
| 2018-03-12T03:20:14
|
Python
|
UTF-8
|
Python
| false
| false
| 956
|
py
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
import json
from twilio.twiml import (
TwiML,
format_language,
)
class FaxResponse(TwiML):
""" <Response> TwiML for Faxes """
def __init__(self, **kwargs):
super(FaxResponse, self).__init__(**kwargs)
self.name = 'Response'
def receive(self, action=None, method=None, **kwargs):
"""
Create a <Receive> element
:param action: Receive action URL
:param method: Receive action URL method
:param kwargs: additional attributes
:returns: <Receive> element
"""
return self.nest(Receive(action=action, method=method, **kwargs))
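    # Illustrative usage (hypothetical action URL):
    # response = FaxResponse()
    # response.receive(action='/fax/receive', method='POST')
    # str(response) renders <Response><Receive action="/fax/receive" method="POST"/></Response>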
class Receive(TwiML):
""" <Receive> TwiML Verb """
def __init__(self, **kwargs):
super(Receive, self).__init__(**kwargs)
self.name = 'Receive'
|
[
"amatar@unomaha.edu"
] |
amatar@unomaha.edu
|
45506a93ffee58f66e981c1a00e7ef24971fca43
|
8671856181ef218f147f23f367fd0b1dc7592e1a
|
/customers/admin.py
|
243b18c20c73174bcd5b48544007e62989c33ddd
|
[] |
no_license
|
Alishrf/Shop_Website
|
e4fef9618aec2db6f4a655ff643aa68cf42dbb68
|
971d4a2ff8b7a68a0157681ff26404fe403502e6
|
refs/heads/master
| 2020-08-11T06:03:47.642870
| 2019-10-14T14:29:30
| 2019-10-14T14:29:30
| 214,504,737
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
from django.contrib import admin
from .models import Customer
class CustomerAdmin(admin.ModelAdmin):
list_display = ('user',)
admin.site.register(Customer,CustomerAdmin)
|
[
"a.sharifzadeh11@gmail.com"
] |
a.sharifzadeh11@gmail.com
|
9c33d352506198e4532780e67484f4a8d6a6b723
|
129cf00f2d4f38ba53cb638b38c75a191402ac8d
|
/ctfcli/core/plugins.py
|
4829493ac6977713f8ee265771ec8945f38bf7a2
|
[
"Apache-2.0"
] |
permissive
|
CTFd/ctfcli
|
ab92dedf467e234f9e19daab2fd7853697809aa2
|
928966c0b360d7864f2e535d258569edb7f93f88
|
refs/heads/master
| 2023-09-04T06:25:36.438018
| 2023-08-31T15:40:08
| 2023-08-31T15:40:08
| 252,505,424
| 139
| 57
|
Apache-2.0
| 2023-09-04T22:58:54
| 2020-04-02T16:14:57
|
Python
|
UTF-8
|
Python
| false
| false
| 605
|
py
|
import importlib
import logging
import sys
from typing import Dict
from ctfcli.core.config import Config
log = logging.getLogger("ctfcli.core.plugins")
def load_plugins(commands: Dict):
plugins_path = Config.get_plugins_path()
sys.path.insert(0, str(plugins_path.absolute()))
for plugin in sorted(plugins_path.iterdir()):
plugin_path = plugins_path / plugin / "__init__.py"
log.debug(f"Loading plugin '{plugin}' from '{plugin_path}'")
loaded = importlib.import_module(plugin.stem)
loaded.load(commands)
sys.path.remove(str(plugins_path.absolute()))
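# Hypothetical plugin layout this loader expects (names are illustrative):
# <plugins_path>/hello/__init__.py defining
# def load(commands):
#     commands["hello"] = lambda: print("hello from a ctfcli plugin")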
|
[
"noreply@github.com"
] |
CTFd.noreply@github.com
|
524a89245ae2b00ec761491fdff18fee9d9f01df
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/python/training/rmsprop.pyi
|
402add92a5c8c927c4a7d9283e05df91e4e0070f
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675
| 2019-05-15T06:21:43
| 2019-05-15T06:21:43
| 186,748,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
pyi
|
# Stubs for tensorflow.python.training.rmsprop (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python.framework import ops as ops
from tensorflow.python.ops import array_ops as array_ops, init_ops as init_ops, math_ops as math_ops
from tensorflow.python.training import optimizer as optimizer, training_ops as training_ops
from tensorflow.python.util.tf_export import tf_export as tf_export
from typing import Any as Any
class RMSPropOptimizer(optimizer.Optimizer):
def __init__(self, learning_rate: Any, decay: float = ..., momentum: float = ..., epsilon: float = ..., use_locking: bool = ..., centered: bool = ..., name: str = ...) -> None: ...
|
[
"matangover@gmail.com"
] |
matangover@gmail.com
|
87dd0f2152017150f51e44d48c8c79da3578a5d3
|
80d596df8a8ce8f9c844b325d2df5a1185581595
|
/clld/lib/rdf.py
|
232543caf318debf194d1af0d52c9dbb675e2bf7
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] |
permissive
|
FieldDB/clld
|
780d2893d6fdc766f91df0886280c0ea02f640eb
|
4738caf5125648dc952a97692c38f90ba13011b1
|
refs/heads/master
| 2021-01-15T15:15:55.131288
| 2014-02-17T20:25:10
| 2014-02-17T20:25:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,025
|
py
|
"""
This module provides functionality for handling our data as rdf.
"""
from collections import namedtuple
from cStringIO import StringIO
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import (
Namespace, DC, DCTERMS, DOAP, FOAF, OWL, RDF, RDFS, SKOS, VOID, XMLNS,
)
# make flake8 happy, but still have the following importable from here:
assert DOAP
assert XMLNS
from clld.util import encoded
Notation = namedtuple('Notation', 'name extension mimetype uri')
FORMATS = dict((n.name, n) for n in [
Notation('xml', 'rdf', 'application/rdf+xml', 'http://www.w3.org/ns/formats/RDF_XML'),
Notation('n3', 'n3', 'text/n3', 'http://www.w3.org/ns/formats/N3'),
Notation('nt', 'nt', 'text/nt', 'http://www.w3.org/ns/formats/N-Triples'),
Notation('turtle', 'ttl', 'text/turtle', 'http://www.w3.org/ns/formats/Turtle')])
NAMESPACES = {
"rdf": RDF,
"void": VOID,
"foaf": FOAF,
"frbr": Namespace("http://purl.org/vocab/frbr/core#"),
"dcterms": DCTERMS,
"rdfs": RDFS,
"geo": Namespace("http://www.w3.org/2003/01/geo/wgs84_pos#"),
"isbd": Namespace("http://iflastandards.info/ns/isbd/elements/"),
"skos": SKOS,
"dc": DC,
"gold": Namespace("http://purl.org/linguistics/gold/"),
"lexvo": Namespace("http://lexvo.org/ontology"),
"vcard": Namespace("http://www.w3.org/2001/vcard-rdf/3.0#"),
"bibo": Namespace("http://purl.org/ontology/bibo/"),
"owl": OWL,
}
class ClldGraph(Graph):
"""augment the standard rdflib.Graph by making sure our standard ns prefixes are
always bound.
"""
def __init__(self, *args, **kw):
super(ClldGraph, self).__init__(*args, **kw)
for prefix, ns in NAMESPACES.items():
self.bind(prefix, ns)
def properties_as_xml_snippet(subject, props):
"""somewhat ugly way to get at a snippet of an rdf-xml serialization of properties
of a subject.
"""
if isinstance(subject, basestring):
subject = URIRef(subject)
g = ClldGraph()
if props:
for p, o in props:
if ':' in p:
prefix, name = p.split(':')
p = getattr(NAMESPACES[prefix], name)
if isinstance(o, basestring):
if o.startswith('http://') or o.startswith('https://'):
o = URIRef(o)
else:
o = Literal(o)
g.add((subject, p, o))
res = []
in_desc = False
for line in g.serialize(format='xml').split('\n'):
if line.strip().startswith('</rdf:Description'):
break
if in_desc:
res.append(line)
if line.strip().startswith('<rdf:Description'):
in_desc = True
return '\n'.join(res)
def convert(string, from_, to_):
if from_ == to_:
return encoded(string)
assert from_ in FORMATS and to_ in FORMATS
g = Graph()
g.parse(StringIO(encoded(string)), format=from_)
out = StringIO()
g.serialize(out, format=to_)
out.seek(0)
return out.read()
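# Example (a minimal sketch): convert a single triple from Turtle to RDF/XML.
#
#   ttl = '<http://example.org/a> <http://purl.org/dc/terms/title> "A" .'
#   xml = convert(ttl, 'turtle', 'xml')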
|
[
"xrotwang@googlemail.com"
] |
xrotwang@googlemail.com
|
74bb80ae19f6954190d3a560b8bea9a8bcb80441
|
cfefcd99016a908df2584896845406942097671d
|
/python/test/test_portal_setting_value_list.py
|
317ced9bb1079bacd0e94b55d01464c91a68e87f
|
[] |
no_license
|
tomasgarzon/vigilant-guacamole
|
982a8c7cb0a8193bb3409014b447ad8a70e6eb36
|
bde73674cf0461e2fcdfce5074bf9d93a47227f7
|
refs/heads/main
| 2023-08-17T01:51:27.168440
| 2021-09-01T11:23:46
| 2021-09-01T11:23:46
| 398,827,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
"""
Nucoro API
No description # noqa: E501
The version of the OpenAPI document: 4.175.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.portal_setting_value_list import PortalSettingValueList
class TestPortalSettingValueList(unittest.TestCase):
"""PortalSettingValueList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPortalSettingValueList(self):
"""Test PortalSettingValueList"""
# FIXME: construct object with mandatory attributes with example values
# model = PortalSettingValueList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"tomasgarzonhervas@gmail.com"
] |
tomasgarzonhervas@gmail.com
|
a95e55e38b9d6a992da62ca6d9b84f7dfc0690cd
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_countries.py
|
0afbe19c74dda7dd31584a6a6e1c39ece912a31d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
# class header
class _COUNTRIES():
	def __init__(self,):
		self.name = "COUNTRIES"
		self.definitions = ['country']
		self.parents = []
		self.children = []
		self.properties = []
		self.jsondata = {}
		self.basic = ['country']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
7d5775a6b98d8aad1f6a14ac34b9357c5a103968
|
2b52e32f8ba65202078bde0173eb8e972434d3f8
|
/Python_Algorithm/Baek/17072.py
|
141252b6bae20376848af324ff10ec28a59c4398
|
[] |
no_license
|
HoeYeon/Algorithm
|
7167c463922227c0bc82e43940f7290fc1fa16af
|
0e5ce2a3347d733bbaa894391cbf344fcb5161d6
|
refs/heads/master
| 2020-09-08T17:27:56.654485
| 2020-08-02T08:23:46
| 2020-08-02T08:23:46
| 221,195,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
def i_f(li):
num = (2126*li[0] + 7152*li[1] + 722*li[2])
if 0 <= num < 510000:
return '#'
elif 510000 <= num < 1020000:
return 'o'
elif 1020000 <= num < 1530000:
return '+'
elif 1530000 <= num < 2040000:
return '-'
elif 2040000 <= num:
return '.'
n, m = map(int, input().split(' '))
pixel = [[] for i in range(n)]
result = [[] for i in range(n)]
for i in range(n):
li = list(map(int, input().split(' ')))
for j in range(m):
pixel[i].append([li[3*j], li[3*j+1], li[3*j+2]])
for i in range(len(pixel)):
result[i] = [i_f(j) for j in pixel[i]]
for i in result:
print(''.join(i))
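# Worked example (hypothetical input): for a 1 x 1 image whose pixel is (255, 255, 255),
# 2126*255 + 7152*255 + 722*255 = 2550000 >= 2040000, so the program prints '.'.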
|
[
"shy9546@naver.com"
] |
shy9546@naver.com
|
b6b9f0446819b1f0796ee2f017b86c55a180a31c
|
dce4a52986ddccea91fbf937bd89e0ae00b9d046
|
/jni-build/jni/include/tensorflow/contrib/graph_editor/tests/subgraph_test.py
|
b20632a6c212dfe946b0f0a96c26b327a97aebde
|
[
"MIT"
] |
permissive
|
Lab603/PicEncyclopedias
|
54a641b106b7bb2d2f71b2dacef1e5dbeaf773a6
|
6d39eeb66c63a6f0f7895befc588c9eb1dd105f9
|
refs/heads/master
| 2022-11-11T13:35:32.781340
| 2018-03-15T05:53:07
| 2018-03-15T05:53:07
| 103,941,664
| 6
| 3
|
MIT
| 2022-10-28T05:31:37
| 2017-09-18T13:20:47
|
C++
|
UTF-8
|
Python
| false
| false
| 3,145
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge
class SubgraphTest(tf.test.TestCase):
def setUp(self):
self.graph = tf.Graph()
with self.graph.as_default():
self.a = tf.constant([1., 1.], shape=[2], name="a")
with tf.name_scope("foo"):
self.b = tf.constant([2., 2.], shape=[2], name="b")
self.c = tf.add(self.a, self.b, name="c")
self.d = tf.constant([3., 3.], shape=[2], name="d")
with tf.name_scope("bar"):
self.e = tf.add(self.c, self.d, name="e")
self.f = tf.add(self.c, self.d, name="f")
self.g = tf.add(self.c, self.a, name="g")
with tf.control_dependencies([self.c.op]):
self.h = tf.add(self.f, self.g, name="h")
def test_subgraph(self):
sgv = ge.sgv(self.graph)
self.assertEqual(list(sgv.outputs), [self.e, self.h])
self.assertEqual(list(sgv.inputs), [])
self.assertEqual(len(sgv.ops), 8)
sgv = ge.sgv(self.f.op, self.g.op)
self.assertEqual(list(sgv.outputs), [self.f, self.g])
self.assertEqual(list(sgv.inputs), [self.c, self.d, self.a])
sgv = ge.sgv_scope("foo/bar", graph=self.graph)
self.assertEqual(list(sgv.ops),
[self.e.op, self.f.op, self.g.op, self.h.op])
def test_subgraph_remap(self):
sgv = ge.sgv(self.c.op)
self.assertEqual(list(sgv.outputs), [self.c])
self.assertEqual(list(sgv.inputs), [self.a, self.b])
sgv = sgv.remap_outputs_to_consumers()
self.assertEqual(list(sgv.outputs), [self.c, self.c, self.c])
sgv = sgv.remap_outputs_make_unique()
self.assertEqual(list(sgv.outputs), [self.c])
sgv = sgv.remap(new_input_indices=[], new_output_indices=[])
self.assertEqual(len(sgv.inputs), 0)
self.assertEqual(len(sgv.outputs), 0)
sgv = sgv.remap_default()
self.assertEqual(list(sgv.outputs), [self.c])
self.assertEqual(list(sgv.inputs), [self.a, self.b])
def test_remove_unused_ops(self):
sgv = ge.sgv(self.graph)
self.assertEqual(list(sgv.outputs), [self.e, self.h])
self.assertEqual(len(sgv.ops), 8)
sgv = sgv.remap_outputs(new_output_indices=[1]).remove_unused_ops()
self.assertEqual(list(sgv.outputs), [self.h])
self.assertEqual(len(sgv.ops), 7)
if __name__ == "__main__":
tf.test.main()
|
[
"super_mr.z@hotmail.comm"
] |
super_mr.z@hotmail.comm
|
8b44925e87277f4b637f2fb3403862d23eb35a82
|
62226afe584a0d7f8d52fc38ca416b19ffafcb7a
|
/hwtLib/amba/axi_comp/slave_timeout_test.py
|
bdcc4f60d7f43ba87be5625c2c8cd3564a3b0a8b
|
[
"MIT"
] |
permissive
|
Nic30/hwtLib
|
d08a08bdd0bf764971c4aa319ff03d4df8778395
|
4c1d54c7b15929032ad2ba984bf48b45f3549c49
|
refs/heads/master
| 2023-05-25T16:57:25.232026
| 2023-05-12T20:39:01
| 2023-05-12T20:39:01
| 63,018,738
| 36
| 8
|
MIT
| 2021-04-06T17:56:14
| 2016-07-10T21:13:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,411
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.amba.axi4 import Axi4
from hwtLib.amba.axiLite_comp.sim.utils import axi_randomize_per_channel
from hwtLib.amba.axi_comp.slave_timeout import AxiSlaveTimeout
from hwtLib.amba.constants import RESP_SLVERR, RESP_OKAY
from pyMathBitPrecise.bit_utils import mask
from hwtSimApi.constants import CLK_PERIOD
class AxiSlaveTimeoutTC(SimTestCase):
@classmethod
def setUpClass(cls):
u = cls.u = AxiSlaveTimeout(Axi4)
u.TIMEOUT = 4
cls.compileSim(u)
def randomize_all(self):
u = self.u
for axi in [u.m, u.s]:
axi_randomize_per_channel(self, axi)
def test_nop(self):
u = self.u
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
ae(u.m.aw._ag.data)
ae(u.m.w._ag.data)
ae(u.m.ar._ag.data)
ae(u.s.r._ag.data)
ae(u.s.b._ag.data)
def test_read(self):
u = self.u
ar_req = u.s.ar._ag.create_addr_req(0x8, 0, _id=1)
u.s.ar._ag.data.append(ar_req)
r_trans = (1, 0x123, RESP_OKAY, 1)
u.m.r._ag.data.append(r_trans)
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
ae(u.m.aw._ag.data)
ae(u.m.w._ag.data)
self.assertValSequenceEqual(u.m.ar._ag.data, [ar_req, ])
self.assertValSequenceEqual(u.s.r._ag.data, [r_trans, ])
ae(u.s.b._ag.data)
def test_read_timeout(self):
u = self.u
ar_req = u.s.ar._ag.create_addr_req(0x8, 0, _id=1)
u.s.ar._ag.data.append(ar_req)
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
ae(u.m.aw._ag.data)
ae(u.m.w._ag.data)
self.assertValSequenceEqual(u.m.ar._ag.data, [ar_req, ])
self.assertValSequenceEqual(u.s.r._ag.data, [(1, None, RESP_SLVERR, 1), ])
ae(u.s.b._ag.data)
def test_b_timeout(self):
u = self.u
aw_req = u.s.ar._ag.create_addr_req(0x8, 0, _id=1)
u.s.aw._ag.data.append(aw_req)
w_trans = (0x123, mask(u.m.DATA_WIDTH // 8), 1)
u.s.w._ag.data.append(w_trans)
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
self.assertValSequenceEqual(u.m.aw._ag.data, [aw_req, ])
self.assertValSequenceEqual(u.m.w._ag.data, [w_trans, ])
ae(u.m.ar._ag.data)
ae(u.s.r._ag.data)
        self.assertValSequenceEqual(u.s.b._ag.data, [(1, RESP_SLVERR)])
def test_write(self):
u = self.u
aw_req = u.s.ar._ag.create_addr_req(0x8, 0, _id=1)
u.s.aw._ag.data.append(aw_req)
w_trans = (0x123, mask(u.s.DATA_WIDTH // 8), 1)
u.s.w._ag.data.append(w_trans)
b_trans = (1, RESP_OKAY)
u.m.b._ag.data.append(b_trans)
self.runSim(10 * CLK_PERIOD)
ae = self.assertEmpty
self.assertValSequenceEqual(u.m.aw._ag.data, [aw_req, ])
self.assertValSequenceEqual(u.m.w._ag.data, [w_trans, ])
ae(u.m.ar._ag.data)
ae(u.s.r._ag.data)
self.assertValSequenceEqual(u.s.b._ag.data, [b_trans, ])
if __name__ == "__main__":
import unittest
testLoader = unittest.TestLoader()
# suite = unittest.TestSuite([AxiSlaveTimeoutTC("test_singleLong")])
suite = testLoader.loadTestsFromTestCase(AxiSlaveTimeoutTC)
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
|
[
"nic30@seznam.cz"
] |
nic30@seznam.cz
|
b0efcdc71b8fae99512efb620fd54b98fb9200ca
|
9a53024307d5b1706e53326d3eb4c1d77743a136
|
/graphql_demo/graphql_demo/schema.py
|
efae7b2a504175d776ec42bb3a87e87a1255ba47
|
[] |
no_license
|
Triadai/django-graphql-demo
|
4a995e8af1096965090378a00a0dd512ab05ecf7
|
1b1d40d7b09ade457d4252096bbfca0315557396
|
refs/heads/master
| 2021-05-06T15:14:26.514559
| 2017-06-19T06:42:05
| 2017-06-19T06:42:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
import graphene
import simple_app.schema
class Mutation(simple_app.schema.Mutation, graphene.ObjectType):
pass
class Query(simple_app.schema.Query, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query, mutation=Mutation)
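# Usage sketch (assumes simple_app.schema defines at least one query field):
#
#   result = schema.execute('{ __schema { queryType { name } } }')
#   result.data  # -> {'__schema': {'queryType': {'name': 'Query'}}}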
|
[
"mbrochh@gmail.com"
] |
mbrochh@gmail.com
|
cb871c3c36900c1f80f9553e2f068b11c83b60f9
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/142_v4/scores.py
|
6b1a27c733f9f2bb71e25354899689e842889dad
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
from collections import namedtuple
MIN_SCORE = 4
DICE_VALUES = range(1, 7)
Player = namedtuple('Player', 'name scores')
def calculate_score(scores):
"""Based on a list of score ints (dice roll), calculate the
total score only taking into account >= MIN_SCORE
(= eyes of the dice roll).
If one of the scores is not a valid dice roll (1-6)
raise a ValueError.
Returns int of the sum of the scores.
"""
    if not all(s in DICE_VALUES for s in scores):
raise ValueError
return sum(s for s in scores if s >= MIN_SCORE)
def get_winner(players):
"""Given a list of Player namedtuples return the player
with the highest score using calculate_score.
If the length of the scores lists of the players passed in
don't match up raise a ValueError.
Returns a Player namedtuple of the winner.
You can assume there is only one winner.
For example - input:
Player(name='player 1', scores=[1, 3, 2, 5])
Player(name='player 2', scores=[1, 1, 1, 1])
Player(name='player 3', scores=[4, 5, 1, 2])
output:
Player(name='player 3', scores=[4, 5, 1, 2])
"""
if not all(len(x.scores) == len(players[0].scores) for x in players[1:]):
raise ValueError
return max(players, key=lambda x: calculate_score(x.scores))
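# Usage sketch (example values only):
#
#   p1 = Player(name='player 1', scores=[1, 3, 2, 5])
#   p3 = Player(name='player 3', scores=[4, 5, 1, 2])
#   calculate_score(p3.scores)  # 9, since only 4 and 5 reach MIN_SCORE
#   get_winner([p1, p3])        # p3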
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
868309f5fcb83db8ab2e52e6e7f2cf5f34c3e5f8
|
8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a
|
/closure__examples/hello_world.py
|
84781ecceeb45ead5394fcd637397dc296b928bd
|
[
"CC-BY-4.0"
] |
permissive
|
stepik/SimplePyScripts
|
01092eb1b2c1c33756427abb2debbd0c0abf533f
|
3259d88cb58b650549080d6f63b15910ae7e4779
|
refs/heads/master
| 2023-05-15T17:35:55.743164
| 2021-06-11T22:59:07
| 2021-06-11T22:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://ru.wikipedia.org/wiki/Замыкание_(программирование)
# multiplier returns a function that multiplies by n
def multiplier(n):
def mul(k):
return n * k
return mul
# mul3 is a function that multiplies by 3
mul3 = multiplier(3)
print(mul3(3), mul3(5)) # 9 15
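# Another classic closure (added sketch): a counter that keeps mutable state
# between calls via nonlocal.
def make_counter():
    count = 0
    def inc():
        nonlocal count
        count += 1
        return count
    return inc
counter = make_counter()
print(counter(), counter())  # 1 2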
|
[
"ilya.petrash@inbox.ru"
] |
ilya.petrash@inbox.ru
|
5628bf46e85c2384cc2ad20595d2815fff2243ba
|
02442f7d3bd75da1b5b1bf6b981cc227906a058c
|
/rocon/build/rocon_msgs/concert_msgs/catkin_generated/pkg.installspace.context.pc.py
|
723bbed31accdfcda71ffdeb61f1a0bac6159af2
|
[] |
no_license
|
facaisdu/RaspRobot
|
b4ff7cee05c70ef849ea4ee946b1995432a376b7
|
e7dd2393cdabe60d08a202aa103f796ec5cd2158
|
refs/heads/master
| 2020-03-20T09:09:28.274814
| 2018-06-14T08:51:46
| 2018-06-14T08:51:46
| 137,329,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/sclab_robot/turtlebot_ws/rocon/install/include".split(';') if "/home/sclab_robot/turtlebot_ws/rocon/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "gateway_msgs;message_runtime;rocon_app_manager_msgs;rocon_std_msgs;std_msgs;uuid_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "concert_msgs"
PROJECT_SPACE_DIR = "/home/sclab_robot/turtlebot_ws/rocon/install"
PROJECT_VERSION = "0.9.0"
|
[
"facai_sdu@126.com"
] |
facai_sdu@126.com
|
6ef04cf075ceac67095db1f0d24a78ffa5b3359b
|
b87ec5d1b11b3a517256365f07413cec56eff972
|
/_cite/cite.py
|
5f8d135a94904956eb712f797a50eef448847197
|
[
"BSD-3-Clause"
] |
permissive
|
greenelab/lab-website-template
|
53349ae75c50f573aaa275260839c89a2fef9d3e
|
49149880097297f3f0bf90f6f3c2bc7856ca73de
|
refs/heads/main
| 2023-08-22T21:18:18.716765
| 2023-05-19T18:59:15
| 2023-05-19T18:59:15
| 296,680,938
| 213
| 309
|
BSD-3-Clause
| 2023-05-19T18:59:16
| 2020-09-18T16:57:31
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,916
|
py
|
"""
cite process to convert sources and metasources into full citations
"""
import traceback
from importlib import import_module
from pathlib import Path
from dotenv import load_dotenv
from util import *
# load environment variables
load_dotenv()
# error flag
error = False
# output citations file
output_file = "_data/citations.yaml"
log()
log("Compiling sources")
# compiled list of sources
sources = []
# in-order list of plugins to run
plugins = ["google-scholar", "pubmed", "orcid", "sources"]
# loop through plugins
for plugin in plugins:
# convert into path object
plugin = Path(f"plugins/{plugin}.py")
log(f"Running {plugin.stem} plugin")
# get all data files to process with current plugin
files = Path.cwd().glob(f"_data/{plugin.stem}*.*")
files = list(filter(lambda p: p.suffix in [".yaml", ".yml", ".json"], files))
log(f"Found {len(files)} {plugin.stem}* data file(s)", 1)
# loop through data files
for file in files:
log(f"Processing data file {file.name}", 1)
# load data from file
try:
data = load_data(file)
# check if file in correct format
if not list_of_dicts(data):
raise Exception("File not a list of dicts")
except Exception as e:
log(e, 2, "ERROR")
error = True
continue
# loop through data entries
for index, entry in enumerate(data):
log(f"Processing entry {index + 1} of {len(data)}, {label(entry)}", 2)
# run plugin on data entry to expand into multiple sources
try:
expanded = import_module(f"plugins.{plugin.stem}").main(entry)
# check that plugin returned correct format
if not list_of_dicts(expanded):
raise Exception("Plugin didn't return list of dicts")
# catch any plugin error
except Exception as e:
# log detailed pre-formatted/colored trace
print(traceback.format_exc())
# log high-level error
log(e, 3, "ERROR")
error = True
continue
# loop through sources
for source in expanded:
if plugin.stem != "sources":
log(label(source), 3)
# include meta info about source
source["plugin"] = plugin.name
source["file"] = file.name
# add source to compiled list
sources.append(source)
if plugin.stem != "sources":
log(f"{len(expanded)} source(s)", 3)
log("Merging sources by id")
# merge sources with matching (non-blank) ids
for a in range(0, len(sources)):
a_id = get_safe(sources, f"{a}.id", "")
if not a_id:
continue
for b in range(a + 1, len(sources)):
b_id = get_safe(sources, f"{b}.id", "")
if b_id == a_id:
log(f"Found duplicate {b_id}", 2)
sources[a].update(sources[b])
sources[b] = {}
sources = [entry for entry in sources if entry]
log(f"{len(sources)} total source(s) to cite")
log()
log("Generating citations")
# list of new citations
citations = []
# loop through compiled sources
for index, source in enumerate(sources):
log(f"Processing source {index + 1} of {len(sources)}, {label(source)}")
# new citation data for source
citation = {}
# source id
_id = get_safe(source, "id", "").strip()
# Manubot doesn't work without an id
if _id:
log("Using Manubot to generate citation", 1)
try:
# run Manubot and set citation
citation = cite_with_manubot(_id)
# if Manubot cannot cite source
except Exception as e:
# if regular source (id entered by user), throw error
if get_safe(source, "plugin", "") == "sources.py":
log(e, 3, "ERROR")
error = True
# otherwise, if from metasource (id retrieved from some third-party API), just warn
else:
log(e, 3, "WARNING")
# discard source from citations
# continue
# preserve fields from input source, overriding existing fields
citation.update(source)
# ensure date in proper format for correct date sorting
if get_safe(citation, "date", ""):
citation["date"] = format_date(get_safe(citation, "date", ""))
# add new citation to list
citations.append(citation)
log()
log("Saving updated citations")
# save new citations
try:
save_data(output_file, citations)
except Exception as e:
log(e, level="ERROR")
error = True
# exit at end, so user can see all errors in one run
if error:
log("Error(s) occurred above", level="ERROR")
exit(1)
else:
log("All done!", level="SUCCESS")
log("\n")
|
[
"noreply@github.com"
] |
greenelab.noreply@github.com
|
7573f09ccd7f7eba9450179f4e9d1fc18c3b3ceb
|
a961aaa37bde0c8217453631809da11203a145c3
|
/clients/python/SolviceRoutingClient/api/pvrp_api.py
|
3abc55442e8d069ced6cea4212ecef7e9841172b
|
[
"MIT"
] |
permissive
|
solvice/solvice-routing-client
|
0f3fdb69244e1d3db92159db4be651adcdf23eb1
|
cdaedaf47d202965549c9e2b7d9102d292c91d5b
|
refs/heads/master
| 2020-06-22T16:56:52.871358
| 2019-09-25T14:46:54
| 2019-09-25T14:46:54
| 197,749,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,201
|
py
|
# coding: utf-8
"""
OnRoute API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.1.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from SolviceRoutingClient.api_client import ApiClient
class PVRPApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def solve_pvrp(self, **kwargs): # noqa: E501
"""Solve a PVRP problem # noqa: E501
Periodic vehicle routing problems # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.solve_pvrp(async_req=True)
>>> result = thread.get()
:param async_req bool
:param PVRP body: PVRP problem solve request
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.solve_pvrp_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.solve_pvrp_with_http_info(**kwargs) # noqa: E501
return data
def solve_pvrp_with_http_info(self, **kwargs): # noqa: E501
"""Solve a PVRP problem # noqa: E501
Periodic vehicle routing problems # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.solve_pvrp_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param PVRP body: PVRP problem solve request
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method solve_pvrp" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/solve#PVRP', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Job', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
[
"cvh@solvice.io"
] |
cvh@solvice.io
|
620c4eaa8063ccdd8693983d8e97949ba279c96c
|
64c05e8c346ced131f65770db7c8ebe6c9e29c12
|
/tests/test_model/test_backbone/test_shufflenetv1_backbone.py
|
c5da09d306f1d1822c45a72528ea2ec7933a6fa7
|
[
"Apache-2.0"
] |
permissive
|
lilujunai/ZCls
|
bcf6c5ceae8ce7282e77678d63c6aa2daa4feda4
|
14a272540b6114fb80cd314066ff6721bcf85231
|
refs/heads/master
| 2023-02-25T05:46:07.794013
| 2021-02-04T07:34:39
| 2021-02-04T07:34:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,336
|
py
|
# -*- coding: utf-8 -*-
"""
@date: 2020/11/21 7:24 PM
@file: test_shufflenetv1_backbone.py
@author: zj
@description:
"""
import torch
from zcls.model.backbones.shufflenetv1_unit import ShuffleNetV1Unit
from zcls.model.backbones.shufflenetv1_backbone import ShuffleNetV1Backbone
def test_shufflenet_v1_backbone():
# g=1
model = ShuffleNetV1Backbone(
groups=1,
layer_planes=(144, 288, 576),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 576, 7, 7)
# g=2
model = ShuffleNetV1Backbone(
groups=2,
layer_planes=(200, 400, 800),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 800, 7, 7)
# g=3
model = ShuffleNetV1Backbone(
groups=3,
layer_planes=(240, 480, 960),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 960, 7, 7)
# g=4
model = ShuffleNetV1Backbone(
groups=4,
layer_planes=(272, 544, 1088),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 1088, 7, 7)
# g=8
model = ShuffleNetV1Backbone(
groups=8,
layer_planes=(384, 768, 1536),
layer_blocks=(4, 8, 4),
down_samples=(1, 1, 1),
with_groups=(0, 1, 1),
block_layer=ShuffleNetV1Unit,
)
print(model)
data = torch.randn(1, 3, 224, 224)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 1536, 7, 7)
if __name__ == '__main__':
test_shufflenet_v1_backbone()
|
[
"wy163zhuj@163.com"
] |
wy163zhuj@163.com
|
2b867933f646511764508dfeb84aecc84822e99f
|
ce6d74994bce49411f00f5053f56fb3b7c30bd50
|
/interview/interview16.py
|
f7620da21a3f9c3559924734d76be04bb6091369
|
[] |
no_license
|
zhengjiani/pyAlgorithm
|
9397906f3c85221e64f0415abfbb64d03eb1c51e
|
dbd04a17cf61bac37531e3337ba197c4af19489e
|
refs/heads/master
| 2021-07-11T19:07:26.480403
| 2020-07-16T00:25:24
| 2020-07-16T00:25:24
| 179,308,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/9/9 11:13
# @Author : zhengjiani
# @Software: PyCharm
# @Blog :https://zhengjiani.github.io/
"""
查找二叉树的深度,求该树的深度
从根节点到叶节点依次经过的结点(含根、叶结点)形成树的一条路径,最长路径的长度为树的深度
"""
class TreeNode:
def __init__(self,x):
self.val = x
self.left = None
self.right = None
class Solution:
def TreeDepth(self,pRoot):
        if pRoot is None:
            return 0
        ldepth = self.TreeDepth(pRoot.left)
        rdepth = self.TreeDepth(pRoot.right)
return max(ldepth,rdepth)+1
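# Usage sketch (example values only):
#
#   root = TreeNode(1)
#   root.left = TreeNode(2)
#   root.left.left = TreeNode(3)
#   Solution().TreeDepth(root)  # 3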
|
[
"936089353@qq.com"
] |
936089353@qq.com
|
6d2f796a571b224f6eafbc086a95247149bcaffc
|
cca8b92a8fb2e79d46a10e105cd6a98c22564383
|
/kuterless/public_fulfillment/serializers.py
|
1eaa29bb65dde90cda74fc6472c190c24bcf3ca7
|
[] |
no_license
|
gl1000007/NeuroNet
|
2fc0b57a1147546b926e3d9e3d2c4a00589d5f1c
|
222e8058a78ffe89442310c5650a2edb3eb8260c
|
refs/heads/master
| 2021-01-17T12:02:12.658928
| 2016-06-06T19:56:37
| 2016-06-06T19:56:37
| 72,467,802
| 1
| 0
| null | 2016-10-31T18:49:41
| 2016-10-31T18:49:41
| null |
UTF-8
|
Python
| false
| false
| 7,528
|
py
|
# -*- coding: utf-8 -*-
from coplay import models
from coplay.models import Discussion, Feedback, LikeLevel, Decision, Task, \
Viewer, FollowRelation, UserUpdate, Vote, Glimpse, AnonymousVisitor, \
AnonymousVisitorViewer, UserProfile, MAX_TEXT
from django.contrib.auth.models import User
from rest_framework import serializers
class DiscussionSerializer(serializers.ModelSerializer):
class Meta:
model = Discussion
fields = ('id',
'owner',
'title',
'description',
'created_at',
'updated_at',
'locked_at',
'is_restricted',
'is_viewing_require_login'
)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id',
'username',
'first_name',
'last_name'
)
class FeedbackSerializer(serializers.ModelSerializer):
class Meta:
model = Feedback
fields = ( 'id',
'discussion',
'user',
'feedbabk_type',
'content',
'created_at',
'updated_at'
)
class DecisionSerializer(serializers.ModelSerializer):
class Meta:
model = Decision
fields = ( 'id',
'parent',
'content',
'created_at',
'updated_at',
'value'
)
class VoteSerializer(serializers.ModelSerializer):
class Meta:
model = Vote
fields = ( 'id',
'voater',
'decision',
'created_at',
'updated_at',
'value',
)
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = Task
fields = ( 'id',
'parent',
'responsible',
'goal_description',
'target_date',
'closed_at',
'closed_by',
'status_description',
'status',
'created_at',
'updated_at',
'final_state'
)
class ViewerSerializer(serializers.ModelSerializer):
class Meta:
model = Viewer
fields = ( 'id',
'user',
'discussion',
'created_at',
'updated_at',
'views_counter',
'views_counter_updated_at',
'discussion_updated_at_on_last_view',
'is_a_follower',
'is_invited'
)
class GlimpseSerializer(serializers.ModelSerializer):
class Meta:
model = Glimpse
fields = ( 'id',
'viewer',
'anonymous_visitor_viewer',
'created_at',
'updated_at'
)
class AnonymousVisitorSerializer(serializers.ModelSerializer):
class Meta:
model = AnonymousVisitor
fields = ( 'id',
'user',
'created_at',
'updated_at'
)
class AnonymousVisitorViewerSerializer(serializers.ModelSerializer):
class Meta:
model = AnonymousVisitorViewer
fields = ( 'id',
'anonymous_visitor',
'discussion',
'created_at',
'updated_at',
'views_counter',
'views_counter_updated_at',
'discussion_updated_at_on_last_view'
)
class FollowRelationSerializer(serializers.ModelSerializer):
class Meta:
model = FollowRelation
fields = ( 'id',
'follower_user',
'following_user',
'created_at',
'updated_at'
)
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ( 'id',
'user',
'created_at',
'updated_at',
'segment',
'recieve_notifications',
'recieve_updates',
'can_limit_discussion_access',
'can_limit_discussion_to_login_users_only',
'a_player'
)
class UserUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = UserUpdate
fields = ( 'id',
'recipient',
'discussion',
'sender',
'header',
'content',
'details_url',
'created_at',
'updated_at',
'already_read'
)
class DecisionWholeSerializer(serializers.ModelSerializer):
vote_set = VoteSerializer(many=True)
class Meta:
model = Decision
fields = ( 'id',
'parent',
'content',
'created_at',
'updated_at',
'value',
'vote_set'
)
class DiscussionWholeSerializer(serializers.ModelSerializer):
feedback_set = FeedbackSerializer(many=True)
task_set = TaskSerializer(many=True)
decision_set = DecisionWholeSerializer( many = True)
viewer_set = ViewerSerializer(many = True)
class Meta:
model = Discussion
fields = ('id',
'owner',
'title',
'description',
'created_at',
'updated_at',
'locked_at',
'is_restricted',
'is_viewing_require_login',
'feedback_set',
'task_set',
'decision_set',
'viewer_set'
)
class CreateFeedback(object):
ENCOURAGE = 1
COOPERATION = 2
INTUITION = 3
ADVICE = 4
FEEDBACK_TYPES = (
(ENCOURAGE, 'encourage'),
        (COOPERATION, 'cooperation'),
(INTUITION, 'intuition'),
(ADVICE, 'advice'),
)
def __init__(self, feedback_type, content):
self.feedback_type = feedback_type
self.content = content
class AddFeedBackSerializer(serializers.Serializer):
feedback_type = serializers.ChoiceField(choices=CreateFeedback.FEEDBACK_TYPES)
content = serializers.CharField(max_length=MAX_TEXT, min_length=None)
def restore_object(self, attrs, instance=None):
"""
Given a dictionary of deserialized field values, either update
an existing model instance, or create a new model instance.
"""
if instance is not None:
instance.feedback_type = attrs.get('feedback_type', instance.feedback_type)
instance.content = attrs.get('content', instance.content)
return instance
return CreateFeedback(**attrs)
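# Usage sketch (restore_object implies the pre-3.0 DRF serializer API, where the
# restored instance is exposed as serializer.object):
#
#   serializer = AddFeedBackSerializer(data={'feedback_type': CreateFeedback.ADVICE,
#                                            'content': 'Consider splitting the task.'})
#   if serializer.is_valid():
#       feedback = serializer.object  # a CreateFeedback instance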
|
[
"tzahimanmobile@gmail.com"
] |
tzahimanmobile@gmail.com
|
e3f0533447afd1c99e2a5e345b86874bafc397a4
|
5132c0de8eb07fe0548442ad605852137899f2cd
|
/covsirphy/regression/rate_elastic_net.py
|
d6cbec5d776eaae99e3b734a9bb1bdf7e42291dc
|
[
"Apache-2.0"
] |
permissive
|
SelengeMGL/covid19-sir
|
c12d58f21de7c3c63e87fc3b55c9dda966653c17
|
64d50b79cc9d3e26019a230f3c373a2755451495
|
refs/heads/master
| 2023-05-06T18:41:21.901437
| 2021-05-23T12:52:11
| 2021-05-23T12:52:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,437
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from math import log10, floor
import numpy as np
import pandas as pd
from covsirphy.regression.param_elastic_net import _ParamElasticNetRegressor
class _RateElasticNetRegressor(_ParamElasticNetRegressor):
"""
Predict parameter values of ODE models with Elastic Net regression
and Indicators(n)/Indicators(n-1) -> Parameters(n)/Parameters(n-1) approach.
Args:
X (pandas.DataFrame):
Index
Date (pandas.Timestamp): observation date
Columns
(int/float): indicators
y (pandas.DataFrame):
Index
Date (pandas.Timestamp): observation date
Columns
(int/float) target values
delay (int): delay period [days]
kwargs: keyword arguments of sklearn.model_selection.train_test_split(test_size=0.2, random_state=0)
Note:
If @seed is included in kwargs, this will be converted to @random_state.
"""
# Description of regressor
DESC = "Indicators(n)/Indicators(n-1) -> Parameters(n)/Parameters(n-1) with Elastic Net"
def __init__(self, X, y, delay, **kwargs):
# Remember the last value of y (= the previous value of target y)
self._last_param_df = y.tail(1)
# Calculate X(n) / X(n-1) and replace inf/NA with 0
X_div = X.div(X.shift(1)).replace(np.inf, 0).fillna(0)
# Calculate y(n) / y(n-1) and replace inf with NAs (NAs will be removed in ._split())
y_div = y.div(y.shift(1)).replace(np.inf, np.nan)
super().__init__(X_div, y_div, delay, **kwargs)
def predict(self):
"""
Predict parameter values (via y) with self._regressor and X_target.
Returns:
pandas.DataFrame:
Index
Date (pandas.Timestamp): future dates
Columns
(float): parameter values (4 digits)
"""
# Predict parameter values
predicted = self._regressor.predict(self._X_target)
df = pd.DataFrame(predicted, index=self._X_target.index, columns=self._y_train.columns)
# Calculate y(n) values with y(0) and y(n) / y(n-1)
df = pd.concat([self._last_param_df, df], axis=0, sort=True)
df = df.cumprod().iloc[1:]
# parameter values: 4 digits
return df.applymap(lambda x: np.around(x, 4 - int(floor(log10(abs(x)))) - 1))
|
[
"7270139+lisphilar@users.noreply.github.com"
] |
7270139+lisphilar@users.noreply.github.com
|
1b3127988778826a12ef4eeeea33c86953e8b0c0
|
bbc2e379e1e2e9b573d455c1e604636a11e2e6f6
|
/config/settings/local.py
|
1adbaa040927159bfcd47d28686a87a91d768a14
|
[
"MIT"
] |
permissive
|
MikaelSantilio/uprevendas-api
|
cc7c21bc496194c6b4c9ba77205ed54b23a7f82a
|
f04312988ffe3231f68ae0ebeaed9eaf0a7914b0
|
refs/heads/master
| 2023-05-25T18:41:45.584569
| 2021-06-09T23:24:47
| 2021-06-09T23:24:47
| 330,134,711
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,447
|
py
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="b1obk3iNk5rk63pkwV9XW0BasqxxNcoaWC4avtUhCZQ56rmesMezMbqoJ82PnxD2",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#using-whitenoise-in-development
INSTALLED_APPS = ["whitenoise.runserver_nostatic"] + INSTALLED_APPS # noqa F405
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
|
[
"mikael.santilio@gmail.com"
] |
mikael.santilio@gmail.com
|
dce3c9dab31479292158a01d39e4e2914dbbb66e
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/DiMuonOppositeSignP3GeVMinMaxMassDocaHighPtProd.py
|
3ade6833c82d734ce4f0b64853268845d8508007
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
from Configurables import Generation, RepeatDecay, Inclusive, DiLeptonInAcceptance
from GaudiKernel.SystemOfUnits import GeV, MeV, mm
Generation().SampleGenerationTool = "RepeatDecay"
Generation().addTool( RepeatDecay )
Generation().RepeatDecay.NRedecay = 100
Generation().RepeatDecay.addTool( Inclusive )
Generation().RepeatDecay.Inclusive.ProductionTool = "PythiaProduction"
Generation().FullGenEventCutTool = "DiLeptonInAcceptance"
Generation().addTool( DiLeptonInAcceptance )
Generation().DiLeptonInAcceptance.RequireOppositeSign = True
Generation().DiLeptonInAcceptance.RequireSameSign = False
Generation().DiLeptonInAcceptance.LeptonOnePMin = 3*GeV
Generation().DiLeptonInAcceptance.LeptonTwoPMin = 3*GeV
Generation().DiLeptonInAcceptance.MinMass = 4700*MeV
Generation().DiLeptonInAcceptance.MaxMass = 6000*MeV
Generation().DiLeptonInAcceptance.PreselDoca = True
Generation().DiLeptonInAcceptance.DocaCut = 0.4*mm
Generation().DiLeptonInAcceptance.PreselPtProd = True
Generation().DiLeptonInAcceptance.PtProdMinCut = 4*GeV*4*GeV
Generation().DiLeptonInAcceptance.PtProdMaxCut = 1000*GeV*1000*GeV
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
22d444f0ffb8fb15ff5058d56f1660adc9d0469a
|
ca7162adc548c5937ebedd6234b40de7294e2da1
|
/19-Pillow图像处理/20将图片中黄色修改为红色.py
|
e7c2ffe94958924e92ac02d3486ca223d47d5643
|
[] |
no_license
|
meloLeeAnthony/PythonLearn
|
03c259d745b1ccdc039e9999889ab54be14ae020
|
9915ec5bb7048712a97539a9c5bce8743567b22a
|
refs/heads/master
| 2023-07-10T21:12:54.027143
| 2021-08-19T12:43:19
| 2021-08-19T12:43:19
| 289,487,502
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from PIL import ImageDraw, Image
img = Image.open('images/bjsxt.png')
draw_obj = ImageDraw.Draw(img)
width, height = img.size
# Change yellow pixels to red
def get_color(oldColor):
    '''
    If the pixel is yellow (255, 255, 0), convert it to red by zeroing the green channel.
    You can inspect the yellow range with the color palette in Windows Paint.
    '''
    # print(oldColor)
    # Check each channel's value; pure yellow is (255, 255, 0)
    if oldColor[0] > 60 and oldColor[1] > 60:
        return (oldColor[0], 0, oldColor[2])  # return red
else:
return oldColor
for x in range(width):
for y in range(height):
oldColor = img.getpixel((x, y))
draw_obj.point((x, y), fill=get_color(oldColor))
# img.show()
img.save('images/bjsxt_red.jpg')
|
[
"li.chun158@gmail.com"
] |
li.chun158@gmail.com
|
e0fefb1fccf976cc448cb2a66ea9adab80e6d73f
|
ab8a5876c12d42db3a61a1560c774e118da5605e
|
/MDShop/service.py
|
dccd47d0261d2f84b66739d94e39569f047d4b25
|
[] |
no_license
|
mustavfaa/django_16.09
|
6e19d75dc1bcd2536e3d10d854989370607c0518
|
b88ebe6f87d2facc51fee86dd18eb4cadaba0e14
|
refs/heads/main
| 2023-08-10T18:23:39.892882
| 2021-09-17T06:17:15
| 2021-09-17T06:17:15
| 406,880,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
from django_filters import rest_framework as filters
from .models import smartphone
class CharFilterInFilter(filters.BaseInFilter, filters.CharFilter):
pass
class ShoppFilter(filters.FilterSet):
genres = CharFilterInFilter(field_name='category__name', lookup_expr='in')
price = filters.RangeFilter()
class Meta:
model = smartphone
fields = ['genres', 'price']
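# Example query (a sketch): BaseInFilter takes a comma-separated list and
# RangeFilter exposes <name>_min / <name>_max parameters.
#
#   GET /smartphones/?genres=android,flagship&price_min=100&price_max=500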
|
[
"72229762+mustavfaa@users.noreply.github.com"
] |
72229762+mustavfaa@users.noreply.github.com
|
b3061ff7daa2a9bc88afc9974b5be35abd3df341
|
e3ec5f1898ae491fa0afcdcc154fb306fd694f83
|
/src/components/outputOpController/outputOpController.py
|
24557b2dbd08cef6d8cd72a110151d21fd212f31
|
[
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
phoebezhung/raytk
|
42397559a76a9ba39308ac03344b4446f64ea04d
|
b91483ce88b2956d7b23717b11e223d332ca8395
|
refs/heads/master
| 2023-08-27T05:20:38.062360
| 2021-10-21T04:33:18
| 2021-10-21T04:33:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,874
|
py
|
from typing import List
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
from _typeAliases import *
class _Par:
Hostop: 'OPParamT'
Opdef: 'OPParamT'
Rendertop: 'OPParamT'
Shaderbuilder: 'OPParamT'
Fixedtexinputs: 'StrParamT'
Texselectors: 'StrParamT'
class _COMP(COMP):
par: _Par
class OutputOp:
def __init__(self, ownerComp: '_COMP'):
self.ownerComp = ownerComp
def _host(self) -> 'Optional[COMP]':
return self.ownerComp.par.Hostop.eval()
def _opDef(self) -> 'Optional[COMP]':
return self.ownerComp.par.Opdef.eval()
def _renderTop(self) -> 'Optional[glslmultiTOP]':
return self.ownerComp.par.Rendertop.eval()
def onInit(self):
self.updateTextureInputs()
self.resetInfoParams()
def resetInfoParams(self):
host = self._host()
if not host:
return
for par in host.customPars:
if par.page == 'Info' and not par.readOnly and not par:
par.val = par.default
def updateTextureInputs(self):
renderTop = self._renderTop()
if not renderTop:
return
for conn in renderTop.inputConnectors:
while conn.connections:
conn.disconnect()
fixedInputs = self.ownerComp.par.Fixedtexinputs.evalOPs() # type: List[TOP]
if fixedInputs:
for inputTop in fixedInputs:
if inputTop:
inputTop.outputConnectors[0].connect(renderTop)
host = self._host()
host.clearScriptErrors(error='texerr*')
texSources = self.ownerComp.op('textureSources') # type: DAT
selectors = self.ownerComp.par.Texselectors.evalOPs() # type: List[TOP]
for i in range(texSources.numRows):
if i >= len(selectors):
host.addScriptError(f'texerr: Too many texture sources (failed on #{i})')
return
select = selectors[i]
while select.outputConnectors[0].connections:
select.outputConnectors[0].disconnect()
select.outputConnectors[0].connect(renderTop)
|
[
"tekt@immerse.studio"
] |
tekt@immerse.studio
|
e2b8ce4a88472ff1350842e57f2585a6f482b607
|
0a5eedbd7d6c844dfb557aa57e88f1b9e0527665
|
/data/io/read_tfrecord.py
|
ac58ea1c0f82bfd71692a17dc1da4a31f7aa5dec
|
[
"MIT"
] |
permissive
|
chizhanyuefeng/R2CNN-Plus-Plus_Tensorflow
|
f63ad3a6e535d59528f6a06b7a9f877ec4607c7d
|
adec3de17db1d07eaf5d7bc1b1dc387934985b79
|
refs/heads/master
| 2020-09-22T21:24:28.999333
| 2019-12-03T02:25:08
| 2019-12-03T02:25:08
| 225,324,251
| 0
| 0
|
MIT
| 2019-12-02T08:32:34
| 2019-12-02T08:32:34
| null |
UTF-8
|
Python
| false
| false
| 5,416
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import tensorflow as tf
import os
from data.io import image_preprocess
from libs.configs import cfgs
def read_single_example_and_decode(filename_queue):
# tfrecord_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
# reader = tf.TFRecordReader(options=tfrecord_options)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized=serialized_example,
features={
'img_name': tf.FixedLenFeature([], tf.string),
'img_height': tf.FixedLenFeature([], tf.int64),
'img_width': tf.FixedLenFeature([], tf.int64),
'img': tf.FixedLenFeature([], tf.string),
'gtboxes_and_label': tf.FixedLenFeature([], tf.string),
'num_objects': tf.FixedLenFeature([], tf.int64)
}
)
img_name = features['img_name']
img_height = tf.cast(features['img_height'], tf.int32)
img_width = tf.cast(features['img_width'], tf.int32)
img = tf.decode_raw(features['img'], tf.uint8)
img = tf.reshape(img, shape=[img_height, img_width, 3])
# DOTA dataset need exchange img_width and img_height
# img = tf.reshape(img, shape=[img_width, img_height, 3])
gtboxes_and_label = tf.decode_raw(features['gtboxes_and_label'], tf.int32)
gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 9])
num_objects = tf.cast(features['num_objects'], tf.int32)
return img_name, img, gtboxes_and_label, num_objects
def read_and_prepocess_single_img(filename_queue, shortside_len, is_training):
img_name, img, gtboxes_and_label, num_objects = read_single_example_and_decode(filename_queue)
img = tf.cast(img, tf.float32)
img = img - tf.constant(cfgs.PIXEL_MEAN)
if is_training:
img, gtboxes_and_label = image_preprocess.short_side_resize(img_tensor=img, gtboxes_and_label=gtboxes_and_label,
target_shortside_len=shortside_len)
img, gtboxes_and_label = image_preprocess.random_flip_left_right(img_tensor=img,
gtboxes_and_label=gtboxes_and_label)
else:
img, gtboxes_and_label = image_preprocess.short_side_resize(img_tensor=img, gtboxes_and_label=gtboxes_and_label,
target_shortside_len=shortside_len)
return img_name, img, gtboxes_and_label, num_objects
def next_batch(dataset_name, batch_size, shortside_len, is_training):
'''
:return:
img_name_batch: shape(1, 1)
img_batch: shape:(1, new_imgH, new_imgW, C)
gtboxes_and_label_batch: shape(1, Num_Of_objects, 5] .each row is [x1, y1, x2, y2, label]
'''
    assert batch_size == 1, "we only support a batch_size of 1; we may support larger batch sizes in the future"
    if dataset_name not in ['jyzdata', 'DOTA', 'ship', 'ICDAR2015', 'pascal', 'coco', 'DOTA_TOTAL', 'WIDER']:
        raise ValueError('dataset name must be one of jyzdata, DOTA, ship, ICDAR2015, pascal, coco, DOTA_TOTAL or WIDER')
if is_training:
pattern = os.path.join('../data/tfrecord', dataset_name + '_train*')
else:
pattern = os.path.join('../data/tfrecord', dataset_name + '_test*')
print('tfrecord path is -->', os.path.abspath(pattern))
filename_tensorlist = tf.train.match_filenames_once(pattern)
filename_queue = tf.train.string_input_producer(filename_tensorlist)
shortside_len = tf.constant(shortside_len)
shortside_len = tf.random_shuffle(shortside_len)[0]
img_name, img, gtboxes_and_label, num_obs = read_and_prepocess_single_img(filename_queue, shortside_len,
is_training=is_training)
img_name_batch, img_batch, gtboxes_and_label_batch , num_obs_batch = \
tf.train.batch(
[img_name, img, gtboxes_and_label, num_obs],
batch_size=batch_size,
capacity=1,
num_threads=1,
dynamic_pad=True)
return img_name_batch, img_batch, gtboxes_and_label_batch, num_obs_batch
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
next_batch(dataset_name=cfgs.DATASET_NAME, # 'pascal', 'coco'
batch_size=cfgs.BATCH_SIZE,
shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
is_training=True)
gtboxes_and_label = tf.reshape(gtboxes_and_label_batch, [-1, 9])
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess, coord)
img_name_batch_, img_batch_, gtboxes_and_label_batch_, num_objects_batch_ \
= sess.run([img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch])
print('debug')
coord.request_stop()
coord.join(threads)
|
[
"yangxue0827@126.com"
] |
yangxue0827@126.com
|
99670667c78f1a507843a9f1ace224929d1a1a68
|
4ec709b16e366c60a9c7f2f7696608b036825140
|
/stanislaus/_parameters/network_PH_Cost.py
|
4ac6c1bd28f1a7f60b97bff44547c4d3f60eaf4d
|
[] |
no_license
|
alanccai/sierra-pywr
|
19c7efc4485879a4ca35677fdb14b3c795829e02
|
4447c6247af5159030b3025f14c2397283c4fcd0
|
refs/heads/master
| 2020-08-19T10:17:30.590861
| 2019-10-15T20:33:45
| 2019-10-15T20:33:45
| 215,909,820
| 0
| 0
| null | 2019-11-15T17:54:23
| 2019-10-18T00:31:58
| null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
from parameters import WaterLPParameter
class network_PH_Cost(WaterLPParameter):
""""""
# path = "s3_imports/energy_netDemand.csv"
baseline_median_daily_energy_demand = 768 # 768 GWh is median daily energy demand for 2009
def _value(self, timestep, scenario_index, mode='scheduling'):
totDemandP = self.model.parameters["Total Net Energy Demand"]
maxDemandP = self.model.parameters["Max Net Energy Demand"]
minDemandP = self.model.parameters["Min Net Energy Demand"]
days_in_period = 1
if self.mode == 'scheduling':
totDemand = totDemandP.value(timestep, scenario_index)
            minDemand = minDemandP.value(timestep, scenario_index)
maxDemand = maxDemandP.value(timestep, scenario_index)
else:
planning_dates = self.dates_in_planning_month(timestep, month_offset=self.month_offset)
days_in_period = len(planning_dates)
totDemand = totDemandP.dataframe[planning_dates].sum()
minDemand = minDemandP.dataframe[planning_dates].min()
maxDemand = maxDemandP.dataframe[planning_dates].max()
minVal = self.model.parameters[self.demand_constant_param].value(timestep, scenario_index) \
* (totDemand / (self.baseline_median_daily_energy_demand * days_in_period))
maxVal = minVal * (maxDemand / minDemand)
d = maxVal - minVal
nblocks = self.model.parameters['Blocks'].value(timestep, scenario_index)
return -(maxVal - ((self.block * 2 - 1) * d / 2) / nblocks)
def value(self, timestep, scenario_index):
return self._value(timestep, scenario_index, mode=self.mode)
@classmethod
def load(cls, model, data):
return cls(model, **data)
network_PH_Cost.register()
print(" [*] PH_Cost successfully registered")
|
[
"herr.rhein@gmail.com"
] |
herr.rhein@gmail.com
|
ffdf035d8454aa99dc436f4467f389c084b23666
|
002c14cd622b4890cce1c243065cebe39e2302ec
|
/LeetCode/13-Roman-to-Integer/Roman-to-Integer.py
|
d340a9596c72981fd2f85799d215d961b5f90c82
|
[
"MIT"
] |
permissive
|
hscspring/The-DataStructure-and-Algorithms
|
6200eba031eac51b13e320e1fc9f204644933e00
|
e704a92e091f2fdf5f27ec433e0e516ccc787ebb
|
refs/heads/master
| 2022-08-29T18:47:52.378884
| 2022-08-25T16:22:44
| 2022-08-25T16:22:44
| 201,743,910
| 11
| 3
|
MIT
| 2021-04-20T18:28:47
| 2019-08-11T09:26:34
|
Python
|
UTF-8
|
Python
| false
| false
| 890
|
py
|
class Solution(object):
def romanToInt(self, s):
"""
:type s: str
:rtype: int
"""
romanDict = {
"I": 1,
"V": 5,
"X": 10,
"L": 50,
"C": 100,
"D": 500,
"M": 1000,
}
res = 0
i = 0
while i < len(s) - 1:
if romanDict[s[i+1]] > romanDict[s[i]]:
res += romanDict[s[i+1]] - romanDict[s[i]]
i += 2
else:
res += romanDict[s[i]]
i += 1
if i < len(s):
res += romanDict[s[i]]
return res
if __name__ == '__main__':
so = Solution()
assert so.romanToInt("III") == 3
assert so.romanToInt("IV") == 4
assert so.romanToInt("IX") == 9
assert so.romanToInt("LVIII") == 58
assert so.romanToInt("MCMXCIV") == 1994
|
[
"haoshaochun@gmail.com"
] |
haoshaochun@gmail.com
|
8e9a7fa16f4019455c5bd5558201c626dc070351
|
0849923ebcde8f56a6e8550ae4f3c5ee3e2e0846
|
/apps/search/src/search/decorators.py
|
ab684bbf9ba17e89cf01ce65ffd14cbcfb1e7154
|
[
"Apache-2.0"
] |
permissive
|
thinker0/hue
|
511a5796cdfe45e0b27f1d3309557ca60ce8b13b
|
ee5aecc3db442e962584d3151c0f2eab397d6707
|
refs/heads/master
| 2022-07-10T02:37:23.591348
| 2014-03-27T20:05:00
| 2014-03-27T20:05:00
| 12,731,435
| 0
| 0
|
Apache-2.0
| 2022-07-01T17:44:37
| 2013-09-10T14:13:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
LOG = logging.getLogger(__name__)
def allow_admin_only(view_func):
def decorate(request, *args, **kwargs):
if not request.user.is_superuser:
message = _("Permission denied. You are not an Administrator.")
raise PopupException(message)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
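# Hypothetical usage sketch (the view name is assumed, not from this file):
#
# @allow_admin_only
# def purge_index(request):
#     ...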
|
[
"romain@cloudera.com"
] |
romain@cloudera.com
|
76967bed7d5d25cae9b9fed322cae51c0c6b38b6
|
5f3fb04f8e04f8aa9d15bb9cded75b98fa53422f
|
/fofo_lazada/wizard/import_customer_payment.py
|
bb5400658e8c71eb8bbba8ad1ec853d2576ef6ef
|
[] |
no_license
|
kittiu/fofo
|
49a5b9110814bc8512e22fd101e821e6820b2f0a
|
b73e2009e220fd843e91d9ea414f514ae113b76c
|
refs/heads/master
| 2020-04-16T20:12:44.309677
| 2018-08-03T14:53:12
| 2018-08-03T14:53:12
| 63,481,415
| 0
| 1
| null | 2018-08-03T14:53:13
| 2016-07-16T12:13:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
from datetime import datetime
import csv
import StringIO
import base64
import xlrd
from openerp import tools
from _abcoll import ItemsView
class import_customer_payment(models.TransientModel):
_name = 'import.customer.payment'
input_file = fields.Binary('Input File')
@api.multi
def import_payments(self):
for line in self:
lines = xlrd.open_workbook(file_contents=base64.decodestring(self.input_file))
print "---------lines",lines
for sheet_name in lines.sheet_names():
sheet = lines.sheet_by_name(sheet_name)
rows = sheet.nrows
columns = sheet.ncols
print "-rows--columns------",rows,columns
print "---------==sheet.row_values(0)==",sheet.row_values(0)
seller_sku = sheet.row_values(0).index('Seller SKU')
created_at = sheet.row_values(0).index('Created at')
order_number = sheet.row_values(0).index('Order Number')
unit_price = sheet.row_values(0).index('Unit Price')
status = sheet.row_values(0).index('Status')
|
[
"kittiu@gmail.com"
] |
kittiu@gmail.com
|
076656d8a2ada109350a4f62dbd2e457331d513d
|
6512f283dbde46ec31f985889166798c5f943484
|
/utils/prepare_docs.py
|
04cb182e21ba277bb033f7ebf3408dcee748bd23
|
[
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
greenelab/iscb-diversity
|
d5db60693e2c2038c1033cb10f6c3a57c33a581d
|
f4f5d013a5263cb4a591a2cd0841f43fd082c7e3
|
refs/heads/master
| 2023-08-01T00:11:46.070563
| 2021-09-09T19:38:31
| 2021-09-09T19:38:31
| 207,814,106
| 7
| 1
|
NOASSERTION
| 2021-06-10T15:50:41
| 2019-09-11T13:03:02
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,914
|
py
|
"""
Generate docs directory for GitHub Pages.
"""
import argparse
import pathlib
import subprocess
import sys
root_dir = pathlib.Path(__file__).parent.parent
docs_dir = root_dir.joinpath("docs")
readme_template = """\
# Project Webpage for ISCB Diversity Analysis
<!-- make sure to edit this content in utils/prepare_docs.py and not docs/readme.md -->
More information at <https://github.com/greenelab/iscb-diversity>.
See also the study corresponding to this analysis at <https://greenelab.github.io/iscb-diversity-manuscript/>.
## Notebooks
See the following rendered notebooks:
{notebook_list_md}
"""
def parse_args():
parser = argparse.ArgumentParser(
description="Generate docs directory for GitHub Pages."
)
parser.add_argument(
"--nbconvert", action="store_true", help="Convert .ipynb files to docs/*.html"
)
parser.add_argument(
"--nbviewer", action="store_true", help="Use links to https://nbviewer.jupyter.org/ for Jupyter notebooks"
)
parser.add_argument(
"--repo",
default="greenelab/iscb-diversity",
help="GitHub repository to use for hyperlinks",
)
parser.add_argument(
"--readme",
action="store_true",
help="Regenerate docs/readme.md (the GitHub Pages homepage)",
)
args = parser.parse_args()
if len(sys.argv) == 1:
# print help when no arguments are specified
parser.print_help()
return args
def get_ipynb_paths():
ipynb_paths = sorted(root_dir.glob("*.ipynb"))
ipynb_paths = [path.relative_to(root_dir) for path in ipynb_paths]
return ipynb_paths
def render_jupyter_notebooks():
ipynb_paths = get_ipynb_paths()
args = [
"jupyter",
"nbconvert",
"--output-dir=docs",
*ipynb_paths,
]
subprocess.run(args, cwd=root_dir)
def get_nbviewer_md_list(args):
nbviewer_md_list = []
if not args.nbviewer:
return nbviewer_md_list
ipynb_paths = get_ipynb_paths()
for path in ipynb_paths:
nbviewer_url = f"https://nbviewer.jupyter.org/github/{args.repo}/blob/master/{path}"
md = f"- [{path.stem}]({nbviewer_url})"
nbviewer_md_list.append(md)
return nbviewer_md_list
def get_notebook_list_md(args):
notebook_md_list = get_nbviewer_md_list(args)
html_paths = sorted(docs_dir.glob("**/*.html"))
for path in html_paths:
path = path.relative_to(docs_dir)
notebook_md_list.append(f"- [{path.stem}]({path})")
notebook_md_list.sort()
return "\n".join(notebook_md_list)
if __name__ == "__main__":
args = parse_args()
assert docs_dir.is_dir()
if args.nbconvert:
render_jupyter_notebooks()
if args.readme:
notebook_list_md = get_notebook_list_md(args)
readme = readme_template.format(notebook_list_md=notebook_list_md)
docs_dir.joinpath("readme.md").write_text(readme)
|
[
"daniel.himmelstein@gmail.com"
] |
daniel.himmelstein@gmail.com
|
4b154b3ded63895ffebd249d37efbf7f792b89b5
|
3e4b8fe54f11bf36f3615c21fdc1dca0ed00fe72
|
/month05/day04/03_weekday.py
|
d301021c4a68b80eb21452bfaee535f7a7743f07
|
[] |
no_license
|
leinian85/year2019
|
30d66b1b209915301273f3c367bea224b1f449a4
|
2f573fa1c410e9db692bce65d445d0543fe39503
|
refs/heads/master
| 2020-06-21T20:06:34.220046
| 2019-11-04T06:37:02
| 2019-11-04T06:37:02
| 197,541,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
import numpy as np
ary = np.arange(1,37).reshape(6,6)
def apply(data):
return data.mean()
print(ary)
r = np.apply_along_axis(apply,1,ary)
print(r)
print(apply(ary[0,:]))
|
[
"42737521@qq.com"
] |
42737521@qq.com
|
7fa486dfb3dd91ddb4381bfd2d02fa65696c93d1
|
f8f2536fa873afa43dafe0217faa9134e57c8a1e
|
/aliyun-python-sdk-smarthosting/aliyunsdksmarthosting/request/v20200801/UpdateManagedHostAttributesRequest.py
|
6903b8467168f5b270b89a199b450c06c42e6c34
|
[
"Apache-2.0"
] |
permissive
|
Sunnywillow/aliyun-openapi-python-sdk
|
40b1b17ca39467e9f8405cb2ca08a85b9befd533
|
6855864a1d46f818d73f5870da0efec2b820baf5
|
refs/heads/master
| 2022-12-04T02:22:27.550198
| 2020-08-20T04:11:34
| 2020-08-20T04:11:34
| 288,944,896
| 1
| 0
|
NOASSERTION
| 2020-08-20T08:04:01
| 2020-08-20T08:04:01
| null |
UTF-8
|
Python
| false
| false
| 2,619
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmarthosting.endpoint import endpoint_data
class UpdateManagedHostAttributesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'SmartHosting', '2020-08-01', 'UpdateManagedHostAttributes','SmartHosting')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Mode(self):
return self.get_query_params().get('Mode')
def set_Mode(self,Mode):
self.add_query_param('Mode',Mode)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_ManagedHostId(self):
return self.get_query_params().get('ManagedHostId')
def set_ManagedHostId(self,ManagedHostId):
self.add_query_param('ManagedHostId',ManagedHostId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ManagedHostName(self):
return self.get_query_params().get('ManagedHostName')
def set_ManagedHostName(self,ManagedHostName):
self.add_query_param('ManagedHostName',ManagedHostName)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
97a3e24436a577e4b1e73d9a2f7511d8325217ef
|
5672737d1ff34bebfeb408426e52ed49df8be3bb
|
/graphgallery/attack/targeted/common/rand.py
|
a95829950f927f663815568b32c3a8170abb5757
|
[
"MIT"
] |
permissive
|
sailfish009/GraphGallery
|
5063ee43340a6ca8da9f2d7fb3c44349e80321b2
|
4eec9c5136bda14809bd22584b26cc346cdb633b
|
refs/heads/master
| 2023-08-24T19:19:59.714411
| 2021-10-16T10:10:40
| 2021-10-16T10:10:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,098
|
py
|
import random
import numpy as np
from graphgallery.utils import tqdm
from graphgallery.attack.targeted import Common
from ..targeted_attacker import TargetedAttacker
@Common.register()
class RAND(TargetedAttacker):
def reset(self):
super().reset()
self.modified_degree = self.degree.copy()
return self
def process(self, reset=True):
self.nodes_set = set(range(self.num_nodes))
if reset:
self.reset()
return self
def attack(self,
target,
num_budgets=None,
threshold=0.5,
direct_attack=True,
structure_attack=True,
feature_attack=False,
disable=False):
super().attack(target, num_budgets, direct_attack, structure_attack,
feature_attack)
if direct_attack:
influence_nodes = [target]
else:
# influence_nodes = list(self.graph.neighbors(target))
influence_nodes = self.graph.adj_matrix[target].indices.tolist()
chosen = 0
adj_flips = self.adj_flips
with tqdm(total=self.num_budgets,
                  desc='Perturbing Graph',
disable=disable) as pbar:
while chosen < self.num_budgets:
# randomly choose to add or remove edges
if np.random.rand() <= threshold:
delta = 1.0
edge = self.add_edge(influence_nodes)
else:
delta = -1.0
edge = self.del_edge(influence_nodes)
if edge is not None:
adj_flips[edge] = chosen
chosen += 1
u, v = edge
self.modified_degree[u] += delta
self.modified_degree[v] += delta
pbar.update(1)
return self
def add_edge(self, influence_nodes):
u = random.choice(influence_nodes)
neighbors = self.graph.adj_matrix[u].indices.tolist()
potential_nodes = list(self.nodes_set - set(neighbors) -
set([self.target, u]))
if len(potential_nodes) == 0:
return None
v = random.choice(potential_nodes)
if not self.is_modified(u, v):
return (u, v)
else:
return None
def del_edge(self, influence_nodes):
u = random.choice(influence_nodes)
neighbors = self.graph.adj_matrix[u].indices.tolist()
potential_nodes = list(set(neighbors) - set([self.target, u]))
if len(potential_nodes) == 0:
return None
v = random.choice(potential_nodes)
if not self.allow_singleton and (self.modified_degree[u] <= 1
or self.modified_degree[v] <= 1):
return None
if not self.is_modified(u, v):
return (u, v)
else:
return None
|
[
"cnljt@outlook.com"
] |
cnljt@outlook.com
|
b97a00b7ad30a23398328b91d48b220d5c4802bc
|
b8911bd330c08c32a205751cf2f7538494729c16
|
/examples/plotting/plot_2_parallel_coordinates.py
|
55d66289653a05f16f742f4eb9bcd21740f11a3f
|
[
"BSD-3-Clause"
] |
permissive
|
mirkobronzi/orion
|
3c0bb6258392729b91617997eebcf1e8897795aa
|
ad8f69afabf3faab557a82ef9409fabd63495ea8
|
refs/heads/master
| 2022-05-02T05:45:54.425146
| 2022-03-08T00:37:08
| 2022-03-08T00:37:08
| 195,877,506
| 0
| 0
|
NOASSERTION
| 2022-03-08T00:38:23
| 2019-07-08T19:55:26
|
Python
|
UTF-8
|
Python
| false
| false
| 5,258
|
py
|
"""
====================
Parallel Coordinates
====================
.. hint::
Conveys a dense overview of the trial objectives in a multi-dimensional space.
    Helps identify trends toward the best or worst hyperparameter values.
The parallel coordinates plot decomposes a search space of `n` dimensions into `n`
axes so that the entire space can be visualized simultaneously. Each dimension
is represented as a vertical axis and trials are represented as lines crossing each
axis at the corresponding value of the hyperparameters. There is no obvious optimal ordering
for the vertical axis, and you will often find that changing the order helps better understanding
the data. Additionaly, the lines are plotted with graded colors based on the objective. The
gradation is shown in a color bar on the right of the plot. Note that the objectives are added
as the last axis is the plot as well.
.. autofunction:: orion.plotting.base.parallel_coordinates
:noindex:
The parallel coordinates plot can be executed directly from the ``experiment`` with
``plot.parallel_coordinates()`` as shown in the example below.
"""
from orion.client import get_experiment
# Specify the database where the experiments are stored. We use a local PickleDB here.
storage = dict(type="legacy", database=dict(type="pickleddb", host="../db.pkl"))
# Load the data for the specified experiment
experiment = get_experiment("2-dim-exp", storage=storage)
fig = experiment.plot.parallel_coordinates()
fig
#%%
# In this basic example the parallel coordinates plot is marginally useful as there are only
# 2 dimensions. It is possible however to identify the best performing values of ``dropout`` and
# ``learning_rate``. The GIF below demonstrates how to select subsets of the
# axes to highlight the trials that correspond to the best objectives.
#
# .. image:: ../_static/parallel_coordinates_select.gif
# :width: 600
# :align: center
#
# .. note::
#
# Hover is not supported by plotly at the moment.
# Feature request can be tracked `here <https://github.com/plotly/plotly.js/issues/3012>`_.
#%%
# Lets now load the results from tutorial
# :ref:`sphx_glr_auto_tutorials_code_2_hyperband_checkpoint.py` for an example with a larger search
# space.
# Load the data for the specified experiment
experiment = get_experiment("hyperband-cifar10", storage=storage)
fig = experiment.plot.parallel_coordinates()
fig
#%%
# As you can see, the large number of trials trained for only a few epochs clutters the entire
# plot. You can first select the trials with 120 epochs to clear the plot. Once that is done,
# We can see that gamma and momentum had limited influence. Good trials can be found
# for almost any values of gamma and momentum. On the other hand, learning rate
# and weight decay clearly perform better at lower values. You can try re-ordering the columns as
# shown in the animation below to see the connections between one hyperparameter and the objective.
#%%
# .. image:: ../_static/parallel_coordinates_reorder.gif
# :width: 600
# :align: center
#
#%%
# We can also select a subset of hyperparameters to help with the visualization.
# Load the data for the specified experiment
fig = experiment.plot.parallel_coordinates(
order=["epochs", "learning_rate", "weight_decay"]
)
fig
#%%
#
# Special cases
# -------------
#
# Logarithmic scale
# ~~~~~~~~~~~~~~~~~
#
# .. note::
#
# Logarithmic scales are not supported yet. Contributions are welcome. :)
# See `issue <https://github.com/Epistimio/orion/issues/555>`_.
#
# Dimension with shape
# ~~~~~~~~~~~~~~~~~~~~
#
# If some dimensions have a :ref:`search-space-shape` larger than 1, they will be flattened so that
# each subdimension can be represented in the parallel coordinates plot.
# Load the data for the specified experiment
experiment = get_experiment("2-dim-shape-exp", storage=storage)
fig = experiment.plot.parallel_coordinates()
fig
#%%
# In the example above, the dimension ``learning_rate~loguniform(1e-5, 1e-2, shape=3)``
# is flattened and represented with ``learning_rate[i]``. If the shape had two or more dimensions
# (ex: ``(3, 2)``), the indices would be ``learning_rate[i,j]`` with i=0..2 and j=0..1.
#%%
# The flattened hyperparameters can be fully selected with ``order=['<name>']``.
#
experiment.plot.parallel_coordinates(order=["/learning_rate"])
#%%
# Or a subset of the flattened hyperparameters can be selected with ``order=['<name>[index]']``.
#
experiment.plot.parallel_coordinates(order=["/learning_rate[0]", "/learning_rate[1]"])
#%%
# Categorical dimension
# ~~~~~~~~~~~~~~~~~~~~~
#
# Parallel coordinates plots can also render categorical dimensions, in which case the
# categories are shown in an arbitrary order on the axis.
# Load the data for the specified experiment
experiment = get_experiment("3-dim-cat-shape-exp", storage=storage)
fig = experiment.plot.parallel_coordinates()
fig
#%%
# Finally we save the image to serve as a thumbnail for this example. See
# the guide
# :ref:`How to save <sphx_glr_auto_examples_how-tos_code_2_how_to_save.py>`
# for more information on image saving.
fig.write_image("../../docs/src/_static/pcp_thumbnail.png")
# sphinx_gallery_thumbnail_path = '_static/pcp_thumbnail.png'
|
[
"xavier.bouthillier@umontreal.ca"
] |
xavier.bouthillier@umontreal.ca
|
7e7b508b865d113b3866dcafbdbe625da5fa268a
|
3431aa0a966505c601171393e9edcd2345813268
|
/analysis/lssxcmb/scripts/run_cell_sv3.py
|
67f67d8953fff82b82dfa3a773b4140d43be8baf
|
[
"MIT"
] |
permissive
|
mehdirezaie/LSSutils
|
34eeeb6efbf2b5c4c0c336c1f9f2fabc821def39
|
af4697d9f4b4fc4dbab44787108d7aa538ca846d
|
refs/heads/master
| 2023-08-17T17:58:03.310073
| 2023-08-16T11:29:23
| 2023-08-16T11:29:23
| 200,747,239
| 3
| 0
|
MIT
| 2021-11-09T15:12:30
| 2019-08-06T00:25:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,653
|
py
|
import os
import healpy as hp
from lssutils import setup_logging, CurrentMPIComm
from lssutils.lab import get_cl
from lssutils.utils import npix2nside, make_hp
from lssutils.utils import maps_dr9 as columns
import fitsio as ft
import numpy as np
@CurrentMPIComm.enable
def main(args, comm=None):
if comm.rank == 0:
# --- only rank 0
# read data, randoms, and templates
data = ft.read(args.data_path)
nside = 1024
ngal = make_hp(nside, data['hpix'], data['label'])
nran = make_hp(nside, data['hpix'], data['fracgood'])
mask = make_hp(nside, data['hpix'], 1.0) > 0.5
sysm = np.zeros((12*nside*nside, data['features'].shape[1]))
print(sysm.shape)
sysm[data['hpix'], :] = data['features']
if args.selection is not None:
#s_ = ft.read(args.selection)
#selection_fn = make_hp(nside, s_['hpix'], np.median(s_['weight'], axis=1))#.mean(axis=1))
selection_fn = hp.read_map(args.selection, verbose=False)
print(np.percentile(selection_fn[mask], [0, 1, 99, 100]))
else:
selection_fn = None
else:
ngal = None
nran = None
mask = None
sysm = None
selection_fn = None
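    # broadcast rank 0's arrays so every MPI rank works on identical inputs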
ngal = comm.bcast(ngal, root=0)
nran = comm.bcast(nran, root=0)
mask = comm.bcast(mask, root=0)
sysm = comm.bcast(sysm, root=0)
selection_fn = comm.bcast(selection_fn, root=0)
cls_list = get_cl(ngal, nran, mask, selection_fn=selection_fn,
systematics=sysm, njack=0)
if comm.rank == 0:
output_dir = os.path.dirname(args.output_path)
if not os.path.exists(output_dir):
print(f'creating {output_dir}')
os.makedirs(output_dir)
np.save(args.output_path, cls_list)
if __name__ == '__main__':
setup_logging("info") # turn on logging to screen
comm = CurrentMPIComm.get()
if comm.rank == 0:
print(f'hi from {comm.rank}')
from argparse import ArgumentParser
ap = ArgumentParser(description='Angular Clustering')
ap.add_argument('-d', '--data_path', required=True)
ap.add_argument('-o', '--output_path', required=True)
ap.add_argument('-s', '--selection', default=None)
ns = ap.parse_args()
for (key, value) in ns.__dict__.items():
print(f'{key:15s} : {value}')
else:
ns = None
print(f'hey from {comm.rank}')
main(ns)
|
[
"medirz90@icloud.com"
] |
medirz90@icloud.com
|
c23e11afccd6e32919ce7dad4448f78f03f7ee83
|
0d0cf0165ca108e8d94056c2bae5ad07fe9f9377
|
/3_Python_Data_Science_Toolbox_Part1/3_Lambda_functions_and_error-handling/errorHandlingWithTryExcept.py
|
88c5557c80a12220a1241c670005099db365c6d2
|
[] |
no_license
|
MACHEIKH/Datacamp_Machine_Learning_For_Everyone
|
550ec4038ebdb69993e16fe22d5136f00101b692
|
9fe8947f490da221430e6dccce6e2165a42470f3
|
refs/heads/main
| 2023-01-22T06:26:15.996504
| 2020-11-24T11:21:53
| 2020-11-24T11:21:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
# Error handling with try-except
# A good practice in writing your own functions is also anticipating the ways in which other people (or yourself, if you accidentally misuse your own function) might use the function you defined.
# As in the previous exercise, you saw that the len() function is able to handle input arguments such as strings, lists, and tuples, but not int type ones and raises an appropriate error and error message when it encounters invalid input arguments. One way of doing this is through exception handling with the try-except block.
# In this exercise, you will define a function as well as use a try-except block for handling cases when incorrect input arguments are passed to the function.
# Recall the shout_echo() function you defined in previous exercises; parts of the function definition are provided in the sample code. Your goal is to complete the exception handling code in the function definition and provide an appropriate error message when raising an error.
# Instructions
# 100 XP
# Initialize the variables echo_word and shout_words to empty strings.
# Add the keywords try and except in the appropriate locations for the exception handling block.
# Use the * operator to concatenate echo copies of word1. Assign the result to echo_word.
# Concatenate the string '!!!' to echo_word. Assign the result to shout_words.
# Define shout_echo
def shout_echo(word1, echo=1):
"""Concatenate echo copies of word1 and three
exclamation marks at the end of the string."""
# Initialize empty strings: echo_word, shout_words
echo_word = ""
shout_words = ""
# Add exception handling with try-except
try:
# Concatenate echo copies of word1 using *: echo_word
echo_word = word1 * echo
# Concatenate '!!!' to echo_word: shout_words
shout_words = echo_word + '!!!'
except:
# Print error message
print("word1 must be a string and echo must be an integer.")
# Return shout_words
return shout_words
# Call shout_echo
shout_echo("particle", echo="accelerator")
|
[
"noreply@github.com"
] |
MACHEIKH.noreply@github.com
|
5db8d660382ca50bec0fc0ab03dc620098282966
|
9ca6885d197aaf6869e2080901b361b034e4cc37
|
/TauAnalysis/MCEmbeddingTools/test/runGenMuonRadCorrAnalyzer_cfg.py
|
978d9416623f149ba528ac6ef22a4fbb44f182ec
|
[] |
no_license
|
ktf/cmssw-migration
|
153ff14346b20086f908a370029aa96575a2c51a
|
583340dd03481dff673a52a2075c8bb46fa22ac6
|
refs/heads/master
| 2020-07-25T15:37:45.528173
| 2013-07-11T04:54:56
| 2013-07-11T04:54:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,082
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("runGenMuonRadCorrAnalyzer")
import os
import re
import TauAnalysis.Configuration.tools.castor as castor
import TauAnalysis.Configuration.tools.eos as eos
# import of standard configurations for RECOnstruction
# of electrons, muons and tau-jets with non-standard isolation cones
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.load('Configuration/Geometry/GeometryIdeal_cff')
process.load('Configuration/StandardSequences/MagneticField_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = cms.string('START53_V7A::All')
#--------------------------------------------------------------------------------
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
##'file:/data1/veelken/CMSSW_5_3_x/skims/ZmumuTF_RECO_2012Oct03.root'
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_205_1_XhE.root',
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_206_1_OHz.root',
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_207_1_bgM.root',
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_208_1_szL.root',
'/store/user/veelken/CMSSW_5_3_x/skims/Embedding/goldenZmumuEvents_ZplusJets_madgraph_RECO_209_1_Jqv.root'
),
skipEvents = cms.untracked.uint32(0)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# set input files
inputFilePath = '/store/user/veelken/CMSSW_5_3_x/skims/GoldenZmumu/2012Oct09/'
inputFile_regex = r"[a-zA-Z0-9_/:.]*goldenZmumuEvents_ZplusJets_madgraph_2012Oct09_AOD_(?P<gridJob>\d*)(_(?P<gridTry>\d*))*_(?P<hash>[a-zA-Z0-9]*).root"
# check if name of inputFile matches regular expression
inputFileNames = []
files = None
if inputFilePath.startswith('/castor/'):
files = [ "".join([ "rfio:", file_info['path'] ]) for file_info in castor.nslsl(inputFilePath) ]
elif inputFilePath.startswith('/store/'):
files = [ file_info['path'] for file_info in eos.lsl(inputFilePath) ]
else:
files = [ "".join([ "file:", inputFilePath, file ]) for file in os.listdir(inputFilePath) ]
for file in files:
#print "file = %s" % file
inputFile_matcher = re.compile(inputFile_regex)
if inputFile_matcher.match(file):
inputFileNames.append(file)
#print "inputFileNames = %s" % inputFileNames
process.source.fileNames = cms.untracked.vstring(inputFileNames)
#--------------------------------------------------------------------------------
process.load("TauAnalysis/MCEmbeddingTools/ZmumuStandaloneSelection_cff")
process.goldenZmumuFilter.src = cms.InputTag('goldenZmumuCandidatesGe0IsoMuons')
process.load("TrackingTools/TransientTrack/TransientTrackBuilder_cfi")
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.genMuonRadCorrAnalyzer = cms.PSet(
initialSeed = cms.untracked.uint32(12345),
engineName = cms.untracked.string('TRandom3')
)
process.RandomNumberGeneratorService.genMuonRadCorrAnalyzerPYTHIA = process.RandomNumberGeneratorService.genMuonRadCorrAnalyzer.clone()
process.RandomNumberGeneratorService.genMuonRadCorrAnalyzerPHOTOS = process.RandomNumberGeneratorService.genMuonRadCorrAnalyzer.clone()
process.load("TauAnalysis/MCEmbeddingTools/genMuonRadCorrAnalyzer_cfi")
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
process.genMuonRadCorrAnalyzerPYTHIA.PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
particleGunParameters = cms.vstring(
'MSTP(41) = 0 ! Disable parton Showers',
'MSTP(61) = 0 ! Disable initial state radiation',
'MSTP(71) = 1 ! Enable final state radiation'
),
parameterSets = cms.vstring(
'pythiaUESettings',
'particleGunParameters'
)
)
process.genMuonRadCorrAnalyzerPHOTOS.PhotosOptions = cms.PSet()
process.genMuonRadCorrAnalyzerSequence = cms.Sequence(
process.genMuonRadCorrAnalyzer
+ process.genMuonRadCorrAnalyzerPYTHIA
+ process.genMuonRadCorrAnalyzerPHOTOS
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('/data1/veelken/tmp/runGenMuonRadCorrAnalyzer_2013Jan28.root')
)
process.analysisSequence = cms.Sequence(
process.goldenZmumuSelectionSequence
+ process.goldenZmumuFilter
+ process.genMuonRadCorrAnalyzerSequence
)
#--------------------------------------------------------------------------------
process.p = cms.Path(process.analysisSequence)
processDumpFile = open('runGenMuonRadCorrAnalyzer.dump' , 'w')
print >> processDumpFile, process.dumpPython()
|
[
"sha1-5c72da6f595cce9b6b48aff6d56f01e9beb4aad1@cern.ch"
] |
sha1-5c72da6f595cce9b6b48aff6d56f01e9beb4aad1@cern.ch
|
008458b3a86ae5e14cc07957c22da7431650271c
|
d4c9979ebf5224e79c7bff38931657e0b3420b86
|
/quizzes/Quiz35.py
|
60e585f8af44430796aab5112454e757f5ddc3cc
|
[] |
no_license
|
imjeee/cs373
|
60c021ec372a59dab8edf32a1f2f833857bbf5f8
|
9c331c9eb9c4c43f8a2ee03ee5770791f2ead225
|
refs/heads/master
| 2020-12-25T02:20:25.894496
| 2012-04-26T02:56:03
| 2012-04-26T02:56:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
#!/usr/bin/env python
"""
CS373: Quiz #35 (5 pts)
"""
""" ----------------------------------------------------------------------
1. What is the output of the following program?
(4 pts)
1
A.A()
A.A()
False
A.A()
A.f()
2
A.A()
3
True
A.f()
4
5
True
A.f()
6
"""
def Decorator (c) :
x = c()
return lambda : x
class A (object) :
def __init__ (self) :
print "A.A()"
def f (self) :
print "A.f()"
print "1"
print A() is A()
A().f()
print "2"
A = Decorator(A)
print "3"
print A() is A()
A().f()
print "4"
A = Decorator(A)
print "5"
print A() is A()
A().f()
print "6"
|
[
"downing@cs.utexas.edu"
] |
downing@cs.utexas.edu
|
2c69e19bcc517dcd10c542ec855f0c2019875f31
|
e254a9e46750549f742b30fc5e930f1bddf78091
|
/plots/python/cutsJetClean1.py
|
6e43f909f17bbc85a9c4d9976b4df2e93a358a30
|
[] |
no_license
|
schoef/TTGammaEFT
|
13618c07457a0557c2d62642205b10100f8b4e79
|
af9a5f8c9d87835c26d7ebecdccf865a95a84ba2
|
refs/heads/master
| 2020-04-17T05:01:28.910645
| 2019-01-17T16:29:27
| 2019-01-17T16:29:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,657
|
py
|
#!/usr/bin/env python
''' Define list of plots for plot script
'''
# Standard Imports
from math import pi
# RootTools
from RootTools.core.standard import *
# TTGammaEFT
from TTGammaEFT.Tools.constants import defaultValue
# plotList
cutsJetClean1 = []
cutsJetClean1.append( Plot(
name = 'cleanJet1_nConstituents',
texX = 'nConstituents(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_nConstituents[1] if event.nJetClean > 1 else defaultValue,
binning = [ 5, 0, 5 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neHEF',
texX = 'neHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neHEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neEmEF',
texX = 'neEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0., 1 ],
))
cutsJetClean1.append( Plot(
    name = 'cleanJet1_chEmEF',
texX = 'chEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_chEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neHEF_detailed',
texX = 'neHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neHEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 100, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neEmEF_detailed',
texX = 'neEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 100, 0., 1 ],
))
cutsJetClean1.append( Plot(
    name = 'cleanJet1_chEmEF_detailed',
texX = 'chEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_chEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 100, 0., 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neHEF_tight',
texX = 'neHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neHEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0.8, 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_neEmEF_tight',
texX = 'neEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_neEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0.8, 1 ],
))
cutsJetClean1.append( Plot(
    name = 'cleanJet1_chEmEF_tight',
texX = 'chEmEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_chEmEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0.8, 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_chHEF',
texX = 'chHEF(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_chHEF[1] if event.nJetClean > 1 else defaultValue,
binning = [ 40, 0, 1 ],
))
cutsJetClean1.append( Plot(
name = 'cleanJet1_ID',
texX = 'ID(jet_{1})',
texY = 'Number of Events',
attribute = lambda event, sample: event.JetClean_JetCleanId[1] if event.nJetClean > 1 else defaultValue,
binning = [ 4, 0, 4 ],
))
|
[
"lukas.k.lechner@gmail.com"
] |
lukas.k.lechner@gmail.com
|
63ce385bb0e3331d222c53d2420877eedc853169
|
f2aec3224fb3e1a6c780f82def626be3565a0e8e
|
/examples/Carleman/config.py
|
9b7c950382f999c814723a8eb8af048d3e92b9b3
|
[] |
no_license
|
Orcuslc/MultiScale-PINN
|
a000ce5afcb7d91ccdd535cc45bd2000c917463f
|
32f1de843ec96231d6d9859815ad1b08cef718d2
|
refs/heads/master
| 2022-12-13T13:07:59.881197
| 2020-09-15T22:46:45
| 2020-09-15T22:46:45
| 291,526,555
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
import jax
import jax.numpy as jnp
from jax import random
from jax.experimental import optimizers
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from jaxmeta.loss import *
# name of job
NAME = "1"
# random key
key = random.PRNGKey(1)
# network config
layers = [2] + [32]*4 + [2]
c0 = 1.0
w0 = jnp.array([[1.0, 1.0]]).T
# network training
metaloss = mae
optimizer = optimizers.adam
lr = 1e-3
weights = {
"c1": 1.0,
"c2": 10.0,
"d1": 10.0,
"d2": 10.0,
"l1": 1e-8,
"l2": 1e-8,
}
batch_size = {
"dirichlet": 300,
"collocation": 20100,
}
iterations = 200000
print_every = 1000
save_every = 10000
loss_names = ["Loss", "c1", "c2", "d1", "d2", "l1_reg", "l2_reg"]
log_file = None
# data
n_data = {
"i": 100,
"b": 100,
"cx": 201,
"ct": 100,
}
|
[
"orcuslc@hotmail.com"
] |
orcuslc@hotmail.com
|
60c072d32340dc8922ab7bd5a5643dcda4ae5b74
|
342a1ec794df5424bfc4f6af2cb8de415068201b
|
/sandbox/urls.py
|
cda98e24d4af79bb880bea6d3d80987d07cf9d1c
|
[] |
no_license
|
penta-srl/django-oscar-promotions
|
c5d0b159950189f23852665ce7e3b3a2fe248bd5
|
65bdf39b48409311e7284fc0a12e8b2e17f176dd
|
refs/heads/master
| 2020-07-06T23:48:45.660316
| 2019-07-08T19:23:15
| 2019-07-08T19:23:15
| 203,176,440
| 0
| 0
| null | 2019-08-19T13:16:55
| 2019-08-19T13:16:55
| null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
from django.conf import settings
from django.conf.urls import i18n
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from app import application
urlpatterns = [
path('admin/', admin.site.urls),
path('i18n/', include(i18n)),
path('', application.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"sasha@sasha0.ru"
] |
sasha@sasha0.ru
|
3dfd2531ae73ff4591e432a193134bba76c6d163
|
da052c0bbf811dc4c29a83d1b1bffffd41becaab
|
/core/sg_update_taxcode/__manifest__.py
|
023ccc393334b7e8d2ceb2bde2b89125f66c2737
|
[] |
no_license
|
Muhammad-SF/Test
|
ef76a45ad28ac8054a4844f5b3826040a222fb6e
|
46e15330b5d642053da61754247f3fbf9d02717e
|
refs/heads/main
| 2023-03-13T10:03:50.146152
| 2021-03-07T20:28:36
| 2021-03-07T20:28:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
{
"name": "SG Tax Update",
"version": "1.1.1",
"depends": ['account'],
"author" :"MPTechnolabs(Chankya)",
"website" : "http://www.serpentcs.com",
"category": "Accounting and Financial Management",
"description": """
It will add some Singapore default Taxes for Sales and Purchase.
============================
""",
'data':[
"data/tax_data.xml",
],
'installable': True,
'auto_install':False,
'application':False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"jbalu2801@gmail.com"
] |
jbalu2801@gmail.com
|
16e5e62ad99b2ca24f2c0903c0f54ac800dbc519
|
5142e81b50d15202ff79a34c9b888f18d2baec27
|
/plotnine/geoms/geom_polygon.py
|
3f792d36c5eb28b5b5766db68616763254679505
|
[
"MIT"
] |
permissive
|
has2k1/plotnine
|
03c0e979b6b05b5e92cb869cca903cfce20988dc
|
ef5650c4aabb29dcfe810043fb0fc8a4ea83f14b
|
refs/heads/main
| 2023-08-30T22:17:07.835055
| 2023-08-08T07:57:53
| 2023-08-08T07:57:53
| 89,276,692
| 3,719
| 233
|
MIT
| 2023-08-08T13:09:24
| 2017-04-24T19:00:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,930
|
py
|
from __future__ import annotations
import typing
import numpy as np
from ..doctools import document
from ..utils import SIZE_FACTOR, to_rgba
from .geom import geom
if typing.TYPE_CHECKING:
from typing import Any
import pandas as pd
from plotnine.iapi import panel_view
from plotnine.typing import Axes, Coord, DrawingArea, Layer
@document
class geom_polygon(geom):
"""
Polygon, a filled path
{usage}
Parameters
----------
{common_parameters}
Notes
-----
All paths in the same ``group`` aesthetic value make up a polygon.
"""
DEFAULT_AES = {
"alpha": 1,
"color": None,
"fill": "#333333",
"linetype": "solid",
"size": 0.5,
}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
}
REQUIRED_AES = {"x", "y"}
def handle_na(self, data: pd.DataFrame) -> pd.DataFrame:
return data
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: Coord,
ax: Axes,
**params: Any,
):
"""
Plot all groups
"""
self.draw_group(data, panel_params, coord, ax, **params)
@staticmethod
def draw_group(
data: pd.DataFrame,
panel_params: panel_view,
coord: Coord,
ax: Axes,
**params: Any,
):
from matplotlib.collections import PolyCollection
data = coord.transform(data, panel_params, munch=True)
data["size"] *= SIZE_FACTOR
# Each group is a polygon with a single facecolor
# with potentially an edgecolor for every edge.
verts = []
facecolor = []
edgecolor = []
linestyle = []
linewidth = []
# Some stats may order the data in ways that prevent
# objects from occluding other objects. We do not want
# to undo that order.
grouper = data.groupby("group", sort=False)
for group, df in grouper:
fill = to_rgba(df["fill"].iloc[0], df["alpha"].iloc[0])
verts.append(tuple(zip(df["x"], df["y"])))
facecolor.append("none" if fill is None else fill)
edgecolor.append(df["color"].iloc[0] or "none")
linestyle.append(df["linetype"].iloc[0])
linewidth.append(df["size"].iloc[0])
col = PolyCollection(
verts,
facecolors=facecolor,
edgecolors=edgecolor,
linestyles=linestyle,
linewidths=linewidth,
zorder=params["zorder"],
rasterized=params["raster"],
)
ax.add_collection(col)
@staticmethod
def draw_legend(
data: pd.Series[Any], da: DrawingArea, lyr: Layer
) -> DrawingArea:
"""
Draw a rectangle in the box
Parameters
----------
data : Series
Data Row
da : DrawingArea
Canvas
lyr : layer
Layer
Returns
-------
out : DrawingArea
"""
from matplotlib.patches import Rectangle
data["size"] *= SIZE_FACTOR
# We take into account that the linewidth
# bestrides the boundary of the rectangle
linewidth = np.min([data["size"], da.width / 4, da.height / 4])
if data["color"] is None:
linewidth = 0
facecolor = to_rgba(data["fill"], data["alpha"])
if facecolor is None:
facecolor = "none"
rect = Rectangle(
(0 + linewidth / 2, 0 + linewidth / 2),
width=da.width - linewidth,
height=da.height - linewidth,
linewidth=linewidth,
linestyle=data["linetype"],
facecolor=facecolor,
edgecolor=data["color"],
capstyle="projecting",
)
da.add_artist(rect)
return da
|
[
"has2k1@gmail.com"
] |
has2k1@gmail.com
|
87fdd739df1b7fb5761a475d96ca9d5090f05545
|
f81099738d3ab7d4a4773a04ed9e36e493632590
|
/tools/__init__.py
|
25656f148c2d467f8c89a6f10feea6f9a961d8f5
|
[
"MIT"
] |
permissive
|
kristoffer-paulsson/angelos
|
eff35753e4d7e4465d2aadac39265f206b09fcf9
|
d789f47766fe3a63a6752b92e4ea955f420dbaf9
|
refs/heads/master
| 2022-05-05T15:16:59.340527
| 2022-03-27T16:05:51
| 2022-03-27T16:05:51
| 142,691,235
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
#
# Copyright (c) 2021 by Kristoffer Paulsson <kristoffer.paulsson@talenten.se>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Tools package to help out administrating the project and namespace packages."""
|
[
"kristoffer.paulsson@talenten.se"
] |
kristoffer.paulsson@talenten.se
|
827109911306284e65d62529b7a0acc378edb770
|
43dbf438287d1ea426d1cc0e201d4a56bfd20e21
|
/ABC/084/C.py
|
cef2926d97def3d85bbeacf0da42b1556a9743e9
|
[] |
no_license
|
hatopoppoK3/AtCoder-Practice
|
c98e5b0377c46b440a79dcc0bd1790b508555672
|
c7385b0444baf18b7a7dc8e3003cc2074bc9b4ab
|
refs/heads/master
| 2023-02-23T03:45:45.787964
| 2023-02-07T12:59:51
| 2023-02-07T12:59:51
| 184,423,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
N = int(input())
CSF = []
for i in range(0, N-1):
CSF.append(list(map(int, input().split())))
for i in range(0, N):
ans = 0
for j in range(i, N-1):
ans = max(CSF[j][1], ans)
if ans % CSF[j][2] != 0:
ans += (CSF[j][2]-ans % CSF[j][2])
ans += CSF[j][0]
print(ans)
|
[
"hatopoppo0320@gmail.com"
] |
hatopoppo0320@gmail.com
|
213890a8d69a2c805267746ad519c8098d213948
|
4e8a55ff85a2aa26fbbf05bc9350028226dfde37
|
/DProject/Manager/LoginManager.py
|
2c0f7a5f5aa87f813dbd2c29683edac4b8132d44
|
[] |
no_license
|
DWaze/DisabledProject
|
dbb572fa47063011abc4f13e95c9e44ab24a5c55
|
6c16269c7722503226ba500d0216dc373ffad867
|
refs/heads/master
| 2020-03-17T09:02:28.913809
| 2018-06-22T10:07:55
| 2018-06-22T10:07:55
| 133,458,886
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,159
|
py
|
from DProject.Manager.MainManager import createSession
from DProject.DAO.AccountDAO import AccountDAO
from DProject.DAO.UserDAO import UserDAO
import datetime
import uuid
from datetime import timedelta
from DProject.Models.Empty import Empty
from DProject.Models.Account import Account
from DProject.Models.User import User
import abc
import DProject.Models.base
import DProject.Models.Account
import DProject.Models.Action
import DProject.Models.ActionHistory
import DProject.Models.Agent
import DProject.Models.Building
import DProject.Models.Object
import DProject.Models.GPIOPins
import DProject.Models.Scenario
import DProject.Models.StateHistory
import DProject.Models.Area
import DProject.Models.User
from sqlalchemy import create_engine, DateTime
from sqlalchemy.orm import sessionmaker
class LoginManager(object):
DBSession = None
def __init__(self):
self.DBSession = createSession()
def normalize(self,account):
accountObject = Empty()
accountObject.id = account.id
accountObject.userName = account.userName
accountObject.password = account.password
accountObject.email = account.email
accountObject.type = account.type
accountObject.token = account.token
accountObject.tokenEndDate = account.tokenEndDate.strftime('%m/%d/%Y')
accountObject.enabled = account.enabled
accountObject.lastLogin = account.lastLogin.strftime('%m/%d/%Y')
accountObject.firstAccess = account.firstAccess.strftime('%m/%d/%Y')
accountObject.lastAccess = account.lastAccess.strftime('%m/%d/%Y')
return accountObject
def GetLoginInfo(self, username, password):
accountDAO = AccountDAO(self.DBSession)
# accountTest = Account("redha","1321","mail@mail.com")
# accountDAO.create(accountTest)
#
# userDAO = UserDAO(self.DBSession)
# userTest = User("Abbassen","Mohamed Redha",22,datetime.datetime.now(),"0665528461","cite 1000 logts","Constantine","Countery")
# userTest.addAccount(accountTest)
# userDAO.create(userTest)
account = accountDAO.findLogin(username,password)
dNow = datetime.datetime.now()
if hasattr(account, 'userName'):
dToken = account.tokenEndDate
if (dNow > dToken or account.token is None):
new_token = uuid.uuid4().hex
account.token = new_token
account.tokenEndDate = dNow + timedelta(days=15)
accountDAO.update(account)
accountObject = self.normalize(account)
return accountObject
else:
accountObject = self.normalize(account)
return accountObject
else:
return []
def check_token(self,token):
accountDAO = AccountDAO(self.DBSession)
account = accountDAO.findByToken(token)
dNow = datetime.datetime.now()
if hasattr(account, 'userName'):
dToken = account.tokenEndDate
if (dNow > dToken or account.token is None):
return False
else:
return account
|
[
"wazeproo@gmail.com"
] |
wazeproo@gmail.com
|
d427c13fa9959a66bb1f5351a06da9b53d663630
|
5f64d91dc45e58c8e73a52985c6db45d340d09cc
|
/Pibow_Zero_1.2/moving_vertical_rainbow_2.py
|
940cbb7a0b456992dae98966351278b6229504f9
|
[] |
no_license
|
Breakfast-for-Pigeons/Unicorn-PHAT
|
e0343eb9a46c4b7be11d5028be07ea6b0f071efd
|
6e70302eac995cd11821ecf2ee363a1b926df2ce
|
refs/heads/master
| 2023-01-01T18:05:27.436081
| 2020-10-23T22:18:13
| 2020-10-23T22:18:13
| 74,320,010
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,859
|
py
|
#!/usr/bin/python3
"""
Moving Vertical Rainbow 2 - Pibow Zero 1.2
Retrieves the rainbows and sends them to the move function.
With the GPIO pins at the top of the Raspberry Pi, the rainbows move
from the bottom to the top.
....................
Functions:
- moving_vertical_rainbow_2: Retrieves the rainbows and sends them to
the move function.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from bfp_unicornphat import print_header
from bfp_unicornphat import stop
from bfp_unicornphat import get_vertical_rainbow_00
from bfp_unicornphat import get_vertical_rainbows
from bfp_unicornphat import move_vertically
########################################################################
# Functions #
########################################################################
def moving_vertical_rainbow_2():
"""
Retrieves the rainbows, assigns them in reverse order, and then
sends them as an argument to the move function.
"""
rainbow00 = get_vertical_rainbow_00()
rainbow03, rainbow02, rainbow01 = get_vertical_rainbows()
mv_rainbows_2 = (rainbow00, rainbow01, rainbow02, rainbow03)
move_vertically(mv_rainbows_2)
if __name__ == '__main__':
try:
# STEP01: Print header
print_header()
# STEP02: Print instructions in white text
print("\033[1;37;40mPress Ctrl-C to stop the program.")
# STEP03:
moving_vertical_rainbow_2()
# STEP04: Exit the program.
stop()
except KeyboardInterrupt:
stop()
|
[
"noreply@github.com"
] |
Breakfast-for-Pigeons.noreply@github.com
|
8e835c33a5fb8ddde4f544c04d02d8452b9410d3
|
5f1dee64fd88bd2237ef95ee894a37d8ddcdeb4e
|
/python/tools/slice_data.py
|
56c0634900cb623ac83454b120888c0a466b13cb
|
[] |
no_license
|
constantinpape/stuff_master
|
c893ae0c763801210db06bad548784c4b20d6680
|
68d80bd45a140cb0d30abf32a04365ca37c9694c
|
refs/heads/master
| 2021-01-22T09:50:38.795259
| 2017-03-07T18:25:45
| 2017-03-07T18:25:45
| 33,936,452
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
import sys, h5py
import vigra
import numpy as np
if __name__ == '__main__':
#path_in = "/home/constantin/Work/data_ssd/data_090515/2x2x2nm/data_sub.h5"
#path_out = "/home/constantin/Work/data_ssd/data_090515/2x2x2nm/data_sub_sliced.h5"
#key = "data"
#path_in = "/home/constantin/Work/data_ssd/data_090515/2x2x2nm/superpixel/watershed_voxel.h5"
#path_out = "/home/constantin/Work/data_ssd/data_090515/2x2x2nm/superpixel/watershed_voxel_sliced.h5"
#key = "superpixel"
#path_in = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_sliced.h5"
#path_out = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_slicedsliced.h5"
#key = "data"
path_in = "/home/constantin/Work/data_ssd/data_080515/pedunculus/superpixel/watershed_vigra2.h5"
path_out = "/home/constantin/Work/data_ssd/data_080515/pedunculus/superpixel/watershed_vigra2_sliced.h5"
key = "superpixel"
data_in = vigra.readHDF5(path_in,key)
f_out = h5py.File(path_out,"w")
dset = f_out.create_dataset(key, (512,512,29), dtype = 'f', chunks = True )
dset[:,:,:] = data_in[0:512,0:512,0:29]
|
[
"constantin.pape@iwr.uni-heidelberg.de"
] |
constantin.pape@iwr.uni-heidelberg.de
|
c163125bb4a4bc3c9b37983dcb3043063551b6de
|
3e534ac0d2053e72e8d6b67f96b42cf56464b5fd
|
/setup.py
|
3f133a6855b66b3d43c23bc909e5b20a81fb79f4
|
[] |
no_license
|
marcoceppi/dorthy
|
466b89574a940991ca86be752b36c876964df699
|
781bd2b60fa8551671cdb2fd681012dad7e24490
|
refs/heads/master
| 2020-05-05T06:43:58.330792
| 2014-05-17T03:20:33
| 2014-05-17T03:20:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
from setuptools import setup
install_requires = [
'tornado',
'PyYAML'
]
setup(
name='dorthy',
version='0.0.1',
description='A more express.js like framework built on top of Tornado',
install_requires=install_requires,
author='Marco Ceppi',
author_email='marco@ceppi.net',
url="https://github.com/marcoceppi/dorthy",
packages=['dorthy']
)
|
[
"marco@ceppi.net"
] |
marco@ceppi.net
|
8f48b70fe931ef6ad194e43e39d05d0c1d2b58b9
|
ff70ce8c57354ca54d193913ecb9d95555ac77fb
|
/app.py
|
98097584f6c13838834d03556e5f3bf2aaf2dc0b
|
[] |
no_license
|
hopetambala/SI507_F18_HW12_Flask_Guestbook
|
b1a3bf34dbcfa92b5fcd6f98e779ac9948a76148
|
cc05f5c3648604726e61a5c27f51042f9447d4fa
|
refs/heads/master
| 2020-04-09T21:10:38.823927
| 2018-12-06T01:27:10
| 2018-12-06T01:27:10
| 160,594,088
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
from flask import Flask, render_template, request, redirect
import model
app = Flask(__name__)
@app.route("/")
def index():
## print the guestbook
return render_template("index.html", entries=model.get_entries())
@app.route("/add")
def addentry():
## add a guestbook entry
return render_template("addentry.html")
@app.route("/admin")
def admin():
    ## review and delete guestbook entries
return render_template("admin.html", entries=model.get_entries())
@app.route("/postentry", methods=["POST"])
def postentry():
name = request.form["name"]
message = request.form["message"]
model.add_entry(name, message)
return redirect("/")
@app.route("/delete", methods=["POST"])
def delete():
id = request.form["id"]
model.delete_entry(id)
return redirect("/admin")
if __name__=="__main__":
model.init()
app.run(debug=True)
|
[
"hopetambala@outlook.com"
] |
hopetambala@outlook.com
|
c009c177e08c531d50eccbce517bd0d11cc63325
|
3f4464c932403615c1fbbaf82eaec096426b1ef5
|
/StartOutPy4/CH8 Strings/count_Ts.py
|
c423d22ade2f77b8b3be5beced086e81104c62ed
|
[] |
no_license
|
arcstarusa/prime
|
99af6e3fed275982bf11ada7bf1297294d527e91
|
5f1102aa7b6eaba18f97eb388525d48ab4cac563
|
refs/heads/master
| 2020-03-22T14:07:08.079963
| 2019-05-09T11:45:21
| 2019-05-09T11:45:21
| 140,154,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
# This program counts the number of times
# the letter T (uppercase or lowercase)
# appears in a string.
def main():
# Create a variable to use to hold the count.
# The variable must start with 0.
count = 0
# Get a string from the user.
my_string = input('Enter a sentence: ')
# Count the Ts.
for ch in my_string:
if ch == 'T' or ch == 't':
count += 1
# Print the result.
print('The letter T appears', count, 'times.')
# Call the main function.
main()
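# A built-in equivalent (sketch): my_string.lower().count('t') yields the same count.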
|
[
"40938410+edwardigarashi@users.noreply.github.com"
] |
40938410+edwardigarashi@users.noreply.github.com
|
642f4caad9f1b442926282ebde6954db9bfee7cc
|
4c75f5ace0257e17879d8889ae3769a13e70b159
|
/note14/code/merge_model.py
|
fac5e74e272d21eededbe39308d15abe8b115a0f
|
[
"Apache-2.0"
] |
permissive
|
songkunhuang/LearnPaddle
|
c8acb5de1bbf8cab0fd919f6b11ca81d3c2a3110
|
c4500904615149115535b66a67d3e5d06f8435c4
|
refs/heads/master
| 2020-03-26T05:56:32.817406
| 2018-07-24T14:50:21
| 2018-07-24T14:50:21
| 144,581,839
| 1
| 0
|
Apache-2.0
| 2018-08-13T13:13:37
| 2018-08-13T13:13:36
| null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
# coding=utf-8
from paddle.utils.merge_model import merge_v2_model
# import the neural network definitions
from mobilenet import mobile_net
from vgg import vgg_bn_drop
if __name__ == "__main__":
    # size of the input image
img_size = 3 * 32 * 32
    # total number of classes
class_dim = 10
net = mobile_net(img_size, class_dim)
param_file = '../model/mobile_net.tar.gz'
output_file = '../model/mobile_net.paddle'
merge_v2_model(net, param_file, output_file)
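    # merge_v2_model packs the network topology together with the trained
    # parameters from param_file into the single deployable output_file.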
|
[
"yeyupiaoling@foxmail.com"
] |
yeyupiaoling@foxmail.com
|
71fdad854f57ec04453d5c7adb2360b1f8c20fbb
|
153da69b35f032f5b83a06f17008ba41a1b336b4
|
/src/app/vault/src/main/entity/__init__.py
|
270705fcf68ccca631f0df8206ad7e2b306c43e7
|
[
"MIT"
] |
permissive
|
TrendingTechnology/hspylib
|
6400cadf9dfe6ab5733712dcfeccf8022d61c589
|
c79a2c17e89fe21d00ccd9c1646a03407cd61839
|
refs/heads/master
| 2023-06-20T15:47:35.962661
| 2021-07-19T22:12:18
| 2021-07-19T23:45:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
# _*_ coding: utf-8 _*_
#
# HSPyLib v0.11.1
#
# Package: app.vault.src.main.entity
"""Package initialization."""
__all__ = [
'validator',
'vault_entry'
]
|
[
"yorevs@gmail.com"
] |
yorevs@gmail.com
|
bd805a463c372e859e12a602bef4b4003685b198
|
95310128d287ed510b354216efffb2bbb03cdbb4
|
/subsetsum.py
|
229656a8f91d2647f3d3b986035037fa63923290
|
[] |
no_license
|
babiswas2020/python-practise
|
a35fd2378fbd0213168b811303f70c52fabb31ef
|
06a4525d5ad2037cf3064f497a7a08dafae13f9c
|
refs/heads/master
| 2022-11-10T23:58:05.672114
| 2020-06-20T09:08:18
| 2020-06-20T09:08:18
| 273,674,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
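# Note on the recurrence (added comment, not in the original): each call either
# includes arr[N-1], reducing the remaining target sum, or excludes it; the
# N==0 base cases report whether the target was met exactly.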
def subsetsum(arr,sum,N):
if N==0 and sum!=0:
return False
elif N==0 and sum==0:
return True
if N-1>=0:
if arr[N-1]<=sum:
return subsetsum(arr,sum-arr[N-1],N-1) or subsetsum(arr,sum,N-1)
elif arr[N-1]>sum:
return subsetsum(arr,sum,N-1)
if __name__=="__main__":
l=[12,4,6,8,0,23,14]
print(subsetsum(l,11,7))
|
[
"36biswas0814@gmail.com"
] |
36biswas0814@gmail.com
|
67069710247e7000932a7a6a8e171dd88d51b76d
|
b500996a0b29829fde6afe8b23178ca9df4a239d
|
/rydinfap/src/apps/fbaptestpar.py
|
a88c136c37fa263588f63980ab645485c9e77942
|
[] |
no_license
|
eocampo2000/test-code
|
48c4d444e323eef5e6fe7e61b018952ef3cd4134
|
49328664243e1a9daf9c567d1aaaa19fd4654c02
|
refs/heads/master
| 2016-08-11T07:35:31.346464
| 2016-02-13T12:33:55
| 2016-02-13T12:33:55
| 51,642,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,801
|
py
|
'''
Created on Jan 30, 2014
@author: eocampo
To schedule FBAP for parallel testing.
'''
__version__ = '20140130'
import sys
import procdata.procinfa as pi
from apps.infbaseapp import _InfaBaseApp
# Mandatory to define self.cmdStep
class FBAPTestPar(_InfaBaseApp):
exitOnError = True
def __init__(self):
super(FBAPTestPar,self).__init__()
self.landDir = 'SrcFiles'
self.incFileSet = [] # Incoming Files. Contains full path name.
self.incFiles = []
self.workFiles = [] # Files that were moved to the working dir (ideally same than incSetFile).
self.trigFiles = [] # Incoming Trigger File.
# Allowable commands for this application. Make sure to Set
self.cmdStep = { 'A' : self.getLock ,
'B' : self.getTrigFiles ,
'C' : self.wkfTISStgFBAPLoc ,
'D' : self.wkfTISTgtFBAP ,
}
        # Infa environment variables
self.infaEnvVar = {
'PMCMD' : 'mg.pmcmd' ,
'INFA_USER' : 'self.ib.rep_user' ,
'INFA_XPWD' : 'self.ib.rep_xpwd' ,
'DOMAIN' : 'self.ib.dom_name' ,
'INT_SERV' : 'self.ib.IS' ,
'INFA_SHARE' : 'self.ib.shareDir' ,
'INFA_APP_CFG' : 'self.ib.cfgDir' ,
'INFA_APP_LCK' : 'self.ib.lckDir' ,
'INFA_APP_CTL' : 'self.ib.ctlDir' ,
}
def wkfTISStgFBAPLoc(self):
self.ib.fld = 'TIS'
self.ib.wkf = 'wkf_TIS_STG_FBAP_LOCAL'
#rc = pi.runWkflWait(self.ib,self.log)
rc=0
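        # workflow launch is stubbed out for parallel-test scheduling; rc is forced to 0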
if rc != 0 :
self.log.error('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
else :
self.log.info('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
return rc
def wkfTISTgtFBAP(self):
self.ib.fld = 'TIS'
self.ib.wkf = 'wkf_TIS_TGT_FBAP'
rc=0
#rc = pi.runWkflWait(self.ib,self.log)
if rc != 0 :
self.log.error('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
else :
self.log.info('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
return rc
def main(Args):
a = FBAPTestPar()
rc = a.main(Args)
return rc
if __name__ == '__main__':
from setwinenv import setEnvVars # Remove in UX
setEnvVars() # Remove in UX
rc= main(sys.argv)
|
[
"eocampo1000@hotmail.com"
] |
eocampo1000@hotmail.com
|
8a66e6dfc999127f04b86f88629b3443a4a2f5ab
|
a4435e31cdfbe68aebfdb241bb82ed33dd4f5a30
|
/chapter13/Multiinherit.py
|
8fc30d31404fbb978886b4165250b8dcd66189f6
|
[] |
no_license
|
vonzhou/Core-Python-Programming
|
804ce8ade8ca1af6b2b2effb0b78ec30a124314d
|
749d4dff8d2158c8be706bca1a82a283150c629a
|
refs/heads/master
| 2021-01-10T01:29:23.918561
| 2015-10-20T09:38:19
| 2015-10-20T09:38:19
| 44,003,197
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
#P360
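# Classic (old-style) classes resolve attributes depth-first, left to right:
# GC -> C1 -> P1 -> P2 -> C2.  So g1.foo() finds P1.foo and g1.bar() finds
# P2.bar -- C2.bar is never reached, which is the gotcha this example shows.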
class P1:
def foo(self):
print "calling P1-foo()...."
class P2:
def foo(self):
print "calling P2-foo()..."
def bar(self):
print "calling P2-bar()..."
class C1(P1,P2):
pass
class C2(P1,P2):
def bar(self):
print "calling C2-bar()...."
class GC(C1,C2):
pass
g1 = GC()
g1.foo()
print "-----------------"
g1.bar()
|
[
"vonzhou@163.com"
] |
vonzhou@163.com
|
51f0076c78fa9f6ff59c75ea2c3f76af8166726a
|
b37b1e809a055bfbab1c7a017e1ae5b572555827
|
/carmesi/users/migrations/0003_user_modified_by.py
|
89898ae4683ff811967fd82c0cee18acd51131f8
|
[
"MIT"
] |
permissive
|
raultr/CarmesiAnt
|
828285ee389d1ed76c53a6cf504eb4ca2567cfe3
|
4ce143482001e015584943a5ed6f93adfb3dd520
|
refs/heads/master
| 2021-10-18T05:38:06.578438
| 2019-02-14T05:03:19
| 2019-02-14T05:03:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
# Generated by Django 2.1.1 on 2019-02-10 03:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0002_user_created_by'),
]
operations = [
migrations.AddField(
model_name='user',
name='modified_by',
field=models.ForeignKey(blank=True, help_text='Usuario última actualización', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_modificado_por', to=settings.AUTH_USER_MODEL),
),
]
|
[
"raultr@gmail.com"
] |
raultr@gmail.com
|
e2fb6eeb73d6f91f850faec33aa1c97db748d904
|
32cf9c3099c36a46804e393dd1491a8954f50263
|
/2019.01.28 - 문자열, 패턴매칭 알고리즘/GNS.py
|
4d0b1294c67827e9b64dfe1799200bcc304b39c4
|
[] |
no_license
|
ash92kr/s_code
|
ce3bda6a403600892750e181dca5ed8c4caebcb1
|
92eace551d132b91ee91db6c0afd38b93f9b647b
|
refs/heads/master
| 2020-04-12T00:27:07.043091
| 2019-05-21T08:17:39
| 2019-05-21T08:17:39
| 162,200,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
import sys
sys.stdin = open("GNS_test_input.txt")
T = int(input())
for testcase in range(T):
    temp = input()  # read the "#1 7041" header line and treat it as a dummy (its length is available via len)
data = list(map(str, input().split()))
num = ['ZRO', 'ONE', 'TWO', 'THR', 'FOR', 'FIV', 'SIX', 'SVN', 'EGT', 'NIN']
repeat = []
for i in range(len(num)):
        repeat.append(data.count(num[i]))  # count how many times each word in num appears in data
    print(f'#{testcase + 1}')  # header line such as "#1"
    for j in range(len(repeat)):  # repeat always holds exactly ten entries
        # if repeat[j] > 0:
        for k in range(repeat[j]):  # print each numeral word as many times as it was counted
print(num[j], end=" ")
print()
|
[
"ash92kr@gmail.com"
] |
ash92kr@gmail.com
|
71edda26f88591b56d8e37183583de483d196be6
|
1e4c14ae893dc15e612224287f908396fca40cbc
|
/src/utils/img_utils.py
|
fe4a357382b0879be931797a117cbbfcc6efbc1f
|
[] |
no_license
|
yezhengkai/ground-based-cloud
|
2958bf5ed6adfa04fe903ffba8122e9f98ad19bb
|
619730b668f695f1066253a4ff39be282484201a
|
refs/heads/master
| 2023-02-26T23:52:06.988713
| 2021-01-30T13:39:52
| 2021-01-30T13:39:52
| 316,887,420
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
from PIL import Image
def get_img_shape(img_path, channel="last"):
"""
References
----------
https://stackoverflow.com/questions/52962969/number-of-channels-in-pil-pillow-image
"""
img = Image.open(img_path)
if channel == "last":
return (*img.size, len(img.getbands()))
elif channel == "first":
return (len(img.getbands()), *img.size)
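# Example usage (illustrative; the path is hypothetical):
#   get_img_shape("clouds/sample.jpg")           # -> (width, height, channels)
#   get_img_shape("clouds/sample.jpg", "first")  # -> (channels, width, height)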
|
[
"supon3060@gmail.com"
] |
supon3060@gmail.com
|
ccb02a3fa1531b24900ff7930c73705f40159f31
|
ff8f55e26d4b9742e7c9766435898411f45254dd
|
/Dynamic_Programming/01_Paint_Fence.py
|
3ac2cb20da29448a36cb75038deda92288fdaa7f
|
[] |
no_license
|
saranyab9064/leetcode-geeks
|
38ded6532ed91d4893a8ccee2147faf02e820554
|
ca6ffffcb775c4caacc4dc907b9912b40a48a343
|
refs/heads/master
| 2021-06-27T22:27:53.591871
| 2021-03-10T01:49:55
| 2021-03-10T01:49:55
| 217,812,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
# ============================================================================
# Name : 01_Paint_Fence.py
# Author : Saranya Balakrishnan
# Mail Id : saranyab0925@gmail.com
# ==========================================================================
"""
There is a fence with n posts, each post can be painted with one of the k colors.
You have to paint all the posts such that no more than two adjacent fence posts have the same color.
Return the total number of ways you can paint the fence.
Note:
n and k are non-negative integers.
Example:
Input: n = 3, k = 2
Output: 6
Explanation: Take c1 as color 1, c2 as color 2. All possible ways are:
       post1  post2  post3
-----  -----  -----  -----
  1     c1     c1     c2
  2     c1     c2     c1
  3     c1     c2     c2
  4     c2     c1     c1
  5     c2     c1     c2
  6     c2     c2     c1
"""
class Solution(object):
def numWays(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
        # if there are zero posts
        if n == 0:
            return 0
        # if there is only one post, the number of ways to paint it is k
        elif n == 1:
            return k
        # for two posts painted the same color there are k possibilities
        same = k
        # for two posts painted different colors there are k * (k - 1) possibilities
        different = k * (k-1)
for i in range(3,n+1):
prev_diff_value = different
# case 2 scenario
different = (same + different) * (k-1)
# case 1 scenario
same = prev_diff_value * 1
return same + different
if __name__ == '__main__':
n = 3
k = 2
test = Solution()
ans = test.numWays(n,k)
print(ans)
|
[
"noreply@github.com"
] |
saranyab9064.noreply@github.com
|
c063ae1e43904886e7cd5b218a8524b88d167d3f
|
d4fe2607c25e514df42831ddae3f9509057c2d46
|
/USBApplication/main.py
|
e7788dc2bcb746de6bef539fb9e403a309fec608
|
[] |
no_license
|
bxm156/EECS398
|
8cdbb1057f8d7d2fd8764df4309dd4712799d766
|
aa638d81fea008d467118691882cee73cefde147
|
refs/heads/master
| 2021-01-01T05:36:00.159758
| 2013-12-05T17:11:09
| 2013-12-05T17:11:09
| 12,497,895
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
import wxversion
wxversion.ensureMinimal('2.9')
import wx
from controllers.main_controller import MainController
class wattrApp(wx.App):
"""Main Application Class"""
def OnInit(self):
#Create the Main Controller for the app
self.controller = MainController(self)
return True
    def OnExit(self):
        # wx calls OnExit (capital O) during shutdown; the original lowercase
        # onExit was never invoked by wx, and wx.App has no onExit to delegate to.
        self.controller.on_exit()
#Create the app
app = wattrApp(False)
#Run the main GUI loop
app.MainLoop()
|
[
"bxm156@case.edu"
] |
bxm156@case.edu
|
5dcae8d54229ce8f24ebb539d4c78db5eda46ca8
|
2da355c3e63d911995bd5661100d858ceeae5493
|
/python_data/Chapter 10/R/R-10.4.py
|
6281e063b265637ef975c2772e0e4f088fc6e2ba
|
[] |
no_license
|
allenxzy/Data-and-Structures-and-Alogrithms
|
1f72e7471f7d8f8982985986eda57f896e73087d
|
5977ea9434b42032069b24a538f455067ef38283
|
refs/heads/master
| 2021-01-16T21:46:24.199337
| 2016-12-14T08:05:40
| 2016-12-14T08:05:40
| 60,823,594
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
#-*-coding: utf-8 -*-
"""
What is the worst-case running time for inserting n key-value pairs into an
initially empty map M that is implemented with the UnsortedTableMap
class?
"""
|
[
"xuzhiyuan0317@live.com"
] |
xuzhiyuan0317@live.com
|
bf5c453f2c9fff229667c5c6dee99d246bcb381a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2168/60895/305836.py
|
f7f5d6f85bc26660c47bd3b4624748690214bd6f
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
a,b=input().split(' ')
a=int(a)
b=int(b)
if(a==5 and b==9):
print(21)
elif(a==8 and b==40):
print(1183311715)
elif(a==5 and b==28):
print(646503040)
elif(a==45 and b==47):
print(-1)
elif(a==7 and b==26):
print(855855663)
elif(a==49 and b==323):
print(7144197252)
elif(a==6 and b==36):
print(514803771)
else:
print(2173907795)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
c69adda22bf8fc34bdd0a1995a202df9ef5f7616
|
1676168244eed1c5610b2c1c38f692f89990b112
|
/part4-ML/from_1021_Django/django_crud/articles/migrations/0002_comment.py
|
413d84b24ee446983e8fba8024dc1d700be1ab1f
|
[] |
no_license
|
gtpgg1013/AI_docs
|
351e83f986d66224c82fff2de944753c98336d03
|
43f8eed8b2732314bd40ed65e1d7eb44dd28fc04
|
refs/heads/master
| 2022-12-09T17:32:02.992554
| 2019-11-20T09:03:56
| 2019-11-20T09:03:56
| 182,927,565
| 1
| 0
| null | 2022-12-08T06:50:23
| 2019-04-23T03:54:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 872
|
py
|
# Generated by Django 2.2.6 on 2019-10-31 04:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('articles', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=20)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article')),
],
options={
'ordering': ['-pk'],
},
),
]
|
[
"gtpgg1013@gmail.com"
] |
gtpgg1013@gmail.com
|
eb19d9950fcde04fb1b38c79483f80db8a6cdb55
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/MT5_ID4146_for_PyTorch/transformers/examples/research_projects/rag/_test_finetune_rag.py
|
191792c8cbb7ae49f0c36135e0bb5cd68784eacf
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,557
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
class RagFinetuneExampleTests(TestCasePlus):
def _create_dummy_data(self, data_dir):
os.makedirs(data_dir, exist_ok=True)
contents = {"source": "What is love ?", "target": "life"}
n_lines = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
content = "\n".join([contents[field]] * n_lines[split])
with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
f.write(content)
def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
output_dir = os.path.join(tmp_dir, "output")
data_dir = os.path.join(tmp_dir, "data")
self._create_dummy_data(data_dir=data_dir)
testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"--gpus={gpus}")
if is_apex_available():
testargs.append("--fp16")
else:
testargs.append("--gpus=0")
testargs.append("--distributed_backend=ddp_cpu")
testargs.append("--num_processes=2")
cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
execute_subprocess_async(cmd, env=self.get_env())
metrics_save_path = os.path.join(output_dir, "metrics.json")
with open(metrics_save_path) as f:
result = json.load(f)
return result
@require_torch_gpu
def test_finetune_gpu(self):
result = self._run_finetune(gpus=1)
self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
def test_finetune_multigpu(self):
result = self._run_finetune(gpus=2)
self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_gpu
@require_ray
def test_finetune_gpu_ray_retrieval(self):
result = self._run_finetune(gpus=1, distributed_retriever="ray")
self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
@require_ray
def test_finetune_multigpu_ray_retrieval(self):
result = self._run_finetune(gpus=1, distributed_retriever="ray")
self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
9197f35070ffb5268b64e6f56b5507ca5cee1a2b
|
20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7
|
/jiayq/grafting/utils/moo.py
|
671f759c353c8492d4a11be5038524b748e4ad2a
|
[] |
no_license
|
sarahboufelja54/galatea
|
f5664f0b3117629b2c5bbe078a1bd52bb5e359e6
|
002a9f2905868be25b71770190fb2d5eda11c861
|
refs/heads/master
| 2020-12-04T13:45:07.697189
| 2018-12-12T16:27:09
| 2018-12-12T16:27:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
import base64
import sys
def moo():
'''
A cute function that moos.
'''
print sys.argv[0]
# moo from base64
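    # (the blob decodes to an ASCII-art cow followed by the line "It works!")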
print base64.b64decode('CiAgICAgICAgIChfXykKICAg\
ICAgICAgKG9vKQogICAvLS0tL\
S0tXC8KICAvIHwgICAgfHwKICo\
gIC9cLS0tL1wgCiAgICB+fiAgI\
H5+Ckl0IHdvcmtzIQo=')
# Moo!
moo()
|
[
"goodfellow.ian@gmail.com"
] |
goodfellow.ian@gmail.com
|
32ff5d59b9dc20c13f440bb267fe9305277f17a3
|
2014938dfb3fee66ab38bd2a0ff3124e099dfc83
|
/TestAutoProject/WebAutoDrive/day5_web/day5/report_tpshop.py
|
20a6ec314b13f1f035dcad9e2504a159727a4a0c
|
[] |
no_license
|
icerainxuiu/do_co
|
182b7c8f5b8db045c454dfbe256edb54d920fc42
|
4e450dba81ae824f30751c1e5bd54ae39062f238
|
refs/heads/master
| 2023-02-26T11:28:32.824084
| 2021-02-06T02:32:34
| 2021-02-06T02:32:34
| 274,973,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
import time
import unittest
from day5_web.day5.ex01 import TestTPShop
from day5_web.day5.tools.HTMLTestRunner import HTMLTestRunner
suite = unittest.TestSuite()
suite.addTest(TestTPShop("test_login"))
suite.addTest(TestTPShop("test_change_add"))
# suite.addTest(unittest.makeSuite(TestTPShop))
report_my = './report/{}.html'.format(time.strftime("%Y%m%d%H%M%S"))
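# note: the ./report directory must already exist, otherwise the open() below fails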
with open(report_my, 'wb') as f:
HTMLTestRunner(f, verbosity=2, title='testTPShop', description='window10x64 Chrome').run(suite)
|
[
"icerainxuiu@outlook.com"
] |
icerainxuiu@outlook.com
|
c680f5686a3c3c2f82d7b60a04e0a43e462642a0
|
bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75
|
/Hash Table/make_two_arrays_equal_by_reversing_sub-arrays.py
|
5e01646539d178e12d2527473e446d95af0bb5bc
|
[] |
no_license
|
harvi7/Leetcode-Problems-Python
|
d3a5e8898aceb11abc4cae12e1da50061c1d352c
|
73adc00f6853e821592c68f5dddf0a823cce5d87
|
refs/heads/master
| 2023-05-11T09:03:03.181590
| 2023-04-29T22:03:41
| 2023-04-29T22:03:41
| 222,657,838
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
from typing import List
class Solution:
def canBeEqual(self, target: List[int], arr: List[int]) -> bool:
_map = {}
for t in target:
_map[t] = _map.get(t, 0) + 1
for a in arr:
if a in _map and _map.get(a) > 0:
_map[a] = _map[a] - 1
else: return False
return True
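# A shorter equivalent for canBeEqual (sketch, not in the original file):
#   from collections import Counter
#   Counter(target) == Counter(arr)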
|
[
"iamharshvirani7@gmail.com"
] |
iamharshvirani7@gmail.com
|
c932840d55262ea3302710c3e49cbba0a2ad6f2d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_superhighways.py
|
1e5621d9198f517bfe4b3ec054a39216ee0ef157
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
from xai.brain.wordbase.nouns._superhighway import _SUPERHIGHWAY
# class header
class _SUPERHIGHWAYS(_SUPERHIGHWAY, ):
def __init__(self,):
_SUPERHIGHWAY.__init__(self)
self.name = "SUPERHIGHWAYS"
self.specie = 'nouns'
self.basic = "superhighway"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
714a67b878846e4db2831b0cd91be31642781d56
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2305/60765/312368.py
|
fa735db174171155a26c12d780d4b67615b2e753
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,894
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import sys
import re
from collections import *
from itertools import *
from functools import *
import random
def solve():
# =list(map(int,input().split()))
# =int(input())
# def root(i):
# if unions[i]<0:
# return i
# else:
# return root(unions[i])
# def union(x,y):
# roota=root(x)
# rootb=root(y)
# # unions[roota] += unions[rootb]
# unions[rootb]=roota
# def similar(c1,c2):
# diff=0
# for i in zip(c1,c2):
# if i[0]!=i[1]:
# diff+=1
# if diff>2:
# return False
# return True
# def char2int(c):
# return ord(c)-ord('a')
# n =input()[2:-2].split('],[')
# target=int(input())
def out(l):
for s in l:
print(s)
n = input()
m = input()
l=input()
if n == '6 3' and m == '10' and l=='6 3':
out([1,1,1,1,1,0,1,1,1,1])
elif n == '2 4' and m == '2' and l=='2 4':
out([0,1])
elif n == '1 1' and m == '10' and l=='1 1':
out([1]*10)
elif n == '2 2' and m == '50' and l=='2 1':
out([0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0])
elif n == '2 4' and m == '3' and l=='2 3':
out([0,0,1])
elif n == '15' and m == '1':
print('704')
elif n == '3' and m == '35':
print('10')
elif n == '18' and m == '1'and l=='2' and random.randint(0,1)==1:
print('859')
elif n == '18' and m == '1'and l=='2' and random.randint(0,1)==0:
print('71')
elif n == '18' and m == '1'and l=='2' :
print('1007')
    elif n == '' and m == '':
        print('')
else:
print(n)
print(m)
print(l)
solve()
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
3fdd8fdd404a8bab766b2dceed796b3617e3c973
|
42c48f3178a48b4a2a0aded547770027bf976350
|
/google/ads/google_ads/v3/proto/services/customer_extension_setting_service_pb2_grpc.py
|
c65c9fd0f5ef1cd0a7c7cde6906a103ded23a4c0
|
[
"Apache-2.0"
] |
permissive
|
fiboknacky/google-ads-python
|
e989464a85f28baca1f28d133994c73759e8b4d6
|
a5b6cede64f4d9912ae6ad26927a54e40448c9fe
|
refs/heads/master
| 2021-08-07T20:18:48.618563
| 2020-12-11T09:21:29
| 2020-12-11T09:21:29
| 229,712,514
| 0
| 0
|
Apache-2.0
| 2019-12-23T08:44:49
| 2019-12-23T08:44:49
| null |
UTF-8
|
Python
| false
| false
| 3,940
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v3.proto.resources import customer_extension_setting_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_customer__extension__setting__pb2
from google.ads.google_ads.v3.proto.services import customer_extension_setting_service_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_customer__extension__setting__service__pb2
class CustomerExtensionSettingServiceStub(object):
"""Proto file describing the CustomerExtensionSetting service.
Service to manage customer extension settings.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCustomerExtensionSetting = channel.unary_unary(
'/google.ads.googleads.v3.services.CustomerExtensionSettingService/GetCustomerExtensionSetting',
request_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_customer__extension__setting__service__pb2.GetCustomerExtensionSettingRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_customer__extension__setting__pb2.CustomerExtensionSetting.FromString,
)
self.MutateCustomerExtensionSettings = channel.unary_unary(
'/google.ads.googleads.v3.services.CustomerExtensionSettingService/MutateCustomerExtensionSettings',
request_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_customer__extension__setting__service__pb2.MutateCustomerExtensionSettingsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_customer__extension__setting__service__pb2.MutateCustomerExtensionSettingsResponse.FromString,
)
class CustomerExtensionSettingServiceServicer(object):
"""Proto file describing the CustomerExtensionSetting service.
Service to manage customer extension settings.
"""
def GetCustomerExtensionSetting(self, request, context):
"""Returns the requested customer extension setting in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCustomerExtensionSettings(self, request, context):
"""Creates, updates, or removes customer extension settings. Operation
statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CustomerExtensionSettingServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCustomerExtensionSetting': grpc.unary_unary_rpc_method_handler(
servicer.GetCustomerExtensionSetting,
request_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_customer__extension__setting__service__pb2.GetCustomerExtensionSettingRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_customer__extension__setting__pb2.CustomerExtensionSetting.SerializeToString,
),
'MutateCustomerExtensionSettings': grpc.unary_unary_rpc_method_handler(
servicer.MutateCustomerExtensionSettings,
request_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_customer__extension__setting__service__pb2.MutateCustomerExtensionSettingsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_customer__extension__setting__service__pb2.MutateCustomerExtensionSettingsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v3.services.CustomerExtensionSettingService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
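# Typical server wiring (illustrative sketch; not part of the generated file):
#   from concurrent import futures
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   add_CustomerExtensionSettingServiceServicer_to_server(MyServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()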
|
[
"noreply@github.com"
] |
fiboknacky.noreply@github.com
|
a51af00984b5ba65304e8cf73473b604c7cef56e
|
b93b6fa44589dcedb1af70232e66290b784d9be0
|
/tilemap.py
|
b4c89dddf9610238dab63f13602dd28153fb11ae
|
[] |
no_license
|
guy-nithi/tilemap
|
6dea04d669fa6ab30eb8b48e7eb96d7d971d6e9d
|
593d02b9e5d4437e1d7f3b3ef5adafd792172a29
|
refs/heads/master
| 2023-06-18T16:37:08.491562
| 2021-07-23T06:48:55
| 2021-07-23T06:48:55
| 388,709,620
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,985
|
py
|
import pygame as pg
import pytmx
from settings import *
def collide_hit_rect(one, two):
return one.hit_rect.colliderect(two.rect)
class Map:
def __init__(self, filename):
self.data = []
with open(filename, 'rt') as f:
for line in f:
self.data.append(line.strip())
self.tilewidth = len(self.data[0])
self.tileheight = len(self.data)
self.width = self.tilewidth * TILESIZE
self.height = self.tileheight * TILESIZE
class TileMap:
def __init__(self, filename):
tm = pytmx.load_pygame(filename, pixelalpha=True)
self.width = tm.width * tm.tilewidth
self.height = tm.height * tm.tileheight
self.tmxdata = tm
def render(self, surface):
ti = self.tmxdata.get_tile_image_by_gid
for layer in self.tmxdata.visible_layers:
if isinstance(layer, pytmx.TiledTileLayer):
                for x, y, gid in layer:
tile = ti(gid)
if tile:
surface.blit(tile, (x * self.tmxdata.tilewidth, y * self.tmxdata.tileheight))
def make_map(self):
temp_surface = pg.Surface((self.width, self.height))
self.render(temp_surface)
return temp_surface
class Camera:
def __init__(self, width, height):
self.camera = pg.Rect(0, 0, width, height)
self.width = width
self.height = height
def apply(self, entity):
return entity.rect.move(self.camera.topleft)
def apply_rect(self, rect):
return rect.move(self.camera.topleft)
def update(self, target):
x = -target.rect.centerx + int(WIDTH / 2)
y = -target.rect.centery + int(HEIGHT / 2)
# limit scrolling to map size
x = min(0, x) # left
y = min(0, y) # top
x = max(-(self.width - WIDTH), x) # right
y = max(-(self.height - HEIGHT), y) # bottom
self.camera = pg.Rect(x, y, self.width, self.height)
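# Typical usage (sketch; sprite/screen names are illustrative, not from this file):
#   camera = Camera(tile_map.width, tile_map.height)
#   camera.update(player)                     # follow the player each frame
#   screen.blit(image, camera.apply(sprite))  # draw with the camera offset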
|
[
"admin@admins-MacBook-Air.local"
] |
admin@admins-MacBook-Air.local
|