defmodule Payjp.Plans do
@moduledoc """
Basic List, Create, Update, Delete API for Plans
- create a plan
- get a plan
- update a plan
- delete a single plan
- delete all plans
- list plans
- get all plans
https://pay.jp/docs/api/#plan-プラン
"""
@endpoint "plans"
@doc """
Creates a Plan. The API requires `currency` and `interval`; if omitted here, they default to "JPY" and "month" respectively.
## Example
```
{:ok, plan} = Payjp.Plans.create [id: "test-plan", name: "Test Plan", amount: 1000, interval: "month"]
```
"""
def create(params) do
create params, Payjp.config_or_env_key
end
@doc """
Creates a Plan using a given key. The API requires `currency` and `interval`; if omitted here, they default to "JPY" and "month" respectively.
## Example
```
{:ok, plan} = Payjp.Plans.create [id: "test-plan", name: "Test Plan", amount: 1000, interval: "month"], key
```
"""
def create(params, key) do
# Default the currency and interval if not provided
params = Keyword.put_new params, :currency, "JPY"
params = Keyword.put_new params, :interval, "month"
Payjp.make_request_with_key(:post, @endpoint, key, params)
|> Payjp.Util.handle_payjp_response
end
@doc """
Retrieves a given Plan with the specified ID. Returns 404 if not found.
## Example
```
{:ok, cust} = Payjp.Plans.get "plan_id"
```
"""
def get(id) do
get id, Payjp.config_or_env_key
end
@doc """
Retrieves a given Plan with the specified ID. Returns 404 if not found.
Using a given payjp key to apply against the account associated.
## Example
```
{:ok, cust} = Payjp.Plans.get "plan_id", key
```
"""
def get(id, key) do
Payjp.make_request_with_key(:get, "#{@endpoint}/#{id}", key)
|> Payjp.Util.handle_payjp_response
end
@doc """
Returns a list of Plans.
## Example
```
{:ok, plans} = Payjp.Plans.list
{:ok, plans} = Payjp.Plans.list(20)
```
"""
def list(limit \\ 10) do
list Payjp.config_or_env_key, limit
end
@doc """
Returns a list of Plans using the given key.
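## Example
```
{:ok, plans} = Payjp.Plans.list key, 20
```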
"""
def list(key, limit) do
Payjp.make_request_with_key(:get, "#{@endpoint}?limit=#{limit}", key)
|> Payjp.Util.handle_payjp_response
end
@doc """
"""
def retrieve(id) do
retrieve id, Payjp.config_or_env_key
end
@doc """
Returns a single Plan using the given Plan ID.
"""
def retrieve(id, key) do
Payjp.make_request_with_key(:get, "#{@endpoint}/#{id}", key)
|> Payjp.Util.handle_payjp_response
end
@doc """
Deletes a Plan with the specified ID.
## Example
```
{:ok, res} = Payjp.Plans.delete "test-plan"
```
"""
def delete(id) do
delete id, Payjp.config_or_env_key
end
@doc """
Deletes a Plan with the specified ID using the given key.
## Example
```
{:ok, res} = Payjp.Plans.delete "test-plan", key
```
"""
def delete(id, key) do
Payjp.make_request_with_key(:delete, "#{@endpoint}/#{id}", key)
|> Payjp.Util.handle_payjp_response
end
@doc """
Changes Plan information. See Payjp docs as to what you can change.
## Example
```
{:ok, plan} = Payjp.Plans.change("test-plan",[name: "Other Plan"])
```
"""
def change(id, params) do
change(id, params, Payjp.config_or_env_key)
end
def change(id, params, key) do
Payjp.make_request_with_key(:post, "#{@endpoint}/#{id}", key, params)
|> Payjp.Util.handle_payjp_response
end
@max_fetch_size 100
@doc """
List all plans.
## Example
```
{:ok, plans} = Payjp.Plans.all
```
"""
def all(accum \\ [], opts \\ [limit: @max_fetch_size]) do
all Payjp.config_or_env_key, accum, opts
end
@doc """
List all plans using the given key. Note that `all/3` takes the key first,
followed by the accumulator and options.
## Example
```
{:ok, plans} = Payjp.Plans.all key, [], limit: 100
```
"""
def all(key, accum, opts) do
case Payjp.Util.list_raw(@endpoint, key, opts) do
{:ok, resp} ->
case resp[:has_more] do
true ->
last_plan = List.last(resp[:data])
all(key, resp[:data] ++ accum, until: last_plan["created"], limit: @max_fetch_size)
false ->
result = resp[:data] ++ accum
{:ok, result}
end
{:error, err} ->
{:error, err}
end
end
@doc """
Deletes all Plans
## Example
```
Payjp.Plans.delete_all
```
"""
def delete_all do
delete_all Payjp.config_or_env_key
end
@doc """
Deletes all Plans w/given key
## Example
```
Payjp.Plans.delete_all key
```
"""
def delete_all(key) do
# List with the same key we delete with, not the configured default
case all(key, [], limit: @max_fetch_size) do
{:ok, plans} ->
Enum.each plans, fn p -> delete(p["id"], key) end
{:error, err} -> raise err
end
:ok
end
end
# Source: lib/payjp/plans.ex
defmodule BSV.Util do
@moduledoc """
Collection of shared helper functions, used frequently throughout the library.
"""
@typedoc "Binary encoding format"
@type encoding() :: :base64 | :hex
@doc """
Decodes the given binary data using the specified `t:BSV.Util.encoding/0`.
Returns the result in an `:ok` / `:error` tuple pair.
## Examples
iex> BSV.Util.decode("aGVsbG8gd29ybGQ=", :base64)
{:ok, "hello world"}
iex> BSV.Util.decode("68656c6c6f20776f726c64", :hex)
{:ok, "hello world"}
"""
@spec decode(binary(), encoding()) :: {:ok, binary()} | {:error, term()}
def decode(data, encoding) do
case do_decode(data, encoding) do
{:ok, data} ->
{:ok, data}
:error ->
{:error, {:invalid_encoding, encoding}}
end
end
# Decodes the binary
defp do_decode(data, :base64), do: Base.decode64(data)
defp do_decode(data, :hex), do: Base.decode16(data, case: :mixed)
defp do_decode(data, _), do: {:ok, data}
@doc """
Decodes the given binary data using the specified `t:BSV.Util.encoding/0`.
As `decode/2` but returns the result or raises an exception.
"""
@spec decode!(binary(), encoding()) :: binary()
def decode!(data, encoding) do
case decode(data, encoding) do
{:ok, data} ->
data
{:error, error} ->
raise BSV.DecodeError, error
end
end
@doc """
Encodes the given binary data using the specified `t:BSV.Util.encoding/0`.
## Examples
iex> BSV.Util.encode("hello world", :base64)
"aGVsbG8gd29ybGQ="
iex> BSV.Util.encode("hello world", :hex)
"68656c6c6f20776f726c64"
"""
@spec encode(binary(), encoding()) :: binary()
def encode(data, :base64), do: Base.encode64(data)
def encode(data, :hex), do: Base.encode16(data, case: :lower)
def encode(data, _), do: data
@doc """
Returns a binary containing the specified number of random bytes.
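## Examples
iex> bytes = BSV.Util.rand_bytes(4)
iex> byte_size(bytes)
4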
"""
@spec rand_bytes(non_neg_integer()) :: binary()
def rand_bytes(bytes) when is_integer(bytes),
do: :crypto.strong_rand_bytes(bytes)
@doc """
Reverses the bytes of the given binary data.
## Examples
iex> BSV.Util.reverse_bin("abcdefg")
"gfedcba"
iex> BSV.Util.reverse_bin(<<1, 2, 3, 0>>)
<<0, 3, 2, 1>>
"""
@spec reverse_bin(binary()) :: binary()
def reverse_bin(data) when is_binary(data) do
data
|> :binary.bin_to_list()
|> Enum.reverse()
|> :binary.list_to_bin()
end
end
# Source: lib/bsv/util.ex
defmodule Bonbon.API.Schema.Cuisine.Region do
use Absinthe.Schema
use Translecto.Query
@moduledoc false
@desc "A culinary region"
object :region do
field :id, :id, description: "The id of the region"
field :continent, :string, description: "The continent of the region"
field :subregion, :string, description: "The subregion of the region"
field :country, :string, description: "The country of the region"
field :province, :string, description: "The province of the region"
field :style, :string, description: "The culinary style"
end
@desc "A culinary region"
input_object :region_input do
field :id, :id, description: "The id of the region"
field :continent, :string, description: "The continent of the region"
field :subregion, :string, description: "The subregion of the region"
field :country, :string, description: "The country of the region"
field :province, :string, description: "The province of the region"
end
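# The computed :style falls back from the most specific region term
# (province) through country and subregion to continent.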
def format(result), do: Map.put(result, :style, result[:province] || result[:country] || result[:subregion] || result[:continent])
def get(%{ id: id, locale: locale }, env) do
query = from region in Bonbon.Model.Cuisine.Region,
where: region.id == ^id,
locales: ^Bonbon.Model.Locale.to_locale_id_list!(locale),
translate: continent in region.continent,
translate: subregion in region.subregion,
translate: country in region.country,
translate: province in region.province,
select: %{
id: region.id,
continent: continent.term,
subregion: subregion.term,
country: country.term,
province: province.term
}
case Bonbon.Repo.one(query) do
nil -> { :error, "Could not find region" }
result -> { :ok, format(result) }
end
end
defp query_all(args = %{ find: find }) do
find = find <> "%"
where(query_all(Map.delete(args, :find)), [i, c, s, n, p],
ilike(c.term, ^find) or
ilike(s.term, ^find) or
ilike(n.term, ^find) or
ilike(p.term, ^find)
)
end
defp query_all(args = %{ continent: continent }) do
continent = continent <> "%"
where(query_all(Map.delete(args, :continent)), [i, c, s, n, p], ilike(c.term, ^continent))
end
defp query_all(args = %{ subregion: subregion }) do
subregion = subregion <> "%"
where(query_all(Map.delete(args, :subregion)), [i, c, s, n, p], ilike(s.term, ^subregion))
end
defp query_all(args = %{ country: country }) do
country = country <> "%"
where(query_all(Map.delete(args, :country)), [i, c, s, n, p], ilike(n.term, ^country))
end
defp query_all(args = %{ province: province }) do
province = province <> "%"
where(query_all(Map.delete(args, :province)), [i, c, s, n, p], ilike(p.term, ^province))
end
defp query_all(%{ locale: locale, limit: limit, offset: offset }) do
from region in Bonbon.Model.Cuisine.Region,
locales: ^Bonbon.Model.Locale.to_locale_id_list!(locale),
translate: continent in region.continent,
translate: subregion in region.subregion,
translate: country in region.country,
translate: province in region.province,
limit: ^limit,
offset: ^offset, # TODO: paginate
select: %{
id: region.id,
continent: continent.term,
subregion: subregion.term,
country: country.term,
province: province.term
}
end
def all(args, _) do
# Repo.all/1 returns a list (never nil), so match on the empty list
case Bonbon.Repo.all(query_all(args)) do
[] -> { :error, "Could not retrieve any regions" }
result -> { :ok, Enum.map(result, &format/1) }
end
end
end
# Source: web/api/schema/cuisine/region.ex
defmodule Kitt.Message.MAP do
@moduledoc """
Defines the structure and instantiation function
for creating a J2735-compliant MapData message
A MapData message defines the geometry of a roadway
intersection including the lanes, ingress/egress vectors
and other defined rules for traversing the intersection.
"""
@typedoc "Defines the MapData message and the data elements comprising its fields"
@type t :: %__MODULE__{
timeStamp: Kitt.Types.minute_of_year(),
msgIssueRevision: non_neg_integer(),
layerType:
:none
| :mixedContent
| :generalMapData
| :intersectionData
| :curveData
| :roadwaySectionData
| :parkingAreaData
| :sharedLaneData,
layerID: non_neg_integer(),
intersections: [intersection_geometry()],
roadSegments: [road_segment()],
dataParameters: data_parameters(),
restrictionList: [restriction_class_assignment()],
regional: [Kitt.Types.regional_extension()]
}
@type restriction_class_assignment :: %{
id: non_neg_integer(),
users: [restriction_user_type()]
}
@type restriction_user_type ::
{:basicType, restriction_applies_to()}
| {:regional, [Kitt.Types.regional_extension()]}
@type restriction_applies_to ::
:none
| :equippedTransit
| :equippedTaxis
| :equippedOther
| :emissionCompliant
| :equippedBicycle
| :weightCompliant
| :heightCompliant
| :pedestrians
| :slowMovingPersons
| :wheelchairUsers
| :visualDisabilities
| :audioDisabilities
| :otherUnknownDisabilities
@type data_parameters :: %{
processMethod: String.t(),
processAgency: String.t(),
lastCheckedDate: String.t(),
geoidUsed: String.t()
}
@type road_segment :: %{
name: String.t(),
id: Kitt.Types.road_segment_reference_id(),
refPoint: Kitt.Types.position_3d(),
laneWidth: non_neg_integer(),
speedLimits: [Kitt.Types.regulatory_speed_limit()],
roadLaneSet: [generic_lane()],
regional: [Kitt.Types.regional_extension()]
}
@type intersection_geometry :: %{
name: String.t(),
id: Kitt.Types.intersection_reference_id(),
revision: non_neg_integer(),
refPoint: Kitt.Types.position_3d(),
laneWidth: non_neg_integer(),
speedLimits: [Kitt.Types.regulatory_speed_limit()],
laneSet: [generic_lane()],
preemptPriorityData: [signal_control_zone()],
regional: [Kitt.Types.regional_extension()]
}
@type signal_control_zone :: %{
zone: Kitt.Types.regional_extension()
}
@type generic_lane :: %{
laneID: non_neg_integer(),
name: String.t(),
ingressApproach: non_neg_integer(),
egressApproach: non_neg_integer(),
laneAttributes: lane_attributes(),
maneuvers: maneuver(),
nodeList: Kitt.Types.node_list_xy(),
connectsTo: [connection()],
overlays: [non_neg_integer()],
regional: [Kitt.Types.regional_extension()]
}
@type maneuver ::
:maneuverStraightAllowed
| :maneuverLeftAllowed
| :maneuverRightAllowed
| :maneuverUTurnAllowed
| :maneuverLeftTurnOnRedAllowed
| :maneuverRightTurnOnRedAllowed
| :maneuverLaneChangeAllowed
| :maneuverNoStoppingAllowed
| :yieldAllwaysRequired
| :goWithHalt
| :caution
| :reserved1
@type connection :: %{
connectingLane: connecting_lane(),
remoteIntersection: Kitt.Types.intersection_reference_id(),
signalGroup: non_neg_integer(),
userClass: non_neg_integer(),
connectionID: non_neg_integer()
}
@type connecting_lane :: %{
lane: non_neg_integer(),
maneuver: maneuver()
}
@type lane_attributes :: %{
directionalUse: directional_use(),
sharedWith: shared_with(),
laneType: lane_attribute_type(),
regional: [Kitt.Types.regional_extension()]
}
@type directional_use ::
:ingressPath
| :egressPath
@type shared_with ::
:overlappingLaneDescriptionProvided
| :multipleLanesTreatedAsOneLane
| :otherNonMotorizedTrafficTypes
| :individualMotorizedVehicleTraffic
| :busVehicleTraffic
| :taxiVehicleTraffic
| :pedestriansTraffic
| :cyclistVehicleTraffic
| :trackedVehicleTraffic
| :pedestrianTraffic
@type lane_attribute_type ::
{:vehicle, vehicle_lane_attribute()}
| {:crosswalk, crosswalk_lane_attribute()}
| {:bikeLane, bike_lane_attribute()}
| {:sidewalk, sidewalk_lane_attribute()}
| {:median, median_lane_attribute()}
| {:striping, striping_lane_attribute()}
| {:trackedVehicle, tracked_vehicle_attribute()}
| {:parking, parking_lane_attribute()}
@type vehicle_lane_attribute ::
:isVehicleRevocableLane
| :isVehicleFlyOverLane
| :hovLaneUseOnly
| :restrictedToBusUse
| :restrictedToTaxiUse
| :restrictedFromPublicUse
| :hasIRbeaconCoverage
| :permissionOnRequest
@type crosswalk_lane_attribute ::
:crosswalkRevocableLane
| :bicyleUseAllowed
| :isXwalkFlyOverLane
| :fixedCycleTime
| :biDirectionalCycleTimes
| :hasPushToWalkButton
| :audioSupport
| :rfSignalRequestPresent
| :unsignalizedSegmentsPresent
@type bike_lane_attribute ::
:bikeRevocableLane
| :pedestrianUseAllowed
| :isBikeFlyOverLane
| :fixedCycleTime
| :biDirectionalCycleTimes
| :isolatedByBarrier
| :unsignalizedSegmentsPresent
@type sidewalk_lane_attribute ::
:"sidewalk-RevocableLane"
| :bicyleUseAllowed
| :isSidewalkFlyOverLane
| :walkBikes
@type median_lane_attribute ::
:"median-RevocableLane"
| :median
| :whiteLineHashing
| :stripedLines
| :doubleStripedLines
| :trafficCones
| :constructionBarrier
| :trafficChannels
| :lowCurbs
| :highCurbs
@type striping_lane_attribute ::
:stripeToConnectingLanesRevocableLane
| :stripeDrawOnLeft
| :stripeDrawOnRight
| :stripeToConnectingLanesLeft
| :stripeToConnectingLanesRight
| :stripeToConnectingLanesAhead
@type tracked_vehicle_attribute ::
:"spec-RevocableLane"
| :"spec-commuterRailRoadTrack"
| :"spec-lightRailRoadTrack"
| :"spec-heavyRailRoadTrack"
| :"spec-otherRailType"
@type parking_lane_attribute ::
:parkingRevocableLane
| :parallelParkingInUse
| :headInParkingInUse
| :doNotParkZone
| :parkingForBusUse
| :parkingForTaxiUse
| :noPublicParkingUse
@derive Jason.Encoder
@enforce_keys [:msgIssueRevision]
defstruct [
:dataParameters,
:intersections,
:layerID,
:layerType,
:msgIssueRevision,
:regional,
:restrictionList,
:roadSegments,
:timeStamp
]
@doc """
Produces a `MAP` message struct from an equivalent map or keyword input
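## Example
iex> map = Kitt.Message.MAP.new(%{msgIssueRevision: 1})
iex> map.msgIssueRevision
1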
"""
@spec new(map() | keyword()) :: t()
def new(message), do: struct(__MODULE__, message)
@doc """
Returns the `MAP` identifying integer
"""
@spec type_id() :: non_neg_integer()
def type_id(), do: :DSRC.mapData()
@doc """
Returns the `MAP` identifying atom recognized by the ASN1 spec
"""
@spec type() :: atom()
def type(), do: :MapData
end
# Source: lib/kitt/message/map.ex
defmodule DiodeClient.Rlp do
alias DiodeClient.Rlpx
@compile :inline_list_funcs
@type rlp() :: binary() | [rlp()]
@moduledoc """
Encoding and Decoding of Recursive Length Prefix (RLP) https://eth.wiki/fundamentals/rlp
RLP is easy to decode and encode and has only two types of data:
lists and binaries. For small binaries and lists there is a
very space-efficient encoding. But primarily it allows native
storing of binary data, which is the main reason it's used within the
Diode Network.
By convention maps are stored like keyword lists in Erlang as a list of
key, value pairs such as:
`[[key1, value1], [key2, value2]]`
To ease working with maps they are automatically encoded to a keyword list
and there is the `Rlpx.list2map` to convert them back:
```
iex> alias DiodeClient.{Rlp, Rlpx}
iex> Rlp.encode!(%{key: "value"})
<<203, 202, 131, 107, 101, 121, 133, 118, 97, 108, 117, 101>>
iex> Rlp.encode!(%{key: "value"}) |> Rlp.decode!()
[["key", "value"]]
iex> Rlp.encode!(%{key: "value"}) |> Rlp.decode!()
[["key", "value"]]
iex> Rlp.encode!(%{key: "value"}) |> Rlp.decode! |> Rlpx.list2map
%{"key" => "value"}
```
"""
@doc """
Encode an Elixir term to RLP. Integers are converted
to binaries using `:binary.encode_unsigned/1`
If you want to encode integers as signed values pass
`encode!(term, unsigned: false)`
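For example, `encode!(-1, unsigned: false)` converts `-1` with
`Rlpx.int2bin/1` instead of the unsigned conversion.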
"""
def encode!(term, opts \\ []) do
do_encode!(term, opts)
|> :erlang.iolist_to_binary()
end
def encode_to_iolist!(term, opts \\ []) do
do_encode!(term, opts)
end
def do_encode!(<<x>>, _opts) when x < 0x80, do: <<x>>
def do_encode!(x, _opts) when is_binary(x) do
with_length!(0x80, x)
end
def do_encode!(list, opts) when is_list(list) do
with_length!(0xC0, :lists.map(&do_encode!(&1, opts), list))
end
def do_encode!(other, opts) do
encode_other!(other, opts)
|> do_encode!(opts)
end
defp with_length!(offset, data) do
size = :erlang.iolist_size(data)
if size <= 55 do
[offset + size, data]
else
bin = :binary.encode_unsigned(size)
[byte_size(bin) + offset + 55, bin, data]
end
end
@spec decode!(binary()) :: rlp()
def decode!(bin) do
{term, ""} = do_decode!(bin)
term
end
defp encode_other!(nil, _opts) do
""
end
defp encode_other!(struct, _opts) when is_struct(struct) do
Map.from_struct(struct)
|> Enum.map(fn {key, value} -> [Atom.to_string(key), value] end)
end
defp encode_other!(map, _opts) when is_map(map) do
Map.to_list(map)
|> Enum.map(fn {key, value} ->
[if(is_atom(key), do: Atom.to_string(key), else: key), value]
end)
end
defp encode_other!(tuple, _opts) when is_tuple(tuple) do
:erlang.tuple_to_list(tuple)
end
defp encode_other!(bits, _opts) when is_bitstring(bits) do
for <<x::size(1) <- bits>>, do: if(x == 1, do: "1", else: "0"), into: ""
end
defp encode_other!(0, _opts) do
# Sucks but this is the quasi standard by Go and Node.js
# This is why we have bin2uint
""
end
# `opts` is a keyword list (see `encode!/2`), so read the flag with Access
# rather than matching a map pattern, which would never match a list.
defp encode_other!(num, opts) when is_integer(num) do
if opts[:unsigned] == false,
do: Rlpx.int2bin(num),
else: Rlpx.uint2bin(num)
end
defp do_decode!(<<x::unsigned-size(8), rest::binary>>) when x <= 0x7F do
{<<x::unsigned>>, rest}
end
defp do_decode!(<<head::unsigned-size(8), rest::binary>>) when head <= 0xB7 do
size = head - 0x80
<<item::binary-size(size), rest::binary>> = rest
{item, rest}
end
defp do_decode!(<<head::unsigned-size(8), rest::binary>>) when head <= 0xBF do
length_size = (head - 0xB7) * 8
<<size::unsigned-size(length_size), item::binary-size(size), rest::binary>> = rest
{item, rest}
end
defp do_decode!(<<head::unsigned-size(8), rest::binary>>) when head <= 0xF7 do
size = head - 0xC0
<<list::binary-size(size), rest::binary>> = rest
{do_decode_list!([], list), rest}
end
defp do_decode!(<<head::unsigned-size(8), rest::binary>>) when head <= 0xFF do
length_size = (head - 0xF7) * 8
<<size::unsigned-size(length_size), list::binary-size(size), rest::binary>> = rest
{do_decode_list!([], list), rest}
end
defp do_decode_list!(list, "") do
Enum.reverse(list)
end
defp do_decode_list!(list, rest) do
{item, rest} = do_decode!(rest)
do_decode_list!([item | list], rest)
end
def test_perf() do
# req_id = 1
size = 1
index = 0
frag = :rand.bytes(8000)
# data = [[<<>>, req_id], size, index, frag]
:timer.tc(fn ->
for req_id <- 1..1_000_000 do
encode_to_iolist!([[<<>>, req_id], size, index, frag])
end
nil
end)
end
end
# Source: lib/diode_client/rlp.ex
defmodule Accent.Plug.Response do
@moduledoc """
Transforms the keys of an HTTP response to the case requested by the client.
A client can request what case the keys are formatted in by passing the case
as a header in the request. By default the header key is `Accent`. If the
client does not request a case or requests an unsupported case then no
conversion will happen. By default the supported cases are `camel`, `pascal`
and `snake`.
## Options
* `:header` - the HTTP header used to determine the case to convert the
response body to before sending the response (default: `Accent`)
* `:json_encoder` - module used to encode JSON. The module is expected to
define a `encode!/1` function for encoding the response body as JSON.
(required)
* `:json_decoder` - module used to decode JSON. The module is expected to
define a `decode!/1` function for decoding JSON into a map. (required)
* `:supported_cases` - map that defines what cases a client can request. By
default `camel`, `pascal` and `snake` are supported.
## Examples
```
plug Accent.Plug.Response, header: "x-accent",
supported_cases: %{"pascal" => Accent.Transformer.PascalCase},
json_encoder: Poison,
json_decoder: Poison
```
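With the configuration above, a client sending JSON with the
`x-accent: pascal` request header receives pascal-cased response keys.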
"""
import Plug.Conn
import Accent.Transformer
@default_cases %{
"camel" => Accent.Transformer.CamelCase,
"pascal" => Accent.Transformer.PascalCase,
"snake" => Accent.Transformer.SnakeCase
}
@doc false
def init(opts \\ []) do
%{
header: opts[:header] || "accent",
json_decoder: opts[:json_decoder] || raise(ArgumentError, "Accent.Plug.Response expects a :json_decoder option"),
json_encoder: opts[:json_encoder] || raise(ArgumentError, "Accent.Plug.Response expects a :json_encoder option"),
supported_cases: opts[:supported_cases] || @default_cases
}
end
@doc false
def call(conn, opts) do
if do_call?(conn, opts) do
conn
|> register_before_send(fn(conn) -> before_send_callback(conn, opts) end)
else
conn
end
end
# private
defp before_send_callback(conn, opts) do
response_content_type =
conn
|> get_resp_header("content-type")
|> Enum.at(0)
# Note - we don't support "+json" content types, and probably shouldn't add
# as a general feature because they may have specifications for the param
# names - e.g. https://tools.ietf.org/html/rfc7265#page-6 that mean the
# translation would be inappropriate
is_json_response = String.contains?(response_content_type || "", "application/json")
if is_json_response do
json_decoder = opts[:json_decoder]
json_encoder = opts[:json_encoder]
resp_body =
conn.resp_body
|> json_decoder.decode!
|> transform(select_transformer(conn, opts))
|> json_encoder.encode!
%{conn | resp_body: resp_body}
else
conn
end
end
defp do_call?(conn, opts) do
content_type =
conn
|> get_req_header("content-type")
|> Enum.at(0)
is_json = String.contains?(content_type || "", "application/json")
has_transformer = select_transformer(conn, opts)
is_json && has_transformer
end
defp select_transformer(conn, opts) do
accent = get_req_header(conn, opts[:header]) |> Enum.at(0)
supported_cases = opts[:supported_cases]
supported_cases[accent]
end
end
# Source: lib/accent/plugs/response.ex
defmodule Ravix.RQL.Tokens.Condition do
@moduledoc """
Supported RQL Conditions
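Each function returns a `Ravix.RQL.Tokens.Condition` struct describing a
single predicate; for example, `greater_than("cost", 10)` builds
`%Condition{token: :greater_than, field: "cost", params: [10]}`.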
"""
defstruct [
:token,
:field,
:params
]
alias Ravix.RQL.Tokens.Condition
@type t :: %Condition{
token: atom(),
field: String.t() | Condition.t(),
params: list()
}
@doc """
Greater than condition
Returns a `Ravix.RQL.Tokens.Condition`
## Examples
iex> import Ravix.RQL.Tokens.Condition
iex> greater_than("field", 10)
"""
@spec greater_than(atom() | String.t(), number()) :: Condition.t()
def greater_than(field_name, value) do
%Condition{
token: :greater_than,
field: field_name,
params: [value]
}
end
@doc """
Greater than or equal to condition
Returns a `Ravix.RQL.Tokens.Condition`
## Examples
iex> import Ravix.RQL.Tokens.Condition
iex> greater_than_or_equal_to("field", 10)
"""
@spec greater_than_or_equal_to(atom() | String.t(), number()) :: Condition.t()
def greater_than_or_equal_to(field_name, value) do
%Condition{
token: :greater_than_or_eq,
field: field_name,
params: [value]
}
end
@doc """
Lower than condition
Returns a `Ravix.RQL.Tokens.Condition`
## Examples
iex> import Ravix.RQL.Tokens.Condition
iex> lower_than("field", 10)
"""
@spec lower_than(atom() | String.t(), number()) :: Condition.t()
def lower_than(field_name, value) do
%Condition{
token: :lower_than,
field: field_name,
params: [value]
}
end
@doc """
Lower than or equal to condition
Returns a `Ravix.RQL.Tokens.Condition`
## Examples
iex> import Ravix.RQL.Tokens.Condition
iex> lower_than_or_equal_to("field", 10)
"""
@spec lower_than_or_equal_to(atom() | String.t(), number()) :: Condition.t()
def lower_than_or_equal_to(field_name, value) do
%Condition{
token: :lower_than_or_eq,
field: field_name,
params: [value]
}
end
@doc """
Equal to condition
Returns a `Ravix.RQL.Tokens.Condition`
## Examples
iex> import Ravix.RQL.Tokens.Condition
iex> equal_to("field", "value")
"""
@spec equal_to(atom() | String.t(), any) :: Condition.t()
def equal_to(field_name, value) do
%Condition{
token: :eq,
field: field_name,
params: [value]
}
end
@doc """
Not Equal to condition
Returns a `Ravix.RQL.Tokens.Condition`
## Examples
iex> import Ravix.RQL.Tokens.Condition
iex> not_equal_to("field", "value")
"""
@spec not_equal_to(atom() | String.t(), any) :: Condition.t()
def not_equal_to(field_name, value) do
%Condition{
token: :ne,
field: field_name,
params: [value]
}
end
@doc """
Specifies that the value is in a list
Returns a `Ravix.RQL.Tokens.Condition`
## Examples
iex> import Ravix.RQL.Tokens.Condition
iex> in?("field", [1,2,3])
"""
@spec in?(atom() | String.t(), list()) :: Condition.t()
def in?(field_name, values) do
%Condition{
token: :in,
field: field_name,
params: values
}
end
@doc """
Specifies that the value is not in a list
Returns a `Ravix.RQL.Tokens.Condition`
## Examples
iex> import Ravix.RQL.Tokens.Condition
iex> not_in("field", ["a", "b", "c"])
"""
@spec not_in(atom() | String.t(), list()) :: Condition.t()
def not_in(field_name, values) do
%Condition{
token: :nin,
field: field_name,
params: values
}
end
@doc """
Specifies that the value is between two values
Returns a `Ravix.RQL.Tokens.Condition`
## Examples
iex> import Ravix.RQL.Tokens.Condition
iex> between("field", [15,25])
"""
@spec between(atom() | String.t(), list()) :: Condition.t()
def between(field_name, values) do
%Condition{
token: :between,
field: field_name,
params: values
}
end
end
# Source: lib/rql/tokens/conditions.ex
defmodule Waffle.Storage.Google.UrlV2 do
@moduledoc """
This is an implementation of the v2 URL signing for Google Cloud Storage. See
[the Google documentation](https://cloud.google.com/storage/docs/access-control/signed-urls-v2)
for more details.
The bulk of the major logic is taken from Martide's `arc_gcs` work:
https://github.com/martide/arc_gcs.
"""
use Waffle.Storage.Google.Url
alias Waffle.Types
alias Waffle.Storage.Google.{CloudStorage, Util}
# Default expiration time is 3600 seconds, or 1 hour
@default_expiry 3600
# It's unlikely, but in the event that someone accidentally tries to give a
# zero or negative expiration time, this will be used to correct that mistake
@min_expiry 1
# Maximum expiration time is 7 days from the creation of the signed URL
@max_expiry 604800
# The official Google Cloud Storage host
@endpoint "storage.googleapis.com"
@doc """
Returns the amount of time, in seconds, before a signed URL becomes invalid.
Assumes the key for the option is `:expires_in`.
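Values outside the allowed range are clamped to it; for example:
iex> Waffle.Storage.Google.UrlV2.expiry(expires_in: 999_999_999)
604800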
"""
@spec expiry(Keyword.t) :: pos_integer
def expiry(opts \\ []) do
case Util.option(opts, :expires_in, @default_expiry) do
val when val < @min_expiry -> @min_expiry
val when val > @max_expiry -> @max_expiry
val -> val
end
end
@doc """
Determines whether or not the URL should be signed. Assumes the key for the
option is `:signed`.
"""
@spec signed?(Keyword.t) :: boolean
def signed?(opts \\ []), do: Util.option(opts, :signed, false)
@doc """
Returns the remote asset host. The config key is assumed to be `:asset_host`.
"""
@spec endpoint(Keyword.t) :: String.t
def endpoint(opts \\ []) do
opts
|> Util.option(:asset_host, @endpoint)
|> Util.var()
end
@impl Waffle.Storage.Google.Url
def build(definition, version, meta, options) do
path = CloudStorage.path_for(definition, version, meta)
if signed?(options) do
build_signed_url(definition, path, options)
else
build_url(definition, path)
end
end
@spec build_url(Types.definition, String.t) :: String.t
defp build_url(definition, path) do
%URI{
host: endpoint(),
path: build_path(definition, path),
scheme: "https"
}
|> URI.to_string()
end
@spec build_signed_url(Types.definition, String.t, Keyword.t) :: String.t
defp build_signed_url(definition, path, options) do
{:ok, client_id} = Goth.Config.get(:client_email)
expiration = System.os_time(:second) + expiry(options)
signature = definition
|> build_path(path)
|> canonical_request(expiration)
|> sign_request()
base_url = build_url(definition, path)
"#{base_url}?GoogleAccessId=#{client_id}&Expires=#{expiration}&Signature=#{signature}"
end
@spec build_path(Types.definition, String.t) :: String.t
defp build_path(definition, path) do
definition
|> bucket_and_path(path)
|> Util.prepend_slash()
|> URI.encode()
end
@spec bucket_and_path(Types.definition, String.t) :: String.t
defp bucket_and_path(definition, path) do
definition
|> CloudStorage.bucket()
|> Path.join(path)
end
@spec canonical_request(String.t, pos_integer) :: String.t
defp canonical_request(resource, expiration) do
"GET\n\n\n#{expiration}\n#{resource}"
end
@spec sign_request(String.t) :: String.t
defp sign_request(request) do
{:ok, pem_bin} = Goth.Config.get("private_key")
[pem_key_data] = :public_key.pem_decode(pem_bin)
otp_release = System.otp_release() |> String.to_integer()
rsa_key =
case otp_release do
n when n >= 21 ->
:public_key.pem_entry_decode(pem_key_data)
_ ->
pem_key = :public_key.pem_entry_decode(pem_key_data)
:public_key.der_decode(:RSAPrivateKey, elem(pem_key, 3))
end
request
|> :public_key.sign(:sha256, rsa_key)
|> Base.encode64()
|> URI.encode_www_form()
end
end
# Source: lib/waffle/storage/google/url_v2.ex
defmodule AshPolicyAuthorizer.Authorizer do
@moduledoc false
defstruct [
:actor,
:resource,
:query,
:changeset,
:data,
:action,
:api,
:verbose?,
:scenarios,
:real_scenarios,
:check_scenarios,
:access_type,
policies: [],
facts: %{true => true, false => false},
data_facts: %{}
]
@type t :: %__MODULE__{}
alias AshPolicyAuthorizer.{Checker, Policy}
require Logger
@check_schema [
check: [
type: {:custom, __MODULE__, :validate_check, []},
required: true,
doc: """
A check is a tuple of `{module, keyword}`.
The module must implement the `AshPolicyAuthorizer.Check` behaviour.
Generally, you won't be passing `{module, opts}`, but will use one
of the provided functions that return that, like `always()` or
`actor_attribute_matches_record(:foo, :bar)`. To make custom ones
define a module that implements the `AshPolicyAuthorizer.Check` behaviour,
put a convenience function in that module that returns {module, opts}, and
import that into your resource.
```elixir
defmodule MyResource do
use Ash.Resource, authorizers: [AshPolicyAuthorizer.Authorizer]
import MyCustomCheck
policies do
...
policy do
authorize_if my_custom_check(:foo)
end
end
end
```
"""
],
name: [
type: :string,
required: false,
doc: "A short name or description for the check, used when explaining authorization results"
]
]
@authorize_if %Ash.Dsl.Entity{
name: :authorize_if,
describe: "If the check is true, the request is authorized, otherwise run remaining checks.",
args: [:check],
schema: @check_schema,
examples: [
"authorize_if logged_in()",
"authorize_if actor_attribute_matches_record(:group, :group)"
],
target: AshPolicyAuthorizer.Policy.Check,
transform: {AshPolicyAuthorizer.Policy.Check, :transform, []},
auto_set_fields: [
type: :authorize_if
]
}
@forbid_if %Ash.Dsl.Entity{
name: :forbid_if,
describe: "If the check is true, the request is forbidden, otherwise run remaining checks.",
args: [:check],
schema: @check_schema,
target: AshPolicyAuthorizer.Policy.Check,
transform: {AshPolicyAuthorizer.Policy.Check, :transform, []},
examples: [
"forbid_if not_logged_in()",
"forbid_if actor_attribute_matches_record(:group, :blacklisted_groups)"
],
auto_set_fields: [
type: :forbid_if
]
}
@authorize_unless %Ash.Dsl.Entity{
name: :authorize_unless,
describe: "If the check is false, the request is authorized, otherwise run remaining checks.",
args: [:check],
schema: @check_schema,
target: AshPolicyAuthorizer.Policy.Check,
transform: {AshPolicyAuthorizer.Policy.Check, :transform, []},
examples: [
"authorize_unless not_logged_in()",
"authorize_unless actor_attribute_matches_record(:group, :blacklisted_groups)"
],
auto_set_fields: [
type: :authorize_unless
]
}
@forbid_unless %Ash.Dsl.Entity{
name: :forbid_unless,
describe: "If the check is true, the request is forbidden, otherwise run remaining checks.",
args: [:check],
schema: @check_schema,
target: AshPolicyAuthorizer.Policy.Check,
transform: {AshPolicyAuthorizer.Policy.Check, :transform, []},
examples: [
"forbid_unless logged_in()",
"forbid_unless actor_attribute_matches_record(:group, :group)"
],
auto_set_fields: [
type: :forbid_unless
]
}
@policy %Ash.Dsl.Entity{
name: :policy,
describe: """
A policy has a name, a condition, and a list of checks.
Checks apply logically in the order they are specified, from top to bottom.
If no check explicitly authorizes the request, then the request is forbidden.
This means that, if you want to "blacklist" instead of "whitelist", you likely
want to add an `authorize_if always()` at the bottom of your policy, like so:
```elixir
policy action_type(:read) do
forbid_if not_logged_in()
forbid_if user_is_denylisted()
forbid_if user_is_in_denylisted_group()
authorize_if always()
end
```
If the policy should always run, use the `always()` check, like so:
```elixir
policy always() do
...
end
```
""",
schema: [
description: [
type: :string,
doc: "A description for the policy, used when explaining authorization results",
required: true
],
bypass?: [
type: :boolean,
doc: "If `true`, and the policy passes, no further policies will be run",
default: false
],
condition: [
type: {:custom, __MODULE__, :validate_condition, []},
doc: """
A check or list of checks that must be true in order for this policy to apply.
If the policy does not apply, it is not run, and some other policy
will need to authorize the request. If no policies apply, the request
is forbidden. If multiple policies apply, they must each authorize the
request.
"""
]
],
args: [:condition],
target: AshPolicyAuthorizer.Policy,
entities: [
policies: [
@authorize_if,
@forbid_if,
@authorize_unless,
@forbid_unless
]
]
}
@policies %Ash.Dsl.Section{
name: :policies,
describe: """
A section for declaring authorization policies.
Each policy that applies must pass independently in order for the
request to be authorized.
""",
entities: [
@policy
],
imports: [
AshPolicyAuthorizer.Check.BuiltInChecks
],
schema: [
access_type: [
type: {:one_of, [:strict, :filter, :runtime]},
default: :filter,
doc: """
There are three choices for access_type:
* `:filter` - this is probably what you want. Automatically removes unauthorized data by altering the request filter.
* `:strict` - authorization uses *only* the request context, failing when unknown.
* `:runtime` - authorization tries the strict check first, and if it cannot decide, it fetches the records and checks authorization at runtime.
"""
]
]
}
use Ash.Dsl.Extension, sections: [@policies]
@behaviour Ash.Authorizer
@doc false
def validate_check({module, opts}) do
if Ash.implements_behaviour?(module, AshPolicyAuthorizer.Check) do
{:ok, {module, opts}}
else
{:error, "#{inspect({module, opts})} is not a valid check"}
end
end
def validate_check(other) do
{:error, "#{inspect(other)} is not a valid check"}
end
def validate_condition(conditions) when is_list(conditions) do
Enum.reduce_while(conditions, {:ok, []}, fn condition, {:ok, conditions} ->
{condition, opts} =
case condition do
{condition, opts} -> {condition, opts}
condition -> {condition, []}
end
if Ash.implements_behaviour?(condition, AshPolicyAuthorizer.Check) do
{:cont, {:ok, [{condition, opts} | conditions]}}
else
{:halt, {:error, "Expected all conditions to be valid checks"}}
end
end)
end
def validate_condition(condition) do
validate_condition([condition])
end
@impl true
def initial_state(actor, resource, action, verbose?) do
%__MODULE__{
resource: resource,
actor: actor,
action: action,
verbose?: verbose?
}
end
@impl true
def strict_check_context(_authorizer) do
[:query, :changeset, :api, :resource]
end
@impl true
def check_context(_authorizer) do
[:query, :changeset, :data, :api, :resource]
end
@impl true
def check(authorizer, context) do
check_result(%{authorizer | data: context.data})
end
@impl true
def strict_check(authorizer, context) do
access_type = AshPolicyAuthorizer.access_type(authorizer.resource)
%{
authorizer
| query: context.query,
changeset: context.changeset,
api: context.api,
access_type: access_type
}
|> get_policies()
|> do_strict_check_facts()
|> strict_check_result()
end
defp strict_filter(authorizer) do
{filterable, require_check} =
authorizer.scenarios
|> Enum.split_with(fn scenario ->
Enum.all?(scenario, fn {{check_module, opts}, _value} ->
AshPolicyAuthorizer.Policy.fetch_fact(authorizer.facts, {check_module, opts}) != :error ||
check_module.type() == :filter
end)
end)
filter = strict_filters(filterable, authorizer)
case {filter, require_check} do
{[], []} ->
raise "unreachable"
{_filters, []} ->
case filter do
[filter] ->
log(authorizer, "filtering with: #{inspect(filter)}, authorization complete")
{:filter, filter}
filters ->
log(authorizer, "filtering with: #{inspect(or: filter)}, authorization complete")
{:filter, [or: filters]}
end
{_filters, _require_check} ->
case global_filters(authorizer) do
nil ->
maybe_forbid_strict(authorizer)
{[single_filter], scenarios_without_global} ->
log(
authorizer,
"filtering with: #{inspect(single_filter)}, continuing authorization process"
)
{:filter_and_continue, single_filter,
%{authorizer | check_scenarios: scenarios_without_global}}
{filters, scenarios_without_global} ->
log(
authorizer,
"filtering with: #{inspect(and: filters)}, continuing authorization process"
)
{:filter_and_continue, [and: filters],
%{authorizer | check_scenarios: scenarios_without_global}}
end
end
end
defp strict_filters(filterable, authorizer) do
filterable
|> Enum.reduce([], fn scenario, or_filters ->
scenario
|> Enum.filter(fn {{check_module, _check_opts}, _} ->
check_module.type() == :filter
end)
|> Enum.reject(fn {{check_module, check_opts}, result} ->
match?({:ok, ^result}, Policy.fetch_fact(authorizer.facts, {check_module, check_opts}))
end)
|> Enum.map(fn
{{check_module, check_opts}, true} ->
check_module.auto_filter(authorizer.actor, authorizer, check_opts)
{{check_module, check_opts}, false} ->
[not: check_module.auto_filter(authorizer.actor, authorizer, check_opts)]
end)
|> case do
[] -> or_filters
filter -> [[and: filter] | or_filters]
end
end)
end
defp maybe_forbid_strict(authorizer) do
if authorizer.access_type == :runtime do
log(authorizer, "could not determine authorization filter, checking at runtime")
{:continue, %{authorizer | check_scenarios: authorizer.scenarios}}
else
log(authorizer, "could not determine authorization filter, not allowed to check at runtime")
{:error, :forbidden}
end
end
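# Finds filter-type checks that hold the same value in every scenario,
# hoists each into a single global filter, and removes the hoisted clause
# from the remaining scenarios.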
defp global_filters(authorizer, scenarios \\ nil, filter \\ []) do
scenarios = scenarios || authorizer.scenarios
global_check_value =
Enum.find_value(scenarios, fn scenario ->
Enum.find(scenario, fn {{check_module, _opts} = check, value} ->
check_module.type == :filter &&
Enum.all?(scenarios, &(Map.fetch(&1, check) == {:ok, value}))
end)
end)
case global_check_value do
nil ->
case filter do
[] ->
nil
filter ->
{filter, scenarios}
end
{{check_module, check_opts}, value} ->
required_status = check_opts[:__auto_filter__] && value
additional_filter =
if required_status do
check_module.auto_filter(authorizer.actor, authorizer, check_opts)
else
[not: check_module.auto_filter(authorizer.actor, authorizer, check_opts)]
end
scenarios = remove_clause(authorizer.scenarios, {check_module, check_opts})
new_facts = Map.put(authorizer.facts, {check_module, check_opts}, required_status)
global_filters(%{authorizer | facts: new_facts}, scenarios, [additional_filter | filter])
end
end
defp remove_clause(scenarios, clause) do
Enum.map(scenarios, &Map.delete(&1, clause))
end
defp check_result(%{access_type: :filter} = authorizer) do
case authorizer.check_scenarios do
[] ->
raise "unreachable"
[scenario] ->
case scenario_to_check_filter(scenario, authorizer) do
{:ok, filter} ->
{:filter, filter}
{:error, error} ->
{:error, error}
end
scenarios ->
scenarios_to_filter(scenarios, authorizer)
end
end
defp check_result(%{access_type: :runtime} = authorizer) do
Enum.reduce_while(authorizer.data, {:ok, authorizer}, fn record, {:ok, authorizer} ->
authorizer.scenarios
|> Enum.reject(&scenario_impossible?(&1, authorizer, record))
|> case do
[] ->
{:halt, {:error, :forbidden, authorizer}}
scenarios ->
scenarios
|> AshPolicyAuthorizer.SatSolver.remove_irrelevant_clauses()
|> do_check_result(authorizer, record)
end
end)
end
defp scenarios_to_filter(scenarios, authorizer) do
result =
Enum.reduce_while(scenarios, {:ok, []}, fn scenario, {:ok, filters} ->
case scenario_to_check_filter(scenario, authorizer) do
{:ok, filter} -> {:cont, {:ok, [filter | filters]}}
{:error, error} -> {:halt, {:error, error}}
end
end)
case result do
{:ok, filters} -> {:filter, [or: filters]}
{:error, error} -> {:error, error}
end
end
defp do_check_result(cleaned_scenarios, authorizer, record) do
if Enum.any?(cleaned_scenarios, &scenario_applies?(&1, authorizer, record)) do
{:cont, {:ok, authorizer}}
else
check_facts_until_known(cleaned_scenarios, authorizer, record)
end
end
defp scenario_applies?(scenario, authorizer, record) do
Enum.all?(scenario, fn {clause, requirement} ->
case Map.fetch(authorizer.facts, clause) do
{:ok, ^requirement} ->
true
_ ->
scenario_applies_to_record?(authorizer, clause, record)
end
end)
end
defp scenario_applies_to_record?(authorizer, clause, record) do
case Map.fetch(authorizer.data_facts, clause) do
{:ok, ids_that_match} ->
pkey = Map.take(record, Ash.Resource.primary_key(authorizer.resource))
MapSet.member?(ids_that_match, pkey)
_ ->
false
end
end
defp scenario_impossible?(scenario, authorizer, record) do
Enum.any?(scenario, fn {clause, requirement} ->
case Map.fetch(authorizer.facts, clause) do
{:ok, value} when value != requirement ->
true
_ ->
scenario_impossible_by_data?(authorizer, clause, record)
end
end)
end
defp scenario_impossible_by_data?(authorizer, clause, record) do
case Map.fetch(authorizer.data_facts, clause) do
{:ok, ids_that_match} ->
pkey = Map.take(record, Ash.Resource.primary_key(authorizer.resource))
not MapSet.member?(ids_that_match, pkey)
_ ->
false
end
end
defp check_facts_until_known(scenarios, authorizer, record) do
new_authorizer =
scenarios
|> find_fact_to_check(authorizer)
|> check_fact(authorizer)
scenarios
|> Enum.reject(&scenario_impossible?(&1, new_authorizer, record))
|> case do
[] ->
log(authorizer, "Checked all facts, no real scenarios")
{:halt, {:forbidden, authorizer}}
scenarios ->
cleaned_scenarios = AshPolicyAuthorizer.SatSolver.remove_irrelevant_clauses(scenarios)
if Enum.any?(cleaned_scenarios, &scenario_applies?(&1, new_authorizer, record)) do
{:cont, {:ok, new_authorizer}}
else
check_facts_until_known(scenarios, new_authorizer, record)
end
end
end
defp check_fact({check_module, check_opts}, authorizer) do
if check_module.type() == :simple do
raise "Assumption failed"
else
authorized_records =
check_module.check(authorizer.actor, authorizer.data, authorizer, check_opts)
pkey = Ash.Resource.primary_key(authorizer.resource)
pkeys = MapSet.new(authorized_records, &Map.take(&1, pkey))
%{
authorizer
| data_facts: Map.put(authorizer.data_facts, {check_module, check_opts}, pkeys)
}
end
end
defp find_fact_to_check(scenarios, authorizer) do
scenarios
|> Enum.concat()
|> Enum.find(fn {key, _value} ->
not Map.has_key?(authorizer.facts, key) and not Map.has_key?(authorizer.data_facts, key)
end)
|> case do
nil -> raise "Assumption failed"
{key, _value} -> key
end
end
defp scenario_to_check_filter(scenario, authorizer) do
filters =
Enum.reduce_while(scenario, {:ok, []}, fn {{check_module, check_opts}, required_value},
{:ok, filters} ->
new_filter =
case check_module.type() do
:simple ->
filter_for_simple_check(authorizer, check_module, check_opts, required_value)
:filter ->
{:ok, check_module.auto_filter(authorizer.actor, authorizer, check_opts)}
:manual ->
filter_for_manual_check(authorizer, check_module, check_opts)
end
case new_filter do
{:ok, nil} -> {:cont, {:ok, filters}}
{:ok, new_filter} -> {:cont, {:ok, [new_filter | filters]}}
{:error, error} -> {:halt, {:error, error}}
end
end)
case filters do
{:ok, [filter]} -> {:ok, filter}
{:ok, filters} -> {:ok, [and: filters]}
{:error, error} -> {:error, error}
end
end
defp filter_for_manual_check(authorizer, check_module, check_opts) do
case check_module.check(authorizer.actor, authorizer.data, authorizer, check_opts) do
{:ok, true} ->
{:ok, nil}
{:ok, records} ->
primary_keys_to_filter(authorizer.resource, records)
end
end
defp filter_for_simple_check(authorizer, check_module, check_opts, required_value) do
case AshPolicyAuthorizer.Policy.fetch_fact(
authorizer.facts,
{check_module, check_opts}
) do
:error ->
{:ok, false}
{:ok, value} ->
if value == required_value do
{:ok, nil}
else
{:ok, false}
end
end
end
defp primary_keys_to_filter(resource, records) do
pkey = Ash.Resource.primary_key(resource)
Enum.map(records, fn record ->
record
|> Map.take(pkey)
|> Enum.to_list()
end)
|> case do
[single] -> single
pkey_filters -> [or: pkey_filters]
end
end
defp strict_check_result(authorizer) do
case Checker.strict_check_scenarios(authorizer) do
{:ok, scenarios} ->
report_scenarios(authorizer, scenarios, "Potential Scenarios")
case Checker.find_real_scenarios(scenarios, authorizer.facts) do
[] ->
maybe_strict_filter(authorizer, scenarios)
real_scenarios ->
report_scenarios(authorizer, real_scenarios, "Real Scenarios")
:authorized
end
{:error, :unsatisfiable} ->
{:error,
AshPolicyAuthorizer.Forbidden.exception(
verbose?: authorizer.verbose?,
facts: authorizer.facts,
scenarios: []
)}
end
end
defp maybe_strict_filter(authorizer, scenarios) do
if authorizer.access_type == :strict do
log(authorizer, "No real scenarios, forbidden due to strict access type")
{:error,
AshPolicyAuthorizer.Forbidden.exception(
verbose?: authorizer.verbose?,
facts: authorizer.facts,
scenarios: scenarios
)}
else
log(authorizer, "No real scenarios, attempting to filter")
strict_filter(%{authorizer | scenarios: scenarios})
end
end
defp do_strict_check_facts(authorizer) do
new_facts = Checker.strict_check_facts(authorizer)
%{authorizer | facts: new_facts}
end
defp get_policies(authorizer) do
%{authorizer | policies: AshPolicyAuthorizer.policies(authorizer.resource)}
end
defp report_scenarios(%{verbose?: true}, scenarios, title) do
scenario_description =
scenarios
|> Enum.map(fn scenario ->
scenario
|> Enum.reject(fn {{module, _}, _} ->
module == AshPolicyAuthorizer.Check.Static
end)
|> Enum.map(fn {{module, opts}, requirement} ->
[" ", module.describe(opts) <> " => #{requirement}"]
end)
|> Enum.intersperse("\n")
end)
|> Enum.intersperse("\n--\n")
Logger.info([title, "\n", scenario_description])
end
defp report_scenarios(_, _, _), do: :ok
defp log(%{verbose?: true}, message) do
Logger.info(message)
end
defp log(_, _), do: :ok
end
# Source: lib/ash_policy_authorizer/authorizer.ex
defmodule Vela do
@moduledoc """
`Vela` is a tiny library providing easy management of
validated cached state with some history.
Including `use Vela` in your module would turn the module
into struct, setting field accordingly to the specification,
passed as a parameter.
`Vela` allows the following configurable parameters per field:
- `limit` — length of the series to keep (default: `5`)
- `compare_by` — comparator extraction function to extract the value, to be used for
comparison, from the underlying terms (default: `& &1`)
- `comparator` — the function that accepts two values and returns `true` if
the first value precedes the second, used in `Vela.δ/1` (default: `&</2`)
- `threshold` — if specified, the inserted value is checked to fit in `δ ± threshold`;
whether it does not, it goes to `errors` (`float() | nil`, default: `nil`)
- `validator` — the function to be used to invalidate the accumulated values (default:
`fn _ -> true end`)
- `sorter` — the function to be used to sort values within one serie, if
none is given, it sorts in the natural order, FIFO, newest is the one to `pop`
- `corrector` — the function to be used to correct the values rejected by `validator`;
the function should return `{:ok, corrected_value}` to enforce insertion into `Vela`,
or `:error` if the value cannot be corrected and should be nevertheless rejected
- `errors` — number of errors to keep (default: `5`)
Also, Vela accepts `:mη` keyword parameter for the cases when the consumer needs
the very custom meta to be passed to the struct.
`Vela` implements `Access` behaviour.
## Usage
defmodule Vela.Test do
use Vela,
series1: [limit: 3, errors: 1], # no validation
series2: [limit: 2, validator: Vela.Test],
series3: [
compare_by: &Vela.Test.comparator/1,
validator: &Vela.Test.validator/2
]
@behaviour Vela.Validator
@impl Vela.Validator
def valid?(_serie, value), do: value > 0
@spec comparator(%{created_at: DateTime.t()}) :: DateTime.t()
def comparator(%{created_at: created_at}),
do: created_at
@spec validator(value :: t()) :: boolean()
def validator(value),
do: is_integer(value) and value > 300
end
In the example above, before any structure update attempt
(via `Access`) this `valid?/2` function would be called.
If it returns `true`, the value gets inserted / updated, and
the series behind is truncated if needed. If it returns `false`,
the state _is not updated_, and the value is put into the map
under `__errors__` key of the struct. The length of errors
is also configurable via `errors:` keyword parameter.
"""
@typedoc "Represents a key in the Vela structure"
@type serie :: atom()
@typedoc "Represents a value in the Vela structure"
@type value :: any()
@typedoc "Represents a key-value pair in errors and unmatched"
@type kv :: {serie(), value()}
@typedoc """
The type of validator function to be passed as `:validator` keyword parameter
to the series.
"""
@type validator :: (value() -> boolean()) | (serie(), value() -> boolean())
@typedoc """
The type of comparator function to be passed as `:comparator` keyword parameter
to the series.
"""
@type comparator :: (value(), value() -> boolean())
@typedoc """
The type of sorter function to be passed as `:sorter` keyword parameter
to the series.
"""
@type sorter :: (value(), value() -> boolean())
@typedoc """
The type of corrector function to be passed as `:corrector` keyword parameter
to the series.
"""
@type corrector :: (t(), serie(), value() -> {:ok, value()} | :error)
@typedoc "Represents the struct created by this behaviour module"
@type t :: %{
:__struct__ => atom(),
:__errors__ => [kv()],
:__meta__ => keyword(),
optional(serie()) => [value()]
}
@typedoc "Options allowed in series configuration"
@type option ::
{:limit, non_neg_integer()}
| {:type, any()}
| {:initial, [term()]}
| {:compare_by, (value() -> any())}
| {:comparator, comparator()}
| {:threshold, number()}
| {:validator, validator()}
| {:sorter, sorter()}
| {:corrector, corrector()}
| {:errors, keyword()}
@typedoc "Series configuration"
@type options :: [option()]
@doc """
Returns a keyword with series as keys and the hottest value as a value
_Example:_
```elixir
defmodule AB do
use Vela, a: [], b: []
end
AB.slice(struct(AB, [a: [1, 2], b: [3, 4]]))
#⇒ [a: 1, b: 3]
```
"""
@callback slice(t()) :: [kv()]
@doc """
Removes obsoleted elements from the series using the validator given as a second parameter,
or a default validator for this serie.
"""
@callback purge(t(), nil | validator()) :: t()
@doc """
Returns `{min, max}` tuple for each serie, using the comparator given as a second parameter,
or a default comparator for this serie.
"""
@callback delta(t(), nil | (serie(), value(), value() -> boolean())) :: [
{atom(), {value(), value()}}
]
@doc "Checks two velas given as an input for equality"
@callback equal?(t(), t()) :: boolean()
use Boundary, exports: [Access, AccessError, Stubs, Macros]
alias Vela.Stubs
import Vela.Macros
@doc false
defmacro __using__(opts) when is_list(opts) do
fields = Keyword.keys(opts)
{meta, opts} = Keyword.pop(opts, :mη, [])
typedefs = for {k, v} <- opts, do: {k, v[:type]}
typedef = use_types(typedefs)
opts = for {serie, desc} <- opts, do: {serie, Keyword.delete(desc, :type)}
quote generated: true, location: :keep do
@compile {:inline, series: 0, config: 0, config: 1, config: 2}
@after_compile {Vela, :implement_enumerable}
import Vela.Macros
@meta unquote(meta)
@fields unquote(fields)
@type t :: unquote(typedef)
@config use_config(unquote(opts))
@field_count Enum.count(@fields)
fields_index = Enum.with_index(@fields)
@fields_ordered Enum.sort(
@fields,
Keyword.get(@meta, :order_by, &(fields_index[&1] <= fields_index[&2]))
)
defstruct [
{:__errors__, []},
{:__meta__, @meta}
| Enum.zip(@fields_ordered, Stream.cycle([[]]))
]
main_ast()
end
end
defmacro __using__(opts) do
quote generated: true,
location: :keep,
bind_quoted: [opts: opts] do
@compile {:inline, series: 0, config: 0, config: 1, config: 2}
@after_compile {Vela, :implement_enumerable}
import Vela.Macros
{meta, opts} = Keyword.pop(opts, :mη, [])
typedefs = for {k, v} <- opts, do: {k, v[:type]}
typedef = use_types(typedefs)
@meta meta
@fields Keyword.keys(typedefs)
@type t :: unquote(typedef)
@config use_config(opts)
@field_count Enum.count(@fields)
fields_index = Enum.with_index(@fields)
@fields_ordered Enum.sort(
@fields,
Keyword.get(meta, :order_by, &(fields_index[&1] <= fields_index[&2]))
)
defstruct [
{:__errors__, []},
{:__meta__, meta}
| Enum.zip(@fields_ordered, Stream.cycle([[]]))
]
main_ast()
end
end
defmacrop do_implement_enumerable(module) do
quote location: :keep, bind_quoted: [module: module] do
defimpl Enumerable, for: module do
@moduledoc false
@module module
@fields @module.series()
@field_count Enum.count(@fields)
def count(%@module{} = vela), do: {:ok, @field_count}
Enum.each(@fields, fn field ->
def member?(%@module{} = vela, unquote(field)), do: {:ok, true}
end)
def member?(%@module{} = vela, _), do: {:ok, false}
def reduce(vela, state_acc, fun, inner_acc \\ @fields)
def reduce(%@module{} = vela, {:halt, acc}, _fun, _inner_acc),
do: {:halted, acc}
def reduce(%@module{} = vela, {:suspend, acc}, fun, inner_acc),
do: {:suspended, acc, &reduce(vela, &1, fun, inner_acc)}
def reduce(%@module{} = vela, {:cont, acc}, _fun, []),
do: {:done, acc}
Enum.each(@fields, fn field ->
def reduce(%@module{unquote(field) => list} = vela, {:cont, acc}, fun, [
unquote(field) | tail
]),
do: reduce(vela, fun.({unquote(field), list}, acc), fun, tail)
end)
def slice(%@module{}), do: {:error, @module}
end
end
end
@doc false
def implement_enumerable(%Macro.Env{module: module}, _bytecode),
do: do_implement_enumerable(module)
@spec map(vela :: t(), (kv() -> kv())) :: t()
@doc """
Maps the series using `fun` and returns the new `Vela` instance with series mapped
"""
def map(%mod{} = vela, fun) do
mapped =
vela
|> Map.take(mod.series())
|> Enum.map(fun)
struct(vela, mapped)
end
@spec flat_map(vela :: t(), (kv() -> kv()) | (serie(), value() -> kv())) :: [kv()]
@doc """
Flat maps the series using `fun` and returns the keyword with
duplicated keys and mapped values.
_Example:_
```elixir
defmodule EO do
use Vela,
even: [limit: 2],
odd: [limit: 2]
def flat_map(%EO{} = v),
do: Vela.flat_map(v, & {&1, &2+1})
end
EO.flat_map(struct(EO, [even: [2, 4], odd: [1, 3]]))
#⇒ [even: 3, even: 5, odd: 2, odd: 4]
```
"""
def flat_map(vela, fun \\ & &1)
def flat_map(%mod{} = vela, fun) when is_function(fun, 2) do
for {serie, list} <- Map.take(vela, mod.series()),
value <- list,
do: fun.(serie, value)
end
def flat_map(%_mod{} = vela, fun) when is_function(fun, 1),
do: flat_map(vela, &fun.({&1, &2}))
@spec validator!(data :: t(), serie :: serie()) :: validator()
@doc false
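# Composes the serie's configured validator with a threshold check derived
# from the serie's current `{min, max}` band (see `δ/2`).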
def validator!(%type{} = data, serie) do
validator = type.config(serie, :validator, data)
compare_by = type.config(serie, :compare_by, data)
within_threshold =
within_threshold?(Vela.δ(data)[serie], type.config(serie, :threshold, data), compare_by)
validator = make_arity_2(validator)
fn serie, value ->
within_threshold.(value) && validator.(serie, value)
end
end
@spec δ(t(), nil | (value(), value() -> boolean())) ::
        [{atom(), {value(), value()}}]
@doc """
Returns `{min, max}` tuple for each serie, using the comparator given as a second parameter,
or a default comparator for each serie.
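_Example (illustrative; assumes `MyVela` with a single numeric serie `:a`):_
```elixir
v = struct(MyVela, a: [3, 1, 2])
Vela.δ(v, &<=/2)
#⇒ [a: {1, 3}]
```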
"""
@doc since: "0.7.0"
def δ(%type{} = vela, comparator \\ nil) do
for {serie, list} <- vela,
serie in type.series(),
{compare_by, comparator} =
if(is_nil(comparator),
do: {type.config(serie, :compare_by, vela), type.config(serie, :comparator, vela)},
else: {&Stubs.itself/1, comparator}
) do
min_max =
Enum.reduce(list, {nil, nil}, fn
v, {nil, nil} ->
{v, v}
v, {min, max} ->
{
if(comparator.(compare_by.(v), compare_by.(min)), do: v, else: min),
if(comparator.(compare_by.(max), compare_by.(v)), do: v, else: max)
}
end)
{serie, min_max}
end
end
@spec purge(vela :: t(), validator :: nil | validator()) :: t()
@doc false
@doc since: "0.9.2"
def purge(%type{} = vela, validator \\ nil) do
purged =
for {serie, list} <- vela,
serie in type.series(),
validator = if(is_nil(validator), do: Vela.validator!(vela, serie), else: validator),
do: {serie, Enum.filter(list, &validator.(serie, &1))}
struct(vela, purged)
end
@spec put(vela :: t(), serie :: serie(), value :: value()) :: t()
@doc """
Inserts the new value into the serie, going through all the validation and sorting.
If the value has not passed validation, it’s put into `:__errors__` internal list.
If the new length of the serie exceeds the limit set for this serie, the last value
(after sorting) gets discarded.
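_Example (illustrative; assumes a `MyVela` module defined via `use Vela` with a serie `:rates`):_
```elixir
struct(MyVela)
|> Vela.put(:rates, 1.0)
|> Vela.put(:rates, 2.0)
```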
"""
def put(%_{} = vela, serie, value),
do: put_in(vela, [serie], value)
@spec equal?(v1 :: t(), v2 :: t()) :: boolean()
@doc false
@doc since: "0.9.2"
def equal?(%type{} = v1, %type{} = v2) do
[v1, v2]
|> Enum.map(&Vela.flat_map/1)
|> Enum.reduce(&do_equal?/2)
end
def equal?(_, _), do: false
@spec do_equal?(kw1 :: [Vela.kv()], kw2 :: [Vela.kv()]) :: boolean()
defp do_equal?(kw1, kw2) when length(kw1) != length(kw2), do: false
defp do_equal?(kw1, kw2) do
[kw1, kw2]
|> Enum.zip()
|> Enum.reduce_while(true, fn
{{serie, %mod{} = value1}, {serie, %mod{} = value2}}, true ->
equal? =
cond do
mod.__info__(:functions)[:equal?] == 2 -> mod.equal?(value1, value2)
mod.__info__(:functions)[:compare] == 2 -> mod.compare(value1, value2) == :eq
true -> value1 == value2
end
if equal?, do: {:cont, true}, else: {:halt, false}
{{serie, value1}, {serie, value2}}, true ->
if value1 == value2, do: {:cont, true}, else: {:halt, false}
_, true ->
{:halt, false}
end)
end
@spec within_threshold?({value(), value()}, nil | number(), (value() -> number())) ::
validator()
defp within_threshold?(_minmax, nil, _compare_by), do: fn _ -> true end
defp within_threshold?({nil, nil}, _threshold, _compare_by), do: fn _ -> true end
defp within_threshold?({min, max}, threshold, compare_by) do
[min, max] = Enum.map([min, max], compare_by)
band = max - min
min_threshold = if band > 0, do: min - band * threshold, else: min * ((100 - threshold) / 100)
max_threshold = if band > 0, do: max + band * threshold, else: max * ((100 + threshold) / 100)
&(compare_by.(&1) >= min_threshold && compare_by.(&1) <= max_threshold)
end
defmodule Stubs do
@moduledoc false
@spec itself(Vela.value()) :: Vela.value()
def itself(v), do: v
@spec validate(Vela.value()) :: boolean()
def validate(_value), do: true
@spec compare(Vela.value(), Vela.value()) :: boolean()
def compare(v1, v2), do: v1 < v2
@spec sort(Vela.value(), Vela.value()) :: boolean()
def sort(_v1, _v2), do: true
@spec correct(Vela.t(), Vela.serie(), Vela.value()) :: {:ok, Vela.value()} | :error
def correct(_vela, _serie, _value), do: :error
end
end
# File: lib/vela.ex
defmodule Shared.Zeitperiode do
@moduledoc """
Represents a working-time period or shift.
"""
@type t :: Timex.Interval.t()
@type interval :: [
start: DateTime.t() | NaiveDateTime.t(),
ende: DateTime.t() | NaiveDateTime.t()
]
@default_base_timezone_name "Europe/Berlin"
@spec new(kalendertag :: Date.t(), von :: Time.t(), bis :: Time.t()) :: t
def new(%Date{} = kalendertag, %Time{} = von, %Time{} = bis) do
  # Structural comparison (`von < bis`) is unreliable for %Time{} structs;
  # Time.compare/2 compares semantically instead.
  case Time.compare(von, bis) do
    :lt ->
      to_interval(to_datetime(kalendertag, von), to_datetime(kalendertag, bis))
    _ ->
      # The period crosses midnight, so `bis` belongs to the following day.
      next_day = Timex.shift(kalendertag, days: 1)
      to_interval(to_datetime(kalendertag, von), to_datetime(next_day, bis))
  end
end
# The base timezone is the timezone in which the time was recorded; currently always Germany.
@spec new(von :: DateTime.t(), bis :: DateTime.t(), base_timezone_name :: String.t()) :: t
def new(%DateTime{} = von, %DateTime{} = bis, base_timezone_name) do
von = Shared.Zeitperiode.Timezone.convert(von, base_timezone_name)
bis = Shared.Zeitperiode.Timezone.convert(bis, base_timezone_name)
base_timezone_von = Shared.Zeitperiode.Timezone.timezone_info_for(von, base_timezone_name)
base_timezone_bis = Shared.Zeitperiode.Timezone.timezone_info_for(bis, base_timezone_name)
summertime_offset = base_timezone_bis.offset_std - base_timezone_von.offset_std
von_naive = von |> DateTime.to_naive()
bis_naive =
bis
|> DateTime.to_naive()
|> Timex.shift(seconds: -summertime_offset)
to_interval(von_naive, bis_naive)
end
@spec new(von :: DateTime.t(), bis :: DateTime.t()) :: t
def new(%DateTime{} = von, %DateTime{} = bis) do
# default value using `\\` produces the warning `definitions with multiple clauses and default values require a header.`
new(von, bis, @default_base_timezone_name)
end
@spec new(von :: NaiveDateTime.t(), bis :: NaiveDateTime.t()) :: t
def new(%NaiveDateTime{} = von, %NaiveDateTime{} = bis), do: to_interval(von, bis)
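@doc """
Builds a period from an ISO 8601 interval string (illustrative; the exact
accepted formats depend on `Shared.Zeit.parse/1`):

    from_interval("2021-01-01T08:00:00/2021-01-01T16:00:00")
"""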
@spec from_interval(interval :: String.t()) :: t
def from_interval(interval) when is_binary(interval) do
[start: start, ende: ende] = parse(interval)
new(start, ende)
end
@spec von(t) :: Timex.Types.valid_datetime()
def von(periode), do: periode.from
@spec bis(t) :: Timex.Types.valid_datetime()
def bis(periode), do: periode.until
@spec von_datum(t) :: Date.t()
def von_datum(periode), do: periode |> von() |> NaiveDateTime.to_date()
@spec bis_datum(t) :: Date.t()
def bis_datum(%{until: %{hour: 0, minute: 0, second: 0}} = periode) do
periode |> bis() |> NaiveDateTime.to_date() |> Timex.shift(days: -1)
end
def bis_datum(periode) do
periode |> bis() |> NaiveDateTime.to_date()
end
@spec dauer(t) :: number | {:error, any} | Timex.Duration.t()
def dauer(periode), do: duration(periode, :duration)
@spec dauer_in_stunden(t) :: number | {:error, any} | Timex.Duration.t()
def dauer_in_stunden(periode), do: duration(periode, :hours)
@spec dauer_in_minuten(t) :: number | {:error, any} | Timex.Duration.t()
def dauer_in_minuten(periode), do: duration(periode, :minutes)
@spec ueberschneidung?(periode :: t, andere_periode :: t) :: boolean
def ueberschneidung?(periode, andere_periode) do
periode.from in andere_periode || andere_periode.from in periode
end
@spec teil_von?(zu_testende_periode :: t, periode :: t) :: boolean
def teil_von?(zu_testende_periode, periode) do
zu_testende_periode.from in periode && zu_testende_periode.until in periode
end
@spec beginnt_vor?(periode1 :: t, periode2 :: t) :: boolean
def beginnt_vor?(periode1, periode2) do
NaiveDateTime.compare(periode1.from, periode2.from) == :lt
end
@spec to_string(t) :: String.t()
def to_string(periode), do: Timex.Interval.format!(periode, "%Y-%m-%d %H:%M", :strftime)
@spec to_iso8601(Timex.Interval.t()) :: binary()
def to_iso8601(%Timex.Interval{from: %NaiveDateTime{} = von, until: %NaiveDateTime{} = bis}) do
[von, bis] |> Enum.map(&NaiveDateTime.to_iso8601/1) |> Enum.join("/")
end
@spec to_iso8601(start: DateTime.t(), ende: DateTime.t()) :: binary()
def to_iso8601(start: %DateTime{} = start, ende: %DateTime{} = ende) do
[start, ende] |> Enum.map(&DateTime.to_iso8601/1) |> Enum.join("/")
end
@spec to_iso8601(start: NaiveDateTime.t(), ende: NaiveDateTime.t()) :: binary()
def to_iso8601(start: %NaiveDateTime{} = start, ende: %NaiveDateTime{} = ende) do
[start, ende] |> Enum.map(&NaiveDateTime.to_iso8601/1) |> Enum.join("/")
end
defp to_interval(von, bis) do
Timex.Interval.new(
from: von,
until: bis,
left_open: false,
right_open: true,
step: [seconds: 1]
)
end
@deprecated "Use parse/1 instead"
def parse_interval(interval) when is_binary(interval) do
parse(interval)
end
@spec parse(binary()) :: interval()
def parse(interval) when is_binary(interval) do
[start, ende] =
interval
|> String.replace("--", "/")
|> String.split("/")
[start: start |> Shared.Zeit.parse(), ende: ende |> Shared.Zeit.parse()]
end
@deprecated "Use Shared.Zeit.parse/1 instead"
def parse_time(time) when is_binary(time) do
Shared.Zeit.parse(time)
end
defp to_datetime(date, time) do
{:ok, naive_datetime} = NaiveDateTime.new(date, time)
naive_datetime
end
defp duration(periode, :duration), do: Timex.Interval.duration(periode, :duration)
defp duration(periode, :hours),
do: periode |> duration(:duration) |> Timex.Duration.to_hours()
defp duration(periode, :minutes),
do: periode |> duration(:duration) |> Timex.Duration.to_minutes() |> Float.round()
@spec dauer_der_ueberschneidung(periode1 :: Timex.Interval.t(), periode2 :: Timex.Interval.t()) ::
Timex.Duration.t()
def dauer_der_ueberschneidung(periode1, periode2) do
dauer1 = dauer(periode1)
dauer_differenz =
case Timex.Interval.difference(periode1, periode2) do
[] -> Shared.Dauer.leer()
[periode] -> dauer(periode)
[periode1, periode2] -> Shared.Dauer.addiere(dauer(periode1), dauer(periode2))
end
Shared.Dauer.subtrahiere(dauer1, dauer_differenz)
end
@doc """
Subtracts one list of time periods from another, removing from the first
list every overlap with the second list.
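Illustrative example (a break is cut out of a shift):

    basis = Shared.Zeitperiode.new(~N[2021-01-01 08:00:00], ~N[2021-01-01 16:00:00])
    pause = Shared.Zeitperiode.new(~N[2021-01-01 12:00:00], ~N[2021-01-01 12:30:00])
    Shared.Zeitperiode.differenz([basis], [pause])
    #=> two intervals: 08:00-12:00 and 12:30-16:00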
"""
@spec differenz(list(Timex.Interval.t()) | Timex.Interval.t(), list(Timex.Interval.t())) ::
list(Timex.Interval.t())
def differenz(basis_intervalle, abzuziehende_intervalle)
when is_list(basis_intervalle) and is_list(abzuziehende_intervalle) do
basis_intervalle |> Enum.flat_map(&differenz(&1, abzuziehende_intervalle))
end
def differenz(%Timex.Interval{} = basis_intervall, []) do
[basis_intervall]
end
def differenz(%Timex.Interval{} = basis_intervall, [abzuziehendes_intervall]) do
basis_intervall |> differenz(abzuziehendes_intervall)
end
def differenz(%Timex.Interval{} = basis_intervall, [abzuziehendes_intervall | rest]) do
basis_intervall |> differenz(abzuziehendes_intervall) |> differenz(rest)
end
# Subtracts a single time period from another. For nice examples, see
# https://hexdocs.pm/timex/3.5.0/Timex.Interval.html#difference/2
defdelegate differenz(basis_intervall, abzuziehendes_intervall),
to: Timex.Interval,
as: :difference
defmodule Timezone do
@spec convert(
DateTime.t(),
binary() | Timex.AmbiguousTimezoneInfo.t() | Timex.TimezoneInfo.t()
) :: DateTime.t() | Timex.AmbiguousDateTime.t()
def convert(datetime, timezone) do
case Timex.Timezone.convert(datetime, timezone) do
{:error, _} ->
  # Within the DST gap the conversion fails; retry one hour later and
  # return that conversion directly.
  shifted = %{datetime | hour: datetime.hour + 1}
  Timex.Timezone.convert(shifted, timezone)
converted ->
converted
end
end
def timezone_info_for(time, timezone_name) do
case Timex.Timezone.get(timezone_name, time) do
# 02:00 - 03:00 day of the summer time / winter time switch
{:error, _} ->
# Timex.shift/2 does not work here because it would raise the same exception
time = %{time | hour: time.hour + 1}
Timex.Timezone.get(timezone_name, time)
%Timex.TimezoneInfo{} = timezone_info ->
timezone_info
end
end
end
end
# File: lib/zeitperiode.ex
defmodule Ecto.Query.Typespec do
@moduledoc false
# A very simple type system to declare the operators and functions
# available in Ecto with their types. At runtime, the type system
# does no inference, it simply maps input types to output types.
@doc """
Defines a new Ecto.Query type.
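Illustrative example:

    deft integer
    deft float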
"""
defmacro deft(expr) do
name = extract_type(expr)
quote do
@ecto_deft [unquote(name)|@ecto_deft]
end
end
@doc """
Defines a new Ecto.Query alias.
Aliases are only used by the type system, they are not
exposed to the user.
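Illustrative example (assuming `integer` and `float` were defined via `deft`):

    defa number :: integer | float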
"""
defmacro defa({:::, _, [_, _]} = expr) do
quote bind_quoted: [expr: Macro.escape(expr)] do
{left, right} = Ecto.Query.Typespec.__defa__(__MODULE__, expr)
@ecto_defa Dict.put(@ecto_defa, left, right)
end
end
@doc """
Defines a new Ecto.Query spec.
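Illustrative example (assuming the `number` alias from `defa`):

    defs avg(number) :: number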
"""
defmacro defs({:::, _, [head, _]} = expr) do
quote bind_quoted: [expr: Macro.escape(expr), head: Macro.escape(head)] do
var!(defs, Ecto.Query.Typespec).(expr, head)
end
end
## Callbacks
defmacro __using__(_) do
quote unquote: false do
import Ecto.Query.Typespec
@before_compile Ecto.Query.Typespec
@ecto_deft []
@ecto_defs HashDict.new
@ecto_defa HashDict.new
@aggregate false
# Instead of injecting this code multiple times into the user module,
# we simply define this function at the beginning and invoke it. This
# reduces the memory consumption when compiling Ecto.Query.API from
# 270MB to 70MB.
var!(defs, Ecto.Query.Typespec) = fn expr, head ->
{name, args, guards, return, catch_all} = Ecto.Query.Typespec.__defs__(__MODULE__, expr)
arity = length(args)
if @aggregate do
@doc false
def aggregate?(unquote(name), unquote(arity)), do: true
end
@aggregate false
if catch_all do
@ecto_defs Dict.delete(@ecto_defs, {name, arity})
else
@ecto_defs Dict.update(@ecto_defs, {name, arity}, [head], &[head|&1])
end
def unquote(name)(unquote_splicing(args)) when unquote(guards), do: {:ok, unquote(return)}
end
end
end
def __defs__(mod, {:::, meta, [left, right]}) do
{name, args} = Macro.decompose_call(left)
deft = Module.get_attribute(mod, :ecto_deft)
defa = Module.get_attribute(mod, :ecto_defa)
meta = {meta, deft, defa}
{args, var_types} =
Enum.reduce(Stream.with_index(args), {[], []}, fn
{{arg, _, [inner]}, index}, {args, types} ->
{var1, var_types} = var_types(arg, index, "x", meta)
types = types ++ [{var1, var_types}]
{var2, var_types} = var_types(inner, index, "y", meta)
arg = {var1, var2}
{args ++ [arg], types ++ [{var2, var_types}]}
{arg, index}, {args, types} ->
{var, var_types} = var_types(arg, index, "x", meta)
{args ++ [var], types ++ [{var, var_types}]}
end)
right = extract_return_type(right)
catch_all = Enum.all?(var_types, &match?({_, nil}, &1)) and
Enum.uniq(var_types) == var_types
{name, args, compile_guards(var_types), right, catch_all}
end
def __defa__(mod, {:::, _, [left, right]}) do
deft = Module.get_attribute(mod, :ecto_deft)
defa = Module.get_attribute(mod, :ecto_defa)
{extract_type(left), right |> extract_types([]) |> expand_types(deft, defa)}
end
defmacro __before_compile__(env) do
defs = Module.get_attribute(env.module, :ecto_defs)
defs_quote = Enum.map(defs, fn {{name, arity}, exprs} ->
args = Stream.repeatedly(fn -> {:_, [], __MODULE__} end) |> Enum.take(arity)
exprs = Macro.escape(Enum.reverse(exprs))
quote do
def unquote(name)(unquote_splicing(args)) do
{:error, unquote(exprs)}
end
end
end)
defs_quote ++ [quote do def aggregate?(_, _), do: false end]
end
## Helpers
defp var_types({:var, _, nil} = var, _index, _prefix, _meta) do
{var, nil}
end
defp var_types({:_, _, nil}, index, prefix, {meta, _, _}) do
{{:"#{prefix}#{index}", meta, __MODULE__}, nil}
end
defp var_types(arg, index, prefix, {meta, deft, defa}) do
types = arg |> extract_types([]) |> expand_types(deft, defa)
{{:"#{prefix}#{index}", meta, __MODULE__}, types}
end
defp extract_type({name, _, [_]}) when is_atom(name) do
name
end
defp extract_type({name, _, context}) when is_atom(name) and is_atom(context) do
name
end
defp extract_type(name) when is_atom(name) do
name
end
defp extract_type(expr) do
raise "invalid type expression: #{Macro.to_string(expr)}"
end
defp extract_types({:|, _, [left, right ]}, acc) do
extract_types(left, extract_types(right, acc))
end
defp extract_types(other, acc) do
[extract_type(other)|acc]
end
defp extract_return_type({name, _, [{:var, _, nil} = var]}) when is_atom(name) do
{name, var}
end
defp extract_return_type({name, _, [{var, _, nil}]}) when is_atom(name) and is_atom(var) do
{name, var}
end
defp extract_return_type({:var, _, nil} = var) do
var
end
defp extract_return_type({name, _, context}) when is_atom(name) and is_atom(context) do
name
end
defp extract_return_type(name) when is_atom(name) do
name
end
defp extract_return_type(expr) do
raise "invalid type expression: #{Macro.to_string(expr)}"
end
defp expand_types(types, deft, defa) do
Enum.reduce types, [], fn type, acc ->
cond do
type in deft ->
[type|acc]
aliases = defa[type] ->
aliases ++ acc
true ->
raise "unknown type or alias #{type}"
end
end
end
defp compile_guards(var_types) do
guards = Enum.filter(var_types, fn({_, types}) -> types != nil end)
case guards do
[] -> true
[h|t] ->
Enum.reduce t, compile_guard(h), fn tuple, acc ->
quote do
unquote(compile_guard(tuple)) and unquote(acc)
end
end
end
end
defp compile_guard({var, types}) do
quote do: unquote(var) in unquote(types) or unquote(var) == :any
end
end
# File: lib/ecto/query/typespec.ex
defmodule DynamoMigration do
@moduledoc """
Version management module for migration file of DynamoDB.
Depends on ExAws and ExAws.Dynamo.
See also https://github.com/ex-aws/ex_aws_dynamo.
Usage:
1. $ mix dynamo.setup # Creates migrations table.
2. $ mix dynamo.gen.migration create_tests_table --table Tests # Generates migration file.
```elixir
defmodule Dynamo.Migrations.CreateTestsTable do
@table_name "Tests"
def table_name, do: @table_name
def change do
# Rewrite any migration code.
ExAws.Dynamo.create_table(
"Tests",
[id: :hash],
%{id: :number},
1,
1,
:provisioned
)
|> ExAws.request!()
end
end
```
3. $ mix dynamo.migrate # Migrates `priv/dynamo/migrations/*`.
"""
require Logger
alias ExAws.Dynamo
@table "migrations"
@migration_file_path "priv/dynamo/migrations"
@doc false
@spec migration_file_path :: String.t()
def migration_file_path, do: @migration_file_path
@doc """
Called from `mix dynamo.migrate`
Executes migration files that have not yet been migrated.
"""
@spec migrate :: :ok
def migrate do
case File.ls(migration_file_path()) do
{:ok, files} ->
files
|> compile_migrations()
|> Enum.filter(fn {_, version} -> migration_required?(version) end)
|> Enum.each(fn {mod, version} ->
mod.change()
insert_migration_version(mod.table_name(), version)
Logger.debug("Migrate #{mod} was succeed")
end)
error ->
error
end
end
@doc """
Creates migrations table for version management.
Called from `mix dynamo.setup`
"""
@spec setup :: :ok
def setup do
if @table in table_names() do
Logger.debug("Migrations table was already created.")
else
Dynamo.create_table(
@table,
[version: :hash],
[version: :number],
1,
1,
:provisioned
)
|> ExAws.request!()
Logger.debug("Migrations table was created.")
end
end
@doc """
Resets migrated tables: drops them (optionally only those whose names start
with `only_prefix`) and then runs the migrations again.
"""
def reset(only_prefix \\ nil) do
current_versions = versions()
table_names()
|> Enum.reject(&(&1 == @table))
|> Enum.filter(fn table_name ->
  # When a prefix is given, only reset tables whose names start with it.
  is_nil(only_prefix) or String.starts_with?(table_name, only_prefix)
end)
|> Enum.each(&delete_table(&1, current_versions))
migrate()
end
@doc false
@spec migration_required?(binary()) :: boolean
def migration_required?(version) do
Dynamo.get_item(@table, %{version: version})
|> ExAws.request!()
|> (fn result -> result == %{} end).()
end
defp versions() do
Dynamo.scan(@table)
|> ExAws.request!()
|> Map.get("Items", [])
|> Enum.map(fn version ->
%{
version: version["version"]["N"] |> String.to_integer(),
table_name: version["table_name"]["S"]
}
end)
end
defp insert_migration_version(table_name, version) do
Dynamo.put_item(
@table,
%{table_name: table_name, version: version}
)
|> ExAws.request!()
end
defp compile_migrations(files) do
files
|> Enum.map(fn file ->
[mod | _] =
(migration_file_path() <> "/" <> file)
|> Code.compile_file()
|> Enum.map(&elem(&1, 0))
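# The version is the integer formed by all digits in the file name,
# e.g. a file named "20200101120000_create_tests_table.exs".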
version =
Regex.scan(~r/[0-9]/, file)
|> Enum.join()
|> String.to_integer()
{mod, version}
end)
end
defp delete_table(table_name, versions) do
table_name
|> Dynamo.delete_table()
|> ExAws.request!()
versions
|> Enum.filter(&(table_name == &1[:table_name]))
|> Enum.each(fn version ->
Dynamo.delete_item(@table, %{version: version[:version]})
|> ExAws.request!()
end)
end
defp table_names do
Dynamo.list_tables()
|> ExAws.request!()
|> Map.get("TableNames", [])
end
end
# File: lib/dynamo_migration.ex
defmodule Publishing.Markdown do
@moduledoc """
Module for handling raw markdown texts.
"""
@heading_tags ["h1", "h2", "h3", "h4", "h5", "h6"]
@heading_default "Untitled"
@description_default ""
@cover_default ""
@doc """
Transforms markdown into HTML, performing additional mutations.
## Features
* Add `language-none` to inline and code blocks.
Example:
iex> parse("# title")
"<h1>\\ntitle</h1>\\n"
iex> parse("## title")
"<h2>\\ntitle</h2>\\n"
iex> parse("`some code`")
"<p>\\n<code class=\\"language-none\\">some code</code></p>\\n"
iex> parse("```\\nsome code\\n```")
"<pre><code class=\\"language-none\\">some code</code></pre>\\n"
iex> parse("```elixir\\nsome code\\n```")
"<pre><code class=\\"elixir language-elixir\\">some code</code></pre>\\n"
"""
@spec parse(String.t()) :: String.t()
def parse(markdown) do
markdown
|> to_ast()
|> add_code_class()
|> Earmark.Transform.transform()
end
@doc """
Returns the markdown's title or first subtitle or the given `default` (optional).
Examples:
iex> get_title("# Hello World!\\nLorem ipsum...")
"Hello World!"
iex> get_title("## Hello World!\\nLorem ipsum...")
"Hello World!"
iex> get_title("Lorem ipsum dolor sit amet...", "Untitled")
"Untitled"
"""
@spec get_title(String.t()) :: String.t()
@spec get_title(String.t(), String.t()) :: String.t()
def get_title(markdown, default \\ @heading_default) when is_binary(markdown) do
get_content(markdown, default, &title_tags/1)
end
@doc """
Returns the markdown's first paragraph or the given `default` (optional).
Examples:
iex> get_description("# Hello World!\\nLorem ipsum...")
"Lorem ipsum..."
iex> get_description("## Hello World!", "Untitled")
"Untitled"
"""
@spec get_description(String.t()) :: String.t()
@spec get_description(String.t(), String.t()) :: String.t()
def get_description(markdown, default \\ @description_default) when is_binary(markdown) do
get_content(markdown, default, ¶graph_tag/1)
end
@doc """
Returns the markdown's first image or the given `default` (optional).
Examples:
iex> get_cover("# Hello World!\\n")
"image.png"
iex> get_cover("## Hello World!", "img.png")
"img.png"
"""
def get_cover(markdown, default \\ @cover_default) when is_binary(markdown) do
get_content(markdown, default, &image_tag/1)
end
defp get_content(markdown, default, tag_function) do
markdown
|> to_ast()
|> Enum.find(tag_function)
|> tag_content_or_default(default)
end
defp title_tags({tag, _, [content], _}) when tag in @heading_tags and is_binary(content),
do: true
defp title_tags(_), do: false
defp paragraph_tag({"p", _, [content], _}) when is_binary(content), do: true
defp paragraph_tag(_), do: false
defp image_tag({"img", _, _, _}), do: true
defp image_tag({"p", _, [{"img", _, _, _}], _}), do: true
defp image_tag(_), do: false
defp tag_content_or_default({"p", _, [{"img", _, _, _} = img], _}, default) do
tag_content_or_default(img, default)
end
defp tag_content_or_default({"img", attrs, _, _}, _default) do
%{"src" => src} = Map.new(attrs)
src
end
defp tag_content_or_default({_, _, [content], _}, _default), do: content
defp tag_content_or_default(_, default), do: default
defp to_ast(markdown) do
{:ok, ast, _} = EarmarkParser.as_ast(markdown, code_class_prefix: "language-")
ast
end
defp add_code_class(ast) do
Earmark.Transform.map_ast(ast, fn
{"code", [], [content], %{}} ->
{"code", [{"class", "language-none"}], [content], %{}}
{"code", [{"class", "inline"}], [content], %{}} ->
{"code", [{"class", "language-none"}], [content], %{}}
tag ->
tag
end)
end
end
# File: apps/publishing/lib/publishing/markdown.ex
defmodule Serum.Build do
@moduledoc """
This module contains functions for actually building a Serum project.
"""
import Serum.Util
alias Serum.Error
alias Serum.Build.Pass1
alias Serum.Build.Pass2
alias Serum.TemplateLoader
@type mode :: :parallel | :sequential
@type template_ast :: Macro.t | nil
@type state :: map
@doc """
Starts building the website in the given build mode (parallel or sequential).
This function does the following:
1. Checks if the effective user has write permission to the output
directory.
2. Checks if the system timezone is properly set. The Timex application
relies on the system timezone to format date strings, so if the timezone is
not set, Timex will fail.
3. Cleans the output directory. However, files or directories starting with a
dot (`'.'`) will not be deleted, as these may contain important version
control stuff and so on.
4. Launches 2-pass build process. Refer to `Serum.Build.Pass1` and
`Serum.Build.Pass2` for more information about 2-pass build process.
5. Finally copies `assets/` and `media/` directory to the output directory
(if any).
"""
@spec build(mode, state) :: Error.result(binary)
def build(mode, state) do
with :ok <- check_dest_perm(state.dest),
:ok <- check_tz(),
:ok <- clean_dest(state.dest),
{:ok, state} <- Pass1.run(mode, state),
{:ok, state} <- prepare_templates(state),
:ok <- Pass2.run(mode, state)
do
copy_assets state
{:ok, state}
else
{:error, _} = error -> error
end
end
@spec check_dest_perm(binary) :: Error.result
defp check_dest_perm(dest) do
parent = dest |> Path.join("") |> Path.dirname()
result =
case File.stat parent do
{:error, reason} -> reason
{:ok, %File.Stat{access: :none}} -> :eacces
{:ok, %File.Stat{access: :read}} -> :eacces
{:ok, _} -> :ok
end
case result do
:ok -> :ok
err -> {:error, {err, dest, 0}}
end
end
@spec check_tz() :: Error.result
defp check_tz() do
try do
Timex.local()
:ok
rescue
_ -> {:error, "system timezone is not set"}
end
end
@spec clean_dest(binary) :: :ok
defp clean_dest(dest) do
File.mkdir_p! dest
msg_mkdir dest
# exclude dotfiles so that git repository is not blown away
dest
|> File.ls!
|> Enum.reject(&String.starts_with?(&1, "."))
|> Enum.map(&Path.join(dest, &1))
|> Enum.each(&File.rm_rf!(&1))
end
@spec prepare_templates(state) :: Error.result(state)
defp prepare_templates(state) do
with {:ok, state} <- TemplateLoader.load_includes(state),
{:ok, state} <- TemplateLoader.load_templates(state)
do
{:ok, state}
else
{:error, _} = error -> error
end
end
@spec copy_assets(state) :: :ok
defp copy_assets(%{src: src, dest: dest}) do
IO.puts "Copying assets and media..."
try_copy Path.join(src, "assets"), Path.join(dest, "assets")
try_copy Path.join(src, "media"), Path.join(dest, "media")
end
@spec try_copy(binary, binary) :: :ok
defp try_copy(src, dest) do
case File.cp_r src, dest do
{:error, reason, _} ->
warn "Cannot copy #{src}: #{:file.format_error(reason)}. Skipping."
{:ok, _} -> :ok
end
end
end
# File: lib/serum/build.ex
defmodule GGity.Geom.Ribbon do
@moduledoc false
alias GGity.{Draw, Geom, Plot}
@type t() :: %__MODULE__{}
@type record() :: map()
@type mapping() :: map()
defstruct data: nil,
mapping: nil,
stat: :identity,
position: :identity,
key_glyph: :rect,
fill: "black",
alpha: 1,
color: nil,
size: nil
@spec new(mapping(), keyword()) :: Geom.Ribbon.t()
def new(mapping, options \\ []) do
struct(Geom.Ribbon, [{:mapping, mapping} | options])
end
@spec draw(Geom.Ribbon.t(), list(map()), Plot.t()) :: iolist()
def draw(%Geom.Ribbon{} = geom_ribbon, _data, plot) do
ribbons(geom_ribbon, plot)
end
defp ribbons(%Geom.Ribbon{position: :stack} = geom_ribbon, plot) do
plot_height = plot.width / plot.aspect_ratio
ribbons =
geom_ribbon
|> group_by_aesthetics(plot)
|> Enum.sort_by(fn {value, _group} -> value end, :desc)
|> Enum.map(fn {_value, group} -> ribbon(geom_ribbon, group, plot) end)
stacked_coords =
ribbons
|> Enum.map(fn group -> group.coords end)
|> stack_coordinates(plot_height)
|> Enum.reverse()
:lists.zipwith(
fn ribbon, stacked_coords -> Map.put(ribbon, :coords, stacked_coords) end,
ribbons,
stacked_coords
)
|> Enum.map(fn ribbon -> draw_ribbon(ribbon, plot) end)
|> Enum.reverse()
end
defp ribbons(%Geom.Ribbon{} = geom_ribbon, plot) do
geom_ribbon
|> group_by_aesthetics(plot)
|> Enum.map(fn {_value, group} ->
geom_ribbon
|> ribbon(group, plot)
|> draw_ribbon(plot)
end)
end
defp ribbon(%Geom.Ribbon{} = geom_ribbon, data, plot) do
plot_height = plot.width / plot.aspect_ratio
scale_transforms =
geom_ribbon.mapping
|> Map.keys()
|> List.insert_at(0, :y)
|> List.delete(:y_max)
|> Enum.reduce(%{}, fn aesthetic, mapped ->
Map.put(mapped, aesthetic, Map.get(plot.scales[aesthetic], :transform))
end)
transforms =
geom_ribbon
|> Map.take([:alpha, :color, :fill, :size])
|> Enum.reduce(%{}, fn {aesthetic, fixed_value}, fixed ->
Map.put(fixed, aesthetic, fn _value -> fixed_value end)
end)
|> Map.merge(scale_transforms)
row = hd(data)
[alpha, color, fill, size] = [
transforms.alpha.(row[geom_ribbon.mapping[:alpha]]),
transforms.color.(row[geom_ribbon.mapping[:color]]),
transforms.fill.(row[geom_ribbon.mapping[:fill]]),
transforms.size.(row[geom_ribbon.mapping[:size]])
]
y_max_coords = format_coordinates(:y_max, geom_ribbon, data, plot)
all_coords =
if geom_ribbon.mapping[:y_min] do
y_min_coords =
:y_min
|> format_coordinates(geom_ribbon, data, plot)
|> Enum.reverse()
[y_max_coords, y_min_coords]
else
first_x = List.first(y_max_coords)[:x]
last_x = List.last(y_max_coords)[:x]
[y_max_coords, %{x: last_x, y_max: plot_height}, %{x: first_x, y_max: plot_height}]
end
%{fill: fill, alpha: alpha, color: color, size: size, coords: List.flatten(all_coords)}
end
defp group_by_aesthetics(geom, plot) do
(geom.data || plot.data)
|> Enum.group_by(fn row ->
{
row[geom.mapping[:alpha]],
row[geom.mapping[:fill]]
}
end)
end
defp draw_ribbon(ribbon, plot) do
ribbon.coords
|> Enum.map_join(" ", fn row ->
"#{row.x + plot.area_padding},#{row.y_max + plot.area_padding}"
end)
|> Draw.polygon(
stroke: ribbon.color,
stroke_width: ribbon.size,
fill: ribbon.fill,
fill_opacity: ribbon.alpha
)
end
defp format_coordinates(y_aesthetic, geom, data, plot) do
data
|> sort_by_x(geom)
|> Stream.map(fn row ->
[
plot.scales.x.transform.(row[geom.mapping[:x]]),
plot.scales.y.transform.(row[geom.mapping[y_aesthetic]])
]
end)
|> Stream.map(fn row -> Map.new(Enum.zip([:x, :y_max], row)) end)
|> Enum.map(fn row ->
Map.put(row, :y_max, (plot.width - row.y_max) / plot.aspect_ratio)
end)
end
defp sort_by_x(data, %Geom.Ribbon{} = geom_ribbon) do
case hd(data)[geom_ribbon.mapping.x] do
%Date{} ->
Enum.sort_by(data, fn row -> row[geom_ribbon.mapping.x] end, Date)
_number ->
Enum.sort_by(data, fn row -> row[geom_ribbon.mapping.x] end)
end
end
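# Stacks the groups' y_max coordinates. SVG y-values grow downward from the
# top, so heights measured up from the baseline at plot_height combine as
# y1 + y2 - plot_height (see sum_ymax_coordinates/3).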
defp stack_coordinates([only_group | []], _plot_height), do: only_group
defp stack_coordinates([first_group, next_group | rest_of_groups], plot_height) do
stack_coordinates(
rest_of_groups,
sum_ymax_coordinates(first_group, next_group, plot_height),
[first_group],
plot_height
)
end
defp stack_coordinates([], updated_group, stacked_coords, _plot_height) do
[updated_group | stacked_coords]
end
defp stack_coordinates(
[next_group | rest_of_groups],
updated_group,
stacked_coords,
plot_height
) do
stack_coordinates(
rest_of_groups,
sum_ymax_coordinates(next_group, updated_group, plot_height),
[updated_group | stacked_coords],
plot_height
)
end
defp sum_ymax_coordinates(first_list, second_list, plot_height) do
:lists.zipwith(
fn first_row, second_row ->
%{x: first_row.x, y_max: first_row.y_max + second_row.y_max - plot_height}
end,
first_list,
second_list
)
end
end
# File: lib/ggity/geom/ribbon.ex
defmodule RayTracer.Camera do
@moduledoc """
This module defines a camera which maps the three-dimensional scene onto a
two-dimensional canvas.
Camera's canvas will always be exactly one unit in front of the camera. This makes
the math a bit cleaner.
"""
alias RayTracer.Matrix
alias RayTracer.Ray
alias RayTracer.RTuple
alias RayTracer.World
alias RayTracer.Canvas
import RTuple, only: [point: 3]
@type t :: %__MODULE__{
hsize: integer,
vsize: integer,
field_of_view: number,
transform: Matrix.matrix,
inv_transform: Matrix.matrix,
half_width: number,
half_height: number,
pixel_size: number
}
defstruct [
:hsize, :vsize, :field_of_view, :transform, :inv_transform, :half_width, :half_height, :pixel_size
]
@doc """
Returns a new camera
`hsize` - horizontal size (in pixels) of the canvas that the picture will be rendered to
`vsize` - vertical size
`field_of_view` - angle that describes how much the camera can see.
When the field of view is small, the view will be "zoomed in",
magnifying a smaller area of the scene.
`transform` - matrix describing how the world should be oriented relative to the
camera. This is usually a view transformation matrix.
`inv_transform` - the cached inverse of `transform`, used when casting rays
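Illustrative example (the field of view is given in radians):

    camera = RayTracer.Camera.new(200, 100, :math.pi() / 3)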
"""
@spec new(integer, integer, number, Matrix.matrix) :: t
def new(hsize, vsize, field_of_view, transform \\ Matrix.ident) do
%__MODULE__{
hsize: hsize,
vsize: vsize,
field_of_view: field_of_view,
transform: transform,
inv_transform: Matrix.inverse(transform)
} |> Map.merge(compute_sizes(hsize, vsize, field_of_view))
end
@doc """
Renders the provided world on a canvas defined by camera's properties
"""
@spec render(t, World.t) :: Canvas.t
def render(camera, world) do
Enum.reduce(0..(camera.vsize - 1), Canvas.new(camera.hsize, camera.vsize), fn y, canvas ->
if rem(y, 10) == 0, do: IO.puts("#{y}/#{camera.vsize}")
0..(camera.hsize - 1)
|> Flow.from_enumerable()
|> Flow.partition()
|> Flow.map(&compute_pixel(world, camera, &1, y))
|> Enum.reduce(canvas, fn r, canvas ->
canvas |> Canvas.write_pixel(r.x, y, r.color)
end)
end)
end
@doc """
Computes the world coordinates at the center of the given pixel and then constructs
a ray that passes through that point.
`px` - x position of the pixel
`py` - y position of the pixel
"""
@spec ray_for_pixel(t, number, number) :: Ray.t
def ray_for_pixel(camera, px, py) do
# The offset from the edge of the canvas to the pixel's center
x_offset = (px + 0.5) * camera.pixel_size
y_offset = (py + 0.5) * camera.pixel_size
# The untransformed coordinates of the pixel in world space.
# Remember that the camera looks toward -z, so +x is to the left.
world_x = camera.half_width - x_offset
world_y = camera.half_height - y_offset
# Using the camera matrix transform the canvas point and the origin.
# Then compute the ray's direction vector.
# Remember that the canvas is at z = -1
invct = camera.inv_transform
world_point = point(world_x, world_y, -1)
pixel = invct |> Matrix.mult(world_point)
origin = invct |> Matrix.mult(point(0, 0, 0))
direction = RTuple.sub(pixel, origin) |> RTuple.normalize
Ray.new(origin, direction)
end
# Computes:
# `pixel_size` - the size (in world-space units) of the pixels on the canvas.
# `half_height` - half of the canvas height in world units
# `half_width` - half of the canvas width in world units
@spec compute_sizes(number, number, number) :: map
defp compute_sizes(hsize, vsize, field_of_view) do
half_view = :math.tan(field_of_view / 2)
aspect = hsize / vsize
sizes = if aspect >= 1 do
%{half_width: half_view, half_height: half_view / aspect}
else
%{half_width: half_view * aspect, half_height: half_view}
end
%{pixel_size: sizes.half_width * 2 / hsize} |> Map.merge(sizes)
end
defp compute_pixel(world, camera, x, y) do
ray = ray_for_pixel(camera, x, y)
%{x: x, color: World.color_at(world, ray)}
end
end
# File: lib/camera.ex
defmodule Saucexages.AnsiFlags do
@moduledoc """
ANSiFlags allow an author of ANSi and similar files to provide a clue to a viewer / editor how to render the image.
ANSiFlags use the following binary layout:
`<<0, 0, 0, aspect_ratio::2, letter_spacing::2, non_blink_mode::1>>`
## Aspect ratio
Most modern display devices have square pixels, but that has not always been the case. Displaying an ANSI file that was created for one of the older devices on a device with square pixels will vertically compress the image slightly. This can be compensated for by either taking a font that is slightly taller than was used on the legacy device, or by stretching the rendered image.
These 2 bits can be used to signal that the image was created with square pixels in mind, or that it was created for a legacy device with the elongated pixels.
The following values are used to represent decoded aspect ratio flags with respect to image display preferences:
* `:none` - Legacy value. No preference.
* `:legacy` - Image was created for a legacy device. When displayed on a device with square pixels, either the font or the image needs to be stretched.
* `:modern` - Image was created for a modern device with square pixels. No stretching is desired on a device with square pixels.
* `:invalid` - Not currently a valid value. This is used to represent an invalid state such as when both aspect ratio bits are set.
## Letter Spacing
Letter-spacing for font selection.
Fixed-width text mode as used in DOS and early graphics based computers such as the Amiga used bitmap fonts to display text. Letter-spacing and line spacing is a part of the font bitmap, so the font box inside each bitmap was a bit smaller than the font bitmap.
For the VGA, IBM wanted a smoother font. 2 more lines were added, and the letter-spacing was removed from the font and instead inserted by the VGA hardware during display. All 8 pixels in the font could thus be used, and still have a 1 pixel letter spacing. For the line graphics characters, this was a problem because they needed to match up with their neighbours on both sides. The VGA hardware was wired to duplicate the 8th column of the font bitmap for the character range C0h to DFh. In some code pages this was undesired, so this feature could be turned off to get an empty letter spacing on all characters as well (via the ELG field (Enable Line Graphics Character Codes) in the Mode Control register (10h) of the VGA Attribute Controller (3C0h)).
While the VGA allows you to enable or disable the 8th column duplication, there is no way to specify which range of characters this needs to be done for, the C0h to DFh range is hardwired into the VGA. These 2 bits can be used to select the 8 pixel or 9 pixel variant of a particular font.
The following values are used to represent decoded letter-spacing flags:
* `:none` - Legacy value. No preference.
* `:eight_pixel_font` - Select 8 pixel font.
* `:nine_pixel_font` - Select 9 pixel font.
* `:invalid` - Not currently a valid value.
Changing the font width and wanting to remain at 80 characters per row means that you need to adjust for a change in horizontal resolution (from 720 pixels to 640 or vice versa). When you are trying to match the original aspect ratio (see the AR bits), you will need to adjust the vertical stretching accordingly.
Only the VGA (and the Hercules) video cards actually supported fonts 9 pixels wide. SAUCE does not prevent you from specifying you want a 9 pixel wide font for a font that technically was never used that way. Note that duplicating the 8th column on non-IBM fonts (and even some code pages for IBM fonts) may not give the desired effect.
Some people like the 9 pixel fonts, some do not because it causes a visible break in the 3 ‘shadow blocks’ (B0h, B1h and B2h)
## Non-blink Mode (iCE Color)
When 0, only the 8 low intensity colors are supported for the character background. The high bit set to 1 in each attribute byte results in the foreground color blinking repeatedly.
When 1, all 16 colors are supported for the character background. The high bit set to 1 in each attribute byte selects the high intensity color instead of blinking.
The following values are used to represent a decode non-blink mode flag:
* `true` - All 16 color support for character backgrounds, i.e. iCE Color
* `false` - Only 8 low intensity colors for character backgrounds, i.e. iCE Color is *not* supported.
## Source
The content in these docs was adapted from the following source:
* [Sauce SPEC - ANSI Flags](http://www.acid.org/info/sauce/sauce.htm#ANSiFlags)
"""
alias __MODULE__, as: AnsiFlags
@type ansi_flags :: integer() | binary()
@type aspect_ratio :: :none | :legacy | :modern | :invalid
@type letter_spacing :: :none | :eight_pixel_font | :nine_pixel_font | :invalid
@type t :: %__MODULE__{
aspect_ratio: aspect_ratio(),
letter_spacing: letter_spacing(),
non_blink_mode?: boolean()
}
defstruct [:aspect_ratio, :letter_spacing, :non_blink_mode?]
aspect_ratio_flag_items = [{:none, 0, 0}, {:legacy, 0, 1}, {:modern, 1, 0}, {:invalid, 1, 1}]
letter_spacing_flag_items = [{:none, 0, 0}, {:eight_pixel_font, 0, 1}, {:nine_pixel_font, 1, 0}, {:invalid, 1, 1}]
non_blink_mode_flag_items = [{false, 0}, {true, 1}]
@doc """
Decodes and returns human-readable ansi flags, as specified in the given ANSi flags.
## Examples
iex> Saucexages.AnsiFlags.ansi_flags(0)
%Saucexages.AnsiFlags{
aspect_ratio: :none,
letter_spacing: :none,
non_blink_mode?: false
}
iex> Saucexages.AnsiFlags.ansi_flags(17)
%Saucexages.AnsiFlags{
aspect_ratio: :modern,
letter_spacing: :none,
non_blink_mode?: true
}
iex> Saucexages.AnsiFlags.ansi_flags(2)
%Saucexages.AnsiFlags{
aspect_ratio: :none,
letter_spacing: :eight_pixel_font,
non_blink_mode?: false
}
iex> Saucexages.AnsiFlags.ansi_flags(5)
%Saucexages.AnsiFlags{
aspect_ratio: :none,
letter_spacing: :nine_pixel_font,
non_blink_mode?: true
}
"""
@spec ansi_flags(ansi_flags()) :: t()
def ansi_flags(t_flags)
for {aspect_ratio, ar_hi, ar_lo} <- aspect_ratio_flag_items, {letter_spacing, ls_hi, ls_lo} <- letter_spacing_flag_items, {non_blink_mode?, nb_flag} <- non_blink_mode_flag_items do
def ansi_flags(<<_ :: 3, unquote(ar_hi) :: 1, unquote(ar_lo) :: 1, unquote(ls_hi) :: 1, unquote(ls_lo) :: 1, unquote(nb_flag) :: 1>> = t_flags) when is_binary(t_flags) do
%AnsiFlags {
aspect_ratio: unquote(aspect_ratio),
letter_spacing: unquote(letter_spacing),
non_blink_mode?: unquote(non_blink_mode?)
}
end
end
def ansi_flags(t_flags) when is_integer(t_flags) do
to_binary(t_flags)
|> ansi_flags()
end
@doc """
Returns the aspect ratio to be used for display as specified in the given ANSi flags.
## Examples
iex> Saucexages.AnsiFlags.aspect_ratio(<<16>>)
:modern
iex> Saucexages.AnsiFlags.aspect_ratio(1)
:none
iex> Saucexages.AnsiFlags.aspect_ratio(8)
:legacy
iex> Saucexages.AnsiFlags.aspect_ratio(16)
:modern
"""
@spec aspect_ratio(ansi_flags()) :: aspect_ratio()
def aspect_ratio(t_flags)
for {aspect_ratio, ar_hi, ar_lo} <- aspect_ratio_flag_items do
def aspect_ratio(<<_ :: 3, unquote(ar_hi) :: 1, unquote(ar_lo) :: 1, _ :: 3>> = t_flags) when is_binary(t_flags) do
# 00: Legacy value. No preference.
# 01: Image was created for a legacy device. When displayed on a device with square pixels, either the font or the image needs to be stretched.
# 10: Image was created for a modern device with square pixels. No stretching is desired on a device with square pixels.
# 11: Not currently a valid value
unquote(aspect_ratio)
end
end
def aspect_ratio(t_flags) when is_integer(t_flags) do
to_binary(t_flags)
|> aspect_ratio()
end
@doc """
Returns the letter spacing as specified in the given ANSi flags to be used for fonts.
## Examples
iex> Saucexages.AnsiFlags.letter_spacing(<<2>>)
:eight_pixel_font
iex> Saucexages.AnsiFlags.letter_spacing(11)
:eight_pixel_font
iex> Saucexages.AnsiFlags.letter_spacing(12)
:nine_pixel_font
iex> Saucexages.AnsiFlags.letter_spacing(14)
:invalid
"""
@spec letter_spacing(ansi_flags()) :: letter_spacing()
def letter_spacing(t_flags)
for {letter_spacing, ls_hi, ls_lo} <- letter_spacing_flag_items do
def letter_spacing(<<_ :: 5, unquote(ls_hi) :: 1, unquote(ls_lo) :: 1, _ :: 1>> = t_flags) when is_binary(t_flags) do
# 00: Legacy value. No preference.
# 01: Select 8 pixel font.
# 10: Select 9 pixel font.
# 11: Not currently a valid value.
unquote(letter_spacing)
end
end
def letter_spacing(t_flags) when is_integer(t_flags) do
to_binary(t_flags)
|> letter_spacing()
end
@doc """
Returns whether or not non-blink mode (iCE Colors) are set in the given ANSi flags.
`true` - Use non-blink mode.
`false` - Do not use non-blink mode, use 8 background colors.
## Examples
iex> Saucexages.AnsiFlags.non_blink_mode?(<<16>>)
false
iex> Saucexages.AnsiFlags.non_blink_mode?(<<17>>)
true
iex> Saucexages.AnsiFlags.non_blink_mode?(16)
false
iex> Saucexages.AnsiFlags.non_blink_mode?(17)
true
"""
@spec non_blink_mode?(ansi_flags()) :: boolean()
def non_blink_mode?(t_flags)
for {non_blink_mode?, nb_flag} <- non_blink_mode_flag_items do
def non_blink_mode?(<<_ :: 7, unquote(nb_flag) :: 1>> = t_flags) when is_binary(t_flags) do
unquote(non_blink_mode?)
end
end
def non_blink_mode?(t_flags) when is_binary(t_flags) do
false
end
def non_blink_mode?(t_flags) when is_integer(t_flags) do
to_binary(t_flags)
|> non_blink_mode?()
end
@doc """
Converts an integer t_flags value to binary.
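## Examples

    iex> Saucexages.AnsiFlags.to_binary(17)
    <<17>>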
"""
@spec to_binary(integer()) :: binary()
def to_binary(t_flags) when is_integer(t_flags) do
<<t_flags :: little - unsigned - integer - unit(8) - size(1)>>
end
@doc """
Converts a binary t_flags value to an integer.
"""
@spec to_integer(binary()) :: integer()
def to_integer(<<flag_value :: little - unsigned - integer - unit(8) - size(1)>> = t_flags) when is_binary(t_flags) do
flag_value
end
def to_integer(t_flags) when is_binary(t_flags) do
raise ArgumentError, "ANSi flags must be specified as a single byte representing a binary little unsigned integer."
end
end
# File: lib/saucexages/ansi_flags.ex
defmodule Telegex.Marked.LinkRule do
@moduledoc false
# Matching and parsing of link nodes.
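# For example, "[Elixir](https://elixir-lang.org)" becomes a :link node whose
# data holds the href and whose children hold the bracketed text.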
use Telegex.Marked.Rule
@ntype :link
@open_bracket "["
@close_bracket "]"
@open_parenthesis "("
@close_parenthesis ")"
@impl true
def match(state) do
%{line: %{src: src, len: len}, pos: pos} = state
prev_char = String.at(src, pos - 1)
if String.at(src, pos) != @open_bracket || escapes_char?(prev_char) do
{:nomatch, state}
else
chars = String.graphemes(String.slice(src, pos + 1, len))
# Find the position of the closing bracket, used to extract the link text.
# Find the position of the closing parenthesis, used to extract the link URL.
# If either one is not found, there is no match.
with {:ok, close_bracket_pos} <- find_close_bracket_pos(chars),
{:ok, close_parenthesis_pos} <-
find_close_parenthesis_pos(close_bracket_pos, chars) do
text = String.slice(src, pos + 1, close_bracket_pos)
href = String.slice(src, close_bracket_pos + pos + 3, close_parenthesis_pos - 1)
state = %{state | pos: close_bracket_pos + close_parenthesis_pos + pos + 2}
state =
State.push_node(state, %Node{
type: @ntype,
data: [href: href],
children: text
})
{:match, state}
else
_ ->
{:nomatch, state}
end
end
end
@spec find_close_bracket_pos([String.t()]) :: {:ok, integer()} | nil
defp find_close_bracket_pos(chars) do
result =
chars
|> Enum.with_index()
|> Enum.filter(fn {char, index} ->
# The next character must be @open_parenthesis
char == @close_bracket && Enum.at(chars, index + 1) == @open_parenthesis
end)
# Always match the last @close_bracket followed by @open_parenthesis
|> List.last()
case result do
{_, pos} ->
{:ok, pos}
nil ->
nil
end
end
@spec find_close_parenthesis_pos(integer(), [String.t()]) :: {:ok, integer()} | nil
defp find_close_parenthesis_pos(close_bracket_pos, chars) do
parentheses_chars = chars |> Enum.slice((close_bracket_pos + 1)..-1)
pos =
parentheses_chars
|> Enum.find_index(&(&1 == @close_parenthesis))
if pos, do: {:ok, pos}, else: nil
end
end
# File: lib/telegex/marked/rules/link_rule.ex
defmodule StopWatch.Server do
@moduledoc """
A simple stopwatch GenServer used for exploring the common pattern
of autonomous GenServers that make notifications.
Implements a basic counter with configurable resolution (down to 10ms).
Implements "go", "stop", and "clear" functions. Also implements a "time"
genserver call, to return the number of ms passed. Also takes a
"resolution" parameter (in ms).
This branch explores using *Informant* for notifications,
with connection to Hub and HubRestAPI.
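Illustrative usage:

    {:ok, pid} = StopWatch.Server.start_link(nil)
    StopWatch.Server.time(pid)
    StopWatch.Server.stop(pid)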
"""
use GenServer
@public_state_keys [:ticks, :msec, :resolution, :running]
defmodule State, do: defstruct(
ticks: 0,
running: true,
resolution: 100,
delegate: nil,
tref: nil
)
def start_link(params, _options \\ []) do
# REVIEW WHY NEED NAME HERE? Why can't pass as option?
GenServer.start_link __MODULE__, params, name: :stop_watch
end
def init(_) do
state=%State{}
{:ok, tref} = :timer.send_after(state.resolution, :tick)
{:ok, delegate} = Informant.publish(StopWatch, {:watch, 0}, state: public(state))
{:ok, %{state | tref: tref, delegate: delegate}}
end
@doc "start the stopwatch"
def go(pid), do: GenServer.cast(pid, :go)
@doc "stop the stopwatch"
def stop(pid), do: GenServer.cast(pid, :stop)
@doc "clear the time on the stopwatch"
def clear(pid), do: GenServer.cast(pid, :clear)
@doc "get the current time of the stopwatch"
def time(pid), do: GenServer.call(pid, :time)
@doc "set multiple attributes of the stopwatch"
def set(pid, settings), do: GenServer.cast(pid, {:set, settings})
# public (server) genserver handlers, which modify state
def handle_cast(:go, state) do
{:ok, tref} = :timer.send_after(state.resolution, :tick)
%{state | running: true, tref: tref} |> announce() |> noreply()
end
def handle_cast(:stop, state) do
%{state | running: false} |> announce() |> noreply()
end
def handle_cast(:clear, state) do
%{state | ticks: 0} |> announce() |> noreply()
end
def handle_cast({:set, settings}, state) do
{:noreply, apply_changes(settings, state)}
end
# GenServer handle_call callbacks
def handle_call(:time, _from, state) do
{:reply, state.ticks, state}
end
# handle a request from informant. Ignore domain/topic because
# we are only published under one, so can't get any requests other
# than for our own.
def handle_cast({:request, {:changes, changes}, _}, state) do
IO.puts "request changes: #{inspect changes}"
{:noreply, apply_changes(changes, state)}
end
defp apply_changes(changes, old_state) do
Enum.reduce changes, old_state, fn({k,v}, state) ->
handle_set(k,v,state)
end
end
# handle setting "running" to true or false for go/stop (hub)
def handle_set(:running, true, state) do
if not state.running do
cancel_any_current_timer(state)
{:ok, tref} = :timer.send_after(state.resolution, :tick)
announce(%{state | running: true, tref: tref})
else
state
end
end
def handle_set(:running, false, state) do
if state.running do
cancel_any_current_timer(state)
announce(%{state | running: false, tref: nil})
else
state
end
end
# handle setting "ticks" to zero to clear (hub)
def handle_set(:ticks, 0, state) do
%{state | ticks: 0} |> announce()
end
# handle setting "resolution" (hub)
# changes the resolution of the stopwatch. Try to keep the current time
# by computing a new tick count based on the new offset, and cancelling
# timers. Returns a new state
def handle_set(:resolution, nr, state) do
cur_msec = state.ticks * state.resolution
cancel_any_current_timer(state)
{:ok, tref} = :timer.send_after(nr, :tick)
%{state | resolution: nr, ticks: div(cur_msec,nr), tref: tref}
|> announce()
end
# catch-all for handling bogus properties
def handle_set(_, _, state), do: state
# internal (timing) genserver handlers
def handle_info(:tick, state) do
if state.running do
{:ok, tref} = :timer.send_after(state.resolution, :tick)
%{state | ticks: (state.ticks + 1), tref: tref}
|> announce_time_only()
|> noreply
else
{:noreply, state}
end
end
# private helpers
# cancel current timer if present, and set timer ref to nil
defp cancel_any_current_timer(state) do
if (state.tref) do
{:ok, :cancel} = :timer.cancel state.tref
end
%{state | tref: nil}
end
# announce all public state; return the full state so pipelines stay easy
defp announce(state) do
Informant.update state.delegate, public(state)
state
end
defp public(state) do
state
|> Map.take(@public_state_keys)
|> Map.merge(%{msec: state.ticks * state.resolution})
end
# announce just the time; return the full state so pipelines stay easy
defp announce_time_only(state) do
Informant.update(state.delegate, %{ticks: state.ticks,
msec: (state.ticks * state.resolution)})
state
end
# genserver response helpers to make pipe operator prettier
defp noreply(state), do: {:noreply, state}
end
# File: lib/stop_watch/server.ex
defmodule StrictlySpeaking.En do
@singles {"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}
@tens {"twenty", "thirty", "fourty", "fifty", "sixty", "seventy", "eighty", "ninety"}
@teens {"ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"}
@bigs {"thousand", "million", "billion", "trillion"}
@doc """
Accepts an integer. Returns a string containing human-readable representation of given number.
## Examples
iex> StrictlySpeaking.En.say(0)
"zero"
iex> StrictlySpeaking.En.say(7)
"seven"
iex> StrictlySpeaking.En.say(17)
"seventeen"
iex> StrictlySpeaking.En.say(23)
"twenty three"
iex> StrictlySpeaking.En.say(117)
"one hundred and seventeen"
iex> StrictlySpeaking.En.say(200)
"two hundred"
iex> StrictlySpeaking.En.say(217)
"two hundred and seventeen"
iex> StrictlySpeaking.En.say(227)
"two hundred and twenty seven"
iex> StrictlySpeaking.En.say(1_007)
"one thousand seven"
iex> StrictlySpeaking.En.say(1_017)
"one thousand seventeen"
iex> StrictlySpeaking.En.say(1_117)
"one thousand one hundred and seventeen"
iex> StrictlySpeaking.En.say(1_227)
"one thousand two hundred and twenty seven"
iex> StrictlySpeaking.En.say(17_117)
"seventeen thousand one hundred and seventeen"
iex> StrictlySpeaking.En.say(117_117)
"one hundred and seventeen thousand one hundred and seventeen"
iex> StrictlySpeaking.En.say(117_227)
"one hundred and seventeen thousand two hundred and twenty seven"
"""
def say(number, acc \\ << >>, order \\ 0)
def say(0, _acc, _order), do: elem(@singles, 0)
def say(number, acc, order) when number > 0 do
{div1000, rem1000} = {div(number, 1000), rem(number, 1000)}
{div100, rem100} = {div(rem1000, 100), rem(rem1000, 100)}
{div10, rem10} = {div(rem100, 10), rem(rem100, 10)}
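# The tuple {order, hundreds, tens, ones} describes the current three-digit
# group; `order` selects the scale word (thousand, million, ...) from @bigs.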
result =
case {order, div100, div10, rem10} do
  {_, 0, 0, 0} -> << >>
  {0, 0, 0, s} -> << elem(@singles, s)::binary >>
  {0, 0, 1, s} -> << elem(@teens, s)::binary >>
  {0, 0, t, 0} -> << elem(@tens, t - 2)::binary >>
  {0, 0, t, s} -> << elem(@tens, t - 2)::binary, ?\s, elem(@singles, s)::binary >>
  {0, h, 0, 0} -> << elem(@singles, h)::binary, ?\s, "hundred" >>
  {0, h, 0, s} -> << elem(@singles, h)::binary, ?\s, "hundred", ?\s, "and", ?\s, elem(@singles, s)::binary >>
  {0, h, 1, s} -> << elem(@singles, h)::binary, ?\s, "hundred", ?\s, "and", ?\s, elem(@teens, s)::binary >>
  {0, h, t, 0} -> << elem(@singles, h)::binary, ?\s, "hundred", ?\s, "and", ?\s, elem(@tens, t - 2)::binary >>
  {0, h, t, s} -> << elem(@singles, h)::binary, ?\s, "hundred", ?\s, "and", ?\s, elem(@tens, t - 2)::binary, ?\s, elem(@singles, s)::binary >>
  {o, 0, 0, s} -> << elem(@singles, s)::binary, ?\s, elem(@bigs, o - 1)::binary >>
  {o, 0, 1, s} -> << elem(@teens, s)::binary, ?\s, elem(@bigs, o - 1)::binary >>
  {o, 0, t, 0} -> << elem(@tens, t - 2)::binary, ?\s, elem(@bigs, o - 1)::binary >>
  {o, 0, t, s} -> << elem(@tens, t - 2)::binary, ?\s, elem(@singles, s)::binary, ?\s, elem(@bigs, o - 1)::binary >>
  {o, h, 0, 0} -> << elem(@singles, h)::binary, ?\s, "hundred", ?\s, elem(@bigs, o - 1)::binary >>
  {o, h, 0, s} -> << elem(@singles, h)::binary, ?\s, "hundred", ?\s, "and", ?\s, elem(@singles, s)::binary, ?\s, elem(@bigs, o - 1)::binary >>
  {o, h, 1, s} -> << elem(@singles, h)::binary, ?\s, "hundred", ?\s, "and", ?\s, elem(@teens, s)::binary, ?\s, elem(@bigs, o - 1)::binary >>
  {o, h, t, 0} -> << elem(@singles, h)::binary, ?\s, "hundred", ?\s, "and", ?\s, elem(@tens, t - 2)::binary, ?\s, elem(@bigs, o - 1)::binary >>
  {o, h, t, s} -> << elem(@singles, h)::binary, ?\s, "hundred", ?\s, "and", ?\s, elem(@tens, t - 2)::binary, ?\s, elem(@singles, s)::binary, ?\s, elem(@bigs, o - 1)::binary >>
end
case {result, div1000} do
{<< >>, div1000} -> say(div1000, acc, order + 1)
{result, 0} -> << result::binary, acc::binary >>
{result, div1000} -> say(div1000, << ?\s, result::binary, acc::binary >>, order + 1)
end
end
end
# File: lib/strictly_speaking/en.ex
defmodule Resourceful.Collection.Ecto.Filters do
@moduledoc """
Functions for converting Resourceful-style filters into Ecto-style SQL
filters.
See `Resourceful.Collection.Filter` for more details on filters.
There's a lot of duplication here that can, hopefully, be resolved with a
better understanding of macros.
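Illustrative usage (assumes an Ecto schema `MyApp.Album` and repo `MyApp.Repo`):

    MyApp.Album
    |> Resourceful.Collection.Ecto.Filters.equal(:artist, "Duran Duran")
    |> MyApp.Repo.all()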
"""
import Ecto.Query, warn: false
def equal(queryable, {alias_name, col}, nil) do
where(queryable, [_, {^alias_name, q}], is_nil(field(q, ^col)))
end
def equal(queryable, {alias_name, col}, val) do
where(queryable, [_, {^alias_name, q}], field(q, ^col) == ^val)
end
def equal(queryable, col, nil), do: where(queryable, [q], is_nil(field(q, ^col)))
def equal(queryable, col, val), do: where(queryable, [q], field(q, ^col) == ^val)
def greater_than(queryable, {alias_name, col}, val) do
where(queryable, [_, {^alias_name, q}], field(q, ^col) > ^val)
end
def greater_than(queryable, col, val), do: where(queryable, [q], field(q, ^col) > ^val)
def greater_than_or_equal(queryable, {alias_name, col}, val) do
where(queryable, [_, {^alias_name, q}], field(q, ^col) >= ^val)
end
def greater_than_or_equal(queryable, col, val) do
where(queryable, [q], field(q, ^col) >= ^val)
end
def exclude(queryable, {alias_name, col}, val) when is_list(val) do
where(queryable, [_, {^alias_name, q}], field(q, ^col) not in ^val)
end
def exclude(queryable, col, val) when is_list(val) do
where(queryable, [q], field(q, ^col) not in ^val)
end
def include(queryable, {alias_name, col}, val) when is_list(val) do
where(queryable, [_, {^alias_name, q}], field(q, ^col) in ^val)
end
def include(queryable, col, val) when is_list(val) do
where(queryable, [q], field(q, ^col) in ^val)
end
def less_than(queryable, {alias_name, col}, val) do
where(queryable, [_, {^alias_name, q}], field(q, ^col) < ^val)
end
def less_than(queryable, col, val), do: where(queryable, [q], field(q, ^col) < ^val)
def less_than_or_equal(queryable, {alias_name, col}, val) do
where(queryable, [_, {^alias_name, q}], field(q, ^col) <= ^val)
end
def less_than_or_equal(queryable, col, val) do
where(queryable, [q], field(q, ^col) <= ^val)
end
def not_equal(queryable, {alias_name, col}, nil) do
where(queryable, [_, {^alias_name, q}], not is_nil(field(q, ^col)))
end
def not_equal(queryable, {alias_name, col}, val) do
where(queryable, [_, {^alias_name, q}], field(q, ^col) != ^val)
end
def not_equal(queryable, col, nil), do: where(queryable, [q], not is_nil(field(q, ^col)))
def not_equal(queryable, col, val), do: where(queryable, [q], field(q, ^col) != ^val)
def starts_with(queryable, {alias_name, col}, val) when is_binary(val) do
where(
queryable,
[_, {^alias_name, q}],
like(field(q, ^col), ^"#{String.replace(val, "%", "\\%")}%")
)
end
def starts_with(queryable, col, val) when is_binary(val) do
where(queryable, [q], like(field(q, ^col), ^"#{String.replace(val, "%", "\\%")}%"))
end
end
|
lib/resourceful/collection/ecto/filters.ex
| 0.689306
| 0.680968
|
filters.ex
|
starcoder
|
defmodule Bot do
@moduledoc """
Intcode Wired Cameras and Vacuum Bot
"""
defstruct map: %{}, loc: {0, 0}, bot: nil, dir: nil, output: []
def print(map) do
{{min_x, max_x}, {min_y, max_y}} = bounds(map)
for y <- min_y..max_y do
for x <- min_x..max_x do
IO.write(print_char(Map.get(map, {x, y}, :empty)))
end
IO.write("\n")
end
:ok
end
defp bounds(map) do
pts = Map.keys(map)
x = Enum.map(pts, fn {x, _y} -> x end) |> Enum.min_max()
y = Enum.map(pts, fn {_x, y} -> y end) |> Enum.min_max()
{x, y}
end
defp print_char(:empty), do: "."
defp print_char(:scaffold), do: "#"
defp print_char(:bot), do: "B"
@doc """
Return the dust captured on a tour of the scaffold
"""
def tour(path) do
result = build_map(path)
route = wander(result)
{cnt, lst} = Enum.reduce(route, {0, []}, &collapse/2)
[_hd | route] = [cnt | lst] |> Enum.reverse()
route = Enum.join(route, ",")
IO.puts "ROUTE: #{inspect route}"
# "R,4,L,12,L,8,R,4,L,8,R,10,R,10,R,6,R,4,L,12,L,8,R,4,R,4,R,10,L,12,R,4,L,12,L,8,R,4,L,8,R,10,R,10,R,6,R,4,L,12,L,8,R,4,R,4,R,10,L,12,L,8,R,10,R,10,R,6,R,4,R,10,L,12"
# Built pattern by hand from the route string....
main = "A,B,A,C,A,B,A,C,B,C"
a = "R,4,L,12,L,8,R,4"
b = "L,8,R,10,R,10,R,6"
c = "R,4,R,10,L,12"
    result = evaluate(path, main, a, b, c)
val = List.last(result)
if val == ?\n, do: nil, else: val
end
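  # Run-length encodes the raw route: consecutive forward moves (1s) are
  # collapsed into a single step count, while turns ("L"/"R") flush the
  # current count and pass through.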
def collapse(1, {cnt, lst}) do
{cnt+1, lst}
end
def collapse(x, {cnt, lst}) do
{0, [x | [Integer.to_string(cnt) | lst]]}
end
def wander(%Bot{map: map, bot: loc, dir: d}) do
heading = dir(d)
num_cells = Map.keys(map) |> Enum.count()
step(map, loc, heading, num_cells, [loc], [])
end
def step(map, loc, heading, needed, seen, acc) do
f = forward(loc, heading)
left = forward(loc, turn(heading, :left))
right = forward(loc, turn(heading, :right))
cond do
Map.has_key?(map, f) ->
new_seen = [loc | seen] |> Enum.uniq()
step(map, f, heading, needed, new_seen, [1 | acc])
Map.has_key?(map, left) ->
step(map, loc, turn(heading, :left), needed, seen, ["L" | acc])
Map.has_key?(map, right) ->
step(map, loc, turn(heading, :right), needed, seen, ["R" | acc])
true -> Enum.reverse(acc)
end
end
def forward({x, y}, :north), do: {x, y-1}
def forward({x, y}, :south), do: {x, y+1}
def forward({x, y}, :east), do: {x+1, y}
def forward({x, y}, :west), do: {x-1, y}
  defp evaluate(path, main, a, b, c) do
{:ok, _pid} = Agent.start_link(fn -> %Bot{} end, name: __MODULE__)
code = Intcode.load(path) |> Intcode.poke(0, 2)
input = Enum.join([main, a, b, c, "n\n"], "\n") |> String.to_charlist()
Intcode.run(code, input, nil, &display_output/1)
result = Agent.get(__MODULE__, fn state -> state.output end) |> Enum.reverse()
Agent.stop(__MODULE__)
result
end
# Appears to print the starting map, then the prompts,
# then the final map, then the final output (if the robot stayed on the scaffold)
defp display_output(val) do
output = Agent.get(__MODULE__, fn state -> state.output end)
Agent.update(__MODULE__, fn state -> %Bot{state | output: [val | output]} end)
end
defp dir(?^), do: :north
defp dir(?v), do: :south
  defp dir(?<), do: :west
  defp dir(?>), do: :east
for {curr, dir, new} <- [{:north, :left, :west},
{:north, :right, :east},
{:south, :left, :east},
{:south, :right, :west},
{:east, :right, :south},
{:east, :left, :north},
{:west, :left, :south},
{:west, :right, :north}] do
def turn(unquote(curr), unquote(dir)), do: unquote(new)
end
@doc """
Compute the alignment value of the image
"""
def alignment(path) do
result = build_map(path)
print(result.map)
compute_params(result.map)
end
def compute_params(map) do
Map.keys(map)
|> get_intersections(map, [])
|> Enum.reduce(0, fn {x, y}, acc -> x * y + acc end)
end
defp get_intersections([], _map, acc), do: acc
defp get_intersections([head = {x, y} | rest], map, acc) do
neighbors = [head, {x - 1, y}, {x + 1, y}, {x, y - 1}, {x, y + 1}]
is_intersection = Enum.all?(neighbors, &is_scaffold?(map, &1))
new_acc = if is_intersection, do: [head | acc], else: acc
get_intersections(rest, map, new_acc)
end
defp is_scaffold?(map, loc), do: Map.has_key?(map, loc)
defp build_map(path) do
{:ok, _pid} = Agent.start_link(fn -> %Bot{loc: {0, 0}} end, name: __MODULE__)
code = Intcode.load(path)
Intcode.run(code, [], nil, &camera/1)
result = Agent.get(__MODULE__, fn state -> state end)
Agent.stop(__MODULE__)
result
end
defp camera(i) do
{map, loc, bot, dir} = Agent.get(__MODULE__, fn state -> {state.map, state.loc, state.bot, state.dir} end)
{new_map, new_loc, new_bot, new_dir} = update_state(i, map, loc, bot, dir)
Agent.update(__MODULE__, fn state -> %Bot{state | map: new_map, loc: new_loc, bot: new_bot, dir: new_dir} end)
end
def update_state(i, map, loc = {x, y}, bot, dir) do
cond do
i in [?^, ?<, ?v, ?>, ?X] ->
{Map.put(map, loc, :bot), {x + 1, y}, {x, y}, i}
i == ?# ->
{Map.put(map, loc, :scaffold), {x + 1, y}, bot, dir}
i == ?. ->
{map, {x + 1, y}, bot, dir}
i == ?\n ->
{map, {0, y + 1}, bot, dir}
true ->
IO.puts("HUH? #{inspect(i)}")
{map, loc, bot, dir}
end
end
end
|
apps/day17/lib/bot.ex
| 0.72662
| 0.47457
|
bot.ex
|
starcoder
|
defmodule Terminus.BitFS do
@moduledoc """
Module for interfacing with the [BitFS](https://bitfs.network) API.
BitFS crawls the Bitcoin blockchain to find and store all the bitcoin script
pushdata chunks **larger than 512 bytes**.
> BitFS is an autonomous file system constructed from Bitcoin transactions.
## BitFS URI scheme
Files are referenced using the [BitFS URI scheme](https://bitfs.network/about#urischeme).
# bitfs://<TRANSACTION_ID>.(in|out).<SCRIPT_INDEX>.<CHUNK_INDEX>
"bitfs://13513153d455cdb394ce01b5238f36e180ea33a9ccdf5a9ad83973f2d423684a.out.0.4"
  Terminus accepts BitFS URI strings with or without the `bitfs://` prefix.
## Usage
To simply return the binary data for any BitFS URI use `fetch/2`.
iex> Terminus.BitFS.fetch(uri)
{:ok, <<...>>}
It is also possible to scan entire transactions and automatically fetch the
binary data for any BitFS URIs discovered in the transaction. The binary data
is added to the same output script at the same index with a `d` prefixed attribute.
iex> tx = %{
...> "out" => [%{
...> "f4" => "13513153d455cdb394ce01b5238f36e180ea33a9ccdf5a9ad83973f2d423684a.out.0.4",
...> ...
...> }]
...> }
iex> Terminus.BitFS.scan_tx(tx)
%{
"out" => [%{
"f4" => "13513153d455cdb394ce01b5238f36e180ea33a9ccdf5a9ad83973f2d423684a.out.0.4",
"d4" => <<...>>,
...
}]
}
"""
use Terminus.HTTPStream, hosts: [bitfs: "https://x.bitfs.network"]
@doc """
Fetches the binary blob data from BitFS using the given BitFS URI.
Returns the result in an `:ok` / `:error` tuple pair.
By default the entire data response is returned, although optionally a
streaming `t:Enumerable.t/0` can be returned or a linked
GenStage [`pid`](`t:pid/0`).
## Options
The accepted options are:
* `stream` - Return a streaming `t:Enumerable.t/0`. Defaults to `false`.
* `stage` - Return a linked GenStage [`pid`](`t:pid/0`). Defaults to `false`.
## Examples
  Files are referenced using the [BitFS URI scheme](https://bitfs.network/about#urischeme).
# <TRANSACTION_ID>.(in|out).<SCRIPT_INDEX>.<CHUNK_INDEX>
"13513153d455cdb394ce01b5238f36e180ea33a9ccdf5a9ad83973f2d423684a.out.0.4"
By default `Terminus.BitFS.fetch/2` returns the binary data of the file.
iex> Terminus.BitFS.fetch(uri)
{:ok, <<...>>}
Optionally a streaming `t:Enumerable.t/0` can be returned.
iex> Terminus.BitFS.fetch(uri, stream: true)
{:ok, %Stream{}}
Or the [`pid`](`t:pid/0`) of the GenStage producer can be returned.
iex> Terminus.BitFS.fetch(uri, stage: true)
{:ok, #PID<>}
"""
@spec fetch(Terminus.bitfs_uri, keyword) :: {:ok, binary} | {:error, Exception.t}
def fetch(uri, options \\ [])
def fetch("bitfs://" <> uri, options),
do: fetch(uri, options)
def fetch(uri, options) when is_binary(uri) do
path = "/" <> uri
options = Keyword.take(options, [:token, :host, :stream, :stage])
case Keyword.get(options, :stream, false) do
true ->
request(:stream, "GET", path, options)
_ ->
request(:fetch, "GET", path, options)
end
end
@doc """
As `fetch/2` but returns the result or raises an exception if it fails.
"""
@spec fetch!(Terminus.bitfs_uri, keyword) :: binary
def fetch!(uri, options \\ []) do
case fetch(uri, options) do
{:ok, data} ->
data
{:error, error} ->
raise error
end
end
@doc """
Scans the given transaction [`map`](`t:map/0`), fetches data for any
BitFS URI references, and returns a modified [`map`](`t:map/0`) with the data
added.
The function handles both `txo` and `bob` schema transactions. See the
examples for `scan_script/1`.
"""
@spec scan_tx(map) :: map
def scan_tx(%{"out" => outputs} = tx) when is_list(outputs) do
outputs = outputs
|> Enum.map(&scan_script/1)
put_in(tx["out"], outputs)
end
@doc """
Scans the given transaction script [`map`](`t:map/0`), fetches data for any
BitFS URI references, and returns a modified [`map`](`t:map/0`) with the data
added.
Handles both `txo` and `bob` schema scripts.
## Examples
Where a BitFS reference is found in a `txo` script, the fetched data is added
to the script at the same index as the reference. For example, if a BitFS
reference is found at `f4`, a new attribute `d4` is put into the script
containing the data.
iex> output = %{
...> "f4" => "13513153d455cdb394ce01b5238f36e180ea33a9ccdf5a9ad83973f2d423684a.out.0.4",
...> ...
...> }
iex> Terminus.BitFS.scan_script(output)
%{
"f4" => "13513153d455cdb394ce01b5238f36e180ea33a9ccdf5a9ad83973f2d423684a.out.0.4",
"d4" => <<...>>, # binary data added
...
}
With `bob` scripts, each cell is scanned and where a BitFS reference is found,
a new `d` attribute is added to that cell containing the fetched data.
iex> output = %{"tape" => [
...> %{"cell" => %{
...> "f" => "13513153d455cdb394ce01b5238f36e180ea33a9ccdf5a9ad83973f2d423684a.out.0.4",
...> ...
...> }},
...> ...
...> ]}
iex> Terminus.BitFS.scan_script(output)
%{"tape" => [
%{"cell" => %{
"f" => "13513153d455cdb394ce01b5238f36e180ea33a9ccdf5a9ad83973f2d423684a.out.0.4",
"d" => <<...>>, # binary data added
...
}},
...
]}
"""
@spec scan_script(map) :: map
# Handles TXO script
def scan_script(%{"len" => len} = output) when is_integer(len) do
Map.keys(output)
|> Enum.filter(& Regex.match?(~r/^f\d+/, &1))
|> Enum.reduce(output, &reduce_txo/2)
end
# Handles BOB script
def scan_script(%{"tape" => tape} = output) when is_list(tape) do
tape = Enum.map(tape, &map_bob_cell/1)
put_in(output["tape"], tape)
end
# Reduce TXO output
defp reduce_txo(key, output) do
[_, num] = String.split(key, "f")
case fetch(output[key]) do
{:ok, data} -> Map.put(output, "d"<>num, data)
{:error, _} -> output
end
end
# Map BOB cell
defp map_bob_cell(%{"cell" => params} = cell) do
params
|> Enum.filter(& Map.has_key?(&1, "f"))
|> Enum.reduce(cell, &reduce_bob/2)
end
# Reduce BOB cell
defp reduce_bob(%{"f" => uri, "i" => i} = params, cell) do
params = case fetch(uri) do
{:ok, data} -> Map.put(params, "d", data)
{:error, _} -> params
end
update_in(cell["cell"], & List.replace_at(&1, i, params))
end
end
|
lib/terminus/bitfs.ex
| 0.888099
| 0.639989
|
bitfs.ex
|
starcoder
|
defmodule Ecto.Sort do
@moduledoc """
Ecto.Sort is a simple module which provides a macro for explicitly applying ecto order_by expressions.
### Example
```elixir
defmodule Post do
use Ecto.Schema
schema "post" do
field(:name, :string)
field(:featured, :boolean)
has_many(:comments, Comment)
timestamps()
end
end
defmodule Comment do
use Ecto.Schema
schema "comment" do
field(:body, :string)
timestamps()
end
end
defmodule Posts do
import Ecto.Query, warn: false
use Ecto.Sort
add_sort(:inserted_at)
add_sort([:name, :featured])
add_sort(:comments_inserted_at, fn direction, query ->
query
|> join(:left, [p], c in assoc(p, :comments), as: :comments)
|> order_by([comments: comments], [{^direction, comments.inserted_at}])
end)
  defp query_posts(params \\ %{}) do
apply_sorting(Post, params)
end
def list_posts(params \\ %{}), do: Repo.all(query_posts(params))
def get_post(params \\ %{}), do: Repo.one(query_posts(params))
end
```
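  Sort params are read from the `"s"` (or `:s`) key of the params map; an
  illustrative call might look like:
  ```elixir
  # Orders posts by name, ascending.
  Posts.list_posts(%{"s" => %{"name" => :asc}})
  ```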
"""
defmacro add_sort(key) when is_atom(key) do
quote location: :keep do
add_sort([unquote(key)])
end
end
defmacro add_sort(keys) when is_list(keys) do
for key <- keys do
quote location: :keep do
defp sort({unquote(key), direction}, query) do
order_by(query, {^String.to_existing_atom(direction), ^unquote(key)})
end
end
end
end
defmacro add_sort(key, fun) do
quote location: :keep do
defp sort({unquote(key), direction}, query) do
unquote(fun).(String.to_existing_atom(direction), query)
end
end
end
defmacro __using__(_opts) do
quote location: :keep do
import Ecto.Sort
defp apply_sorting(original_query, params) do
params = convert_params(params)
Enum.reduce(params, original_query, fn {key, value}, query ->
try do
sort({key, value}, query)
rescue
_ -> query
end
end)
end
defp convert_params(%{"s" => params}), do: params |> Map.to_list() |> convert_params()
defp convert_params(%{s: params}), do: params |> Map.to_list() |> convert_params()
defp convert_params(s: params), do: convert_params(params)
defp convert_params(params) when is_list(params) do
Enum.map(params, fn
{key, direction} when is_binary(key) and is_atom(direction) ->
{String.to_atom(key), Atom.to_string(direction)}
{key, direction} when is_atom(key) and is_atom(direction) ->
{key, Atom.to_string(direction)}
{key, direction} when is_binary(key) and is_binary(direction) ->
{String.to_atom(key), direction}
param ->
param
end)
end
defp convert_params(_), do: []
end
end
end
|
lib/ecto/sort.ex
| 0.863837
| 0.787032
|
sort.ex
|
starcoder
|
defmodule NotQwerty123.WordlistManager do
@moduledoc """
Module to manage the common password list and handle password checks.
The main function that NotQwerty123 performs is to check that the
password or a variant of the password (in other words, even with minor changes)
is not in the list of common passwords that this WordlistManager stores.
By default, this common password list contains one file, which
has a list of over 40,000 common passwords in it - this file
was collated by [zxcvbn](https://github.com/dropbox/zxcvbn),
a reputable password strength meter.
Other files can be added to this common password list, and there
are example files in the `priv/data` directory (these files are
also taken from the zxcvbn repository).
In order to make the password strength check even stronger, it
is also recommended to add to the list words that are associated with
the site you are managing.
## Managing the common password list
The following functions can be used to manage this list:
  * list_files/0 - list the files used to create the wordlist
* push/1 - add a file to the wordlist
* pop/1 - remove a file from the wordlist
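  ## Example
  Illustrative only -- the result depends on which wordlists are loaded:
      # "p@55w0rd" is a common-substitution variant of "password".
      NotQwerty123.WordlistManager.query("p@55w0rd", 8)
      #=> true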
"""
use GenServer
@sub_dict %{
"!" => ["i"],
"@" => ["a"],
"$" => ["s"],
"%" => ["x"],
"(" => ["c"],
"[" => ["c"],
"+" => ["t"],
"|" => ["i", "l"],
"0" => ["o"],
"1" => ["i", "l"],
"2" => ["z"],
"3" => ["e"],
"4" => ["a"],
"5" => ["s"],
"6" => ["g"],
"7" => ["t"],
"8" => ["b"],
"9" => ["g"]
}
def start_link() do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
def init([]) do
{:ok, create_list()}
end
@doc """
Search the wordlist to see if the password is too common.
If the password is greater than 24 characters long, this
function returns false without performing any checks.
"""
def query(password, word_len) when word_len < 25 do
GenServer.call(__MODULE__, {:query, password})
end
def query(_, _), do: false
@doc """
List the files used to create the common password list.
"""
def list_files, do: GenServer.call(__MODULE__, :list_files)
@doc """
Add a file to the common password list.
`path` is the pathname of the file, which should contain one
password on each line, that you want to include.
The file is parsed and the words are added to the common password
list. A copy of the file is also copied to the
not_qwerty123/priv/wordlists directory.
If adding the file results in a timeout error, try splitting
the file into smaller files and adding them.
"""
def push(path), do: GenServer.cast(__MODULE__, {:push, path})
@doc """
Remove a file from the common password list.
`path` is the file name as it is printed out in the `list_files`
function.
"""
def pop(path), do: GenServer.cast(__MODULE__, {:pop, path})
def handle_call({:query, password}, _from, state) do
{:reply, run_check(state, password), state}
end
def handle_call(:list_files, _from, state) do
{:reply, File.ls!(wordlist_dir()), state}
end
def handle_cast({:push, path}, state) do
new_state =
case File.read(path) do
{:ok, words} ->
Path.join(wordlist_dir(), Path.basename(path)) |> File.write(words)
add_words(words) |> :sets.union(state)
_ ->
state
end
{:noreply, new_state}
end
def handle_cast({:pop, "common_passwords.txt"}, state) do
{:noreply, state}
end
def handle_cast({:pop, path}, state) do
new_state =
case File.rm(Path.join(wordlist_dir(), path)) do
:ok -> create_list()
_ -> state
end
{:noreply, new_state}
end
def handle_info(_msg, state) do
{:noreply, state}
end
defp wordlist_dir(), do: Application.app_dir(:not_qwerty123, ~w(priv wordlists))
defp create_list do
File.ls!(wordlist_dir())
|> Enum.map(
&(Path.join(wordlist_dir(), &1)
|> File.read!()
|> add_words)
)
|> :sets.union()
end
defp add_words(data) do
data
|> String.downcase()
|> String.split("\n")
|> Enum.flat_map(&list_alternatives/1)
|> :sets.from_list()
end
defp run_check(wordlist, password) do
words = list_alternatives(password)
alternatives =
words ++
Enum.map(words, &String.slice(&1, 1..-1)) ++
Enum.map(words, &String.slice(&1, 0..-2)) ++ Enum.map(words, &String.slice(&1, 1..-2))
reversed = Enum.map(alternatives, &String.reverse(&1))
Enum.any?(alternatives ++ reversed, &:sets.is_element(&1, wordlist))
end
defp list_alternatives(""), do: [""]
defp list_alternatives(password) do
for i <- substitute(password) |> product, do: Enum.join(i)
end
defp substitute(word) do
for <<letter <- word>>, do: Map.get(@sub_dict, <<letter>>, [<<letter>>])
end
defp product([h]), do: for(i <- h, do: [i])
defp product([h | t]) do
for i <- h,
j <- product(t),
do: [i | j]
end
end
|
lib/not_qwerty123/wordlist_manager.ex
| 0.672977
| 0.484746
|
wordlist_manager.ex
|
starcoder
|
defmodule Relax.Resource.FetchAll do
use Behaviour
@moduledoc """
Include in your resource to respond to GET / and GET /?filter[foo]=bar.
Typically brought into your resource via `use Relax.Resource`.
FetchAll defines three callback behaviours, all of which have a default,
overrideable implementation.
In addition this module uses `Relax.Resource.Fetchable`, for shared model
lookup logic with Relax.Resource.FetchOne.
"""
@type fetchable :: module | Ecto.Query.t | list
@doc """
Lookup a collection to return.
  This often returns an Ecto.Query or an Ecto.Model module name that has not
yet been sent to the repo. Relax will execute the query after filtering and
format the response appropriately.
# Return all models
def fetch_all(_conn), do: MyApp.Models.Post
# Return models limited to the current_user
def fetch_all(conn), do: Ecto.assoc(conn.assigns[:current_user], :posts)
It may also return a list of structs/models:
def fetch_all(_conn), do: [%Post{title: "foo"}, %Post{title: "bar"}]
A conn may also be returned:
def fetch_all(conn), do: halt send_resp(conn, 401, "no posts for you")
By default it returns the value of `fetchable/1` directly:
def fetch_all(conn), do: fetchable(conn)
"""
defcallback fetch_all(Plug.Conn.t) :: Plug.Conn.t | fetchable
@doc """
This callback can be used to completely override the fetch_all action.
It accepts a Plug.Conn and must return a Plug.Conn.t
"""
defcallback fetch_all_resources(Plug.Conn.t) :: Plug.Conn.t
@doc ~S"""
Defines allowed filters for your resource.
  By default filters are ignored unless defined. Filters receive the filter
  keyword, the query or collection returned from `fetch_all/1`, and the filter
  value.
Examples:
# Ecto
def filter("search", query, value) do
Ecto.Query.where(queryable, [p], ilike(a.body, ^"%#{value}%"))
end
# Lists
def filter("popular", posts, value) do
Enum.filter(posts, fn(p) -> p.likes > 10 end)
end
"""
defcallback filter(String.t, fetchable, String.t) :: fetchable
@doc """
Defines allowed sort fields for your resource.
  By default sort params are ignored unless defined. Sorts receive the field,
  the query or collection returned from `fetch_all/1`, and the atom :asc or :desc.
Examples:
# Ecto (asc)
# /resource/?sort=likes,rating
def sort("likes", query, :asc) do
Ecto.Query.order_by(query, [t], asc: :likes)
end
def sort("rating", query, :asc) do
Ecto.Query.order_by(query, [t], asc: :rating)
end
# Lists (desc)
# /resource/?sort=-name
def sort("name", list, :desc) do
Enum.sort_by(list, &(&1.name)) |> Enum.reverse
end
"""
defcallback sort(String.t, fetchable, atom) :: fetchable
@doc false
defmacro __using__(_) do
quote location: :keep do
use Relax.Resource.Fetchable
@behaviour Relax.Resource.FetchAll
@before_compile Relax.Resource.FetchAll
def do_resource(conn, "GET", []) do
fetch_all_resources(conn)
end
def fetch_all_resources(conn) do
conn
|> fetch_all
|> Relax.Resource.FetchAll.filter(conn, __MODULE__)
|> Relax.Resource.FetchAll.sort(conn, __MODULE__)
|> Relax.Resource.FetchAll.execute_query(__MODULE__)
|> Relax.Resource.FetchAll.respond(conn, __MODULE__)
end
def fetch_all(conn), do: fetchable(conn)
defoverridable [fetch_all_resources: 1, fetch_all: 1]
end
end
defmacro __before_compile__(_) do
quote do
def filter(_, results, _), do: results
def sort(_, results, _), do: results
end
end
@doc false
def filter(results, conn, resource) do
case conn.query_params["filter"] do
nil -> results
%{} = filters ->
filters
        |> Dict.keys
        |> Enum.reduce(results, fn(k, acc) ->
          resource.filter(k, acc, filters[k])
        end)
end
end
@sort_regex ~r/(-?)(\S*)/
@doc false
def sort(results, conn, resource) do
case conn.query_params["sort"] do
nil -> results
fields ->
fields
|> String.split(",")
|> Enum.reduce results, fn(field, acc) ->
case Regex.run(@sort_regex, field) do
[_, "", field] -> resource.sort(field, results, :asc)
[_, "-", field] -> resource.sort(field, results, :desc)
end
end
end
end
@doc false
def execute_query(results, resource) do
module = resource.model
case results do
%Ecto.Query{} = q -> resource.repo.all(q)
^module = q -> resource.repo.all(q)
other -> other
end
end
@doc false
def respond(%Plug.Conn{} = c, _oldconn, _resource), do: c
def respond(models, conn, resource) do
Relax.Responders.send_json(conn, 200, models, resource)
end
end
|
lib/relax/resource/fetch_all.ex
| 0.717903
| 0.406302
|
fetch_all.ex
|
starcoder
|
defmodule Sanbase.Utils.Transform do
def wrap_ok(data), do: {:ok, data}
@doc ~s"""
  Transform the maps from the :ok tuple data so the value under `old_key` is
  duplicated under `new_key`, preserving the original key.
## Examples:
iex> Sanbase.Utils.Transform.duplicate_map_keys({:ok, [%{a: 1}, %{a: 2}]}, old_key: :a, new_key: :b)
{:ok, [%{a: 1, b: 1}, %{a: 2, b: 2}]}
iex> Sanbase.Utils.Transform.duplicate_map_keys({:ok, [%{a: 1}, %{d: 2}]}, old_key: :a, new_key: :b)
{:ok, [%{a: 1, b: 1}, %{d: 2}]}
iex> Sanbase.Utils.Transform.duplicate_map_keys({:error, "bad"}, old_key: :a, new_key: :b)
{:error, "bad"}
"""
@spec duplicate_map_keys({:ok, list(map)}, keyword(atom)) :: {:ok, list(map)}
@spec duplicate_map_keys({:error, any()}, keyword(atom)) :: {:error, any()}
def duplicate_map_keys({:ok, data}, opts) do
old_key = Keyword.fetch!(opts, :old_key)
new_key = Keyword.fetch!(opts, :new_key)
result =
data
|> Enum.map(fn
%{^old_key => value} = elem -> elem |> Map.put(new_key, value)
elem -> elem
end)
{:ok, result}
end
def duplicate_map_keys({:error, error}, _opts) do
{:error, error}
end
@doc ~s"""
  Transform the maps from the :ok tuple data so `old_key` is renamed to
  `new_key`, preserving its value.
## Examples:
iex> Sanbase.Utils.Transform.rename_map_keys({:ok, [%{a: 1}, %{a: 2}]}, old_key: :a, new_key: :b)
{:ok, [%{b: 1}, %{b: 2}]}
iex> Sanbase.Utils.Transform.rename_map_keys({:ok, [%{a: 1}, %{d: 2}]}, old_key: :a, new_key: :b)
{:ok, [%{b: 1}, %{d: 2}]}
iex> Sanbase.Utils.Transform.rename_map_keys({:error, "bad"}, old_key: :a, new_key: :b)
{:error, "bad"}
"""
@spec rename_map_keys({:ok, list(map)}, keyword(atom())) :: {:ok, list(map)}
@spec rename_map_keys({:error, any()}, keyword(atom())) :: {:error, any()}
def rename_map_keys({:ok, data}, opts) do
old_key = Keyword.fetch!(opts, :old_key)
new_key = Keyword.fetch!(opts, :new_key)
result =
data
|> Enum.map(fn
%{^old_key => value} = elem ->
elem |> Map.delete(old_key) |> Map.put(new_key, value)
elem ->
elem
end)
{:ok, result}
end
def rename_map_keys({:error, error}, _) do
{:error, error}
end
def rename_map_keys!(map, old_keys: old_keys, new_keys: new_keys) do
old_new_keys_map = Enum.zip(old_keys, new_keys) |> Enum.into(%{})
map
|> Enum.map(fn {k, v} -> {old_new_keys_map[k] || k, v} end)
|> Enum.into(%{})
end
def maybe_unwrap_ok_value({:ok, [value]}), do: {:ok, value}
def maybe_unwrap_ok_value({:ok, []}), do: {:ok, nil}
def maybe_unwrap_ok_value({:error, error}), do: {:error, error}
def maybe_apply_function({:ok, list}, fun) when is_function(fun, 1),
do: {:ok, fun.(list)}
def maybe_apply_function({:error, error}, _), do: {:error, error}
@doc ~s"""
Sums the values of all keys with the same datetime
## Examples:
iex> Sanbase.Utils.Transform.sum_by_datetime([%{datetime: ~U[2019-01-01 00:00:00Z], val: 2}, %{datetime: ~U[2019-01-01 00:00:00Z], val: 3}, %{datetime: ~U[2019-01-02 00:00:00Z], val: 2}], :val)
[%{datetime: ~U[2019-01-01 00:00:00Z], val: 5}, %{datetime: ~U[2019-01-02 00:00:00Z], val: 2}]
iex> Sanbase.Utils.Transform.sum_by_datetime([], :key)
[]
"""
@spec sum_by_datetime(list(map), atom()) :: list(map)
def sum_by_datetime(data, key) do
data
|> Enum.group_by(& &1[:datetime], & &1[key])
|> Enum.map(fn {datetime, list} ->
value =
case list do
[] -> 0
[_ | _] = list -> Enum.sum(list)
end
%{:datetime => datetime, key => value}
end)
|> Enum.sort_by(&DateTime.to_unix(&1[:datetime]))
end
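  @doc ~s"""
  Merges two lists of datetime-keyed maps by applying `func` to the pair of
  values under `field` for every datetime in `list1` (datetimes missing from
  `list2` contribute 0). Entries whose merged value is 0 are rejected, and
  the result always uses the `:value` key.
  ## Examples:
      iex> Sanbase.Utils.Transform.merge_by_datetime([%{datetime: ~U[2019-01-01 00:00:00Z], val: 2}], [%{datetime: ~U[2019-01-01 00:00:00Z], val: 3}], &Kernel.+/2, :val)
      [%{datetime: ~U[2019-01-01 00:00:00Z], value: 5}]
  """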
def merge_by_datetime(list1, list2, func, field) do
map = list2 |> Enum.into(%{}, fn %{datetime: dt} = item2 -> {dt, item2[field]} end)
list1
|> Enum.map(fn %{datetime: datetime} = item1 ->
value2 = Map.get(map, datetime, 0)
new_value = func.(item1[field], value2)
%{datetime: datetime, value: new_value}
end)
|> Enum.reject(&(&1.value == 0))
end
def maybe_transform_from_address("0x0000000000000000000000000000000000000000"), do: "mint"
def maybe_transform_from_address(address), do: address
def maybe_transform_to_address("0x0000000000000000000000000000000000000000"), do: "burn"
def maybe_transform_to_address(address), do: address
@doc ~s"""
Remove the `separator` inside the value of the key `key` in the map `map`
## Examples:
iex> Sanbase.Utils.Transform.remove_separator(%{a: "100,000"}, :a, ",")
%{a: "100000"}
iex> Sanbase.Utils.Transform.remove_separator(%{a: "100,000", b: "5,000"}, :a, ",")
%{a: "100000", b: "5,000"}
iex> Sanbase.Utils.Transform.remove_separator(%{a: "100,000"}, :c, ",")
%{a: "100,000"}
"""
def remove_separator(map, key, separator) do
case Map.fetch(map, key) do
:error -> map
{:ok, value} -> Map.put(map, key, String.replace(value, separator, ""))
end
end
def maybe_fill_gaps_last_seen({:ok, values}, key) do
result =
values
|> Enum.reduce({[], 0}, fn
%{has_changed: 0} = elem, {acc, last_seen} ->
elem = Map.put(elem, key, last_seen) |> Map.delete(:has_changed)
{[elem | acc], last_seen}
%{has_changed: 1} = elem, {acc, _last_seen} ->
elem = Map.delete(elem, :has_changed)
{[elem | acc], elem[key]}
end)
|> elem(0)
|> Enum.reverse()
{:ok, result}
end
def maybe_fill_gaps_last_seen({:error, error}, _key), do: {:error, error}
@spec opts_to_limit_offset(page: non_neg_integer(), page_size: pos_integer()) ::
{pos_integer(), non_neg_integer()}
def opts_to_limit_offset(opts) do
page = Keyword.get(opts, :page, 1)
page_size = Keyword.get(opts, :page_size, 10)
offset = (page - 1) * page_size
{page_size, offset}
end
end
|
lib/sanbase/utils/transform.ex
| 0.84653
| 0.67652
|
transform.ex
|
starcoder
|
defmodule Day23 do
def part1(lines) do
bots = lines
|> Enum.map(&parse_line/1)
{strongest_pos, radius} = Enum.max_by(bots, fn {_, r} -> r end)
bots
|> Stream.filter(fn {pos, _} ->
manhattan_distance(strongest_pos, pos) <= radius
end)
|> Enum.count
end
def part2(lines) do
bots = lines
|> Enum.map(&parse_line/1)
bounding_box = bounding_box(bots)
scale_while(bots, bounding_box)
end
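  # Part 2 strategy: scale all bot positions and radii down until the
  # bounding box's shortest axis is only a few units wide, brute-force the
  # coordinate covered by the most bots at that coarse resolution, then
  # zoom back in around that point (reduce_bounding_box keeps a margin)
  # and repeat until no further scaling is possible.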
def num_overlapping(lines, coordinate) do
lines
|> Enum.map(&parse_line/1)
|> Enum.filter(fn bot -> in_range?(bot, coordinate) end)
|> Enum.count
end
defp scale_while(bots, bounding_box) do
{scaled_bots, scaled_bounding_box, scaling} =
scale_bots(bots, bounding_box)
#IO.inspect bounding_box, label: 'bounding box'
#IO.inspect scaled_bounding_box, label: 'scaled bounding box'
{_, most_freq_pos} = result = scaled_bots
|> most_frequent_coord(scaled_bounding_box)
case scaling do
nil ->
result
_ ->
bounding_box = reduce_bounding_box(scaling, most_freq_pos)
scale_while(bots, bounding_box)
end
end
defp scale_bots(bots, bounding_box) do
min_coord = min_coords(bots)
case scale_factor(bounding_box) do
nil ->
#IO.inspect 'no scaling'
#IO.inspect bounding_box, label: 'bounding box'
        {bots, bounding_box, nil}
scale ->
#IO.inspect scale, label: 'scale factor'
bots =
Enum.map(bots, fn {coord, r} ->
{scale(sub(coord, min_coord), scale), scale(r, scale)}
end)
{min, max} = bounding_box
bounding_box = {scale(sub(min, min_coord), scale),
scale(sub(max, min_coord), scale)}
{bots, bounding_box, {min_coord, scale}}
end
end
defp scale_factor({min, max}) do
extents = sub(max, min)
axis = shortest_axis(extents)
extent = elem(extents, axis)
n = 4
if extent > n do
div(extent, n)
else
nil
end
end
# Reduce the bounding area to be around the area with the
# highest number of overlapping coordinates. Add some considerable
# margin.
defp reduce_bounding_box({{x0, y0, z0}, scale}, {x, y, z}) do
x_min = (x - 1) * scale + x0
y_min = (y - 1) * scale + y0
z_min = (z - 1) * scale + z0
min = {x_min, y_min, z_min}
max = {x_min + 3 * scale - 1,
y_min + 3 * scale - 1,
z_min + 3 * scale - 1}
{min, max}
end
defp min_coords(bots) do
{acc, _} = hd(bots)
Enum.reduce(bots, acc, fn {{x, y, z}, _}, {min_x, min_y, min_z} ->
{min(x, min_x), min(y, min_y), min(z, min_z)}
end)
end
defp sub({x, y, z}, {x0, y0, z0}) do
{x - x0, y - y0, z - z0}
end
defp scale({x, y, z}, scale) do
{div(x, scale), div(y, scale), div(z, scale)}
end
defp scale(integer, scale) do
# Round up the radius.
div(integer + scale - 1, scale)
end
defp most_frequent_coord(bots, bounding_box) do
bots
|> count_coordinates(bounding_box, [])
|> Enum.min_by(fn {pos, count} ->
{-count, manhattan_distance(pos, {0, 0, 0})}
end)
|> (fn {pos, _} ->
{manhattan_distance(pos, {0, 0, 0}), pos}
end).()
end
defp count_coordinates(bots, bb, acc) do
{{min_x, min_y, min_z}, {max_x, max_y, max_z}} = bb
Enum.reduce(min_x..max_x, acc, fn x, acc ->
Enum.reduce(min_y..max_y, acc, fn y, acc ->
Enum.reduce(min_z..max_z, acc, fn z, acc ->
pos = {x, y, z}
num_coords = bots
|> Stream.filter(fn bot -> in_range?(bot, pos) end)
|> Enum.count
[{pos, num_coords} | acc]
end)
end)
end)
end
defp in_range?({center, radius}, coordinate) do
manhattan_distance(center, coordinate) <= radius
end
defp shortest_axis(extents) do
Tuple.to_list(extents)
|> Enum.with_index
|> Enum.min
|> (fn {_, axis} -> axis end).()
end
defp bounding_box(bots) do
{pos, _r} = hd(bots)
acc = {pos, pos}
Enum.reduce(bots, acc, fn {{x, y, z}, _r}, {min, max} ->
{min_x, min_y, min_z} = min
min = {min(x, min_x), min(y, min_y), min(z, min_z)}
{max_x, max_y, max_z} = max
max = {max(x, max_x), max(y, max_y), max(z, max_z)}
{min, max}
end)
end
defp manhattan_distance({x0, y0, z0}, {x, y, z}) do
abs(x0 - x) + abs(y0 - y) + abs(z0 - z)
end
defp parse_line(line) do
re = ~r/^pos=<(-?\d+),(-?\d+),(-?\d+)>,\s*r=(\d+)$/
numbers = Regex.run re, line, capture: :all_but_first
[x, y, z, r] = Enum.map(numbers, &String.to_integer/1)
{{x, y, z}, r}
end
end
|
day23/lib/day23.ex
| 0.723016
| 0.694458
|
day23.ex
|
starcoder
|
defmodule KaufmannEx.ReleaseTasks.MigrateSchemas do
@moduledoc """
Task for registering all schemas in `priv/schemas` with the schema registry.
Expects
- schemas to be defined in `priv/schemas`.
- an `event_metadata.avsc` schema should be defined and required by all events
  Can be called in a production attached console, or via a release task. Should not have any requirements beyond itself.
This script will load all required dependencies and should not need further configuration.
```
# Attempt to create or update all schemas in `priv/schemas`
KaufmannEx.ReleaseTasks.MigrateSchemas.migrate_schemas(:app_name)
# delete and recreate all schemas
KaufmannEx.ReleaseTasks.MigrateSchemas.reset_schemas(:app_name)
```
"""
# credo:disable-for-this-file Credo.Check.Warning.IoInspect
alias KaufmannEx.Transcoder.SevenAvro.Schema.Registry
defp ensure_startup do
:ok = Application.ensure_started(:logger)
{:ok, _} = Application.ensure_all_started(:httpoison)
{:ok, _} = Application.ensure_all_started(:kaufmann_ex)
end
defp priv_dir(app) do
app
|> :code.priv_dir()
|> to_string
end
@doc """
Attempts to update all schemas defined in `app/priv/schemas`.
  Expects an `event_metadata.avsc` metadata schema to be defined for all other schemas.
"""
def migrate_schemas(app \\ :kaufmann_ex)
def migrate_schemas(path) when is_binary(path) do
ensure_startup()
true = File.exists?(path)
_ = load_metadata(path)
path
|> read_schemas()
    |> Enum.map(&register_schema/1)
|> Enum.map(&IO.inspect/1)
end
def migrate_schemas(app) do
ensure_startup()
IO.puts("Migrating Schemas")
meta_data_schema = load_metadata(app)
app
|> priv_dir()
|> Path.join("schemas")
|> scan_dir()
|> Enum.map(&load_and_parse_schema/1)
|> Enum.map(&inject_metadata(&1, meta_data_schema))
    |> Enum.map(&register_schema/1)
|> Enum.map(&IO.inspect/1)
end
def read_schemas(path) do
meta_data_schema = load_metadata(path)
path
|> scan_dir()
|> Enum.map(&load_and_parse_schema/1)
|> Enum.map(&inject_metadata(&1, meta_data_schema))
end
@doc """
Attempts to delete and recreate all schemas defined in `app/priv/schemas`
  Expects an `event_metadata.avsc` metadata schema to be defined for all other schemas.
"""
def reset_schemas(app \\ :kaufmann_ex) do
ensure_startup()
IO.puts("Resetting Schemas")
meta_data_schema = load_metadata(app)
app
|> priv_dir()
|> Path.join("schemas")
|> scan_dir()
|> Enum.map(&load_and_parse_schema/1)
|> Enum.map(&inject_metadata(&1, meta_data_schema))
|> Enum.map(&reset_schema/1)
|> IO.inspect()
# |> Enum.map(&IO.inspect/1)
end
def load_metadata(path) when is_binary(path) do
meta_data_schema =
Path.wildcard([path, "/**/event_metadata.avsc"])
|> Enum.at(0)
|> load_and_parse_schema()
{:ok, _, _} = register_schema(meta_data_schema)
meta_data_schema
end
def load_metadata(app) do
app
|> priv_dir()
|> Path.join("schemas")
|> load_metadata()
end
@spec register_schema({String.t(), map}) :: {atom, String.t(), any}
def register_schema({event_name, schema}) do
with {:ok, status} <- update_schema({event_name, schema}) do
{:ok, event_name, status}
else
{:error, error} ->
{:error, event_name, error}
end
end
defp update_schema({event_name, schema}) do
case Registry.register(event_name, schema) do
{:ok, _} ->
{:ok, "Schema updated"}
{:error, %{"error_code" => 409}} ->
{:error, "Incompatible schema"}
{:error, error} ->
{:error, error}
end
end
def reset_schema({event_name, schema}) do
_ = Registry.delete(event_name)
{:ok, _} = Registry.register(event_name, schema)
end
@spec load_and_parse_schema(Path.t()) :: {String.t(), map}
defp load_and_parse_schema(schema_path) do
{:ok, schema} =
schema_path
|> File.read!()
|> Jason.decode()
schema_name = schema_path |> Path.basename() |> String.trim(".avsc")
{schema_name, schema}
end
def inject_metadata({event_name, event_schema}, {_, meta_data_schema}) do
# Only inject metadata into event-type schemas
if String.match?(event_name, ~r/command\.|event\.|query\./) do
{event_name, [meta_data_schema, event_schema]}
else
{event_name, event_schema}
end
end
def scan_dir(dir) do
Path.wildcard([dir, "/**/*.avsc"])
end
end
|
lib/kaufmann_ex/release_tasks/migrate_schemas.ex
| 0.703957
| 0.607023
|
migrate_schemas.ex
|
starcoder
|
defmodule AdventOfCode.Solutions.Day13 do
@moduledoc """
Solution for day 13 exercise.
### Exercise
https://adventofcode.com/2021/day/13
"""
require Logger
def run(filename) do
{points, instructions} =
filename
|> File.read!()
|> parse_input()
first_fold = hd(instructions)
result =
points
|> execute_folds([first_fold])
|> Enum.count()
IO.puts("Number of points after first fold: #{result}")
IO.puts("Code to introduce:")
points
|> execute_folds(instructions)
|> print_code()
end
defp parse_input(file_content) do
[points_raw, instructions_raw] =
file_content
|> String.replace("\r\n", "\n")
|> String.split("\n\n", trim: true)
points =
points_raw
|> String.split("\n", trim: true)
|> Enum.map(fn coords ->
[x, y] =
coords
|> String.split(",")
|> Enum.map(&String.to_integer/1)
{x, y}
end)
instructions =
instructions_raw
|> String.split("\n", trim: true)
|> Enum.map(fn
<<"fold along y=", coord::binary>> ->
{:y, String.to_integer(coord)}
<<"fold along x=", coord::binary>> ->
{:x, String.to_integer(coord)}
end)
{points, instructions}
end
defp execute_folds(points, folds) do
Enum.reduce(folds, points, &execute_fold/2)
end
defp execute_fold({:x, coord}, points) do
points_to_keep = Enum.filter(points, fn {x, _y} -> x < coord end)
points_folded =
points
|> Enum.filter(fn {x, _y} -> x > coord end)
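      # Reflect across the fold line: for x > coord, x - 2 * abs(x - coord) == 2 * coord - x.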
|> Enum.map(fn {x, y} ->
{x - 2 * abs(x - coord), y}
end)
Enum.uniq(points_to_keep ++ points_folded)
end
defp execute_fold({:y, coord}, points) do
points_to_keep = Enum.filter(points, fn {_x, y} -> y < coord end)
points_folded =
points
|> Enum.filter(fn {_x, y} -> y > coord end)
|> Enum.map(fn {x, y} ->
{x, y - 2 * abs(y - coord)}
end)
Enum.uniq(points_to_keep ++ points_folded)
end
defp print_code(points) do
for y <- 0..max_coord(points, :y) do
for x <- 0..max_coord(points, :x) do
character = if {x, y} in points, do: "#", else: "."
IO.write(character)
end
IO.write("\n")
end
end
defp max_coord(points, :x), do: Enum.map(points, fn {x, _y} -> x end) |> Enum.max()
defp max_coord(points, :y), do: Enum.map(points, fn {_x, y} -> y end) |> Enum.max()
end
|
lib/advent_of_code/solutions/day13.ex
| 0.712732
| 0.41052
|
day13.ex
|
starcoder
|
defmodule Expool do
@moduledoc """
This module provides a simple interface for concurrent process usage. The
  `Expool.create_pool/2` function creates a base pool of processes, which is
  then used for task submission via `Expool.submit/2`.
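  ## Example
  A minimal sketch of pool creation and task submission:
      {:ok, pid} = Expool.create_pool(3)
      {:ok, _worker} = Expool.submit(pid, fn -> IO.puts("hello") end)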
"""
# alias some stuff
alias Expool.Balancers
alias Expool.Internal
alias Expool.Options
defstruct active: true,
balancer: nil,
opts: nil,
pool: nil,
size: nil
@doc """
Creates a new set of `size` processes, adding them to a Map so they can be
referenced at a later time. Parses various options using `Expool.Options.parse/1`,
and potentially binds the pool to a name via Agent.
## Options
* `:name` - a name to register the pool against (defaults to using `nil`)
* `:strategy` - the balancing strategy to use (defaults to `:round_robin`)
"""
  @spec create_pool(number, list) :: { atom, pid }
def create_pool(size, opts \\ []) when size > 0 and is_list(opts) do
options =
opts
|> Options.parse
pool =
1..size
|> Enum.reduce(%{}, fn(num, dict) ->
args =
options.arg_generate.()
|> List.wrap
{ :ok, worker } =
args
|> Internal.start_link
Map.put(dict, num, worker)
end)
base_pool = Balancers.setup(%Expool{
opts: options,
pool: pool,
size: size
})
Agent.start_link(fn -> base_pool end, case options.register do
nil -> []
name -> [ name: name ]
end)
end
@doc """
Retrieves a registered pool by name or PID. This is a shorthand for calling
the Agent manually, but it will return an error if a valid pool is not found.
"""
  @spec get_pool(atom | pid) :: { atom, Expool | atom }
def get_pool(id) when is_atom(id) or is_pid(id) do
Agent.get(id, fn
(%Expool{ } = pool) -> { :ok, pool }
(_other) -> { :error, :not_found }
end)
end
@doc """
Submits a task to a pool, either provided or by name. Returns error tuples
if the pool is either inactive or invalid. If valid, a pid will be chosen
based on the selection methods of the pool, and the task will be forwarded.
If there is a name registered, the Agent will have the state updated when
appropriate, to reflect any changes inside the pool.
"""
@spec submit(Expool | atom | pid, function) :: { atom, pid }
def submit(%Expool{ active: true } = pool, action) when is_function(action) do
{ index, new_pool } =
pool
|> Balancers.balance
pool.pool
|> Map.get(index)
|> Internal.execute(action)
|> Tuple.append(new_pool)
end
def submit(%Expool{ active: false } = _pool, _action) do
{ :error, "Task submitted to inactive pool!" }
end
def submit(id, action) when is_atom(id) or is_pid(id) do
Agent.get_and_update(id, fn(pool) ->
case submit(pool, action) do
{ :error, _msg } = error ->
{ error, pool }
{ :ok, pid, new_pool } ->
{ { :ok, pid }, new_pool }
end
end)
end
@doc """
Terminates all processes inside a pool, either provided or by name. Returns
a pool marked as inactive to avoid developers sending uncaught messages through
the pipeline.
"""
  @spec terminate(Expool | atom | pid) :: { atom, Expool | boolean }
def terminate(%Expool{ pool: pids } = pool) do
pids
|> Map.values
|> Enum.each(&(Process.exit(&1, :normal)))
{ :ok, %Expool{ pool | active: false } }
end
def terminate(id) when is_atom(id) or is_pid(id) do
Agent.get_and_update(id, fn(pool) ->
case terminate(pool) do
{ :error, _msg } = error ->
{ error, pool }
{ :ok, new_pool } ->
{ { :ok, true }, new_pool }
end
end)
end
end
|
lib/expool.ex
| 0.819641
| 0.584775
|
expool.ex
|
starcoder
|
defmodule GenRouter.Conn do
@moduledoc """
Structure which represents connection with Telegram bot.
Inspired by %Plug.Conn{}, adapted for bots.
Attributes:
* __skip__: system buffer to keep track of skipped scopes;
* path: route to controller which should handle this object;
* params: payload which will be passed to controller;
* assigns: non-parsed data assigned by a system (auth, etc);
* scope: local scope of current request, clears for each new route;
* code: response code, we use common HTTP codes, currently only 200 is supported;
* response: response payload, usually JSON;
* halted: stop the pipeline execution if conn was settled.
"""
defstruct __skip__: %{},
path: "/",
params: %{},
assigns: %{},
scope: %{},
code: nil,
response: nil,
halted: false
@type t :: %GenRouter.Conn{}
@doc """
Build Conn object with system fields
"""
@spec build(module(), map()) :: t
def build(router_module, %{path: path, params: params, assigns: assigns, scope: scope}) do
%GenRouter.Conn{
path: path,
params: params,
assigns: assigns,
scope: scope
}
|> reset_router_matches(router_module)
end
@doc """
Assign variable to current request.
"""
@spec assign(t, atom(), any()) :: t
def assign(%GenRouter.Conn{assigns: assigns} = conn, key, value) do
assigns = Map.put(assigns, key, value)
%{conn | assigns: assigns}
end
@doc """
Update state and complete the current request.
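  An illustrative call, settling the conn with the default 200 code:
      conn
      |> GenRouter.Conn.complete("Hello!")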
"""
@spec complete(t, String.t() | nil | :default, map() | :default, integer() | :default) :: t
def complete(conn, response \\ :default, scope \\ :default, code \\ :default)
def complete(%GenRouter.Conn{} = conn, response, scope, code)
when (is_map(scope) or scope === :default) and (is_integer(code) or code === :default) do
response = from_conn_or_default(conn, :response, response)
scope = from_conn_or_default(conn, :scope, scope)
code = from_conn_or_default(conn, :code, code)
%{conn | response: response, scope: scope, code: code}
|> halt()
end
@doc """
Put the next path after the current request.
It unhalts settled conn, so all the pipelines will be executed again.
"""
@spec forward(t, String.t(), map(), Keyword.t()) :: t
def forward(%GenRouter.Conn{} = conn, path \\ "/", scope \\ %{}, opts \\ [])
when is_bitstring(path) do
router_module =
Keyword.get(opts, :router_module, false) ||
Application.get_env(:gen_router, GenRouter.Conn)
|> Keyword.fetch!(:default_router)
%{conn | path: path, scope: scope, code: 302, halted: false}
|> reset_router_matches(router_module)
|> router_module.do_match(opts)
end
@doc """
Halt execution pipeline, Conn is settled.
"""
@spec halt(t) :: t
def halt(%GenRouter.Conn{} = conn) do
%{conn | halted: true}
end
@doc """
Reset router matching pipeline
"""
@spec reset_router_matches(t, module()) :: t
def reset_router_matches(conn, router_module) do
skip_router_scopes = Enum.reduce(router_module.scopes(), %{}, &Map.put(&2, &1, false))
%{conn | __skip__: skip_router_scopes}
end
@spec from_conn_or_default(t, :response | :scope | :code, any()) :: any()
defp from_conn_or_default(conn, :response, :default), do: conn.response
defp from_conn_or_default(_conn, :response, response), do: response
defp from_conn_or_default(conn, :scope, :default), do: conn.scope
defp from_conn_or_default(_conn, :scope, scope), do: scope || %{}
defp from_conn_or_default(conn, :code, :default), do: conn.code || 200
defp from_conn_or_default(_conn, :code, code), do: code || 200
end
|
lib/conn.ex
| 0.798776
| 0.408926
|
conn.ex
|
starcoder
|
defmodule AdventOfCode.Day04 do
@spec problem1 :: any
def problem1 do
{numbers, boards} = input()
numbers
|> Enum.reduce_while([], fn elem, acc ->
matches = [elem | acc]
case Enum.find(boards, &winning_board?(&1, matches)) do
nil -> {:cont, matches}
board -> {:halt, Enum.sum(unmatched_numbers(board, matches)) * elem}
end
end)
end
@spec problem2 :: any
def problem2 do
{numbers, boards} = input()
numbers
|> Enum.reduce_while({boards, []}, fn elem, {boards, acc} ->
matches = [elem | acc]
case boards do
[board] ->
if winning_board?(board, matches) do
{:halt, Enum.sum(unmatched_numbers(board, matches)) * elem}
else
{:cont, {boards, matches}}
end
_ ->
{:cont, {Enum.reject(boards, &winning_board?(&1, matches)), matches}}
end
end)
end
defp input do
[numbers | boards] =
File.read!("data/04/input")
|> String.split("\n\n", trim: true)
numbers =
numbers
|> String.trim()
|> String.split(",")
|> Enum.map(&String.to_integer/1)
boards =
boards
|> Enum.map(fn board ->
board
|> String.split(~r/\s+/, trim: true)
|> Enum.map(&String.to_integer/1)
end)
{numbers, boards}
end
defp match_row_or_column?(list, numbers) do
Enum.all?(list, &(&1 in numbers))
end
defp unmatched_numbers(board, numbers) do
Enum.reject(board, &(&1 in numbers))
end
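  # A board is a flat list of 25 numbers in row-major order, so the five
  # rows and five columns can be matched positionally below.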
defp winning_board?(
[
a1, a2, a3, a4, a5,
b1, b2, b3, b4, b5,
c1, c2, c3, c4, c5,
d1, d2, d3, d4, d5,
e1, e2, e3, e4, e5
],
numbers
) do
match_row_or_column?([a1, a2, a3, a4, a5], numbers) or
      match_row_or_column?([b1, b2, b3, b4, b5], numbers) or
match_row_or_column?([c1, c2, c3, c4, c5], numbers) or
match_row_or_column?([d1, d2, d3, d4, d5], numbers) or
match_row_or_column?([e1, e2, e3, e4, e5], numbers) or
match_row_or_column?([a1, b1, c1, d1, e1], numbers) or
match_row_or_column?([a2, b2, c2, d2, e2], numbers) or
match_row_or_column?([a3, b3, c3, d3, e3], numbers) or
match_row_or_column?([a4, b4, c4, d4, e4], numbers) or
match_row_or_column?([a5, b5, c5, d5, e5], numbers)
end
end
|
2021/lib/aoc04.ex
| 0.615203
| 0.44077
|
aoc04.ex
|
starcoder
|
defmodule Callisto.GraphDB do
alias Callisto.{Edge, Query, Vertex}
@moduledoc """
Defines a graph DB (repository).
When used, the graph DB expects `:otp_app` option, which should point to
the OTP application that has the repository configuration. For example,
defmodule Graph do
use Callisto.GraphDB, otp_app: :my_app
end
Could be configured with:
config :my_app, Graph,
adapter: Callisto.Adapters.Neo4j,
url: "http://localhost:7474",
basic_auth: [username: "neo4j", password: "password"]
Most of the configuration is specific to the adapter, check the adapter
source for details.
"""
# NOTE: I stole a ton of this from Ecto, and probably did it wrong in the
# process... ...Paul
defmacro __using__(options) do
quote bind_quoted: [opts: options] do
@behaviour Callisto.GraphDB
@otp_app Keyword.fetch!(opts, :otp_app)
@config Application.get_env(@otp_app, __MODULE__, [])
@adapter (opts[:adapter] || @config[:adapter])
unless @adapter do
raise ArgumentError, "missing :adapter configuration in config #{inspect @otp_app}, #{inspect __MODULE__}"
end
def config do
@config
end
def __adapter__ do
@adapter
end
def start_link(opts \\ []) do
Callisto.GraphDB.Supervisor.start_link(__MODULE__, @otp_app, @adapter, opts)
end
def stop(pid, timeout \\ 5000) do
Supervisor.stop(pid, :normal, timeout)
end
def query(cypher, parser \\ nil) do
Callisto.GraphDB.Queryable.query(@adapter, cypher, parser)
end
def query!(cypher, parser \\ nil) do
Callisto.GraphDB.Queryable.query!(@adapter, cypher, parser)
end
def count(matcher) do
Callisto.GraphDB.Queryable.count(@adapter, matcher)
end
def count!(matcher) do
Callisto.GraphDB.Queryable.count!(@adapter, matcher)
end
def exists?(matcher) do
Callisto.GraphDB.Queryable.exists?(@adapter, matcher)
end
def get(finder, labels, props \\ %{}) do
Callisto.GraphDB.Queryable.get(@adapter, finder, labels, props)
end
def get!(finder, labels, props \\ %{}) do
Callisto.GraphDB.Queryable.get!(@adapter, finder, labels, props)
end
def get_path(from, to, edge) do
Callisto.GraphDB.Queryable.get_path(@adapter, from, to, edge)
end
def create(vertex=%Callisto.Vertex{}) do
Callisto.GraphDB.Queryable.create(@adapter, vertex)
end
def create(triple=%Callisto.Triple{}) do
Callisto.GraphDB.Queryable.create(@adapter, triple)
end
def create(from=%Callisto.Vertex{},
edge=%Callisto.Edge{},
to=%Callisto.Vertex{}) do
create(Callisto.Triple.new(from: from, to: to, edge: edge))
end
def get_or_create(vertex=%Callisto.Vertex{}, details \\ nil) do
Callisto.GraphDB.Queryable.get_or_create(@adapter, vertex, details)
end
def delete(vertex=%Callisto.Vertex{}, opts \\ []) do
Callisto.GraphDB.Queryable.delete(@adapter, vertex, opts)
end
def update(vertex=%Callisto.Vertex{}, opts \\ nil) do
Callisto.GraphDB.Queryable.update(@adapter, vertex, opts)
end
end
end
@doc ~S"""
Runs an arbitrary Cypher query against Neo4j. Can take a straight string
  or a Callisto.Query structure (if the latter, will attempt to convert
results to structs based on the :return key -- handle_return/2).
Optional function argument will receive the array of results (if status
is :ok); the return from the function will replace the return. Useful
for dereferencing a single key to return just a list of values -- or
for popping the first off)
# Example: Return only the first row's data.
{:ok, x} = Repo.query("MATCH (x) RETURN x", fn(r) -> hd(r)["x"] end)
# Example: Return dereferenced list.
%Query{match: "(v:Foo)"} |> Query.returning(v: MyApp.Foo)
|> GraphDB.query(fn(row) -> Enum.map(row, &(&1["v"])) end)
"""
@callback query(String.t | struct, fun | nil) :: tuple
@doc ~S"""
Runs an arbitrary Cypher query against Neo4j. Can take a straight string
  or a Callisto.Query structure. Returns only the response.
"""
@callback query!(String.t | struct, fun | nil) :: list(map)
@doc ~S"""
  Returns {:ok, count} of elements that match the given <matcher>.
iex> Repo.count("(x:Disease)")
{:ok, 0}
iex> Repo.count("(x:Claim)")
{:ok, 1}
"""
@callback count(String.t | struct) :: tuple
@callback count!(String.t | struct) :: integer | tuple
@doc ~S"""
Returns true/false if there is at least one element that matches the
parameters.
iex> Repo.exists?("(x:Disease)")
false
iex> Repo.exists?("(x:Claim)")
true
"""
@callback exists?(String.t | struct) :: boolean
@doc ~S"""
Constructs query to return objects of type <type> (Vertex or Edge),
with label(s) <labels>, and optionally properties or ID. Returns
tuple from query(), but on success, second element of tuple is a list
of results cast into the appropriate structs (Vertex or Edge).
The last argument, if provided, is expected to be a hash of property
values to match against. These values are completely ignored, though,
if the hash has an :id key -- in that case, or if the last argument is
a string or integer, the only property that will be searched against is
the ID given.
"""
@callback get(Vertex.t | Edge.t, list(String.t | module), any) :: tuple
@callback get!(Vertex.t | Edge.t, list(String.t | module), any) :: list(struct)
@doc ~S"""
Returns all Edges that match the third argument, linking from the first
to second vertices.
"""
@callback get_path(Vertex.t, Vertex.t, Edge.t) :: tuple
@doc ~S"""
Creates the given object
Given a Vertex, creates the vertex and returns the resulting Vertex.
Given a Triple, creates the path and returns the Triple.
Returns {:ok, [results]} on success.
"""
@callback create(Vertex.t | Triple.t) :: tuple
@doc ~S"""
  Expects arguments from (vertex), edge (edge), to (vertex). Simply wraps
  them in a Triple and then creates it. See create/1.
Returns {:ok, [triples]} on success.
"""
@callback create(Vertex.t, Edge.t, Vertex.t) :: tuple
@doc ~S"""
Returns existing matching Vertex record; if none exist, one is created
and returned. Returns {:ok, [vertices]} on success.
"""
@callback get_or_create(Vertex.t) :: tuple
@doc ~S"""
Returns existing matching Vertex record; if none exist, one is created
using the details given in the second argument. Returns {:ok, [vertices]}
on success.
"""
@callback get_or_create(Vertex.t, keyword | map) :: tuple
@doc ~S"""
Deletes all vertices that match. Returns {:ok, []} on success (on success,
the right side is always an empty list.)
If detach: true is passed along, will delete the vertex and any remaining
edges attached to it (by default, will not detach)
Graph.delete(Vertex.new("Foo"), detach: true)
Cypher: "MATCH (x:Foo) DETACH DELETE x"
"""
@callback delete(Vertex.t, keyword) :: tuple
@doc ~S"""
Matches the vertex, by ID alone if the :id property exists, and
updates the properties on that vertex: If the second argument is
given, that hash is set on the properties (does not delete existing
properties, but will overwrite matching keys). If not, will use the
properties from the Vertex struct used to match.
"""
@callback update(Vertex.t, keyword | map | none) :: tuple
@doc """
Returns the adapter tied to the repository.
"""
@callback __adapter__ :: Ecto.Adapter.t
@doc """
Returns the adapter configuration stored in the `:otp_app` environment.
"""
@callback config() :: Keyword.t
@doc """
Starts any connection pooling or supervision and return `{:ok, pid}`
or just `:ok` if nothing needs to be done.
Returns `{:error, {:already_started, pid}}` if the repo is already
started or `{:error, term}` in case anything else goes wrong.
## Options
See the configuration in the moduledoc for options shared between adapters,
for adapter-specific configuration see the adapter's documentation.
"""
@callback start_link(opts :: Keyword.t) :: {:ok, pid} |
{:error, {:already_started, pid}} |
{:error, term}
@doc """
Shuts down the repository represented by the given pid.
"""
@callback stop(pid, timeout) :: :ok
# This takes a returned tuple from Neo4j and a Callisto.Query struct;
# it looks at the Query's return key and attempts to convert the
# returned data to the matching structs (if indicated). If there's
# no struct given for a key, it is unchanged. Finally, returns
# the tuple with the updated results.
def handle_return(rows, %Query{return: returning})
when is_list(returning) do
Enum.map rows, fn(row) ->
Enum.map(returning, fn({k, v}) ->
key = to_string(k)
cond do
is_nil(v) -> {key, row[key]}
v == true -> {key, Vertex.cast([], row[key])}
v == Vertex -> {key, Vertex.cast(row["labels(#{key})"], row[key])}
v == Edge -> {key, Edge.cast(row["type(#{key})"], row[key])}
is_binary(v) -> {key, Vertex.cast(v, row[key])}
is_atom(v) || is_list(v) -> {key, Vertex.cast(v, row[key])}
true -> {key, row[key]}
end
end)
|> Map.new
end
end
# No return structure defined, just return what we got, likely nothing.
def handle_return(rows, %Query{return: r}) when is_nil(r), do: rows
end
|
lib/callisto/graph_db.ex
| 0.867008
| 0.484441
|
graph_db.ex
|
starcoder
|
defmodule BigchaindbEx.Transaction do
alias BigchaindbEx.{Crypto, Utils, Http}
alias BigchaindbEx.Transaction.{Input, Output}
@type t :: %__MODULE__{
id: String.t,
operation: String.t,
asset: Map.t,
inputs: Enum.t,
outputs: Enum.t,
metadata: Map.t,
fulfillments: Enum.t,
conditions: Enum.t,
version: String.t,
timestamp: String.t
}
@enforce_keys [:operation, :version, :timestamp]
defstruct [
:id,
:operation,
:asset,
:inputs,
:outputs,
:metadata,
:version,
:fulfillments,
:conditions,
:timestamp
]
@typedoc """
Options given to prepare/1
"""
@type prepare_opts :: [
operation: String.t,
asset: Map.t,
signers: Enum.t | String.t | Tuple.t,
recipients: Enum.t | String.t | Tuple.t,
metadata: Map.t
]
@doc """
Prepares a transaction.
## Example
iex> BigchaindbEx.Transaction.prepare(operation: "CREATE", signers: [signer1, signer2], asset: %{data: %{bicycle: %{serial_no: 232134, manufacturer: "SpeedWheels"}}})
{:ok, tx}
"""
@spec prepare(prepare_opts) :: {:ok, __MODULE__.t} | {:error, String.t}
def prepare(opts) when is_list(opts), do: opts |> Enum.into(%{}) |> prepare()
def prepare(opts = %{signers: signer}) when is_binary(signer), do: prepare(Map.merge(opts, %{signers: [signer]}))
def prepare(opts = %{signers: signers}) when is_tuple(signers), do: prepare(Map.merge(opts, %{signers: Tuple.to_list(signers)}))
def prepare(%{signers: []}), do: {:error, "No signers given! Please provide at least one signer!"}
def prepare(%{recipients: []}), do: {:error, "No recipients given! Each `recipient` in the list must be a tuple of `{[<list of public keys>], <amount>}`"}
def prepare(opts = %{operation: "CREATE", asset: asset = %{data: _}, signers: signers}) when is_list(signers) do
{:ok, %__MODULE__{
operation: "CREATE",
asset: asset,
inputs: signers,
outputs: [{signers, 1}],
metadata: opts[:metadata],
version: "1.0",
timestamp: Utils.gen_timestamp()
}}
end
def prepare(opts = %{operation: "CREATE", asset: asset = %{data: _}, signers: signers, recipients: recipients})
when is_list(signers)
and is_list(recipients)
do
{:ok, %__MODULE__{
operation: "CREATE",
asset: asset,
inputs: signers,
outputs: recipients,
metadata: opts[:metadata],
version: "1.0",
timestamp: Utils.gen_timestamp()
}}
end
def prepare(opts = %{operation: "TRANSFER", asset: asset = %{data: _}, signers: signers})
when is_list(signers)
or is_binary(signers)
or is_tuple(signers)
do
{:ok, %__MODULE__{
operation: "TRANSFER",
asset: asset,
inputs: signers,
outputs: [{signers, 1}],
metadata: opts[:metadata],
version: "1.0",
timestamp: Utils.gen_timestamp()
}}
end
def prepare(opts = %{operation: "TRANSFER", asset: asset = %{data: _}, signers: signers, recipients: recipients})
when (is_list(signers) or is_binary(signers) or is_tuple(signers))
and (is_list(recipients) or is_binary(recipients) or is_tuple(recipients))
do
{:ok, %__MODULE__{
operation: "TRANSFER",
asset: asset,
inputs: signers,
outputs: recipients,
metadata: opts[:metadata],
version: "1.0",
timestamp: Utils.gen_timestamp()
}}
end
def prepare(_), do: {:error, "The given options are invalid!"}
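# End-to-end sketch (the key strings below are placeholders, not real key
# material):
#
#     {:ok, tx} =
#       BigchaindbEx.Transaction.prepare(
#         operation: "CREATE",
#         signers: ["<signer_pub_key>"],
#         asset: %{data: %{bicycle: %{serial_no: 232_134}}}
#       )
#     {:ok, fulfilled} = BigchaindbEx.Transaction.fulfill(tx, "<priv_key>")
#     {:ok, _response} = BigchaindbEx.Transaction.send(fulfilled)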
@doc """
Fulfills a given transaction.
"""
@spec fulfill(__MODULE__.t, String.t | Enum.t | Tuple.t) :: {:ok, __MODULE__.t} | {:error, String.t}
def fulfill(%__MODULE__{} = tx, priv_key) when is_binary(priv_key), do: fulfill(tx, [priv_key])
def fulfill(%__MODULE__{} = tx, priv_keys) when is_tuple(priv_keys), do: fulfill(tx, Tuple.to_list(priv_keys))
def fulfill(%__MODULE__{} = tx, priv_keys) when is_list(priv_keys) do
# Generate public keys from the
# given private keys.
key_pairs = Enum.map(priv_keys, fn p_key ->
case Crypto.generate_pub_key(p_key) do
{:ok, pub_key} -> {pub_key, p_key}
{:error, _} -> {:error, p_key}
end
end)
# Check for errors
errors = key_pairs
|> Enum.filter(fn {atom, _} -> atom === :error end)
|> Enum.map(fn {_, key} -> key end)
if Enum.count(errors) > 0 do
{:error, "The following keys could not be decoded: #{inspect errors}"}
else
# Serialize the transaction to json
serialized_tx = tx
|> serialize_struct
|> Utils.encode_map_to_json_sorted_keys
# Sign the transaction using the
# keys and the serialized tx
signatures = Enum.map(key_pairs, fn {pub_key, priv_key} ->
case Crypto.sign(serialized_tx, priv_key) do
{:ok, signed} -> {signed, pub_key}
{:error, reason} -> {:error, reason}
end
end)
# Check for errors
errors = signatures
|> Enum.filter(fn {atom, _} -> atom === :error end)
|> Enum.map(fn {_, key} -> key end)
if Enum.count(errors) > 0 do
{:error, "Signing using the given private key/s failed: #{inspect errors}"}
else
verified_signatures = Enum.map(signatures, fn {sig, pub_key} ->
{pub_key, sig, Crypto.verify(serialized_tx, sig, pub_key)}
end)
errors = verified_signatures
|> Enum.filter(fn {_, _, valid} -> not valid end)
|> Enum.map(fn {key, _, _} -> key end)
if Enum.count(errors) > 0 do
{:error, "Verifying using the given private key/s failed: #{inspect errors}"}
else
fulfilled_inputs = Enum.map(verified_signatures, fn {pub_key, signature, _valid} ->
{:ok, input} = pub_key
|> Input.generate(signature)
|> Input.to_map
input
end)
fulfilled_outputs = Enum.map(tx.outputs, fn {pub_keys, amount} ->
{:ok, output} = pub_keys
|> Output.generate(amount)
|> Output.to_map
output
end)
fulfilled_tx = tx
|> Map.merge(%{ inputs: fulfilled_inputs, outputs: fulfilled_outputs })
|> compute_tx_id
{:ok, fulfilled_tx}
end
end
end
rescue
e in RuntimeError -> {:error, "Could not fulfill transaction: #{inspect e}"}
end
def fulfill(%__MODULE__{}, _), do: {:error, "Invalid private key/s!"}
def fulfill(_, _), do: {:error, "You must supply a transaction object as the first argument!"}
@doc """
Sends a fulfilled transaction to
the bigchaindb cluster.
"""
@spec send(__MODULE__.t, Keyword.t) :: {:ok, __MODULE__.t} | {:error, String.t}
def send(%__MODULE__{} = tx, headers \\ []) when is_list(headers) do
tx_body = tx
|> Map.from_struct
|> Map.delete(:conditions)
|> Map.delete(:timestamp)
|> Map.delete(:fulfillments)
|> Utils.encode_map_to_json_sorted_keys
{:ok, Http.post("api/v1/transactions", [headers: headers, body: tx_body])}
rescue
e in RuntimeError -> {:error, "Could not send transaction: #{inspect e}"}
end
@spec retrieve(String.t) :: {:ok, __MODULE__.t} | {:error, String.t}
def retrieve(tx_id) when is_binary(tx_id) do
  # Sketch: assumes BigchaindbEx.Http exposes a `get/2` counterpart to `post/2`.
  {:ok, Http.get("api/v1/transactions/#{tx_id}", [])}
end
@spec status(String.t) :: {:ok, __MODULE__.t} | {:error, String.t}
def status(tx_id) when is_binary(tx_id) do
  # Sketch: the statuses endpoint and `Http.get/2` are assumptions.
  {:ok, Http.get("api/v1/statuses?transaction_id=#{tx_id}", [])}
end
defp compute_tx_id(%__MODULE__{} = tx) do
parsed_inputs = Enum.map(tx.inputs, fn x -> Map.merge(x, %{fulfillment: nil}) end)
serialized = Utils.encode_map_to_json_sorted_keys(%{
asset: tx.asset,
inputs: parsed_inputs,
metadata: tx.metadata,
operation: tx.operation,
outputs: tx.outputs,
version: tx.version
})
{:ok, id} = Crypto.sha3_hash256(serialized)
Map.merge(tx, %{id: id})
end
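# Note (per compute_tx_id/1 above): the id is the SHA3-256 hash of the
# sorted-keys JSON encoding of the transaction with each input's fulfillment
# nil'ed out, so the id does not depend on the signatures themselves.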
defp serialize_struct(tx) when is_map(tx) do
tx
|> Map.from_struct
|> Map.merge(%{
inputs: Enum.map(tx.inputs, &Crypto.encode_base58/1),
outputs: Enum.map(tx.outputs, fn {keys, amount} ->
[
Enum.map(keys, &Crypto.encode_base58/1),
amount
]
end)
})
end
end
|
lib/bigchaindb_ex/transaction.ex
| 0.90709
| 0.449513
|
transaction.ex
|
starcoder
|
defmodule ExWire.Packet.Capability.Par.SnapshotData.BlockChunk do
@moduledoc """
Block chunks contain raw block data: blocks themselves, and their transaction
receipts. The blocks are stored in the "abridged block" format (referred to
by AB), and the receipts are stored in a list: [`receipt_1`: `P`,
`receipt_2`: `P`, ...] (referred to by RC).
"""
defmodule BlockHeader do
@type t :: %__MODULE__{
# Header fields
author: EVM.address(),
state_root: EVM.hash(),
logs_bloom: <<_::2048>>,
difficulty: integer(),
gas_limit: integer(),
gas_used: integer(),
timestamp: integer(),
extra_data: integer(),
# Ommers and transactions inline
transactions: list(Blockchain.Transaction.t()),
transactions_rlp: list(ExRLP.t()),
ommers: list(Block.Header.t()),
ommers_rlp: list(ExRLP.t()),
# Seal fields
mix_hash: EVM.hash(),
nonce: <<_::64>>
}
defstruct [
:author,
:state_root,
:logs_bloom,
:difficulty,
:gas_limit,
:gas_used,
:timestamp,
:extra_data,
:transactions,
:transactions_rlp,
:ommers,
:ommers_rlp,
:mix_hash,
:nonce
]
end
defmodule BlockData do
@type t :: %__MODULE__{
header: BlockHeader.t(),
receipts: list(Blockchain.Transaction.Receipt.t()),
receipts_rlp: list(ExRLP.t())
}
defstruct [
:header,
:receipts,
:receipts_rlp
]
end
@type t :: %__MODULE__{
number: integer(),
hash: EVM.hash(),
total_difficulty: integer(),
block_data_list: list(BlockData.t())
}
defstruct number: nil,
hash: nil,
total_difficulty: nil,
block_data_list: []
@doc """
Given a `BlockChunk`, serializes for transport within a SnapshotData packet.
## Examples
iex> %ExWire.Packet.Capability.Par.SnapshotData.BlockChunk{
...> number: 5,
...> hash: <<6::256>>,
...> total_difficulty: 7,
...> block_data_list: [
...> %ExWire.Packet.Capability.Par.SnapshotData.BlockChunk.BlockData{
...> header: %ExWire.Packet.Capability.Par.SnapshotData.BlockChunk.BlockHeader{
...> author: <<10::160>>,
...> state_root: <<11::256>>,
...> logs_bloom: <<12::2048>>,
...> difficulty: 13,
...> gas_limit: 14,
...> gas_used: 15,
...> timestamp: 16,
...> extra_data: 17,
...> transactions: [
...> %Blockchain.Transaction{nonce: 5, gas_price: 6, gas_limit: 7, to: <<1::160>>, value: 8, v: 27, r: 9, s: 10, data: "hi"}
...> ],
...> ommers: [
...> %Block.Header{parent_hash: <<1::256>>, ommers_hash: <<2::256>>, beneficiary: <<3::160>>, state_root: <<4::256>>, transactions_root: <<5::256>>, receipts_root: <<6::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<7::256>>, nonce: <<8::64>>}
...> ],
...> mix_hash: <<18::256>>,
...> nonce: <<19::64>>,
...> },
...> receipts: [
...> %Blockchain.Transaction.Receipt{state: <<1,2,3>>, cumulative_gas: 5, bloom_filter: <<2,3,4>>, logs: []}
...> ]
...> }
...> ]
...> }
...> |> ExWire.Packet.Capability.Par.SnapshotData.BlockChunk.serialize()
[
5,
<<6::256>>,
7,
[
[
<<10::160>>,
<<11::256>>,
<<12::2048>>,
13,
14,
15,
16,
17,
[[<<5>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]],
[[<<1::256>>, <<2::256>>, <<3::160>>, <<4::256>>, <<5::256>>, <<6::256>>, <<>>, 5, 1, 5, 3, 6, "Hi mom", <<7::256>>, <<8::64>>]],
<<18::256>>,
<<19::64>>
],
[
[<<1,2,3>>, 5, <<2,3,4>>, []]
]
]
]
"""
@spec serialize(t) :: ExRLP.t()
def serialize(block_chunk = %__MODULE__{}) do
serialized_block_data =
for block_data <- block_chunk.block_data_list do
[
[
block_data.header.author,
block_data.header.state_root,
block_data.header.logs_bloom,
block_data.header.difficulty,
block_data.header.gas_limit,
block_data.header.gas_used,
block_data.header.timestamp,
block_data.header.extra_data,
Enum.map(block_data.header.transactions, &Blockchain.Transaction.serialize/1),
Enum.map(block_data.header.ommers, &Block.Header.serialize/1),
block_data.header.mix_hash,
block_data.header.nonce
],
Enum.map(block_data.receipts, &Blockchain.Transaction.Receipt.serialize/1)
]
end
[
block_chunk.number,
block_chunk.hash,
block_chunk.total_difficulty
] ++ serialized_block_data
end
@doc """
Given an RLP-encoded `BlockChunk` from a SnapshotData packet, decodes into a
`BlockChunk` struct.
## Examples
iex> [
...> <<5>>,
...> <<6::256>>,
...> <<7>>,
...> [
...> [
...> <<10::160>>,
...> <<11::256>>,
...> <<12::2048>>,
...> 13,
...> 14,
...> 15,
...> 16,
...> 17,
...> [[<<5>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]],
...> [[<<1::256>>, <<2::256>>, <<3::160>>, <<4::256>>, <<5::256>>, <<6::256>>, <<>>, <<5>>, <<1>>, <<5>>, <<3>>, <<6>>, "Hi mom", <<7::256>>, <<8::64>>]],
...> <<18::256>>,
...> <<19::64>>
...> ],
...> [
...> [<<1,2,3>>, <<5>>, <<2,3,4>>, []]
...> ]
...> ]
...> ]
...> |> ExWire.Packet.Capability.Par.SnapshotData.BlockChunk.deserialize()
%ExWire.Packet.Capability.Par.SnapshotData.BlockChunk{
number: 5,
hash: <<6::256>>,
total_difficulty: 7,
block_data_list: [
%ExWire.Packet.Capability.Par.SnapshotData.BlockChunk.BlockData{
header: %ExWire.Packet.Capability.Par.SnapshotData.BlockChunk.BlockHeader{
author: <<10::160>>,
state_root: <<11::256>>,
logs_bloom: <<12::2048>>,
difficulty: 13,
gas_limit: 14,
gas_used: 15,
timestamp: 16,
extra_data: 17,
transactions: [
%Blockchain.Transaction{nonce: 5, gas_price: 6, gas_limit: 7, to: <<1::160>>, value: 8, v: 27, r: 9, s: 10, data: "hi"}
],
transactions_rlp: [[<<5>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]],
ommers: [
%Block.Header{parent_hash: <<1::256>>, ommers_hash: <<2::256>>, beneficiary: <<3::160>>, state_root: <<4::256>>, transactions_root: <<5::256>>, receipts_root: <<6::256>>, logs_bloom: <<>>, difficulty: 5, number: 1, gas_limit: 5, gas_used: 3, timestamp: 6, extra_data: "Hi mom", mix_hash: <<7::256>>, nonce: <<8::64>>}
],
ommers_rlp: [[<<1::256>>, <<2::256>>, <<3::160>>, <<4::256>>, <<5::256>>, <<6::256>>, <<>>, <<5>>, <<1>>, <<5>>, <<3>>, <<6>>, "Hi mom", <<7::256>>, <<8::64>>]],
mix_hash: <<18::256>>,
nonce: <<19::64>>,
},
receipts: [
%Blockchain.Transaction.Receipt{state: <<1,2,3>>, cumulative_gas: 5, bloom_filter: <<2,3,4>>, logs: []}
],
receipts_rlp: [[<<1,2,3>>, <<5>>, <<2,3,4>>, []]]
}
]
}
"""
@spec deserialize(ExRLP.t()) :: t
def deserialize(rlp) do
[
number,
hash,
total_difficulty
| serialized_block_data
] = rlp
block_data_list =
for block_data_rlp <- serialized_block_data do
[
[
author,
state_root,
logs_bloom,
difficulty,
gas_limit,
gas_used,
timestamp,
extra_data,
transactions_rlp,
ommers_rlp,
mix_hash,
nonce
],
receipts_rlp
] = block_data_rlp
transactions = Enum.map(transactions_rlp, &Blockchain.Transaction.deserialize/1)
ommers = Enum.map(ommers_rlp, &Block.Header.deserialize/1)
receipts = Enum.map(receipts_rlp, &Blockchain.Transaction.Receipt.deserialize/1)
%BlockData{
header: %BlockHeader{
author: author,
state_root: state_root,
logs_bloom: logs_bloom,
difficulty: Exth.maybe_decode_unsigned(difficulty),
gas_limit: Exth.maybe_decode_unsigned(gas_limit),
gas_used: Exth.maybe_decode_unsigned(gas_used),
timestamp: Exth.maybe_decode_unsigned(timestamp),
extra_data: extra_data,
transactions: transactions,
transactions_rlp: transactions_rlp,
ommers: ommers,
ommers_rlp: ommers_rlp,
mix_hash: mix_hash,
nonce: nonce
},
receipts: receipts,
receipts_rlp: receipts_rlp
}
end
%__MODULE__{
number: Exth.maybe_decode_unsigned(number),
hash: hash,
total_difficulty: Exth.maybe_decode_unsigned(total_difficulty),
block_data_list: block_data_list
}
end
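# Round-trip sketch: for a chunk built as in the serialize/1 doctest,
#   chunk |> serialize() |> ExRLP.encode() |> ExRLP.decode() |> deserialize()
# should yield an equivalent struct, modulo the `*_rlp` fields, which are
# only populated on the deserialize side.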
end
|
apps/ex_wire/lib/ex_wire/packet/capability/par/snapshot_data/block_chunk.ex
| 0.858318
| 0.516352
|
block_chunk.ex
|
starcoder
|
defmodule AWS.MigrationHubStrategy do
@moduledoc """
Migration Hub Strategy Recommendations
This API reference provides descriptions, syntax, and other details about each
of the actions and data types for Migration Hub Strategy Recommendations
(Strategy Recommendations).
The topic for each action shows the API request parameters and the response.
Alternatively, you can use one of the AWS SDKs to access an API that is tailored
to the programming language or platform that you're using. For more information,
see [AWS SDKs](http://aws.amazon.com/tools/#SDKs).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2020-02-19",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "migrationhub-strategy",
global?: false,
protocol: "rest-json",
service_id: "MigrationHubStrategy",
signature_version: "v4",
signing_name: "migrationhub-strategy",
target_prefix: nil
}
end
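# Usage sketch (client construction follows the aws-elixir convention; the
# credentials and region below are placeholders):
#
#     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
#     {:ok, summary, _http_response} =
#       AWS.MigrationHubStrategy.get_portfolio_summary(client)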
@doc """
Retrieves details about an application component.
"""
def get_application_component_details(
%Client{} = client,
application_component_id,
options \\ []
) do
url_path =
"/get-applicationcomponent-details/#{AWS.Util.encode_uri(application_component_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves a list of all the recommended strategies and tools for an application
component running on a server.
"""
def get_application_component_strategies(
%Client{} = client,
application_component_id,
options \\ []
) do
url_path =
"/get-applicationcomponent-strategies/#{AWS.Util.encode_uri(application_component_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves the status of an on-going assessment.
"""
def get_assessment(%Client{} = client, id, options \\ []) do
url_path = "/get-assessment/#{AWS.Util.encode_uri(id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves the details about a specific import task.
"""
def get_import_file_task(%Client{} = client, id, options \\ []) do
url_path = "/get-import-file-task/#{AWS.Util.encode_uri(id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves your migration and modernization preferences.
"""
def get_portfolio_preferences(%Client{} = client, options \\ []) do
url_path = "/get-portfolio-preferences"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves overall summary including the number of servers to rehost and the
overall number of anti-patterns.
"""
def get_portfolio_summary(%Client{} = client, options \\ []) do
url_path = "/get-portfolio-summary"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves detailed information about the specified recommendation report.
"""
def get_recommendation_report_details(%Client{} = client, id, options \\ []) do
url_path = "/get-recommendation-report-details/#{AWS.Util.encode_uri(id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves detailed information about a specified server.
"""
def get_server_details(
%Client{} = client,
server_id,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/get-server-details/#{AWS.Util.encode_uri(server_id)}"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves recommended strategies and tools for the specified server.
"""
def get_server_strategies(%Client{} = client, server_id, options \\ []) do
url_path = "/get-server-strategies/#{AWS.Util.encode_uri(server_id)}"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves a list of all the application components (processes).
"""
def list_application_components(%Client{} = client, input, options \\ []) do
url_path = "/list-applicationcomponents"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Retrieves a list of all the installed collectors.
"""
def list_collectors(%Client{} = client, max_results \\ nil, next_token \\ nil, options \\ []) do
url_path = "/list-collectors"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Retrieves a list of all the imports performed.
"""
def list_import_file_task(
%Client{} = client,
max_results \\ nil,
next_token \\ nil,
options \\ []
) do
url_path = "/list-import-file-task"
headers = []
query_params = []
query_params =
if !is_nil(next_token) do
[{"nextToken", next_token} | query_params]
else
query_params
end
query_params =
if !is_nil(max_results) do
[{"maxResults", max_results} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
200
)
end
@doc """
Returns a list of all the servers.
"""
def list_servers(%Client{} = client, input, options \\ []) do
url_path = "/list-servers"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Saves the specified migration and modernization preferences.
"""
def put_portfolio_preferences(%Client{} = client, input, options \\ []) do
url_path = "/put-portfolio-preferences"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Starts the assessment of an on-premises environment.
"""
def start_assessment(%Client{} = client, input, options \\ []) do
url_path = "/start-assessment"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Starts a file import.
"""
def start_import_file_task(%Client{} = client, input, options \\ []) do
url_path = "/start-import-file-task"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Starts generating a recommendation report.
"""
def start_recommendation_report_generation(%Client{} = client, input, options \\ []) do
url_path = "/start-recommendation-report-generation"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Stops the assessment of an on-premises environment.
"""
def stop_assessment(%Client{} = client, input, options \\ []) do
url_path = "/stop-assessment"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates the configuration of an application component.
"""
def update_application_component_config(%Client{} = client, input, options \\ []) do
url_path = "/update-applicationcomponent-config/"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
@doc """
Updates the configuration of the specified server.
"""
def update_server_config(%Client{} = client, input, options \\ []) do
url_path = "/update-server-config/"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
200
)
end
end
|
lib/aws/generated/migration_hub_strategy.ex
| 0.728459
| 0.473475
|
migration_hub_strategy.ex
|
starcoder
|
defmodule Holobot.Telegram.Commands.Ask do
@moduledoc """
Ask query command handler.
"""
use Holobot.Telegram.Commander
@about_cover "COVER corp. is a cutting edge 2D entertainment company. It is the parent company of Hololive Production."
@about_hololive "Hololive Production is a virtual YouTuber talent agency."
@about_yagoo """
YAGOO (Motoaki Tanigo) is the CEO of COVER Corp.
Originally focusing on the development and creation of AR and VR technologies, in 2017, COVER Corp. created the talent agency, hololive production.
He likes to watch TV and YouTube before going to bed, sometimes falling asleep on the sofa.
The name YAGOO originated from one of Roboco's streams, where Subaru misread "Tanigo" as "Yagoo".
"""
def ask(_update) do
Logger.info("Command /ask")
{:ok, _} =
send_message("What would you like to know?",
reply_markup: %Model.InlineKeyboardMarkup{
inline_keyboard: [
[
%{
callback_data: "/ask what-is-hololive",
text: "What is Hololive?"
},
%{
callback_data: "/ask what-is-cover",
text: "What is COVER?"
}
],
[
%{
callback_data: "/ask who-is-yagoo",
text: "Who is YAGOO?"
},
%{
callback_data: "/ask why-digital-avatars",
text: "Why digital avatars?"
}
]
]
}
)
end
def ask_query_command(update) do
Logger.info("Callback Query Command /ask")
case update.callback_query.data do
"/ask what-is-hololive" ->
send_message(@about_hololive)
"/ask what-is-cover" ->
send_message(@about_cover)
"/ask who-is-yagoo" ->
send_message(@about_yagoo)
"/ask why-digital-avatars" ->
send_message("""
Various reasons. It's fun, it's unique, and it provides a degree of anonymity.
It's also to showcase how technology has advanced over recent years.
The idea of being a cartoon character in real-time was essentially unheard of even five years ago.
""")
end
end
end
|
lib/holobot/telegram/commands/ask.ex
| 0.52756
| 0.400046
|
ask.ex
|
starcoder
|
defmodule Imagineer.Image.PNG.Chunk.Decoders.Transparency do
alias Imagineer.Image.PNG
@color_type_grayscale 0
@color_type_color 2
@color_type_palette_and_color 3
@color_type_grayscale_with_alpha 4
@color_type_color_and_alpha 6
# fully opaque
@palette_default_opacity 255
def decode(content, %PNG{color_type: color_type} = image) do
decode_transparency(color_type, content, image)
end
defp decode_transparency(@color_type_grayscale_with_alpha, _content, _image) do
raise("tRNS chunk should not appear for images with alpha channels")
end
defp decode_transparency(@color_type_color_and_alpha, _content, _image) do
raise("tRNS chunk should not appear for images with alpha channels")
end
defp decode_transparency(
@color_type_grayscale,
opacity_bytes,
%PNG{bit_depth: bit_depth} = image
) do
<<opacity::size(bit_depth), _rest::bits>> = opacity_bytes
%PNG{image | transparency: {opacity}}
end
defp decode_transparency(@color_type_color, opacity_bytes, %PNG{bit_depth: bit_depth} = image) do
%PNG{image | transparency: decode_rgb(opacity_bytes, bit_depth)}
end
defp decode_transparency(@color_type_palette_and_color, opacity_bytes, %PNG{} = image) do
%PNG{image | transparency: palette_opacities(opacity_bytes)}
end
defp decode_rgb(<<red::binary-size(2), green::binary-size(2), blue::binary-size(2)>>, bit_depth) do
{
take_integer(red, bit_depth),
take_integer(green, bit_depth),
take_integer(blue, bit_depth)
}
end
defp take_integer(bin, bit_size) do
<<taken::size(bit_size), _tossed::bits>> = bin
taken
end
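# Worked example (per the code above): each tRNS colour sample is two bytes
# and take_integer/2 keeps only the leading `bit_depth` bits, so with a bit
# depth of 8:
#     decode_rgb(<<0xAB, 0x00, 0x12, 0x00, 0x34, 0x00>>, 8)
#     # => {0xAB, 0x12, 0x34}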
defp palette_opacities(opacity_bytes) do
palette_opacities(opacity_bytes, [])
|> :array.from_list(@palette_default_opacity)
end
defp palette_opacities(<<>>, opacities) do
Enum.reverse(opacities)
end
defp palette_opacities(<<opacity::size(8), rest::binary>>, opacities) do
palette_opacities(rest, [opacity | opacities])
end
end
|
lib/imagineer/image/png/chunk/decoders/transparency.ex
| 0.505615
| 0.416648
|
transparency.ex
|
starcoder
|
defmodule ExTwilio.JWT.AccessToken do
@moduledoc """
A Twilio JWT access token, as described in the Twilio docs.
https://www.twilio.com/docs/iam/access-tokens
"""
alias ExTwilio.JWT.Grant
alias ExTwilio.Ext
use Joken.Config
@enforce_keys [:account_sid, :api_key, :api_secret, :identity, :grants, :expires_in]
defstruct token_identifier: nil,
account_sid: nil,
api_key: nil,
api_secret: nil,
identity: nil,
grants: [],
expires_in: nil
@type t :: %__MODULE__{
        token_identifier: String.t() | nil,
        account_sid: String.t(),
        api_key: String.t(),
        api_secret: String.t(),
        identity: String.t(),
        grants: [ExTwilio.JWT.Grant.t()],
        expires_in: integer
      }
@doc """
Creates a new JWT access token.
## Examples
AccessToken.new(
account_sid: "account_sid",
api_key: "api_key",
api_secret: "secret",
identity: "<EMAIL>",
expires_in: 86_400,
grants: [AccessToken.ChatGrant.new(service_sid: "sid")]
)
"""
@spec new(attrs :: Keyword.t()) :: t
def new(attrs \\ []) do
struct(__MODULE__, attrs)
end
@doc """
Converts an access token into a string JWT.
Will raise errors if the `token` does not have all the required fields.
## Examples
token =
AccessToken.new(
account_sid: "account_sid",
api_key: "api_key",
api_secret: "secret",
identity: "<EMAIL>",
expires_in: 86_400,
grants: [AccessToken.ChatGrant.new(service_sid: "sid")]
)
AccessToken.to_jwt!(token)
# => "<KEY>"
"""
@spec to_jwt!(t) :: String.t() | no_return
def to_jwt!(token) do
token =
token
|> Ext.Map.validate!(:account_sid, &is_binary/1, "must be a binary")
|> Ext.Map.validate!(:api_key, &is_binary/1, "must be a binary")
|> Ext.Map.validate!(:api_secret, &is_binary/1, "must be a binary")
|> Ext.Map.validate!(:identity, &is_binary/1, "must be a binary")
|> Ext.Map.validate!(:grants, &list_of_grants?/1, "must be a list of grants")
|> Ext.Map.validate!(:expires_in, &is_integer/1, "must be an integer")
token_config =
%{}
|> add_claim("grants", fn -> grants(token) end)
|> add_claim("sub", fn -> token.account_sid end)
|> add_claim("jti", fn -> token.token_identifier || "#{token.api_key}-#{random_str()}" end)
|> add_claim("iss", fn -> token.api_key end)
|> add_claim("nbf", fn -> DateTime.utc_now() |> DateTime.to_unix() end)
|> add_claim("exp", fn -> (DateTime.utc_now() |> DateTime.to_unix()) + token.expires_in end)
|> add_claim("iat", fn -> DateTime.utc_now() |> DateTime.to_unix() end)
signer =
Joken.Signer.create("HS256", token.api_secret, %{
"typ" => "JWT",
"alg" => "HS256",
"cty" => "twilio-fpa;v=1"
})
Joken.generate_and_sign!(token_config, %{}, signer)
end
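# Verification sketch (assumes Joken 2's `Joken.verify/2`; handy in tests,
# with `token` built via new/1 as in the examples above):
#
#     signer = Joken.Signer.create("HS256", token.api_secret)
#     {:ok, claims} = Joken.verify(ExTwilio.JWT.AccessToken.to_jwt!(token), signer)
#     claims["iss"] == token.api_key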
defp list_of_grants?(grants) when is_list(grants) do
Enum.all?(grants, &Grant.impl_for(&1))
end
defp list_of_grants?(_other), do: false
defp grants(token) do
grants =
Enum.reduce(token.grants, %{"identity" => token.identity}, fn grant, acc ->
Map.put(acc, Grant.type(grant), Grant.attrs(grant))
end)
grants
end
defp random_str do
16
|> :crypto.strong_rand_bytes()
|> Base.encode16()
|> String.downcase()
end
end
|
lib/ex_twilio/jwt/access_token.ex
| 0.854688
| 0.406479
|
access_token.ex
|
starcoder
|
defmodule ExNric do
@moduledoc """
Validation of Singapore NRIC/FIN numbers via their checksum letter.
"""
@doc """
Validates Singapore NRIC
## Examples
iex> ExNric.validate("S7343684B")
{:ok, "S7343684B"}
iex> ExNric.validate("T9901744E")
{:ok, "T9901744E"}
iex> ExNric.validate("F4365949U")
{:ok, "F4365949U"}
iex> ExNric.validate("G0047750N")
{:ok, "G0047750N"}
iex> ExNric.validate("G0047750U")
{:error, :checksum_error}
iex> ExNric.validate("X")
{:error, :invalid_format}
"""
@spec validate(String.t()) ::
{:ok, String.t()} | {:error, :checksum_error} | {:error, :invalid_format}
def validate(nric) when is_binary(nric) do
nric_pattern = ~r/^([STFG])(\d{7})([A-Z])$/
case Regex.run(nric_pattern, nric) do
[_, prefix, digits, checksum] ->
calculated_checksum = calculate_checksum(prefix, numberize(digits))
do_check(calculated_checksum, checksum, nric)
_ ->
{:error, :invalid_format}
end
end
defp do_check(calculated_checksum, checksum, nric) do
if calculated_checksum == checksum do
{:ok, nric}
else
{:error, :checksum_error}
end
end
defp numberize(digits) do
digits |> String.codepoints() |> Enum.map(&String.to_integer/1)
end
defp calculate_checksum("S", digits), do: calculate_checksum_uin(digits, 0)
defp calculate_checksum("T", digits), do: calculate_checksum_uin(digits, 4)
defp calculate_checksum("F", digits), do: calculate_checksum_fin(digits, 0)
defp calculate_checksum("G", digits), do: calculate_checksum_fin(digits, 4)
defp calculate_checksum_uin(digits, d0) do
d = digits |> calculate_d(d0) |> Integer.mod(11)
ds() |> Enum.zip(uin_checksums()) |> Enum.into(%{}) |> Map.get(d)
end
defp calculate_checksum_fin(digits, d0) do
d = digits |> calculate_d(d0) |> Integer.mod(11)
ds() |> Enum.zip(fin_checksums()) |> Enum.into(%{}) |> Map.get(d)
end
defp calculate_d(digits, d0) do
d0 + (digits |> Enum.zip(weights()) |> Enum.map(fn {a, b} -> a * b end) |> Enum.sum())
end
defp weights do
[2, 7, 6, 5, 4, 3, 2]
end
defp ds do
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
end
defp uin_checksums do
"A B C D E F G H I Z J" |> String.split()
end
defp fin_checksums do
"K L M N P Q R T U W X" |> String.split()
end
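# Worked example (per the code above) for "S7343684B":
#   d = 0 + 7*2 + 3*7 + 4*6 + 3*5 + 6*4 + 8*3 + 4*2 = 130; Integer.mod(130, 11) = 9
#   zipping ds() with uin_checksums() maps 9 -> "B", matching the final letter.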
end
|
lib/ex_nric.ex
| 0.796451
| 0.5564
|
ex_nric.ex
|
starcoder
|
defmodule AWS.SWF do
@moduledoc """
Amazon Simple Workflow Service
The Amazon Simple Workflow Service (Amazon SWF) makes it easy to build
applications that use Amazon's cloud to coordinate work across distributed
components.
In Amazon SWF, a *task* represents a logical unit of work that is performed by a
component of your workflow. Coordinating tasks in a workflow involves managing
intertask dependencies, scheduling, and concurrency in accordance with the
logical flow of the application.
Amazon SWF gives you full control over implementing tasks and coordinating them
without worrying about underlying complexities such as tracking their progress
and maintaining their state.
This documentation serves as reference only. For a broader overview of the
Amazon SWF programming model, see the *[Amazon SWF Developer Guide](https://docs.aws.amazon.com/amazonswf/latest/developerguide/)*.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "Amazon SWF",
api_version: "2012-01-25",
content_type: "application/x-amz-json-1.0",
credential_scope: nil,
endpoint_prefix: "swf",
global?: false,
protocol: "json",
service_id: "SWF",
signature_version: "v4",
signing_name: "swf",
target_prefix: "SimpleWorkflowService"
}
end
@doc """
Returns the number of closed workflow executions within the given domain that
meet the specified filtering criteria.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.
* `typeFilter.name`: String constraint. The key is
`swf:typeFilter.name`.
* `typeFilter.version`: String constraint. The key is
`swf:typeFilter.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def count_closed_workflow_executions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CountClosedWorkflowExecutions", input, options)
end
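# Usage sketch (given an `AWS.Client` struct as `client`; the domain name is
# a placeholder, and the response shape follows the SWF JSON API):
#
#     {:ok, %{"count" => _n}, _http_response} =
#       AWS.SWF.count_closed_workflow_executions(client, %{"domain" => "my-domain"})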
@doc """
Returns the number of open workflow executions within the given domain that meet
the specified filtering criteria.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.
* `typeFilter.name`: String constraint. The key is
`swf:typeFilter.name`.
* `typeFilter.version`: String constraint. The key is
`swf:typeFilter.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def count_open_workflow_executions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CountOpenWorkflowExecutions", input, options)
end
@doc """
Returns the estimated number of activity tasks in the specified task list.
The count returned is an approximation and isn't guaranteed to be exact. If you
specify a task list in which no activity task was ever scheduled, then `0` is
returned.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the `taskList.name` parameter by using a `Condition`
element with the `swf:taskList.name` key to allow the action to access only
certain task lists.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def count_pending_activity_tasks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CountPendingActivityTasks", input, options)
end
@doc """
Returns the estimated number of decision tasks in the specified task list.
The count returned is an approximation and isn't guaranteed to be exact. If you
specify a task list in which no decision task was ever scheduled, then `0` is
returned.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the `taskList.name` parameter by using a `Condition`
element with the `swf:taskList.name` key to allow the action to access only
certain task lists.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def count_pending_decision_tasks(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CountPendingDecisionTasks", input, options)
end
@doc """
Deprecates the specified *activity type*.
After an activity type has been deprecated, you cannot create new tasks of that
activity type. Tasks of this type that were scheduled before the type was
deprecated continue to run.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `activityType.name`: String constraint. The key is
`swf:activityType.name`.
* `activityType.version`: String constraint. The key is
`swf:activityType.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def deprecate_activity_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeprecateActivityType", input, options)
end
@doc """
Deprecates the specified domain.
After a domain has been deprecated it cannot be used to create new workflow
executions or register new types. However, you can still use visibility actions
on this domain. Deprecating a domain also deprecates all activity and workflow
types registered in the domain. Executions that were started before the domain
was deprecated continue to run.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def deprecate_domain(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeprecateDomain", input, options)
end
@doc """
Deprecates the specified *workflow type*.
After a workflow type has been deprecated, you cannot create new executions of
that type. Executions that were started before the type was deprecated continue
to run. A deprecated workflow type may still be used when calling visibility
actions.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `workflowType.name`: String constraint. The key is
`swf:workflowType.name`.
* `workflowType.version`: String constraint. The key is
`swf:workflowType.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def deprecate_workflow_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeprecateWorkflowType", input, options)
end
@doc """
Returns information about the specified activity type.
This includes configuration settings provided when the type was registered and
other general information about the type.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `activityType.name`: String constraint. The key is
`swf:activityType.name`.
* `activityType.version`: String constraint. The key is
`swf:activityType.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def describe_activity_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeActivityType", input, options)
end
@doc """
Returns information about the specified domain, including description and
status.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def describe_domain(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDomain", input, options)
end
@doc """
Returns information about the specified workflow execution including its type
and some statistics.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def describe_workflow_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeWorkflowExecution", input, options)
end
@doc """
Returns information about the specified *workflow type*.
This includes configuration settings specified when the type was registered and
other information such as creation date, current status, etc.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `workflowType.name`: String constraint. The key is
`swf:workflowType.name`.
* `workflowType.version`: String constraint. The key is
`swf:workflowType.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def describe_workflow_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeWorkflowType", input, options)
end
@doc """
Returns the history of the specified workflow execution.
The results may be split into multiple pages. To retrieve subsequent pages, make
the call again using the `nextPageToken` returned by the initial call.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def get_workflow_execution_history(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetWorkflowExecutionHistory", input, options)
end
@doc """
Returns information about all activities registered in the specified domain that
match the specified name and registration status.
The result includes information like creation date, current status of the
activity, etc. The results may be split into multiple pages. To retrieve
subsequent pages, make the call again using the `nextPageToken` returned by the
initial call.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def list_activity_types(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListActivityTypes", input, options)
end
@doc """
Returns a list of closed workflow executions in the specified domain that meet
the filtering criteria.
The results may be split into multiple pages. To retrieve subsequent pages, make
the call again using the nextPageToken returned by the initial call.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.
* `typeFilter.name`: String constraint. The key is
`swf:typeFilter.name`.
* `typeFilter.version`: String constraint. The key is
`swf:typeFilter.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def list_closed_workflow_executions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListClosedWorkflowExecutions", input, options)
end
@doc """
Returns the list of domains registered in the account.
The results may be split into multiple pages. To retrieve subsequent pages, make
the call again using the nextPageToken returned by the initial call.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains. The element must be set to
`arn:aws:swf::AccountID:domain/*`, where *AccountID* is the account ID, with no
dashes.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def list_domains(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDomains", input, options)
end
@doc """
Returns a list of open workflow executions in the specified domain that meet the
filtering criteria.
The results may be split into multiple pages. To retrieve subsequent pages, make
the call again using the nextPageToken returned by the initial call.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `tagFilter.tag`: String constraint. The key is
`swf:tagFilter.tag`.
* `typeFilter.name`: String constraint. The key is
`swf:typeFilter.name`.
* `typeFilter.version`: String constraint. The key is
`swf:typeFilter.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def list_open_workflow_executions(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListOpenWorkflowExecutions", input, options)
end
@doc """
List tags for a given domain.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Returns information about workflow types in the specified domain.
The results may be split into multiple pages that can be retrieved by making the
call repeatedly.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def list_workflow_types(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListWorkflowTypes", input, options)
end
@doc """
Used by workers to get an `ActivityTask` from the specified activity `taskList`.
This initiates a long poll, where the service holds the HTTP connection open and
responds as soon as a task becomes available. The maximum time the service holds
on to the request before responding is 60 seconds. If no task is available
within 60 seconds, the poll returns an empty result. An empty result, in this
context, means that an `ActivityTask` is returned, but that the value of `taskToken`
is an empty string. If a task is returned, the worker should use its type to
identify and process it correctly.
Workers should set their client-side socket timeout to at least 70 seconds (10
seconds higher than the maximum time the service may hold the poll request).
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the `taskList.name` parameter by using a `Condition`
element with the `swf:taskList.name` key to allow the action to access only
certain task lists.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
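## Example

A hypothetical worker poll, assuming an `AWS.SWF` module name, a `client`
built as in the `list_domains/3` example, and an HTTP receive timeout above
70 seconds; the domain and task list names are placeholders.

```
input = %{"domain" => "my-domain", "taskList" => %{"name" => "my-task-list"}}

{:ok, task, _http_response} = AWS.SWF.poll_for_activity_task(client, input)

case task["taskToken"] do
  empty when empty in [nil, ""] -> :no_task_available
  token -> {:process, token, task["input"]}
end
```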
"""
def poll_for_activity_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PollForActivityTask", input, options)
end
@doc """
Used by deciders to get a `DecisionTask` from the specified decision `taskList`.
A decision task may be returned for any open workflow execution that is using
the specified task list. The task includes a paginated view of the history of
the workflow execution. The decider should use the workflow type and the history
to determine how to properly handle the task.
This action initiates a long poll, where the service holds the HTTP connection
open and responds as soon as a task becomes available. If no decision task is
available in the specified task list before the timeout of 60 seconds expires,
an empty result is returned. An empty result, in this context, means that a
`DecisionTask` is returned, but that the value of `taskToken` is an empty string.
Deciders should set their client-side socket timeout to at least 70 seconds (10
seconds higher than the timeout).
Because the number of workflow history events for a single workflow execution
might be very large, the result returned might be split up across a number of
pages. To retrieve subsequent pages, make additional calls to
`PollForDecisionTask` using the `nextPageToken` returned by the initial call.
Note that you do *not* call `GetWorkflowExecutionHistory` with this
`nextPageToken`. Instead, call `PollForDecisionTask` again.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the `taskList.name` parameter by using a `Condition`
element with the `swf:taskList.name` key to allow the action to access only
certain task lists.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
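## Example

A sketch of fetching a second page of history (hypothetical names; note the
token goes back into `poll_for_decision_task/3`, not
`get_workflow_execution_history/3`):

```
input = %{"domain" => "my-domain", "taskList" => %{"name" => "deciders"}}

{:ok, task, _} = AWS.SWF.poll_for_decision_task(client, input)

{:ok, _more_history, _} =
  AWS.SWF.poll_for_decision_task(
    client,
    Map.put(input, "nextPageToken", task["nextPageToken"])
  )
```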
"""
def poll_for_decision_task(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PollForDecisionTask", input, options)
end
@doc """
Used by activity workers to report to the service that the `ActivityTask`
represented by the specified `taskToken` is still making progress.
The worker can also specify details of the progress, for example percent
complete, using the `details` parameter. This action can also be used by the
worker as a mechanism to check if cancellation is being requested for the
activity task. If a cancellation is being attempted for the specified task, then
the boolean `cancelRequested` flag returned by the service is set to `true`.
This action resets the `taskHeartbeatTimeout` clock. The `taskHeartbeatTimeout`
is specified in `RegisterActivityType`.
This action doesn't in itself create an event in the workflow execution history.
However, if the task times out, the workflow execution history contains an
`ActivityTaskTimedOut` event that contains the information from the last
heartbeat generated by the activity worker.
The `taskStartToCloseTimeout` of an activity type is the maximum duration of an
activity task, regardless of the number of `RecordActivityTaskHeartbeat`
requests received. The `taskStartToCloseTimeout` is also specified in
`RegisterActivityType`.
This operation is only useful for long-lived activities to report liveliness of
the task and to determine if a cancellation is being attempted.
If the `cancelRequested` flag returns `true`, a cancellation is being attempted.
If the worker can cancel the activity, it should respond with
`RespondActivityTaskCanceled`. Otherwise, it should ignore the cancellation
request.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
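## Example

A sketch of a heartbeat with a cancellation check; `task_token` is a
placeholder for the token of a polled task.

```
{:ok, %{"cancelRequested" => cancel?}, _} =
  AWS.SWF.record_activity_task_heartbeat(client, %{
    "taskToken" => task_token,
    "details" => "42% complete"
  })

if cancel? do
  AWS.SWF.respond_activity_task_canceled(client, %{"taskToken" => task_token})
end
```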
"""
def record_activity_task_heartbeat(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RecordActivityTaskHeartbeat", input, options)
end
@doc """
Registers a new *activity type* along with its configuration settings in the
specified domain.
A `TypeAlreadyExists` fault is returned if the type already exists in the
domain. You cannot change any configuration settings of the type after its
registration; to change settings, register the type as a new version.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `defaultTaskList.name`: String constraint. The key is
`swf:defaultTaskList.name`.
* `name`: String constraint. The key is `swf:name`.
* `version`: String constraint. The key is
`swf:version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
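## Example

A sketch (domain, name, version, and timeouts are placeholders; duration
values are passed as strings of seconds, per the API):

```
{:ok, _result, _response} =
  AWS.SWF.register_activity_type(client, %{
    "domain" => "my-domain",
    "name" => "SendInvoice",
    "version" => "1.0",
    "defaultTaskList" => %{"name" => "my-task-list"},
    "defaultTaskStartToCloseTimeout" => "300",
    "defaultTaskHeartbeatTimeout" => "60"
  })
```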
"""
def register_activity_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RegisterActivityType", input, options)
end
@doc """
Registers a new domain.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* You cannot use an IAM policy to control domain access for this
action. The name of the domain being registered is available as the resource of
this action.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
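## Example

A minimal sketch (the name and retention period are placeholders;
`workflowExecutionRetentionPeriodInDays` is a string, per the API):

```
{:ok, _result, _response} =
  AWS.SWF.register_domain(client, %{
    "name" => "my-domain",
    "workflowExecutionRetentionPeriodInDays" => "7"
  })
```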
"""
def register_domain(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RegisterDomain", input, options)
end
@doc """
Registers a new *workflow type* and its configuration settings in the specified
domain.
The retention period for the workflow history is set by the `RegisterDomain`
action.
If the type already exists, then a `TypeAlreadyExists` fault is returned. You
cannot change the configuration settings of a workflow type once it is
registered; to change settings, register the type as a new version.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `defaultTaskList.name`: String constraint. The key is
`swf:defaultTaskList.name`.
* `name`: String constraint. The key is `swf:name`.
* `version`: String constraint. The key is
`swf:version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def register_workflow_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RegisterWorkflowType", input, options)
end
@doc """
Records a `WorkflowExecutionCancelRequested` event in the currently running
workflow execution identified by the given domain, workflowId, and runId.
This logically requests the cancellation of the workflow execution as a whole.
It is up to the decider to take appropriate actions when it receives an
execution history with this event.
If the runId isn't specified, the `WorkflowExecutionCancelRequested` event is
recorded in the history of the current open workflow execution with the
specified workflowId in the domain.
Because this action allows the workflow to properly clean up and gracefully
close, it should be used instead of `TerminateWorkflowExecution` when possible.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def request_cancel_workflow_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RequestCancelWorkflowExecution", input, options)
end
@doc """
Used by workers to tell the service that the `ActivityTask` identified by the
`taskToken` was successfully canceled.
Additional `details` can be provided using the `details` argument.
These `details` (if provided) appear in the `ActivityTaskCanceled` event added
to the workflow history.
Only use this operation if the `cancelRequested` flag returned by a
`RecordActivityTaskHeartbeat` request is `true` and if the activity can be
safely undone or abandoned.
A task is considered open from the time that it is scheduled until it is closed.
Therefore a task is reported as open while a worker is processing it. A task is
closed after it has been specified in a call to `RespondActivityTaskCompleted`,
`RespondActivityTaskCanceled`, `RespondActivityTaskFailed`, or the task has [timed out](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types).
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def respond_activity_task_canceled(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RespondActivityTaskCanceled", input, options)
end
@doc """
Used by workers to tell the service that the `ActivityTask` identified by the
`taskToken` completed successfully with a `result` (if provided).
The `result` appears in the `ActivityTaskCompleted` event in the workflow
history.
If the requested task doesn't complete successfully, use
`RespondActivityTaskFailed` instead. If the worker finds that the task is
canceled through the `cancelRequested` flag returned by `RecordActivityTaskHeartbeat`,
it should cancel the task, clean up, and then call `RespondActivityTaskCanceled`.
A task is considered open from the time that it is scheduled until it is closed.
Therefore a task is reported as open while a worker is processing it. A task is
closed after it has been specified in a call to `RespondActivityTaskCompleted`,
`RespondActivityTaskCanceled`, `RespondActivityTaskFailed`, or the task has
[timed out](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types).
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
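## Example

A sketch of reporting success for a polled task; the token and result
payload are placeholders.

```
{:ok, _result, _response} =
  AWS.SWF.respond_activity_task_completed(client, %{
    "taskToken" => task_token,
    "result" => ~s({"status": "ok"})
  })
```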
"""
def respond_activity_task_completed(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RespondActivityTaskCompleted", input, options)
end
@doc """
Used by workers to tell the service that the `ActivityTask` identified by the
`taskToken` has failed with `reason` (if specified).
The `reason` and `details` appear in the `ActivityTaskFailed` event added to the
workflow history.
A task is considered open from the time that it is scheduled until it is closed.
Therefore a task is reported as open while a worker is processing it. A task is
closed after it has been specified in a call to `RespondActivityTaskCompleted`,
`RespondActivityTaskCanceled`, `RespondActivityTaskFailed`, or the task has [timed out](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dg-basic.html#swf-dev-timeout-types).
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def respond_activity_task_failed(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RespondActivityTaskFailed", input, options)
end
@doc """
Used by deciders to tell the service that the `DecisionTask` identified by the
`taskToken` has successfully completed.
The `decisions` argument specifies the list of decisions made while processing
the task.
A `DecisionTaskCompleted` event is added to the workflow history. The
`executionContext` specified is attached to the event in the workflow execution
history.
## Access Control
If an IAM policy grants permission to use `RespondDecisionTaskCompleted`, it can
express permissions for the list of decisions in the `decisions` parameter. Each
of the decisions has one or more parameters, much like a regular API call. To
allow for policies to be as readable as possible, you can express permissions on
decisions as if they were actual API calls, including applying conditions to
some parameters. For more information, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def respond_decision_task_completed(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RespondDecisionTaskCompleted", input, options)
end
@doc """
Records a `WorkflowExecutionSignaled` event in the workflow execution history
and creates a decision task for the workflow execution identified by the given
domain, workflowId and runId.
The event is recorded with the specified user-defined `signalName` and `input` (if
provided).
If a runId isn't specified, then the `WorkflowExecutionSignaled` event is
recorded in the history of the current open workflow with the matching
workflowId in the domain.
If the specified workflow execution isn't open, this method fails with
`UnknownResource`.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
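## Example

A sketch (domain, workflow ID, signal name, and payload are placeholders):

```
{:ok, _result, _response} =
  AWS.SWF.signal_workflow_execution(client, %{
    "domain" => "my-domain",
    "workflowId" => "order-12345",
    "signalName" => "payment_received",
    "input" => ~s({"amount": 100})
  })
```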
"""
def signal_workflow_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SignalWorkflowExecution", input, options)
end
@doc """
Starts an execution of the workflow type in the specified domain using the
provided `workflowId` and input data.
This action returns the newly started workflow execution.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `tagList.member.0`: The key is `swf:tagList.member.0`.
* `tagList.member.1`: The key is `swf:tagList.member.1`.
* `tagList.member.2`: The key is `swf:tagList.member.2`.
* `tagList.member.3`: The key is `swf:tagList.member.3`.
* `tagList.member.4`: The key is `swf:tagList.member.4`.
* `taskList.name`: String constraint. The key is
`swf:taskList.name`.
* `workflowType.name`: String constraint. The key is
`swf:workflowType.name`.
* `workflowType.version`: String constraint. The key is
`swf:workflowType.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
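## Example

A sketch (all names are placeholders; the workflow type must already be
registered in the domain):

```
{:ok, %{"runId" => run_id}, _response} =
  AWS.SWF.start_workflow_execution(client, %{
    "domain" => "my-domain",
    "workflowId" => "order-12345",
    "workflowType" => %{"name" => "ProcessOrder", "version" => "1.0"},
    "input" => ~s({"orderId": 12345})
  })
```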
"""
def start_workflow_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "StartWorkflowExecution", input, options)
end
@doc """
Add a tag to an Amazon SWF domain.
Amazon SWF supports a maximum of 50 tags per resource.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Records a `WorkflowExecutionTerminated` event and forces closure of the workflow
execution identified by the given domain, runId, and workflowId.
The child policy, registered with the workflow type or specified when starting
this execution, is applied to any open child workflow executions of this
workflow execution.
If the identified workflow execution was in progress, it is terminated
immediately.
If a runId isn't specified, then the `WorkflowExecutionTerminated` event is
recorded in the history of the current open workflow with the matching
workflowId in the domain.
You should consider using the `RequestCancelWorkflowExecution` action instead,
because it allows the workflow to close gracefully, while
`TerminateWorkflowExecution` doesn't.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def terminate_workflow_execution(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TerminateWorkflowExecution", input, options)
end
@doc """
Undeprecates a previously deprecated *activity type*.
After an activity type has been undeprecated, you can create new tasks of that
activity type.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `activityType.name`: String constraint. The key is
`swf:activityType.name`.
* `activityType.version`: String constraint. The key is
`swf:activityType.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def undeprecate_activity_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UndeprecateActivityType", input, options)
end
@doc """
Undeprecates a previously deprecated domain.
After a domain has been undeprecated, it can be used to create new workflow
executions or register new types.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* You cannot use an IAM policy to constrain this action's
parameters.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def undeprecate_domain(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UndeprecateDomain", input, options)
end
@doc """
Undeprecates a previously deprecated *workflow type*.
After a workflow type has been undeprecated, you can create new executions of
that type.
This operation is eventually consistent. The results are best effort and may not
exactly reflect recent updates and changes.
## Access Control
You can use IAM policies to control this action's access to Amazon SWF resources
as follows:
* Use a `Resource` element with the domain name to limit the action
to only specified domains.
* Use an `Action` element to allow or deny permission to call this
action.
* Constrain the following parameters by using a `Condition` element
with the appropriate keys.
* `workflowType.name`: String constraint. The key is
`swf:workflowType.name`.
* `workflowType.version`: String constraint. The key is
`swf:workflowType.version`.
If the caller doesn't have sufficient permissions to invoke the action, or the
parameter values fall outside the specified constraints, the action fails. The
associated event attribute's `cause` parameter is set to
`OPERATION_NOT_PERMITTED`. For details and example IAM policies, see [Using IAM to Manage Access to Amazon SWF
Workflows](https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html)
in the *Amazon SWF Developer Guide*.
"""
def undeprecate_workflow_type(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UndeprecateWorkflowType", input, options)
end
@doc """
Remove a tag from an Amazon SWF domain.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
end
|
lib/aws/generated/swf.ex
| 0.931174
| 0.632361
|
swf.ex
|
starcoder
|
defmodule Bingo.Game do
@enforce_keys [:squares]
defstruct squares: nil, scores: %{}, winner: nil
alias Bingo.{Buzzwords, Game, Square, BingoChecker}
@doc """
Creates a game with a `size` x `size` collection of squares
taken randomly from the buzzwords returned by `Buzzwords.read_buzzwords/0`,
where each buzzword is of the form `%{phrase: "Upsell", points: 10}`.
"""
def new(size) when is_integer(size) do
buzzwords = Buzzwords.read_buzzwords()
Game.new(buzzwords, size)
end
@doc """
Creates a game with a `size` x `size` collection of squares
taken randomly from the given list of `buzzwords` where
each buzzword is of the form `%{phrase: "Upsell", points: 10}`.
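## Example

A minimal sketch with hypothetical buzzwords; a 2 x 2 game consumes four
of them:

```
buzzwords = [
  %{phrase: "Upsell", points: 10},
  %{phrase: "Pivot", points: 15},
  %{phrase: "Synergy", points: 20},
  %{phrase: "Disrupt", points: 25}
]

game = Bingo.Game.new(buzzwords, 2)
```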
"""
def new(buzzwords, size) do
squares =
buzzwords
|> Enum.shuffle()
|> Enum.take(size * size)
|> Enum.map(&Square.from_buzzword(&1))
|> Enum.chunk_every(size)
%Game{squares: squares}
end
@doc """
Marks the square that has the given `phrase` for the given `player`,
updates the scores, and checks for a bingo!
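## Example

A sketch assuming a player is any map or struct with a `name` field, as
the score bookkeeping below implies:

```
player = %{name: "Ada"}
game = Bingo.Game.mark(game, "Upsell", player)
game.scores
#=> %{"Ada" => 10}
```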
"""
def mark(game, phrase, player) do
game
|> update_squares_with_mark(phrase, player)
|> update_scores()
|> assign_winner_if_bingo(player)
end
defp update_squares_with_mark(game, phrase, player) do
new_squares =
game.squares
|> List.flatten()
|> Enum.map(&mark_square_having_phrase(&1, phrase, player))
|> Enum.chunk_every(Enum.count(game.squares))
%{game | squares: new_squares}
end
defp mark_square_having_phrase(square, phrase, player) do
case square.phrase == phrase do
true -> %Square{square | marked_by: player}
false -> square
end
end
defp update_scores(game) do
scores =
game.squares
|> List.flatten()
|> Enum.reject(&is_nil(&1.marked_by))
|> Enum.map(fn s -> {s.marked_by.name, s.points} end)
|> Enum.reduce(%{}, fn {name, points}, scores ->
Map.update(scores, name, points, &(&1 + points))
end)
%{game | scores: scores}
end
defp assign_winner_if_bingo(game, player) do
case BingoChecker.bingo?(game.squares) do
true -> %{game | winner: player}
false -> game
end
end
end
|
apps/bingo/lib/bingo/game.ex
| 0.842442
| 0.622244
|
game.ex
|
starcoder
|
defmodule Loom.RWORSet do
alias Loom.RWORSet, as: Set
alias Loom.Dots
@moduledoc """
A remove-wins (optimized) observed-remove set (without tombstones).
This CRDT breaks ties (concurrency) in favor of removing an element from a set.
This CRDT isn't as "natural" as the add-wins set, which usually matches user
expectations a bit more closely.
"""
@type actor :: term
@type value :: term
@opaque t :: %Set{
dots: Dots.t()
}
defstruct dots: Dots.new()
@doc """
Creates a new RWORSet.
The identity value is `[]`, an empty list.
iex> alias Loom.RWORSet, as: Set
iex> Set.new |> Set.value
[]
"""
@spec new() :: t
def new, do: %Set{}
@doc """
Add a term to the set.
iex> alias Loom.RWORSet, as: Set
iex> Set.new |> Set.add(:a, "test1") |> Set.add(:a, "test2") |> Set.value |> Enum.sort
["test1", "test2"]
"""
@spec add(t, actor, value) :: {t, t}
@spec add({t, t}, actor, value) :: {t, t}
def add(%Set{} = set, actor, value), do: add({set, Set.new()}, actor, value)
def add({%Set{dots: d}, %Set{dots: delta_dots}}, actor, value) do
{new_dots, new_delta_dots} =
{d, delta_dots}
|> Dots.remove(&({value, true} == &1 || {value, false} == &1))
|> Dots.add(actor, {value, true})
{%Set{dots: new_dots}, %Set{dots: new_delta_dots}}
end
@doc """
Removes a term from the set.
iex> alias Loom.RWORSet, as: Set
iex> Set.new |> Set.add(:a, "test1") |> Set.remove(:a, "test1") |> Set.member?("test1")
false
"""
@spec remove(t, actor, value) :: {t, t}
@spec remove({t, t}, actor, value) :: {t, t}
def remove(%Set{} = set, actor, value), do: remove({set, Set.new()}, actor, value)
def remove({%Set{dots: d} = set, %Set{dots: delta_dots}}, actor, value) do
if member?(set, value) do
{new_dots, new_delta_dots} =
{d, delta_dots}
|> Dots.remove(&({value, true} == &1 || {value, false} == &1))
|> Dots.add(actor, {value, false})
{%Set{dots: new_dots}, %Set{dots: new_delta_dots}}
else
raise Loom.PreconditionError, unobserved: value
end
end
@doc """
Tests if a value is an element of the set.
iex> alias Loom.RWORSet, as: Set
iex> Set.new |> Set.add(:a, "test1") |> Set.member?("test1")
true
"""
@spec member?({t, t}, value) :: boolean
@spec member?(t, value) :: boolean
def member?({set, _}, value), do: member?(set, value)
def member?(%Set{dots: d}, value) do
Dots.dots(d) |> Enum.any?(fn {_, term_pair} -> term_pair == {value, true} end)
end
@doc """
Returns a list of set members
iex> alias Loom.RWORSet, as: Set
iex> Set.new |> Set.add(:a, "test1") |> Set.value
["test1"]
"""
@spec value({t, t}) :: [value]
@spec value(t) :: [value]
def value({set, %Set{}}), do: value(set)
def value(%Set{dots: d}) do
for({_, {v, true}} <- Dots.dots(d), do: v) |> Enum.uniq()
end
@doc """
Joins 2 sets together.
iex> alias Loom.RWORSet, as: Set
iex> {setA, _} = Set.new |> Set.add(:a, 1)
iex> {setB, _} = Set.new |> Set.add(:b, 2)
iex> Set.join(setA, setB) |> Set.value
[1,2]
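A removal also propagates through a join (a sketch, assuming the usual
ORSWOT-style join semantics of `Loom.Dots`):

    {base, _} = Set.new |> Set.add(:a, "x")
    {removed, _} = Set.remove(base, :a, "x")
    Set.join(base, removed) |> Set.value
    #=> []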
"""
@spec join(t, t) :: t
def join(%Set{dots: d1}, %Set{dots: d2}) do
%Set{dots: Dots.join(d1, d2)}
end
end
defimpl Loom.CRDT, for: Loom.RWORSet do
alias Loom.RWORSet, as: Set
@doc """
Returns a description of the operations that this CRDT takes.
Updates return a new CRDT, reads can return any natural datatype. This register
returns a value.
"""
def ops(_crdt) do
[
update: [
add: [:actor, :value],
remove: [:actor, :value]
],
read: [
is_member: [:value],
value: []
]
]
end
@doc """
Applies a CRDT to a counter in an abstract way.
This is for ops-based support.
iex> alias Loom.CRDT
iex> reg = Loom.RWORSet.new |> CRDT.apply({:add, :a, "test"}) |> CRDT.apply({:add, :a, "testing"}) |> CRDT.apply({:remove, :a, "test"})
iex> {CRDT.apply(reg, {:is_member, "testing"}), CRDT.apply(reg, {:is_member, "test"})}
{true, false}
"""
def apply(crdt, {:add, actor, value}) do
{reg, _} = Set.add(crdt, actor, value)
reg
end
def apply(crdt, {:remove, actor, value}) do
{reg, _} = Set.remove(crdt, actor, value)
reg
end
def apply(crdt, {:is_member, value}), do: Set.member?(crdt, value)
def apply(crdt, :value), do: Set.value(crdt)
@doc """
Joins 2 CRDT's of the same type.
2 different types cannot mix (yet).
iex> alias Loom.CRDT
iex> a = Loom.RWORSet.new |> CRDT.apply({:add, :a, "test"})
iex> b = Loom.RWORSet.new |> CRDT.apply({:add, :b, "test2"})
iex> CRDT.join(a,b) |> CRDT.apply(:value) |> Enum.sort
["test","test2"]
"""
@spec join(Set.t(), Set.t()) :: Set.t()
def join(a, b), do: Set.join(a, b)
@doc """
Returns the most natural primitive value for a set, a list.
iex> Loom.RWORSet.new |> Loom.CRDT.value
[]
"""
def value(crdt), do: Set.value(crdt)
end
|
lib/loom/rworset.ex
| 0.855429
| 0.460895
|
rworset.ex
|
starcoder
|
defmodule Exceptional.Raise do
@moduledoc ~S"""
Raise an exception if one has been propagated, otherwise continue
## Convenience `use`s
Everything:
use Exceptional.Raise
Only named functions (`raise_or_continue!`):
use Exceptional.Raise, only: :named_functions
Only operators (`>>>`):
use Exceptional.Raise, only: :operators
"""
defmacro __using__(only: :named_functions) do
quote do
import unquote(__MODULE__), except: [>>>: 2]
end
end
defmacro __using__(only: :operators) do
quote do
import unquote(__MODULE__), only: [>>>: 2]
end
end
defmacro __using__(_) do
quote do
import unquote(__MODULE__)
end
end
@doc ~S"""
`raise` if an exception, otherwise continue computation.
## Examples
iex> use Exceptional.Raise
...> raise_or_continue!(1, fn x -> x + 1 end.())
2
iex> use Exceptional.Raise
...> %ArgumentError{message: "raise me"}
...> |> raise_or_continue!(fn x -> x + 1 end.())
** (ArgumentError) raise me
"""
defmacro raise_or_continue!(maybe_exception, continue) do
quote do
require Exceptional.Control
Exceptional.Control.branch unquote(maybe_exception),
value_do: unquote(continue),
exception_do: fn exception -> raise(exception) end.()
end
end
@doc ~S"""
An operator alias of `raise_or_continue!`
## Examples
iex> use Exceptional.Raise
...> 1 >>> fn x -> x + 1 end.()
2
iex> use Exceptional.Raise
...> %ArgumentError{message: "raise me"} >>> fn x -> x + 1 end.()
** (ArgumentError) raise me
"""
defmacro maybe_exception >>> continue do
quote do
require Exceptional.Control
Exceptional.Control.branch unquote(maybe_exception),
value_do: unquote(continue),
exception_do: fn exception -> raise(exception) end.()
end
end
@doc ~S"""
Raise an exception, otherwise return plain value
## Examples
iex> ensure!([1, 2, 3])
[1, 2, 3]
iex> ensure!(%ArgumentError{message: "raise me"})
** (ArgumentError) raise me
"""
def ensure!(maybe_exception) do
if Exception.exception?(maybe_exception) do
raise maybe_exception
else
maybe_exception
end
end
@doc ~S"""
Define a function and automatically generate a variant that raises
## Examples
iex> defmodule Foo do
...> use Exceptional
...>
...> def! foo(a), do: a
...> end
...>
...> Foo.foo([1, 2, 3])
[1, 2, 3]
iex> defmodule Bar do
...> use Exceptional
...>
...> def! bar(a), do: a
...> end
...>
...> Bar.bar(%ArgumentError{message: "raise me"})
%ArgumentError{message: "raise me"}
iex> defmodule Baz do
...> use Exceptional
...>
...> def! baz(a), do: a
...> end
...>
...> Baz.baz!([1, 2, 3])
[1, 2, 3]
iex> defmodule Quux do
...> use Exceptional
...>
...> def! quux(a), do: a
...> end
...>
...> Quux.quux!(%ArgumentError{message: "raise me"})
** (ArgumentError) raise me
"""
defmacro def!(head, do: body) do
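# Build a second head whose name gains a trailing bang, reusing the
# original context and args, e.g. `foo(a)` also defines `foo!(a)`.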
{name, ctx, args} = head
variant = {String.to_atom("#{name}!"), ctx, args}
quote do
def unquote(head), do: unquote(body)
def unquote(variant), do: unquote(body) |> ensure!
end
end
end
|
lib/exceptional/raise.ex
| 0.812644
| 0.500305
|
raise.ex
|
starcoder
|
defmodule Elasticlunr.Dsl.TermsQuery do
use Elasticlunr.Dsl.Query
alias Elasticlunr.Dsl.Query
alias Elasticlunr.{Index, Token}
defstruct ~w[minimum_should_match expand field terms boost fuzziness]a
@type t :: %__MODULE__{
minimum_should_match: pos_integer(),
expand: boolean(),
field: Index.document_field(),
terms: list(Token.t()),
boost: integer(),
fuzziness: integer()
}
@options ~w[boost expand fuzziness minimum_should_match]
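# Example (a sketch; the field name and terms are hypothetical):
#
#     TermsQuery.new(
#       field: "title",
#       terms: ["elixir"],
#       expand: true,
#       boost: 2
#     )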
@spec new(keyword()) :: t()
def new(opts) do
attrs = %{
minimum_should_match: Keyword.get(opts, :minimum_should_match, 1),
expand: Keyword.get(opts, :expand, false),
field: Keyword.get(opts, :field, ""),
terms: Keyword.get(opts, :terms, []),
boost: Keyword.get(opts, :boost, 1),
fuzziness: Keyword.get(opts, :fuzziness, 0)
}
struct!(__MODULE__, attrs)
end
@impl true
def score(
%__MODULE__{
boost: boost,
field: field,
expand: expand,
terms: terms,
fuzziness: fuzziness,
minimum_should_match: minimum_should_match
},
%Index{} = index,
options \\ []
) do
terms =
case expand do
true ->
Enum.map(terms, fn
%Token{token: token} ->
Regex.compile!("^#{token}.*")
token ->
Regex.compile!("^#{token}.*")
end)
false ->
terms
end
query = [
field: field,
terms: terms,
fuzziness: fuzziness,
minimum_should_match: minimum_should_match
]
query =
case Keyword.get(options, :filtered) do
nil ->
query
filtered when is_list(filtered) ->
Keyword.put(query, :docs, filtered)
end
docs = Index.terms(index, query)
pick_highest_score = fn a, b ->
if(hd(a) > hd(b), do: a, else: b)
end
docs
|> Stream.map(&elem(&1, 0))
|> Enum.map(fn id ->
[score, doc] =
Map.get(docs, id)
|> Stream.map(fn doc ->
[doc.tf * :math.pow(doc.idf, 2) * doc.norm, doc]
end)
|> Enum.reduce([0, nil], pick_highest_score)
%{
ref: id,
field: field,
score: score * boost,
positions: Map.put(%{}, field, doc.positions)
}
end)
end
@impl true
def parse(options, _query_options, repo) do
cond do
Enum.empty?(options) ->
repo.parse("match_all", %{})
Enum.count(options) > 1 ->
should =
options
|> Enum.reject(fn {key, _field} -> key in @options end)
|> Enum.map(fn {field, terms} ->
%{"terms" => %{field => terms}}
end)
repo.parse("bool", %{"should" => should})
true ->
{field, params} = Query.split_root(options)
terms = get_terms(params)
opts = to_terms_params(params)
__MODULE__.new([field: field, terms: terms] ++ opts)
end
end
defp get_terms(params) when is_map(params) do
params
|> Map.get("value")
|> to_list()
end
defp get_terms(value), do: to_list(value)
defp to_terms_params(params) when is_map(params) do
[]
|> update_options(params, :minimum_should_match)
|> update_options(params, :fuzziness)
|> update_options(params, :expand)
|> update_options(params, :boost)
end
defp to_terms_params(params), do: to_terms_params(%{"value" => params})
defp update_options(opts, params, key) do
case Map.get(params, to_string(key)) do
nil ->
opts
value ->
Keyword.put(opts, key, value)
end
end
defp to_list(value) when is_list(value), do: value
defp to_list(value), do: [value]
end
|
lib/elasticlunr/dsl/query/terms_query.ex
| 0.691914
| 0.463384
|
terms_query.ex
|
starcoder
|
defmodule Maptu.Extension do
@moduledoc """
Contains custom functions extending [Maptu](https://github.com/lexhide/maptu)
that go beyond Maptu's own requirements. This module builds on top of `maptu.ex`,
with extracts and modifications of it. Mapail would not
work without the additional functionality in this module.
Maptu Creators:
- [<NAME>](https://github.com/whatyouhide)
- [<NAME>](https://github.com/lexmag)
Maptu License:
- MIT
- https://github.com/lexhide/maptu/blob/master/LICENSE.txt
Modified by:
- <NAME>
"""
import Kernel, except: [struct: 1, struct: 2]
@type non_strict_error_reason ::
:missing_struct_key
| :atom_key_not_expected
| {:bad_module_name, binary}
| {:non_existing_module, binary}
| {:non_struct, module}
@type strict_error_reason ::
non_strict_error_reason
| {:non_existing_atom, binary}
| {:unknown_struct_field, module, atom}
# We use a macro for this so we keep a nice stacktrace.
@doc :false
defmacro raise_on_error(code) do
quote do
case unquote(code) do
{:ok, result} -> result
{:ok, _result, rest} -> rest
{:error, reason} -> raise(ArgumentError, Maptu.Extension.format_error(reason))
end
end
end
@doc """
Converts a map to a struct, silently capturing residual `key => value`
pairs into a map with keys in the `String.t` format.
`map` is a map with binary keys that represents a "dumped" struct; it must
contain a `"__struct__"` key with a binary value that can be converted to a
valid module name. If the value of `"__struct__"` is not a module name or it's
a module that isn't a struct, then an error is returned.
Keys in `map` that are not fields of the resulting struct are collected along with their
respective values into a separate map denoted by `rest`.
This function returns `{:ok, struct, rest}` if the conversion is successful,
`{:error, reason}` otherwise.
## Examples
iex> Maptu.Extension.struct_rest(%{"__struct__" => "Elixir.URI", "port" => 8080, "foo" => 1})
{:ok, %URI{port: 8080}, %{"foo" => 1}}
iex> Maptu.Extension.struct_rest(%{"__struct__" => "Elixir.GenServer"})
{:error, {:non_struct, GenServer}}
"""
@spec struct_rest(map) :: {:ok, struct, map} | {:error, non_strict_error_reason}
def struct_rest(map) do
with {:ok, {mod_name, fields}} <- extract_mod_name_and_fields(map),
:ok <- ensure_exists(mod_name),
{:ok, mod} <- module_to_atom(mod_name),
do: struct_rest(mod, fields)
end
@doc """
Behaves like `Maptu.Extension.struct_rest/1` but returns the residual `rest` map rather
than the `struct` and raises in case of error.
This function behaves like `Maptu.Extension.struct_rest/1`, but it returns the `rest` map (instead
of `{:ok, struct, rest}`) if the conversion is valid, and raises an `ArgumentError`
exception if it's not valid.
## Examples
iex> Maptu.Extension.rest!(%{"__struct__" => "Elixir.URI", "port" => 8080})
%{}
iex> Maptu.Extension.rest!(%{"__struct__" => "Elixir.URI", "port" => 8080, "foo" => 1})
%{"foo" => 1}
iex> Maptu.Extension.rest!(%{"__struct__" => "Elixir.GenServer"})
** (ArgumentError) module is not a struct: GenServer
"""
@spec rest!(map) :: map | no_return
def rest!(map) do
map |> struct_rest() |> raise_on_error()
end
@doc """
Builds the `mod` struct with the given `fields`, silently capturing residual `key => value`
pairs into a map with keys in the `String.t` format.
This function takes a struct `mod` (`mod` should be a module that defines a
struct) and a map of fields with binary keys. It builds the `mod` struct by
safely parsing the fields in `fields`.
If a key in `fields` doesn't map to a field in the resulting struct, the key and its
respective value are collected into a separate map denoted by `rest`.
This function returns `{:ok, struct, rest}` if the building is successful,
`{:error, reason}` otherwise.
## Examples
iex> Maptu.Extension.struct_rest(URI, %{"port" => 8080, "nonexisting_field" => 1})
{:ok, %URI{port: 8080}, %{"nonexisting_field" => 1}}
iex> Maptu.Extension.struct_rest(GenServer, %{})
{:error, {:non_struct, GenServer}}
"""
@spec struct_rest(module, map) :: {:ok, struct, map} | {:error, non_strict_error_reason}
def struct_rest(mod, fields) when is_atom(mod) and is_map(fields) do
with :ok <- ensure_exists(mod),
:ok <- ensure_struct(mod),
do: fill_struct_rest(mod, fields)
end
@doc """
Behaves like `Maptu.Extension.struct_rest/2` but returns the residual `rest` map rather than the `struct`
and raises in case of error.
This function behaves like `Maptu.Extension.struct_rest/2`, but it returns the `rest` map (instead
of `{:ok, struct, rest}`) if the conversion is valid, and raises an `ArgumentError`
exception if it's not valid.
## Examples
iex> Maptu.Extension.rest!(URI, %{"port" => 8080, "nonexisting_field" => 1})
%{"nonexisting_field" => 1}
iex> Maptu.Extension.rest!(GenServer, %{})
** (ArgumentError) module is not a struct: GenServer
"""
@spec rest!(module, map) :: map | no_return
def rest!(mod, fields) do
struct_rest(mod, fields) |> raise_on_error()
end
# Private or docless
defp extract_mod_name_and_fields(%{"__struct__" => "Elixir." <> _} = map),
do: {:ok, Map.pop(map, "__struct__")}
defp extract_mod_name_and_fields(%{"__struct__" => name}),
do: {:error, {:bad_module_name, name}}
defp extract_mod_name_and_fields(%{}),
do: {:error, :missing_struct_key}
defp module_to_atom("Elixir." <> name = mod_name) do
case to_existing_atom_safe(mod_name) do
{:ok, mod} -> {:ok, mod}
:error -> {:error, {:non_existing_module, name}}
end
end
defp ensure_exists(mod) when is_binary(mod) do
try do
String.to_existing_atom(mod)
rescue
ArgumentError ->
error_mod = Module.split(mod) |> Enum.join(".")
{:error, {:non_existing_module, error_mod}}
else
_atom -> :ok
end
end
defp ensure_exists(mod) when is_atom(mod) do
Atom.to_string(mod) |> ensure_exists()
end
defp ensure_struct(mod) when is_atom(mod) do
if function_exported?(mod, :__struct__, 0) do
:ok
else
{:error, {:non_struct, mod}}
end
end
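# Walks the binary-keyed fields once: keys that resolve to existing atoms and
# name struct fields go into the struct accumulator; everything else is
# captured into the residual `rest` map.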
defp fill_struct_rest(mod, fields) do
{result, rest} = Enum.reduce fields, {mod.__struct__(), %{}}, fn({bin_field, value}, {acc1, acc2}) ->
case to_existing_atom_safe(bin_field) do
{:ok, atom_field} ->
if Map.has_key?(acc1, atom_field) do
{Map.put(acc1, atom_field, value), acc2}
else
{acc1, Map.put(acc2, bin_field, value)}
end
:error ->
{acc1, Map.put(acc2, bin_field, value)}
end
end
{:ok, result, rest}
end
# @doc :false
# This function can be extended if want to allow atom keys
# def to_existing_atom_safe(arg) when is_atom(arg) do
# {:ok, arg}
# end
@doc :false
def to_existing_atom_safe(arg) when is_binary(arg) do
try do
String.to_existing_atom(arg)
rescue
ArgumentError -> :error
else
atom -> {:ok, atom}
end
end
@doc :false
def format_error(:missing_struct_key),
do: "the given map doesn't contain a \"__struct__\" key"
@doc :false
def format_error(:atom_key_not_expected),
do: "the map may contain an atom key which is not expected"
@doc :false
def format_error({:bad_module_name, name}) when is_binary(name),
do: "not an elixir module: #{inspect name}"
@doc :false
def format_error({:non_existing_module, mod}) when is_binary(mod),
do: "module doesn't exist: #{inspect mod}"
@doc :false
def format_error({:non_struct, mod}) when is_atom(mod),
do: "module is not a struct: #{inspect mod}"
@doc :false
def format_error({:non_existing_atom, bin}) when is_binary(bin),
do: "atom doesn't exist: #{inspect bin}"
@doc :false
def format_error({:unknown_struct_field, struct, field})
when is_atom(struct) and is_atom(field),
do: "unknown field #{inspect field} for struct #{inspect struct}"
end
|
lib/maptu/extension.ex
| 0.911864
| 0.562958
|
extension.ex
|
starcoder
|
defmodule Ecto.Query do
@moduledoc ~S"""
Provides the Query DSL.
Queries are used to retrieve and manipulate data in a repository
(see `Ecto.Repo`). Although this module provides a complete API,
supporting expressions like `where/3`, `select/3` and so forth,
most of the time developers need to import only the `from/2`
macro.
# Imports only from/2 of Ecto.Query
import Ecto.Query, only: [from: 2]
# Create a query
query = from w in Weather,
where: w.prcp > 0,
select: w.city
# Send the query to the repository
Repo.all(query)
## Composition
Ecto queries are composable. For example, the query above can
actually be defined in two parts:
# Create a query
query = from w in Weather, where: w.prcp > 0
# Extend the query
query = from w in query, select: w.city
Keep in mind, though, that the variable names used on the left-hand
side of `in` are just a convenience; they are not taken into
account in the query generation.
Any value can be used on the right-side of `in` as long as it
implements the `Ecto.Queryable` protocol.
## Query expressions
Ecto allows a limited set of expressions inside queries. In the
query below, for example, we use `w.prcp` to access a field, the
`>` comparison operator and the literal `0`:
query = from w in Weather, where: w.prcp > 0
You can find the full list of operations in `Ecto.Query.API`.
Besides the operations listed here, the following literals are
supported in queries:
* Integers: `1`, `2`, `3`
* Floats: `1.0`, `2.0`, `3.0`
* Booleans: `true`, `false`
* Binaries: `<<1, 2, 3>>`
* Strings: `"foo bar"`, `~s(this is a string)`
* Arrays: `[1, 2, 3]`, `~w(interpolate words)`
All other types must be passed as a parameter using interpolation
as explained below.
## Interpolation
External values and Elixir expressions can be injected into a query
expression with `^`:
def with_minimum(age, height_ft) do
from u in User,
where: u.age > ^age and u.height > ^(height_ft * 3.28)
end
with_minimum(18, 5.0)
Interpolation can also be used with the `field/2` function which allows
developers to dynamically choose a field to query:
def at_least_four(doors_or_tires) do
from c in Car,
where: field(c, ^doors_or_tires) >= 4
end
In the example above, both `at_least_four(:doors)` and `at_least_four(:tires)`
would be valid calls as the field is dynamically inserted.
## Casting
Ecto is able to cast interpolated values in queries:
age = "1"
Repo.all(from u in User, where: u.age > ^age)
The example above works because `u.age` is tagged as an `:integer`
in the User schema and therefore Ecto will attempt to cast the
interpolated `^age` to integer. When a value cannot be cast,
`Ecto.CastError` is raised.
In some situations, Ecto is unable to infer the type for interpolated
values (as a database would be unable) and you may need to explicitly
tag it with the `type/2` function:
type(^"1", :integer)
type(^<<0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>>, Ecto.UUID)
It is important to keep in mind that Ecto cannot cast nil values in
queries. Passing nil automatically causes the query to fail.
## Query Prefix
It is possible to set a prefix for the table name in queries. For Postgres users,
this will specify the schema where the table is located, while for MySQL users this will
specify the database where the table is located. When no prefix is set, Postgres queries
are assumed to be in the public schema, while MySQL queries are assumed to be in the
database set in the config for the repo.
Set the prefix on a query:
query = from u in User, select: u
queryable = %{query | prefix: "foo"}
results = Repo.all(queryable)
Set the prefix without the query syntax:
results =
User
|> Ecto.Queryable.to_query
|> Map.put(:prefix, "foo")
|> Repo.all
When a prefix is set in a query, all loaded structs will belong to that
prefix, so operations like update and delete will be applied to the
proper prefix. In case you want to manually set the prefix for new data,
especially on insert, use `Ecto.put_meta/2`.
## Macro API
In all examples so far we have used the **keywords query syntax** to
create a query:
import Ecto.Query
from w in Weather, where: w.prcp > 0, select: w.city
Behind the scenes, the query above expands to the set of macros defined
in this module:
from(w in Weather) |> where([w], w.prcp > 0) |> select([w], w.city)
which then expands to:
select(where(from(w in Weather), [w], w.prcp > 0), [w], w.city)
This module documents each of those macros, providing examples in both
the keywords query and query expression formats.
"""
defstruct [prefix: nil, sources: nil, from: nil, joins: [], wheres: [], select: nil,
order_bys: [], limit: nil, offset: nil, group_bys: [], updates: [],
havings: [], preloads: [], assocs: [], distinct: nil, lock: nil]
@opaque t :: %__MODULE__{}
defmodule QueryExpr do
@moduledoc false
defstruct [:expr, :file, :line, params: %{}]
end
defmodule SelectExpr do
@moduledoc false
defstruct [:expr, :file, :line, fields: [], params: %{}]
end
defmodule JoinExpr do
@moduledoc false
defstruct [:qual, :source, :on, :file, :line, :assoc, :ix, params: %{}]
end
defmodule Tagged do
@moduledoc false
# * value is the tagged value
# * tag is the directly tagged value, like Ecto.DateTime
# * type is the underlying tag type, like :datetime
defstruct [:value, :tag, :type]
end
alias Ecto.Query.Builder
alias Ecto.Query.Builder.From
alias Ecto.Query.Builder.Filter
alias Ecto.Query.Builder.Select
alias Ecto.Query.Builder.Distinct
alias Ecto.Query.Builder.OrderBy
alias Ecto.Query.Builder.LimitOffset
alias Ecto.Query.Builder.GroupBy
alias Ecto.Query.Builder.Preload
alias Ecto.Query.Builder.Join
alias Ecto.Query.Builder.Lock
alias Ecto.Query.Builder.Update
@doc """
Resets a previously set field on a query.
It can reset any query field except the query source (`from`).
## Example
query |> Ecto.Query.exclude(:select)
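For instance, to replace an existing select expression (a sketch, assuming
`Ecto.Query` has been imported):

    query
    |> Ecto.Query.exclude(:select)
    |> select([u], u.id)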
"""
def exclude(%Ecto.Query{} = query, field), do: do_exclude(query, field)
def exclude(query, field), do: do_exclude(Ecto.Queryable.to_query(query), field)
defp do_exclude(%Ecto.Query{} = query, :join), do: %{query | joins: []}
defp do_exclude(%Ecto.Query{} = query, :where), do: %{query | wheres: []}
defp do_exclude(%Ecto.Query{} = query, :order_by), do: %{query | order_bys: []}
defp do_exclude(%Ecto.Query{} = query, :group_by), do: %{query | group_bys: []}
defp do_exclude(%Ecto.Query{} = query, :having), do: %{query | havings: []}
defp do_exclude(%Ecto.Query{} = query, :distinct), do: %{query | distinct: nil}
defp do_exclude(%Ecto.Query{} = query, :select), do: %{query | select: nil}
defp do_exclude(%Ecto.Query{} = query, :limit), do: %{query | limit: nil}
defp do_exclude(%Ecto.Query{} = query, :offset), do: %{query | offset: nil}
defp do_exclude(%Ecto.Query{} = query, :lock), do: %{query | lock: nil}
defp do_exclude(%Ecto.Query{} = query, :preload), do: %{query | preloads: [], assocs: []}
@doc """
Creates a query.
It can either be a keyword query or a query expression. If it is a
keyword query the first argument should be an `in` expression and
the second argument a keyword query where the keys are expression
types and the values are expressions.
If it is a query expression the first argument is the original query
and the second argument the expression.
## Keywords example
from(City, select: c)
## Expressions example
City |> select([c], c)
## Examples
def paginate(query, page, size) do
from query,
limit: ^size,
offset: ^((page-1) * size)
end
The example above does not use `in` because `limit` and `offset`
do not require it. However, extending a query with a where expression would
require the use of `in`:
def published(query) do
from p in query, where: not(is_nil(p.published_at))
end
Notice we have created a `p` variable to represent each item in the query.
When the given query has more than one `from` expression, a variable
must be given for each in the order they were bound:
def published_multi(query) do
from [p,o] in query,
where: not(is_nil(p.published_at)) and not(is_nil(o.published_at))
end
Note the variables `p` and `o` can be named whatever you like
as they have no importance in the query sent to the database.
"""
defmacro from(expr, kw \\ []) do
unless Keyword.keyword?(kw) do
raise ArgumentError, "second argument to `from` must be a keyword list"
end
{quoted, binds, count_bind} = From.build(expr, __CALLER__)
from(kw, __CALLER__, count_bind, quoted, binds)
end
@binds [:where, :select, :distinct, :order_by, :group_by,
:having, :limit, :offset, :preload, :update]
@no_binds [:lock]
@joins [:join, :inner_join, :left_join, :right_join, :full_join]
defp from([{type, expr}|t], env, count_bind, quoted, binds) when type in @binds do
# If all bindings are integer indexes keep AST Macro.expand'able to %Query{},
# otherwise ensure that quoted code is evaluated before macro call
quoted =
if Enum.all?(binds, fn {_, value} -> is_integer(value) end) do
quote do
Ecto.Query.unquote(type)(unquote(quoted), unquote(binds), unquote(expr))
end
else
quote do
query = unquote(quoted)
Ecto.Query.unquote(type)(query, unquote(binds), unquote(expr))
end
end
from(t, env, count_bind, quoted, binds)
end
defp from([{type, expr}|t], env, count_bind, quoted, binds) when type in @no_binds do
quoted =
quote do
Ecto.Query.unquote(type)(unquote(quoted), unquote(expr))
end
from(t, env, count_bind, quoted, binds)
end
defp from([{join, expr}|t], env, count_bind, quoted, binds) when join in @joins do
qual =
case join do
:join -> :inner
:inner_join -> :inner
:left_join -> :left
:right_join -> :right
:full_join -> :full
end
{t, on} = collect_on(t, nil)
{quoted, binds, count_bind} = Join.build(quoted, qual, binds, expr, on, count_bind, env)
from(t, env, count_bind, quoted, binds)
end
defp from([{:on, _value}|_], _env, _count_bind, _quoted, _binds) do
Builder.error! "`on` keyword must immediately follow a join"
end
defp from([{key, _value}|_], _env, _count_bind, _quoted, _binds) do
Builder.error! "unsupported #{inspect key} in keyword query expression"
end
defp from([], _env, _count_bind, quoted, _binds) do
quoted
end
defp collect_on([{:on, expr}|t], nil),
do: collect_on(t, expr)
defp collect_on([{:on, expr}|t], acc),
do: collect_on(t, {:and, [], [acc, expr]})
defp collect_on(other, acc),
do: {other, acc}
@doc """
A join query expression.
Receives a source that is to be joined to the query and a condition for
the join. The join condition can be any expression that evaluates
to a boolean value. The join is an inner join by default; the qualifier
can be changed by giving the atoms: `:inner`, `:left`, `:right` or
`:full`. For a keyword query the `:join` keyword can be changed to:
`:inner_join`, `:left_join`, `:right_join` or `:full_join`.
Currently it is possible to join on an Ecto.Schema (a module), an
existing source (a binary representing a table), an association or a
fragment. See the examples below.
## Keywords examples
from c in Comment,
join: p in Post, on: c.post_id == p.id,
select: {p.title, c.text}
from p in Post,
left_join: c in assoc(p, :comments),
select: {p, c}
## Expressions examples
Comment
|> join(:inner, [c], p in Post, c.post_id == p.id)
|> select([c, p], {p.title, c.text})
Post
|> join(:left, [p], c in assoc(p, :comments))
|> select([p, c], {p, c})
## Joining with fragments
When you need to join on a complex expression that cannot be
expressed via Ecto associations, Ecto supports fragments in joins:
Comment
|> join(:inner, [c], p in fragment("SOME COMPLEX QUERY", c.id, ^some_param))
This style is discouraged due to its complexity.
"""
defmacro join(query, qual, binding \\ [], expr, on \\ nil) do
Join.build(query, qual, binding, expr, on, nil, __CALLER__)
|> elem(0)
end
@doc """
A select query expression.
Selects which fields will be selected from the schema and any transformations
that should be performed on the fields. Any expression that is accepted in a
query can be a select field.
The sub-expressions in the query can be wrapped in lists, tuples or maps as
shown in the examples. A full schema can also be selected.
There can only be one select expression in a query; if the select expression
is omitted, the query will by default select the full schema.
## Keywords examples
from(c in City, select: c) # selects the entire schema
from(c in City, select: {c.name, c.population})
from(c in City, select: [c.name, c.county])
from(c in City, select: {c.name, ^to_string(40 + 2), 43})
from(c in City, select: %{n: c.name, answer: 42})
## Expressions examples
City |> select([c], c)
City |> select([c], {c.name, c.country})
City |> select([c], %{"name" => c.name})
"""
defmacro select(query, binding \\ [], expr) do
Select.build(query, binding, expr, __CALLER__)
end
@doc """
A distinct query expression.
When true, only keeps distinct values from the resulting
select expression.
If supported by your database, you can also pass query
expressions to distinct and it will generate a query
with DISTINCT ON. In such cases, the row that is being
kept depends on the ordering of the rows. When an `order_by`
expression is also added to the query, all fields in the
`distinct` expression are automatically referenced in `order_by`
too.
`distinct` also accepts a list of atoms where each atom refers to
a field in source.
## Keywords examples
# Returns the list of different categories in the Post schema
from(p in Post, distinct: true, select: p.category)
# If your database supports DISTINCT ON(),
# you can pass expressions to distinct too
from(p in Post,
distinct: p.category,
order_by: [p.date])
# Using atoms
from(p in Post, distinct: :category, order_by: :date)
## Expressions example
Post
|> distinct(true)
|> order_by([p], [p.category, p.author])
"""
defmacro distinct(query, binding \\ [], expr) do
Distinct.build(query, binding, expr, __CALLER__)
end
@doc """
A where query expression.
`where` expressions are used to filter the result set. If there is more
than one where expression, they are combined with an `and` operator. All
where expressions have to evaluate to a boolean value.
`where` also accepts a keyword list where the field given as key is going to
be compared with the given value. The fields will always refer to the source
given in `from`.
## Keywords example
from(c in City, where: c.state == "Sweden")
from(c in City, where: [state: "Sweden"])
It is also possible to interpolate the whole keyword list, allowing you to
dynamically filter the source:
filters = [state: "Sweden"]
from(c in City, where: ^filters)
## Expressions example
City |> where([c], c.state == "Sweden")
City |> where(state: "Sweden")
"""
defmacro where(query, binding \\ [], expr) do
Filter.build(:where, query, binding, expr, __CALLER__)
end
@doc """
An order by query expression.
Orders the fields based on one or more fields. It accepts a single field
or a list of fields. The direction can be specified in a keyword list as shown
in the examples. There can be several order by expressions in a query.
`order_by` also accepts a list of atoms where each atom refers to a field in
source or a keyword list where the direction is given as key and the field
to order as value.
## Keywords examples
from(c in City, order_by: c.name, order_by: c.population)
from(c in City, order_by: [c.name, c.population])
from(c in City, order_by: [asc: c.name, desc: c.population])
from(c in City, order_by: [:name, :population])
from(c in City, order_by: [asc: :name, desc: :population])
A keyword list can also be interpolated:
values = [asc: :name, desc: :population]
from(c in City, order_by: ^values)
## Expressions example
City |> order_by([c], asc: c.name, desc: c.population)
City |> order_by(asc: :name) # Sorts by the city's name
"""
defmacro order_by(query, binding \\ [], expr) do
OrderBy.build(query, binding, expr, __CALLER__)
end
@doc """
A limit query expression.
Limits the number of rows returned from the result. Can be any expression but
has to evaluate to an integer value and it can't include any field.
If `limit` is given twice, it overrides the previous value.
## Keywords example
from(u in User, where: u.id == ^current_user, limit: 1)
## Expressions example
User |> where([u], u.id == ^current_user) |> limit(1)
"""
defmacro limit(query, binding \\ [], expr) do
LimitOffset.build(:limit, query, binding, expr, __CALLER__)
end
@doc """
An offset query expression.
Offsets the number of rows selected from the result. Can be any expression
but it must evaluate to an integer value and it can't include any field.
If `offset` is given twice, it overrides the previous value.
## Keywords example
# Get all posts on page 4
from(p in Post, limit: 10, offset: 30)
## Expressions example
Post |> limit(10) |> offset(30)
"""
defmacro offset(query, binding \\ [], expr) do
LimitOffset.build(:offset, query, binding, expr, __CALLER__)
end
@doc ~S"""
A lock query expression.
Provides support for row-level pessimistic locking using
`SELECT ... FOR UPDATE` or other, database-specific, locking clauses.
`expr` can be any expression but has to evaluate to a boolean value or to a
string and it can't include any fields.
If `lock` is used more than once, the last one used takes precedence.
Ecto also supports [optimistic
locking](http://en.wikipedia.org/wiki/Optimistic_concurrency_control) but not
through queries. For more information on optimistic locking, have a look at
the `Ecto.Model.OptimisticLock` module.
## Keywords example
from(u in User, where: u.id == ^current_user, lock: "FOR SHARE NOWAIT")
## Expressions example
User |> where([u], u.id == ^current_user) |> lock("FOR SHARE NOWAIT")
"""
defmacro lock(query, expr) do
Lock.build(query, expr, __CALLER__)
end
@doc ~S"""
An update query expression.
Updates are used to update the filtered entries. In order for
updates to be applied, `Ecto.Repo.update_all/3` must be invoked.
## Keywords example
from(u in User, update: [set: [name: "new name"]])
## Expressions example
User |> update([u], set: [name: "new name"])
User |> update(set: [name: "new name"])
## Operators
The update expression in Ecto supports the following operators:
* `set` - sets the given field in the table to the given value
from(u in User, update: [set: [name: "new name"]])
* `inc` - increments (or decrements if the value is negative) the given field in the table by the given value
from(u in User, update: [inc: [accesses: 1]])
* `push` - pushes (appends) the given value to the end of the array field
from(u in User, update: [push: [tags: "cool"]])
* `pull` - pulls (removes) the given value from the array field
from(u in User, update: [pull: [tags: "not cool"]])
"""
defmacro update(query, binding \\ [], expr) do
Update.build(query, binding, expr, __CALLER__)
end
@doc """
A group by query expression.
Groups together rows from the schema that have the same values in the given
fields. Using `group_by` "groups" the query giving it different semantics
in the `select` expression. If a query is grouped, only fields that were
referenced in the `group_by` can be used in the `select`, unless the field
is given as an argument to an aggregate function.
`group_by` also accepts a list of atoms where each atom refers to
a field in source.
## Keywords examples
# Returns the number of posts in each category
from(p in Post,
group_by: p.category,
select: {p.category, count(p.id)})
# Group on all fields on the Post schema
from(p in Post, group_by: p, select: p)
# Using atoms
from(p in Post, group_by: :category, select: {p.category, count(p.id)})
## Expressions example
Post |> group_by([p], p.category) |> select([p], count(p.id))
"""
defmacro group_by(query, binding \\ [], expr) do
GroupBy.build(query, binding, expr, __CALLER__)
end
@doc """
A having query expression.
Like `where`, `having` filters rows from the schema, but after the grouping is
performed giving it the same semantics as `select` for a grouped query
(see `group_by/3`). `having` groups the query even if the query has no
`group_by` expression.
## Keywords example
# Returns the number of posts in each category where the
# average number of comments is above ten
from(p in Post,
group_by: p.category,
having: avg(p.num_comments) > 10,
select: {p.category, count(p.id)})
## Expressions example
Post
|> group_by([p], p.category)
|> having([p], avg(p.num_comments) > 10)
|> select([p], count(p.id))
"""
defmacro having(query, binding \\ [], expr) do
Filter.build(:having, query, binding, expr, __CALLER__)
end
@doc """
Preloads the associations into the given struct.
Preloading allows developers to specify associations that are preloaded
into the struct. Consider this example:
Repo.all from p in Post, preload: [:comments]
The example above will fetch all posts from the database and then do
a separate query returning all comments associated to the given posts.
However, you often want posts and comments to be selected and
filtered in the same query. For such cases, you can explicitly tell
the association to be preloaded into the struct:
Repo.all from p in Post,
join: c in assoc(p, :comments),
where: c.published_at > p.updated_at,
preload: [comments: c]
In the example above, instead of issuing a separate query to fetch
comments, Ecto will fetch posts and comments in a single query.
Nested associations can also be preloaded in both formats:
Repo.all from p in Post,
preload: [comments: :likes]
Repo.all from p in Post,
join: c in assoc(p, :comments),
join: l in assoc(c, :likes),
where: l.inserted_at > c.updated_at,
preload: [comments: {c, likes: l}]
Keep in mind neither format can be nested arbitrarily. For
example, the query below is invalid because we cannot preload
likes with the join association `c`.
Repo.all from p in Post,
join: c in assoc(p, :comments),
preload: [comments: {c, :likes}]
## Preload queries
Preload also allows queries to be given, allowing you to filter or
customize how the preloads are fetched:
comments_query = from c in Comment, order_by: c.published_at
Repo.all from p in Post, preload: [comments: ^comments_query]
The example above will issue two queries, one for loading posts and
then another for loading the comments associated with the posts.
Comments will be ordered by `published_at`.
Note: keep in mind operations like limit and offset in the preload
query will affect the whole result set and not each association. For
example, the query below:
comments_query = from c in Comment, order_by: c.popularity, limit: 5
Repo.all from p in Post, preload: [comments: ^comments_query]
won't return the top 5 comments for each post. Rather, it will only return
the 5 top comments across all posts.
## Keywords example
# Returns all posts and their associated comments
from(p in Post,
preload: [:comments, comments: :likes],
select: p)
## Expressions examples
Post |> preload(:comments) |> select([p], p)
Post |> preload([p, c], [:user, comments: c]) |> select([p], p)
"""
defmacro preload(query, bindings \\ [], expr) do
Preload.build(query, bindings, expr, __CALLER__)
end
end
|
lib/ecto/query.ex
| 0.908413
| 0.771456
|
query.ex
|
starcoder
|
defmodule XPlane.DataRef do
@moduledoc """
Represent an X-Plane Data Reference (DataRef) and provide helper methods
to load the closest available set of DataRefs for a given X-Plane version
"""
@type xtype :: {:byte | :float | :int | :uint | :short | :ushort, list(integer)} | :void
defstruct [
name: "",
id: :unknown,
code: -1,
type: :void,
writable: false,
units: "???",
description: "???"]
@type t :: %XPlane.DataRef{
name: String.t,
id: atom,
code: integer,
type: XPlane.DataRef.xtype,
writable: boolean,
units: String.t,
description: String.t}
@doc """
Load the closest list of DataRefs we have available for the specified X-Plane version.
## Parameters
- version_number: X-Plane version number as returned by `XPlane.Instance.list/0`
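## Example
A sketch; the version number is hypothetical and must correspond to a
DataRefs<version>.txt.gz file shipped in priv/datarefs:
data_refs = XPlane.DataRef.load_version(105000)
data_refs |> XPlane.DataRef.describe(~r/flightmodel/)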
"""
@spec load_version(integer) :: %{atom: XPlane.DataRef.t}
def load_version(version_number) do
exact = "DataRefs#{version_number}.txt.gz"
closest = "#{:code.priv_dir(:xplane)}/datarefs"
|> File.ls!
|> Enum.reverse
|> Enum.filter(&(&1 <= exact))
|> Enum.at(0)
{:ok, file} = File.open("#{:code.priv_dir(:xplane)}/datarefs/#{closest}", [:read, :compressed])
IO.stream(file, :line)
|> Enum.with_index()
|> Enum.flat_map(
fn({line, code}) ->
parse(line |> String.split("\t"), code)
end)
|> Map.new(
fn d = %XPlane.DataRef{} ->
{d.id, d}
end)
end
def describe(data_refs, pattern) do
IO.puts("\n")
data_refs
|> Enum.filter(
fn {id, _} -> Regex.match?(pattern, Atom.to_string(id)) end)
|> Enum.sort
|> Enum.map(
fn {id, %XPlane.DataRef{description: d, units: u, writable: w}} ->
wd = if w do ", writable" else "" end
"#{Atom.to_string(id) |> String.pad_trailing(40)} #{d} (#{u}#{wd})"
end)
|> Enum.join("\n")
|> IO.puts
IO.puts("\n")
end
def withCode(data_refs, code_to_match) do
[{_, matching_data_ref} | _] =
Enum.filter(data_refs,
fn {_, %XPlane.DataRef{code: code}} ->
code == code_to_match
end)
matching_data_ref
end
@spec parse(list, integer) :: list(XPlane.DataRef.t)
defp parse([name, type, writable, units, description], code) do
[%XPlane.DataRef{
parse([name, type, writable, units], code) |> Enum.at(0)
| description: binary_part(description, 0, byte_size(description) - 1)
}]
end
@spec parse(list, integer) :: list(XPlane.DataRef.t)
defp parse([name, type, writable, units], code) do
[%XPlane.DataRef{
parse([name, type, writable], code) |> Enum.at(0)
| units: units
}]
end
@spec parse(list, integer) :: list(XPlane.DataRef.t)
defp parse([name, type, writable], code) do
[%XPlane.DataRef{
name: name,
id: String.to_atom(name
|> String.slice(4..400)
|> String.replace("/", "_")),
code: code,
type: parse_type(type),
writable: (writable == "y")
}]
end
defp parse(_, _) do
[]
end
@spec parse_type(String.t) :: XPlane.DataRef.xtype
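# Parses an X-Plane type string into an {atom, dims} tuple, e.g.
# "int" -> {:int, [1]} and "float[4]" -> {:float, [4]}.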
defp parse_type(type) do
[type | dims] = type |> String.split(["[", "]"], trim: true)
{String.to_atom(type), if Enum.empty?(dims) do [1] else dims |> Enum.map(&(String.to_integer(&1))) end}
end
end
|
lib/xplane_data_ref.ex
| 0.729423
| 0.619586
|
xplane_data_ref.ex
|
starcoder
|
defmodule Example do
def start(_type, _args) do
# If you are running the ModelFox app locally or on your own server you can pass the URL to it with the MODELFOX_URL environment variable.
modelfox_url = System.get_env("MODELFOX_URL")
# Get the path to the .modelfox file.
model_path = Path.join(:code.priv_dir(:example), "heart_disease.modelfox")
# Load the model from the path.
model = ModelFox.load_model_from_path(model_path, %{modelfox_url: modelfox_url})
# Create an example input matching the schema of the CSV file the model was trained on. Here the data is just hard-coded, but in your application you will probably get this from a database or user input.
input = %{
:age => 63.0,
:gender => "male",
:chest_pain => "typical angina",
:resting_blood_pressure => 145.0,
:cholesterol => 233.0,
:fasting_blood_sugar_greater_than_120 => "true",
:resting_ecg_result => "probable or definite left ventricular hypertrophy",
:exercise_max_heart_rate => 150.0,
:exercise_induced_angina => "no",
:exercise_st_depression => 2.3,
:exercise_st_slope => "downsloping",
:fluoroscopy_vessels_colored => "0",
:thallium_stress_test => "fixed defect"
}
# Make the prediction using a custom threshold chosen on the "Tuning" page of the ModelFox app.
predict_options = %ModelFox.PredictOptions{
threshold: 0.5,
compute_feature_contributions: false
}
output = ModelFox.predict(model, input, predict_options)
# Print the output.
IO.write("Output: ")
IO.inspect(output)
# Log the prediction.
ModelFox.log_prediction(model, %ModelFox.LogPredictionArgs{
identifier: "71762b29-2296-4bf9-a1d4-59144d74c9d9",
options: predict_options,
input: input,
output: output
})
# Later on, if we get an official diagnosis for the patient, log the true value. Make sure to match the `identifier`.
ModelFox.log_true_value(model, %ModelFox.LogTrueValueArgs{
identifier: "71762b29-2296-4bf9-a1d4-59144d74c9d9",
true_value: "Positive"
})
Supervisor.start_link([], strategy: :one_for_one)
end
end
|
languages/elixir/examples/advanced/lib/application.ex
| 0.806472
| 0.410402
|
application.ex
|
starcoder
|
defmodule FinTex.Model.Account do
@moduledoc """
The following fields are public:
* `account_number` - Account number
* `subaccount_id` - Subaccount ID
* `blz` - Bank code
* `bank_name` - Bank name
* `currency` - Three-character currency code (ISO 4217)
* `iban` - IBAN
* `bic` - BIC
* `name` - Account name
* `owner` - Name of the account holder
* `balance` - Account balance
* `supported_payments` - List of payment types with payment parameters
* `supported_transactions` - List of transaction names
* `supported_tan_schemes` - List of TAN schemes
* `preferred_tan_scheme` - Security function of the TAN scheme preferred by the user
"""
alias FinTex.Model.Balance
alias FinTex.Model.TANScheme
alias FinTex.User.FinAccount
@type t :: %__MODULE__{
account_number: String.t(),
subaccount_id: String.t(),
blz: String.t(),
bank_name: String.t(),
currency: String.t(),
iban: String.t(),
bic: String.t(),
name: String.t(),
owner: String.t(),
balance: Balance.t(),
supported_payments: map,
supported_tan_schemes: [TANScheme.t()],
preferred_tan_scheme: String.t(),
supported_transactions: [String.t()]
}
defstruct [
:account_number,
:subaccount_id,
:blz,
:bank_name,
:currency,
:iban,
:bic,
:name,
:owner,
balance: nil,
supported_payments: Map.new(),
supported_tan_schemes: [],
preferred_tan_scheme: nil,
supported_transactions: []
]
def valid?, do: true
@doc false
@spec from_fin_account(FinAccount.t()) :: t
def from_fin_account(fin_account) do
%__MODULE__{
account_number: fin_account |> FinAccount.account_number(),
subaccount_id: fin_account |> FinAccount.subaccount_id(),
blz: fin_account |> FinAccount.blz(),
iban: fin_account |> FinAccount.iban(),
bic: fin_account |> FinAccount.bic(),
owner: fin_account |> FinAccount.owner()
}
end
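@doc """
Builds a map key from the account number and subaccount ID. A usage
sketch (assuming `alias FinTex.Model.Account`):
accounts |> Map.new(fn a -> {Account.key(a), a} end)
"""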
def key(%__MODULE__{
account_number: account_number,
subaccount_id: subaccount_id
}) do
"#{account_number}#{subaccount_id}"
end
end
|
lib/model/account.ex
| 0.865025
| 0.480905
|
account.ex
|
starcoder
|
defmodule Mnemonix do
@moduledoc """
Provides easy access to a store through a Map-like interface.
Rather than a map, use the `t:GenServer.server/0` reference returned
by a store's `start_link/2` to perform operations on it.
## Map Features
`Mnemonix.Features.Map` lets you manipulate a store just like a `Map`.
The `new/0`, `new/1`, and `new/2` functions start links to a
`Mnemonix.Stores.Map` (mimicking `Map.new`) and make it easy to play with the
Mnemonix functions:
iex> store = Mnemonix.new(fizz: 1)
iex> Mnemonix.get(store, :foo)
nil
iex> Mnemonix.get(store, :fizz)
1
iex> Mnemonix.put_new(store, :foo, "bar")
iex> Mnemonix.get(store, :foo)
"bar"
iex> Mnemonix.put_new(store, :foo, "baz")
iex> Mnemonix.get(store, :foo)
"bar"
iex> Mnemonix.put(store, :foo, "baz")
iex> Mnemonix.get(store, :foo)
"baz"
iex> Mnemonix.get(store, :fizz)
1
iex> Mnemonix.get_and_update(store, :fizz, &({&1, &1 * 2}))
iex> Mnemonix.get_and_update(store, :fizz, &({&1, &1 * 2}))
iex> Mnemonix.get(store, :fizz)
4
These functions behave exactly like their `Map` counterparts. `Mnemonix`
supplies analogs for all but a few `Map` functions:
- `Map.from_struct/1`
- `Map.merge/2`
- `Map.merge/3`
Map functions that traverse every entry in a store are handled a little differently, in the Enumerable feature below.
## Bump Features
`Mnemonix.Features.Bump` lets you perform increment/decrement operations on any store.
iex> store = Mnemonix.new(fizz: 1)
iex> Mnemonix.increment(store, :fizz)
iex> Mnemonix.get(store, :fizz)
2
iex> Mnemonix.decrement(store, :fizz)
iex> Mnemonix.get(store, :fizz)
1
## Expiry Features
`Mnemonix.Features.Expiry` lets you set entries to expire after a given time-to-live on any store.
iex> store = Mnemonix.new(fizz: 1)
iex> Mnemonix.expire(store, :fizz, 100)
iex> :timer.sleep(1000)
iex> Mnemonix.get(store, :fizz)
nil
## Enumerable Features
`Mnemonix.Features.Enumerable` enables functions that try to iterate over a store's contents. These
functions keep parity with the `Map` API, but be warned: they are only implemented for a subset of
stores, and may be very inefficient. Consult your store's specific documentation for more details.
These `Map` equivalents will raise `Mnemonix.Features.Enumerable.Exception` if your store doesn't
support them:
- `Mnemonix.Features.Enumerable.equal?/2`
- `Mnemonix.Features.Enumerable.keys/1`
- `Mnemonix.Features.Enumerable.to_list/1`
- `Mnemonix.Features.Enumerable.values/1`
Any store can be checked for enumerable support via `Mnemonix.enumerable?/1`.
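For example (a sketch; the default map-backed store supports enumeration):
store = Mnemonix.new(a: 1)
Mnemonix.enumerable?(store)
#=> true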
"""
@typedoc """
Keys allowed in Mnemonix entries.
"""
@type key :: term
@typedoc """
Values allowed in Mnemonix entries.
"""
@type value :: term
@typedoc """
Valid key/value pairs allowed as Mnemonix entries.
"""
@type pair :: {key, value}
@typedoc """
Values representing a store that Mnemonix functions can operate on.
"""
@type store :: pid | GenServer.name
@type spec :: {Mnemonix.Store.Behaviour.t, Mnemonix.Store.Server.options}
@typedoc """
An instruction from a `Mnemonix.Store.Server` to return successfully in the client.
"""
@type success :: :ok
@typedoc """
An instruction from a `Mnemonix.Store.Server` to return given value successfully in the client.
"""
@type success(value) :: {:ok, value}
@typedoc """
An instruction from a `Mnemonix.Store.Server` to emit a warning when returning in the client.
"""
@type warning :: {:warn, message :: String.t}
@typedoc """
An instruction from a `Mnemonix.Store.Server` to emit a warning when returning given value in the client.
"""
@type warning(value) :: {:warn, message :: String.t, value}
@typedoc """
An instruction from a `Mnemonix.Store.Server` to raise an error in the client.
"""
@type exception :: {:raise, exception :: module, raise_opts :: Keyword.t}
@doc """
Starts a new empty in-memory store.
## Examples
iex> store = Mnemonix.new
iex> Mnemonix.get(store, :a)
nil
iex> Mnemonix.get(store, :b)
nil
"""
@spec new() :: store
def new() do
do_new Map.new
end
@doc """
Starts a new in-memory store using `enumerable` for initial data.
Duplicated keys in the `enumerable` are removed; the last mentioned one prevails.
## Examples
iex> store = Mnemonix.new(a: 1)
iex> Mnemonix.get(store, :a)
1
iex> Mnemonix.get(store, :b)
nil
"""
@spec new(Enum.t) :: store
def new(enumerable) do
do_new Map.new(enumerable)
end
@doc """
Starts a new store applying a `transformation` to `enumerable` for initial data.
Duplicated keys are removed; the last mentioned one prevails.
## Examples
iex> store = Mnemonix.new(%{"A" => 0}, fn {key, value} ->
...> { String.downcase(key), value + 1 }
...> end )
iex> Mnemonix.get(store, "a")
1
iex> Mnemonix.get(store, "A")
nil
"""
@spec new(Enum.t, (term -> {key, value})) :: store
def new(enumerable, transform) do
do_new Map.new(enumerable, transform)
end
defp do_new(map) do
with {:ok, store} <- start_link(Mnemonix.Stores.Map, initial: map), do: store
end
use Mnemonix.Builder, inline: true
end
|
lib/mnemonix.ex
| 0.894361
| 0.451629
|
mnemonix.ex
|
starcoder
|
defmodule ExZendesk do
use HTTPoison.Base
@moduledoc """
Simple [HTTPoison](https://hexdocs.pm/httpoison/overview.html) based helper library to interact with the [Zendesk API](https://developer.zendesk.com/rest_api/docs/core/introduction)
## Configuration
First you must configure the subdomain, username, and password. The library will check environment variables first and fall back to application configuration.
This library currently supports Basic authentication or API token authentication.
- subdomain, taken from ZENDESK_SUBDOMAIN environment variable or `Application.get_env(:ex_zendesk, :subdomain)`. This is your zendesk base url, such as https://obscura.zendesk.com
- user, taken from ZENDESK_USER environment variable or `Application.get_env(:ex_zendesk, :user)`. If using basic authentication, this is your zendesk login (ex. <EMAIL>). If using api token authentication this is your zendesk login followed by /token (ex. <EMAIL>/token)
- password, taken from ZENDESK_PASSWORD environment variable or `Application.get_env(:ex_zendesk, :password)`. If using basic authentication this is your zendesk password. If using api token authentication put your api token here.
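For example, the application configuration might look like this (a
sketch; all values are placeholders):
```
config :ex_zendesk,
subdomain: "https://obscura.zendesk.com",
user: "agent@example.com/token",
password: "your-api-token"
```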
## Examples
```
ExZendesk.start
ExZendesk.get "/api/v2/tickets.json?per_page=1"
# => {:ok,
# %HTTPoison.Response{body: %{count: 114,
# next_page: "https://storestartup.zendesk.com/api/v2/tickets.json?page=2&per_page=1",
# previous_page: nil,
# tickets: [%{allow_channelback: false, assignee_id: 1802891567,
# brand_id: 572847, collaborator_ids: [],
# created_at: "2016-03-01T07:03:12Z", custom_fields: [],
# description: "Hello,\r\nThis is a test ticket",
# due_at: nil, external_id: nil, fields: [], followup_ids: [],
# forum_topic_id: nil, group_id: 26814467, has_incidents: false, id: 38,
# organization_id: nil, priority: nil, problem_id: nil,
# raw_subject: "Test ticket", recipient: nil,
# requester_id: 5022882178, satisfaction_rating: nil,
# sharing_agreement_ids: [], status: "closed",
# subject: "Test Ticket",
# submitter_id: 5022882178, tags: [], type: nil,
# updated_at: "2016-05-07T17:01:56Z",
# url: "https://storestartup.zendesk.com/api/v2/tickets/38.json",
# via: %{channel: "web", source: %{from: %{}, rel: nil, to: %{}}}}]},
# headers: [{"Server", "nginx"}, {"Date", "Fri, 12 Aug 2016 16:33:01 GMT"},
# {"Content-Type", "application/json; charset=UTF-8"},
# {"Content-Length", "1291"}, {"Connection", "keep-alive"},
# {"X-Zendesk-API-Version", "v2"},
# {"X-Zendesk-Application-Version", "v3.94.2"},
# {"X-Zendesk-API-Warn",
# "Removed restricted keys [\"_json\"] from parameters according to whitelist"},
# {"X-Frame-Options", "SAMEORIGIN"}, {"X-Rate-Limit", "700"},
# {"X-Rate-Limit-Remaining", "699"},
# {"Strict-Transport-Security", "max-age=31536000;"},
# {"X-UA-Compatible", "IE=Edge,chrome=1"},
# {"ETag", "\"8cd9c3920ce86f5b21141e6bcef72358\""},
# {"Cache-Control", "must-revalidate, private, max-age=0"},
# {"X-Zendesk-Origin-Server", "app41.pod6.iad1.zdsys.com"},
# {"X-Request-Id", "5aa337af-21ba-4ef0-ce53-b8ca3a6bb1dc"},
# {"X-Runtime", "0.188008"}, {"X-Rack-Cache", "miss"},
# {"X-Zendesk-Request-Id", "1fcdee3a31262d9d3064"},
# {"X-Content-Type-Options", "nosniff"}], status_code: 200}}
sample_ticket = %{ticket: %{subject: "testing", comment: %{body: "this is a test"}, requester: %{name: "<NAME>", email: "<EMAIL>"}}}
ExZendesk.post "/api/v2/tickets.json", sample_ticket
# => {:ok,
# %HTTPoison.Response{body: %{audit: %{author_id: 1802891567,
# created_at: "2016-08-12T16:40:14Z",
# events: [%{attachments: [], audit_id: 145382499768, author_id: 9343852848,
# body: "this is a test",
# html_body: "<div class=\"zd-comment\"><p dir=\"auto\">this is a test</p></div>",
# id: 145382499848, public: true, type: "Comment"},
# %{field_name: "subject", id: 145382500008, type: "Create",
# value: "testing"},
# %{field_name: "status", id: 145382500068, type: "Create", value: "open"},
# %{field_name: "priority", id: 145382500148, type: "Create", value: nil},
# %{field_name: "type", id: 145382500188, type: "Create", value: nil},
# %{field_name: "assignee_id", id: 145382500248, type: "Create",
# value: "1802891567"},
# %{field_name: "group_id", id: 145382500288, type: "Create",
# value: "26814467"},
# %{field_name: "requester_id", id: 145382500368, type: "Create",
# value: "9343852848"},
# %{body: "Your request ({{ticket.id}}) has been received and is being reviewed by our support staff.\n\nTo add additional # # comments, reply to this email.\n\n{{ticket.comments_formatted}}",
# id: 145382500448, recipients: [9343852848],
# subject: "[Request received] {{ticket.title}}", type: "Notification",
# via: %{channel: "rule",
# source: %{from: %{id: 64041887,
# title: "Notify requester of received request"}, rel: "trigger",
# to: %{}}}},
# %{body: "A ticket (\#{{ticket.id}}) by {{ticket.requester.name}} has been received. It is unassigned.\n\n{{ticket.comments_formatted}}",
# id: 145382500608, recipients: [1802891567],
# subject: "[{{ticket.account}}] {{ticket.title}}", type: "Notification",
# via: %{channel: "rule",
# source: %{from: %{id: 64041947,
# title: "Notify all agents of received request"}, rel: "trigger",
# to: %{}}}}], id: 145382499768,
# metadata: %{custom: %{},
# system: %{client: "Elixir/ExZendesk", ip_address: "172.16.58.3",
# latitude: 27.88669999999999, location: "Tampa, FL, United States",
# longitude: -82.5117}}, ticket_id: 173,
# via: %{channel: "api", source: %{from: %{}, rel: nil, to: %{}}}},
# ticket: %{allow_channelback: false, assignee_id: 1802891567,
# brand_id: 572847, collaborator_ids: [],
# created_at: "2016-08-12T16:40:14Z", custom_fields: [],
# description: "this is a test", due_at: nil, external_id: nil, fields: [],
# forum_topic_id: nil, group_id: 26814467, has_incidents: false, id: 173,
# organization_id: nil, priority: nil, problem_id: nil,
# raw_subject: "testing", recipient: nil, requester_id: 9343852848,
# satisfaction_rating: nil, sharing_agreement_ids: [], status: "open",
# subject: "testing", submitter_id: 9343852848, tags: [], type: nil,
# updated_at: "2016-08-12T16:40:14Z",
# url: "https://storestartup.zendesk.com/api/v2/tickets/173.json",
# via: %{channel: "api", source: %{from: %{}, rel: nil, to: %{}}}}},
# headers: [{"Server", "nginx"}, {"Date", "Fri, 12 Aug 2016 16:40:14 GMT"},
# {"Content-Type", "application/json; charset=UTF-8"},
# {"Content-Length", "2665"}, {"Connection", "keep-alive"},
# {"X-Zendesk-API-Version", "v2"},
# {"X-Zendesk-Application-Version", "v3.94.2"},
# {"X-Frame-Options", "SAMEORIGIN"},
# {"Location", "https://partially.zendesk.com/api/v2/tickets/173.json"},
# {"X-Rate-Limit", "700"}, {"X-Rate-Limit-Remaining", "699"},
# {"Strict-Transport-Security", "max-age=31536000;"},
# {"X-UA-Compatible", "IE=Edge,chrome=1"},
# {"ETag", "\"71ffe41cfa9ce1e92dfd15a1de13822d\""},
# {"Cache-Control", "max-age=0, private, must-revalidate"},
# {"X-Zendesk-Origin-Server", "app44.pod6.iad1.zdsys.com"},
# {"X-Request-Id", "deeaf0fd-e352-4102-c1e6-b8ca3a6bbce0"},
# {"X-Runtime", "0.679025"}, {"X-Rack-Cache", "invalidate, pass"},
# {"X-Zendesk-Request-Id", "4e1deda0879800940aeb"},
# {"X-Content-Type-Options", "nosniff"}], status_code: 201}}
```
"""
def process_url(url) do
subdomain <> url
end
def process_response_body(body) do
try do
Poison.decode!(body, keys: :atoms)
rescue
_ -> body
end
end
def process_request_headers(headers) do
added_headers = [{"Accept", "application/json"},
{"Content-Type", "application/json"},
{"User-Agent", "Elixir/ExZendesk"},
{"Authorization", "Basic " <> Base.encode64(user <> ":" <> password)}]
headers ++ added_headers
end
def process_request_body(body) do
Poison.encode! body
end
defp user do
System.get_env("ZENDESK_USER") || Application.get_env(:ex_zendesk, :user)
end
defp password do
System.get_env("ZENDESK_PASSWORD") || Application.get_env(:ex_zendesk, :password)
end
defp subdomain do
System.get_env("ZENDESK_SUBDOMAIN") || Application.get_env(:ex_zendesk, :subdomain)
end
end
|
lib/ex_zendesk.ex
| 0.775307
| 0.723621
|
ex_zendesk.ex
|
starcoder
|
defmodule RigInboundGateway.Proxy do
@moduledoc """
Enables persistence and CRUD operations for the Proxy's API definitions via Presence.
In a distributed setting, the node that does the persisting of API definitions
spreads the information via Phoenix' PubSub Server as Phoenix Presence information.
The other nodes react by tracking the same record themselves, which means that
for one record and n nodes there are n items in the Presence list. The
following properties are a result of this:
- API definitions managing can occur on/by any node.
- API definitions are eventually consistent over all nodes.
- Any node can go down and come up at any time without affecting the
API definitions, except if all nodes go down at the same time (in that case
there is nothing to synchronize from -- changes are not stored on disk).
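A minimal interaction sketch (the server is the registered
`RigInboundGateway.Proxy` process started via `start_link/2`):
{:ok, _pid} = RigInboundGateway.Proxy.start_link()
apis = RigInboundGateway.Proxy.list_apis(RigInboundGateway.Proxy)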
"""
# config file is not required
use Rig.Config, []
alias Rig.Config
alias RigInboundGateway.ApiProxy.Api
alias RigInboundGateway.ApiProxy.Validations
require Logger
@typep state_t :: map
@typep server_t :: pid | atom
defmodule ProxyBehaviour do
@moduledoc false
@callback list_apis(server :: Proxy.server_t()) :: [Api.t(), ...]
@callback get_api(server :: Proxy.server_t(), id :: String.t()) :: Api.t()
@callback add_api(server :: Proxy.server_t(), id :: String.t(), api :: Api.t()) :: any
@callback replace_api(
server :: Proxy.server_t(),
id :: String.t(),
prev_api :: Api.t(),
next_api :: Api.t()
) :: any
@callback update_api(server :: Proxy.server_t(), id :: String.t(), api :: Api.t()) :: any
@callback deactivate_api(server :: Proxy.server_t(), id :: String.t()) :: atom
end
@behaviour ProxyBehaviour
@default_tracker_mod RigInboundGateway.ApiProxy.Tracker
def start_link(tracker_mod \\ nil, opts \\ []) do
tracker_mod = if tracker_mod, do: tracker_mod, else: @default_tracker_mod
Logger.debug(fn -> "API proxy with tracker #{inspect(tracker_mod)}" end)
GenServer.start_link(
__MODULE__,
_state = %{tracker_mod: tracker_mod},
Keyword.merge([name: __MODULE__], opts)
)
end
@spec init_presence(state_t) :: :ok
def init_presence(state) do
conf = config()
case conf.config_path_or_json do
nil ->
Logger.info(fn -> "Reverse-proxy configuration not present." end)
config_path_or_json ->
do_init_presence(config_path_or_json, state)
end
end
defp do_init_presence(config_path_or_json, state) do
case Config.parse_json_env(config_path_or_json) do
{:ok, config} when is_list(config) ->
Enum.each(config, fn api ->
api_with_default_values = api |> Validations.validate!() |> set_default_api_values
%{"id" => id} = api
# credo:disable-for-next-line Credo.Check.Refactor.Nesting
Logger.info(fn -> "Reverse proxy: service #{id}" end)
state.tracker_mod.track(api["id"], api_with_default_values)
end)
{:ok, not_a_list} ->
Logger.error(fn -> "The proxy config must be a list, got #{inspect(not_a_list)}" end)
System.stop()
{:error, %Config.SyntaxError{} = e} ->
Logger.error(fn -> "Could not read proxy config, reason: #{Exception.message(e)}" end)
System.stop()
end
end
@impl ProxyBehaviour
def list_apis(server) do
GenServer.call(server, {:list_apis})
end
@impl ProxyBehaviour
def get_api(server, id) do
GenServer.call(server, {:get_api, id})
end
@impl ProxyBehaviour
def add_api(server, id, api) do
GenServer.call(server, {:add_api, id, api})
end
@impl ProxyBehaviour
def replace_api(server, id, prev_api, next_api) do
GenServer.call(server, {:replace_api, id, prev_api, next_api})
end
@impl ProxyBehaviour
def update_api(server, id, api) do
GenServer.call(server, {:update_api, id, api})
end
@impl ProxyBehaviour
def deactivate_api(server, id) do
GenServer.call(server, {:deactivate_api, id})
end
@spec handle_join_api(server_t, String.t(), Api.t()) :: server_t
def handle_join_api(server, id, api) do
GenServer.cast(server, {:handle_join_api, id, api})
end
# callbacks
@spec init(state_t) :: {:ok, state_t}
def init(state) do
send(self(), :init_apis)
{:ok, state}
end
@spec handle_info(:init_apis, state_t) :: {:noreply, state_t}
def handle_info(:init_apis, state) do
:ok = init_presence(state)
{:noreply, state}
end
@spec handle_call({:add_api, String.t(), Api.t()}, any, state_t) :: {:reply, any, state_t}
def handle_call({:add_api, id, api}, _from, state) do
Logger.info("Handling add of new API definition with id=#{id}")
api_with_default_values = set_default_api_values(api)
response = state.tracker_mod.track(id, api_with_default_values)
{:reply, response, state}
end
@spec handle_call({:replace_api, String.t(), Api.t(), Api.t()}, any, state_t) ::
{:reply, any, state_t}
def handle_call({:replace_api, id, prev_api, next_api}, _from, state) do
Logger.info("Handling replace of deactivated API definition with id=#{id} with new API")
api_with_default_values =
set_default_api_values(next_api)
|> Map.put("ref_number", prev_api["ref_number"] + 1)
response = state.tracker_mod.update(id, api_with_default_values)
{:reply, response, state}
end
@spec handle_call({:update_api, String.t(), Api.t()}, any, state_t) :: {:reply, any, state_t}
def handle_call({:update_api, id, api}, _from, state) do
Logger.info("Handling update of API definition with id=#{id}")
meta_info = %{"ref_number" => api["ref_number"] + 1, "timestamp" => Timex.now()}
api_with_meta_info = add_meta_info(api, meta_info)
response = state.tracker_mod.update(id, api_with_meta_info)
{:reply, response, state}
end
@spec handle_call({:deactivate_api, String.t()}, any, state_t) :: {:reply, atom, state_t}
def handle_call({:deactivate_api, id}, _from, state) do
node_name = get_node_name()
Logger.info("Handling deactivate of API definition with id=#{id} in presence")
{_id, current_api} = state.tracker_mod.find_by_node(id, node_name)
api =
current_api
|> Map.update("ref_number", 0, &(&1 + 1))
|> Map.put("active", false)
|> Map.put("timestamp", Timex.now())
response = state.tracker_mod.update(id, api)
{:reply, response, state}
end
@spec handle_call({:list_apis}, any, state_t) :: {:reply, [Api.t(), ...], state_t}
def handle_call({:list_apis}, _from, state) do
list_of_apis = get_node_name() |> state.tracker_mod.list_by_node
{:reply, list_of_apis, state}
end
@spec handle_call({:get_api, String.t()}, any, state_t) :: {:reply, Api.t(), state_t}
def handle_call({:get_api, id}, _from, state) do
node_name = get_node_name()
api = state.tracker_mod.find_by_node(id, node_name)
{:reply, api, state}
end
# Handle incoming JOIN events with new data from Presence
@spec handle_cast({:handle_join_api, String.t(), Api.t()}, state_t) :: {:noreply, state_t}
def handle_cast({:handle_join_api, id, api}, state) do
node_name = get_node_name()
Logger.info(
"Handling JOIN differential for API definition with id=#{id} for node=#{node_name}"
)
prev_api = state.tracker_mod.find_by_node(id, node_name)
case compare_api(id, prev_api, api, state) do
{:error, :exit} ->
Logger.debug(fn ->
"There is already most recent API definition with id=#{id} in presence"
end)
{:ok, :track} ->
Logger.debug(fn -> "API definition with id=#{id} doesn't exist yet, starting to track" end)
api_with_default_values = set_default_api_values(api)
state.tracker_mod.track(id, api_with_default_values)
{:ok, :update_no_ref} ->
Logger.debug(fn ->
"API definition with id=#{id} is adopting new version with no ref_number update"
end)
state.tracker_mod.update(id, add_meta_info(api))
{:ok, :update_with_ref} ->
Logger.debug(fn ->
"API definition with id=#{id} is adopting new version with ref_number update"
end)
prev_api_data = elem(prev_api, 1)
meta_info = %{"ref_number" => prev_api_data["ref_number"] + 1}
api_with_meta_info = add_meta_info(api, meta_info)
state.tracker_mod.update(id, api_with_meta_info)
end
{:noreply, state}
end
# private functions
# Compare current API and new API based by ID
# Comparison is done by several steps:
# - Reference number
# - Data equality (without internal information like phx_ref, node_name, timestamp, etc.)
# - Data equality across nodes in cluster (without internal information ...)
# - Timestamp
@spec compare_api(String.t(), nil, Api.t(), state_t) :: {:ok, :track}
defp compare_api(_id, nil, _next_api, _state), do: {:ok, :track}
@spec compare_api(String.t(), {String.t(), Api.t()}, Api.t(), state_t) :: {atom, atom}
defp compare_api(id, {id, prev_api}, next_api, state) do
cond do
# Evaluate only active APIs with equal reference number
# -> prevent deactivated API overriding on node start-up
# Freshly deactivated APIs have incremented reference number => not equal
next_api["ref_number"] == prev_api["ref_number"] && prev_api["active"] == true ->
eval_data_change(id, prev_api, next_api, state)
next_api["ref_number"] > prev_api["ref_number"] ->
{:ok, :update_with_ref}
true ->
{:error, :exit}
end
end
# Evaluate if current API and new API are the same or not => data wise
@spec eval_data_change(String.t(), Api.t(), Api.t(), state_t) :: {atom, atom}
defp eval_data_change(id, prev_api, next_api, state) do
if data_equal?(prev_api, next_api) do
{:error, :exit}
else
eval_all_nodes_data(id, prev_api, next_api, state)
end
end
# Evaluate how many nodes have data from new API
# If exactly half of the nodes are different => compare timestamps
@spec eval_all_nodes_data(String.t(), Api.t(), Api.t(), state_t) :: {atom, atom}
defp eval_all_nodes_data(id, prev_api, next_api, state) do
all_api_instances = state.tracker_mod.find_all(id)
n_api_instances_halved = length(all_api_instances) / 2
matching_api_instances =
all_api_instances
|> Enum.filter(fn {_key, meta} ->
meta |> data_equal?(next_api)
end)
n_matching_api_instances = length(matching_api_instances)
cond do
n_matching_api_instances < n_api_instances_halved ->
{:error, :exit}
n_matching_api_instances > n_api_instances_halved ->
{:ok, :update_no_ref}
true ->
next_api["timestamp"]
|> Timex.after?(prev_api["timestamp"])
|> eval_time
end
end
# Checks if current API and new API are equal => data wise
# Strips internal information such as timestamp, phx_ref, node_name, ...
@spec data_equal?(Api.t(), Api.t()) :: boolean
defp data_equal?(prev_api, next_api) do
next_api_without_meta = next_api |> remove_meta_info
prev_api
|> remove_meta_info
|> Map.equal?(next_api_without_meta)
end
@spec eval_time(false) :: {:error, :exit}
defp eval_time(false) do
{:error, :exit}
end
@spec eval_time(true) :: {:ok, :update_no_ref}
defp eval_time(true) do
{:ok, :update_no_ref}
end
@spec get_node_name() :: atom
defp get_node_name, do: Phoenix.PubSub.node_name(Rig.PubSub)
# Enhance API definition with internal information
@spec add_meta_info(Api.t(), map) :: Api.t()
defp add_meta_info(api, meta_info \\ %{}) do
api
|> Map.merge(meta_info)
|> Map.put("node_name", get_node_name())
end
# Remove internal information from API definition => to have just raw data
@spec remove_meta_info(Api.t()) :: Api.t()
defp remove_meta_info(api) do
api
|> Map.delete(:phx_ref)
|> Map.delete(:phx_ref_prev)
|> Map.delete("node_name")
|> Map.delete("timestamp")
end
@spec set_default_api_values(Api.t()) :: Api.t()
defp set_default_api_values(api) do
default_api_values = %{
"active" => true,
"auth_type" => "none",
"auth" => %{
"use_header" => false,
"header_name" => "",
"use_query" => false,
"query_name" => ""
},
"proxy" => %{
"use_env" => false
},
"ref_number" => 0,
"timestamp" => Timex.now(),
"versioned" => false
}
api_with_default =
default_api_values
|> Map.merge(api)
# Make sure API has always origin node
|> Map.put("node_name", get_node_name())
default_auth_values =
api_with_default
|> Map.get("auth")
|> Map.merge(auth_type_based_values(api_with_default["auth_type"]))
Map.put(api_with_default, "auth", default_auth_values)
end
@spec auth_type_based_values(String.t()) :: map
defp auth_type_based_values("jwt") do
%{"use_header" => true, "header_name" => "Authorization"}
end
defp auth_type_based_values(_), do: %{}
end
|
lib/rig_inbound_gateway/proxy.ex
| 0.810291
| 0.433082
|
proxy.ex
|
starcoder
|
defmodule Wadm.Deployments.DeploymentMonitor do
@doc """
One of these processes should be running to observe and reconcile
an appspec+version instance.
This is a Horde-managed server and so only one of these will be running within
the horde cluster at any given time, and Horde can relocate this to any cluster
member as it sees fit. Make sure you only ever provision this with the start_deployment_monitor
function.
## Process Drift
When a node in a wadm cluster shuts down (intentionally or otherwise), this process
will be re-started on a new target node _without saving state_. An instant later, that
same process will try and reconstitute its state from cache.
"""
use GenServer
require Logger
defmodule State do
defstruct [:spec, :lattice_id]
end
@spec start_link(map()) :: GenServer.on_start()
def start_link(%{app_spec: app_spec, lattice_id: lattice_id} = opts) do
case GenServer.start_link(__MODULE__, opts, name: via_tuple(app_spec.name, lattice_id)) do
{:ok, pid} ->
{:ok, pid}
{:error, {:already_started, pid}} ->
Logger.debug("Already running deployment monitor at #{inspect(pid)}")
:ignore
other ->
other
end
end
@impl true
def init(opts) do
Logger.debug(
"Starting Deployment Monitor for deployment #{opts.app_spec.name} v#{opts.app_spec.version}"
)
{:ok,
%State{
spec: opts.app_spec,
lattice_id: opts.lattice_id
}, {:continue, :ensure_lattice_supervisor}}
end
@impl true
def handle_continue(:ensure_lattice_supervisor, state) do
# Make sure that there's a lattice supervisor running
{:ok, _pid} = Wadm.LatticeSupervisor.start_lattice_supervisor(state.lattice_id)
{:noreply, state}
end
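@doc """
Starts a deployment monitor for the given app spec in the given lattice,
or returns the pid of an already running one. A sketch (`app_spec` is
assumed to be an app spec struct with `name` and `version` fields):
{:ok, pid} = DeploymentMonitor.start_deployment_monitor(app_spec, "default")
"""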
@spec start_deployment_monitor(AppSpec.t(), String.t()) :: {:error, any} | {:ok, pid()}
def start_deployment_monitor(app_spec, lattice_id) do
opts = %{
app_spec: app_spec,
lattice_id: lattice_id
}
pid = get_process(opts.app_spec.name, opts.lattice_id)
if pid == nil do
Horde.DynamicSupervisor.start_child(
Wadm.HordeSupervisor,
{Wadm.Deployments.DeploymentMonitor, opts}
)
else
{:ok, pid}
end
end
# Within a libcluster-formed BEAM cluster, each deployment manager is
# uniquely identified by its spec name and the lattice in which it's
# running
def via_tuple(spec_name, lattice_id),
do: {:via, Horde.Registry, {Wadm.HordeRegistry, "depmon_#{spec_name}_#{lattice_id}"}}
def get_process(spec_name, lattice_id) when is_binary(lattice_id) do
case Horde.Registry.lookup(Wadm.HordeRegistry, "depmon_#{spec_name}_#{lattice_id}") do
[{pid, _val}] -> pid
[] -> nil
end
end
end
|
wadm/lib/wadm/deployments/deployment_monitor.ex
| 0.637257
| 0.414751
|
deployment_monitor.ex
|
starcoder
|
defmodule Mcc.Lib do
@moduledoc """
This module contains functions to manipulate a cluster based on mnesia.
Includes:
1. Start mnesia and the mnesia tables
2. Make a node join the mnesia cluster
3. Make a node leave the mnesia cluster
4. Remove a node from the mnesia cluster
5. Get the status of the mnesia cluster
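A typical flow for joining an existing cluster (a sketch; the node name
is a placeholder):
:ok = Mcc.Lib.start()
:ok = Mcc.Lib.join_cluster(:"mcc@127.0.0.1")
Mcc.Lib.status()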
"""
@doc """
Tries to start mnesia and create or copy the mnesia tables.
It raises an exception in case of failure and returns `:ok` if successful,
or `{:error, reason}` if an error occurs.
"""
@spec start :: :ok | {:error, term()}
def start do
:ok = ensure_ok(ensure_data_dir())
:ok = ensure_ok(init_mnesia_schema())
:ok = :mnesia.start()
:ok = init_tables()
:ok = wait_for(:tables)
end
@doc """
Make one node join into the mnesia cluster.
"""
@spec join_cluster(node()) :: :ok | {:error, term()}
def join_cluster(node_name) do
:ok = ensure_ok(ensure_stopped())
:ok = ensure_ok(delete_schema())
:ok = ensure_ok(ensure_started())
:ok = ensure_ok(connect(node_name))
:ok = ensure_ok(copy_schema(node()))
:ok = copy_tables()
:ok = ensure_ok(wait_for(:tables))
end
@doc """
Make a node leave the mnesia cluster.
"""
@spec leave_cluster ::
:ok
| {:error, :node_not_in_cluster}
| {:error, {:failed_to_leave, node()}}
def leave_cluster do
leave_cluster(running_nodes() -- [node()])
end
@doc """
Remove one node from the mnesia cluster.
"""
@spec remove_from_cluster(node()) ::
:ok
| {:error, :node_not_in_cluster}
| {:error, term()}
def remove_from_cluster(node_name) when node_name != node() do
case {node_in_cluster?(node_name), running_db_node?(node_name)} do
{true, true} ->
:ok = ensure_ok(:rpc.call(node_name, __MODULE__, :ensure_stopped, []))
:ok = ensure_ok(del_schema_copy(node_name))
:ok = ensure_ok(:rpc.call(node_name, __MODULE__, :delete_schema, []))
{true, false} ->
:ok = ensure_ok(del_schema_copy(node_name))
:ok = ensure_ok(:rpc.call(node_name, __MODULE__, :delete_schema, []))
{false, _} ->
{:error, :node_not_in_cluster}
end
end
@doc """
Get status of the mnesia cluster.
"""
@spec status :: list()
def status do
running = :mnesia.system_info(:running_db_nodes)
stopped = :mnesia.system_info(:db_nodes) -- running
[{:running_nodes, running}, {:stopped_nodes, stopped}]
end
@doc """
Delete schema copy of given node.
"""
@spec del_schema_copy(node()) :: :ok | {:error, any()}
def del_schema_copy(node_name) do
case :mnesia.del_table_copy(:schema, node_name) do
{:atomic, :ok} -> :ok
{:aborted, reason} -> {:error, reason}
end
end
@doc """
Delete schema information in local node.
"""
@spec delete_schema :: :ok | {:error, any()}
def delete_schema, do: :mnesia.delete_schema([node()])
@doc """
Ensure mnesia is stopped.
"""
@spec ensure_stopped :: :ok | {:error, any()}
def ensure_stopped do
_ = :mnesia.stop()
wait_for(:stop)
end
@doc """
Get all nodes in current mnesia cluster.
"""
@spec all_nodes :: [node()]
def all_nodes, do: :mnesia.system_info(:db_nodes)
@doc """
Get all running nodes in current mnesia cluster.
"""
@spec running_nodes :: [node()]
def running_nodes, do: :mnesia.system_info(:running_db_nodes)
@doc """
Get all not running nodes in current mnesia cluster.
"""
@spec not_running_nodes :: [node()]
def not_running_nodes, do: all_nodes() -- running_nodes()
@doc """
Copy mnesia table from remote node.
"""
@spec copy_table(atom(), atom()) :: :ok | {:error, any()}
def copy_table(name, ram_or_disc \\ :ram_copies) do
ensure_tab(:mnesia.add_table_copy(name, node(), ram_or_disc))
end
@doc """
Create mnesia table.
"""
@spec create_table(atom(), list()) :: :ok | {:error, any()}
def create_table(name, tabdef) do
ensure_tab(:mnesia.create_table(name, tabdef))
end
@doc false
defp ensure_data_dir do
mnesia_dir = :mnesia.system_info(:directory)
case :filelib.ensure_dir(:filename.join(mnesia_dir, :foo)) do
:ok -> :ok
{:error, reason} -> {:error, {:mnesia_dir_error, mnesia_dir, reason}}
end
end
@doc false
defp init_mnesia_schema do
case :mnesia.system_info(:extra_db_nodes) do
[] -> :mnesia.create_schema([node()])
[_ | _] -> :ok
end
end
@doc false
defp copy_schema(node_name) do
case :mnesia.change_table_copy_type(:schema, node_name, :disc_copies) do
{:atomic, :ok} -> :ok
{:aborted, {:already_exists, :schema, _node_name, :disc_copies}} -> :ok
{:aborted, error} -> {:error, error}
end
end
@doc false
defp init_tables do
case :mnesia.system_info(:extra_db_nodes) do
[] -> create_tables()
[_ | _] -> copy_tables()
end
end
@doc false
defp create_tables do
:mcc
|> Application.get_env(:mnesia_table_modules, [])
|> Enum.each(fn t -> Code.ensure_loaded?(t) and apply(t, :boot_tables, []) end)
end
@doc false
defp copy_tables do
:mcc
|> Application.get_env(:mnesia_table_modules, [])
|> Enum.each(fn t -> Code.ensure_loaded?(t) and apply(t, :copy_tables, []) end)
end
@doc false
defp ensure_started do
_ = :mnesia.start()
wait_for(:start)
end
@doc false
defp connect(node_name) do
case :mnesia.change_config(:extra_db_nodes, [node_name]) do
{:ok, [_node_name]} -> :ok
{:ok, []} -> {:error, {:failed_to_connect, node_name}}
error -> error
end
end
@doc false
defp leave_cluster([]), do: {:error, :node_not_in_cluster}
defp leave_cluster(nodes) when is_list(nodes) do
case Enum.any?(nodes, fn node_name -> leave_cluster(node_name) == :ok end) do
true -> :ok
_ -> {:error, {:failed_to_leave, nodes}}
end
end
defp leave_cluster(node_name) when is_atom(node_name) and node_name != node() do
case running_db_node?(node_name) do
true ->
:ok = ensure_ok(ensure_stopped())
:ok = ensure_ok(:rpc.call(node_name, __MODULE__, :del_schema_copy, [node()]))
:ok = ensure_ok(delete_schema())
false ->
{:error, {:node_name_not_running, node_name}}
end
end
@doc false
defp node_in_cluster?(node_name), do: Enum.member?(all_nodes(), node_name)
@doc false
defp running_db_node?(node_name), do: Enum.member?(running_nodes(), node_name)
@doc false
defp wait_for(:start) do
case :mnesia.system_info(:is_running) do
:yes ->
:ok
:starting ->
Process.sleep(1_000)
wait_for(:start)
_ ->
{:error, :mnesia_unexpectedly_stopped}
end
end
defp wait_for(:stop) do
case :mnesia.system_info(:is_running) do
:no ->
:ok
:stopping ->
Process.sleep(1_000)
wait_for(:stop)
_ ->
{:error, :mnesia_unexpectedly_running}
end
end
defp wait_for(:tables) do
:local_tables
|> :mnesia.system_info()
|> :mnesia.wait_for_tables(Application.get_env(:mcc, :mnesia_table_wait_timeout, 150_000))
|> case do
:ok -> :ok
{:error, reason} -> {:error, reason}
{:timeout, badtables} -> {:error, {:timeout, badtables}}
end
end
@doc false
defp ensure_ok(:ok), do: :ok
defp ensure_ok({:error, {_, {:already_exists, _}}}), do: :ok
defp ensure_ok(any), do: {:error, any}
@doc false
defp ensure_tab({:atomic, :ok}), do: :ok
defp ensure_tab({:aborted, {:already_exists, _}}), do: :ok
defp ensure_tab({:aborted, {:already_exists, _name, _node_name}}), do: :ok
defp ensure_tab({:aborted, error}), do: {:error, error}
# __end_of_module__
end
|
lib/mcc/lib.ex
| 0.755186
| 0.680494
|
lib.ex
|
starcoder
|
defmodule Infer.Loaders.Dataloader do
@moduledoc """
Uses `Dataloader` to load missing data incrementally.
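A rough lifecycle sketch (the shape of the data requests is internal to
Infer):
cache = Infer.Loaders.Dataloader.init()
cache = Infer.Loaders.Dataloader.load(cache, data_reqs)
result = Infer.Loaders.Dataloader.lookup(cache, data_req)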
"""
alias Infer.Result
def lookup(cache, data_req) do
case apply(Dataloader, :get, [cache | args_for(data_req)]) do
{:error, "Unable to find " <> _} -> {:not_loaded, MapSet.new([data_req])}
{:ok, result} -> Result.ok(result)
other -> other
end
end
defp args_for({:assoc, subject, key}) do
[:assoc, key, subject]
end
defp args_for({:query_one, type, [main_condition | other_conditions], opts}) do
opts = opts |> where(other_conditions)
[:assoc, {:one, type, opts}, [main_condition]]
end
defp args_for({:query_first, type, [main_condition | other_conditions], opts}) do
opts = opts |> where(other_conditions) |> Keyword.put(:limit, 1)
[:assoc, {:one, type, opts}, [main_condition]]
end
defp args_for({:query_all, type, [main_condition | other_conditions], opts}) do
opts = opts |> where(other_conditions)
[:assoc, {:many, type, opts}, [main_condition]]
end
defp where(opts, []), do: opts
defp where(opts, conditions), do: Keyword.put(opts, :where, {:all, conditions})
def init() do
repo = config(:repo)
# workaround for dataloader incompatibility with transactions
# -> https://github.com/absinthe-graphql/dataloader/issues/129#issuecomment-965492108
run_concurrently? = not db_conn_checked_out?(repo)
source =
Dataloader.Ecto.new(repo,
query: &Infer.Ecto.Query.from_options/2,
async: run_concurrently?
)
Dataloader.new(get_policy: :tuples, async: run_concurrently?)
|> Dataloader.add_source(:assoc, source)
end
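  # Minimal usage sketch (the `post` subject and `:author` key are hypothetical):
  # build the cache once, batch all pending requests with load/2, then read
  # results back with lookup/2 (Result.ok/1 is assumed to wrap the value).
  #
  #     cache = Infer.Loaders.Dataloader.init()
  #     cache = Infer.Loaders.Dataloader.load(cache, data_reqs)
  #     {:ok, author} = Infer.Loaders.Dataloader.lookup(cache, {:assoc, post, :author})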
defp db_conn_checked_out?(repo_name) do
case Ecto.Repo.Registry.lookup(repo_name) do
# Ecto < 3.8.0
{adapter, meta} -> adapter.checked_out?(meta)
# Ecto >= 3.8.0
%{adapter: adapter} = meta -> adapter.checked_out?(meta)
end
end
def load(cache, data_reqs) do
Enum.reduce(data_reqs, cache, fn data_req, cache ->
apply(Dataloader, :load, [cache | args_for(data_req)])
end)
|> Dataloader.run()
end
def config(:repo), do: Application.fetch_env!(:infer, :repo)
end
|
lib/infer/loaders/dataloader.ex
| 0.629319
| 0.40645
|
dataloader.ex
|
starcoder
|
defmodule Deneb.Chart do
@moduledoc """
Vega-lite chart implementation
"""
use TypedStruct
alias Deneb.{Chart, Mark, Encoding, Projection, Selection, Transform, Utils}
typedstruct do
@typedoc "chart properties"
field :mark, Mark.t() | nil, default: nil
field :encoding, Encoding.t() | nil, default: nil
    field :projection, Projection.t() | nil, default: nil
    field :selection, Selection.t() | nil, default: nil
field :transform, Transform.t() | nil, default: nil
field :raw, String.t() | map() | nil, default: nil
end
def new(raw) when is_binary(raw) or is_map(raw) do
    %Chart{raw: raw}
end
def new(mark, encoding, opts \\ []) do
selection = opts[:selection] || nil
transform = opts[:transform] || nil
projection = opts[:projection] || nil
    %Chart{
mark: mark,
encoding: encoding,
selection: selection,
transform: transform,
projection: projection
}
end
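  # Usage sketch, assuming `Mark.new/1` and `Encoding.new/1` constructors exist
  # in this library with these call shapes (both are assumptions):
  #
  #     mark = Deneb.Mark.new(:bar)
  #     encoding = Deneb.Encoding.new(%{x: %{field: "category"}, y: %{field: "value"}})
  #     chart = Deneb.Chart.new(mark, encoding)
  #     Deneb.Chart.to_json(chart, title: "My chart")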
def to_json(chart, base_chart_opts \\ [])
def to_json(%Chart{} = chart, base_chart_opts) do
result = chart
|> Utils.to_map()
|> Enum.reduce(%{}, fn {k, v}, acc ->
cond do
is_struct(v) -> Map.put(acc, k, apply(v.__struct__, :to_json, [v]))
is_nil(v) -> acc
true -> Map.put(acc, k, v)
end
end)
Map.merge(result, Utils.encode(base_chart_opts))
end
  def to_json(_chart, _base_chart_opts), do: raise "Please provide a Chart struct to this function"
def repeat(chart, repeat, base_chart_opts \\ []) do
data = Chart.to_json(chart, base_chart_opts)
Utils.encode(%{spec: data, repeat: repeat})
end
def layer(charts, base_chart_opts \\ []) do
data = Utils.encode(base_chart_opts)
Map.put(data, :layer, charts_to_json(charts))
end
def hconcat(charts, base_chart_opts \\ []) do
data = Utils.encode(base_chart_opts)
Map.put(data, :hconcat, charts_to_json(charts))
end
def vconcat(charts, base_chart_opts \\ []) do
data = Utils.encode(base_chart_opts)
Map.put(data, :vconcat, charts_to_json(charts))
end
def concat(charts, columns \\ 2, base_chart_opts \\ []) do
data = Utils.encode(base_chart_opts)
data
|> Map.put(:columns, columns)
|> Map.put(:concat, Enum.map(charts, &Chart.to_json/1))
end
# private functions
defp charts_to_json(charts) do
Enum.map(charts, fn chart ->
case is_struct(chart) do
true -> apply(chart.__struct__, :to_json, [chart])
_ -> chart
end
end)
end
end
|
lib/deneb/chart.ex
| 0.739799
| 0.470433
|
chart.ex
|
starcoder
|
defmodule Numexy do
alias Numexy.Array
@moduledoc """
Documentation for Numexy.
"""
@doc """
  Creates a new vector or matrix.
## Examples
iex> Numexy.new([1,2,3])
%Numexy.Array{array: [1, 2, 3], shape: {3, nil}}
iex> Numexy.new([[1,2,3],[1,2,3]])
%Numexy.Array{array: [[1, 2, 3], [1, 2, 3]], shape: {2, 3}}
"""
def new(array), do: %Array{array: array, shape: {row_count(array), col_count(array)}}
defp row_count(array), do: Enum.count(array)
defp col_count([head | _]) when is_list(head), do: Enum.count(head)
defp col_count(_), do: nil
@doc """
  Adds vectors, matrices, and scalars element-wise.
## Examples
iex> x = Numexy.new([1,2,3])
%Numexy.Array{array: [1,2,3], shape: {3, nil}}
iex> y = 4
iex> Numexy.add(x, y)
%Numexy.Array{array: [5,6,7], shape: {3, nil}}
"""
def add(%Array{array: v, shape: {_, nil}}, s) when is_number(s),
do: Enum.map(v, &(&1 + s)) |> new
def add(s, %Array{array: v, shape: {_, nil}}) when is_number(s),
do: Enum.map(v, &(&1 + s)) |> new
def add(%Array{array: xv, shape: {xv_row, nil}}, %Array{array: yv, shape: {yv_row, nil}})
when xv_row == yv_row do
# vector + vector
Enum.zip(xv, yv)
|> Enum.map(fn {a, b} -> a + b end)
|> new
end
def add(%Array{array: xm, shape: xm_shape}, %Array{array: ym, shape: ym_shape})
when xm_shape == ym_shape do
# matrix + matrix
{_, xm_col} = xm_shape
xv = List.flatten(xm)
yv = List.flatten(ym)
Enum.zip(xv, yv)
|> Enum.map(fn {a, b} -> a + b end)
|> Enum.chunk_every(xm_col)
|> new
end
def add(%Array{array: m, shape: {_, col}}, s) when col != nil do
m
|> Enum.map(&Enum.map(&1, fn x -> x + s end))
|> new
end
def add(s, %Array{array: m, shape: {_, col}}) when col != nil do
m
|> Enum.map(&Enum.map(&1, fn x -> x + s end))
|> new
end
@doc """
  Subtracts vectors, matrices, and scalars element-wise.
## Examples
iex> x = Numexy.new([1,2,3])
%Numexy.Array{array: [1,2,3], shape: {3, nil}}
iex> y = 4
iex> Numexy.sub(x, y)
%Numexy.Array{array: [-3,-2,-1], shape: {3, nil}}
"""
def sub(%Array{array: v, shape: {_, nil}}, s) when is_number(s),
do: Enum.map(v, &(&1 - s)) |> new
def sub(s, %Array{array: v, shape: {_, nil}}) when is_number(s),
do: Enum.map(v, &(&1 - s)) |> new
def sub(%Array{array: xv, shape: {xv_row, nil}}, %Array{array: yv, shape: {yv_row, nil}})
when xv_row == yv_row do
    # vector - vector
Enum.zip(xv, yv)
|> Enum.map(fn {a, b} -> a - b end)
|> new
end
def sub(%Array{array: xm, shape: xm_shape}, %Array{array: ym, shape: ym_shape})
when xm_shape == ym_shape do
    # matrix - matrix
{_, xm_col} = xm_shape
xv = List.flatten(xm)
yv = List.flatten(ym)
Enum.zip(xv, yv)
|> Enum.map(fn {a, b} -> a - b end)
|> Enum.chunk_every(xm_col)
|> new
end
def sub(%Array{array: m, shape: {_, col}}, s) when col != nil do
m
|> Enum.map(&Enum.map(&1, fn x -> x - s end))
|> new
end
def sub(s, %Array{array: m, shape: {_, col}}) when col != nil do
m
|> Enum.map(&Enum.map(&1, fn x -> x - s end))
|> new
end
@doc """
  Multiplies vectors, matrices, and scalars element-wise.
## Examples
iex> x = Numexy.new([1,2,3])
%Numexy.Array{array: [1,2,3], shape: {3, nil}}
iex> y = 4
iex> Numexy.mul(x, y)
%Numexy.Array{array: [4,8,12], shape: {3, nil}}
"""
def mul(%Array{array: v, shape: {_, nil}}, s) when is_number(s),
do: Enum.map(v, &(&1 * s)) |> new
def mul(s, %Array{array: v, shape: {_, nil}}) when is_number(s),
do: Enum.map(v, &(&1 * s)) |> new
def mul(%Array{array: xv, shape: {xv_row, nil}}, %Array{array: yv, shape: {yv_row, nil}})
when xv_row == yv_row do
    # vector * vector (element-wise)
Enum.zip(xv, yv)
|> Enum.map(fn {a, b} -> a * b end)
|> new
end
def mul(%Array{array: xm, shape: xm_shape}, %Array{array: ym, shape: ym_shape})
when xm_shape == ym_shape do
    # matrix * matrix (element-wise)
{_, xm_col} = xm_shape
xv = List.flatten(xm)
yv = List.flatten(ym)
Enum.zip(xv, yv)
|> Enum.map(fn {a, b} -> a * b end)
|> Enum.chunk_every(xm_col)
|> new
end
def mul(%Array{array: m, shape: {_, col}}, s) when col != nil do
m
|> Enum.map(&Enum.map(&1, fn x -> x * s end))
|> new
end
def mul(s, %Array{array: m, shape: {_, col}}) when col != nil do
m
|> Enum.map(&Enum.map(&1, fn x -> x * s end))
|> new
end
@doc """
  Divides vectors, matrices, and scalars element-wise.
## Examples
iex> x = Numexy.new([8,4,2])
%Numexy.Array{array: [8,4,2], shape: {3, nil}}
iex> y = 4
iex> Numexy.div(x, y)
%Numexy.Array{array: [2.0,1.0,0.5], shape: {3, nil}}
"""
def div(%Array{array: v, shape: {_, nil}}, s) when is_number(s),
do: Enum.map(v, &(&1 / s)) |> new
def div(s, %Array{array: v, shape: {_, nil}}) when is_number(s),
do: Enum.map(v, &(&1 / s)) |> new
def div(%Array{array: xv, shape: {xv_row, nil}}, %Array{array: yv, shape: {yv_row, nil}})
when xv_row == yv_row do
    # vector / vector (element-wise)
Enum.zip(xv, yv)
|> Enum.map(fn {a, b} -> a / b end)
|> new
end
def div(%Array{array: xm, shape: xm_shape}, %Array{array: ym, shape: ym_shape})
when xm_shape == ym_shape do
    # matrix / matrix (element-wise)
{_, xm_col} = xm_shape
xv = List.flatten(xm)
yv = List.flatten(ym)
Enum.zip(xv, yv)
|> Enum.map(fn {a, b} -> a / b end)
|> Enum.chunk_every(xm_col)
|> new
end
def div(%Array{array: m, shape: {_, col}}, s) when col != nil do
m
|> Enum.map(&Enum.map(&1, fn x -> x / s end))
|> new
end
def div(s, %Array{array: m, shape: {_, col}}) when col != nil do
m
|> Enum.map(&Enum.map(&1, fn x -> x / s end))
|> new
end
@doc """
Calculate inner product.
## Examples
iex> x = Numexy.new([1,2,3])
%Numexy.Array{array: [1,2,3], shape: {3, nil}}
iex> y = Numexy.new([1,2,3])
%Numexy.Array{array: [1,2,3], shape: {3, nil}}
iex> Numexy.dot(x, y)
14
"""
def dot(%Array{array: xv, shape: {xv_row, nil}}, %Array{array: yv, shape: {yv_row, nil}})
when xv_row == yv_row do
    # vector * vector returns a scalar
dot_vector(xv, yv)
end
def dot(%Array{array: m, shape: {_, m_col}}, %Array{array: v, shape: {v_row, nil}})
when m_col == v_row do
    # matrix * vector returns a vector
m = for mi <- m, vi <- [v], do: [mi, vi]
m
|> Enum.map(fn [x, y] -> dot_vector(x, y) end)
|> new
end
  def dot(%Array{array: xm, shape: {_x_row, x_col}}, %Array{array: ym, shape: {y_row, y_col}})
      when x_col == y_row do
    # matrix * matrix returns a matrix; each result row has y_col elements
    m = for xi <- xm, yi <- list_transpose(ym), do: [xi, yi]

    m
    |> Enum.map(fn [x, y] -> dot_vector(x, y) end)
    |> Enum.chunk_every(y_col)
    |> new
  end
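  # Worked example for the matrix clause above: a 2x2 identity times a 2x3
  # matrix yields 2 * 3 = 6 dot products in row-major order, chunked into rows
  # of y_col = 3 elements:
  #
  #     Numexy.dot(Numexy.new([[1, 0], [0, 1]]), Numexy.new([[1, 2, 3], [4, 5, 6]]))
  #     #=> %Numexy.Array{array: [[1, 2, 3], [4, 5, 6]], shape: {2, 3}}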
defp dot_vector(xv, yv) do
Enum.zip(xv, yv)
|> Enum.reduce(0, fn {a, b}, acc -> a * b + acc end)
end
@doc """
Calculate transpose matrix.
## Examples
iex> x = Numexy.new([[4,3],[7,5],[2,7]])
%Array{array: [[4, 3], [7, 5], [2, 7]], shape: {3, 2}}
iex> Numexy.transpose(x)
%Array{array: [[4, 7, 2], [3, 5, 7]], shape: {2, 3}}
"""
def transpose(%Array{array: m, shape: {_, col}}) when col != nil do
m
|> list_transpose
|> new
end
defp list_transpose(list) do
list
|> List.zip()
|> Enum.map(&Tuple.to_list/1)
end
@doc """
Create ones matrix or vector.
## Examples
iex> Numexy.ones({2, 3})
%Array{array: [[1, 1, 1], [1, 1, 1]], shape: {2, 3}}
iex> Numexy.ones({3, nil})
%Array{array: [1, 1, 1], shape: {3, nil}}
"""
def ones({row, nil}) do
List.duplicate(1, row)
|> new
end
def ones({row, col}) do
List.duplicate(1, col)
|> List.duplicate(row)
|> new
end
@doc """
Create zeros matrix or vector.
## Examples
iex> Numexy.zeros({2, 3})
%Numexy.Array{array: [[0, 0, 0], [0, 0, 0]], shape: {2, 3}}
iex> Numexy.zeros({3, nil})
%Numexy.Array{array: [0, 0, 0], shape: {3, nil}}
"""
def zeros({row, nil}) do
List.duplicate(0, row)
|> new
end
def zeros({row, col}) do
List.duplicate(0, col)
|> List.duplicate(row)
|> new
end
@doc """
Sum matrix or vector.
## Examples
iex> Numexy.new([2,9,5]) |> Numexy.sum
16
iex> Numexy.new([[1,2,3],[4,5,6]]) |> Numexy.sum
21
"""
def sum(%Array{array: v, shape: {_, nil}}) do
v
|> Enum.reduce(&(&1 + &2))
end
def sum(%Array{array: m, shape: _}) do
m
|> Enum.reduce(0, &(Enum.reduce(&1, fn x, acc -> x + acc end) + &2))
end
@doc """
  Averages a matrix or vector.
## Examples
iex> Numexy.new([2,9,5]) |> Numexy.avg
5.333333333333333
iex> Numexy.new([[1,2,3],[4,5,6]]) |> Numexy.avg
3.5
"""
def avg(%Array{array: v, shape: {row, nil}}) do
v
|> Enum.reduce(&(&1 + &2))
|> float_div(row)
end
def avg(%Array{array: m, shape: {row, col}}) do
m
|> Enum.reduce(0, &(Enum.reduce(&1, fn x, acc -> x + acc end) + &2))
|> float_div(row * col)
end
defp float_div(dividend, divisor) do
dividend / divisor
end
@doc """
Get matrix or vector value.
## Examples
iex> Numexy.new([2,9,5]) |> Numexy.get({2, nil})
9
iex> Numexy.new([[1,2,3],[4,5,6]]) |> Numexy.get({2, 1})
4
"""
def get(%Array{array: v, shape: {_, nil}}, {row, nil}), do: Enum.at(v, row - 1)
def get(%Array{array: m, shape: _}, {row, col}), do: Enum.at(m, row - 1) |> Enum.at(col - 1)
@doc """
Get index of max value.
## Examples
iex> Numexy.new([[1,2,9],[4,5,6]]) |> Numexy.argmax
2
"""
def argmax(%Array{array: v, shape: {_, nil}}), do: v |> find_max_value_index
def argmax(%Array{array: m, shape: _}) do
m |> find_max_value_index
end
@doc """
Get index of max value row or col.
## Examples
iex> Numexy.new([[1,2,9],[4,6,3]]) |> Numexy.argmax(:row)
[2, 1]
iex> Numexy.new([[1,2,9],[4,6,3]]) |> Numexy.argmax(:col)
[1, 1, 0]
"""
def argmax(%Array{array: m, shape: _}, :row) do
m
|> Enum.map(&find_max_value_index(&1))
end
def argmax(%Array{array: m, shape: _}, :col) do
m
|> list_transpose
|> Enum.map(&find_max_value_index(&1))
end
defp find_max_value_index(list) do
flat_list = List.flatten(list)
max_value = Enum.max(flat_list)
flat_list |> Enum.find_index(&(&1 == max_value))
end
@doc """
Get step function value.
## Examples
iex> Numexy.new([-2,9,5]) |> Numexy.step_function()
%Numexy.Array{array: [0, 1, 1], shape: {3, nil}}
"""
def step_function(%Array{array: v, shape: {_, nil}}) do
v
|> Enum.map(&step_function_output(&1))
|> new
end
defp step_function_output(num) when num > 0, do: 1
defp step_function_output(num) when num <= 0, do: 0
@doc """
Get sigmoid function value.
## Examples
iex> Numexy.new([-2,9,5]) |> Numexy.sigmoid()
%Numexy.Array{array: [0.11920292202211755, 0.9998766054240137, 0.9933071490757153], shape: {3, nil}}
"""
def sigmoid(%Array{array: v, shape: {_, nil}}) do
v
|> Enum.map(&(1 / (1 + :math.exp(-1 * &1))))
|> new
end
@doc """
Get relu function value.
## Examples
iex> Numexy.new([-2,9,5]) |> Numexy.relu()
%Numexy.Array{array: [0, 9, 5], shape: {3, nil}}
"""
def relu(%Array{array: v, shape: {_, nil}}) do
v
|> Enum.map(&relu_output(&1))
|> new
end
defp relu_output(x) when x > 0, do: x
defp relu_output(x) when x <= 0, do: 0
@doc """
Get softmax function value.
## Examples
iex> Numexy.new([-2,9,5]) |> Numexy.softmax()
%Numexy.Array{array: [1.6401031494862326e-5, 0.9819976839988096, 0.017985914969695496], shape: {3, nil}}
"""
def softmax(%Array{array: v, shape: {_, nil}}) do
sum_num = Enum.reduce(v, 0, &(:math.exp(&1) + &2))
v
|> Enum.map(&(:math.exp(&1) / sum_num))
|> new
end
@doc """
Reshape list.
## Examples
iex> Numexy.reshape([1,2,3,4,5,6], 3)
%Numexy.Array{array: [[1,2,3],[4,5,6]], shape: {2, 3}}
"""
def reshape(list, col) when rem(length(list), col) == 0 do
list
|> Enum.chunk_every(col)
|> new
end
@doc """
Calculate outer product.
## Examples
iex> Numexy.new([1,2,3,4]) |> Numexy.outer(Numexy.new([4,3,2,1]))
%Numexy.Array{array: [[4,3,2,1],[8,6,4,2],[12,9,6,3],[16,12,8,4]], shape: {4, 4}}
"""
def outer(%Array{array: array1, shape: _}, %Array{array: array2, shape: _}) do
list1 = List.flatten(array1)
list2 = List.flatten(array2)
Enum.map(list1, &Enum.map(list2, fn x -> x * &1 end))
|> new
end
end
|
lib/numexy.ex
| 0.842798
| 0.643133
|
numexy.ex
|
starcoder
|
defmodule Contentful.Delivery.Entries do
@moduledoc """
Entries allows for querying the entries of a space via `Contentful.Query`.
## Fetching a single entry
import Contentful.Query
alias Contentful.Delivery.Entries
{:ok, entry} = Entries |> fetch_one("my_entry_id")
## Simple querying of a collection of entries
import Contentful.Query
alias Contentful.Delivery.Entries
# fetches the entries based on the `space_id`, `environment` and `access_token` in config/config.exs
{:ok, entries, total: _total_count_of_entries} =
Entries |> fetch_all
# fetches the entries by passing `space_id`, `environment` and `access_token`
space_id = "<my_space_id>"
environment = "<my_environment>"
access_token = "<my_access_token>"
{:ok, entries, total: _total_count_of_entries} =
Entries |> fetch_all(space_id, environment, access_token)
## More advanced query with included assets
Entries can have assets included, which limits the amount of times a client has to request data from the server:
import Contentful.Query
alias Contentful.{Asset, Entry}
alias Contentful.Delivery.Entries
# The default include depth is 1 (max 10)
{:ok, [ %Entry{assets: assets} = entry | _ ], total: _total_count_of_entries} =
Entries |> include |> fetch_all
assets |> Enum.map(fn %Asset{fields: fields} -> {fields.title, fields.file} end)
# you can also just get the assets belonging to an entry lazily:
Entries |> include |> stream |> Stream.flat_map(fn entry -> entry.assets end) |> Enum.take(2)
## Accessing common resource attributes
Entries embed `Contentful.SysData` with extra information about the entry:
import Contentful.Query
alias Contentful.{ContentType, Entry, SysData}
alias Contentful.Delivery.Entries
{:ok, entry} = Entries |> fetch_one("my_entry_id")
"my_entry_id" = entry.id
"<a timestamp for updated_at>" = entry.sys.updated_at
"<a timestamp for created_at>" = entry.sys.created_at
"<a locale string>" = entry.sys.locale
%ContentType{id: "the_associated_content_type_id"} = entry.sys.content_type
"""
alias Contentful.{Asset, ContentType, Entry, Queryable, SysData}
alias Contentful.Delivery.Assets
alias Contentful.Entry.AssetResolver
@behaviour Queryable
@endpoint "/entries"
@impl Queryable
def endpoint do
@endpoint
end
@doc """
specifies the collection resolver for the case when assets are included within the entries response
"""
def resolve_collection_response(%{
"total" => total,
"items" => items,
"includes" => %{"Asset" => assets}
}) do
{:ok, entries, total: total} =
resolve_collection_response(%{"total" => total, "items" => items})
{:ok, entries |> Enum.map(fn entry -> entry |> resolve_assets(assets) end), total: total}
end
@doc """
maps a standard API response for queries compliant with the `Contentful.Queryable` behaviour.
"""
@impl Queryable
def resolve_collection_response(%{"total" => total, "items" => items}) do
entries =
items
|> Enum.map(&resolve_entity_response/1)
|> Enum.map(fn {:ok, entry} -> entry end)
{:ok, entries, total: total}
end
@impl Queryable
@doc """
  Maps a standard API response for a single returned entry, compliant with the `Contentful.Queryable` behaviour.
"""
def resolve_entity_response(%{
"fields" => fields,
"sys" => %{
"id" => id,
"revision" => rev,
"updatedAt" => updated_at,
"createdAt" => created_at,
"locale" => locale,
"contentType" => %{"sys" => content_type_id}
}
}) do
{:ok,
%Entry{
fields: fields,
sys: %SysData{
id: id,
revision: rev,
locale: locale,
updated_at: updated_at,
created_at: created_at,
content_type: %ContentType{id: content_type_id}
}
}}
end
@spec resolve_assets(Entry.t(), list(Asset.t())) :: Entry.t()
defp resolve_assets(%Entry{} = entry, assets) do
asset_ids = entry |> AssetResolver.find_linked_asset_ids()
assets_for_entry =
assets
|> Enum.map(&Assets.resolve_entity_response/1)
|> Enum.map(fn {:ok, asset} -> asset end)
|> Enum.filter(fn %Asset{sys: %SysData{id: id}} -> asset_ids |> Enum.member?(id) end)
entry |> Map.put(:assets, assets_for_entry)
end
end
|
lib/contentful_delivery/entries.ex
| 0.780621
| 0.485661
|
entries.ex
|
starcoder
|
defmodule JaSerializer.PhoenixView do
@moduledoc """
Use in your Phoenix.View to render jsonapi.org spec json.
See JaSerializer.Serializer for documentation on defining your serializer.
## Usage example
defmodule PhoenixExample.ArticleView do
use PhoenixExample.Web, :view
use JaSerializer.PhoenixView # Or use in web/web.ex
attributes [:title]
has_many :comments,
serializer: PhoenixExample.CommentsView,
include: false,
identifiers: :when_included
end
defmodule PhoenixExample.ArticlesController do
use PhoenixExample.Web, :controller
def index(conn, _params) do
render conn, "index.json-api", data: Repo.all(Article)
end
def show(conn, %{"id" => id}) do
article = Repo.get(Article, id) |> Repo.preload([:comments])
render conn, "show.json-api", data: article,
opts: [include: "comments"]
end
def create(conn, %{"data" => %{"attributes" => attrs}}) do
changeset = Article.changeset(%Article{}, attrs)
case Repo.insert(changeset) do
{:ok, article} ->
conn
|> put_status(201)
|> render("show.json-api", data: article)
{:error, changeset} ->
conn
|> put_status(422)
|> render(:errors, data: changeset)
end
end
end
"""
@doc false
defmacro __using__(opts \\ []) do
quote do
use JaSerializer, unquote(opts)
def render("index.json-api", data) do
JaSerializer.PhoenixView.render(__MODULE__, data)
end
def render("show.json-api", data) do
JaSerializer.PhoenixView.render(__MODULE__, data)
end
def render("errors.json-api", data) do
JaSerializer.PhoenixView.render_errors(data)
end
# These will be deprecated in the future
def render("index.json", data) do
IO.write(
:stderr,
IO.ANSI.format([
:red,
:bright,
"warning: Please use index.json-api instead. This will stop working in a future version.\n"
])
)
JaSerializer.PhoenixView.render(__MODULE__, data)
end
def render("show.json", data) do
IO.write(
:stderr,
IO.ANSI.format([
:red,
:bright,
"warning: Please use show.json-api instead. This will stop working in a future version.\n"
])
)
JaSerializer.PhoenixView.render(__MODULE__, data)
end
def render("errors.json", data) do
IO.write(
:stderr,
IO.ANSI.format([
:red,
:bright,
"warning: Please use errors.json-api instead. This will stop working in a future version.\n"
])
)
JaSerializer.PhoenixView.render_errors(data)
end
end
end
@doc """
Extracts the data and opts from the keyword list passed to render and returns
result of formatting.
"""
def render(serializer, data) do
struct = find_struct(serializer, data)
JaSerializer.format(serializer, struct, data[:conn], data[:opts] || [])
end
@doc """
Extracts the errors and opts from the data passed to render and returns
result of formatting.
`data` is expected to be either an invalid `Ecto.Changeset` or preformatted
errors as described in `JaSerializer.ErrorSerializer`.
"""
def render_errors(data) do
errors = data[:data] || data[:errors]
errors
|> error_serializer
|> apply(:format, [errors, data[:conn], data[:opts]])
end
defp error_serializer(%{__struct__: Ecto.Changeset}) do
JaSerializer.EctoErrorSerializer
end
defp error_serializer(_) do
JaSerializer.ErrorSerializer
end
defp find_struct(serializer, data) do
singular = singular_type(serializer.type)
plural = plural_type(serializer.type)
deprecated_struct = data[:model] || data[singular] || data[plural]
cond do
data[:data] ->
data[:data]
deprecated_struct ->
IO.write(
:stderr,
IO.ANSI.format([
:red,
:bright,
"warning: Passing data via `:model`, `:#{plural}` or `:#{singular}`
atoms to JaSerializer.PhoenixView has be deprecated. Please use
`:data` instead. This will stop working in a future version.\n"
])
)
deprecated_struct
      true ->
        nil
end
end
defp singular_type(type) do
type
|> Inflex.singularize()
|> String.to_atom()
end
defp plural_type(type) do
type
|> Inflex.pluralize()
|> String.to_atom()
end
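  # Controller-side usage sketch, mirroring the moduledoc (`MyApp.ArticleView`
  # is a hypothetical serializer): pass the structs under `:data`, plus
  # optional `:conn` and `:opts`.
  #
  #     JaSerializer.PhoenixView.render(MyApp.ArticleView,
  #       data: articles, conn: conn, opts: [include: "comments"])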
end
|
lib/ja_serializer/phoenix_view.ex
| 0.7324
| 0.42057
|
phoenix_view.ex
|
starcoder
|
defmodule Calendar.DateTime.Parse do
import Calendar.ParseUtil
@secs_between_year_0_and_unix_epoch 719528*24*3600 # From erlang calendar docs: there are 719528 days between Jan 1, 0 and Jan 1, 1970. Does not include leap seconds
@doc """
Parses an RFC 822 datetime string and shifts it to UTC.
Takes an RFC 822 `string` and `year_guessing_base`. The `year_guessing_base`
argument is used in case of a two digit year which is allowed in RFC 822.
The function tries to guess possible four digit versions of the year and
chooses the one closest to `year_guessing_base`. It defaults to 2015.
# Examples
# 2 digit year
iex> "5 Jul 15 20:26:13 PST" |> rfc822_utc
{:ok,
%DateTime{zone_abbr: "UTC", day: 6, hour: 4, minute: 26, month: 7,
second: 13, std_offset: 0, time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0,
year: 2015}}
# 82 as year
iex> "5 Jul 82 20:26:13 PST" |> rfc822_utc
{:ok,
%DateTime{zone_abbr: "UTC", day: 6, hour: 4, minute: 26, month: 7,
second: 13, std_offset: 0, time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0,
year: 1982}}
# 1982 as year
iex> "5 Jul 82 20:26:13 PST" |> rfc822_utc
{:ok,
%DateTime{zone_abbr: "UTC", day: 6, hour: 4, minute: 26, month: 7,
second: 13, std_offset: 0, time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0,
year: 1982}}
# 2 digit year and we use 2099 as the base guessing year
# which means that 15 should be interpreted as 2115 no 2015
iex> "5 Jul 15 20:26:13 PST" |> rfc822_utc(2099)
{:ok,
%DateTime{zone_abbr: "UTC", day: 6, hour: 4, minute: 26, month: 7,
second: 13, std_offset: 0, time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0,
year: 2115}}
"""
def rfc822_utc(string, year_guessing_base \\ 2015) do
string
|> capture_rfc822_string
|> change_captured_year_to_four_digit(year_guessing_base)
|> rfc2822_utc_from_captured
end
defp capture_rfc822_string(string) do
~r/(?<day>[\d]{1,2})[\s]+(?<month>[^\d]{3})[\s]+(?<year>[\d]{2,4})[\s]+(?<hour>[\d]{2})[^\d]?(?<min>[\d]{2})[^\d]?(?<sec>[\d]{2})[^\d]?(((?<offset_sign>[+-])(?<offset_hours>[\d]{2})(?<offset_mins>[\d]{2})|(?<offset_letters>[A-Z]{1,3})))?/
|> Regex.named_captures(string)
end
defp change_captured_year_to_four_digit(cap, year_guessing_base) do
changed_year = to_int(cap["year"])
|> two_to_four_digit_year(year_guessing_base)
|> to_string
%{cap | "year" => changed_year}
end
@doc """
Parses an RFC 2822 or RFC 1123 datetime string.
The datetime is shifted to UTC.
## Examples
iex> rfc2822_utc("Sat, 13 Mar 2010 11:23:03 -0800")
{:ok,
%DateTime{zone_abbr: "UTC", day: 13, hour: 19, minute: 23, month: 3, second: 3, std_offset: 0,
time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0, year: 2010}}
# PST is the equivalent of -0800 in the RFC 2822 standard
iex> rfc2822_utc("Sat, 13 Mar 2010 11:23:03 PST")
{:ok,
%DateTime{zone_abbr: "UTC", day: 13, hour: 19, minute: 23, month: 3, second: 3, std_offset: 0,
time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0, year: 2010}}
# Z is the equivalent of UTC
iex> rfc2822_utc("Sat, 13 Mar 2010 11:23:03 Z")
{:ok,
%DateTime{zone_abbr: "UTC", day: 13, hour: 11, minute: 23, month: 3, second: 3, std_offset: 0,
time_zone: "Etc/UTC", microsecond: {0, 0}, utc_offset: 0, year: 2010}}
"""
def rfc2822_utc(string) do
string
|> capture_rfc2822_string
|> rfc2822_utc_from_captured
end
defp rfc2822_utc_from_captured(cap) do
month_num = month_number_for_month_name(cap["month"])
{:ok, offset_in_secs} = offset_in_seconds_rfc2822(cap["offset_sign"],
cap["offset_hours"],
cap["offset_mins"],
cap["offset_letters"])
{:ok, result} = Calendar.DateTime.from_erl({{cap["year"]|>to_int, month_num, cap["day"]|>to_int}, {cap["hour"]|>to_int, cap["min"]|>to_int, cap["sec"]|>to_int}}, "Etc/UTC")
Calendar.DateTime.add(result, offset_in_secs*-1)
end
defp offset_in_seconds_rfc2822(_, _, _, "UTC"), do: {:ok, 0 }
defp offset_in_seconds_rfc2822(_, _, _, "UT"), do: {:ok, 0 }
defp offset_in_seconds_rfc2822(_, _, _, "Z"), do: {:ok, 0 }
defp offset_in_seconds_rfc2822(_, _, _, "GMT"), do: {:ok, 0 }
defp offset_in_seconds_rfc2822(_, _, _, "EDT"), do: {:ok, -4*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "EST"), do: {:ok, -5*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "CDT"), do: {:ok, -5*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "CST"), do: {:ok, -6*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "MDT"), do: {:ok, -6*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "MST"), do: {:ok, -7*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "PDT"), do: {:ok, -7*3600 }
defp offset_in_seconds_rfc2822(_, _, _, "PST"), do: {:ok, -8*3600 }
defp offset_in_seconds_rfc2822(_, _, _, letters) when letters != "", do: {:error, :invalid_letters}
defp offset_in_seconds_rfc2822(offset_sign, offset_hours, offset_mins, _letters) do
offset_in_secs = hours_mins_to_secs!(offset_hours, offset_mins)
offset_in_secs = case offset_sign do
"-" -> offset_in_secs*-1
_ -> offset_in_secs
end
{:ok, offset_in_secs}
end
@doc """
Takes unix time as an integer or float. Returns a DateTime struct.
## Examples
iex> unix!(1_000_000_000)
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {0, 0}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> unix!("1000000000")
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {0, 0}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> unix!("1000000000.010")
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {10_000, 3}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> unix!(1_000_000_000.9876)
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {987600, 6}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> unix!(1_000_000_000.999999)
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {999999, 6}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
"""
def unix!(unix_time_stamp) when is_integer(unix_time_stamp) do
unix_time_stamp + @secs_between_year_0_and_unix_epoch
    |> :calendar.gregorian_seconds_to_datetime
|> Calendar.DateTime.from_erl!("Etc/UTC")
end
def unix!(unix_time_stamp) when is_float(unix_time_stamp) do
{whole, micro} = int_and_microsecond_for_float(unix_time_stamp)
whole + @secs_between_year_0_and_unix_epoch
    |> :calendar.gregorian_seconds_to_datetime
|> Calendar.DateTime.from_erl!("Etc/UTC", micro)
end
def unix!(unix_time_stamp) when is_binary(unix_time_stamp) do
{int, frac} = Integer.parse(unix_time_stamp)
unix!(int) |> Map.put(:microsecond, parse_fraction(frac))
end
defp int_and_microsecond_for_float(float) do
float_as_string = Float.to_string(float, [decimals: 6, compact: false])
{int, frac} = Integer.parse(float_as_string)
{int, parse_fraction(frac)}
end
@doc """
Parse JavaScript style milliseconds since epoch.
# Examples
iex> js_ms!("1000000000123")
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {123000,3}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> js_ms!(1_000_000_000_123)
%DateTime{zone_abbr: "UTC", day: 9, microsecond: {123000,3}, hour: 1, minute: 46, month: 9, second: 40, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2001}
iex> js_ms!(1424102000000)
%DateTime{zone_abbr: "UTC", day: 16, hour: 15, microsecond: {0, 3}, minute: 53, month: 2, second: 20, std_offset: 0, time_zone: "Etc/UTC", utc_offset: 0, year: 2015}
"""
def js_ms!(millisec) when is_integer(millisec) do
result = (millisec/1000.0) |> unix!
%DateTime{result| microsecond: {elem(result.microsecond, 0), 3}} # change usec precision to 3
end
def js_ms!(millisec) when is_binary(millisec) do
{int, ""} = millisec
|> Integer.parse
js_ms!(int)
end
@doc """
Parses a timestamp in RFC 2616 format.
iex> httpdate("Sat, 06 Sep 2014 09:09:08 GMT")
{:ok, %DateTime{year: 2014, month: 9, day: 6, hour: 9, minute: 9, second: 8, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0, microsecond: {0, 0}}}
iex> httpdate("invalid")
{:bad_format, nil}
iex> httpdate("Foo, 06 Foo 2014 09:09:08 GMT")
{:error, :invalid_datetime}
"""
def httpdate(rfc2616_string) do
~r/(?<weekday>[^\s]{3}),\s(?<day>[\d]{2})\s(?<month>[^\s]{3})[\s](?<year>[\d]{4})[^\d](?<hour>[\d]{2})[^\d](?<min>[\d]{2})[^\d](?<sec>[\d]{2})\sGMT/
|> Regex.named_captures(rfc2616_string)
|> httpdate_parsed
end
defp httpdate_parsed(nil), do: {:bad_format, nil}
defp httpdate_parsed(mapped) do
Calendar.DateTime.from_erl(
{
{mapped["year"]|>to_int,
mapped["month"]|>month_number_for_month_name,
mapped["day"]|>to_int},
{mapped["hour"]|>to_int, mapped["min"]|>to_int, mapped["sec"]|>to_int }
}, "Etc/UTC")
end
@doc """
Like `httpdate/1`, but returns the result without tagging it with :ok
in case of success. In case of errors it raises.
iex> httpdate!("Sat, 06 Sep 2014 09:09:08 GMT")
%DateTime{year: 2014, month: 9, day: 6, hour: 9, minute: 9, second: 8, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}
"""
def httpdate!(rfc2616_string) do
{:ok, dt} = httpdate(rfc2616_string)
dt
end
@doc """
Parse RFC 3339 timestamp strings as UTC. If the timestamp is not in UTC it
will be shifted to UTC.
## Examples
iex> rfc3339_utc("fooo")
{:bad_format, nil}
iex> rfc3339_utc("1996-12-19T16:39:57Z")
{:ok, %DateTime{year: 1996, month: 12, day: 19, hour: 16, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
iex> rfc3339_utc("1996-12-19T16:39:57.123Z")
{:ok, %DateTime{year: 1996, month: 12, day: 19, hour: 16, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0, microsecond: {123000, 3}}}
iex> rfc3339_utc("1996-12-19T16:39:57-08:00")
{:ok, %DateTime{year: 1996, month: 12, day: 20, hour: 0, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
      # No separation chars between numbers. Not RFC3339, but we still parse it.
iex> rfc3339_utc("19961219T163957-08:00")
{:ok, %DateTime{year: 1996, month: 12, day: 20, hour: 0, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
# Offset does not have colon (-0800). That makes it ISO8601, but not RFC3339. We still parse it.
iex> rfc3339_utc("1996-12-19T16:39:57-0800")
{:ok, %DateTime{year: 1996, month: 12, day: 20, hour: 0, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
"""
def rfc3339_utc(<<year::4-bytes, ?-, month::2-bytes , ?-, day::2-bytes , ?T, hour::2-bytes, ?:, min::2-bytes, ?:, sec::2-bytes, ?Z>>) do
    # faster version for certain formats of RFC3339
{{year|>to_int, month|>to_int, day|>to_int},{hour|>to_int, min|>to_int, sec|>to_int}} |> Calendar.DateTime.from_erl("Etc/UTC")
end
def rfc3339_utc(rfc3339_string) do
parsed = rfc3339_string
|> parse_rfc3339_string
if parsed do
parse_rfc3339_as_utc_parsed_string(parsed, parsed["z"], parsed["offset_hours"], parsed["offset_mins"])
else
{:bad_format, nil}
end
end
@doc """
Parses an RFC 3339 timestamp and shifts it to
the specified time zone.
iex> rfc3339("1996-12-19T16:39:57Z", "Etc/UTC")
{:ok, %DateTime{year: 1996, month: 12, day: 19, hour: 16, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0}}
iex> rfc3339("1996-12-19T16:39:57.1234Z", "Etc/UTC")
{:ok, %DateTime{year: 1996, month: 12, day: 19, hour: 16, minute: 39, second: 57, time_zone: "Etc/UTC", zone_abbr: "UTC", std_offset: 0, utc_offset: 0, microsecond: {123400, 4}}}
iex> rfc3339("1996-12-19T16:39:57-8:00", "America/Los_Angeles")
{:ok, %DateTime{zone_abbr: "PST", day: 19, hour: 16, minute: 39, month: 12, second: 57, std_offset: 0, time_zone: "America/Los_Angeles", utc_offset: -28800, year: 1996}}
iex> rfc3339("invalid", "America/Los_Angeles")
{:bad_format, nil}
iex> rfc3339("1996-12-19T16:39:57-08:00", "invalid time zone name")
{:invalid_time_zone, nil}
"""
def rfc3339(rfc3339_string, "Etc/UTC") do
rfc3339_utc(rfc3339_string)
end
def rfc3339(rfc3339_string, time_zone) do
rfc3339_utc(rfc3339_string) |> do_parse_rfc3339_with_time_zone(time_zone)
end
defp do_parse_rfc3339_with_time_zone({utc_tag, _utc_dt}, _time_zone) when utc_tag != :ok do
{utc_tag, nil}
end
defp do_parse_rfc3339_with_time_zone({_utc_tag, utc_dt}, time_zone) do
utc_dt |> Calendar.DateTime.shift_zone(time_zone)
end
defp parse_rfc3339_as_utc_parsed_string(mapped, z, _offset_hours, _offset_mins) when z == "Z" or z=="z" do
parse_rfc3339_as_utc_parsed_string(mapped, "", "00", "00")
end
defp parse_rfc3339_as_utc_parsed_string(mapped, _z, offset_hours, offset_mins) when offset_hours == "00" and offset_mins == "00" do
Calendar.DateTime.from_erl(erl_date_time_from_regex_map(mapped), "Etc/UTC", parse_fraction(mapped["fraction"]))
end
defp parse_rfc3339_as_utc_parsed_string(mapped, _z, offset_hours, offset_mins) do
offset_in_secs = hours_mins_to_secs!(offset_hours, offset_mins)
offset_in_secs = case mapped["offset_sign"] do
"-" -> offset_in_secs*-1
_ -> offset_in_secs
end
erl_date_time = erl_date_time_from_regex_map(mapped)
parse_rfc3339_as_utc_with_offset(offset_in_secs, erl_date_time)
end
defp parse_fraction("." <> frac), do: parse_fraction(frac)
defp parse_fraction(""), do: {0, 0}
# parse and return microseconds
defp parse_fraction(string) do
usec = String.slice(string, 0..5)
|> String.ljust(6, ?0)
|> Integer.parse
|> elem(0)
{usec, min(String.length(string), 6)}
end
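  # Worked examples for parse_fraction/1 (fraction of a second -> {usec, precision}):
  #   parse_fraction("123")      #=> {123000, 3}  ("123" is right-padded to "123000")
  #   parse_fraction(".1234")    #=> {123400, 4}  (the leading dot is stripped first)
  #   parse_fraction("9876543")  #=> {987654, 6}  (precision is capped at 6 digits)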
defp parse_rfc3339_as_utc_with_offset(offset_in_secs, erl_date_time) do
greg_secs = :calendar.datetime_to_gregorian_seconds(erl_date_time)
new_time = greg_secs - offset_in_secs
|> :calendar.gregorian_seconds_to_datetime
Calendar.DateTime.from_erl(new_time, "Etc/UTC")
end
defp erl_date_time_from_regex_map(mapped) do
erl_date_time_from_strings({{mapped["year"],mapped["month"],mapped["day"]},{mapped["hour"],mapped["min"],mapped["sec"]}})
end
defp erl_date_time_from_strings({{year, month, date},{hour, min, sec}}) do
{ {year|>to_int, month|>to_int, date|>to_int},
{hour|>to_int, min|>to_int, sec|>to_int} }
end
defp parse_rfc3339_string(rfc3339_string) do
~r/(?<year>[\d]{4})[^\d]?(?<month>[\d]{2})[^\d]?(?<day>[\d]{2})[^\d](?<hour>[\d]{2})[^\d]?(?<min>[\d]{2})[^\d]?(?<sec>[\d]{2})(\.(?<fraction>[\d]+))?(?<z>[zZ])?((?<offset_sign>[\+\-])(?<offset_hours>[\d]{1,2}):?(?<offset_mins>[\d]{2}))?/
|> Regex.named_captures(rfc3339_string)
end
end
|
data/web/deps/calendar/lib/calendar/date_time/parse.ex
| 0.835383
| 0.531209
|
parse.ex
|
starcoder
|
defmodule RealworldPhoenix.Articles do
@moduledoc """
The Articles context.
"""
import Ecto.Query, warn: false
alias RealworldPhoenix.Repo
alias RealworldPhoenix.Articles.Article
alias RealworldPhoenix.Articles.Comment
alias RealworldPhoenix.Accounts.User
alias RealworldPhoenix.Articles.Favorite
alias RealworldPhoenix.Articles.Tag
alias RealworldPhoenix.Profiles.FollowRelated
@default_limit 20
@default_offset 0
def article_preload(article_or_articles) do
article_or_articles
|> Repo.preload(:author)
|> Repo.preload(:favorites)
|> Repo.preload(:tagList)
end
@doc """
Returns the list of articles.
"""
def list_articles(params \\ []) do
{limit, params} = params |> Keyword.pop(:limit, @default_limit)
{offset, params} = params |> Keyword.pop(:offset, @default_offset)
from(a in Article)
|> article_where(params)
|> limit(^limit)
|> offset(^offset)
|> order_by(desc: :inserted_at)
|> Repo.all()
end
def list_articles_feed(user, params \\ []) do
{limit, params} = params |> Keyword.pop(:limit, @default_limit)
{offset, _} = params |> Keyword.pop(:offset, @default_offset)
from(a in Article,
inner_join: f in Favorite,
on: a.id == f.article_id,
where: f.user_id == ^user.id,
limit: ^limit,
offset: ^offset
)
|> order_by(desc: :inserted_at)
|> Repo.all()
end
def article_where(query, []), do: query
def article_where(query, [{:tag, tag} | rest]) do
query
|> join(:left, [a], at in assoc(a, :tagList), as: :tags)
|> where([tags: tags], tags.name == ^tag)
|> article_where(rest)
end
def article_where(query, [{:author, author_name} | rest]) do
query
|> join(:inner, [a], author in User, as: :author, on: a.author_id == author.id)
|> where([author: author], author.username == ^author_name)
|> article_where(rest)
end
def article_where(query, [{:favorited, favorited} | rest]) do
query
|> join(:left, [a], f in Favorite, as: :favorite, on: a.id == f.article_id)
|> join(:left, [favorite: f], fu in User, as: :favorite_user, on: f.user_id == fu.id)
|> where([favorite_user: fu], fu.username == ^favorited)
|> article_where(rest)
end
def article_where(query, [{:user, %User{} = user} | rest]) do
    article_favorite(query, user)
|> article_where(rest)
end
def article_where(query, _), do: query
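  # Filter-composition sketch: each recognized keyword above adds a join/where
  # clause and recurses on the rest, so filters can be freely combined, e.g.
  #
  #     list_articles(tag: "elixir", author: "jane", limit: 5)
  #
  # ("elixir" and "jane" are hypothetical tag and username values.)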
  def article_favorite(query, nil), do: query
  def article_favorite(query, user) do
query
|> join(:left, [a], f in Favorite,
as: :myfavorite,
on: a.id == f.article_id and f.user_id == ^user.id
)
|> select_merge([myfavorite: f], %{favorited: not is_nil(f)})
end
@doc """
Gets a single article.
Raises `Ecto.NoResultsError` if the Article does not exist.
## Examples
iex> get_article!(123)
%Article{}
iex> get_article!(456)
** (Ecto.NoResultsError)
"""
def get_article!(id), do: Repo.get!(Article, id) |> Repo.preload(:author)
def get_article_by_slug(slug, user \\ nil) do
article =
get_article_by_slug_query(slug, user)
|> Repo.one()
    if article, do: article, else: {:error, :not_found}
end
defp get_article_by_slug_query(slug, user) do
from(a in Article, where: a.slug == ^slug)
    |> article_favorite(user)
end
@doc """
  Creates an article.
## Examples
iex> create_article(%{field: value})
{:ok, %Article{}}
iex> create_article(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_article(attrs \\ %{}) do
%Article{}
|> Article.changeset(attrs)
|> Repo.insert()
end
@doc """
  Updates an article.
## Examples
iex> update_article(article, %{field: new_value})
{:ok, %Article{}}
iex> update_article(article, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_article(%Article{} = article, attrs) do
article
|> Repo.preload(:tagList)
|> Article.changeset(attrs)
|> Repo.update()
end
@doc """
  Deletes an article.
## Examples
iex> delete_article(article)
{:ok, %Article{}}
iex> delete_article(article)
{:error, %Ecto.Changeset{}}
"""
def delete_article(%Article{} = article) do
Repo.delete(article)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking article changes.
## Examples
iex> change_article(article)
%Ecto.Changeset{data: %Article{}}
"""
def change_article(%Article{} = article, attrs \\ %{}) do
Article.changeset(article, attrs)
end
def create_comment(attrs) do
Comment.changeset(%Comment{}, attrs)
|> Repo.insert()
end
def create_comment(comment, %Article{} = article, %User{} = author) do
attrs =
comment
|> Map.put(:article_id, article.id)
|> Map.put(:author_id, author.id)
Comment.changeset(%Comment{}, attrs)
|> Repo.insert()
end
def list_comment_by_article_slug(slug, user) do
    from(c in Comment,
      join: a in Article,
      on: c.article_id == a.id,
      where: a.slug == ^slug
    )
|> comment_following(user)
|> Repo.all()
end
def get_comment!(id) do
Repo.get!(Comment, id)
end
def delete_comment(%Comment{} = comment) do
Repo.delete(comment)
end
def comment_following(query, %User{id: user_id}) do
query
|> join(:inner, [c], at in assoc(c, :author), as: :author)
|> join(:left, [c], fr in FollowRelated,
as: :fr,
on: fr.target_id == c.author_id and fr.user_id == ^user_id
)
|> select_merge([fr: fr, author: author], %{
author: merge(author, %{following: not is_nil(fr)})
})
end
def comment_following(query, _), do: query
def create_favorite(%User{} = user, %Article{} = article) do
%Favorite{}
|> Favorite.changeset(%{user_id: user.id, article_id: article.id})
|> Repo.insert()
end
def delete_favorite(%User{} = user, %Article{} = article) do
    from(f in Favorite,
      where: f.user_id == ^user.id and f.article_id == ^article.id
    )
|> Repo.delete_all()
end
def list_tags() do
Repo.all(Tag)
end
end
|
lib/realworld_phoenix/articles.ex
| 0.819713
| 0.474875
|
articles.ex
|
starcoder
|
defmodule MeshxRpc.Common.Options do
@moduledoc false
def common(),
do: [
pool_opts: [
type: :keyword_list,
default: [],
doc: """
worker pool options for RPC client and server.
        Server pool is built using the [`ranch` socket acceptor pool](https://hex.pm/packages/ranch). `pool_opts` correspond in this case to `ranch` [transport options](https://ninenines.eu/docs/en/ranch/2.0/manual/ranch/).
        Client pool is built using [`poolboy`](https://hex.pm/packages/poolboy); `pool_opts` correspond here directly to [`poolboy` options](https://github.com/devinus/poolboy#options). Users usually should not set any `pool_opts` for server pools. Options that can be set for RPC client pools:
        * `:size` - maximum pool size, default 10,
        * `:max_overflow` - maximum number of workers created if pool is empty,
        * `:strategy` - `:lifo` or `:fifo`, determines whether checked-in workers should be placed first or last in the line of available workers. So, `:lifo` operates like a traditional stack; `:fifo` like a queue. Default is `:lifo`.
"""
],
address: [
type: {:custom, __MODULE__, :address, []},
required: true,
doc: """
        Address on which the server will listen for requests, or to which the client will send requests.
Two address types are supported:
* ethernet interface address: `{:tcp, ip, port}`, where `ip` is defined as tuple (e.g. `{127, 0, 0, 1}`) and `port` is integer in the range `1..65535`,
* Unix Domain Socket address: `{:uds, path}`, where socket `path` is defined as string, e.g. `{:uds, "/tmp/beam.sock"}`
"""
],
blk_max_size: [
type: {:custom, __MODULE__, :int_range, [200..268_435_456]},
default: 16384,
doc: """
user request function arguments are first serialized into binary using function specified with `:serialize_mfa` option. Binary is then split into smaller blocks send over the wire. Option defines maximum send block byte size. Must be between 200 bytes and 256 MB (`200..268_435_456`).
"""
],
cks_mfa: [
type: {:or, [:mfa, :atom]},
default: nil,
doc: """
block checksum function `{module, function, argument}`. 2-arity function accepting: 1st argument - binary data to calculate checksum, 2nd argument - as set in option `argument`. Function result should be calculated checksum of `binary()` type. Example checksum implementation is `MeshxRpc.Protocol.Default.checksum/2`, using `:erlang.crc32/1`. To use this function set: `[cks_mfa: {MeshxRpc.Protocol.Default, :checksum, []}]`. If option is left undefined checksums are not calculated.
"""
],
conn_ref_mfa: [
type: {:or, [:mfa, :string, :atom]},
default: {MeshxRpc.Protocol.Default, :conn_ref, []},
doc: """
`{module, function, argument}` function generating connection reference, 0-arity.
"""
],
deserialize_mfa: [
type: :mfa,
default: {MeshxRpc.Protocol.Default, :deserialize, []},
doc: """
`{module, function, argument}` function used to de-serialize request argument, 3-arity. Function should reverse data serialization process executed by function specified by `:serialize_mfa`. First function argument is binary data to de-serialize, second is option `argument`, third is serialization flag generated by `:serialize_mfa`. Function should return `{:ok, deserialized}` if successful, `{:error, reason}` otherwise.
"""
],
hsk_dgt_mfa: [
type: :mfa,
default: {MeshxRpc.Protocol.Default, :checksum, []},
doc: """
`{module, function, argument}` function used to calculate digest in connection handshake, 2-arity. First argument is binary data for which digest should be calculated, second is option `argument`.
"""
],
node_ref_mfa: [
type: {:or, [:mfa, :string, :atom]},
default: {MeshxRpc.Protocol.Default, :node_ref, []},
doc: """
`{module, function, argument}` function generating request node reference, 0-arity.
"""
],
quiet_on_hsk_error?: [
type: :boolean,
default: false,
doc: """
        when a connection is established between RPC server and client, the first step is a two-way handshake. If the handshake fails, the remote node can be notified about the failure, or the failure can be silently discarded.
"""
],
serialize_mfa: [
type: :mfa,
default: {MeshxRpc.Protocol.Default, :serialize, []},
doc: """
`{module, function, argument}` function used to serialize user request function argument(s), 2-arity. First argument is erlang term that requires serialization, second is option `argument`. Function should return if successful `{:ok, serialized_binary, serialization_flag}`. `serialization_flag` should be one byte integer (`0..255`). It states how data should be de-serialized and will be passed as third argument to function specified in `:deserialize_mfa`. In case of error function should return `{:error, reason}`.
"""
],
shared_key: [
type: :string,
default: "",
doc: """
        the shared key must be the same for the connecting server and client. It can be used to specify, for example, an API version: `shared_key: "api_ver_01"`.
"""
],
socket_opts: [
type: :keyword_list,
default: [],
doc: """
connection socket options. Check `:inet.setopts/2` for available settings.
"""
],
svc_ref_mfa: [
type: {:or, [:mfa, :string, :atom]},
doc: """
`{module, function, argument}` function generating request service reference, 0-arity. If not defined, by default service reference will be calculated from: `service_id |> to_string() |> String.slice(0..255)`.
"""
],
telemetry_prefix: [
type: {:list, :atom},
doc: """
specifies prefix used when executing telemetry events. If not defined package name and service module will be used, e.g.: `[:meshx_rpc, MyApp.Rpc.Server1]`.
"""
],
timeout_hsk: [
type: :timeout,
default: 5_000,
doc: """
handshake timeout.
"""
],
timeout_cks: [
type: :timeout,
default: 500,
doc: """
timeout for function calculating block checksums specified by `:cks_mfa`.
"""
],
gen_statem_opts: [
type: :keyword_list,
default: [],
doc: """
        options passed as the third argument to `:gen_statem.start_link/3` when starting an RPC server or client pool worker.
"""
]
]
def int_range(val, min..max) when val in min..max, do: {:ok, val}
def int_range(val, range), do: {:error, "Expected integer in range min..max, got: #{inspect(val)} in #{inspect(range)}."}
def address(val) do
case val do
{:tcp, ip, port} when port in 1..65535 ->
case :inet.ntoa(ip) do
{:error, _e} ->
{:error,
~s(Expected socket address, e.g.: {:tcp, {127, 0, 0, 1}, 3333} or {:uds, "/tmp/beam.sock"}. Got: #{inspect(val)})}
_ip ->
{:ok, val}
end
{:uds, path} when is_bitstring(path) ->
{:ok, {:uds, {:local, path}, 0}}
_e ->
{:error,
~s(Expected socket address, e.g.: {:tcp, {127, 0, 0, 1}, 3333} or {:uds, "/tmp/beam.sock"}. Got: #{inspect(val)})}
end
end
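  # Validator examples (traced directly from the clauses above):
  #   address({:tcp, {127, 0, 0, 1}, 3333}) #=> {:ok, {:tcp, {127, 0, 0, 1}, 3333}}
  #   address({:uds, "/tmp/beam.sock"})     #=> {:ok, {:uds, {:local, "/tmp/beam.sock"}, 0}}
  #   address(:bad)                         #=> {:error, "Expected socket address, ..."}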
end
|
lib/common/options.ex
| 0.91243
| 0.553385
|
options.ex
|
starcoder
|
defmodule RDF.Star.Statement do
@moduledoc """
Helper functions for RDF-star statements.
An RDF-star statement is either a `RDF.Star.Triple` or a `RDF.Star.Quad`.
"""
alias RDF.Star.{Triple, Quad}
alias RDF.PropertyMap
@type subject :: RDF.Statement.subject() | Triple.t()
@type predicate :: RDF.Statement.predicate()
@type object :: RDF.Statement.object() | Triple.t()
@type graph_name :: RDF.Statement.graph_name()
@type coercible_subject :: RDF.Statement.coercible_subject() | Triple.t()
@type coercible_predicate :: RDF.Statement.coercible_predicate()
@type coercible_object :: RDF.Statement.coercible_object() | Triple.t()
@type coercible_graph_name :: RDF.Statement.coercible_graph_name()
@type t :: Triple.t() | Quad.t()
@type coercible :: Triple.coercible() | Quad.coercible()
# deprecated: This will be removed in v0.11.
@type coercible_t :: coercible
@type term_mapping :: RDF.Statement.term_mapping()
@doc """
Creates a `RDF.Star.Triple` or `RDF.Star.Quad` with proper RDF values.
An error is raised when the given elements are not coercible to RDF-star values.
Note: The `RDF.statement` function is a shortcut to this function.
## Examples
iex> RDF.Star.Statement.new({EX.S, EX.p, 42})
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42)}
iex> RDF.Star.Statement.new({EX.S, EX.p, 42, EX.Graph})
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42), RDF.iri("http://example.com/Graph")}
iex> RDF.Star.Statement.new({EX.S, :p, 42, EX.Graph}, RDF.PropertyMap.new(p: EX.p))
{RDF.iri("http://example.com/S"), RDF.iri("http://example.com/p"), RDF.literal(42), RDF.iri("http://example.com/Graph")}
"""
def new(tuple, property_map \\ nil)
def new({_, _, _} = tuple, property_map), do: Triple.new(tuple, property_map)
def new({_, _, _, _} = tuple, property_map), do: Quad.new(tuple, property_map)
defdelegate new(s, p, o), to: Triple, as: :new
defdelegate new(s, p, o, g), to: Quad, as: :new
@doc """
Creates a `RDF.Star.Statement` tuple with proper RDF values.
An error is raised when the given elements are not coercible to RDF-star values.
## Examples
iex> RDF.Star.Statement.coerce {"http://example.com/S", "http://example.com/p", 42}
{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42)}
iex> RDF.Star.Statement.coerce {"http://example.com/S", "http://example.com/p", 42, "http://example.com/Graph"}
{~I<http://example.com/S>, ~I<http://example.com/p>, RDF.literal(42), ~I<http://example.com/Graph>}
"""
@spec coerce(coercible(), PropertyMap.t() | nil) :: Triple.t() | Quad.t()
def coerce(statement, property_map \\ nil)
def coerce({_, _, _} = triple, property_map), do: Triple.new(triple, property_map)
def coerce({_, _, _, _} = quad, property_map), do: Quad.new(quad, property_map)
@doc false
@spec coerce_subject(coercible_subject, PropertyMap.t() | nil) :: subject
def coerce_subject(subject, property_map \\ nil)
def coerce_subject({_, _, _} = triple, property_map), do: Triple.new(triple, property_map)
def coerce_subject(subject, _), do: RDF.Statement.coerce_subject(subject)
defdelegate coerce_predicate(predicate), to: RDF.Statement
defdelegate coerce_predicate(predicate, property_map), to: RDF.Statement
@doc false
@spec coerce_object(coercible_object, PropertyMap.t() | nil) :: object
def coerce_object(object, property_map \\ nil)
def coerce_object({_, _, _} = triple, property_map), do: Triple.new(triple, property_map)
def coerce_object(object, _), do: RDF.Statement.coerce_object(object)
defdelegate coerce_graph_name(iri), to: RDF.Statement
@doc """
Checks if the given tuple is a valid RDF-star statement, i.e. RDF-star triple or quad.
  The elements of a valid RDF-star statement must be RDF terms. On the subject
  position only IRIs, blank nodes and triples are allowed, while on the predicate and graph
  context position only IRIs are allowed. The object position can be any RDF term or a triple.
"""
@spec valid?(Triple.t() | Quad.t() | any) :: boolean
def valid?(tuple)
def valid?({subject, predicate, object}) do
valid_subject?(subject) && valid_predicate?(predicate) && valid_object?(object)
end
def valid?({subject, predicate, object, graph_name}) do
valid_subject?(subject) && valid_predicate?(predicate) && valid_object?(object) &&
valid_graph_name?(graph_name)
end
def valid?(_), do: false
@spec valid_subject?(subject | any) :: boolean
def valid_subject?({_, _, _} = triple), do: Triple.valid?(triple)
def valid_subject?(any), do: RDF.Statement.valid_subject?(any)
@spec valid_predicate?(predicate | any) :: boolean
def valid_predicate?(any), do: RDF.Statement.valid_predicate?(any)
@spec valid_object?(object | any) :: boolean
def valid_object?({_, _, _} = triple), do: Triple.valid?(triple)
def valid_object?(any), do: RDF.Statement.valid_object?(any)
@spec valid_graph_name?(graph_name | any) :: boolean
def valid_graph_name?(any), do: RDF.Statement.valid_graph_name?(any)
@doc """
Checks if the given tuple is a RDF-star statement with a quoted triple on subject or object position.
Note: This function won't check if the given tuple or the quoted triple is valid.
Use `valid?/1` for this purpose.
## Examples
iex> RDF.Star.Statement.star_statement?({EX.S, EX.P, EX.O})
false
iex> RDF.Star.Statement.star_statement?({EX.AS, EX.AP, {EX.S, EX.P, EX.O}})
true
iex> RDF.Star.Statement.star_statement?({{EX.S, EX.P, EX.O}, EX.AP, EX.AO})
true
"""
@spec star_statement?(Triple.t() | Quad.t() | any) :: boolean
def star_statement?({{_, _, _}, _, _}), do: true
def star_statement?({_, _, {_, _, _}}), do: true
def star_statement?(_), do: false
end
|
lib/rdf/star/statement.ex
| 0.919503
| 0.600481
|
statement.ex
|
starcoder
|
defmodule Flex.Set do
alias Flex.{MembershipFun, Set}
@moduledoc """
  An interface for creating Fuzzy Set structs.
"""
defstruct mf_type: nil,
mf: nil,
mf_center: nil,
mf_params: nil,
tag: nil
@typedoc """
Fuzzy Set struct.
  - `:mf_type` - Defines which type of membership function the set uses.
- `:mf` - Anonymous function reference of the membership function.
- `:mf_center` - The center point of the membership function.
- `:mf_params` - The parameters of the membership function.
- `:tag` - Linguistic name of the fuzzy set.
"""
@type t :: %__MODULE__{
mf_type: String.t(),
mf: fun(),
mf_center: integer() | float(),
mf_params: keyword(),
tag: String.t()
}
@doc """
Creates a Fuzzy set.
The following options are require:
* `:mf_type` - (string) Defines which type of membership function uses the set (e.g., "triangle").
* `:tag` - (string) defines the linguistic name of the fuzzy set (e.g., "too hot"),
* `:mf_params` - The parameters of the membership function, see Membership functions.
"""
@spec new(keyword) :: Flex.Set.t()
def new(opt) do
mf_type = Keyword.fetch!(opt, :mf_type)
tag = Keyword.fetch!(opt, :tag)
mf_params = Keyword.fetch!(opt, :mf_params)
{mf, c} = build_membership_function(mf_type, mf_params)
%Set{mf_type: mf_type, tag: tag, mf: mf, mf_params: mf_params, mf_center: c}
end
defp build_membership_function("saturation", mf_params), do: MembershipFun.saturation(mf_params)
defp build_membership_function("shoulder", mf_params), do: MembershipFun.shoulder(mf_params)
defp build_membership_function("triangle", mf_params), do: MembershipFun.triangle(mf_params)
defp build_membership_function("trapezoidal", mf_params),
do: MembershipFun.trapezoidal(mf_params)
defp build_membership_function("gaussian", mf_params), do: MembershipFun.gaussian(mf_params)
defp build_membership_function("bell", mf_params), do: MembershipFun.gbell(mf_params)
defp build_membership_function("pi_shaped", mf_params), do: MembershipFun.pi_shaped(mf_params)
defp build_membership_function("s_shaped", mf_params), do: MembershipFun.s_shaped(mf_params)
defp build_membership_function("z_shaped", mf_params), do: MembershipFun.z_shaped(mf_params)
defp build_membership_function("sigmoid", mf_params), do: MembershipFun.sigmoid(mf_params)
defp build_membership_function("linear_combination", mf_params),
do: MembershipFun.linear_combination(mf_params)
defp build_membership_function(_mf_type, _mf_params),
do: raise("Membership function not supported")
@doc """
Updates a Fuzzy set depending on the gradient.
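## Example
A sketch of one gradient step (values illustrative): each parameter `a_ij` is
moved by `-learning_rate * gradient_j`, as in the `Enum.zip/2` pipeline below.
```
iex> set = Flex.Set.new(mf_type: "triangle", tag: "hot", mf_params: [20, 30, 40])
iex> Flex.Set.update(set, [1.0, 1.0, 1.0], 0.5).mf_params
[19.5, 29.5, 39.5]
```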
"""
@spec update(Flex.Set.t(), list(), number()) :: Flex.Set.t()
def update(fuzzy_set, gradient, learning_rate) do
new_mf_params =
fuzzy_set.mf_params
|> Enum.zip(gradient)
|> Enum.map(fn {aij, gradient_j} -> aij - learning_rate * gradient_j end)
new(mf_type: fuzzy_set.mf_type, tag: fuzzy_set.tag, mf_params: new_mf_params)
end
@doc """
Updates a Fuzzy set with new membership function parameters.
"""
@spec update(Flex.Set.t(), list()) :: Flex.Set.t()
def update(fuzzy_set, new_mf_params) do
new(mf_type: fuzzy_set.mf_type, tag: fuzzy_set.tag, mf_params: new_mf_params)
end
end
| lib/set.ex | 0.869867 | 0.650557 | set.ex | starcoder |
defmodule Asteroid.OIDC.AuthenticatedSession do
@moduledoc """
Convenience functions to work with authenticated sessions
The `%Asteroid.OIDC.AuthenticatedSession{}` object has the following meaningful members in
its `:data` field:
- `"current_acr"`: the current ACR, as calculated (`t:Asteroid.OIDC.acr/0`)
- `"current_auth_time"`: the current authentication time, as calculated (`non_neg_integer`)
- `"exp"`: expiration time (`non_neg_integer()`), to be optionally used if authentication
events are not used
"""
import Asteroid.Utils
alias Asteroid.Context
alias Asteroid.OIDC
alias Asteroid.OIDC.AuthenticationEvent
alias Asteroid.Subject
alias Asteroid.Token
alias Asteroid.Token.RefreshToken
@type id :: String.t()
@enforce_keys [:id, :subject_id]
defstruct [:id, :subject_id, :data]
@type t :: %__MODULE__{
id: id(),
subject_id: Subject.id(),
data: map()
}
@doc """
Generates a new authenticated session
"""
@spec gen_new(Subject.id()) :: t()
def gen_new(subject_id) do
%__MODULE__{
id: secure_random_b64(),
subject_id: subject_id,
data: %{}
}
end
@doc """
Gets an authenticated session from the store
"""
@spec get(id(), Keyword.t()) :: {:ok, t()} | {:error, Exception.t()}
def get(authenticated_session_id, _opts \\ []) do
as_store_module = astrenv(:object_store_authenticated_session)[:module]
as_store_opts = astrenv(:object_store_authenticated_session)[:opts] || []
case as_store_module.get(authenticated_session_id, as_store_opts) do
{:ok, authenticated_session} when not is_nil(authenticated_session) ->
{:ok, authenticated_session}
{:ok, nil} ->
{:error,
Token.InvalidTokenError.exception(
sort: "authenticated session",
reason: "not found in the store",
id: authenticated_session_id
)}
{:error, error} ->
{:error, error}
end
end
@doc """
Stores an authenticated session
"""
@spec store(t(), Context.t()) :: {:ok, t()} | {:error, any()}
def store(authenticated_session, ctx \\ %{})
def store(authenticated_session, ctx) do
as_store_module = astrenv(:object_store_authenticated_session)[:module]
as_store_opts = astrenv(:object_store_authenticated_session)[:opts] || []
authenticated_session =
astrenv(:object_store_authenticated_session_before_store_callback).(
authenticated_session,
ctx
)
case as_store_module.put(authenticated_session, as_store_opts) do
:ok ->
{:ok, authenticated_session}
{:error, _} = error ->
error
end
end
@doc """
Deletes an authenticated session
"""
@spec delete(t() | id()) :: :ok | {:error, any()}
def delete(%__MODULE__{id: id}) do
delete(id)
end
def delete(authenticated_session_id) do
as_store_module = astrenv(:object_store_authenticated_session)[:module]
as_store_opts = astrenv(:object_store_authenticated_session)[:opts] || []
as_store_module.delete(authenticated_session_id, as_store_opts)
rt_store_module = astrenv(:object_store_refresh_token)[:module]
rt_store_opts = astrenv(:object_store_refresh_token)[:opts] || []
case rt_store_module.get_from_authenticated_session_id(
authenticated_session_id,
rt_store_opts
) do
{:ok, refresh_token_ids} ->
for refresh_token_id <- refresh_token_ids do
{:ok, refresh_token} = RefreshToken.get(refresh_token_id, check_active: false)
rt_scopes = refresh_token.data["scope"] || []
if "openid" in rt_scopes and "offline_access" not in rt_scopes do
RefreshToken.delete(refresh_token_id)
end
end
:ok
{:error, _} = error ->
error
end
end
@doc """
Puts a value into the `data` field of an authenticated session
If the value is `nil`, the authenticated session is not changed and the field is not added.
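## Example
A minimal illustration (the struct is built by hand here):
```elixir
iex> session = %Asteroid.OIDC.AuthenticatedSession{id: "abc", subject_id: "user_1", data: %{}}
iex> Asteroid.OIDC.AuthenticatedSession.put_value(session, "current_acr", "loa1").data
%{"current_acr" => "loa1"}
iex> Asteroid.OIDC.AuthenticatedSession.put_value(session, "current_acr", nil).data
%{}
```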
"""
@spec put_value(t(), any(), any()) :: t()
def put_value(authenticated_session, _key, nil), do: authenticated_session
def put_value(authenticated_session, key, val) do
%{authenticated_session | data: Map.put(authenticated_session.data, key, val)}
end
@doc """
Removes a value from the `data` field of an authenticated session
If the value does not exist, does nothing.
"""
@spec delete_value(t(), any()) :: t()
def delete_value(authenticated_session, key) do
%{authenticated_session | data: Map.delete(authenticated_session.data, key)}
end
@doc """
Computes the current ACR of an authenticated session
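The match is subset-based: a configured `auth_event_set` entry matches when all of
its event names occur among the session's authentication events. Illustration of
the underlying check (plain `MapSet` operations, as used below):
```elixir
iex> auth_events = MapSet.new(["password", "otp"])
iex> MapSet.subset?(MapSet.new(["password"]), auth_events)
true
iex> MapSet.subset?(MapSet.new(["password", "webauthn"]), auth_events)
false
```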
"""
@spec compute_current_acr(%__MODULE__{} | id()) :: OIDC.acr() | nil
def compute_current_acr(%__MODULE__{id: id}) do
compute_current_acr(id)
end
def compute_current_acr(authenticated_session_id) when is_binary(authenticated_session_id) do
ae_store_module = astrenv(:object_store_authentication_event)[:module]
ae_store_opts = astrenv(:object_store_authentication_event)[:opts] || []
case ae_store_module.get_from_authenticated_session_id(
authenticated_session_id,
ae_store_opts
) do
{:ok, auth_event_ids} ->
auth_events =
auth_event_ids
|> Enum.map(fn
auth_event_id ->
{:ok, auth_event} = AuthenticationEvent.get(auth_event_id)
auth_event
end)
|> Enum.reduce(MapSet.new(), &MapSet.put(&2, &1.data["name"]))
acr_config =
Enum.find(
astrenv(:oidc_acr_config, []),
fn
{_acr, acr_conf} ->
Enum.find(
acr_conf[:auth_event_set] || [],
fn
acr_auth_events when is_list(acr_auth_events) ->
acr_auth_events = MapSet.new(acr_auth_events)
MapSet.subset?(acr_auth_events, auth_events)
end
)
end
)
case acr_config do
{acr, _} ->
Atom.to_string(acr)
nil ->
nil
end
_ ->
nil
end
end
@doc """
Updates the current acr of an authenticated session and stores it
"""
@spec update_acr(%__MODULE__{} | id()) :: {:ok, %__MODULE__{}} | {:error, any()}
def update_acr(%__MODULE__{id: id}) do
update_acr(id)
end
def update_acr(authenticated_session_id) when is_binary(authenticated_session_id) do
{:ok, auth_session} = get(authenticated_session_id)
case compute_current_acr(authenticated_session_id) do
nil ->
delete(authenticated_session_id)
acr when is_binary(acr) ->
auth_session
|> put_value("current_acr", acr)
|> store()
end
end
@doc """
Returns the authentication time and the AMRs of an authenticated session for a given ACR
If no ACR is given as the second argument, returns the current state of the session.
It returns:
- `:acr`: the acr
- `:auth_time`: the timestamp of the most recent authentication event
- `:amr`: the list of AMRs of these authentication events
Returns `nil` if no match was found.
## Example
```elixir
iex> Asteroid.Utils.astrenv(:oidc_acr_config)
[
"3-factor": [
callback: &AsteroidWeb.LOA3_webflow.start_webflow/2,
auth_event_set: [["password", "otp", "webauthn"]]
],
"2-factor": [
callback: &AsteroidWeb.LOA2_webflow.start_webflow/2,
auth_event_set: [
["password", "<PASSWORD>"],
["password", "<PASSWORD>"],
["webauthn", "otp"]
]
],
"1-factor": [
callback: &AsteroidWeb.LOA1_webflow.start_webflow/2,
auth_event_set: [["password"], ["webauthn"]],
default: true
]
]
iex> alias Asteroid.OIDC.AuthenticationEvent, as: AE
Asteroid.OIDC.AuthenticationEvent
iex> alias Asteroid.OIDC.AuthenticatedSession, as: AS
Asteroid.OIDC.AuthenticatedSession
iex> {:ok, as} = AS.gen_new("user_1") |> AS.store()
{:ok,
%Asteroid.OIDC.AuthenticatedSession{
data: %{},
id: "gvycEhbeig9RjwUu36UEGTxO1MNRE3qQ9WHisfpk0Zk",
subject_id: "user_1"
}}
iex> AE.gen_new(as.id) |> AE.put_value("name", "password") |> AE.put_value("amr", "pwd") |> AE.put_value("time", 100000) |> AE.store()
{:ok,
%Asteroid.OIDC.AuthenticationEvent{
authenticated_session_id: "gvycEhbeig9RjwUu36UEGTxO1MNRE3qQ9WHisfpk0Zk",
data: %{"amr" => "pwd", "name" => "password", "time" => 100000},
id: "WxQ6AHMRthQlk9cqsGUMVWsFNZ3EeNjyFfNCRYkiF20"
}}
iex> AE.gen_new(as.id) |> AE.put_value("name", "otp") |> AE.put_value("amr", "otp") |> AE.put_value("time", 200000)|> AE.store()
{:ok,
%Asteroid.OIDC.AuthenticationEvent{
authenticated_session_id: "gvycEhbeig9RjwUu36UEGTxO1MNRE3qQ9WHisfpk0Zk",
data: %{"amr" => "otp", "name" => "otp", "time" => 200000},
id: "QnZZE82I4St41JieLpLg8z3HG_T8l6yutlt3dPo_Yx8"
}}
iex> AE.gen_new(as.id) |> AE.put_value("name", "webauthn") |> AE.put_value("amr", "phr") |> AE.put_value("time", 300000)|> AE.store()
{:ok,
%Asteroid.OIDC.AuthenticationEvent{
authenticated_session_id: "gvycEhbeig9RjwUu36UEGTxO1MNRE3qQ9WHisfpk0Zk",
data: %{"amr" => "phr", "name" => "webauthn", "time" => 300000},
id: "N_V4i9lz5obd-3C0XZagZGtOFuDMZo0ywXSBjoum0KY"
}}
iex> AS.info(as.id)
%{acr: "3-factor", amr: ["otp", "phr", "pwd"], auth_time: 300000}
iex> AS.info(as.id, "1-factor")
%{acr: "1-factor", amr: ["pwd"], auth_time: 100000}
iex> AS.info(as.id, "2-factor")
%{acr: "2-factor", amr: ["otp", "pwd"], auth_time: 200000}
iex> AS.info(as.id, "3-factor")
%{acr: "3-factor", amr: ["otp", "phr", "pwd"], auth_time: 300000}
```
"""
@spec info(%__MODULE__{} | id(), Asteroid.OIDC.acr() | nil) ::
%{
required(:acr) => Asteroid.OIDC.acr() | nil,
required(:auth_time) => non_neg_integer() | nil,
required(:amr) => [Asteroid.OIDC.amr(), ...]
}
| nil
def info(auth_session_id, acr \\ nil)
def info(auth_session_id, acr) when is_binary(auth_session_id) do
case get(auth_session_id) do
{:ok, auth_session} ->
info(auth_session, acr)
{:error, _} ->
nil
end
end
def info(authenticated_session, acr) do
acr =
if acr do
acr
else
authenticated_session.data["current_acr"]
end
if acr do
auth_events =
AuthenticationEvent.get_from_authenticated_session_id(authenticated_session.id)
case find_matching_auth_event_set(auth_events, acr) do
auth_event_set when is_list(auth_event_set) ->
amr =
Enum.reduce(
auth_events,
MapSet.new(),
fn
%AuthenticationEvent{data: %{"name" => name, "amr" => amr}}, acc ->
if name in auth_event_set do
MapSet.put(acc, amr)
else
acc
end
_, acc ->
acc
end
)
|> MapSet.to_list()
auth_time =
Enum.reduce(
auth_events,
nil,
fn
%AuthenticationEvent{data: %{"name" => name, "time" => auth_time}}, acc ->
if name in auth_event_set do
if acc == nil do
auth_time
else
max(acc, auth_time)
end
else
acc
end
_, acc ->
acc
end
)
%{acr: acr, amr: amr, auth_time: auth_time}
nil ->
%{acr: acr, amr: nil, auth_time: nil}
end
else
%{acr: nil, amr: nil, auth_time: nil}
end
end
@doc """
Find matching authentication event set (`t:Asteroid.OIDC.ACR.auth_event_set/0`) for a given
ACR from a set of authentication events, or `nil` if none could be found
## Example
```elixir
iex> Asteroid.Utils.astrenv(:oidc_acr_config)
[
loa2: [
callback: &AsteroidWeb.LOA2_webflow.start_webflow/2,
auth_event_set: [
["password", "<PASSWORD>"],
["password", "<PASSWORD>"],
["webauthn", "otp"]
]
],
loa1: [
callback: &AsteroidWeb.LOA1_webflow.start_webflow/2,
auth_event_set: [["password"], ["webauthn"]],
default: true
]
]
iex> alias Asteroid.OIDC.AuthenticationEvent
Asteroid.OIDC.AuthenticationEvent
iex> alias Asteroid.OIDC.AuthenticatedSession
Asteroid.OIDC.AuthenticatedSession
iex> {:ok, as} = AuthenticatedSession.gen_new("user_1") |> AuthenticatedSession.store()
{:ok,
%Asteroid.OIDC.AuthenticatedSession{
data: %{},
id: "jcKc4uwqDYN7c84B7gzfzgVBkpLMUBNusTRMG6NdOTg",
subject_id: "user_1"
}}
iex> {:ok, ae1} = AuthenticationEvent.gen_new(as.id) |> AuthenticationEvent.put_value("name", "password") |> AuthenticationEvent.store()
{:ok,
%Asteroid.OIDC.AuthenticationEvent{
authenticated_session_id: "jcKc4uwqDYN7c84B7gzfzgVBkpLMUBNusTRMG6NdOTg",
data: %{"name" => "password"},
id: "zwhSZ4HPs6JuFpeoTCNNV1wCzFBnnrKAU5bv1FCxLDg"
}}
iex> {:ok, ae2} = AuthenticationEvent.gen_new(as.id) |> AuthenticationEvent.put_value("name", "otp") |> AuthenticationEvent.store()
{:ok,
%Asteroid.OIDC.AuthenticationEvent{
authenticated_session_id: "jcKc4uwqDYN7c84B7gzfzgVBkpLMUBNusTRMG6NdOTg",
data: %{"name" => "otp"},
id: "cvYtgORDaZfzjxxpUnpS0mf3M1d2lLt74tm6KXifyYg"
}}
iex> AuthenticatedSession.find_matching_auth_event_set(AuthenticationEvent.get_from_authenticated_session_id(as.id), "loa1")
["password"]
iex> AuthenticatedSession.find_matching_auth_event_set(AuthenticationEvent.get_from_authenticated_session_id(as.id), "loa2")
["password", "<PASSWORD>"]
iex> AuthenticatedSession.find_matching_auth_event_set(AuthenticationEvent.get_from_authenticated_session_id(as.id), "loa3")
nil
```
"""
@spec find_matching_auth_event_set([AuthenticationEvent.t()], OIDC.acr()) ::
OIDC.ACR.auth_event_set()
| nil
def find_matching_auth_event_set(auth_events, acr) do
acr = String.to_existing_atom(acr)
searched_auth_events_set =
Enum.reduce(auth_events, MapSet.new(), &MapSet.put(&2, &1.data["name"]))
Enum.find(
astrenv(:oidc_acr_config)[acr][:auth_event_set] || [],
fn
conf_auth_event_set ->
conf_auth_event_set = MapSet.new(conf_auth_event_set)
MapSet.subset?(conf_auth_event_set, searched_auth_events_set)
end
)
rescue
_ ->
nil
end
end
| lib/asteroid/oidc/authenticated_session.ex | 0.897863 | 0.400456 | authenticated_session.ex | starcoder |
defmodule Nerves.Grove.PCA9685.DeviceSupervisor do
use Supervisor
@dev_srv Nerves.Grove.PCA9685.Device
@device_registry_name :PCA9685_proccess_registry
@moduledoc """
Device pca9685 Worker
config :pca9685,
devices: [%{bus: 1, address: 0x40, pwm_freq: 50}],
servos: [%{bus: 1, address: 0x40, channel: 0, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x40, channel: 1, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x40, channel: 2, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x40, channel: 3, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x40, channel: 4, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x40, channel: 5, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x40, channel: 6, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x40, channel: 7, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x43, channel: 8, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x43, channel: 9, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x43, channel: 10, position: 90, min: 175, max: 575},
%{bus: 1, address: 0x43, channel: 11, position: 90, min: 175, max: 575},
]
It accepts a list because a Raspberry Pi can manage several boards
Board 0: Address = 0x40 Offset = binary 00000 (no jumpers required)
Board 1: Address = 0x41 Offset = binary 00001 (bridge A0 as in the photo above)
Board 2: Address = 0x42 Offset = binary 00010 (bridge A1)
Board 3: Address = 0x43 Offset = binary 00011 (bridge A0 & A1)
Board 4: Address = 0x44 Offset = binary 00100 (bridge A2)
Nerves.Grove.PCA9685.DeviceSupervisor.start_link([%{bus: 1, address: 0x40, pwm_freq: 60}])
"""
def start_link(config) when is_list(config) do
Supervisor.start_link(__MODULE__, config, name: __MODULE__)
end
def init(config) when is_list(config) do
[
# worker(Registry, [:unique, @device_registry_name])
{Registry, keys: :unique, name: @device_registry_name}
| children(config)
]
# supervise(options())
|> Supervisor.init(options())
end
def children(config) do
config
|> Enum.map(fn %{bus: bus, address: address} = config ->
worker(Nerves.Grove.PCA9685.Device, [config], id: {bus, address})
end)
end
def start_link(), do: Supervisor.start_link(__MODULE__, [], name: __MODULE__)
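# Note: start_link/0 passes [] to Supervisor.start_link/3, so the supervisor
# callback invoked is init([]) above; init/0 and children/0 below run only
# when called directly.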
def init() do
[
# worker(Registry, [:unique, @device_registry_name])
{Registry, keys: :unique, name: @device_registry_name}
| children()
]
# supervise(options())
|> Supervisor.init(options())
end
def children do
:pca9685
|> Application.get_env(:devices, [])
|> Enum.map(fn %{bus: bus, address: address} = config ->
worker(Nerves.Grove.PCA9685.Device, [config], id: {bus, address})
end)
end
def start_device(%{bus: bus, address: address} = map) do
# Use Supervisor.start_child/2 to start a new Device process under this supervisor
with {:ok, _pid} <-
Supervisor.start_child(__MODULE__, worker(@dev_srv, [map], id: {bus, address})) do
{:ok, {bus, address}}
else
{:error, error} -> {:error, error}
end
end
def account_process_devices, do: Supervisor.which_children(__MODULE__)
defp options do
[strategy: :one_for_one, name: __MODULE__]
end
end
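# Illustrative usage sketch (bus/address values are examples, not a fixed API):
#
#     {:ok, _sup} =
#       Nerves.Grove.PCA9685.DeviceSupervisor.start_link([
#         %{bus: 1, address: 0x40, pwm_freq: 50}
#       ])
#     {:ok, {1, 0x43}} =
#       Nerves.Grove.PCA9685.DeviceSupervisor.start_device(%{bus: 1, address: 0x43, pwm_freq: 50})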
| lib/nerves_grove/pca9685/device_supervisor.ex | 0.576304 | 0.481393 | device_supervisor.ex | starcoder |
defmodule Callisto.Properties do
@moduledoc """
Module for defining properties on a struct. Macro properties lets you
define fields that are "known" to the vertex that uses this as a label
(or the edge that uses this as a type). You can define if these fields
are required to be set when calling .new(), or if they have default
values that should be inserted.
By default the last "part" of the module name is used as the label/type
in the database, but this can be overridden with the name() call in the
properties block.
The properties call takes an optional id: key, which can be set to choose
how/if IDs are automatically applied:
* `:uuid` - (default) applies a UUID from the default generator
* `func/1` - An arity 1 function can be given, and will be called with the object getting ID'd -- you can define your own UUID generator this way.
* `nil`/`false` - No ID will be set
## Example
defmodule MyVertex do
use Callisto.Properties
def my_uuid(_obj), do: "foo" # Stupid, all IDs will be "foo"!
properties id: &my_uuid/1 do
name "my_vertex" # Default would be "MyVertex"
field :thing, :integer, required: true
field :other, :string, default: "foo"
end
end
"""
require Inflex
alias Callisto.{Edge, Type, Vertex}
defmacro __using__(_) do
quote do
import Callisto.Properties, only: [properties: 2, properties: 1]
Module.register_attribute(__MODULE__, :callisto_properties, accumulate: false)
end
end
defmacro properties(options \\ [id: :uuid], [do: block]) do
id_option = Keyword.get(options, :id, :uuid)
quote do
try do
import Callisto.Properties
default_name = to_string(__MODULE__ )
|> String.split(".")
|> List.last
|> Inflex.camelize
props = Module.put_attribute(__MODULE__, :callisto_properties,
%{name: default_name, fields: %{} })
unquote(block)
attrs = Module.get_attribute(__MODULE__, :callisto_properties)
|> Map.merge(%{id: unquote(id_option)})
default_args = if unquote(id_option), do: [{:id, nil}], else: []
args = Enum.reduce(attrs.fields, default_args,
fn({k, v}, acc) ->
acc ++ [{k, Keyword.get(v, :default)}]
end)
@callisto_properties attrs
def __callisto_properties(), do: @callisto_properties
def __callisto_field(field), do: @callisto_properties.fields[field]
defstruct args
def apply_defaults(x), do: Callisto.Properties.apply_defaults(x, __MODULE__)
def validate(x), do: Callisto.Properties.validate(x, __MODULE__)
def cast_props(x), do: Callisto.Properties.cast_props(x, __MODULE__)
after
:ok
end
end
end
def __callisto_properties(arg), do: arg.__struct__.__callisto_properties
def __callisto_field(arg, field), do: __callisto_properties(arg).fields[field]
defmacro name(name) do
quote do
new_props = Module.get_attribute(__MODULE__, :callisto_properties)
|> Map.merge(%{name: unquote(name)})
Module.put_attribute(__MODULE__, :callisto_properties, new_props)
end
end
defmacro field(name, type, options \\ []) do
options = [type: type] ++ options
quote do
props = Module.get_attribute(__MODULE__, :callisto_properties)
new_props = Map.put(props, :fields, Map.put(props.fields, unquote(name), unquote(options)))
Module.put_attribute(__MODULE__, :callisto_properties, new_props)
end
end
@doc """
Atomizes known keys, and if any keys are missing from the map, adds the
default value to that key in the map. Note that this is checking for
the existence of the key, not the non-nil nature of the value.
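## Example
A sketch assuming the `MyVertex` module from the moduledoc above (`:other`
has the default `"foo"`):
    iex> Callisto.Properties.apply_defaults(%{"thing" => 1}, MyVertex)
    %{other: "foo", thing: 1}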
"""
def apply_defaults(data, type) when is_binary(type), do: data
def apply_defaults(data, types) when is_list(types) do
Enum.reduce(types, data, fn(x, acc) -> apply_defaults(acc, x) end)
end
def apply_defaults(data, type) do
type.__callisto_properties.fields
|> Enum.filter(fn({_, value}) -> Keyword.has_key?(value, :default) end)
|> Enum.map(fn({key, value}) ->
cond do
is_function(value[:default]) -> {key, value[:default].(data)}
true -> {key, value[:default]}
end
end)
|> Map.new
|> Map.merge(atomize_known_keys(data, type))
end
@doc """
Moves any keys defined as fields in the Properties to their atomic key
version, if they are in the Map as string keys. If both exist, the string
version is copied over the atom version. The string key is then removed.
Any unknown keys are left -- either as strings or atoms, as they were
received. Since the defined keys are already atoms, this does not
risk polluting the atom table, and it's safe to pass arbitrary maps through
this function.
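## Example
Assuming the `MyVertex` module from the moduledoc above; the unknown
"custom" key is left as a string:
    iex> Callisto.Properties.atomize_known_keys(%{"thing" => 1, "custom" => 2}, MyVertex)
    %{:thing => 1, "custom" => 2}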
"""
def atomize_known_keys(data, type) when is_binary(type), do: data
def atomize_known_keys(data, types) when is_list(types) do
Enum.reduce(types, data, fn(x, acc) -> atomize_known_keys(acc, x) end)
end
def atomize_known_keys(data, type) do
type.__callisto_properties.fields
|> Map.keys
|> Enum.filter(&(Map.has_key?(data, Atom.to_string(&1))))
|> Enum.reduce(Map.new(data), fn(atom_key, acc) ->
key = Atom.to_string(atom_key)
# Write the atomized key (may overwrite existing atom key!)
Map.put(acc, atom_key, Map.get(data, key))
|> Map.delete(key) # Then delete the string version of the key.
end)
end
@doc """
Checks the data against type to make sure all required keys are there,
either as atoms or strings. Does NOT convert string keys to atoms.
Returns {:ok, []} on success, or {:error, [missing_key, ...]} on error.
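## Example
Assuming `MyVertex` from the moduledoc above (where `:thing` is required):
    iex> Callisto.Properties.validate(%{other: "bar"}, MyVertex)
    {:error, [:thing]}
    iex> Callisto.Properties.validate(%{"thing" => 1}, MyVertex)
    {:ok, []}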
"""
def validate(_data, type) when is_binary(type), do: {:ok, []}
def validate(data, types) when is_list(types) do
Enum.reduce(types, {:ok, []}, fn(t, {status, errors}) ->
case validate(data, t) do
{:ok, _} -> {status, errors}
{_, new_errors} -> {:error, errors ++ new_errors}
end
end)
end
def validate(data, type) do
clean_data = atomize_known_keys(data, type)
type.__callisto_properties.fields
|> Enum.filter(fn({_, value}) -> value[:required] == true end)
|> Keyword.keys
|> Enum.filter(fn(key) -> !Map.has_key?(clean_data, key) end)
|> case do
[] -> {:ok, []}
absent_keys -> {:error, absent_keys}
end
end
@doc """
Calls validate/2 but returns data if validation passes, raises exception
otherwise.
"""
def validate!(data, type) do
case validate(data, type) do
{:ok, _} -> data
{:error, absent_keys} -> raise ArgumentError, "missing required fields: (#{Enum.join(absent_keys, ", ")})"
end
end
defp cast_values(data, type) when is_binary(type), do: data
defp cast_values(data, types) when is_list(types) do
Enum.reduce(types, data, fn(t, acc) -> cast_values(acc, t) end)
end
defp cast_values(data, type) do
Enum.reduce(type.__callisto_properties.fields, data,
fn({field, defn}, acc) ->
case Map.has_key?(acc, field) do
false -> acc
_ -> Map.put(acc, field, Type.cast!(defn[:type], acc[field]))
end
end)
end
# Argument order is (data, type), matching apply_defaults/2 and validate/2
# and the generated cast_props/1 delegation above.
def cast_props(data, type) when is_binary(type), do: Map.new(data)
def cast_props(data, type) do
Map.new(data)
|> validate!(type)
|> apply_defaults(type)
|> cast_values(type)
end
# The ID set on the object directly takes precedence; make sure it's in the
# props hash.
def denormalize_id(v=%{ id: id }) when is_nil(id) == false do
%{ v | props: Map.put(v.props, :id, id) }
end
# If set in the props, but not on the object, copy it over.
def denormalize_id(v=%{ props: %{id: id}}), do: %{ v | id: id }
# Otherwise, figure out if/how we need to assign the ID.
def denormalize_id(v=%Vertex{}), do: assign_id(v.validators, v)
def denormalize_id(e=%Edge{}), do: assign_id([e.type], e)
defp assign_id([], v), do: v
defp assign_id([label|rest], v)
when is_binary(label) or is_nil(label), do: assign_id(rest, v)
defp assign_id([label|rest], v) do
id = label.__callisto_properties.id
cond do
is_function(id) -> %{ v | props: Map.put(v.props, :id, id.(v)) } |> denormalize_id
id == :uuid -> %{ v | props: Map.put(v.props, :id, UUID.uuid1) } |> denormalize_id
true -> assign_id(rest, v)
end
end
end
| lib/callisto/properties.ex | 0.803906 | 0.475788 | properties.ex | starcoder |
defmodule AWS.CloudHSM do
@moduledoc """
AWS CloudHSM Service
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "CloudHSM",
api_version: "2014-05-30",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "cloudhsm",
global?: false,
protocol: "json",
service_id: "CloudHSM",
signature_version: "v4",
signing_name: "cloudhsm",
target_prefix: "CloudHsmFrontendService"
}
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Adds or overwrites one or more tags for the specified AWS CloudHSM resource.
Each tag consists of a key and a value. Tag keys must be unique to each
resource.
"""
def add_tags_to_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddTagsToResource", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Creates a high-availability partition group. A high-availability partition group
is a group of partitions that spans multiple physical HSMs.
"""
def create_hapg(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateHapg", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Creates an uninitialized HSM instance.
There is an upfront fee charged for each HSM instance that you create with the
`CreateHsm` operation. If you accidentally provision an HSM and want to request
a refund, delete the instance using the `DeleteHsm` operation, go to the [AWS Support Center](https://console.aws.amazon.com/support/home), create a new case,
and select **Account and Billing Support**.
It can take up to 20 minutes to create and provision an HSM. You can monitor the
status of the HSM with the `DescribeHsm` operation. The HSM is ready to be
initialized when the status changes to `RUNNING`.
"""
def create_hsm(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateHsm", input, options)
end
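  # Illustrative usage sketch (not part of the generated module): assumes a
  # client built with AWS.Client.create/3; the input keys follow the CloudHSM
  # Classic CreateHsm API, and the values below are placeholders.
  #
  #     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
  #     {:ok, result, _http} =
  #       AWS.CloudHSM.create_hsm(client, %{
  #         "SubnetId" => "subnet-0123456789abcdef0",
  #         "SshKey" => "ssh-rsa AAAA...",
  #         "IamRoleArn" => "arn:aws:iam::111122223333:role/CloudHsmRole",
  #         "SubscriptionType" => "PRODUCTION"
  #       })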
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Creates an HSM client.
"""
def create_luna_client(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateLunaClient", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Deletes a high-availability partition group.
"""
def delete_hapg(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteHapg", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Deletes an HSM. After completion, this operation cannot be undone and your key
material cannot be recovered.
"""
def delete_hsm(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteHsm", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Deletes a client.
"""
def delete_luna_client(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteLunaClient", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Retrieves information about a high-availability partition group.
"""
def describe_hapg(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeHapg", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Retrieves information about an HSM. You can identify the HSM by its ARN or its
serial number.
"""
def describe_hsm(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeHsm", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Retrieves information about an HSM client.
"""
def describe_luna_client(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeLunaClient", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Gets the configuration files necessary to connect to all high availability
partition groups the client is associated with.
"""
def get_config(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetConfig", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Lists the Availability Zones that have available AWS CloudHSM capacity.
"""
def list_available_zones(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListAvailableZones", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Lists the high-availability partition groups for the account.
This operation supports pagination with the use of the `NextToken` member. If
more results are available, the `NextToken` member of the response contains a
token that you pass in the next call to `ListHapgs` to retrieve the next set of
items.
"""
def list_hapgs(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListHapgs", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Retrieves the identifiers of all of the HSMs provisioned for the current
customer.
This operation supports pagination with the use of the `NextToken` member. If
more results are available, the `NextToken` member of the response contains a
token that you pass in the next call to `ListHsms` to retrieve the next set of
items.
"""
def list_hsms(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListHsms", input, options)
end
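  # Illustrative pagination sketch: keep passing "NextToken" until the
  # response no longer contains one.
  #
  #     {:ok, page, _http} = AWS.CloudHSM.list_hsms(client, %{})
  #     case page do
  #       %{"NextToken" => token} -> AWS.CloudHSM.list_hsms(client, %{"NextToken" => token})
  #       _ -> :done
  #     end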
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Lists all of the clients.
This operation supports pagination with the use of the `NextToken` member. If
more results are available, the `NextToken` member of the response contains a
token that you pass in the next call to `ListLunaClients` to retrieve the next
set of items.
"""
def list_luna_clients(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListLunaClients", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Returns a list of all tags for the specified AWS CloudHSM resource.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Modifies an existing high-availability partition group.
"""
def modify_hapg(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyHapg", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Modifies an HSM.
This operation can result in the HSM being offline for up to 15 minutes while
the AWS CloudHSM service is reconfigured. If you are modifying a production HSM,
you should ensure that your AWS CloudHSM service is configured for high
availability, and consider executing this operation during a maintenance window.
"""
def modify_hsm(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyHsm", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Modifies the certificate used by the client.
This action can potentially start a workflow to install the new certificate on
the client's HSMs.
"""
def modify_luna_client(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ModifyLunaClient", input, options)
end
@doc """
This is documentation for **AWS CloudHSM Classic**.
For more information, see [AWS CloudHSM Classic FAQs](http://aws.amazon.com/cloudhsm/faqs-classic/), the [AWS CloudHSM Classic User Guide](http://docs.aws.amazon.com/cloudhsm/classic/userguide/), and the
[AWS CloudHSM Classic API Reference](http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
**For information about the current version of AWS CloudHSM**, see [AWS CloudHSM](http://aws.amazon.com/cloudhsm/), the [AWS CloudHSM User Guide](http://docs.aws.amazon.com/cloudhsm/latest/userguide/), and the [AWS CloudHSM API
Reference](http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
Removes one or more tags from the specified AWS CloudHSM resource.
To remove a tag, specify only the tag key to remove (not the value). To
overwrite the value for an existing tag, use `AddTagsToResource`.
"""
def remove_tags_from_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveTagsFromResource", input, options)
end
end
| lib/aws/generated/cloud_hsm.ex | 0.796292 | 0.630059 | cloud_hsm.ex | starcoder |
defmodule Rummage.Ecto.Services.BuildSearchQuery do
@moduledoc """
`Rummage.Ecto.Services.BuildSearchQuery` is a service module which serves the
default search hook, `Rummage.Ecto.Hook.Search` that comes shipped with `Rummage.Ecto`.
## Module Attributes
```elixir
@search_types ~w{like ilike eq gt lt gteq lteq is_nil in}a
@search_exprs ~w{where or_where not_where}a
```
`@search_types` is a collection of all 9 valid `search_types` that come shipped with
`Rummage.Ecto`'s default search hook. The types are:
* `like`: Searches for a `term` in a given `field` of a `queryable`.
* `ilike`: Searches for a `term` in a given `field` of a `queryable`, in a case insensitive fashion.
* `eq`: Searches for a `term` to be equal to a given `field` of a `queryable`.
* `gt`: Searches for a `term` to be greater than a given `field` of a `queryable`.
* `lt`: Searches for a `term` to be less than a given `field` of a `queryable`.
* `gteq`: Searches for a `term` to be greater than or equal to a given `field` of a `queryable`.
* `lteq`: Searches for a `term` to be less than or equal to a given `field` of a `queryable`.
* `is_nil`: Searches for a null value when `term` is true, or not null when `term` is false.
* `in`: Searches for a given `field` in a collection of `terms` in a `queryable`.
`@search_exprs` is a collection of 3 valid `search_exprs` that are used to
apply a `search_type` to a `Ecto.Queryable`. Those expressions are:
* `where` (DEFAULT): An AND where query expression.
* `or_where`: An OR where query expression. Behaves exactly like where but
combines the previous expression with an `OR` operation. Useful
for optional searches.
* `not_where`: A NOT where query expression. This can be used while excluding
a list of entries based on where query.
Feel free to use this module on a custom search hook that you write.
"""
import Ecto.Query
@typedoc ~s(TODO: Finish)
@type search_expr :: :where | :or_where | :not_where
@typedoc ~s(TODO: Finish)
@type search_type :: :like | :ilike | :eq | :gt
                   | :lt | :gteq | :lteq | :is_nil | :in
@search_types ~w{like ilike eq gt lt gteq lteq is_nil in}a
@search_exprs ~w{where or_where not_where}a
# Only for Postgres (only one or two interpolations are supported)
# TODO: Fix this once Ecto 3.0 comes out with `unsafe_fragment`
@supported_fragments_one ["date_part('day', ?)",
"date_part('month', ?)",
"date_part('year', ?)",
"date_part('hour', ?)",
"lower(?)",
"upper(?)"]
@supported_fragments_two ["concat(?, ?)",
"coalesce(?, ?)"]
@doc """
Builds a searched `queryable` on top of the given `queryable` using `field`, `search_type`
and `search_term`.
## Examples
When `search_type` is `where`:
When `field`, `search_type` and `queryable` are passed with `search_type` of `like`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:where, :like}, "field_!")
#Ecto.Query<from p in "parents", where: like(p.field_1, ^"%field_!%")>
When `field`, `search_type` and `queryable` are passed with `search_type` of `ilike`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:where, :ilike}, "field_!")
#Ecto.Query<from p in "parents", where: ilike(p.field_1, ^"%field_!%")>
When `field`, `search_type` and `queryable` are passed with `search_type` of `eq`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:where, :eq}, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 == ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `gt`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:where, :gt}, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 > ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `lt`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:where, :lt}, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 < ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `gteq`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:where, :gteq}, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 >= ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `lteq`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:where, :lteq}, "field_!")
#Ecto.Query<from p in "parents", where: p.field_1 <= ^"field_!">
When `search_type` is `or_where`:
When `field`, `search_type` and `queryable` are passed with `search_type` of `like`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:or_where, :like}, "field_!")
#Ecto.Query<from p in "parents", or_where: like(p.field_1, ^"%field_!%")>
When `field`, `search_type` and `queryable` are passed with `search_type` of `ilike`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:or_where, :ilike}, "field_!")
#Ecto.Query<from p in "parents", or_where: ilike(p.field_1, ^"%field_!%")>
When `field`, `search_type` and `queryable` are passed with `search_type` of `eq`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:or_where, :eq}, "field_!")
#Ecto.Query<from p in "parents", or_where: p.field_1 == ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `gt`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:or_where, :gt}, "field_!")
#Ecto.Query<from p in "parents", or_where: p.field_1 > ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `lt`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:or_where, :lt}, "field_!")
#Ecto.Query<from p in "parents", or_where: p.field_1 < ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `gteq`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:or_where, :gteq}, "field_!")
#Ecto.Query<from p in "parents", or_where: p.field_1 >= ^"field_!">
When `field`, `search_type` and `queryable` are passed with `search_type` of `lteq`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:or_where, :lteq}, "field_!")
#Ecto.Query<from p in "parents", or_where: p.field_1 <= ^"field_!">
When `field`, `search_type` and `queryable` are passed with an invalid `search_type`
and `search_expr`:
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.run(queryable, :field_1, {:pizza, :cheese}, "field_!")
** (RuntimeError) Unknown {search_expr, search_type}, {:pizza, :cheese}
search_type should be one of #{inspect @search_types}
search_expr should be one of #{inspect @search_exprs}
"""
@spec run(Ecto.Query.t(), atom() | tuple(),
          {__MODULE__.search_expr(), __MODULE__.search_type()}, term()) :: Ecto.Query.t()
def run(queryable, field, {search_expr, search_type}, search_term)
when search_type in @search_types and search_expr in @search_exprs
do
apply(__MODULE__, String.to_atom("handle_" <> to_string(search_type)),
[queryable, field, search_term, search_expr])
end
def run(_, _, search_tuple, _) do
raise "Unknown {search_expr, search_type}, #{inspect search_tuple}\n" <>
"search_type should be one of #{inspect @search_types}\n" <>
"search_expr should be one of #{inspect @search_exprs}"
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using
`field`, `search_term` and `search_expr` when the `search_type` is `like`.
Checkout [Ecto.Query.API.like/2](https://hexdocs.pm/ecto/Ecto.Query.API.html#like/2)
for more info.
NOTE: Be careful of [Like Injections](https://githubengineering.com/like-injection/)
Assumes that `search_expr` is in #{inspect @search_exprs}.
## Examples
When `search_expr` is `:where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_like(queryable, :field_1, "field_!", :where)
#Ecto.Query<from p in "parents", where: like(p.field_1, ^"%field_!%")>
When `search_expr` is `:or_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_like(queryable, :field_1, "field_!", :or_where)
#Ecto.Query<from p in "parents", or_where: like(p.field_1, ^"%field_!%")>
When `search_expr` is `:not_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_like(queryable, :field_1, "field_!", :not_where)
#Ecto.Query<from p in "parents", where: not(like(p.field_1, ^"%field_!%"))>
"""
@spec handle_like(Ecto.Query.t(), atom() | tuple(), String.t(),
__MODULE__.search_expr()) :: Ecto.Query.t()
for fragment <- @supported_fragments_one do
def handle_like(queryable, {:fragment, unquote(fragment), field}, search_term, :where) do
queryable
|> where([..., b],
like(fragment(unquote(fragment), field(b, ^field)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
for fragment <- @supported_fragments_two do
def handle_like(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :where) do
queryable
|> where([..., b],
like(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
def handle_like(queryable, field, search_term, :where) do
queryable
|> where([..., b],
like(field(b, ^field), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
for fragment <- @supported_fragments_one do
def handle_like(queryable, {:fragment, unquote(fragment), field}, search_term, :or_where) do
queryable
|> or_where([..., b],
like(fragment(unquote(fragment), field(b, ^field)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
for fragment <- @supported_fragments_two do
def handle_like(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :or_where) do
queryable
|> or_where([..., b],
like(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
def handle_like(queryable, field, search_term, :or_where) do
queryable
|> or_where([..., b],
like(field(b, ^field), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
for fragment <- @supported_fragments_one do
def handle_like(queryable, {:fragment, unquote(fragment), field}, search_term, :not_where) do
queryable
|> where([..., b],
not like(fragment(unquote(fragment), field(b, ^field)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
for fragment <- @supported_fragments_two do
def handle_like(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :not_where) do
queryable
|> where([..., b],
not like(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
def handle_like(queryable, field, search_term, :not_where) do
queryable
|> where([..., b],
not like(field(b, ^field), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using
`field`, `search_term` and `search_expr` when the `search_type` is `ilike`.
Check out [Ecto.Query.API.ilike/2](https://hexdocs.pm/ecto/Ecto.Query.API.html#ilike/2)
for more info.
Assumes that `search_expr` is in #{inspect @search_exprs}.
## Examples
When `search_expr` is `:where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_ilike(queryable, :field_1, "field_!", :where)
#Ecto.Query<from p in "parents", where: ilike(p.field_1, ^"%field_!%")>
When `search_expr` is `:or_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_ilike(queryable, :field_1, "field_!", :or_where)
#Ecto.Query<from p in "parents", or_where: ilike(p.field_1, ^"%field_!%")>
When `search_expr` is `:not_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_ilike(queryable, :field_1, "field_!", :not_where)
#Ecto.Query<from p in "parents", where: not(ilike(p.field_1, ^"%field_!%"))>
"""
@spec handle_ilike(Ecto.Query.t(), atom() | tuple(), String.t(),
__MODULE__.search_expr()) :: Ecto.Query.t()
for fragment <- @supported_fragments_one do
def handle_ilike(queryable, {:fragment, unquote(fragment), field}, search_term, :where) do
queryable
|> where([..., b],
ilike(fragment(unquote(fragment), field(b, ^field)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
for fragment <- @supported_fragments_two do
def handle_ilike(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :where) do
queryable
|> where([..., b],
ilike(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
def handle_ilike(queryable, field, search_term, :where) do
queryable
|> where([..., b],
ilike(field(b, ^field), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
for fragment <- @supported_fragments_one do
def handle_ilike(queryable, {:fragment, unquote(fragment), field}, search_term, :or_where) do
queryable
|> or_where([..., b],
ilike(fragment(unquote(fragment), field(b, ^field)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
for fragment <- @supported_fragments_two do
def handle_ilike(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :or_where) do
queryable
|> or_where([..., b],
ilike(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
def handle_ilike(queryable, field, search_term, :or_where) do
queryable
|> or_where([..., b],
ilike(field(b, ^field), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
for fragment <- @supported_fragments_one do
def handle_ilike(queryable, {:fragment, unquote(fragment), field}, search_term, :not_where) do
queryable
|> where([..., b],
not ilike(fragment(unquote(fragment), field(b, ^field)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
for fragment <- @supported_fragments_two do
def handle_ilike(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :not_where) do
queryable
|> where([..., b],
not ilike(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
end
def handle_ilike(queryable, field, search_term, :not_where) do
queryable
|> where([..., b],
not ilike(field(b, ^field), ^"%#{String.replace(search_term, "%", "\\%")}%"))
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using
`field`, `search_term` and `search_expr` when the `search_type` is `eq`.
Assumes that `search_expr` is in #{inspect @search_exprs}.
## Examples
When `search_expr` is `:where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_eq(queryable, :field_1, "field_!", :where)
#Ecto.Query<from p in \"parents\", where: p.field_1 == ^\"field_!\">
When `search_expr` is `:or_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_eq(queryable, :field_1, "field_!", :or_where)
#Ecto.Query<from p in \"parents\", or_where: p.field_1 == ^\"field_!\">
When `search_expr` is `:not_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_eq(queryable, :field_1, "field_!", :not_where)
#Ecto.Query<from p in \"parents\", where: p.field_1 != ^\"field_!\">
"""
@spec handle_eq(Ecto.Query.t(), atom() | tuple(), term(),
__MODULE__.search_expr()) :: Ecto.Query.t()
for fragment <- @supported_fragments_one do
def handle_eq(queryable, {:fragment, unquote(fragment), field}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) == ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_eq(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) == ^search_term)
end
end
def handle_eq(queryable, field, search_term, :where) do
queryable
|> where([..., b],
field(b, ^field) == ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_eq(queryable, {:fragment, unquote(fragment), field}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field)) == ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_eq(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) == ^search_term)
end
end
def handle_eq(queryable, field, search_term, :or_where) do
queryable
|> or_where([..., b],
field(b, ^field) == ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_eq(queryable, {:fragment, unquote(fragment), field}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) != ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_eq(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) != ^search_term)
end
end
def handle_eq(queryable, field, search_term, :not_where) do
queryable
|> where([..., b],
field(b, ^field) != ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using
`field`, `search_term` and `search_expr` when the `search_type` is `gt`.
Assumes that `search_expr` is in #{inspect @search_exprs}.
## Examples
When `search_expr` is `:where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_gt(queryable, :field_1, "field_!", :where)
#Ecto.Query<from p in \"parents\", where: p.field_1 > ^\"field_!\">
When `search_expr` is `:or_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_gt(queryable, :field_1, "field_!", :or_where)
#Ecto.Query<from p in \"parents\", or_where: p.field_1 > ^\"field_!\">
When `search_expr` is `:not_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_gt(queryable, :field_1, "field_!", :not_where)
#Ecto.Query<from p in \"parents\", where: p.field_1 <= ^\"field_!\">
"""
@spec handle_gt(Ecto.Query.t(), atom() | tuple(), term(),
__MODULE__.search_expr()) :: Ecto.Query.t()
for fragment <- @supported_fragments_one do
def handle_gt(queryable, {:fragment, unquote(fragment), field}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) > ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_gt(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) > ^search_term)
end
end
def handle_gt(queryable, field, search_term, :where) do
queryable
|> where([..., b],
field(b, ^field) > ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_gt(queryable, {:fragment, unquote(fragment), field}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field)) > ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_gt(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) > ^search_term)
end
end
def handle_gt(queryable, field, search_term, :or_where) do
queryable
|> or_where([..., b],
field(b, ^field) > ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_gt(queryable, {:fragment, unquote(fragment), field}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) <= ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_gt(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) <= ^search_term)
end
end
def handle_gt(queryable, field, search_term, :not_where) do
queryable
|> where([..., b],
field(b, ^field) <= ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using
`field`, `search_term` and `search_expr` when the `search_type` is `lt`.
Assumes that `search_expr` is in #{inspect @search_exprs}.
## Examples
When `search_expr` is `:where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_lt(queryable, :field_1, "field_!", :where)
#Ecto.Query<from p in \"parents\", where: p.field_1 < ^\"field_!\">
When `search_expr` is `:or_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_lt(queryable, :field_1, "field_!", :or_where)
#Ecto.Query<from p in \"parents\", or_where: p.field_1 < ^\"field_!\">
When `search_expr` is `:not_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_lt(queryable, :field_1, "field_!", :not_where)
#Ecto.Query<from p in \"parents\", where: p.field_1 >= ^\"field_!\">
"""
@spec handle_lt(Ecto.Query.t(), atom() | tuple(), term(),
__MODULE__.search_expr()) :: Ecto.Query.t()
for fragment <- @supported_fragments_one do
def handle_lt(queryable, {:fragment, unquote(fragment), field}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) < ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_lt(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) < ^search_term)
end
end
def handle_lt(queryable, field, search_term, :where) do
queryable
|> where([..., b],
field(b, ^field) < ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_lt(queryable, {:fragment, unquote(fragment), field}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field)) < ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_lt(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) < ^search_term)
end
end
def handle_lt(queryable, field, search_term, :or_where) do
queryable
|> or_where([..., b],
field(b, ^field) < ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_lt(queryable, {:fragment, unquote(fragment), field}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) >= ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_lt(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) >= ^search_term)
end
end
def handle_lt(queryable, field, search_term, :not_where) do
queryable
|> where([..., b],
field(b, ^field) >= ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using
`field`, `search_term` and `search_expr` when the `search_type` is `gteq`.
Assumes that `search_expr` is in #{inspect @search_exprs}.
## Examples
When `search_expr` is `:where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_gteq(queryable, :field_1, "field_!", :where)
#Ecto.Query<from p in \"parents\", where: p.field_1 >= ^\"field_!\">
When `search_expr` is `:or_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_gteq(queryable, :field_1, "field_!", :or_where)
#Ecto.Query<from p in \"parents\", or_where: p.field_1 >= ^\"field_!\">
When `search_expr` is `:not_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_gteq(queryable, :field_1, "field_!", :not_where)
#Ecto.Query<from p in \"parents\", where: p.field_1 < ^\"field_!\">
"""
@spec handle_gteq(Ecto.Query.t(), atom() | tuple(), term(),
__MODULE__.search_expr()) :: Ecto.Query.t()
for fragment <- @supported_fragments_one do
def handle_gteq(queryable, {:fragment, unquote(fragment), field}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) >= ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_gteq(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) >= ^search_term)
end
end
def handle_gteq(queryable, field, search_term, :where) do
queryable
|> where([..., b],
field(b, ^field) >= ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_gteq(queryable, {:fragment, unquote(fragment), field}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field)) >= ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_gteq(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) >= ^search_term)
end
end
def handle_gteq(queryable, field, search_term, :or_where) do
queryable
|> or_where([..., b],
field(b, ^field) >= ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_gteq(queryable, {:fragment, unquote(fragment), field}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) < ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_gteq(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) < ^search_term)
end
end
def handle_gteq(queryable, field, search_term, :not_where) do
queryable
|> where([..., b],
field(b, ^field) < ^search_term)
end
@doc """
Builds a searched `queryable` on top of the given `queryable` using
`field`, `search_term` and `search_expr` when the `search_type` is `lteq`.
Assumes that `search_expr` is in #{inspect @search_exprs}.
## Examples
When `search_expr` is `:where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_lteq(queryable, :field_1, "field_!", :where)
#Ecto.Query<from p in \"parents\", where: p.field_1 <= ^\"field_!\">
When `search_expr` is `:or_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_lteq(queryable, :field_1, "field_!", :or_where)
#Ecto.Query<from p in \"parents\", or_where: p.field_1 <= ^\"field_!\">
When `search_expr` is `:not_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_lteq(queryable, :field_1, "field_!", :not_where)
#Ecto.Query<from p in \"parents\", where: p.field_1 > ^\"field_!\">
"""
@spec handle_lteq(Ecto.Query.t(), atom() | tuple(), term(),
__MODULE__.search_expr()) :: Ecto.Query.t()
for fragment <- @supported_fragments_one do
def handle_lteq(queryable, {:fragment, unquote(fragment), field}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) <= ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_lteq(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) <= ^search_term)
end
end
def handle_lteq(queryable, field, search_term, :where) do
queryable
|> where([..., b],
field(b, ^field) <= ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_lteq(queryable, {:fragment, unquote(fragment), field}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field)) <= ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_lteq(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) <= ^search_term)
end
end
def handle_lteq(queryable, field, search_term, :or_where) do
queryable
|> or_where([..., b],
field(b, ^field) <= ^search_term)
end
for fragment <- @supported_fragments_one do
def handle_lteq(queryable, {:fragment, unquote(fragment), field}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) > ^search_term)
end
end
for fragment <- @supported_fragments_two do
def handle_lteq(queryable, {:fragment, unquote(fragment), field1, field2}, search_term, :not_where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) > ^search_term)
end
end
def handle_lteq(queryable, field, search_term, :not_where) do
queryable
|> where([..., b],
field(b, ^field) > ^search_term)
end
@doc """
Builds a searched `queryable` checking that `field` is nil (when `term` is true)
or not nil (when `term` is false), based on the given `search_expr`.
Check out [Ecto.Query.API.is_nil/1](https://hexdocs.pm/ecto/Ecto.Query.API.html#is_nil/1)
for more info.
Assumes that `search_expr` is in #{inspect @search_exprs}.
## Examples
When `search_expr` is `:where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_is_nil(queryable, :field_1, true, :where)
#Ecto.Query<from p in "parents", where: is_nil(p.field_1)>
iex> BuildSearchQuery.handle_is_nil(queryable, :field_1, false, :where)
#Ecto.Query<from p in "parents", where: not(is_nil(p.field_1))>
When `search_expr` is `:or_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_is_nil(queryable, :field_1, true, :or_where)
#Ecto.Query<from p in "parents", or_where: is_nil(p.field_1)>
iex> BuildSearchQuery.handle_is_nil(queryable, :field_1, false, :or_where)
#Ecto.Query<from p in "parents", or_where: not(is_nil(p.field_1))>
When `search_expr` is `:not_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_is_nil(queryable, :field_1, true, :not_where)
#Ecto.Query<from p in "parents", where: not(is_nil(p.field_1))>
iex> BuildSearchQuery.handle_is_nil(queryable, :field_1, false, :not_where)
#Ecto.Query<from p in "parents", where: is_nil(p.field_1)>
"""
@spec handle_is_nil(Ecto.Query.t(), atom() | tuple(), boolean(),
__MODULE__.search_expr()) :: Ecto.Query.t()
for fragment <- @supported_fragments_one do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field}, true, :where) do
queryable
|> where([..., b],
is_nil(fragment(unquote(fragment), field(b, ^field))))
end
end
for fragment <- @supported_fragments_two do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field1, field2}, true, :where) do
queryable
|> where([..., b],
is_nil(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2))))
end
end
def handle_is_nil(queryable, field, true, :where) do
queryable
|> where([..., b],
is_nil(field(b, ^field)))
end
for fragment <- @supported_fragments_one do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field}, true, :or_where) do
queryable
|> or_where([..., b],
is_nil(fragment(unquote(fragment), field(b, ^field))))
end
end
for fragment <- @supported_fragments_two do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field1, field2}, true, :or_where) do
queryable
|> or_where([..., b],
is_nil(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2))))
end
end
def handle_is_nil(queryable, field, true, :or_where) do
queryable
|> or_where([..., b],
is_nil(field(b, ^field)))
end
for fragment <- @supported_fragments_one do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field}, true, :not_where) do
queryable
|> where([..., b],
not is_nil(fragment(unquote(fragment), field(b, ^field))))
end
end
for fragment <- @supported_fragments_two do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field1, field2}, true, :not_where) do
queryable
|> where([..., b],
not is_nil(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2))))
end
end
def handle_is_nil(queryable, field, true, :not_where) do
queryable
|> where([..., b],
not is_nil(field(b, ^field)))
end
for fragment <- @supported_fragments_one do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field}, false, :where) do
queryable
|> where([..., b],
not is_nil(fragment(unquote(fragment), field(b, ^field))))
end
end
for fragment <- @supported_fragments_two do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field1, field2}, false, :where) do
queryable
|> where([..., b],
not is_nil(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2))))
end
end
def handle_is_nil(queryable, field, false, :where) do
queryable
|> where([..., b],
not is_nil(field(b, ^field)))
end
for fragment <- @supported_fragments_one do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field}, false, :or_where) do
queryable
|> or_where([..., b],
not is_nil(fragment(unquote(fragment), field(b, ^field))))
end
end
for fragment <- @supported_fragments_two do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field1, field2}, false, :or_where) do
queryable
|> or_where([..., b],
not is_nil(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2))))
end
end
def handle_is_nil(queryable, field, false, :or_where) do
queryable
|> or_where([..., b],
not is_nil(field(b, ^field)))
end
for fragment <- @supported_fragments_one do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field}, false, :not_where) do
queryable
|> where([..., b],
is_nil(fragment(unquote(fragment), field(b, ^field))))
end
end
for fragment <- @supported_fragments_two do
def handle_is_nil(queryable, {:fragment, unquote(fragment), field1, field2}, false, :not_where) do
queryable
|> where([..., b],
is_nil(fragment(unquote(fragment), field(b, ^field1), field(b, ^field2))))
end
end
def handle_is_nil(queryable, field, false, :not_where) do
queryable
|> where([..., b],
is_nil(field(b, ^field)))
end
@doc """
Builds a searched `queryable` checking whether `field` is a member of a
given collection, based on the given `search_expr`.
Check out [Ecto.Query.API.in/2](https://hexdocs.pm/ecto/Ecto.Query.API.html#in/2)
for more info.
Assumes that `search_expr` is in #{inspect @search_exprs}.
## Examples
When `search_expr` is `:where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_in(queryable, :field_1, ["a", "b"], :where)
#Ecto.Query<from p in "parents", where: p.field_1 in ^["a", "b"]>
When `search_expr` is `:or_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_in(queryable, :field_1, ["a", "b"], :or_where)
#Ecto.Query<from p in "parents", or_where: p.field_1 in ^["a", "b"]>
When `search_expr` is `:not_where`
iex> alias Rummage.Ecto.Services.BuildSearchQuery
iex> import Ecto.Query
iex> queryable = from u in "parents"
#Ecto.Query<from p in "parents">
iex> BuildSearchQuery.handle_in(queryable, :field_1, ["a", "b"], :not_where)
#Ecto.Query<from p in "parents", where: p.field_1 not in ^["a", "b"]>
"""
@spec handle_in(Ecto.Query.t(), atom() | tuple(), list(),
__MODULE__.search_expr()) :: Ecto.Query.t()
for fragment <- @supported_fragments_one do
def handle_in(queryable, {:fragment, unquote(fragment), field}, list, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field)) in ^list)
end
end
for fragment <- @supported_fragments_two do
def handle_in(queryable, {:fragment, unquote(fragment), field1, field2}, list, :where) do
queryable
|> where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) in ^list)
end
end
def handle_in(queryable, field, list, :where) do
queryable
|> where([..., b],
field(b, ^field) in ^list)
end
for fragment <- @supported_fragments_one do
def handle_in(queryable, {:fragment, unquote(fragment), field}, list, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field)) in ^list)
end
end
for fragment <- @supported_fragments_two do
def handle_in(queryable, {:fragment, unquote(fragment), field1, field2}, list, :or_where) do
queryable
|> or_where([..., b],
fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) in ^list)
end
end
def handle_in(queryable, field, list, :or_where) do
queryable
|> or_where([..., b],
field(b, ^field) in ^list)
end
for fragment <- @supported_fragments_one do
def handle_in(queryable, {:fragment, unquote(fragment), field}, list, :not_where) do
queryable
|> where([..., b],
not fragment(unquote(fragment), field(b, ^field)) in ^list)
end
end
for fragment <- @supported_fragments_two do
def handle_in(queryable, {:fragment, unquote(fragment), field1, field2}, list, :not_where) do
queryable
|> where([..., b],
not fragment(unquote(fragment), field(b, ^field1), field(b, ^field2)) in ^list)
end
end
def handle_in(queryable, field, list, :not_where) do
queryable
|> where([..., b],
not field(b, ^field) in ^list)
end
end
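# Hedged usage sketch of the handlers above; the schemaless "parents" source
# and the :name/:age fields are illustrative assumptions, not part of Rummage.
import Ecto.Query
alias Rummage.Ecto.Services.BuildSearchQuery
query =
from(p in "parents")
|> BuildSearchQuery.handle_ilike(:name, "smith", :where)
|> BuildSearchQuery.handle_gteq(:age, 18, :where)
IO.inspect(query)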
|
lib/rummage_ecto/services/build_search_query.ex
| 0.724481
| 0.930268
|
build_search_query.ex
|
starcoder
|
defmodule Membrane.File.Source do
@moduledoc """
Element that reads chunks of data from a given file and sends them as buffers
through the output pad.
"""
use Membrane.Source
alias Membrane.{Buffer, RemoteStream}
@common_file Membrane.File.CommonFileBehaviour.get_impl()
def_options location: [
spec: Path.t(),
description: "Path to the file"
],
chunk_size: [
spec: pos_integer(),
default: 2048,
description: "Size of chunks being read"
]
def_output_pad :output, caps: {RemoteStream, type: :bytestream}
@impl true
def handle_init(%__MODULE__{location: location, chunk_size: size}) do
{:ok,
%{
location: Path.expand(location),
chunk_size: size,
fd: nil
}}
end
@impl true
def handle_stopped_to_prepared(_ctx, %{location: location} = state) do
fd = @common_file.open!(location, :read)
{:ok, %{state | fd: fd}}
end
@impl true
def handle_prepared_to_playing(_ctx, state) do
{{:ok, caps: {:output, %RemoteStream{type: :bytestream}}}, state}
end
@impl true
def handle_demand(:output, _size, :buffers, _ctx, %{chunk_size: chunk_size} = state),
do: supply_demand(chunk_size, [redemand: :output], state)
def handle_demand(:output, size, :bytes, _ctx, state),
do: supply_demand(size, [], state)
defp supply_demand(size, redemand, %{fd: fd} = state) do
actions =
case @common_file.binread!(fd, size) do
<<payload::binary>> when byte_size(payload) == size ->
[buffer: {:output, %Buffer{payload: payload}}] ++ redemand
<<payload::binary>> when byte_size(payload) < size ->
[buffer: {:output, %Buffer{payload: payload}}, end_of_stream: :output]
:eof ->
[end_of_stream: :output]
end
{{:ok, actions}, state}
end
@impl true
def handle_prepared_to_stopped(_ctx, %{fd: fd} = state) do
:ok = @common_file.close!(fd)
{:ok, %{state | fd: nil}}
end
end
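# Hedged sketch: wiring this source into a pipeline. `MySink` is a
# hypothetical consumer, and the spec/link syntax follows the
# Membrane.ParentSpec API of the membrane_core era this element appears to
# target; details may differ across versions.
defmodule Membrane.File.Source.ExamplePipeline do
use Membrane.Pipeline
import Membrane.ParentSpec
@impl true
def handle_init(_opts) do
children = [
source: %Membrane.File.Source{location: "/tmp/input.bin", chunk_size: 4096},
sink: MySink
]
links = [link(:source) |> to(:sink)]
{{:ok, spec: %Membrane.ParentSpec{children: children, links: links}}, %{}}
end
end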
|
lib/membrane_file/source.ex
| 0.799755
| 0.450662
|
source.ex
|
starcoder
|
defmodule Membrane.Pad do
@moduledoc """
Pads are units defined by elements and bins, allowing them to be linked with their
siblings. This module consists of pads typespecs and utils.
Each pad is described by its name, direction, availability, mode and possible caps.
For pads to be linkable, these properties have to be compatible. For more
information on each of them, check appropriate type in this module.
Each link can only consist of exactly two pads.
"""
use Bunch
use Bunch.Typespec
alias Membrane.{Buffer, Caps}
@typedoc """
Defines the term by which the pad instance is identified.
"""
@type ref_t :: name_t | {__MODULE__, name_t, dynamic_id_t}
@typedoc """
Possible id of dynamic pad
"""
@type dynamic_id_t :: any
@typedoc """
Defines the name of pad or group of dynamic pads
"""
@type name_t :: atom | {:private, atom}
@typedoc """
Defines possible pad directions:
- `:output` - data can only be sent through such pad,
- `:input` - data can only be received through such pad.
One cannot link two pads with the same direction.
"""
@type direction_t :: :output | :input
@typedoc """
Describes how an element sends and receives data.
Modes are strictly related to pad directions:
- `:push` output pad - element can send data through such pad whenever it wants.
- `:push` input pad - element has to deal with data whenever it comes through
such pad, and do so fast enough not to let data accumulate on the pad, which
may lead to overflow of the element's Erlang process queue and is highly unwanted.
- `:pull` output pad - element can send data through such pad only if it has
already received demand on the pad. Sending a small, limited amount of
undemanded data is supported and handled by `Membrane.Core.InputBuffer`.
- `:pull` input pad - element receives through such pad only data that it has
previously demanded, so that no undemanded data can arrive.
Linking pads with different modes is possible, but only when the output pad
works in push mode and the input pad in pull mode. In that case, the toilet
mode of `Membrane.Core.InputBuffer` has to be enabled.
For more information on transferring data and demands, see `Membrane.Source`,
`Membrane.Filter`, `Membrane.Sink`.
"""
@type mode_t :: :push | :pull
@typedoc """
Values used when defining pad availability:
- `:always` - a static pad, which can remain unlinked in `stopped` state only.
- `:on_request` - a dynamic pad, an instance of which is created every time it is
linked to another pad. Thus, linking the pad with _k_ other pads creates _k_
instances of the pad and links each with another pad.
"""
@list_type availability_t :: [:always, :on_request]
@typedoc """
Type describing availability mode of a created pad:
- `:static` - there always exists exactly one instance of such pad.
- `:dynamic` - multiple instances of such pad may be created and removed (which
entails executing `handle_pad_added` and `handle_pad_removed` callbacks,
respectively).
"""
@type availability_mode_t :: :static | :dynamic
@typedoc """
Describes how a pad should be declared in element or bin.
"""
@type spec_t :: output_spec_t | input_spec_t | bin_spec_t
@typedoc """
For bins there are exactly the same options for both directions.
Demand unit is derived from the first element inside the bin linked to the
given input.
"""
@type bin_spec_t :: input_spec_t
@typedoc """
Describes how an output pad should be declared inside an element.
"""
@type output_spec_t :: {name_t(), [common_spec_options_t]}
@typedoc """
Describes how an input pad should be declared inside an element.
"""
@type input_spec_t ::
{name_t(), [common_spec_options_t | {:demand_unit, Buffer.Metric.unit_t()}]}
@typedoc """
Pad options used in `t:spec_t/0`
"""
@type common_spec_options_t ::
{:availability, availability_t()}
| {:mode, mode_t()}
| {:caps, Caps.Matcher.caps_specs_t()}
| {:options, Keyword.t()}
@typedoc """
Type describing a pad. Contains data parsed from `t:spec_t/0`
"""
@type description_t :: %{
:availability => availability_t(),
:mode => mode_t(),
:name => name_t(),
:caps => Caps.Matcher.caps_specs_t(),
optional(:demand_unit) => Buffer.Metric.unit_t(),
:direction => direction_t(),
:options => nil | Keyword.t()
}
@doc """
Creates a static pad reference.
"""
defmacro ref(name) do
quote do
unquote(name)
end
end
@doc """
Creates a dynamic pad reference.
"""
defmacro ref(name, id) do
quote do
{unquote(__MODULE__), unquote(name), unquote(id)}
end
end
defguard is_pad_ref(term)
when term |> is_atom or
(term |> is_tuple and term |> tuple_size == 3 and term |> elem(0) == __MODULE__ and
term |> elem(1) |> is_atom)
defguard is_public_name(term) when is_atom(term)
defguardp is_private_name(term)
when tuple_size(term) == 2 and elem(term, 0) == :private and is_atom(elem(term, 1))
defguard is_pad_name(term)
when is_public_name(term) or is_private_name(term)
defguard is_availability(term) when term in @availability_t
defguard is_availability_dynamic(availability) when availability == :on_request
defguard is_availability_static(availability) when availability == :always
@doc """
Returns pad availability mode for given availability.
"""
@spec availability_mode(availability_t) :: availability_mode_t
def availability_mode(:always), do: :static
def availability_mode(:on_request), do: :dynamic
@doc """
Returns the name for the given pad reference
"""
@spec name_by_ref(ref_t()) :: name_t()
def name_by_ref(ref(name, _id)) when is_pad_name(name), do: name
def name_by_ref(name) when is_pad_name(name), do: name
@spec opposite_direction(direction_t()) :: direction_t()
def opposite_direction(:input), do: :output
def opposite_direction(:output), do: :input
@spec get_corresponding_bin_pad(ref_t()) :: ref_t()
def get_corresponding_bin_pad(ref(name, id)),
do: ref(get_corresponding_bin_name(name), id)
def get_corresponding_bin_pad(name), do: get_corresponding_bin_name(name)
@spec create_private_name(atom) :: name_t()
def create_private_name(name) when is_public_name(name) do
get_corresponding_bin_name(name)
end
@spec get_corresponding_bin_name(name_t()) :: name_t()
defp get_corresponding_bin_name({:private, name}) when is_public_name(name), do: name
defp get_corresponding_bin_name(name) when is_public_name(name), do: {:private, name}
@spec assert_public_name!(name_t()) :: :ok
def assert_public_name!(name) when is_public_name(name) do
:ok
end
def assert_public_name!(name) do
raise CompileError,
file: __ENV__.file,
description: "#{inspect(name)} is not a proper pad name. Use public names only."
end
end
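# Sketch of the pad reference helpers above (values illustrative). `ref/1`
# and `ref/2` are macros, hence the `require`.
require Membrane.Pad
:input = Membrane.Pad.ref(:input)
{Membrane.Pad, :input, 1} = Membrane.Pad.ref(:input, 1)
:input = Membrane.Pad.name_by_ref(Membrane.Pad.ref(:input, 1))
:static = Membrane.Pad.availability_mode(:always)
:dynamic = Membrane.Pad.availability_mode(:on_request)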
|
lib/membrane/pad.ex
| 0.896671
| 0.700805
|
pad.ex
|
starcoder
|
defmodule OrderDistributor do
@moduledoc """
A module for assigning and distributing orders from the local button panel to the most optimal order handler.
Note that the `OrderDistributor` holds no state the way a regular `GenServer` would, but the `GenServer` behavior is still used for its casting and calling functionality.
"""
use GenServer, restart: :permanent
require Logger
# Public functions
# --------------------------------------------
def init([]), do: {:ok, nil}
@doc "Starts the `OrderDistributor`, to be used in a supervision tree, see `Supervisor`."
def start_link([]), do: GenServer.start_link(__MODULE__, [], name: __MODULE__)
# API
# --------------------------------------------
@doc "Informs the `OrderDistributor` that a button was pressed."
def buttonPressed(buttonPress), do: GenServer.cast(__MODULE__, {:buttonPressed, buttonPress})
# Calls/Casts
# --------------------------------------------
# Handles the OrderDistributor getting informed of a local button press. If the button press is a cab, the distributor assigns it to the local OrderHandler.
# If not, it collects states of entire visible system, calculates which node has the lowest cost and assigns the order to that node's OrderHandler.
def handle_cast({:buttonPressed, buttonPress}, nil) do
{floor, buttonType} = buttonPress
unless buttonType == :cab do
{orderQueues, _bad_nodes} = OrderHandler.getOrderQueues()
{fsmStates, _bad_nodes} = Fsm.getStates()
lowestCostNode =
pairReplies(orderQueues, fsmStates)
# Transform into pairs of nodeIDs and their associated cost of taking the order.
|> Enum.map(fn {nodeID, {orderQueue, {_dir, floor, {availability, _timestamp}}}} ->
{nodeID, CostFunction.calculate(buttonPress, orderQueue, floor, availability)}
end)
|> getLowestCostNode()
order = {floor, buttonType, lowestCostNode}
OrderHandler.addOrder(order, lowestCostNode)
{:noreply, nil}
else
with {[{_nodeID, {_direction, _floor, {availability, _timestamp}}}], _bad_nodes} <-
Fsm.getStates([node()]) do
# Only ask the local OrderHandler to add the order if it's available, thus not promising to take new cab orders
# after unavailability has occurred.
if availability == :available do
order = {floor, buttonType, node()}
OrderHandler.addOrder(order, node())
end
end
{:noreply, nil}
end
end
# Private functions
# --------------------------------------------
# Takes in a list of nodes and associated cost, returns the name of the node with the lowest associated cost. If all costs are Inf, returns nil
defp getLowestCostNode(costList) do
{lowestCostNode, _cost} =
costList
|> Enum.reject(fn {_nodeID, cost} -> cost == Inf end)
|> Enum.min_by(fn {nodeID, cost} -> {cost, nodeID} end, fn -> nil end)
lowestCostNode
end
# Takes in lists of orderQueues and fsm states along with associated nodeIDs, pairs them and return the pairs with their associated nodeIDs.
defp pairReplies(orderQueueList, fsmList) do
orderQueueNodes = Enum.map(orderQueueList, fn {nodeID, _orderQueue} -> nodeID end)
fsmNodes = Enum.map(fsmList, fn {nodeID, _fsmState} -> nodeID end)
# `--` is right-associative, so this evaluates as orderQueueNodes -- (orderQueueNodes -- fsmNodes), i.e. the set intersection.
commonNodes = orderQueueNodes -- orderQueueNodes -- fsmNodes
commonNodes
|> Enum.map(fn nodeID -> {nodeID, {orderQueueList[nodeID], fsmList[nodeID]}} end)
end
end
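# Worked example of the `--` trick used in pairReplies/2: `--` is
# right-associative, so `a -- a -- b` evaluates as `a -- (a -- b)`, keeping
# exactly the elements of `a` that also appear in `b`.
a = [:node1, :node2, :node3]
b = [:node2, :node3, :node4]
[:node2, :node3] = a -- a -- b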
|
elevator/lib/orderDistributor.ex
| 0.76105
| 0.613541
|
orderDistributor.ex
|
starcoder
|
defmodule Queutils.BlockingProducer do
use GenStage
@moduledoc """
A `GenStage` producer that acts as a blocking queue, with a fixed length.
Blocks any time `Queutils.BlockingProducer.push/2` is called when the queue
is at its maximum length.
This can be used as an entry-point to a `GenStage` pipeline, since the
max queue length provides for back-pressure.
You can even set the queue's length to zero in order to block all pushes
until demand comes in.
## Usage
Add it to your application supervisor's `start/2` function like this:
def start(_type, _args) do
children = [
...
{Queutils.BlockingProducer, name: MessageProducer, max_length: 10_000},
...
]
opts = [strategy: :one_for_one, name: MyApplication.Supervisor]
Supervisor.start_link(children, opts)
end
Then, subscribe a `GenStage` to it.
```elixir
def init(:ok) do
{:consumer, :the_state_does_not_matter, subscribe_to: [MessageProducer]}
end
```
You can now push messages to the queue like this:
:ok = Queutils.BlockingProducer.push(MessageProducer, :my_message)
Broadway users, be forewarned! A Broadway module needs to start its producer itself,
so it's not possible to customize the process ID via the `:name` option documented below.
If you're in that boat, you should use a `Queutils.BlockingQueue` along with
a `Queutils.BlockingQueueProducer`, so you can customize your reference to your `BlockingQueue`.
## Options
- `:name` - the ID of the queue. This will be the first argument to the `push/2` function.
- `:max_length` - The maximum number of messages that this process will store until it starts blocking. Default is 1,000.
- `:dispatcher` - The `GenStage` dispatcher that this producer should use. Default is `GenStage.DemandDispatcher`.
"""
def start_link(opts) do
case Keyword.fetch(opts, :name) do
{:ok, name} -> GenStage.start_link(__MODULE__, opts, name: name)
:error -> GenStage.start_link(__MODULE__, opts)
end
end
def child_spec(opts) do
%{
id: Keyword.get(opts, :name, Queutils.BlockingProducer),
start: {__MODULE__, :start_link, [opts]},
type: :supervisor
}
end
@doc """
Push an item onto the queue.
This function will block if the queue is full, and unblock once it's not.
"""
@spec push(term(), term()) :: :ok
def push(queue, msg) do
GenStage.call(queue, {:push, msg})
end
@doc """
Get the number of items currently sitting in a queue.
"""
def length(queue) do
GenStage.call(queue, :length)
end
@doc """
Get the number of blocked processes waiting to push to a queue.
"""
def waiting_count(queue) do
GenStage.call(queue, :waiting_count)
end
@doc """
Get the total count of messages that have been pushed to a queue over its lifetime.
"""
def pushed_count(queue) do
GenStage.call(queue, :pushed_count)
end
@doc """
Get the total count of messages that have been popped from a queue over its lifetime.
"""
def popped_count(queue) do
GenStage.call(queue, :popped_count)
end
@impl true
def init(opts) do
dispatcher = Keyword.get(opts, :dispatcher, GenStage.DemandDispatcher)
max_length = Keyword.get(opts, :max_length, 1_000)
unless max_length >= 0 do
raise "Invalid argument :max_length. Must be an integer zero or greater, but was #{inspect max_length}"
end
{:producer, %{queue: [], waiting: [], demand: 0, max_length: max_length, pushed_count: 0, popped_count: 0}, dispatcher: dispatcher}
end
@impl true
def handle_call({:push, msg}, from, state) do
:ok = validate_state(state)
cond do
state.demand > 0 ->
remaining_demand = state.demand - 1
{:reply, :ok, [msg], %{state | demand: remaining_demand, pushed_count: state.pushed_count + 1, popped_count: state.popped_count + 1}}
Enum.count(state.queue) >= state.max_length ->
waiting = state.waiting ++ [{from, msg}]
{:noreply, [], %{state | waiting: waiting}}
true ->
queue = state.queue ++ [msg]
{:reply, :ok, [], %{state | queue: queue, pushed_count: state.pushed_count + 1}}
end
end
def handle_call(:length, _from, state) do
{:reply, Kernel.length(state.queue), [], state}
end
def handle_call(:waiting_count, _from, state) do
{:reply, Kernel.length(state.waiting), [], state}
end
def handle_call(:pushed_count, _from, state) do
{:reply, state.pushed_count, [], state}
end
def handle_call(:popped_count, _from, state) do
{:reply, state.popped_count, [], state}
end
@impl true
def handle_demand(demand, state) do
:ok = validate_state(state)
total_demand = demand + state.demand
{popped, remaining} = Enum.split(state.queue, total_demand)
{popped_waiters, still_waiting} = Enum.split(state.waiting, total_demand)
msgs_from_waiters = Enum.map(popped_waiters, fn {from, msg} ->
GenStage.reply(from, :ok)
msg
end)
queue = remaining ++ msgs_from_waiters
remaining_demand = total_demand - Enum.count(queue)
new_pushes = Kernel.length(msgs_from_waiters)
new_pops = Kernel.length(popped)
{:noreply, popped, %{state | demand: remaining_demand, queue: queue, waiting: still_waiting, pushed_count: state.pushed_count + new_pushes, popped_count: state.popped_count + new_pops}}
end
defp validate_state(state) do
# If we have a non-zero demand, it is assumed that we will not have
# anyone waiting and that the queue is empty, since we would have
# popped off all those messages before building up any demand.
cond do
state.demand < 0 ->
raise "Incorrect state: BlockingProducer has a negative demand (demand is #{inspect state.demand})."
state.demand > 0 && not Enum.empty?(state.queue) ->
raise "Incorrect state: BlockingProducer has demand but also has items in its queue."
state.demand > 0 && not Enum.empty?(state.waiting) ->
raise "Incorrect state: BlockingProducer has demand but also has processes waiting to insert."
state.pushed_count < state.popped_count ->
raise "Incorrect state: pushed_count is less than popped_count (pushed: #{state.pushed_count}, popped: #{state.popped_count})"
true -> :ok
end
end
end
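# Minimal consumer sketch (hypothetical module): subscribing any GenStage
# consumer drains the queue and unblocks pushers waiting on a full one.
defmodule Queutils.BlockingProducer.ExampleConsumer do
use GenStage
def start_link(producer), do: GenStage.start_link(__MODULE__, producer)
@impl true
def init(producer), do: {:consumer, :ok, subscribe_to: [producer]}
@impl true
def handle_events(events, _from, state) do
Enum.each(events, &IO.inspect/1)
{:noreply, [], state}
end
end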
|
lib/queutils/blocking_producer.ex
| 0.826222
| 0.760917
|
blocking_producer.ex
|
starcoder
|
defmodule Dynamo.Router.Filters do
@moduledoc """
This module is responsible for providing filters to a router or a Dynamo.
A filter is a module that is invoked before, during or after a service match.
While hooks are executed only if a route match, filters are always
executed. In general, filters are a more low-level mechanism, with
less conveniences compared to hooks.
There is also a difference regarding ordering. While filters are invoked
in the order they are declared, regardless of their behaviour, hooks
always execute `prepare` hooks first, followed by the `finalize` ones.
## Usage
defmodule MyDynamo do
use Dynamo.Router
filter Dynamo.Static.new("/public", :myapp)
end
## Behaviours
A filter must implement one of the three functions:
* `prepare/1` - the filter will be executed before invoking service
* `service/2` - the filter will be executed with the service function as argument
* `finalize/1` - the filter will be executed after invoking the service
## Examples
A filter that adds a Chrome Frame header to the response:
defmodule ChromeFrameFilter do
def prepare(conn) do
conn.put_resp_header("x-ua-compatible", "chrome=1")
end
end
Notice the filter receives a `conn` as argument and must return an
updated `conn`. A finalize filter works similarly.
A service filter receives and must return a `conn`, but it also
receives a function which should be invoked in order to continue
the request. Here is a filter that sets the content type to json
and converts the response body to valid json:
defmodule JSONFilter do
def service(conn, fun) do
conn = conn.put_resp_header("content-type", "application/json")
conn = fun.(conn)
conn.resp_body(to_json(conn.resp_body))
end
def to_json(data), do: ...
end
"""
@doc false
defmacro __using__(_) do
quote location: :keep do
@dynamo_filters []
@before_compile unquote(__MODULE__)
import unquote(__MODULE__), only: [filter: 1, prepend_filter: 1, delete_filter: 1]
end
end
@doc false
defmacro __before_compile__(env) do
module = env.module
filters = Module.get_attribute(module, :dynamo_filters)
escaped = Macro.escape(Enum.reverse(filters))
code = quote(do: super(conn))
code = Enum.reduce(filters, code, &compile_filter(&1, &2))
quote location: :keep do
defoverridable [service: 1]
def __filters__(), do: unquote(escaped)
def service(conn), do: unquote(code)
end
end
@doc """
Adds a filter to the filter chain.
"""
defmacro filter(spec) do
quote do
@dynamo_filters [unquote(spec)|@dynamo_filters]
end
end
@doc """
Prepends a filter to the filter chain.
"""
defmacro prepend_filter(spec) do
quote do
@dynamo_filters @dynamo_filters ++ [unquote(spec)]
end
end
@doc """
Deletes a filter from the filter chain.
"""
defmacro delete_filter(atom) do
quote do
atom = unquote(atom)
@dynamo_filters Enum.filter(@dynamo_filters, fn(f) -> not unquote(__MODULE__).match?(atom, f) end)
end
end
@doc """
Matches a filter against the other
"""
def match?(atom, filter) when is_atom(atom) and is_tuple(filter) do
elem(filter, 0) == atom
end
def match?(atom, atom) when is_atom(atom) do
true
end
def match?(_, _) do
false
end
## Helpers
defp compile_filter(filter, acc) do
case Code.ensure_compiled(extract_module(filter)) do
{ :module, _ } ->
escaped = Macro.escape(filter)
cond do
function_exported?(filter, :service, 2) ->
quote do
unquote(escaped).service(conn, fn(conn) -> unquote(acc) end)
end
function_exported?(filter, :prepare, 1) ->
quote do
conn = unquote(escaped).prepare(conn)
unquote(acc)
end
function_exported?(filter, :finalize, 1) ->
quote do
unquote(escaped).finalize(unquote(acc))
end
true ->
raise "filter #{inspect filter} does not implement any of the required functions"
end
{ :error, _ } ->
raise "could not find #{inspect filter} to be used as filter"
end
end
defp extract_module(mod) when is_atom(mod) do
mod
end
defp extract_module(tuple) when is_tuple(tuple) and is_atom(elem(tuple, 0)) do
elem(tuple, 0)
end
end
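# Sketch: a finalize filter in the same style as the moduledoc examples
# above (legacy Dynamo conn API; the header value is illustrative).
defmodule ServedByFilter do
def finalize(conn) do
conn.put_resp_header("x-served-by", "dynamo")
end
end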
|
lib/dynamo/router/filters.ex
| 0.886595
| 0.725697
|
filters.ex
|
starcoder
|
defmodule Coxir.Commander.Utils do
use Bitwise
alias Coxir.Struct.{Guild, Role}
alias Coxir.Struct.{Member, Channel}
@permissions [
CREATE_INSTANT_INVITE: 1 <<< 0,
KICK_MEMBERS: 1 <<< 1,
BAN_MEMBERS: 1 <<< 2,
ADMINISTRATOR: 1 <<< 3,
MANAGE_CHANNELS: 1 <<< 4,
MANAGE_GUILD: 1 <<< 5,
ADD_REACTIONS: 1 <<< 6,
VIEW_AUDIT_LOG: 1 <<< 7,
PRIORITY_SPEAKER: 1 <<< 8,
VIEW_CHANNEL: 1 <<< 10,
SEND_MESSAGES: 1 <<< 11,
SEND_TTS_MESSAGES: 1 <<< 12,
MANAGE_MESSAGES: 1 <<< 13,
EMBED_LINKS: 1 <<< 14,
ATTACH_FILES: 1 <<< 15,
READ_MESSAGE_HISTORY: 1 <<< 16,
MENTION_EVERYONE: 1 <<< 17,
USE_EXTERNAL_EMOJIS: 1 <<< 18,
CONNECT: 1 <<< 20,
SPEAK: 1 <<< 21,
MUTE_MEMBERS: 1 <<< 22,
DEAFEN_MEMBERS: 1 <<< 23,
MOVE_MEMBERS: 1 <<< 24,
USE_VAD: 1 <<< 25,
CHANGE_NICKNAME: 1 <<< 26,
MANAGE_NICKNAMES: 1 <<< 27,
MANAGE_ROLES: 1 <<< 28,
MANAGE_WEBHOOKS: 1 <<< 29,
MANAGE_EMOJIS: 1 <<< 30,
]
@all @permissions
|> Enum.map(&elem(&1, 1))
|> Enum.reduce(&bor/2)
@admin @permissions[:ADMINISTRATOR]
def permit?(user, channel, term) when is_function(term) do
term.(user, channel)
end
def permit?(user, channel, term) do
term = term
|> List.wrap
|> Enum.map(& @permissions[&1])
|> Enum.reduce(&bor/2)
permissions(user, channel)
|> band(term)
== term
end
def permit?(member, term) do
general = Channel.get(member.guild_id)
permit?(member.user, general, term)
end
def is_admin?(user, channel) do
permit?(user, channel, :ADMINISTRATOR)
end
defp permissions(user, channel) do
guild_id = channel[:guild_id]
everyone = Role.get(guild_id)
member = Member.get(guild_id, user.id)
guild = Guild.get(guild_id)
cond do
is_nil(guild) ->
1
guild.owner_id == user.id ->
@all
true ->
permissions = \
[everyone | member.roles]
|> Enum.sort_by(& &1[:position])
|> Enum.map(& &1[:permissions])
|> Enum.reduce(&bor/2)
flakes = Enum.map(member.roles, & &1[:id])
writes = \
channel.permission_overwrites
|> Enum.filter(& &1[:id] in [user.id, guild_id | flakes])
finale = writes
|> Enum.reduce(
permissions,
fn %{deny: deny, allow: allow}, perms ->
perms
|> bor(allow)
|> band(deny |> bnot)
end
)
cond do
band(permissions, @admin) == @admin ->
@all
true ->
finale
end
end
end
end
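# Sketch of the bitmask check behind permit?/3: a requested permission set
# is granted iff every one of its bits is present. Bit positions mirror the
# @permissions table above.
use Bitwise
requested = bor(1 <<< 11, 1 <<< 14) # SEND_MESSAGES + EMBED_LINKS
granted = bor(bor(1 <<< 10, 1 <<< 11), 1 <<< 14) # VIEW_CHANNEL + the two above
true = band(granted, requested) == requested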
|
lib/commander/utils.ex
| 0.528777
| 0.401365
|
utils.ex
|
starcoder
|
use Bitwise
defmodule Aoc2021.Day13 do
def process1(s) do
[ a, b ] = String.split(s, ",")
{ String.to_integer(a), String.to_integer(b) }
end
def process2(s) do
[ l, r ] = String.split(s, "=")
xy = List.last(String.split(l, " "))
{ String.to_atom(xy), String.to_integer(r) }
end
def foldxy({x, y}, []), do: {x, y} # base case: no folds left
def foldxy({x, y}, [ {:x, f} | t ]), do: foldxy({f-abs(x-f), y}, t)
def foldxy({x, y}, [ {:y, f} | t ]), do: foldxy({x, f-abs(y-f)}, t)
def process(args, limit) do
[ ps, _, fs ] = Enum.chunk_by(args, &(&1 == ""))
folds = Enum.take(Enum.map( fs, &process2/1), limit)
Enum.map(ps, &process1/1) |> Enum.map(&foldxy(&1, folds)) |> Enum.frequencies() |> Map.keys()
end
def part1(args), do: process(args, 1) |> length()
def match([]), do: ""
def match([0b111110,0b001001,0b001001,0b111110 | t]), do: "A" <> match(t)
def match([0b111111,0b100101,0b100101,0b011010 | t]), do: "B" <> match(t)
def match([0b011110,0b100001,0b100001,0b010010 | t]), do: "C" <> match(t)
def match([0b111111,0b100101,0b100101,0b100001 | t]), do: "E" <> match(t)
def match([0b111111,0b000101,0b000101,0b000001 | t]), do: "F" <> match(t)
def match([0b011110,0b100001,0b101001,0b111010 | t]), do: "G" <> match(t)
def match([0b111111,0b000100,0b000100,0b111111 | t]), do: "H" <> match(t)
def match([0b010000,0b100000,0b100001,0b011111 | t]), do: "J" <> match(t)
def match([0b111111,0b000100,0b011010,0b100001 | t]), do: "K" <> match(t)
def match([0b111111,0b100000,0b100000,0b100000 | t]), do: "L" <> match(t)
def match([0b011110,0b100001,0b100001,0b011110 | t]), do: "O" <> match(t)
def match([0b111111,0b001001,0b001001,0b000110 | t]), do: "P" <> match(t)
def match([0b011110,0b100001,0b100011,0b011111 | t]), do: "Q" <> match(t)
def match([0b111111,0b001001,0b011001,0b100110 | t]), do: "R" <> match(t)
def match([0b010010,0b100101,0b101001,0b010010 | t]), do: "S" <> match(t)
def match([0b011111,0b100000,0b100000,0b011111 | t]), do: "U" <> match(t)
def match([0b110001,0b101001,0b100101,0b100011 | t]), do: "Z" <> match(t)
def match([0b11111,0b10001,0b10001,0b10001,0b11111 | t]), do: "▯" <> match(t)
def paint([ ]), do: 0
def paint([ {_, y} | t ]), do: (1 <<< y) + paint(t)
def part2(args) do
process(args, 999) |> Enum.sort() |> Enum.chunk_by(&elem(&1,0)) |> Enum.map(&paint/1)|> match()
end
end
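# Worked example of foldxy/2, matching the AoC 2021 day 13 sample: point
# {6, 10} folded along y=7 and then along x=5 lands on {4, 4}.
{4, 4} = Aoc2021.Day13.foldxy({6, 10}, [y: 7, x: 5])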
|
lib/day13.ex
| 0.581422
| 0.530601
|
day13.ex
|
starcoder
|
defmodule Depot do
@external_resource "README.md"
@moduledoc @external_resource
|> File.read!()
|> String.split("<!-- MDOC !-->")
|> Enum.fetch!(1)
@type adapter :: module()
@type filesystem :: {module(), Depot.Adapter.config()}
@doc """
Write to a filesystem
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
:ok = Depot.write(filesystem, "test.txt", "Hello World")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
LocalFileSystem.write("test.txt", "Hello World")
"""
@spec write(filesystem, Path.t(), iodata(), keyword()) :: :ok | {:error, term}
def write({adapter, config}, path, contents, opts \\ []) do
with {:ok, path} <- Depot.RelativePath.normalize(path) do
adapter.write(config, path, contents, opts)
end
end
@doc """
Returns a `Stream` for writing to the given `path`.
## Options
The following stream options apply to all adapters:
* `:chunk_size` - When reading, the amount to read,
usually expressed as a number of bytes.
## Examples
> Note: The shape of the returned stream will
> necessarily depend on the adapter in use. In the
> following examples the [`Local`](`Depot.Adapter.Local`)
> adapter is invoked, which returns a `File.Stream`.
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
{:ok, %File.Stream{}} = Depot.write_stream(filesystem, "test.txt")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
{:ok, %File.Stream{}} = LocalFileSystem.write_stream("test.txt")
"""
def write_stream({adapter, config}, path, opts \\ []) do
with {:ok, path} <- Depot.RelativePath.normalize(path) do
adapter.write_stream(config, path, opts)
end
end
@doc """
Read from a filesystem
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
{:ok, "Hello World"} = Depot.read(filesystem, "test.txt")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
{:ok, "Hello World"} = LocalFileSystem.read("test.txt")
"""
@spec read(filesystem, Path.t(), keyword()) :: {:ok, binary} | {:error, term}
def read({adapter, config}, path, _opts \\ []) do
with {:ok, path} <- Depot.RelativePath.normalize(path) do
adapter.read(config, path)
end
end
@doc """
Returns a `Stream` for reading the given `path`.
## Options
The following stream options apply to all adapters:
* `:chunk_size` - When reading, the amount to read,
usually expressed as a number of bytes.
## Examples
> Note: The shape of the returned stream will
> necessarily depend on the adapter in use. In the
> following examples the [`Local`](`Depot.Adapter.Local`)
> adapter is invoked, which returns a `File.Stream`.
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
{:ok, %File.Stream{}} = Depot.read_stream(filesystem, "test.txt")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
{:ok, %File.Stream{}} = LocalFileSystem.read_stream("test.txt")
"""
def read_stream({adapter, config}, path, opts \\ []) do
with {:ok, path} <- Depot.RelativePath.normalize(path) do
adapter.read_stream(config, path, opts)
end
end
@doc """
Delete a file from a filesystem
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
:ok = Depot.delete(filesystem, "test.txt")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
:ok = LocalFileSystem.delete("test.txt")
"""
@spec delete(filesystem, Path.t(), keyword()) :: :ok | {:error, term}
def delete({adapter, config}, path, _opts \\ []) do
with {:ok, path} <- Depot.RelativePath.normalize(path) do
adapter.delete(config, path)
end
end
@doc """
Move a file from source to destination on a filesystem
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
:ok = Depot.move(filesystem, "test.txt", "other-test.txt")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
:ok = LocalFileSystem.move("test.txt", "other-test.txt")
"""
@spec move(filesystem, Path.t(), Path.t(), keyword()) :: :ok | {:error, term}
def move({adapter, config}, source, destination, opts \\ []) do
with {:ok, source} <- Depot.RelativePath.normalize(source),
{:ok, destination} <- Depot.RelativePath.normalize(destination) do
adapter.move(config, source, destination, opts)
end
end
@doc """
Copy a file from source to destination on a filesystem
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
:ok = Depot.copy(filesystem, "test.txt", "other-test.txt")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
:ok = LocalFileSystem.copy("test.txt", "other-test.txt")
"""
@spec copy(filesystem, Path.t(), Path.t(), keyword()) :: :ok | {:error, term}
def copy({adapter, config}, source, destination, opts \\ []) do
with {:ok, source} <- Depot.RelativePath.normalize(source),
{:ok, destination} <- Depot.RelativePath.normalize(destination) do
adapter.copy(config, source, destination, opts)
end
end
@doc """
Copy a file from source to destination on a filesystem
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
:ok = Depot.copy(filesystem, "test.txt", "other-test.txt")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
:ok = LocalFileSystem.copy("test.txt", "other-test.txt")
"""
@spec file_exists(filesystem, Path.t(), keyword()) :: {:ok, :exists | :missing} | {:error, term}
def file_exists({adapter, config}, path, _opts \\ []) do
with {:ok, path} <- Depot.RelativePath.normalize(path) do
adapter.file_exists(config, path)
end
end
@doc """
List the contents of a folder on a filesystem
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
{:ok, contents} = Depot.list_contents(filesystem, ".")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
{:ok, contents} = LocalFileSystem.list_contents(".")
"""
@spec list_contents(filesystem, Path.t(), keyword()) ::
{:ok, [%Depot.Stat.Dir{} | %Depot.Stat.File{}]} | {:error, term}
def list_contents({adapter, config}, path, _opts \\ []) do
with {:ok, path} <- Depot.RelativePath.normalize(path) do
adapter.list_contents(config, path)
end
end
@doc """
Create a directory
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
:ok = Depot.create_directory(filesystem, "test/")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
LocalFileSystem.create_directory("test/")
"""
@spec create_directory(filesystem, Path.t(), keyword()) :: :ok | {:error, term}
def create_directory({adapter, config}, path, opts \\ []) do
with {:ok, path} <- Depot.RelativePath.normalize(path),
{:ok, path} <- Depot.RelativePath.assert_directory(path) do
adapter.create_directory(config, path, opts)
end
end
@doc """
Delete a directory.
## Options
* `:recursive` - Recursively delete contents. Defaults to `false`.
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
:ok = Depot.delete_directory(filesystem, "test/")
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
LocalFileSystem.delete_directory("test/")
"""
@spec delete_directory(filesystem, Path.t(), keyword()) :: :ok | {:error, term}
def delete_directory({adapter, config}, path, opts \\ []) do
with {:ok, path} <- Depot.RelativePath.normalize(path),
{:ok, path} <- Depot.RelativePath.assert_directory(path) do
adapter.delete_directory(config, path, opts)
end
end
@doc """
Clear the filesystem.
This is always recursive.
## Examples
### Direct filesystem
filesystem = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
:ok = Depot.clear(filesystem)
### Module-based filesystem
defmodule LocalFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
LocalFileSystem.clear()
"""
@spec clear(filesystem, keyword()) :: :ok | {:error, term}
def clear({adapter, config}, _opts \\ []) do
adapter.clear(config)
end
@spec set_visibility(filesystem, Path.t(), Depot.Visibility.t()) :: :ok | {:error, term}
def set_visibility({adapter, config}, path, visibility) do
with {:ok, path} <- Depot.RelativePath.normalize(path) do
adapter.set_visibility(config, path, visibility)
end
end
@spec visibility(filesystem, Path.t()) :: {:ok, Depot.Visibility.t()} | {:error, term}
def visibility({adapter, config}, path) do
with {:ok, path} <- Depot.RelativePath.normalize(path) do
adapter.visibility(config, path)
end
end
@doc """
Copy a file from one filesystem to the other
This is done natively if the same adapter is used for both filesystems and
supports it; otherwise the file is streamed (or read and rewritten) from the
source through the local system to the destination.
## Examples
### Direct filesystem
filesystem_source = Depot.Adapter.Local.configure(prefix: "/home/user/storage")
filesystem_destination = Depot.Adapter.Local.configure(prefix: "/home/user/storage2")
:ok = Depot.copy_between_filesystem({filesystem_source, "test.txt"}, {filesystem_destination, "copy.txt"})
### Module-based filesystem
defmodule LocalSourceFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage"
end
defmodule LocalDestinationFileSystem do
use Depot.Filesystem,
adapter: Depot.Adapter.Local,
prefix: "/home/user/storage2"
end
:ok = Depot.copy_between_filesystem(
{LocalSourceFileSystem.__filesystem__(), "test.txt"},
{LocalDestinationFileSystem.__filesystem__(), "copy.txt"}
)
"""
@spec copy_between_filesystem(
source :: {filesystem, Path.t()},
destination :: {filesystem, Path.t()},
keyword()
) :: :ok | {:error, term}
def copy_between_filesystem(source, destination, opts \\ [])
# Same adapter, same config -> just do a plain copy
def copy_between_filesystem({filesystem, source}, {filesystem, destination}, opts) do
copy(filesystem, source, destination, opts)
end
# Same adapter -> try direct copy if supported
def copy_between_filesystem(
{{adapter, config_source}, path_source} = source,
{{adapter, config_destination}, path_destination} = destination,
opts
) do
with :ok <-
adapter.copy(config_source, path_source, config_destination, path_destination, opts) do
:ok
else
{:error, :unsupported} -> copy_via_local_memory(source, destination, opts)
error -> error
end
end
# different adapter
def copy_between_filesystem(source, destination, opts) do
copy_via_local_memory(source, destination, opts)
end
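# The helper below picks the best available transfer strategy: stream-to-stream
# when both adapters support streaming, a one-sided stream when only one of
# them does, and a plain read/write cycle through local memory otherwise.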
defp copy_via_local_memory(
{{source_adapter, _} = source_filesystem, source_path},
{{destination_adapter, _} = destination_filesystem, destination_path},
opts
) do
case {Depot.read_stream(source_filesystem, source_path, opts),
Depot.write_stream(destination_filesystem, destination_path, opts)} do
# A and B support streaming -> Stream data
{{:ok, read_stream}, {:ok, write_stream}} ->
read_stream
|> Stream.into(write_stream)
|> Stream.run()
# Only A support streaming -> Stream to memory and write when done
{{:ok, read_stream}, {:error, ^destination_adapter}} ->
Depot.write(destination_filesystem, destination_path, Enum.into(read_stream, []))
# Only B support streaming -> Load into memory and stream to B
{{:error, ^source_adapter}, {:ok, write_stream}} ->
with {:ok, contents} <- Depot.read(source_filesystem, source_path) do
contents
|> chunk(Keyword.get(opts, :chunk_size, 5 * 1024))
|> Enum.into(write_stream)
end
# Neither support streaming
{{:error, ^source_adapter}, {:error, ^destination_adapter}} ->
with {:ok, contents} <- Depot.read(source_filesystem, source_path) do
Depot.write(destination_filesystem, destination_path, contents)
end
end
rescue
e -> {:error, e}
end
@doc false
# Also used by the InMemory adapter and therefore not private
def chunk("", _size), do: []
def chunk(binary, size) when byte_size(binary) >= size do
{chunk, rest} = :erlang.split_binary(binary, size)
[chunk | chunk(rest, size)]
end
def chunk(binary, _size), do: [binary]
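# Example (illustrative): chunk("0123456789", 4) yields ["0123", "4567", "89"].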
end
|
lib/depot.ex
|
defmodule FunLand.Reducible do
@moduledoc """
Anything that implements the Reducible behaviour can be reduced to a single value, when given a combinable (or a combining function plus a base value).
This is enough information to convert any reducible to a List.
It even is enough information to implement most enumerable methods.
However, what is _not_ possible is to stop halfway through the reduction.
Therefore, Reducible is a lot simpler than the Enumerable protocol.
For convenience, though, a very basic implementation of the Enumerable protocol is
automatically added when you `use Reducible`. This implementation first converts your Reducible
to a list, and then enumerates on that.
This is very convenient, but it _does_ mean that your *whole* reducible is first converted to a list.
This will therefore always be slower than a full-blown custom implementation that is specific for your structure.
If you want to implement your own version of Enumerable, add Reducible with `use FunLand.Reducible, auto_enumerable: false`.
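As a minimal sketch (illustrative only, not part of the library), a list-like
structure could implement the behaviour as follows:
defmodule MyList do
use FunLand.Reducible
def reduce([], acc, _fun), do: acc
def reduce([head | tail], acc, fun), do: reduce(tail, fun.(head, acc), fun)
end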
"""
@type reducible(a) :: FunLand.adt(a)
@callback reduce(reducible(a), acc, (a, acc -> acc)) :: acc when a: any, acc: any
defmacro __using__(opts) do
enum_protocol_implementation =
if Keyword.get(opts, :auto_enumerable, false) do
quote do
defimpl Enumerable do
# Enumerable requires count/1, member?/2, slice/1 and reduce/3; everything
# except reduce falls back to the default algorithms via {:error, __MODULE__}.
def count(_reducible), do: {:error, __MODULE__}
def member?(_reducible, _elem), do: {:error, __MODULE__}
def slice(_reducible), do: {:error, __MODULE__}
def reduce(reducible, acc, fun) do
reducible
|> @for.to_list
|> Enumerable.List.reduce(acc, fun)
end
end
end
else
quote do
end
end
unused_opts = Keyword.delete(opts, :auto_enumerable)
if unused_opts != [] do
IO.puts(
"Warning: `use FunLand.Reducible` does not understand options: #{inspect(unused_opts)}"
)
end
quote do
@behaviour FunLand.Reducible
unquote(enum_protocol_implementation)
@doc """
Converts the reducible into a list,
by building up a list from all elements, and in the end reversing it.
This is an automatic function implementation, made possible because #{inspect(__MODULE__)}
implements the `FunLand.Reducible` behaviour.
"""
def to_list(reducible) do
reducible
|> __MODULE__.reduce([], fn x, acc -> [x | acc] end)
|> :lists.reverse()
end
defoverridable to_list: 1
@doc """
A variant of reduce that accepts anything that is Combinable
as second argument. This Combinable will determine what the empty value and the
combining operation will be.
Pass in the combinable module name to start with `empty` as accumulator,
or the combinable as struct to use that as starting accumulator.
This is an automatic function implementation, made possible because #{inspect(__MODULE__)}
implements the `FunLand.Reducible` behaviour.
See `FunLand.Reducible.reduce/2` for examples.
"""
def reduce(a, combinable) do
reduce(a, FunLand.Combinable.empty(combinable), &FunLand.Combinable.combine/2)
end
end
end
def reduce(reducible, acc, fun)
# stdlib structs
for {stdlib_module, module} <- FunLand.Builtin.__stdlib_struct_modules__() do
def reduce(reducible = %unquote(stdlib_module){}, acc, fun) do
apply(unquote(module), :reduce, [reducible, acc, fun])
end
end
# custom structs
def reduce(reducible = %module{}, acc, fun) do
module.reduce(reducible, acc, fun)
end
use FunLand.Helper.GuardMacros
for {guard, module} <- FunLand.Builtin.__builtin__() do
def reduce(reducible, acc, fun) when unquote(guard)(reducible) do
apply(unquote(module), :reduce, [reducible, acc, fun])
end
end
@doc """
A variant of reduce that accepts anything that is Combinable
as second argument. This Combinable will determine what the empty value and the
combining operation will be.
Pass in the combinable module name to start with `empty` as accumulator,
or the combinable as struct to use that as starting accumulator.
This variant is provided at the behaviour level, so it works for any structure
that implements the `FunLand.Reducible` behaviour.
Some examples:
iex> ["the", "quick", "brown", "fox"] |> FunLand.Reducible.reduce("")
"thequickbrownfox"
Or given the following module:
defmodule Score do
defstruct [val: 0]
use FunLand.Combinable
def new(val), do: %Score{val: val}
def empty(), do: new(0)
def combine(%Score{val: a}, %Score{val: b}), do: new(a + b)
end
iex> [Score.new(10), Score.new(20), Score.new(42)] |> FunLand.Reducible.reduce(Score)
%Score{val: 72}
"""
# Using a Combinable
def reduce(a, combinable) do
reduce(a, FunLand.Combinable.empty(combinable), &FunLand.Combinable.combine/2)
end
end
|
lib/fun_land/reducible.ex
|
defmodule Tensorflow.GetStatusRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.GetStatusResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
device_attributes: [Tensorflow.DeviceAttributes.t()]
}
defstruct [:device_attributes]
field(:device_attributes, 1,
repeated: true,
type: Tensorflow.DeviceAttributes
)
end
defmodule Tensorflow.CreateWorkerSessionRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
server_def: Tensorflow.ServerDef.t() | nil,
isolate_session_state: boolean,
cluster_device_attributes: [Tensorflow.DeviceAttributes.t()],
master_task: String.t(),
master_incarnation: integer
}
defstruct [
:session_handle,
:server_def,
:isolate_session_state,
:cluster_device_attributes,
:master_task,
:master_incarnation
]
field(:session_handle, 1, type: :string)
field(:server_def, 2, type: Tensorflow.ServerDef)
field(:isolate_session_state, 3, type: :bool)
field(:cluster_device_attributes, 4,
repeated: true,
type: Tensorflow.DeviceAttributes
)
field(:master_task, 5, type: :string)
field(:master_incarnation, 6, type: :int64)
end
defmodule Tensorflow.CreateWorkerSessionResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.DeleteWorkerSessionRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t()
}
defstruct [:session_handle]
field(:session_handle, 1, type: :string)
end
defmodule Tensorflow.DeleteWorkerSessionResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.RegisterGraphRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
create_worker_session_called: boolean,
graph_def: Tensorflow.GraphDef.t() | nil,
has_control_flow: boolean,
graph_options: Tensorflow.GraphOptions.t() | nil,
debug_options: Tensorflow.DebugOptions.t() | nil,
collective_graph_key: integer,
config_proto: Tensorflow.ConfigProto.t() | nil
}
defstruct [
:session_handle,
:create_worker_session_called,
:graph_def,
:has_control_flow,
:graph_options,
:debug_options,
:collective_graph_key,
:config_proto
]
field(:session_handle, 1, type: :string)
field(:create_worker_session_called, 6, type: :bool)
field(:graph_def, 2, type: Tensorflow.GraphDef)
field(:has_control_flow, 3, type: :bool, deprecated: true)
field(:graph_options, 4, type: Tensorflow.GraphOptions)
field(:debug_options, 5, type: Tensorflow.DebugOptions)
field(:collective_graph_key, 7, type: :int64)
field(:config_proto, 8, type: Tensorflow.ConfigProto)
end
defmodule Tensorflow.RegisterGraphResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
graph_handle: String.t()
}
defstruct [:graph_handle]
field(:graph_handle, 1, type: :string)
end
defmodule Tensorflow.DeregisterGraphRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
create_worker_session_called: boolean,
graph_handle: String.t()
}
defstruct [:session_handle, :create_worker_session_called, :graph_handle]
field(:session_handle, 2, type: :string)
field(:create_worker_session_called, 3, type: :bool)
field(:graph_handle, 1, type: :string)
end
defmodule Tensorflow.DeregisterGraphResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.CleanupAllRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
container: [String.t()]
}
defstruct [:container]
field(:container, 1, repeated: true, type: :string)
end
defmodule Tensorflow.CleanupAllResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.ExecutorOpts do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
record_costs: boolean,
record_timeline: boolean,
record_partition_graphs: boolean,
report_tensor_allocations_upon_oom: boolean
}
defstruct [
:record_costs,
:record_timeline,
:record_partition_graphs,
:report_tensor_allocations_upon_oom
]
field(:record_costs, 1, type: :bool)
field(:record_timeline, 3, type: :bool)
field(:record_partition_graphs, 4, type: :bool)
field(:report_tensor_allocations_upon_oom, 5, type: :bool)
end
defmodule Tensorflow.RunGraphRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
create_worker_session_called: boolean,
graph_handle: String.t(),
step_id: integer,
exec_opts: Tensorflow.ExecutorOpts.t() | nil,
send: [Tensorflow.NamedTensorProto.t()],
recv_key: [String.t()],
is_partial: boolean,
is_last_partial_run: boolean,
store_errors_in_response_body: boolean,
request_id: integer
}
defstruct [
:session_handle,
:create_worker_session_called,
:graph_handle,
:step_id,
:exec_opts,
:send,
:recv_key,
:is_partial,
:is_last_partial_run,
:store_errors_in_response_body,
:request_id
]
field(:session_handle, 8, type: :string)
field(:create_worker_session_called, 10, type: :bool)
field(:graph_handle, 1, type: :string)
field(:step_id, 2, type: :int64)
field(:exec_opts, 5, type: Tensorflow.ExecutorOpts)
field(:send, 3, repeated: true, type: Tensorflow.NamedTensorProto)
field(:recv_key, 4, repeated: true, type: :string)
field(:is_partial, 6, type: :bool)
field(:is_last_partial_run, 7, type: :bool)
field(:store_errors_in_response_body, 9, type: :bool)
field(:request_id, 11, type: :int64)
end
defmodule Tensorflow.RunGraphResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
recv: [Tensorflow.NamedTensorProto.t()],
step_stats: Tensorflow.StepStats.t() | nil,
cost_graph: Tensorflow.CostGraphDef.t() | nil,
partition_graph: [Tensorflow.GraphDef.t()],
status_code: Tensorflow.Error.Code.t(),
status_error_message: String.t()
}
defstruct [
:recv,
:step_stats,
:cost_graph,
:partition_graph,
:status_code,
:status_error_message
]
field(:recv, 1, repeated: true, type: Tensorflow.NamedTensorProto)
field(:step_stats, 2, type: Tensorflow.StepStats)
field(:cost_graph, 3, type: Tensorflow.CostGraphDef)
field(:partition_graph, 4, repeated: true, type: Tensorflow.GraphDef)
field(:status_code, 5, type: Tensorflow.Error.Code, enum: true)
field(:status_error_message, 6, type: :string)
end
defmodule Tensorflow.CleanupGraphRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer
}
defstruct [:step_id]
field(:step_id, 1, type: :int64)
end
defmodule Tensorflow.CleanupGraphResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.RecvTensorRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer,
rendezvous_key: String.t(),
dma_ok: boolean,
client_locality: Tensorflow.DeviceLocality.t() | nil,
server_locality: Tensorflow.DeviceLocality.t() | nil,
transport_options: Google.Protobuf.Any.t() | nil,
request_id: integer
}
defstruct [
:step_id,
:rendezvous_key,
:dma_ok,
:client_locality,
:server_locality,
:transport_options,
:request_id
]
field(:step_id, 1, type: :int64)
field(:rendezvous_key, 2, type: :string)
field(:dma_ok, 3, type: :bool)
field(:client_locality, 4, type: Tensorflow.DeviceLocality)
field(:server_locality, 5, type: Tensorflow.DeviceLocality)
field(:transport_options, 6, type: Google.Protobuf.Any)
field(:request_id, 7, type: :int64)
end
defmodule Tensorflow.RecvTensorResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
tensor: Tensorflow.TensorProto.t() | nil,
is_dead: boolean,
send_start_micros: integer,
transport_options: Google.Protobuf.Any.t() | nil,
require_ack: boolean
}
defstruct [
:tensor,
:is_dead,
:send_start_micros,
:transport_options,
:require_ack
]
field(:tensor, 1, type: Tensorflow.TensorProto)
field(:is_dead, 2, type: :bool)
field(:send_start_micros, 3, type: :int64)
field(:transport_options, 4, type: Google.Protobuf.Any)
field(:require_ack, 5, type: :bool)
end
defmodule Tensorflow.MarkRecvFinishedRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
request_id: integer
}
defstruct [:request_id]
field(:request_id, 1, type: :int64)
end
defmodule Tensorflow.MarkRecvFinishedResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.LoggingRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
enable_rpc_logging: boolean,
disable_rpc_logging: boolean,
clear: boolean,
fetch_step_id: [integer]
}
defstruct [
:enable_rpc_logging,
:disable_rpc_logging,
:clear,
:fetch_step_id
]
field(:enable_rpc_logging, 1, type: :bool)
field(:disable_rpc_logging, 4, type: :bool)
field(:clear, 2, type: :bool)
field(:fetch_step_id, 3, repeated: true, type: :int64)
end
defmodule Tensorflow.LabeledStepStats do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer,
step_stats: Tensorflow.StepStats.t() | nil
}
defstruct [:step_id, :step_stats]
field(:step_id, 1, type: :int64)
field(:step_stats, 2, type: Tensorflow.StepStats)
end
defmodule Tensorflow.LoggingResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step: [Tensorflow.LabeledStepStats.t()]
}
defstruct [:step]
field(:step, 1, repeated: true, type: Tensorflow.LabeledStepStats)
end
defmodule Tensorflow.TraceOpts do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
duration: float | :infinity | :negative_infinity | :nan,
use_step_profiler: boolean,
use_kernel_profiler: boolean,
use_extended_profiler: boolean,
use_gpu_profiler: boolean,
use_sample_profiler: boolean
}
defstruct [
:duration,
:use_step_profiler,
:use_kernel_profiler,
:use_extended_profiler,
:use_gpu_profiler,
:use_sample_profiler
]
field(:duration, 1, type: :double)
field(:use_step_profiler, 2, type: :bool)
field(:use_kernel_profiler, 3, type: :bool)
field(:use_extended_profiler, 4, type: :bool)
field(:use_gpu_profiler, 5, type: :bool)
field(:use_sample_profiler, 6, type: :bool)
end
defmodule Tensorflow.TracingRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
options: Tensorflow.TraceOpts.t() | nil
}
defstruct [:options]
field(:options, 1, type: Tensorflow.TraceOpts)
end
defmodule Tensorflow.TracingResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.RecvBufRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer,
buf_rendezvous_key: String.t(),
num_bytes: integer,
buf_ptr: non_neg_integer,
client_locality: Tensorflow.DeviceLocality.t() | nil,
server_locality: Tensorflow.DeviceLocality.t() | nil,
transport_options: Google.Protobuf.Any.t() | nil,
src_device: String.t(),
dst_device: String.t(),
request_id: integer,
src_incarnation: non_neg_integer
}
defstruct [
:step_id,
:buf_rendezvous_key,
:num_bytes,
:buf_ptr,
:client_locality,
:server_locality,
:transport_options,
:src_device,
:dst_device,
:request_id,
:src_incarnation
]
field(:step_id, 1, type: :int64)
field(:buf_rendezvous_key, 2, type: :string)
field(:num_bytes, 3, type: :int64)
field(:buf_ptr, 4, type: :fixed64)
field(:client_locality, 5, type: Tensorflow.DeviceLocality)
field(:server_locality, 6, type: Tensorflow.DeviceLocality)
field(:transport_options, 7, type: Google.Protobuf.Any)
field(:src_device, 8, type: :string)
field(:dst_device, 9, type: :string)
field(:request_id, 10, type: :int64)
field(:src_incarnation, 11, type: :uint64)
end
defmodule Tensorflow.RecvBufResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
buf_ptr: non_neg_integer,
num_bytes: integer,
is_dead: boolean,
transport_options: Google.Protobuf.Any.t() | nil,
send_start_micros: integer,
require_ack: boolean
}
defstruct [
:buf_ptr,
:num_bytes,
:is_dead,
:transport_options,
:send_start_micros,
:require_ack
]
field(:buf_ptr, 1, type: :fixed64)
field(:num_bytes, 2, type: :int64)
field(:is_dead, 3, type: :bool)
field(:transport_options, 4, type: Google.Protobuf.Any)
field(:send_start_micros, 5, type: :int64)
field(:require_ack, 6, type: :bool)
end
defmodule Tensorflow.CompleteGroupRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
group_key: integer,
group_size: integer,
device_type: String.t(),
collective_type: integer,
device_attributes: Tensorflow.DeviceAttributes.t() | nil
}
defstruct [
:group_key,
:group_size,
:device_type,
:collective_type,
:device_attributes
]
field(:group_key, 1, type: :int32)
field(:group_size, 2, type: :int32)
field(:device_type, 3, type: :string)
field(:collective_type, 5, type: :int32)
field(:device_attributes, 6, type: Tensorflow.DeviceAttributes)
end
defmodule Tensorflow.CompleteGroupResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
group_key: integer,
group_size: integer,
device_type: String.t(),
num_tasks: integer,
communicator_key: binary,
device_attributes: [Tensorflow.DeviceAttributes.t()]
}
defstruct [
:group_key,
:group_size,
:device_type,
:num_tasks,
:communicator_key,
:device_attributes
]
field(:group_key, 1, type: :int32)
field(:group_size, 2, type: :int32)
field(:device_type, 3, type: :string)
field(:num_tasks, 4, type: :int32)
field(:communicator_key, 7, type: :bytes)
field(:device_attributes, 8,
repeated: true,
type: Tensorflow.DeviceAttributes
)
end
defmodule Tensorflow.CompleteInstanceRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
name: String.t(),
type: integer,
data_type: Tensorflow.DataType.t(),
shape: Tensorflow.TensorShapeProto.t() | nil,
group_key: integer,
group_size: integer,
instance_key: integer,
device_type: String.t(),
subdiv_offset: [integer],
device: String.t(),
is_source: boolean
}
defstruct [
:name,
:type,
:data_type,
:shape,
:group_key,
:group_size,
:instance_key,
:device_type,
:subdiv_offset,
:device,
:is_source
]
field(:name, 1, type: :string)
field(:type, 2, type: :int32)
field(:data_type, 3, type: Tensorflow.DataType, enum: true)
field(:shape, 4, type: Tensorflow.TensorShapeProto)
field(:group_key, 5, type: :int32)
field(:group_size, 6, type: :int32)
field(:instance_key, 7, type: :int32)
field(:device_type, 8, type: :string)
field(:subdiv_offset, 9, repeated: true, type: :int32)
field(:device, 10, type: :string)
field(:is_source, 11, type: :bool)
end
defmodule Tensorflow.CompleteInstanceResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
instance_key: integer,
source_rank: integer
}
defstruct [:instance_key, :source_rank]
field(:instance_key, 1, type: :int32)
field(:source_rank, 2, type: :int32)
end
defmodule Tensorflow.GetStepSequenceRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
graph_key: [integer]
}
defstruct [:graph_key]
field(:graph_key, 1, repeated: true, type: :int64)
end
defmodule Tensorflow.StepSequence do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
graph_key: integer,
next_step_id: integer
}
defstruct [:graph_key, :next_step_id]
field(:graph_key, 1, type: :int64)
field(:next_step_id, 2, type: :int64)
end
defmodule Tensorflow.GetStepSequenceResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_sequence: [Tensorflow.StepSequence.t()]
}
defstruct [:step_sequence]
field(:step_sequence, 1, repeated: true, type: Tensorflow.StepSequence)
end
|
lib/tensorflow/core/protobuf/worker.pb.ex
|
if Code.ensure_loaded?(Broadway) do
defmodule PromEx.Plugins.Broadway do
@moduledoc """
This plugin captures metrics emitted by Broadway.
This plugin exposes the following metric groups:
- `:broadway_init_event_metrics`
- `:broadway_message_event_metrics`
- `:broadway_batch_event_metrics`
To use plugin in your application, add the following to your PromEx module:
```
defmodule WebApp.PromEx do
use PromEx, otp_app: :web_app
@impl true
def plugins do
[
...
PromEx.Plugins.Broadway
]
end
@impl true
def dashboards do
[
...
{:prom_ex, "broadway.json"}
]
end
end
```
To correctly capture per-message metrics and error rate, add the following transform to your pipeline:
```
defmodule WebApp.MyPipeline do
use Broadway
alias Broadway.Message
def start_link(_opts) do
Broadway.start_link(__MODULE__,
name: __MODULE__,
producer: [
...
transformer: {__MODULE__, :transform, []}
]
)
end
def transform(event, _opts) do
%Message{
data: event,
acknowledger: {__MODULE__, :ack_id, :ack_data}
}
end
end
```
"""
use PromEx.Plugin
require Logger
alias Broadway.{BatchInfo, Message, Options}
alias PromEx.Utils
@millisecond_duration_buckets [10, 100, 500, 1_000, 10_000, 30_000, 60_000]
@message_batch_size_buckets [1, 5, 10, 20, 50, 100]
@init_topology_event [:broadway, :topology, :init]
@message_stop_event [:broadway, :processor, :message, :stop]
@message_exception_event [:broadway, :processor, :message, :exception]
@batch_stop_event [:broadway, :batch_processor, :stop]
@init_topology_processors_proxy_event [:prom_ex, :broadway, :proxy, :processor, :init]
@init_topology_batchers_proxy_event [:prom_ex, :broadway, :proxy, :batcher, :init]
@impl true
def event_metrics(opts) do
otp_app = Keyword.fetch!(opts, :otp_app)
metric_prefix = Keyword.get(opts, :metric_prefix, PromEx.metric_prefix(otp_app, :broadway))
# Telemetry metrics will emit warnings if multiple handlers with the same names are defined.
# As a result, this plugin supports gathering metrics on multiple processors and batchers, but needs
# to proxy them so as not to create multiple definitions of the same metrics. The final data point will
# have a label for the module associated with the event though so you'll be able to separate one
# measurement from another.
set_up_telemetry_proxies(@init_topology_event, otp_app)
# Event metrics definitions
[
topology_init_events(metric_prefix),
handle_message_events(metric_prefix),
handle_batch_events(metric_prefix)
]
end
defp topology_init_events(metric_prefix) do
Event.build(
:broadway_init_event_metrics,
[
last_value(
metric_prefix ++ [:init, :status, :info],
event_name: @init_topology_event,
measurement: fn _measurements -> 1 end,
description: "The topology configuration data that was provided to Broadway.",
tags: [:name],
tag_values: &extract_init_tag_values/1
),
last_value(
metric_prefix ++ [:init, :hibernate_after, :default, :milliseconds],
event_name: @init_topology_event,
description: "The Broadway supervisor's hibernate after default value.",
measurement: extract_default_config_measurement(:hibernate_after),
tags: [:name],
tag_values: &extract_init_tag_values/1
),
last_value(
metric_prefix ++ [:init, :resubscribe_interval, :default, :milliseconds],
event_name: @init_topology_event,
description: "The Broadway supervisor's resubscribe interval default value.",
measurement: extract_default_config_measurement(:resubscribe_interval),
tags: [:name],
tag_values: &extract_init_tag_values/1
),
last_value(
metric_prefix ++ [:init, :max, :duration, :default, :milliseconds],
event_name: @init_topology_event,
description: "The Broadway supervisor's max seconds default value (in milliseconds).",
measurement: extract_default_config_measurement(:max_seconds),
tags: [:name],
tag_values: &extract_init_tag_values/1,
unit: {:second, :millisecond}
),
last_value(
metric_prefix ++ [:init, :max_restarts, :default, :value],
event_name: @init_topology_event,
description: "The Broadway supervisor's max restarts default value.",
measurement: extract_default_config_measurement(:max_restarts),
tags: [:name],
tag_values: &extract_init_tag_values/1
),
last_value(
metric_prefix ++ [:init, :shutdown, :default, :milliseconds],
event_name: @init_topology_event,
description: "The Broadway supervisor's shutdown default value.",
measurement: extract_default_config_measurement(:shutdown),
tags: [:name],
tag_values: &extract_init_tag_values/1
),
last_value(
metric_prefix ++ [:init, :processor, :hibernate_after, :milliseconds],
event_name: @init_topology_processors_proxy_event,
description: "The Broadway processors hibernate after value.",
measurement: fn _measurements, %{hibernate_after: hibernate_after} -> hibernate_after end,
tags: [:name, :processor]
),
last_value(
metric_prefix ++ [:init, :processor, :max_demand, :value],
event_name: @init_topology_processors_proxy_event,
description: "The Broadway processors max demand value.",
measurement: fn _measurements, %{max_demand: max_demand} -> max_demand end,
tags: [:name, :processor]
),
last_value(
metric_prefix ++ [:init, :processor, :concurrency, :value],
event_name: @init_topology_processors_proxy_event,
description: "The Broadway processors concurrency value.",
measurement: fn _measurements, %{concurrency: concurrency} -> concurrency end,
tags: [:name, :processor]
),
last_value(
metric_prefix ++ [:init, :batcher, :hibernate_after, :milliseconds],
event_name: @init_topology_batchers_proxy_event,
description: "The Broadway batchers hibernate after value.",
measurement: fn _measurements, %{hibernate_after: hibernate_after} -> hibernate_after end,
tags: [:name, :batcher]
),
last_value(
metric_prefix ++ [:init, :batcher, :concurrency, :value],
event_name: @init_topology_batchers_proxy_event,
description: "The Broadway batchers concurrency value.",
measurement: fn _measurements, %{concurrency: concurrency} -> concurrency end,
tags: [:name, :batcher]
),
last_value(
metric_prefix ++ [:init, :batcher, :batch_size, :value],
event_name: @init_topology_batchers_proxy_event,
description: "The Broadway batchers batch size value.",
measurement: fn _measurements, %{batch_size: batch_size} -> batch_size end,
tags: [:name, :batcher]
),
last_value(
metric_prefix ++ [:init, :batcher, :batch_timeout, :milliseconds],
event_name: @init_topology_batchers_proxy_event,
description: "The Broadway batchers timeout value.",
measurement: fn _measurements, %{batch_timeout: batch_timeout} -> batch_timeout end,
tags: [:name, :batcher]
)
]
)
end
@doc false
def proxy_broadway_init_event(_event_name, _measurements, %{config: config}, _config) do
# Invoking Broadway module
broadway_module =
config
|> Keyword.fetch!(:name)
|> Utils.normalize_module_name()
# Extract all of the processors and proxy for each processor
config
|> Keyword.get(:processors, [])
|> Enum.each(fn {processor, processor_options} ->
metadata =
processor_options
|> Map.new()
|> Map.put(:processor, processor)
|> Map.put(:name, broadway_module)
:telemetry.execute(@init_topology_processors_proxy_event, %{}, metadata)
end)
# Extract all of the batchers and proxy for each batcher
config
|> Keyword.get(:batchers, [])
|> Enum.each(fn {batcher, batcher_options} ->
metadata =
batcher_options
|> Map.new()
|> Map.put(:batcher, batcher)
|> Map.put(:name, broadway_module)
:telemetry.execute(@init_topology_batchers_proxy_event, %{}, metadata)
end)
end
defp set_up_telemetry_proxies(init_topology_event, otp_app) do
:telemetry.attach(
[:prom_ex, :broadway, :proxy, otp_app],
init_topology_event,
&__MODULE__.proxy_broadway_init_event/4,
%{}
)
end
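# The measurement below first runs the raw init config through NimbleOptions
# validation so that default values are filled in before the requested field is read.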
defp extract_default_config_measurement(field) do
fn _measurements, %{config: config} ->
config
|> NimbleOptions.validate!(Options.definition())
|> Map.new()
|> Map.get(field)
end
end
defp handle_message_events(metric_prefix) do
Event.build(
:broadway_message_event_metrics,
[
distribution(
metric_prefix ++ [:process, :message, :duration, :milliseconds],
event_name: @message_stop_event,
measurement: :duration,
description: "The time it takes Broadway to process a message.",
reporter_options: [
buckets: @millisecond_duration_buckets
],
tags: [:processor_key, :name],
tag_values: fn %{processor_key: processor_key, message: %Message{acknowledger: {acknowledger, _, _}}} ->
%{
processor_key: processor_key,
name: Utils.normalize_module_name(acknowledger)
}
end,
unit: {:native, :millisecond}
),
distribution(
metric_prefix ++ [:process, :message, :exception, :duration, :milliseconds],
event_name: @message_exception_event,
measurement: :duration,
description: "The time it takes Broadway to process a message that results in an error.",
reporter_options: [
buckets: @millisecond_duration_buckets
],
tags: [:processor_key, :name, :kind, :reason],
tag_values: &extract_exception_tag_values/1,
unit: {:native, :millisecond}
)
]
)
end
defp handle_batch_events(metric_prefix) do
Event.build(
:broadway_batch_event_metrics,
[
distribution(
metric_prefix ++ [:process, :batch, :duration, :milliseconds],
event_name: @batch_stop_event,
measurement: :duration,
description: "The time it takes Broadway to process a batch of messages.",
reporter_options: [
buckets: @millisecond_duration_buckets
],
tags: [:batcher, :name],
tag_values: &extract_batcher_tag_values/1,
unit: {:native, :millisecond}
),
distribution(
metric_prefix ++ [:process, :batch, :failure, :size],
event_name: @batch_stop_event,
measurement: fn _measurements, metadata ->
length(metadata.failed_messages)
end,
description: "How many of the messages in the batch failed to process.",
reporter_options: [
buckets: @message_batch_size_buckets
],
tags: [:batcher, :name],
tag_values: &extract_batcher_tag_values/1
),
distribution(
metric_prefix ++ [:process, :batch, :success, :size],
event_name: @batch_stop_event,
measurement: fn _measurements, metadata ->
length(metadata.successful_messages)
end,
description: "How many of the messages in the batch were successfully processed.",
reporter_options: [
buckets: @message_batch_size_buckets
],
tags: [:batcher, :name],
tag_values: &extract_batcher_tag_values/1
)
]
)
end
defp extract_batcher_tag_values(%{batch_info: batch_info = %BatchInfo{}, topology_name: name}) do
%{
name: Utils.normalize_module_name(name),
batch_key: batch_info.batch_key,
batcher: batch_info.batcher
}
end
defp extract_init_tag_values(metadata) do
full_configuration =
metadata.config
|> NimbleOptions.validate!(Broadway.Options.definition())
|> Map.new()
%{
name: Utils.normalize_module_name(full_configuration.name)
}
end
defp extract_exception_tag_values(%{
processor_key: processor_key,
kind: kind,
reason: reason,
stacktrace: stacktrace,
message: %Message{acknowledger: {acknowledger, _, _}}
}) do
reason = Utils.normalize_exception(kind, reason, stacktrace)
%{
processor_key: processor_key,
kind: kind,
reason: reason,
name: Utils.normalize_module_name(acknowledger)
}
end
end
else
defmodule PromEx.Plugins.Broadway do
@moduledoc false
use PromEx.Plugin
@impl true
def event_metrics(_opts) do
PromEx.Plugin.no_dep_raise(__MODULE__, "Broadway")
end
end
end
|
lib/prom_ex/plugins/broadway.ex
|
defmodule TelemetryInfluxDB do
alias TelemetryInfluxDB.BatchReporter
alias TelemetryInfluxDB.BatchHandler
alias TelemetryInfluxDB.EventHandler
alias TelemetryInfluxDB.HTTP
alias TelemetryInfluxDB.UDP
require Logger
@moduledoc """
`Telemetry` reporter for InfluxDB compatible events.
To use it, start the reporter with the `start_link/1` function, providing it a list of
`Telemetry` event names:
```elixir
TelemetryMetricsInfluxDB.start_link(
events: [
%{name: [:memory, :usage], metadata_tag_keys: [:host, :ip_address]},
%{name: [:http, :request]},
]
)
```
> Note that in a real project the reporter should be started under a supervisor, e.g. the main
> supervisor of your application.
By default, the reporter sends events through UDP to localhost:8089.
Note that the reporter doesn't aggregate events in-process - it sends updates to InfluxDB
whenever a relevant Telemetry event is emitted.
#### Configuration
Possible options for the reporter:
Options for any InfluxDB version:
* `:version` - :v1 or :v2. The version of InfluxDB to use; defaults to :v1 if not provided
* `:reporter_name` - unique name for the reporter. The purpose is to distinguish between different reporters running in the system. One can run separate independent InfluxDB reporters, with different configurations and goals.
* `:batch_size` - maximum number of events to send to InfluxDB in a single batch (default 1: no batching)
* `:protocol` - :udp or :http. Which protocol to use for connecting to InfluxDB. Default option is :udp. InfluxDB v2 only supports :http for now.
* `:host` - host, where InfluxDB is running.
* `:port` - port, where InfluxDB is running.
* `:events` - list of `Telemetry` events' names that we want to send to InfluxDB.
Each event should be specified by the map with the field `name`, e.g. %{name: [:sample, :event, :name]}.
Event names should be compatible with `Telemetry` events' format.
It is also possible to specify an optional list of metadata keys that will be included in the event body and sent to InfluxDB as tags.
The list of metadata keys should be specified in the event data with the field `metadata_tag_keys`, e.g. %{name: [:sample, :event, :name], metadata_tag_keys: [:sample_meta, :sample_meta2]}
* `:tags` - list of global static tags, that will be attached to each reported event. The format is a map,
where the key and the value are tag's name and value, respectively.
Both the tag's name and the value could be atoms or binaries.
V1 Only Options
* `:db` - name of the location where time series data is stored in InfluxDB v1
* `:username` - username of an InfluxDB user that has write privileges. Only required in v1.
* `:password` - password for the user above. Only required in v1.
V2 Only Options
* `:bucket` - name of the location where time series data is stored in InfluxDB v2
* `:org` - workspace in InfluxDB v2 where a bucket belongs
* `:token` - InfluxDB v2 authentication token used for authenticating requests. Must have write privileges to the bucket and org specified.
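For example, a v2 reporter over HTTP could be started as follows (a sketch; host,
port, bucket, org and token are placeholder values):
```elixir
TelemetryInfluxDB.start_link(
version: :v2,
protocol: :http,
host: "localhost",
port: 8086,
bucket: "my_bucket",
org: "my_org",
token: "my-token",
events: [%{name: [:http, :request]}]
)
```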
#### Notes
For the HTTP protocol, [worker_pool](https://github.com/inaka/worker_pool) is used for sending requests asynchronously.
Therefore the HTTP requests are sent in the context of the separate workers' pool, which does not block the client's application
(it is not sent in the critical path of the client's process).
Unless a `:batch_size` greater than 1 is configured, events are sent straight away without any batching.
On the other hand, UDP packets are sent in the context of the processes that execute the events.
However, the lightweight nature of UDP should not cause any bottlenecks in such a solution.
Once the reporter is started, it is attached to specified `Telemetry` events.
The events are detached when the reporter is shutdown.
"""
@default_port 8089
@type option ::
{:port, :inet.port_number()}
| {:host, String.t()}
| {:protocol, atom()}
| {:reporter_name, binary()}
| {:batch_size, non_neg_integer()}
| {:version, atom()}
| {:db, String.t()}
| {:org, String.t()}
| {:bucket, String.t()}
| {:username, String.t()}
| {:password, String.t()}
| {:token, String.t()}
| {:events, [event]}
| {:tags, tags}
| {:worker_pool_size, non_neg_integer()}
@type options :: [option]
@type event :: %{required(:name) => :telemetry.event_name()}
@type tags :: map()
@type event_spec() :: map()
@type event_name() :: [atom()]
@type event_measurements :: map()
@type event_metadata :: map()
@type config :: map()
@type handler_id() :: term()
@spec start_link(options) :: GenServer.on_start()
def start_link(options) do
config =
options
|> Enum.into(%{})
|> Map.put_new(:reporter_name, "default")
|> Map.put_new(:batch_size, 1)
|> Map.put_new(:protocol, :udp)
|> Map.put_new(:host, "localhost")
|> Map.put_new(:port, @default_port)
|> Map.put_new(:tags, %{})
|> Map.put_new(:worker_pool_size, 3)
|> Map.put_new(:version, :v1)
|> validate_required!([:events])
|> validate_event_fields!()
|> validate_protocol!()
|> validate_version_params!()
|> add_publisher()
create_ets(config.reporter_name)
specs = child_specs(config.protocol, config)
Supervisor.start_link(specs, strategy: :one_for_all)
end
defp add_publisher(%{protocol: :http} = config) do
Map.put(config, :publisher, HTTP.Publisher)
end
defp add_publisher(%{protocol: :udp} = config) do
Map.put(config, :publisher, UDP.Publisher)
end
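# Creating the reporter's ETS table is effectively idempotent: if the named
# table already exists, the ArgumentError raised by :ets.new/2 is rescued and ignored.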
defp create_ets(prefix) do
try do
:ets.new(table_name(prefix), [:set, :public, :named_table])
rescue
_ ->
:ok
end
end
defp table_name(prefix) do
:erlang.binary_to_atom(prefix <> "_influx_reporter", :utf8)
end
def stop(pid) do
Supervisor.stop(pid)
end
defp child_specs(protocol, config) do
publisher_child_specs(protocol, config) ++ common_child_specs(config)
end
defp publisher_child_specs(:http, config), do: [HTTP.Pool.child_spec(config)]
defp publisher_child_specs(:udp, config),
do: [%{id: UDP.Connector, start: {UDP.Connector, :start_link, [config]}}]
defp common_child_specs(config) do
[
%{id: EventHandler, start: {EventHandler, :start_link, [config]}},
%{id: BatchReporter, start: {BatchReporter, :start_link, [batch_reporter_options(config)]}}
]
end
def batch_reporter_options(config) do
[
name: BatchReporter.get_name(config),
batch_size: config.batch_size,
report_fn: &BatchHandler.handle_batch/1
]
end
defp validate_protocol!(%{protocol: :udp} = opts), do: opts
defp validate_protocol!(%{protocol: :http} = opts), do: opts
defp validate_protocol!(_) do
raise(ArgumentError, "protocol has to be :udp or :http")
end
defp validate_version_params!(%{version: :v2} = opts), do: validate_v2_params!(opts)
defp validate_version_params!(%{version: :v1} = opts), do: validate_v1_params!(opts)
defp validate_version_params!(_opts) do
raise(
ArgumentError,
"version must be :v1 or :v2"
)
end
defp validate_v2_params!(%{protocol: :http, org: _org, bucket: _bucket, token: _token} = opts),
do: opts
defp validate_v2_params!(%{protocol: :udp}) do
raise(
ArgumentError,
"the udp protocol is not currently supported for InfluxDB v2; please use http instead"
)
end
defp validate_v2_params!(_) do
raise(ArgumentError, "for InfluxDB v2 you need to specify :bucket, :org, and :token fields")
end
defp validate_v1_params!(%{protocol: :udp} = opts), do: opts
defp validate_v1_params!(%{protocol: :http, db: _db} = opts), do: opts
defp validate_v1_params!(_),
do: raise(ArgumentError, "for http protocol in v1 you need to specify :db field")
defp validate_event_fields!(%{events: []}) do
raise(ArgumentError, "you need to attach to at least one event")
end
defp validate_event_fields!(%{events: events} = opts) when is_list(events) do
Enum.map(events, &validate_required!(&1, :name))
opts
end
defp validate_event_fields!(%{events: _}) do
raise(ArgumentError, ":events needs to be list of events")
end
defp validate_required!(opts, fields) when is_list(fields) do
Enum.map(fields, &validate_required!(opts, &1))
opts
end
defp validate_required!(opts, field) do
case Map.has_key?(opts, field) do
true ->
opts
false ->
raise(ArgumentError, "#{inspect(field)} field needs to be specified")
end
end
end
|
lib/telemetry_influx_db.ex
|
defmodule LRU do
require Cbuf.Map, as: Buf
defstruct impl: %{}, ages: nil, size: 0, count: 0
@doc """
Make a new LRU cache of a given size backed by a map-based circular buffer.
iex> LRU.new(5)
#LRU<%{}>
"""
def new(size) when size > 0 do
%__MODULE__{impl: %{}, ages: Buf.new(size), size: size, count: 0}
end
@doc """
Get the value stored under `key`, marking it as the most recently used entry.
iex> cache = LRU.new(3)
iex> cache = cache |> LRU.put(:a, 1) |> LRU.put(:b, 2) |> LRU.put(:c, 3) |> LRU.put(:d, 4)
iex> {_cache, value} = LRU.get(cache, :b)
iex> value
2
iex> cache = LRU.new(3)
iex> cache = cache |> LRU.put(:a, 1) |> LRU.put(:b, 2) |> LRU.put(:c, 3) |> LRU.put(:d, 4)
iex> {cache, _value} = LRU.get(cache, :b)
iex> cache |> LRU.put(:z, 99)
#LRU<%{b: 2, d: 4, z: 99}>
"""
def get(cache, key) do
ages = cache.ages |> Buf.delete_value(key) |> Buf.insert(key)
cache = %{cache | ages: ages}
value = Map.get(cache.impl, key)
{cache, value}
end
@doc """
Put `value` under `key`, evicting the least recently used entry when the cache is at capacity.
iex> cache = LRU.new(3)
iex> cache |> LRU.put(:a, 1) |> LRU.put(:b, 2) |> LRU.put(:c, 3) |> LRU.put(:d, 4)
#LRU<%{b: 2, c: 3, d: 4}>
iex> cache = LRU.new(3)
iex> cache |> LRU.put(:a, 1) |> LRU.put(:b, 2) |> LRU.put(:c, 3) |> LRU.put(:d, 4) |> LRU.put(:d, 5)
#LRU<%{b: 2, c: 3, d: 5}>
iex> cache = LRU.new(3)
iex> {c, v} = cache |> LRU.put(:a, 1) |> LRU.put(:b, 2) |> LRU.put(:c, 3) |> LRU.get(:a)
iex> c |> LRU.put(:d, 5)
#LRU<%{a: 1, c: 3, d: 5}>
"""
def put(cache, key, value) do
if cache.count >= cache.size do
key_to_delete = Buf.peek(cache.ages)
new_impl =
if Map.has_key?(cache.impl, key) do
Map.put(cache.impl, key, value)
else
cache.impl
|> Map.delete(key_to_delete)
|> Map.put(key, value)
end
%{cache | impl: new_impl, ages: Buf.insert(cache.ages, key)}
else
new_impl = Map.put(cache.impl, key, value)
%{
cache
| impl: new_impl,
ages: Buf.insert(cache.ages, key),
count: Enum.count(new_impl)
}
end
end
@doc """
Return the cache's map representation
iex> cache = LRU.new(3)
iex> cache = cache |> LRU.put(:a, 1) |> LRU.put(:b, 2) |> LRU.put(:c, 3) |> LRU.put(:d, 4)
iex> LRU.to_map(cache)
%{b: 2, c: 3, d: 4}
"""
def to_map(cache) do
cache.impl
end
@doc """
Get the number of items in the cache.
iex> LRU.new(5) |> LRU.put(:a, :b) |> LRU.count()
1
iex> LRU.new(5) |> LRU.count()
0
"""
def count(cache) do
cache.count
end
@doc """
Get the capacity of the cache.
iex> LRU.new(5) |> LRU.put(:a, :b) |> LRU.size()
5
iex> LRU.new(5) |> LRU.size()
5
"""
def size(cache) do
cache.size
end
@doc """
Check if the cache has a key.
iex> cache = LRU.new(3)
iex> cache = cache |> LRU.put(:a, 1) |> LRU.put(:b, 2) |> LRU.put(:c, 3) |> LRU.put(:d, 4)
iex> LRU.member?(cache, :b)
true
iex> cache = LRU.new(3)
iex> LRU.member?(cache, :z)
false
"""
def member?(cache, key) do
Map.has_key?(cache.impl, key)
end
defimpl Collectable, for: LRU do
def into(original) do
collector_fun = fn
buf, {:cont, {key, val}} ->
LRU.put(buf, key, val)
buf, :done ->
buf
_buf, :halt ->
:ok
end
{original, collector_fun}
end
end
defimpl Enumerable, for: LRU do
def count(cache), do: {:ok, LRU.count(cache)}
def member?(cache, key), do: {:ok, LRU.member?(cache, key)}
def reduce(cache, acc, fun), do: Enumerable.Map.reduce(LRU.to_map(cache), acc, fun)
def slice(_buf), do: {:error, __MODULE__}
end
defimpl Inspect, for: LRU do
import Inspect.Algebra
def inspect(buf, opts) do
concat(["#LRU<", to_doc(LRU.to_map(buf), opts), ">"])
end
end
end
|
lib/lru.ex
|
defmodule Commanded.ExampleDomain.BankAccount do
@moduledoc false
defstruct [
account_number: nil,
balance: 0,
state: nil,
]
alias Commanded.ExampleDomain.BankAccount
defmodule Commands do
defmodule OpenAccount, do: defstruct [:account_number, :initial_balance]
defmodule DepositMoney, do: defstruct [:account_number, :transfer_uuid, :amount]
defmodule WithdrawMoney, do: defstruct [:account_number, :transfer_uuid, :amount]
defmodule CloseAccount, do: defstruct [:account_number]
end
defmodule Events do
defmodule BankAccountOpened, do: defstruct [:account_number, :initial_balance]
defmodule MoneyDeposited, do: defstruct [:account_number, :transfer_uuid, :amount, :balance]
defmodule MoneyWithdrawn, do: defstruct [:account_number, :transfer_uuid, :amount, :balance]
defmodule AccountOverdrawn, do: defstruct [:account_number, :balance]
defmodule BankAccountClosed, do: defstruct [:account_number]
end
alias Commands.{OpenAccount, DepositMoney, WithdrawMoney, CloseAccount}
alias Events.{BankAccountOpened, MoneyDeposited, MoneyWithdrawn, AccountOverdrawn, BankAccountClosed}
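# Command handlers: each takes the current aggregate state plus a command
# struct and returns the event (or list of events) the command produces.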
def open_account(%BankAccount{state: nil}, %OpenAccount{account_number: account_number, initial_balance: initial_balance})
when is_number(initial_balance) and initial_balance > 0
do
%BankAccountOpened{account_number: account_number, initial_balance: initial_balance}
end
def deposit(%BankAccount{state: :active, balance: balance}, %DepositMoney{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount})
when is_number(amount) and amount > 0
do
balance = balance + amount
%MoneyDeposited{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount, balance: balance}
end
def withdraw(%BankAccount{state: :active, balance: balance}, %WithdrawMoney{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount})
when is_number(amount) and amount > 0
do
case balance - amount do
balance when balance < 0 ->
[
%MoneyWithdrawn{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount, balance: balance},
%AccountOverdrawn{account_number: account_number, balance: balance},
]
balance ->
%MoneyWithdrawn{account_number: account_number, transfer_uuid: transfer_uuid, amount: amount, balance: balance}
end
end
def close_account(%BankAccount{state: :active}, %CloseAccount{account_number: account_number}) do
%BankAccountClosed{account_number: account_number}
end
# state mutators
def apply(%BankAccount{} = state, %BankAccountOpened{account_number: account_number, initial_balance: initial_balance}) do
%BankAccount{state |
account_number: account_number,
balance: initial_balance,
state: :active,
}
end
def apply(%BankAccount{} = state, %MoneyDeposited{balance: balance}), do: %BankAccount{state | balance: balance}
def apply(%BankAccount{} = state, %MoneyWithdrawn{balance: balance}), do: %BankAccount{state | balance: balance}
def apply(%BankAccount{} = state, %AccountOverdrawn{}), do: state
def apply(%BankAccount{} = state, %BankAccountClosed{}) do
%BankAccount{state |
state: :closed,
}
end
end
|
test/example_domain/bank_account/bank_account.ex
|
defmodule GGity.Geom.Text do
@moduledoc false
alias GGity.{Draw, Geom, Plot, Scale}
@hjust_anchor_map %{left: "start", center: "middle", right: "end"}
@vjust_anchor_map %{top: "baseline", middle: "middle", bottom: "hanging"}
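# The hjust/vjust options are translated to SVG `text-anchor` and
# `dominant-baseline` attribute values via the lookup maps above.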
@type t() :: %__MODULE__{}
@type plot() :: %Plot{}
@type record() :: map()
@type mapping() :: map()
defstruct data: nil,
mapping: nil,
stat: :identity,
position: :identity,
position_vjust: 1,
group_width: nil,
group_padding: 5,
key_glyph: :a,
alpha: 1,
color: "black",
size: 8,
family: "Helvetica, Arial, sans-serif",
fontface: "normal",
hjust: :center,
vjust: :middle,
nudge_x: "0",
nudge_y: "0"
@spec new(mapping(), keyword()) :: Geom.Text.t()
def new(mapping, options) do
struct(Geom.Text, [{:mapping, mapping} | options])
end
@spec draw(Geom.Text.t(), list(map()), Plot.t()) :: iolist()
def draw(%Geom.Text{} = geom_text, data, %Plot{scales: %{x: %Scale.X.Discrete{}}} = plot) do
number_of_levels = length(plot.scales.x.levels)
group_width = (plot.width - number_of_levels * (plot.scales.x.padding - 1)) / number_of_levels
geom_text = struct(geom_text, group_width: group_width)
words(geom_text, data, plot)
end
def draw(%Geom.Text{} = geom_text, data, plot), do: words(geom_text, data, plot)
defp words(%Geom.Text{} = geom_text, data, %Plot{scales: %{x: %Scale.X.Discrete{}}} = plot) do
data
|> Enum.reject(fn row -> row[geom_text.mapping[:y]] == 0 end)
|> Enum.group_by(fn row -> row[geom_text.mapping[:x]] end)
|> Enum.with_index()
|> Enum.map(fn {{_x_value, group}, group_index} ->
group(geom_text, group, group_index, plot)
end)
end
defp words(%Geom.Text{} = geom_text, data, %Plot{scales: scales} = plot) do
transforms = transforms(geom_text, scales)
data
|> Stream.map(fn row ->
[
transforms.x.(row[geom_text.mapping.x]),
transforms.y.(row[geom_text.mapping.y]),
transforms.label.(row[geom_text.mapping[:label]]),
transforms.alpha.(row[geom_text.mapping[:alpha]]),
transforms.color.(row[geom_text.mapping[:color]]),
transforms.size.(row[geom_text.mapping[:size]])
]
end)
|> Stream.map(fn row -> Enum.zip([:x, :y, :label, :fill_opacity, :fill, :size], row) end)
|> Enum.map(fn row ->
Draw.text(
to_string(row[:label]),
x: row[:x] + plot.area_padding,
y: (plot.width - row[:y]) / plot.aspect_ratio + plot.area_padding,
fill: row[:fill],
fill_opacity: row[:fill_opacity],
font_size: "#{row[:size]}px",
text_anchor: @hjust_anchor_map[geom_text.hjust],
dominant_baseline: @vjust_anchor_map[geom_text.vjust],
dx: geom_text.nudge_x,
dy: geom_text.nudge_y,
font_family: geom_text.family,
font_weight: geom_text.fontface
)
end)
end
defp group(geom_text, group_values, group_index, %Plot{scales: scales} = plot) do
transforms = transforms(geom_text, scales)
count_rows = length(group_values)
sort_order =
case geom_text.position do
:stack -> :desc
:dodge -> :asc
_unknown_adjustment -> :asc
end
group_values
|> Enum.sort_by(fn row -> row[plot.mapping[:group]] end, sort_order)
|> Enum.reduce({count_rows, 0, 0, []}, fn row,
{number_of_groups, total_width, total_height, text} ->
{
number_of_groups,
total_width + geom_text.group_width / count_rows,
total_height +
transforms.y.(row[geom_text.mapping[:y]]) / plot.aspect_ratio,
[
Draw.text(
to_string(row[geom_text.mapping[:label]]),
x:
position_adjust_x(geom_text, row, group_index, total_width, plot, number_of_groups),
y:
plot.area_padding + plot.width / plot.aspect_ratio -
position_adjust_y(geom_text, row, total_height, plot),
fill: geom_text.color,
fill_opacity: geom_text.alpha,
font_size: "#{transforms.size.(row[geom_text.mapping[:size]])}pt",
text_anchor: @hjust_anchor_map[geom_text.hjust],
dominant_baseline: @vjust_anchor_map[geom_text.vjust],
dx: geom_text.nudge_x,
dy: geom_text.nudge_y,
font_family: geom_text.family,
font_weight: geom_text.fontface
)
| text
]
}
end)
|> elem(3)
end
defp transforms(geom, scales) do
scale_transforms =
geom.mapping
|> Map.keys()
|> Enum.reduce(%{}, fn aesthetic, mapped ->
Map.put(mapped, aesthetic, Map.get(scales[aesthetic], :transform))
end)
geom
|> Map.take([:alpha, :color, :shape, :size])
|> Enum.reduce(%{}, fn {aesthetic, fixed_value}, fixed ->
Map.put(fixed, aesthetic, fn _value -> fixed_value end)
end)
|> Map.merge(scale_transforms)
end
defp position_adjust_x(
%Geom.Text{position: :identity},
row,
_group_index,
_total_width,
plot,
_number_of_groups
) do
plot.scales.x.transform.(row[plot.mapping[:x]])
end
defp position_adjust_x(
%Geom.Text{position: :stack} = geom_text,
_row,
group_index,
_total_width,
plot,
_number_of_groups
) do
plot.area_padding + geom_text.group_width / 2 +
group_index * (geom_text.group_width + plot.scales.x.padding)
end
defp position_adjust_x(
%Geom.Text{position: :dodge} = geom_text,
_row,
group_index,
total_width,
plot,
number_of_groups
) do
plot.area_padding + geom_text.group_width / 2 / number_of_groups +
group_index * (geom_text.group_width + plot.scales.x.padding) +
total_width
end
defp position_adjust_y(%Geom.Text{position: :identity} = geom_text, row, _total_height, plot) do
plot.scales.y.transform.(row[geom_text.mapping[:y]]) / plot.aspect_ratio
end
defp position_adjust_y(%Geom.Text{position: :stack} = geom_text, row, total_height, plot) do
total_height +
plot.scales.y.transform.(row[geom_text.mapping[:y]]) / plot.aspect_ratio *
geom_text.position_vjust
end
defp position_adjust_y(%Geom.Text{position: :dodge} = geom_text, row, _total_height, plot) do
plot.scales.y.transform.(row[geom_text.mapping[:y]]) / plot.aspect_ratio *
geom_text.position_vjust
end
end
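# Illustrative construction (options mirror the struct defaults above; the
# mapping keys are hypothetical column names):
#
#   GGity.Geom.Text.new(%{x: "x", y: "y", label: "label"}, color: "blue", size: 10)
#   #=> %GGity.Geom.Text{mapping: %{x: "x", y: "y", label: "label"}, color: "blue", size: 10, ...}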
|
lib/ggity/geom/text.ex
| 0.903385
| 0.45532
|
text.ex
|
starcoder
|
defprotocol Realm.Semigroup do
@moduledoc ~S"""
A semigroup is a structure describing data that can be `append`enated with others of its type.
That is to say that appending another list returns a list, appending one map
to another returns a map, and appending two integers returns an integer, and so on.
These can be chained together an arbitrary number of times. For example:
1 <> 2 <> 3 <> 5 <> 7 == 18
[1, 2, 3] <> [4, 5, 6] <> [7, 8, 9] == [1, 2, 3, 4, 5, 6, 7, 8, 9]
"foo" <> " " <> "bar" == "foo bar"
This generalizes the idea of a monoid, as it does not require an `empty` version.
## Type Class
An instance of `Realm.Semigroup` must define `Realm.Semigroup.append/2`.
Semigroup [append/2]
"""
@doc ~S"""
`append`enate two data of the same type. These can be chained together an arbitrary number of times. For example:
iex> 1 |> append(2) |> append(3)
6
iex> [1, 2, 3]
...> |> append([4, 5, 6])
...> |> append([7, 8, 9])
[1, 2, 3, 4, 5, 6, 7, 8, 9]
iex> "foo" |> append(" ") |> append("bar")
"foo bar"
"""
@spec append(t(), t()) :: any()
def append(a, b)
end
defmodule Realm.Semigroup.Algebra do
alias Realm.Semigroup
@doc ~S"""
Flatten a list of homogeneous semigroups to a single container.
## Example
iex> import Realm.Semigroup.Algebra
...> concat [
...> [1, 2, 3],
...> [4, 5, 6]
...> ]
[1, 2, 3, 4, 5, 6]
"""
@spec concat([Semigroup.t()]) :: Semigroup.t()
def concat(semigroups) do
Enum.reduce(semigroups, [], &Semigroup.append(&2, &1))
end
@doc ~S"""
Repeat the contents of a semigroup a certain number of times.
## Examples
iex> import Realm.Semigroup.Algebra
...> [1, 2, 3] |> repeat(3)
[1, 2, 3, 1, 2, 3, 1, 2, 3]
"""
@spec repeat(Semigroup.t(), non_neg_integer()) :: Semigroup.t()
# credo:disable-for-lines:6 Credo.Check.Refactor.PipeChainStart
def repeat(subject, n) do
fn -> subject end
|> Stream.repeatedly()
|> Stream.take(n)
|> Enum.reduce(&Semigroup.append(&2, &1))
end
end
defimpl Realm.Semigroup, for: Function do
def append(f, g) when is_function(g), do: Quark.compose(g, f)
end
defimpl Realm.Semigroup, for: Integer do
def append(a, b), do: a + b
end
defimpl Realm.Semigroup, for: Float do
def append(a, b), do: a + b
end
defimpl Realm.Semigroup, for: BitString do
def append(a, b), do: Kernel.<>(a, b)
end
defimpl Realm.Semigroup, for: List do
def append(a, b), do: a ++ b
end
defimpl Realm.Semigroup, for: Map do
def append(a, b), do: Map.merge(a, b)
end
defimpl Realm.Semigroup, for: MapSet do
def append(a, b), do: MapSet.union(a, b)
end
defimpl Realm.Semigroup, for: Tuple do
def append(left, right) do
left
|> Tuple.to_list()
|> Enum.zip(Tuple.to_list(right))
|> Enum.map(fn {x, y} -> Realm.Semigroup.append(x, y) end)
|> List.to_tuple()
end
end
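# A few illustrative appends using the implementations above:
#
#   Realm.Semigroup.append(%{a: 1}, %{b: 2})  #=> %{a: 1, b: 2}
#   Realm.Semigroup.append({1, "a"}, {2, "b"}) #=> {3, "ab"}
#   Realm.Semigroup.Algebra.repeat("ab", 3)    #=> "ababab"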
|
lib/realm/semigroup.ex
| 0.822973
| 0.544438
|
semigroup.ex
|
starcoder
|
defmodule FiveHundred.Game do
@moduledoc """
Models a game of 500
TODO:
- score
- turn
"""
alias FiveHundred.{Bid, Game, Player, PlayerBid}
@derive Jason.Encoder
defstruct [
:winning_bid,
:players,
:turn,
:code,
:max_players,
:last_round_winner,
:bid_exclusion,
state: :waiting_for_players
]
@type code :: String.t()
@type state :: :bidding | :playing | :waiting_for_players | :finished
@type t :: %Game{
code: nil | code(),
players: [Player.t()],
turn: nil | integer(),
state: state,
winning_bid: nil | PlayerBid.t(),
max_players: integer(),
bid_exclusion: [Player.t()],
last_round_winner: nil | integer()
}
@spec new_game(Player.t(), integer()) :: t()
def new_game(%Player{} = player, max_players \\ 4),
do: %Game{
code: code(),
players: [player],
max_players: max_players,
last_round_winner: 0,
turn: 0,
bid_exclusion: [],
winning_bid: nil
}
@spec join_game(t(), Player.t()) :: {:ok, t()} | {:error, :max_players}
def join_game(%Game{players: players, max_players: max_players}, %Player{})
when length(players) == max_players,
do: {:error, :max_players}
def join_game(%Game{players: players} = game, %Player{} = player) do
{:ok, %Game{game | players: [player | players]}}
|> ready_for_bidding?
end
@spec bid(t(), PlayerBid.t()) ::
{:ok, t()}
| {:error,
:last_round_winner_must_bid_first | :not_your_turn | :not_bidding | :bid_not_high_enough | :cannot_bid}
def bid(%Game{} = game, %PlayerBid{player_index: player_index} = playerBid) do
with {:ok, game} <- ensure_bidding(game),
{:ok, game} <- ensure_last_winner_has_bid_first(game, playerBid),
{:ok, game} <- ensure_turn(game, player_index),
{:ok, game} <- ensure_can_bid(game, player_index),
{:ok, game} <- ensure_bid_is_higher(game, playerBid),
{:ok, game} <- set_winning_bid(game, playerBid),
{:ok, game} <- exclude_from_bidding(game, player_index),
{:ok, game} <- bid_advance(game),
{:ok, game} <- advance_turn(game),
do: {:ok, game}
end
@spec ensure_last_winner_has_bid_first(t(), PlayerBid.t()) :: {:ok, t()} | {:error, :last_round_winner_must_bid_first}
def ensure_last_winner_has_bid_first(%Game{winning_bid: nil, last_round_winner: last_round_winner, bid_exclusion: bid_exclusion} = game, %PlayerBid{
player_index: player_index
})
do
cond do
Enum.member?(bid_exclusion, last_round_winner) -> {:ok, game}
player_index != last_round_winner ->
{:error, :last_round_winner_must_bid_first}
true -> {:ok, game}
end
end
def ensure_last_winner_has_bid_first(%Game{} = game, %PlayerBid{}), do: {:ok, game}
@spec pass(t(), integer()) :: {:ok, t()} | {:error, :not_your_turn | :not_bidding}
def pass(%Game{} = game, player_index) do
with {:ok, game} <- ensure_bidding(game),
{:ok, game} <- ensure_turn(game, player_index),
{:ok, game} <- ensure_can_bid(game, player_index),
{:ok, game} <- exclude_from_bidding(game, player_index),
{:ok, game} <- bid_advance(game),
{:ok, game} <- advance_turn(game),
do: {:ok, game}
end
@spec bid_advance(t()) :: {:ok, t()} | {:error, :not_bidding}
def bid_advance(%Game{bid_exclusion: bid_exclusion, players: players} = game)
when length(bid_exclusion) == length(players),
do: {:ok, %Game{game | state: :playing}}
def bid_advance(%Game{} = game), do: {:ok, game}
@spec advance_turn(t()) :: {:ok, t()}
def advance_turn(%Game{players: players, turn: current_turn} = game),
do: {:ok, %Game{game | turn: rem(current_turn + 1, length(players))}}
@spec ensure_bidding(t()) :: {:ok, t()} | {:error, :not_bidding}
def ensure_bidding(%Game{state: state}) when state != :bidding,
do: {:error, :not_bidding}
def ensure_bidding(%Game{} = game), do: {:ok, game}
@spec ensure_turn(t(), integer()) :: {:ok, t()} | {:error, :not_your_turn}
def ensure_turn(%Game{turn: turn}, player_index) when turn != player_index,
do: {:error, :not_your_turn}
def ensure_turn(%Game{} = game, _player_index), do: {:ok, game}
@spec ensure_bid_is_higher(t(), PlayerBid.t()) :: {:ok, t()} | {:error, :bid_not_high_enough}
def ensure_bid_is_higher(%Game{winning_bid: nil} = game, %PlayerBid{}), do: {:ok, game}
def ensure_bid_is_higher(
%Game{winning_bid: %PlayerBid{bid: winning_bid}} = game,
%PlayerBid{bid: new_bid} = playerBid
) do
case Bid.compare(new_bid, winning_bid) do
:gt -> set_winning_bid(game, playerBid)
_ -> {:error, :bid_not_high_enough}
end
end
@spec ensure_can_bid(t(), integer()) :: {:ok, t()} | {:error, atom()}
def ensure_can_bid(%Game{bid_exclusion: bid_exclusion} = game, player_index) do
if Enum.member?(bid_exclusion, player_index) do
{:error, :cannot_bid}
else
{:ok, game}
end
end
@spec set_winning_bid(t(), PlayerBid.t()) :: {:ok, t()} | {:error, atom()}
def set_winning_bid(%Game{} = game, %PlayerBid{} = playerBid),
do: {:ok, %Game{game | winning_bid: playerBid}}
@spec exclude_from_bidding(t(), integer()) :: {:ok, t()} | {:error, atom()}
def exclude_from_bidding(%Game{bid_exclusion: bid_exclusion} = game, player_index),
do: {:ok, %Game{game | bid_exclusion: [player_index | bid_exclusion]}}
@spec ready_for_bidding?({:ok, t()}) :: {:ok, t()}
def ready_for_bidding?({:ok, %Game{players: players, max_players: max_players} = game})
when length(players) == max_players,
do: {:ok, %Game{game | state: :bidding}}
def ready_for_bidding?({:ok, %Game{} = game}), do: {:ok, game}
@spec code(integer) :: String.t()
def code(length \\ 5),
do:
:crypto.strong_rand_bytes(length)
|> Base.url_encode64()
|> binary_part(0, length)
|> String.upcase()
end
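# A minimal flow sketch (assumes %Player{} can be built with defaults):
#
#   game = FiveHundred.Game.new_game(%Player{}, 2)
#   {:ok, game} = FiveHundred.Game.join_game(game, %Player{})
#   game.state #=> :bidding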
|
lib/five_hundred/game.ex
| 0.623033
| 0.438485
|
game.ex
|
starcoder
|
defmodule Plan do
alias Engine.Adapters.RMQRPCWorker
require Logger
@jsprit_time_constraint_msg "Time of time window constraint is in the past!"
def find_optimal_routes(vehicle_ids, booking_ids) do
Logger.info("call calculate_route_optimization")
%{}
|> Map.put(:vehicles, Enum.map(vehicle_ids, &Vehicle.get/1))
|> Map.put(
:bookings,
Enum.map(booking_ids, &Booking.get/1)
|> Enum.filter(fn booking ->
is_nil(booking.assigned_to)
end)
)
|> insert_time_matrix()
|> RMQRPCWorker.call("calculate_route_optimization")
|> case do
{:ok, response} ->
Jason.decode!(response, keys: :atoms)
|> get_in([:data, :solution])
_ ->
nil
end
end
def insert_time_matrix(items),
do:
items
|> map_vehicles_and_bookings_to_coordinates()
|> Osrm.get_time_between_coordinates()
|> Map.delete(:code)
|> (fn matrix -> Map.put(items, :matrix, matrix) end).()
|> add_hints_from_matrix()
def map_vehicles_and_bookings_to_coordinates(%{vehicles: vehicles, bookings: bookings}),
do:
bookings
|> Enum.flat_map(fn %{pickup: pickup, delivery: delivery} -> [pickup, delivery] end)
|> Enum.concat(
Enum.flat_map(vehicles, fn %{start_address: start_address, end_address: end_address} ->
[start_address, end_address]
end)
)
def add_hints_from_matrix(%{bookings: bookings} = map) do
{booking_hints, vehicle_hints} =
map
|> get_in([:matrix, "sources"])
|> Enum.map(fn %{"hint" => hint} -> hint end)
|> Enum.split(length(bookings) * 2)
map
|> Map.update!(:bookings, fn bookings -> add_booking_hints(bookings, booking_hints) end)
|> Map.update!(:vehicles, fn vehicles -> add_vehicle_hints(vehicles, vehicle_hints) end)
end
defp add_booking_hints(bookings, hints) do
bookings
|> Enum.reduce({[], hints}, fn booking, {res, [pickup_hint, delivery_hint | rest_of_hints]} ->
updated_booking =
booking
|> Map.update!(:pickup, fn position -> Map.put(position, :hint, pickup_hint) end)
|> Map.update!(:delivery, fn position -> Map.put(position, :hint, delivery_hint) end)
{res ++ [updated_booking], rest_of_hints}
end)
|> elem(0)
end
def add_vehicle_hints(vehicles, hints) do
vehicles
|> Enum.reduce({[], hints}, fn vehicle, {res, hints} ->
[start_hint, end_hint | rest_of_hints] = hints
updated_vehicle =
vehicle
|> Map.update!(:start_address, fn start_address ->
Map.put(start_address, :hint, start_hint)
end)
|> Map.update!(:end_address, fn end_address ->
Map.put(end_address, :hint, end_hint)
end)
{res ++ [updated_vehicle], rest_of_hints}
end)
|> elem(0)
end
def calculate(vehicle_ids, booking_ids)
when length(vehicle_ids) == 0 or length(booking_ids) == 0,
do: IO.puts("No vehicles/bookings to calculate plan for")
def calculate(vehicle_ids, booking_ids) do
%{routes: routes, excluded: excluded} = find_optimal_routes(vehicle_ids, booking_ids)
transports =
routes
|> Enum.map(fn %{activities: activities, vehicle_id: id} ->
booking_ids =
activities |> Enum.filter(&Map.has_key?(&1, :id)) |> Enum.map(& &1.id) |> Enum.uniq()
Vehicle.get(id)
|> Map.put(:activities, activities)
|> Map.put(:booking_ids, booking_ids)
end)
|> Enum.map(fn vehicle ->
vehicle
|> Map.put(
:current_route,
vehicle.activities
|> Enum.map(fn %{address: address} -> address end)
|> Osrm.route()
)
end)
|> Enum.map(&add_distance_durations/1)
PlanStore.put_plan(%{
transports: transports,
booking_ids: booking_ids,
excluded_booking_ids: Enum.map(excluded, &handle_booking_failure/1)
})
end
def add_distance_durations(vehicle) do
distance_durations =
vehicle
|> Map.get(:current_route)
|> Jason.decode!()
|> Map.get("legs")
|> Enum.map(fn legs ->
legs
|> Enum.reduce(%{}, fn {key, val}, acc -> Map.put(acc, String.to_atom(key), val) end)
|> Map.take([:distance, :duration])
end)
|> List.insert_at(0, %{distance: 0, duration: 0})
Map.update!(vehicle, :activities, fn activities ->
Enum.zip(activities, distance_durations)
|> Enum.map(fn {activity, distance_duration} -> Map.merge(activity, distance_duration) end)
end)
end
defp handle_booking_failure(%{id: id, failure: %{status_msg: @jsprit_time_constraint_msg}}),
do: %{id: id, status: "TIME_CONSTRAINTS_EXPIRED"}
defp handle_booking_failure(%{id: id}), do: %{id: id, status: "CONSTRAINTS_FAILURE"}
end
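# Illustrative entry point (requires the RabbitMQ RPC worker and OSRM services
# this module depends on; the ids are hypothetical):
#
#   Plan.calculate(["vehicle-1", "vehicle-2"], ["booking-1"])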
|
packages/engine_umbrella/apps/engine/lib/plan.ex
| 0.666714
| 0.408601
|
plan.ex
|
starcoder
|
defmodule TeaVent do
@moduledoc """
TeaVent allows you to perform event-dispatching in a style that is a mixture of Event Sourcing and The 'Elm Architecture' (TEA).
The idea behind this is twofold:
1. Event dispatching is separated from event handling. The event handler (the `reducer`) can thus be a pure function, making it very easy to reason about it, and test it in isolation.
2. How subjects of the event handling are found and how the results are stored back to your application can be completely configured. This also means that it is easier to reason about and test in isolation.
The module strives to make it easy to work with a variety of different set-ups:
- 'full' Event Sourcing where events are always made and persisted, and the state-changes are done asynchronously (and potentially there are multiple state-representations (i.e. event-handlers) working on the same queue of events).
- a 'classical' relational-database setup where this can be treated as single-source-of-truth, and events/state-changes are only persisted when they are 'valid'.
- a TEA-style application setup, where our business-domain model representation changes in a pure way.
- A distributed-database setup where it is impossible to view your data-model as a 'single-source-of-truth' (making database-constraints impossible to work with), but the logical contexts of most events do not overlap, which allows us to handle constraints inside the application logic that is set up in TEA-style.
The main entry point for events is `TeaVent.dispatch()`
### Context Provider function
The configured Context Provider function receives two arguments as input: The current `event`, and a 'wrapped' reducer function.
The purpose of the Context Provider is a bit like that of a 'lens' (or more specifically: a 'prism') from functional programming: It should find the precise subject (the 'logical context') based on the `topic` of the event, and after trying to combine the subject with the event, it should store the altered subject back (if successful) or do some proper error resolution (if not successful).
It should:
1. Based on the `topic` in the event (and potentially other fields), find the `subject` of the event.
2. Call the passed 'wrapped' `reducer`-function with this `subject`.
3. pattern-match on the result of this 'wrapped' `reducer`-function:
- `{:ok, updated_event}` -> The `updated_event` has the `subject`, `changed_subject` and `changes` fields filled in. The ContextProvider should then store the altered subject back.
- `{:error, some_problem}` is returned whenever the event could not be applied. The ContextProvider might decide to still persist the event, or do nothing. (Or e.g. roll back the whole database-transaction, etc).
Examples for Context Providers could be:
- a GenServer that contains a list of internal structures, which ensures that whenever an event is dispatched, this is done inside a _call_ to the GenServer, such that it is handled synchronously w.r.t. the GenServer's state.
- A (relational-)database-wrapper which fetches a representation of a database-row, and updates it based on the results.
### Synchronous Callbacks
These callbacks are called in-order with the result of the ContextProvider.
Each of them should return either `{:ok, potentially_altered_event}` or `{:error, some_problem}`.
The next one will only be called if the previous one was successful.
#### Asynchronous Callbacks
These can be modeled on top of Synchronous Callbacks by sending a message with the event to wherever you'd want to handle it asynchronously. TeaVent does not contain a built-in option for this, because there are many different approaches to do this. A couple that you could easily use are:
- Perform a `Phoenix.PubSub.broadcast!` (requires the 'phoenix_pubsub' library). This is probably the most 'Event-Source'-y of these solutions, because any place can subscribe to these events. Do note that using this will restrict you to certain kinds of `topic`-formats (i.e. 'binary-only')
- Start a `GenStage` or `Flow`-stream with the event result. (requires the 'gen_stage'/'flow' libraries)
- Spawn a `Task` for each of the things you'd want to do asynchronously.
- Cast a GenServer that does something with the result.
### Middleware
A middleware-function is called with one argument: The next ('more inner') middleware-function. The return value of a middleware-function should be a function that takes an event, and calls the next middleware-function with this event. What it does _before_ and _after_ calling this function is up to the middleware (it can even decide not to call the next middleware-function, for instance).
The function that the middleware returns should take the event as input and return it as output (potentially adding things to e.g. the `meta`-field or other fields of the event). Yes, it is possible to return non-event structures from middleware, but this will make it difficult to chain middleware, unless the more 'higher-up' middleware also takes this other structure as return result, so it is not advised to do so.
Middleware is powerful, which means that it should be used sparingly!
Examples of potential middleware are:
- A wrapper that wraps the event-handling in a database-transaction. (e.g. Ecto's `Repo.transaction`)
- A wrapper that measures the duration of the event-handling code or performs some other instrumentation on it.
"""
# @keys [:middleware, :sync_callbacks, :subject_finder, :subject_putter]
@configuration_keys [:middleware, :sync_callbacks, :context_provider, :reducer]
@configuration_defaults %{
middleware: [],
sync_callbacks: [],
context_provider: &__MODULE__.Defaults.context_provider/2
# context_provider: fn event, reducer -> reducer.(nil, event) end
# subject_finder: fn _ -> nil end,
# subject_putter: fn _, _, _ -> :ok end
}
@type reducer(data_model) :: (data_model, Event.t() -> {:ok, data_model} | {:error, any()})
@type reducer() :: reducer(any)
@type context_provider() :: (Event.t(), reducer() -> {:ok, Event.t()} | {:error, any()})
@type sync_callback :: (Event.t() -> {:ok, Event.t()} | {:error, any()})
@type middleware_function ::
(middleware_function -> (Event.t() -> {:ok, Event.t()} | {:error, any()}))
# @type configuration :: %{reducer: reducer(),
# optional(:middleware) => [middleware_function()],
# optional(:sync_callbacks) => [sync_callback()],
# optional(:context_provider) => [context_provider()]
# }
defmodule Event do
@moduledoc """
The data-representation of the occurrence of an event that is forwarded through the TeaVent stack.
## Fields:
`topic`: An identification of the 'logical context' the event works on.
`name`: The kind of event that is happening.
`data`: More information that determines what this event is trying to change.
`subject`: This field is filled in by the `context_provider` (if you have one), based on the `topic`, to contain a concrete representation of the logical context the event works on.
`changed_subject`: This field is filled in by the result of calling the `resolver` function. It is how the `subject` will look like after it has been altered by the event.
`changes`: This is automatically filled in by `TeaVent` once `subject` and `changed_subject` are filled in (and if and only if both of them are maps or structs) to contain a map of changes between the two. This map contains all keys+values that are different in `changed_subject` from `subject`.
It is allowed to pattern-match on these fields, but `subject`, `changed_subject` and `changes` will be filled-in automatically by TeaVent. (`topic`, `name`, `data` and `meta` are allowed to be accessed/altered directly.)
"""
@enforce_keys [:topic, :name]
defstruct [:topic, :name, :data, :subject, :changed_subject, :changes, meta: %{}]
@type t :: %__MODULE__{
topic: any(),
name: binary() | atom(),
meta: map(),
subject: any(),
changed_subject: any(),
changes: map()
}
def new(topic, name, data \\ %{}, meta \\ %{}) do
%__MODULE__{topic: topic, name: name, data: data, meta: meta}
end
end
defmodule Errors.MissingRequiredConfiguration do
@moduledoc """
Raised when a required option is missing (not specified as part of the configuration parameter nor in the current Application.env)
"""
defexception [:message]
@impl true
def exception(value) do
msg =
"The configuration require #{inspect(value)} to be specified (as part of the configuration parameter or in the Application.env), but it was not."
%__MODULE__{message: msg}
end
end
defmodule Errors.UnrecognizedConfiguration do
@moduledoc """
Raised when an option that was not recognized was passed to TeaVent (to catch for instance syntax errors in the option names.)
"""
defexception [:message]
@impl true
def exception(keys) do
msg =
"The keys `#{inspect(keys)}` are not part of the configuration and could not be recognized. Did you make a syntax-error somewhere?"
%__MODULE__{message: msg}
end
end
defmodule Defaults do
# This module contains default implementations of some of the TeaVent option functions,
# which cannot be specified as anonymous functions because those are not serializable at compile-time.
@moduledoc false
def context_provider(event, reducer) do
# With no context provider configured there is no subject to look up, so the
# wrapped reducer is called with a nil subject and the event itself.
reducer.(nil, event)
end
end
@doc """
Creates an event based on the `topic`, `name` and `data` given, and dispatches it using the given configuration.
## Required Configuration
- `reducer:` A function that, given an a subject and an event, returns either `{:ok, changed_subject}` or `{:error, some_problem}`.
## Optional Configuration
- `context_provider:` A function that receives the event and a reducer-function as input, and should fetch the context of the event and later update it back. See more information in the module documentation.
- `sync_callbacks:` A list of functions that take the event as input and return `{:ok, changed_event}` or `{:error, some_error}` as output. These are called _synchronously_, and when the first one fails, the rest of the chain will not be called. See more info in the module documentation.
- `middleware`: A list of functions that do something useful around the whole event-handling stack. See more info in the module documentation.
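## Example
A minimal sketch using the default context provider (the reducer below is illustrative):
    reducer = fn _subject, %TeaVent.Event{data: data} -> {:ok, data} end
    {:ok, event} = TeaVent.dispatch(:counter, :set, 42, reducer: reducer)
    event.changed_subject # => 42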
"""
def dispatch(topic, name, data \\ %{}, configuration \\ []) do
dispatch_event(__MODULE__.Event.new(topic, name, data), configuration)
end
@doc """
Dispatches the given `event` (that was created before using e.g. `TeaVent.Event.new`)
to the current TeaVent-based event-sourcing system.
Takes the same configuration as `TeaVent.dispatch`.
"""
def dispatch_event(event = %__MODULE__.Event{}, configuration \\ []) do
configuration = parse_configuration(configuration)
reducer = configuration[:reducer]
middleware = configuration[:middleware]
context_provider = configuration[:context_provider]
sync_callbacks = configuration[:sync_callbacks]
sync_chain = fn event ->
chain_until_failure(
event,
[
core_function(context_provider, reducer)
] ++ sync_callbacks
)
end
run_middleware(event, sync_chain, middleware)
end
defp core_function(context_provider, reducer) do
fn event ->
context_provider.(event, fn subject, event ->
with {:ok, changed_subject} <- reducer.(subject, event),
changes = calc_diff(subject, changed_subject) do
{:ok,
%__MODULE__.Event{
event
| subject: subject,
changed_subject: changed_subject,
changes: changes
}}
else
{:error, error} ->
{:error, error}
end
end)
end
end
defp calc_diff(a = %struct_a{}, b = %struct_b{}) when struct_a == struct_b do
calc_diff(Map.from_struct(a), Map.from_struct(b))
end
defp calc_diff(a = %{}, b = %{}) do
a_set = a |> MapSet.new()
b_set = b |> MapSet.new()
b_set
|> MapSet.difference(a_set)
|> Enum.into(%{})
end
defp calc_diff(_a, _b), do: nil
defp run_middleware(event, sync_fun, middleware) do
combined_middleware =
middleware
|> Enum.reverse()
|> Enum.reduce(sync_fun, fn outer_fun, inner_fun -> outer_fun.(inner_fun) end)
combined_middleware.(event)
end
# Chains the given list of functions until any one of them returns a failure result.
defp chain_until_failure(event, []), do: {:ok, event}
defp chain_until_failure(event, [callback | callbacks]) do
case callback.(event) do
{:error, error} -> {:error, error, event}
{:ok, changed_event = %__MODULE__.Event{}} -> chain_until_failure(changed_event, callbacks)
end
end
defp parse_configuration(configuration) do
configuration_map = Enum.into(configuration, %{})
configuration_map_keys = configuration_map |> Map.keys() |> MapSet.new()
allowed_keys = @configuration_keys |> MapSet.new()
unrecognized_keys = MapSet.difference(configuration_map_keys, allowed_keys)
case unrecognized_keys |> MapSet.to_list() do
[] -> :ok
keys -> raise __MODULE__.Errors.UnrecognizedConfiguration, keys
end
# TODO
@configuration_keys
|> Enum.map(fn key ->
with :error <- Map.fetch(configuration_map, key),
:error <- Application.fetch_env(__MODULE__, key),
:error <- Map.fetch(@configuration_defaults, key) do
raise __MODULE__.Errors.MissingRequiredConfiguration, key
else
{:ok, val} -> {key, val}
end
end)
end
end
|
lib/tea_vent.ex
| 0.917182
| 0.72876
|
tea_vent.ex
|
starcoder
|
defmodule Absinthe.Plug do
@moduledoc """
A plug for using Absinthe
See [The Guides](http://absinthe-graphql.org/guides/plug-phoenix/) for usage details
## Uploaded File Support
Absinthe.Plug can be used to support uploading of files. This is a schema that
has a mutation field supporting multiple files. Note that we have to import
types from Absinthe.Plug.Types in order to get this scalar type:
```elixir
defmodule MyApp.Schema do
use Absinthe.Schema
import_types Absinthe.Plug.Types
mutation do
field :upload_file, :string do
arg :users, non_null(:upload)
arg :metadata, :upload
resolve fn args, _ ->
args.users # this is a `%Plug.Upload{}` struct.
{:ok, "success"}
end
end
end
end
```
Next it's best to look at how one submits such a query over HTTP. You need to
use the `multipart/form-data` content type. From there we need
1) a `query` parameter holding our GraphQL document
2) optional variables parameter for JSON encoded variables
3) optional operationName parameter to specify the operation
4) a query key for each file that will be uploaded.
An example of using this with curl would look like:
```
curl -X POST \\
-F query="{files(users: \"users_csv\", metadata: \"metadata_json\")}" \\
-F users_csv=@users.csv \\
-F metadata_json=@metadata.json \\
localhost:4000/graphql
```
Note how there is a correspondence between the value of the `:users` argument
and the `-F` form part of the associated file.
The advantage of doing uploads this way instead of merely putting them in
the context is that if the file is simply in the context there isn't a way in
the schema to mark it as required. It also wouldn't show up in the documentation
as an argument that is required for a field.
By treating uploads as regular arguments we get all the usual GraphQL argument
validation.
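## Usage
A minimal hookup sketch (`MyAppWeb.Schema` stands in for your own schema module):
```elixir
# in a Phoenix router
forward "/graphql", Absinthe.Plug, schema: MyAppWeb.Schema
```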
"""
@behaviour Plug
import Plug.Conn
require Logger
@type function_name :: atom
@type opts :: [
schema: atom,
adapter: atom,
path: binary,
context: map,
json_codec: atom | {atom, Keyword.t},
pipeline: {module, function_name},
no_query_message: binary,
]
@doc """
Sets up and validates the Absinthe schema
"""
@spec init(opts :: opts) :: map
def init(opts) do
adapter = Keyword.get(opts, :adapter)
context = Keyword.get(opts, :context, %{})
no_query_message = Keyword.get(opts, :no_query_message, "No query document supplied")
pipeline = Keyword.get(opts, :pipeline, {__MODULE__, :default_pipeline})
json_codec = case Keyword.get(opts, :json_codec, Poison) do
module when is_atom(module) -> %{module: module, opts: []}
other -> other
end
schema_mod = opts |> get_schema
%{adapter: adapter, schema_mod: schema_mod, context: context, json_codec: json_codec,
pipeline: pipeline, no_query_message: no_query_message}
end
defp get_schema(opts) do
default = Application.get_env(:absinthe, :schema)
schema = Keyword.get(opts, :schema, default)
try do
Absinthe.Schema.types(schema)
rescue
UndefinedFunctionError ->
raise ArgumentError, "The supplied schema: #{inspect schema} is not a valid Absinthe Schema"
end
schema
end
@doc """
Parses, validates, resolves, and executes the given Graphql Document
"""
def call(conn, %{json_codec: json_codec} = config) do
{conn, result} = conn |> execute(config)
case result do
{:input_error, msg} ->
conn
|> send_resp(400, msg)
{:ok, %{data: _} = result} ->
conn
|> json(200, result, json_codec)
{:ok, %{errors: _} = result} ->
conn
|> json(400, result, json_codec)
{:error, {:http_method, text}, _} ->
conn
|> send_resp(405, text)
{:error, error, _} when is_binary(error) ->
conn
|> send_resp(500, error)
end
end
@doc false
def execute(conn, config) do
{conn, body} = load_body_and_params(conn)
result = with {:ok, input, opts} <- prepare(conn, body, config),
{:ok, input} <- validate_input(input, config.no_query_message),
pipeline <- setup_pipeline(conn, config, opts),
{:ok, absinthe_result, _} <- Absinthe.Pipeline.run(input, pipeline) do
{:ok, absinthe_result}
end
{conn, result}
end
def setup_pipeline(conn, config, opts) do
private = conn.private[:absinthe] || %{}
private = Map.put(private, :http_method, conn.method)
config = Map.put(config, :conn_private, private)
{module, fun} = config.pipeline
apply(module, fun, [config, opts])
end
def default_pipeline(config, opts) do
config.schema_mod
|> Absinthe.Pipeline.for_document(opts)
|> Absinthe.Pipeline.insert_after(Absinthe.Phase.Document.CurrentOperation,
{Absinthe.Plug.Validation.HTTPMethod, method: config.conn_private.http_method}
)
end
@doc false
def prepare(conn, body, %{json_codec: json_codec} = config) do
raw_input = Map.get(conn.params, "query", body)
Logger.debug("""
GraphQL Document:
#{raw_input}
""")
variables = Map.get(conn.params, "variables") || "{}"
operation_name = conn.params["operationName"] |> decode_operation_name
context = build_context(conn, config)
with {:ok, variables} <- decode_variables(variables, json_codec) do
absinthe_opts = [
variables: variables,
context: context,
root_value: (conn.private[:absinthe][:root_value] || %{}),
operation_name: operation_name,
]
{:ok, raw_input, absinthe_opts}
end
end
defp build_context(conn, config) do
config.context
|> Map.merge(conn.private[:absinthe][:context] || %{})
|> Map.merge(uploaded_files(conn))
end
defp uploaded_files(conn) do
files =
conn.params
|> Enum.filter(&match?({_, %Plug.Upload{}}, &1))
|> Map.new
%{
__absinthe_plug__: %{uploads: files}
}
end
defp validate_input(nil, no_query_message), do: {:input_error, no_query_message}
defp validate_input("", no_query_message), do: {:input_error, no_query_message}
defp validate_input(doc, _), do: {:ok, doc}
# GraphQL.js treats an empty operation name as no operation name.
defp decode_operation_name(""), do: nil
defp decode_operation_name(name), do: name
defp decode_variables(%{} = variables, _), do: {:ok, variables}
defp decode_variables("", _), do: {:ok, %{}}
defp decode_variables("null", _), do: {:ok, %{}}
defp decode_variables(nil, _), do: {:ok, %{}}
defp decode_variables(variables, codec) do
case codec.module.decode(variables) do
{:ok, results} ->
{:ok, results}
_ ->
{:input_error, "The variable values could not be decoded"}
end
end
def load_body_and_params(%{body_params: %{"query" => _}} = conn) do
{fetch_query_params(conn), ""}
end
def load_body_and_params(conn) do
case get_req_header(conn, "content-type") do
["application/graphql"] ->
{:ok, body, conn} = read_body(conn)
{fetch_query_params(conn), body}
_ ->
{conn, ""}
end
end
@doc false
def json(conn, status, body, json_codec) do
conn
|> put_resp_content_type("application/json")
|> send_resp(status, json_codec.module.encode!(body, json_codec.opts))
end
end
|
lib/absinthe/plug.ex
| 0.884526
| 0.791418
|
plug.ex
|
starcoder
|
defmodule SmartNote.Questions do
@moduledoc """
Context for creating and managing common questions
"""
import Ecto.Query, except: [update: 2]
alias SmartNote.Questions.Question
alias SmartNote.Repo
def new(), do: %Question{} |> Question.create_changeset(%{})
def edit(question), do: question |> Question.update_changeset(%{})
@doc """
Get all questions, paginated
"""
def all(opts \\ []) do
if is_nil(opts[:search_term]) do
Repo.paginate(Question, opts)
else
search(opts[:search_term])
|> Repo.paginate(opts)
end
end
# do a naive lookup on all question fields to see if we
# have any matches
defp search("") do
Question
end
defp search(search_term) do
search_term_matcher = "%#{search_term}%"
Question
|> where(
[q],
ilike(q.title, ^search_term_matcher) or
ilike(q.body, ^search_term_matcher) or
ilike(q.answer, ^search_term_matcher) or
ilike(q.libraries, ^search_term_matcher) or
fragment("? = ANY(?)", ^search_term, q.tags)
)
end
@doc """
Get all questions with a specific tag, paginated
"""
def with_tag(tag, opts \\ []) do
Question
|> where([q], fragment("? = ANY(?)", ^tag, q.tags))
|> Repo.paginate(opts)
end
@doc """
Get all unique tags
"""
def all_tags() do
Question
|> select([q], q.tags)
|> Repo.all()
|> Enum.flat_map(fn tag_list -> tag_list end)
|> Enum.filter(fn x -> x != "" end)
|> Enum.sort()
|> Enum.reduce(%{}, fn tag, tag_counts ->
Map.update(tag_counts, tag, 1, fn count -> count + 1 end)
end)
end
@doc """
Get a single question
"""
def get(id) do
case Ecto.UUID.cast(id) do
{:ok, id} ->
case Repo.get(Question, id) do
nil ->
{:error, :not_found}
question ->
{:ok, question}
end
:error ->
{:error, :invalid_id}
end
end
@doc """
Create a new question
"""
def create(%{"tags" => tags} = params) when is_binary(tags) do
tags =
tags
|> String.split(",")
|> Enum.map(&String.trim/1)
|> Enum.map(&String.downcase/1)
create(Map.merge(params, %{"tags" => tags}))
end
def create(params) do
%Question{}
|> Question.create_changeset(params)
|> Repo.insert()
end
@doc """
Update a question
"""
def update(question, %{"tags" => tags} = params) when is_binary(tags) do
tags =
tags
|> String.split(",")
|> Enum.map(&String.trim/1)
|> Enum.map(&String.downcase/1)
update(question, Map.merge(params, %{"tags" => tags}))
end
def update(question, params) do
question
|> Question.update_changeset(params)
|> Repo.update()
end
end
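# Illustrative calls (param keys are hypothetical; the required fields are
# defined by Question's changesets):
#
#   {:ok, question} = SmartNote.Questions.create(%{"title" => "What is OTP?", "tags" => "elixir, otp"})
#   SmartNote.Questions.with_tag("elixir")
#   SmartNote.Questions.all(search_term: "OTP")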
|
lib/smart_note/questions.ex
| 0.619241
| 0.430806
|
questions.ex
|
starcoder
|
defmodule Resx.Resource.Content.Stream do
@moduledoc """
The streamable content of a resource.
%Resx.Resource.Content.Stream{
type: ["text/html]",
data: ["<p>", "Hello", "</p>"]
}
"""
alias Resx.Resource.Content
@enforce_keys [:type, :data]
defstruct [:type, :data]
@type t :: %Content.Stream{
type: Content.type,
data: Enumerable.t
}
defimpl Enumerable do
def count(%{ data: data }) do
case Enumerable.count(data) do
{ :error, _ } -> { :error, __MODULE__ }
result -> result
end
end
def member?(%{ data: data }, element) do
case Enumerable.member?(data, element) do
{ :error, _ } -> { :error, __MODULE__ }
result -> result
end
end
def reduce(%{ data: data }, acc, reducer), do: Enumerable.reduce(data, acc, reducer)
def slice(%{ data: data }) do
case Enumerable.slice(data) do
{ :error, _ } -> { :error, __MODULE__ }
result -> result
end
end
end
@doc """
Make some content streamable.
"""
@spec new(t | Content.t) :: t
def new(%Content{ type: type, data: data }), do: %Content.Stream{ type: type, data: [data] }
def new(content), do: content
@doc """
Combine a content stream into a collectable.
If the collectable is a bitstring, this will only combine the content into
a bitstring if it is possible to do so. Otherwise it will just return a list.
"""
@spec combine(t, Collectable.t) :: Collectable.t
def combine(content, collectable \\ [])
def combine(content, collectable) when is_bitstring(collectable) do
Enum.reduce(content.data, { [], true }, fn
chunk, { list, true } when is_bitstring(chunk) -> { [chunk|list], true }
chunk, { list, _ } -> { [chunk|list], false }
end)
|> case do
{ list, true } -> Enum.reverse(list) |> Enum.into(<<>>)
{ list, false } -> Enum.reverse(list)
end
end
def combine(content, collectable), do: Enum.into(content.data, collectable)
end
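# A small sketch of wrapping and recombining content:
#
#   content = %Resx.Resource.Content{type: ["text/plain"], data: "hello"}
#   stream = Resx.Resource.Content.Stream.new(content)
#   Resx.Resource.Content.Stream.combine(stream, "") #=> "hello"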
|
lib/resx/resource/content.stream.ex
| 0.756178
| 0.460168
|
content.stream.ex
|
starcoder
|
defmodule AWS.Athena do
@moduledoc """
Amazon Athena is an interactive query service that lets you use standard SQL to
analyze data directly in Amazon S3.
You can point Athena at your data in Amazon S3 and run ad-hoc queries and get
results in seconds. Athena is serverless, so there is no infrastructure to set
up or manage. You pay only for the queries you run. Athena scales
automatically—executing queries in parallel—so results are fast, even with large
datasets and complex queries. For more information, see [What is Amazon Athena](http://docs.aws.amazon.com/athena/latest/ug/what-is.html) in the *Amazon
Athena User Guide*.
If you connect to Athena using the JDBC driver, use version 1.1.0 of the driver
or later with the Amazon Athena API. Earlier version drivers do not support the
API. For more information and to download the driver, see [Accessing Amazon Athena with
JDBC](https://docs.aws.amazon.com/athena/latest/ug/connect-with-jdbc.html).
For code samples using the AWS SDK for Java, see [Examples and Code Samples](https://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the
*Amazon Athena User Guide*.
"""
@doc """
Returns the details of a single named query or a list of up to 50 queries, which
you provide as an array of query ID strings.
Requires you to have access to the workgroup in which the queries were saved.
Use `ListNamedQueriesInput` to get the list of named query IDs in the specified
workgroup. If information could not be retrieved for a submitted query ID,
information about the query ID submitted is listed under
`UnprocessedNamedQueryId`. Named queries differ from executed queries. Use
`BatchGetQueryExecutionInput` to get details about each unique query execution,
and `ListQueryExecutionsInput` to get a list of query execution IDs.
"""
def batch_get_named_query(client, input, options \\ []) do
request(client, "BatchGetNamedQuery", input, options)
end
@doc """
Returns the details of a single query execution or a list of up to 50 query
executions, which you provide as an array of query execution ID strings.
Requires you to have access to the workgroup in which the queries ran. To get a
list of query execution IDs, use `ListQueryExecutionsInput$WorkGroup`. Query
executions differ from named (saved) queries. Use `BatchGetNamedQueryInput` to
get details about named queries.
"""
def batch_get_query_execution(client, input, options \\ []) do
request(client, "BatchGetQueryExecution", input, options)
end
@doc """
Creates (registers) a data catalog with the specified name and properties.
Catalogs created are visible to all users of the same AWS account.
"""
def create_data_catalog(client, input, options \\ []) do
request(client, "CreateDataCatalog", input, options)
end
@doc """
Creates a named query in the specified workgroup.
Requires that you have access to the workgroup.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the
*Amazon Athena User Guide*.
"""
def create_named_query(client, input, options \\ []) do
request(client, "CreateNamedQuery", input, options)
end
@doc """
Creates a workgroup with the specified name.
"""
def create_work_group(client, input, options \\ []) do
request(client, "CreateWorkGroup", input, options)
end
@doc """
Deletes a data catalog.
"""
def delete_data_catalog(client, input, options \\ []) do
request(client, "DeleteDataCatalog", input, options)
end
@doc """
Deletes the named query if you have access to the workgroup in which the query
was saved.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the
*Amazon Athena User Guide*.
"""
def delete_named_query(client, input, options \\ []) do
request(client, "DeleteNamedQuery", input, options)
end
@doc """
Deletes the workgroup with the specified name.
The primary workgroup cannot be deleted.
"""
def delete_work_group(client, input, options \\ []) do
request(client, "DeleteWorkGroup", input, options)
end
@doc """
Returns the specified data catalog.
"""
def get_data_catalog(client, input, options \\ []) do
request(client, "GetDataCatalog", input, options)
end
@doc """
Returns a database object for the specified database and data catalog.
"""
def get_database(client, input, options \\ []) do
request(client, "GetDatabase", input, options)
end
@doc """
Returns information about a single query.
Requires that you have access to the workgroup in which the query was saved.
"""
def get_named_query(client, input, options \\ []) do
request(client, "GetNamedQuery", input, options)
end
@doc """
Returns information about a single execution of a query if you have access to
the workgroup in which the query ran.
Each time a query executes, information about the query execution is saved with
a unique ID.
"""
def get_query_execution(client, input, options \\ []) do
request(client, "GetQueryExecution", input, options)
end
@doc """
Streams the results of a single query execution specified by `QueryExecutionId`
from the Athena query results location in Amazon S3.
For more information, see [Query Results](https://docs.aws.amazon.com/athena/latest/ug/querying.html) in the
*Amazon Athena User Guide*. This request does not execute the query but returns
results. Use `StartQueryExecution` to run a query.
To stream query results successfully, the IAM principal with permission to call
`GetQueryResults` also must have permissions to the Amazon S3 `GetObject` action
for the Athena query results location.
IAM principals with permission to the Amazon S3 `GetObject` action for the query
results location are able to retrieve query results from Amazon S3 even if
permission to the `GetQueryResults` action is denied. To restrict user or role
access, ensure that Amazon S3 permissions to the Athena query location are
denied.
"""
def get_query_results(client, input, options \\ []) do
request(client, "GetQueryResults", input, options)
end
@doc """
Returns table metadata for the specified catalog, database, and table.
"""
def get_table_metadata(client, input, options \\ []) do
request(client, "GetTableMetadata", input, options)
end
@doc """
Returns information about the workgroup with the specified name.
"""
def get_work_group(client, input, options \\ []) do
request(client, "GetWorkGroup", input, options)
end
@doc """
Lists the data catalogs in the current AWS account.
"""
def list_data_catalogs(client, input, options \\ []) do
request(client, "ListDataCatalogs", input, options)
end
@doc """
Lists the databases in the specified data catalog.
"""
def list_databases(client, input, options \\ []) do
request(client, "ListDatabases", input, options)
end
@doc """
Provides a list of available query IDs only for queries saved in the specified
workgroup.
Requires that you have access to the specified workgroup. If a workgroup is not
specified, lists the saved queries for the primary workgroup.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the
*Amazon Athena User Guide*.
"""
def list_named_queries(client, input, options \\ []) do
request(client, "ListNamedQueries", input, options)
end
@doc """
Provides a list of available query execution IDs for the queries in the
specified workgroup.
If a workgroup is not specified, returns a list of query execution IDs for the
primary workgroup. Requires you to have access to the workgroup in which the
queries ran.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the
*Amazon Athena User Guide*.
"""
def list_query_executions(client, input, options \\ []) do
request(client, "ListQueryExecutions", input, options)
end
@doc """
Lists the metadata for the tables in the specified data catalog database.
"""
def list_table_metadata(client, input, options \\ []) do
request(client, "ListTableMetadata", input, options)
end
@doc """
Lists the tags associated with an Athena workgroup or data catalog resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Lists available workgroups for the account.
"""
def list_work_groups(client, input, options \\ []) do
request(client, "ListWorkGroups", input, options)
end
@doc """
Runs the SQL query statements contained in the `Query`.
Requires you to have access to the workgroup in which the query ran. Running
queries against an external catalog requires `GetDataCatalog` permission to the
catalog. For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the
*Amazon Athena User Guide*.
"""
def start_query_execution(client, input, options \\ []) do
request(client, "StartQueryExecution", input, options)
end
@doc """
Stops a query execution.
Requires you to have access to the workgroup in which the query ran.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the
*Amazon Athena User Guide*.
"""
def stop_query_execution(client, input, options \\ []) do
request(client, "StopQueryExecution", input, options)
end
@doc """
Adds one or more tags to an Athena resource.
A tag is a label that you assign to a resource. In Athena, a resource can be a
workgroup or data catalog. Each tag consists of a key and an optional value,
both of which you define. For example, you can use tags to categorize Athena
workgroups or data catalogs by purpose, owner, or environment. Use a consistent
set of tag keys to make it easier to search and filter workgroups or data
catalogs in your account. For best practices, see [Tagging Best Practices](https://aws.amazon.com/answers/account-management/aws-tagging-strategies/).
Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can be
from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers
representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys
and values are case-sensitive. Tag keys must be unique per resource. If you
specify more than one tag, separate them by commas.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes one or more tags from a data catalog or workgroup resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates the data catalog that has the specified name.
"""
def update_data_catalog(client, input, options \\ []) do
request(client, "UpdateDataCatalog", input, options)
end
@doc """
Updates the workgroup with the specified name.
The workgroup's name cannot be changed.
"""
def update_work_group(client, input, options \\ []) do
request(client, "UpdateWorkGroup", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "athena"}
host = build_host("athena", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AmazonAthena.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
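# Illustrative call (assumes an %AWS.Client{} struct configured with the
# credentials, region, endpoint, proto and port fields that request/4 reads;
# the input map uses the Athena API's field names):
#
#   {:ok, result, _response} =
#     AWS.Athena.start_query_execution(client, %{
#       "QueryString" => "SELECT 1",
#       "ResultConfiguration" => %{"OutputLocation" => "s3://my-bucket/results/"}
#     })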
|
lib/aws/generated/athena.ex
| 0.920919
| 0.70388
|
athena.ex
|
starcoder
|
defmodule BikeBrigade.Utils do
def dev? do
Application.get_env(:bike_brigade, :environment) == :dev
end
def prod? do
Application.get_env(:bike_brigade, :environment) == :prod
end
def test? do
Application.get_env(:bike_brigade, :environment) == :test
end
defmacro get_config(keyword) do
quote do
config = Application.get_env(:bike_brigade, __MODULE__)
case config[unquote(keyword)] do
{:system, var, :optional} -> System.get_env(var)
{:system, var} -> System.get_env(var) || raise "Missing environment variable #{var}"
val when not is_nil(val) -> val
nil -> raise "Missing configuration: :bike_brigade, #{__MODULE__}, :#{unquote(keyword)}"
end
end
end
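# Example of configuration consumed by get_config/1 (illustrative module and
# values):
#
#   config :bike_brigade, MyApp.SomeModule,
#     api_key: {:system, "API_KEY"},
#     tracker_url: {:system, "TRACKER_URL", :optional},
#     base_url: "https://example.com"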
def random_enum(enum) do
enum.__enum_map__()
|> Keyword.keys()
|> Enum.random()
end
@doc "returns the given string, with a default if it's blank or nil"
def with_default(nil, default), do: default
def with_default("", default), do: default
def with_default(value, _default), do: value
@doc """
`replace_if_updated` takes a struct or a list of structs to be tested and the updated struct.
If the argument is a list, each item is tested.
If the two arguments are the same struct and the `id` field matches, it returns `updated`, otherwise it returns `struct`.
Options:
- `replace_with:` - specify an optional value to replace with (e.g. `replace_if_updated(foo, bar, replace_with: nil)` returns `nil` if `foo` has the same id as `bar`)
"""
def replace_if_updated(_, updated, opts \\ [])
def replace_if_updated(items, updated, opts) when is_list(items) do
Enum.map(items, &replace_if_updated(&1, updated, opts))
end
def replace_if_updated(%mod{id: id}, %mod{id: id} = updated, opts) do
Keyword.get(opts, :replace_with, updated)
end
def replace_if_updated(struct, _, _), do: struct
def task_count(tasks) do
Map.values(count_tasks(tasks)) |> Enum.sum()
end
def humanized_task_count(tasks) do
count_tasks(tasks)
|> Enum.map(fn {item, count} ->
"#{count} #{Inflex.inflect(item.name, count)}"
end)
|> Enum.join(" and ")
end
def fetch_env!(cfg_section, key) do
Application.fetch_env!(:bike_brigade, cfg_section)
|> Keyword.fetch!(key)
end
def get_env(cfg_section, key, default \\ nil) do
Application.get_env(:bike_brigade, cfg_section, [])
|> Keyword.get(key, default)
end
def put_env(cfg_section, key, value) do
cfg =
Application.get_env(:bike_brigade, cfg_section, [])
|> Keyword.put(key, value)
Application.put_env(:bike_brigade, cfg_section, cfg)
end
@spec change_scheme(String.t(), String.t()) :: String.t()
def change_scheme(url, scheme) do
uri = URI.parse(url)
first = uri.scheme |> String.length()
last = url |> String.length()
scheme <> String.slice(url, first..last)
end
defp count_tasks(tasks) do
tasks
|> Enum.reduce(%{}, fn task, counts ->
new_counts =
for task_item <- task.task_items, into: %{} do
{task_item.item, task_item.count}
end
Map.merge(counts, new_counts, fn _k, v1, v2 -> v1 + v2 end)
end)
end
@doc """
Given a sorted list, returns a list of tuples `{x, group}`,
where x is the result of calling `fun` on the elements in `group`.
Maintains the sort order.
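## Example
    iex> BikeBrigade.Utils.ordered_group_by([1, 1, 2, 3, 3], & &1)
    [{1, [1, 1]}, {2, [2]}, {3, [3, 3]}]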
"""
def ordered_group_by([], _fun), do: []
def ordered_group_by([head | rest], fun),
do: ordered_group_by(rest, [{fun.(head), [head]}], fun)
def ordered_group_by([], [{key, group} | groups], _fun) do
Enum.reverse([{key, Enum.reverse(group)} | groups])
end
def ordered_group_by([head | rest], [{last_key, group} | groups], fun) do
key = fun.(head)
if key == last_key do
ordered_group_by(rest, [{key, [head | group]} | groups], fun)
else
ordered_group_by(rest, [{key, [head]} | [{last_key, Enum.reverse(group)} | groups]], fun)
end
end
end
|
lib/bike_brigade/utils.ex
| 0.685318
| 0.438064
|
utils.ex
|
starcoder
|
defmodule Locations.AssertHelpers do
import ExUnit.Assertions
alias Ecto.Changeset
def assert_valid_changeset(keys, expected_params, %Changeset{} = changeset) do
assert %Changeset{valid?: true, changes: changes} = changeset
Enum.each(keys, fn key ->
expected = Map.get(expected_params, Atom.to_string(key))
actual = Map.get(changes, key)
assert expected == actual,
"Error on: #{inspect(key)}\nExpected: #{inspect(expected)}\nExpected: #{
inspect(actual)
}"
end)
end
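# Usage sketch (a hypothetical `Location.changeset/2`; note the expected params
# map uses string keys, matching the `Atom.to_string/1` lookup above):
#
#   params = %{"name" => "HQ", "city" => "Toronto"}
#   changeset = Location.changeset(%Location{}, params)
#   assert_valid_changeset([:name, :city], params, changeset)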
def assert_invalid_cast_changeset(keys, %Changeset{} = changeset) do
assert %Changeset{valid?: false, errors: errors} = changeset
Enum.each(keys, fn key ->
assert errors[key], "expected an error for #{key}"
{_, meta} = errors[key]
assert meta[:validation] == :cast,
"The validation type, #{meta[:validation]}, is incorrect."
end)
end
def assert_required_changeset(keys, %Changeset{} = changeset) do
assert %Changeset{valid?: false, errors: errors} = changeset
Enum.each(keys, fn key ->
assert errors[key], "The required field #{inspect(key)} is missing from errors."
{_, meta} = errors[key]
assert meta[:validation] == :required,
"The validation type, #{meta[:validation]}, is incorrect."
end)
end
def assert_changeset_types(expected_keys_and_types, schema_name) do
keys = schema_name.__schema__(:fields)
actual_keys_and_types =
Enum.map(keys, fn key ->
key_type = schema_name.__schema__(:type, key)
{key, key_type}
end)
actual_keys_and_types = Enum.sort(actual_keys_and_types)
expected_keys_and_types = Enum.sort(expected_keys_and_types)
if length(expected_keys_and_types) != length(actual_keys_and_types) do
assert expected_keys_and_types == actual_keys_and_types
end
Enum.each(expected_keys_and_types, fn {key, expected_type} ->
actual_type = Keyword.get(actual_keys_and_types, key)
assert expected_type == actual_type,
"Error on: #{inspect(key)}\nExpected #{inspect(expected_type)}\nActual: #{
inspect(actual_type)
}"
end)
end
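# Sketch (assumes a hypothetical `Location` Ecto schema with exactly these fields):
#
#   assert_changeset_types(
#     [id: :id, name: :string, inserted_at: :naive_datetime, updated_at: :naive_datetime],
#     Location
#   )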
def assert_values_set_on_schema(expected_params, db_schema) do
Enum.each(expected_params, fn {key, expected} ->
db_key = String.to_existing_atom(key)
actual = Map.get(db_schema, db_key)
assert expected == actual,
       "Error on: #{inspect(key)}\nExpected: #{inspect(expected)}\nActual: #{inspect(actual)}"
end)
end
def assert_insert_timestamps(db_schema) do
assert db_schema.inserted_at == db_schema.updated_at
end
def assert_update_timestamps(db_schema) do
  assert :lt == NaiveDateTime.compare(db_schema.inserted_at, db_schema.updated_at),
         "expected inserted_at to be strictly earlier than updated_at"
end
end
|
test/support/assert_helpers.ex
| 0.795022
| 0.509764
|
assert_helpers.ex
|
starcoder
|
defmodule ConciergeSite.DaySelectHelper do
@moduledoc """
Render the day select component in templates
"""
import Phoenix.HTML.Tag, only: [content_tag: 3, tag: 2]
@weekdays ["monday", "tuesday", "wednesday", "thursday", "friday"]
@weekend ["saturday", "sunday"]
@days @weekdays ++ @weekend
@short_days ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
@spec render(atom, [atom | String.t()]) :: Phoenix.HTML.safe()
def render(input_name, checked \\ []) do
checked_set = prepare_checked_set(checked)
content_tag :div, class: "day-selector", data: [selector: "date"] do
[title(), day(input_name, checked_set), group(checked_set)]
end
end
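# Usage sketch in an EEx template (the :trip input name is hypothetical):
#
#   <%= ConciergeSite.DaySelectHelper.render(:trip, [:monday, "tuesday"]) %>
#
# Each checkbox is named "trip[relevant_days][]"; Mon and Tue start checked, and
# the Weekdays/Weekend group buttons activate only when the full set is checked.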
defp prepare_checked_set(checked) do
checked
|> ensure_days_as_string()
|> MapSet.new()
|> if_needed_add_weekday()
|> if_needed_add_weekend()
end
defp ensure_days_as_string([]), do: []
defp ensure_days_as_string(days), do: Enum.map(days, &ensure_day_as_string/1)
defp ensure_day_as_string(day) when is_atom(day), do: Atom.to_string(day)
defp ensure_day_as_string(day) when is_binary(day), do: day
defp if_needed_add_weekday(checked) do
if MapSet.subset?(MapSet.new(@weekdays), checked) do
MapSet.put(checked, "weekdays")
else
checked
end
end
defp if_needed_add_weekend(checked) do
if MapSet.subset?(MapSet.new(@weekend), checked) do
MapSet.put(checked, "weekend")
else
checked
end
end
defp title do
content_tag :div, class: "title-part" do
Enum.map(@short_days, &content_tag(:div, &1, class: "day-header"))
end
end
defp day(input_name, checked_set) do
content_tag :div, class: "day-part" do
content_tag :div, class: "btn-group btn-group-toggle" do
Enum.map(@days, &label(input_name, &1, Enum.member?(checked_set, &1)))
end
end
end
defp label(input_name, day, selected?) do
content_tag :label,
class: label_class(selected?),
tabindex: "0",
role: "button",
aria_label: day_aria_label(day),
aria_pressed: aria_pressed?(selected?) do
[
tag(
:input,
type: "checkbox",
autocomplete: "off",
value: day,
name: "#{input_name}[relevant_days][]",
checked: selected?
),
check_icon(selected?)
]
end
end
defp day_aria_label(day), do: "travel day: #{day}"
defp aria_pressed?(true), do: "true"
defp aria_pressed?(false), do: "false"
defp check_icon(true), do: content_tag(:i, "", class: "fa fa-check")
defp check_icon(_), do: content_tag(:i, "", class: "fa")
defp label_class(true), do: "btn btn-outline-primary btn-day active"
defp label_class(_), do: "btn btn-outline-primary btn-day"
defp group(checked_set) do
content_tag :div, class: "group-part invisible-no-js" do
content_tag :div, class: "btn-group btn-group-toggle" do
Enum.map(["weekdays", "weekend"], &group_label(&1, Enum.member?(checked_set, &1)))
end
end
end
defp group_label(name, selected?) do
content_tag :label,
class: "#{label_class(selected?)} btn-#{name}",
tabindex: "0",
role: "button",
aria_label: group_aria_label(name),
aria_pressed: aria_pressed?(selected?) do
[
tag(:input, type: "checkbox", autocomplete: "off", value: name, checked: selected?),
String.capitalize(name)
]
end
end
defp group_aria_label("weekdays"), do: "travel days: all weekdays"
defp group_aria_label("weekend"), do: "travel days: weekend"
end
|
apps/concierge_site/lib/views/day_select_helper.ex
| 0.564459
| 0.415136
|
day_select_helper.ex
|
starcoder
|
defmodule RobotSimulator do
defp is_valid_position(position) do
is_tuple(position) && tuple_size(position) == 2 && Enum.all?(Tuple.to_list(position), &(is_integer(&1)))
end
defp list_contains(list, item) do
item in list
end
defp next_direction(direction, move) do
case {direction, move} do
{:north, ?L} -> :west
{:north, ?R} -> :east
{:east, ?L} -> :north
{:east, ?R} -> :south
{:south, ?L} -> :east
{:south, ?R} -> :west
{:west, ?L} -> :south
{:west, ?R} -> :north
end
end
defp next_position(position, direction) do
{x, y} = position
case direction do
:north -> {x, y + 1}
:east -> {x + 1, y}
:south -> {x, y - 1}
:west -> {x - 1, y}
end
end
@doc """
Create a Robot Simulator given an initial direction and position.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
@spec create(direction :: atom, position :: {integer, integer}) :: any
def create(direction \\ :north, position \\ {0, 0}) do
valid_directions = [:north, :east, :south, :west]
cond do
!list_contains(valid_directions, direction) -> {:error, "invalid direction"}
!is_valid_position(position) -> {:error, "invalid position"}
true -> %{direction: direction, position: position}
end
end
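# Sketch:
#
#   RobotSimulator.create(:east, {3, -2})    #=> %{direction: :east, position: {3, -2}}
#   RobotSimulator.create(:up, {0, 0})       #=> {:error, "invalid direction"}
#   RobotSimulator.create(:north, {0.5, 0})  #=> {:error, "invalid position"}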
@doc """
Simulate the robot's movement given a string of instructions.
Valid instructions are: "R" (turn right), "L" (turn left), and "A" (advance)
"""
@spec simulate(robot :: any, instructions :: String.t()) :: any
def simulate(robot, instructions) do
  char_list = String.to_charlist(instructions)
  if Enum.all?(char_list, &list_contains('ALR', &1)) do
    List.foldl(char_list, robot, fn char, robot ->
      case char do
        turn when turn in [?L, ?R] ->
          %{robot | direction: next_direction(robot[:direction], turn)}
        ?A ->
          %{robot | position: next_position(robot[:position], robot[:direction])}
      end
    end)
  else
    {:error, "invalid instruction"}
  end
end
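# Sketch: starting at the default :north / {0, 0}, "RAALA" turns right,
# advances twice, turns left, and advances once more.
#
#   robot = RobotSimulator.create() |> RobotSimulator.simulate("RAALA")
#   RobotSimulator.direction(robot)      #=> :north
#   RobotSimulator.position(robot)       #=> {2, 1}
#   RobotSimulator.simulate(robot, "X")  #=> {:error, "invalid instruction"}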
@doc """
Return the robot's direction.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
@spec direction(robot :: any) :: atom
def direction(robot) do
Map.get(robot, :direction)
end
@doc """
Return the robot's position.
"""
@spec position(robot :: any) :: {integer, integer}
def position(robot) do
Map.get(robot, :position)
end
end
|
robot-simulator/lib/robot_simulator.ex
| 0.835148
| 0.63426
|
robot_simulator.ex
|
starcoder
|
defmodule JSX do
def encode!(term, opts \\ []) do
parser_opts = :jsx_config.extract_config(opts ++ [:escaped_strings])
parser(:jsx_to_json, opts, parser_opts).(JSX.Encoder.json(term) ++ [:end_json])
end
def encode(term, opts \\ []) do
{ :ok, encode!(term, opts) }
rescue
ArgumentError -> { :error, :badarg }
end
def decode!(json, opts \\ []) do
decoder_opts = :jsx_config.extract_config(opts)
case decoder(JSX.Decoder, opts ++ [:return_maps], decoder_opts).(json) do
{ :incomplete, _ } -> raise ArgumentError
result -> result
end
end
def decode(term, opts \\ []) do
{ :ok, decode!(term, opts) }
rescue
ArgumentError -> { :error, :badarg }
end
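# Round-trip sketch (key order in the encoded string isn't guaranteed for maps;
# decode returns maps because of the :return_maps option above):
#
#   {:ok, json} = JSX.encode(%{"name" => "alice"})  # json == ~s({"name":"alice"})
#   JSX.decode(json)                                #=> {:ok, %{"name" => "alice"}}
#   JSX.decode("{not json")                         #=> {:error, :badarg}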
def format!(json, opts \\ []) do
case :jsx.format(json, opts) do
{ :incomplete, _ } -> raise ArgumentError
result -> result
end
end
def format(json, opts \\ []) do
{ :ok, format!(json, opts) }
rescue
ArgumentError -> { :error, :badarg }
end
def minify!(json), do: format!(json, [space: 0, indent: 0])
def minify(json) do
{ :ok, minify!(json) }
rescue
ArgumentError -> { :error, :badarg }
end
def prettify!(json), do: format!(json, [space: 1, indent: 2])
def prettify(json) do
{ :ok, prettify!(json) }
rescue
ArgumentError -> { :error, :badarg }
end
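# Formatting sketch:
#
#   JSX.minify!(~s({ "a": 1 }))  #=> ~s({"a":1})
#   JSX.prettify!(~s({"a":1}))   # re-indents with space: 1, indent: 2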
def is_json?(json, opts \\ []) do
case :jsx.is_json(json, opts) do
{ :incomplete, _ } -> false
result -> result
end
rescue
_ -> false
end
def is_term?(term, opts \\ []) do
parser_opts = :jsx_config.extract_config(opts)
parser(:jsx_verify, opts, parser_opts).(JSX.Encoder.json(term) ++ [:end_json])
rescue
_ -> false
end
def encoder(handler, initialstate, opts) do
:jsx.encoder(handler, initialstate, opts)
end
def decoder(handler, initialstate, opts) do
:jsx.decoder(handler, initialstate, opts)
end
def parser(handler, initialstate, opts) do
:jsx.parser(handler, initialstate, opts)
end
end
defmodule JSX.Decoder do
def init(opts) do
:jsx_to_term.init(opts)
end
def handle_event({ :literal, :null }, state), do: :jsx_to_term.insert(:nil, state)
def handle_event(event, state), do: :jsx_to_term.handle_event(event, state)
end
defprotocol JSX.Encoder do
@fallback_to_any true
def json(term)
end
defimpl JSX.Encoder, for: Map do
def json(map) do
[:start_object] ++ unpack(map, Map.keys(map))
end
defp unpack(map, [k|rest]) when is_integer(k) or is_binary(k) or is_atom(k) do
[k] ++ JSX.Encoder.json(Map.get(map, k)) ++ unpack(map, rest)
end
defp unpack(_, []), do: [:end_object]
end
defimpl JSX.Encoder, for: List do
def json([]), do: [:start_array, :end_array]
def json([{}]), do: [:start_object, :end_object]
def json([{ _, _ }|_] = list) do
[:start_object] ++ unzip(list)
end
def json(list) do
[:start_array] ++ unhitch(list)
end
defp unzip([{k, v}|rest]) when is_integer(k) or is_binary(k) or is_atom(k) do
[k] ++ JSX.Encoder.json(v) ++ unzip(rest)
end
defp unzip([]), do: [:end_object]
defp unhitch([v|rest]) do
JSX.Encoder.json(v) ++ unhitch(rest)
end
defp unhitch([]), do: [:end_array]
end
defimpl JSX.Encoder, for: Atom do
def json(nil), do: [:null]
def json(true), do: [true]
def json(false), do: [false]
def json(atom), do: [atom]
end
defimpl JSX.Encoder, for: [Integer, Float, BitString] do
def json(value), do: [value]
end
defimpl JSX.Encoder, for: HashDict do
def json(dict), do: JSX.Encoder.json(HashDict.to_list(dict))
end
defimpl JSX.Encoder, for: [Range, Stream, HashSet] do
def json(enumerable) do
JSX.Encoder.json(Enum.to_list(enumerable))
end
end
defimpl JSX.Encoder, for: [Tuple, PID, Port, Reference, Function, Any] do
def json(map) when is_map(map), do: JSX.Encoder.Map.json(Map.delete(map, :__struct__))
def json(_), do: raise ArgumentError
end
|
lib/jsx.ex
| 0.656328
| 0.613208
|
jsx.ex
|
starcoder
|