defmodule Date.Range do
  @moduledoc """
  Returns an inclusive range between dates.
  Ranges must be created with the `Date.range/2` function.
  The following fields are public:
    * `:first` - the initial date on the range
    * `:last` - the last date on the range
  The remaining fields are private and should not be accessed.
  """
  @type t :: %__MODULE__{
          first: Date.t(),
          last: Date.t(),
          first_in_iso_days: Calendar.iso_days(),
          last_in_iso_days: Calendar.iso_days()
        }
  defstruct [:first, :last, :first_in_iso_days, :last_in_iso_days]
  defimpl Enumerable do
    def member?(
          %{
            first: %{calendar: calendar, year: first_year, month: first_month, day: first_day},
            last: %{calendar: calendar, year: last_year, month: last_month, day: last_day},
            first_in_iso_days: first_in_iso_days,
            last_in_iso_days: last_in_iso_days
          },
          %Date{calendar: calendar, year: year, month: month, day: day}
        ) do
      first = {first_year, first_month, first_day}
      last = {last_year, last_month, last_day}
      date = {year, month, day}
      if first_in_iso_days <= last_in_iso_days do
        {:ok, date >= first and date <= last}
      else
        {:ok, date >= last and date <= first}
      end
    end
    def member?(_, _) do
      {:ok, false}
    end
    def count(%Date.Range{
          first_in_iso_days: first_in_iso_days,
          last_in_iso_days: last_in_iso_days
        }) do
      {:ok, abs(first_in_iso_days - last_in_iso_days) + 1}
    end
    def reduce(
          %Date.Range{
            first_in_iso_days: first_in_iso_days,
            last_in_iso_days: last_in_iso_days,
            first: %{calendar: calendar}
          },
          acc,
          fun
        ) do
      reduce(
        first_in_iso_days,
        last_in_iso_days,
        acc,
        fun,
        calendar,
        first_in_iso_days <= last_in_iso_days
      )
    end
    defp reduce(_x, _y, {:halt, acc}, _fun, _calendar, _up?) do
      {:halted, acc}
    end
    defp reduce(x, y, {:suspend, acc}, fun, calendar, up?) do
      {:suspended, acc, &reduce(x, y, &1, fun, calendar, up?)}
    end
    defp reduce(x, y, {:cont, acc}, fun, calendar, up? = true) when x <= y do
      reduce(x + 1, y, fun.(date_from_iso_days(x, calendar), acc), fun, calendar, up?)
    end
    defp reduce(x, y, {:cont, acc}, fun, calendar, up? = false) when x >= y do
      reduce(x - 1, y, fun.(date_from_iso_days(x, calendar), acc), fun, calendar, up?)
    end
    defp reduce(_, _, {:cont, acc}, _fun, _calendar, _up) do
      {:done, acc}
    end
    defp date_from_iso_days(days, Calendar.ISO) do
      {year, month, day} = Calendar.ISO.date_from_iso_days(days)
      %Date{year: year, month: month, day: day, calendar: Calendar.ISO}
    end
    defp date_from_iso_days(days, calendar) do
      {year, month, day, _, _, _, _} =
        calendar.naive_datetime_from_iso_days({days, {0, 86_400_000_000}})
      %Date{year: year, month: month, day: day, calendar: calendar}
    end
  end
  defimpl Inspect do
    def inspect(%Date.Range{first: first, last: last}, _) do
      "#DateRange<" <> inspect(first) <> ", " <> inspect(last) <> ">"
    end
  end
end

# Source: lib/elixir/lib/calendar/date_range.ex
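
# Usage sketch for Date.Range above: `Date.range/2` builds the struct and the
# `Enumerable` implementation supplies membership, counting, and iteration.
range = Date.range(~D[2000-01-01], ~D[2000-01-10])
Enum.count(range)                   #=> 10
Enum.member?(range, ~D[2000-01-05]) #=> true
Enum.take(range, 3)                 #=> [~D[2000-01-01], ~D[2000-01-02], ~D[2000-01-03]]
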
defmodule Sneex.CpuHelper do
  @moduledoc "This module defines helper functions for checking CPU flags."
  use Bitwise
  @doc "
  This function will determine new values for several of the CPU flags.
  ## Examples
  iex> 0 |> Sneex.CpuHelper.check_flags_for_value(:bit8)
  %{carry: false, negative: false, overflow: false, zero: true}
  iex> 0 |> Sneex.CpuHelper.check_flags_for_value(:bit16)
  %{carry: false, negative: false, overflow: false, zero: true}
  iex> 0x80 |> Sneex.CpuHelper.check_flags_for_value(:bit8)
  %{carry: false, negative: true, overflow: false, zero: false}
  iex> 0x80 |> Sneex.CpuHelper.check_flags_for_value(:bit16)
  %{carry: false, negative: false, overflow: false, zero: false}
  iex> 0x7FFF |> Sneex.CpuHelper.check_flags_for_value(:bit16)
  %{carry: false, negative: false, overflow: true, zero: false}
  iex> 0x8000 |> Sneex.CpuHelper.check_flags_for_value(:bit16)
  %{carry: false, negative: true, overflow: false, zero: false}
  "
  @spec check_flags_for_value(integer(), Sneex.Cpu.bit_size()) :: %{
          carry: boolean(),
          negative: boolean(),
          overflow: boolean(),
          zero: boolean()
        }
  def check_flags_for_value(value, bitness) do
    %{
      negative: check_negative_flag(value, bitness),
      overflow: check_overflow_flag(value, bitness),
      zero: check_zero_flag(value),
      carry: check_carry_flag(value)
    }
  end
  defp check_negative_flag(value, :bit8) when 0x80 == band(value, 0x80), do: true
  defp check_negative_flag(value, :bit16) when 0x8000 == band(value, 0x8000), do: true
  defp check_negative_flag(_value, _bitness), do: false
  # Still need to figure this out
  defp check_overflow_flag(value, :bit8) when 0x40 == band(value, 0x40), do: true
  defp check_overflow_flag(value, :bit16) when 0x4000 == band(value, 0x4000), do: true
  defp check_overflow_flag(_value, _bitness), do: false
  defp check_zero_flag(0), do: true
  defp check_zero_flag(_), do: false
  # Still need to figure this out
  defp check_carry_flag(_value), do: false
  @doc "
  This function will rotate a value 1 step to the left or right, filling in 0's.
  It returns a tuple containing the updated value and the bit that was rotated off the value.
  ## Examples - Rotating Left
  iex> 0 |> Sneex.CpuHelper.rotate(:bit8, :left)
  {0, false}
  iex> 0 |> Sneex.CpuHelper.rotate(:bit16, :left)
  {0, false}
  iex> 0x80 |> Sneex.CpuHelper.rotate(:bit8, :left)
  {0, true}
  iex> 0x80 |> Sneex.CpuHelper.rotate(:bit16, :left)
  {0x0100, false}
  iex> 0xFF |> Sneex.CpuHelper.rotate(:bit8, :left)
  {0xFE, true}
  iex> 0xFF |> Sneex.CpuHelper.rotate(:bit16, :left)
  {0x01FE, false}
  iex> 0x7FFF |> Sneex.CpuHelper.rotate(:bit16, :left)
  {0xFFFE, false}
  iex> 0x8000 |> Sneex.CpuHelper.rotate(:bit16, :left)
  {0, true}
  iex> 0xFFFF |> Sneex.CpuHelper.rotate(:bit16, :left)
  {0xFFFE, true}
  ## Examples - Rotating Right
  iex> 0 |> Sneex.CpuHelper.rotate(:bit8, :right)
  {0, false}
  iex> 0 |> Sneex.CpuHelper.rotate(:bit16, :right)
  {0, false}
  iex> 0x80 |> Sneex.CpuHelper.rotate(:bit8, :right)
  {0x40, false}
  iex> 0x80 |> Sneex.CpuHelper.rotate(:bit16, :right)
  {0x40, false}
  iex> 0xFF |> Sneex.CpuHelper.rotate(:bit8, :right)
  {0x7F, true}
  iex> 0xFF |> Sneex.CpuHelper.rotate(:bit16, :right)
  {0x7F, true}
  iex> 0x7FFF |> Sneex.CpuHelper.rotate(:bit16, :right)
  {0x3FFF, true}
  iex> 0x8000 |> Sneex.CpuHelper.rotate(:bit16, :right)
  {0x4000, false}
  iex> 0xFFFF |> Sneex.CpuHelper.rotate(:bit16, :right)
  {0x7FFF, true}
  "
  @spec rotate(integer(), Sneex.Cpu.bit_size(), :left | :right) :: {integer(), boolean()}
  def rotate(value, bitness, :left) do
    mask = bitness |> rotate_left_mask()
    negative? = value |> check_negative_flag(bitness)
    new_value = value |> bsl(1) |> band(mask)
    {new_value, negative?}
  end
  def rotate(value, _bitness, :right) do
    mask = 0x0001
    is_zero? = value |> band(mask) |> check_zero_flag()
    new_value = value |> bsr(1)
    {new_value, not is_zero?}
  end
  defp rotate_left_mask(:bit8), do: 0xFF
  defp rotate_left_mask(:bit16), do: 0xFFFF
end

# Source: lib/sneex/cpu_helper.ex
defmodule ExOwm do
  require Logger
  @moduledoc """
  ExOwm, OpenWeatherMap API Elixir client.
  This module contains main public interface of the application.
  """
  @typedoc """
  Current weather data API request.
  """
  @type request ::
          %{city: String.t()}
          | %{city: String.t(), country_code: String.t()}
          | %{id: integer()}
          | %{lat: float(), lon: float()}
          | %{lat: float(), lon: float(), dt: integer()}
          | %{zip: String.t(), country_code: String.t()}
  @typedoc """
  Current weather data API requests.
  """
  @type requests :: [request]
  @typedoc """
  Current weather data API call options corresponding to OWM APIs described in
  official docs: http://openweathermap.org/api
  The output of the request can be specified according to the options below.
  """
  @type option :: :format | :units | :lang | :cnt | :like | :accurate | :mode
  @typedoc """
  Keyword list of options.
  """
  @type options :: [option: term]
  @doc """
  Gets weather data of the given location with specified options.
  ## Examples
      iex> ExOwm.get_current_weather([%{city: "Warsaw"}, %{city: "London", country_code: "uk"}], units: :metric, lang: :pl)
  """
  @spec get_current_weather(requests, options) :: map
  def get_current_weather(loc, opts \\ [])
  def get_current_weather(locations, opts) when is_list(locations),
    do: ExOwm.CurrentWeather.Coordinator.get_weather(locations, opts)
  def get_current_weather(location, opts) when is_map(location),
    do: get_current_weather([location], opts)
  @doc """
  Gets weather data of the given location with specified options.
  ## Examples
      iex> ExOwm.get_weather([%{lat: 52.374031, lon: 4.88969}], units: :metric, lang: :pl)
  """
  @spec get_weather(requests, options) :: map
  def get_weather(loc, opts \\ [])
  def get_weather(locations, opts) when is_list(locations),
    do: ExOwm.Weather.Coordinator.get_weather(locations, opts)
  def get_weather(location, opts) when is_map(location),
    do: get_weather([location], opts)
  @doc """
  Gets 5 day forecast data of the given location with specified options.
  ## Examples
      iex> ExOwm.get_five_day_forecast([%{city: "Warsaw"}, %{city: "London", country_code: "uk"}], units: :metric, lang: :pl)
  """
  @spec get_five_day_forecast(requests, options) :: map
  def get_five_day_forecast(locations, opts \\ [])
  def get_five_day_forecast(locations, opts) when is_list(locations),
    do: ExOwm.FiveDayForecast.Coordinator.get_weather(locations, opts)
  def get_five_day_forecast(location, opts) when is_map(location),
    do: get_five_day_forecast([location], opts)
  @doc """
  Gets 1 to 16 days forecast data of the given location with specified options.
  ## Examples
      iex> ExOwm.get_sixteen_day_forecast([%{city: "Warsaw"}, %{city: "London", country_code: "uk"}], units: :metric, lang: :pl, cnt: 16)
  """
  @spec get_sixteen_day_forecast(requests, options) :: map
  def get_sixteen_day_forecast(locations, opts \\ [])
  def get_sixteen_day_forecast(locations, opts) when is_list(locations),
    do: ExOwm.SixteenDayForecast.Coordinator.get_weather(locations, opts)
  def get_sixteen_day_forecast(location, opts) when is_map(location),
    do: get_sixteen_day_forecast([location], opts)
  @doc """
  Gets historical weather data of the given location with specified options.
  `dt` should be within the last 5 days.
  ## Examples
      iex> ExOwm.get_historical_weather([%{lat: 52.374031, lon: 4.88969, dt: 1615546800}], units: :metric, lang: :pl)
  """
  @spec get_historical_weather(requests, options) :: map
  def get_historical_weather(loc, opts \\ [])
  def get_historical_weather(locations, opts) when is_list(locations),
    do: ExOwm.HistoricalWeather.Coordinator.get_weather(locations, opts)
  def get_historical_weather(location, opts) when is_map(location),
    do: get_historical_weather([location], opts)
end

# Source: lib/ex_owm.ex
defmodule RayTracer.Canvas do
  @moduledoc """
  This module defines functions for drawing pixels on a canvas and exporting it as a PPM file.
  """
  alias RayTracer.Color
  alias RayTracer.Matrix
  @type t :: %__MODULE__{
    width: integer,
    height: integer,
    pixels: Matrix.matrix
  }
  @ppm_magic_number "P3"
  @ppm_max_color_value 255
  @ppm_max_line_length 70
  defstruct width: 0, height: 0, pixels: %{}
  @spec new(integer, integer, Color.t) :: t
  def new(width, height, color \\ Color.new(0, 0, 0)) do
    pixels = Matrix.new(height, width, color)
    %__MODULE__{width: width, height: height, pixels: pixels}
  end
  @spec pixel_at(t, integer, integer) :: Color.t
  def pixel_at(canvas, x, y), do: Matrix.elem(canvas.pixels, y, x)
  @spec write_pixel(t, integer, integer, Color.t) :: t
  def write_pixel(canvas = %__MODULE__{width: w, height: h, pixels: p}, x, y, color) do
    if x < 0 || x >= canvas.width || y < 0 || y >= canvas.height do
      raise ArgumentError, message: "Position out of bounds: #{x}, #{y}. Size is #{w},#{h}."
    end
    new_pixels = p |> Matrix.set(y, x, color)
    %__MODULE__{canvas | pixels: new_pixels}
  end
  @spec export_to_ppm_file(t, String.t) :: :error | :ok
  def export_to_ppm_file(canvas, filename \\ "canvas.ppm") do
    case File.write(filename, canvas |> to_ppm) do
      :ok -> :ok
      _ -> :error
    end
  end
  @spec to_ppm(t) :: String.t
  def to_ppm(%RayTracer.Canvas{width: width, height: height, pixels: pixels}) do
    pd =
      pixels
      |> Enum.map(&row_to_ppm/1)
      |> Enum.join("\n")
    "#{@ppm_magic_number}\n#{width} #{height}\n#{@ppm_max_color_value}\n#{pd}\n"
  end
  @spec row_to_ppm(Matrix.row) :: String.t
  defp row_to_ppm(row) do
    row
    |> Enum.map(&color_to_ppm/1)
    |> Enum.join(" ")
    # |> break_ppm_line
  end
  @spec break_ppm_line(String.t) :: String.t
  defp break_ppm_line(row_string, break_at \\ @ppm_max_line_length) do
    case String.at(row_string, break_at) do
      " " ->
        row_string
        |> String.split_at(break_at)
        |> Tuple.to_list
        |> Enum.map(&String.trim/1)
        |> Enum.reject(fn(x) -> x |> String.length == 0 end)
        |> Enum.map(&break_ppm_line/1)
        |> Enum.join("\n")
      nil -> row_string
      _ -> break_ppm_line(row_string, break_at - 1)
    end
  end
  @spec color_to_ppm(Color.t) :: String.t
  defp color_to_ppm(color) do
   color
   |> Color.scale(@ppm_max_color_value)
   |> Color.map(fn (v) -> v |> round() |> to_string() end)
   |> Enum.join(" ")
  end
end

# Source: lib/canvas.ex
defmodule Ash.DataLayer do
  @moduledoc """
  The interface for being an Ash data layer.
  This is a large behaviour and its capability is not complete, but the idea
  is to have a large number of optional callbacks and to use the `can?/2` callback
  to ensure that the engine only ever tries to interact with the data layer in ways
  that it supports.
  """
  alias Ash.Dsl.Extension
  @type t :: module
  @type data_layer_query() :: struct
  @type feature() ::
          :transact
          | :multitenant
          | {:lateral_join, Ash.Resource.t()}
          | {:join, Ash.Resource.t()}
          | {:aggregate, Ash.Query.Aggregate.kind()}
          | {:query_aggregate, Ash.Query.Aggregate.kind()}
          | :aggregate_filter
          | :aggregate_sort
          | :boolean_filter
          | :async_engine
          | :create
          | :read
          | :update
          | :destroy
          | :limit
          | :offset
          | :filter
          | {:filter_expr, struct}
          | :sort
          | {:sort, Ash.Type.t()}
          | :upsert
          | :composite_primary_key
  @callback functions(Ash.Resource.t()) :: [module]
  @callback operators(Ash.Resource.t()) :: [module]
  @callback filter(data_layer_query(), Ash.Filter.t(), resource :: Ash.Resource.t()) ::
              {:ok, data_layer_query()} | {:error, term}
  @callback sort(data_layer_query(), Ash.Sort.t(), resource :: Ash.Resource.t()) ::
              {:ok, data_layer_query()} | {:error, term}
  @callback distinct(data_layer_query(), list(atom), resource :: Ash.Resource.t()) ::
              {:ok, data_layer_query()} | {:error, term}
  @callback limit(
              data_layer_query(),
              limit :: non_neg_integer(),
              resource :: Ash.Resource.t()
            ) ::
              {:ok, data_layer_query()} | {:error, term}
  @callback offset(
              data_layer_query(),
              offset :: non_neg_integer(),
              resource :: Ash.Resource.t()
            ) :: {:ok, data_layer_query()} | {:error, term}
  @callback set_tenant(Ash.Resource.t(), data_layer_query(), term) ::
              {:ok, data_layer_query()} | {:error, term}
  @callback resource_to_query(Ash.Resource.t(), Ash.Api.t()) :: data_layer_query()
  @callback transform_query(Ash.Query.t()) :: Ash.Query.t()
  @callback run_query(data_layer_query(), Ash.Resource.t()) ::
              {:ok, list(Ash.Resource.t())} | {:error, term}
  @callback equal?(Ash.DataLayer.t()) :: boolean
  @callback run_aggregate_query(
              data_layer_query(),
              list(Ash.Query.Aggregate.t()),
              Ash.Resource.t()
            ) ::
              {:ok, map} | {:error, term}
  @callback run_aggregate_query_with_lateral_join(
              data_layer_query(),
              list(Ash.Query.Aggregate.t()),
              [Ash.Resource.record()],
              source_resource :: Ash.Resource.t(),
              destination_resource :: Ash.Resource.t(),
              source :: atom,
              destination :: atom
            ) ::
              {:ok, list(Ash.Resource.t())} | {:error, term}
  @callback run_query_with_lateral_join(
              data_layer_query(),
              [Ash.Resource.record()],
              source_resource :: Ash.Resource.t(),
              destination_resource :: Ash.Resource.t(),
              source :: atom,
              destination :: atom
            ) ::
              {:ok, list(Ash.Resource.t())} | {:error, term}
  @callback create(Ash.Resource.t(), Ash.Changeset.t()) ::
              {:ok, Ash.Resource.t()} | {:error, term}
  @callback upsert(Ash.Resource.t(), Ash.Changeset.t()) ::
              {:ok, Ash.Resource.t()} | {:error, term}
  @callback update(Ash.Resource.t(), Ash.Changeset.t()) ::
              {:ok, Ash.Resource.t()} | {:error, term}
  @callback add_aggregate(
              data_layer_query(),
              Ash.Query.Aggregate.t(),
              Ash.Resource.t()
            ) ::
              {:ok, data_layer_query()} | {:error, term}
  @callback destroy(Ash.Resource.t(), Ash.Changeset.t()) :: :ok | {:error, term}
  @callback transaction(Ash.Resource.t(), (() -> term)) :: {:ok, term} | {:error, term}
  @callback in_transaction?(Ash.Resource.t()) :: boolean
  @callback source(Ash.Resource.t()) :: String.t()
  @callback rollback(Ash.Resource.t(), term) :: no_return
  @callback can?(Ash.Resource.t(), feature()) :: boolean
  @callback set_context(Ash.Resource.t(), data_layer_query(), map) ::
              {:ok, data_layer_query()} | {:error, term}
  @optional_callbacks source: 1,
                      equal?: 1,
                      run_query: 2,
                      distinct: 3,
                      run_query_with_lateral_join: 6,
                      create: 2,
                      update: 2,
                      set_context: 3,
                      destroy: 2,
                      filter: 3,
                      sort: 3,
                      limit: 3,
                      offset: 3,
                      transaction: 2,
                      rollback: 2,
                      upsert: 2,
                      operators: 1,
                      functions: 1,
                      in_transaction?: 1,
                      add_aggregate: 3,
                      run_aggregate_query: 3,
                      run_aggregate_query_with_lateral_join: 7,
                      transform_query: 1,
                      set_tenant: 3,
                      resource_to_query: 2
  @doc "The data layer of the resource, or nil if it does not have one"
  @spec data_layer(Ash.Resource.t()) :: Ash.DataLayer.t()
  def data_layer(resource) do
    Extension.get_persisted(resource, :data_layer)
  end
  @doc "Whether or not the data layer supports a specific feature"
  @spec data_layer_can?(Ash.Resource.t(), Ash.DataLayer.feature()) :: boolean
  def data_layer_can?(resource, feature) do
    data_layer = data_layer(resource)
    data_layer && Ash.DataLayer.can?(feature, resource)
  end
  @doc "Custom functions supported by the data layer of the resource"
  @spec data_layer_functions(Ash.Resource.t()) :: map
  def data_layer_functions(resource) do
    Ash.DataLayer.functions(resource)
  end
  @doc "Wraps the execution of the function in a transaction with the resource's data_layer"
  @spec transaction(Ash.Resource.t(), (() -> term)) :: term
  def transaction(resource, func) do
    if data_layer_can?(resource, :transact) do
      data_layer(resource).transaction(resource, func)
    else
      func.()
    end
  end
  @doc "Rolls back the current transaction"
  @spec rollback(Ash.Resource.t(), term) :: no_return
  def rollback(resource, term) do
    data_layer(resource).rollback(resource, term)
  end
  @spec resource_to_query(Ash.Resource.t(), Ash.Api.t()) :: data_layer_query()
  def resource_to_query(resource, api) do
    data_layer = Ash.DataLayer.data_layer(resource)
    if :erlang.function_exported(data_layer, :resource_to_query, 2) do
      Ash.DataLayer.data_layer(resource).resource_to_query(resource, api)
    else
      Ash.DataLayer.data_layer(resource).resource_to_query(resource)
    end
  end
  @spec update(Ash.Resource.t(), Ash.Changeset.t()) ::
          {:ok, Ash.Resource.record()} | {:error, term}
  def update(resource, changeset) do
    Ash.DataLayer.data_layer(resource).update(resource, changeset)
  end
  @spec create(Ash.Resource.t(), Ash.Changeset.t()) ::
          {:ok, Ash.Resource.record()} | {:error, term}
  def create(resource, changeset) do
    Ash.DataLayer.data_layer(resource).create(resource, changeset)
  end
  @spec destroy(Ash.Resource.t(), Ash.Changeset.t()) :: :ok | {:error, term}
  def destroy(resource, changeset) do
    Ash.DataLayer.data_layer(resource).destroy(resource, changeset)
  end
  @spec source(Ash.Resource.t()) :: String.t()
  def source(resource) do
    data_layer = Ash.DataLayer.data_layer(resource)
    if :erlang.function_exported(data_layer, :source, 1) do
      data_layer.source(resource)
    else
      ""
    end
  end
  @spec set_tenant(Ash.Resource.t(), data_layer_query(), term) ::
          {:ok, data_layer_query()} | {:error, term}
  def set_tenant(resource, query, term) do
    Ash.DataLayer.data_layer(resource).set_tenant(resource, query, term)
  end
  @spec upsert(Ash.Resource.t(), Ash.Changeset.t()) ::
          {:ok, Ash.Resource.record()} | {:error, term}
  def upsert(resource, changeset) do
    Ash.DataLayer.data_layer(resource).upsert(resource, changeset)
  end
  @spec set_context(Ash.Resource.t(), data_layer_query(), map) ::
          {:ok, data_layer_query()} | {:error, term}
  def set_context(resource, query, map) do
    data_layer = Ash.DataLayer.data_layer(resource)
    if :erlang.function_exported(data_layer, :set_context, 3) do
      data_layer.set_context(resource, query, map)
    else
      {:ok, query}
    end
  end
  @spec filter(data_layer_query(), Ash.Filter.t(), Ash.Resource.t()) ::
          {:ok, data_layer_query()} | {:error, term}
  def filter(query, nil, _), do: {:ok, query}
  def filter(query, filter, resource) do
    data_layer = Ash.DataLayer.data_layer(resource)
    if data_layer.can?(resource, :filter) do
      if data_layer.can?(resource, :boolean_filter) do
        data_layer.filter(query, filter, resource)
      else
        simple_filter = Ash.Filter.to_simple_filter(filter)
        data_layer.filter(query, simple_filter, resource)
      end
    else
      {:error, "Data layer does not support filtering"}
    end
  end
  @spec sort(data_layer_query(), Ash.Sort.t(), Ash.Resource.t()) ::
          {:ok, data_layer_query()} | {:error, term}
  def sort(query, sort, resource) do
    if can?(:sort, resource) do
      data_layer = Ash.DataLayer.data_layer(resource)
      data_layer.sort(query, sort, resource)
    else
      {:ok, query}
    end
  end
  @spec distinct(data_layer_query(), list(atom) | nil, Ash.Resource.t()) ::
          {:ok, data_layer_query()} | {:error, term}
  def distinct(query, distinct, resource) do
    if can?(:distinct, resource) && distinct do
      data_layer = Ash.DataLayer.data_layer(resource)
      data_layer.distinct(query, distinct, resource)
    else
      {:ok, query}
    end
  end
  @spec limit(data_layer_query(), limit :: non_neg_integer, Ash.Resource.t()) ::
          {:ok, data_layer_query()} | {:error, term}
  def limit(query, nil, _resource), do: {:ok, query}
  def limit(query, limit, resource) do
    if can?(:limit, resource) do
      data_layer = Ash.DataLayer.data_layer(resource)
      data_layer.limit(query, limit, resource)
    else
      {:ok, query}
    end
  end
  @spec offset(data_layer_query(), offset :: non_neg_integer, Ash.Resource.t()) ::
          {:ok, data_layer_query()} | {:error, term}
  def offset(query, nil, _resource), do: {:ok, query}
  def offset(query, offset, resource) do
    if can?(:offset, resource) do
      data_layer = Ash.DataLayer.data_layer(resource)
      data_layer.offset(query, offset, resource)
    else
      {:ok, query}
    end
  end
  @spec add_aggregate(data_layer_query(), Ash.Query.Aggregate.t(), Ash.Resource.t()) ::
          {:ok, data_layer_query()} | {:error, term}
  def add_aggregate(query, aggregate, resource) do
    data_layer = Ash.DataLayer.data_layer(resource)
    data_layer.add_aggregate(query, aggregate, resource)
  end
  @spec can?(feature, Ash.Resource.t()) :: boolean
  def can?(feature, resource) do
    data_layer = Ash.DataLayer.data_layer(resource)
    data_layer.can?(resource, feature)
  end
  @spec run_aggregate_query(
          data_layer_query(),
          list(Ash.Query.Aggregate.t()),
          Ash.Resource.t()
        ) ::
          {:ok, map} | {:error, term}
  def run_aggregate_query(query, aggregates, resource) do
    data_layer = Ash.DataLayer.data_layer(resource)
    if :erlang.function_exported(data_layer, :run_aggregate_query, 3) do
      data_layer.run_aggregate_query(query, aggregates, resource)
    else
      {:error, "Aggregate queries not supported"}
    end
  end
  @spec run_query(data_layer_query(), central_resource :: Ash.Resource.t()) ::
          {:ok, list(Ash.Resource.record())} | {:error, term}
  def run_query(query, central_resource) do
    Ash.DataLayer.data_layer(central_resource).run_query(query, central_resource)
  end
  def run_aggregate_query_with_lateral_join(
        query,
        aggregates,
        root_data,
        source_resource,
        destination_resource,
        source,
        destination
      ) do
    Ash.DataLayer.data_layer(source_resource).run_aggregate_query_with_lateral_join(
      query,
      aggregates,
      root_data,
      source_resource,
      destination_resource,
      source,
      destination
    )
  end
  def run_query_with_lateral_join(
        query,
        root_data,
        source_resource,
        destination_resource,
        source,
        destination
      ) do
    Ash.DataLayer.data_layer(source_resource).run_query_with_lateral_join(
      query,
      root_data,
      source_resource,
      destination_resource,
      source,
      destination
    )
  end
  def transact(resource, func) do
    if can?(:transact, resource) && not in_transaction?(resource) do
      data_layer = Ash.DataLayer.data_layer(resource)
      data_layer.transaction(resource, func)
    else
      {:ok, func.()}
    end
  end
  def in_transaction?(resource) do
    if can?(:transact, resource) do
      data_layer = Ash.DataLayer.data_layer(resource)
      data_layer.in_transaction?(resource)
    else
      false
    end
  end
  def functions(resource) do
    data_layer = Ash.DataLayer.data_layer(resource)
    if :erlang.function_exported(data_layer, :functions, 1) do
      data_layer.functions(resource)
    else
      %{}
    end
  end
  def transform_query(query) do
    data_layer = Ash.DataLayer.data_layer(query.resource)
    if :erlang.function_exported(data_layer, :transform_query, 1) do
      data_layer.transform_query(query)
    else
      query
    end
  end
end

# Source: lib/ash/data_layer/data_layer.ex
defmodule OffBroadway.Kafka do
  @moduledoc ~S"""
  Defines a macro to easily define a Kafka Broadway pipeline in your
  application, configuring Broadway and Kafka via callbacks.
  It starts a Broadway pipeline for each topic and partition for increased
  concurrency when processing events, receiving partition assignments from the group
  coordinator and starting an Elsa group supervisor for each.
  It uses the following callbacks:
    1. `c:kafka_config/1` receives `start_link` options and
       returns the Kafka consumer configuration which is passed to
       `Elsa.Supervisor.start_link/1`.
    2. `c:broadway_config/3` receives a keyword list of configuration
       options, a topic and a partition. It returns the keyword
       list to configure the Broadway processors, batchers and contexts.
       Called by `OffBroadway.Kafka.ShowtimeHandler`.
  For example:
  ```elixir
  defmodule ShowtimeBroadway do
    use OffBroadway.Kafka
    def kafka_config(_opts) do
      [
        connection: :per_partition,
        endpoints: [localhost: 9092],
        group_consumer: [
          group: "per_partition",
          topics: ["topic1"],
          config: [
            prefetch_count: 5,
            prefetch_bytes: 0,
            begin_offset: :earliest
          ]
        ]
      ]
    end
    def broadway_config(opts, topic, partition) do
      [
        name: :"broadway_per_partition_#{topic}_#{partition}",
        processors: [
          default: [
            concurrency: 5
          ]
        ],
        context: %{
          pid: Keyword.get(opts, :pid)
        }
      ]
    end
    def handle_message(processor, message, context) do
      send(context.pid, {:message, message})
      message
    end
  end
  ```
  """
  @callback broadway_config(keyword(), String.t(), non_neg_integer()) :: keyword()
  @callback kafka_config(term()) :: keyword()
  @doc """
  Macro which starts a pipeline for each Elsa consumer group manager instantiated.
  """
  defmacro __using__(_opts) do
    quote do
      use Broadway
      @behaviour OffBroadway.Kafka
      def start_link(opts) do
        kafka_config = kafka_config(opts)
        new_group_consumer =
          Keyword.fetch!(kafka_config, :group_consumer)
          |> Keyword.put(:handler, OffBroadway.Kafka.ShowtimeHandler)
          |> Keyword.put(:handler_init_args, broadway_module: __MODULE__, opts: opts)
        config = Keyword.put(kafka_config, :group_consumer, new_group_consumer)
        Elsa.Supervisor.start_link(config)
      end
    end
  end
end

# Source: lib/off_broadway/kafka.ex
defmodule XGPS.Ports do
  use Supervisor
  @doc """
  Open one port to be consumed. Needs to have one GPS attached to the port to work.
  To simulate, give port_name = :simulate
  """
  def start_port(port_name) do
    Supervisor.start_child(__MODULE__, [{port_name}])
  end
  def start_simulator(file_name) do
    Supervisor.start_child(__MODULE__, [{:simulate, file_name}])
  end
  def stop_simulator() do
    stop_port(:simulate)
  end
  def stop_port(port_name_to_stop) do
    children =
      Supervisor.which_children(__MODULE__)
      |> Enum.map(fn({_, pid, :supervisor, _}) -> pid end)
      |> Enum.map(fn(pid) -> {pid, XGPS.Port.Supervisor.get_port_name(pid)} end)
      |> Enum.filter(fn({_pid, port_name}) -> port_name == port_name_to_stop end)
      |> Enum.map(fn({pid, _port_name}) -> pid end)
    case length(children) do
      0 -> {:ok, :no_port_running}
      1 ->
        pid = Enum.at(children, 0)
        :ok = Supervisor.stop(pid)
    end
  end
  @doc """
  Return all the connected port names
  """
  def get_running_port_names do
    Supervisor.which_children(__MODULE__)
    |> Enum.map(fn({_, pid, :supervisor, _}) -> pid end)
    |> Enum.map(fn(pid) -> XGPS.Port.Supervisor.get_port_name(pid) end)
  end
  @doc """
  Return the latest position if attached to a GPS.
  """
  def get_one_position do
    children = Supervisor.which_children(__MODULE__)
    case length(children) do
      0 -> {:error, :no_port_running}
      _ ->
       {_, pid, :supervisor, _} = Enum.at(children, 0)
       gps_data = XGPS.Port.Supervisor.get_gps_data(pid)
       {:ok, gps_data}
    end
  end
  @doc """
  Will send one GPS report with the given position.
  Since this will effectively generate both RMC and GGA sentences, the broadcaster will produce two values
  """
  def send_simulated_position(lat, lon, alt) when is_float(lat) and is_float(lon) and is_float(alt) do
    now = DateTime.utc_now()
    send_simulated_position(lat, lon, alt, now)
  end
  @doc """
  Will send one GPS report with the given position.
  Since this will effectively generate both RMC and GGA sentences, the broadcaster will produce two values
  """
  def send_simulated_position(lat, lon, alt, date_time) when is_float(lat) and is_float(lon) and is_float(alt) do
    simulators = get_running_simulators()
    case length(simulators) do
      0 -> {:error, :no_simulator_running}
      _ ->
        {sim_pid, :simulate} = Enum.at(simulators, 0)
        XGPS.Port.Supervisor.send_simulated_position(sim_pid, lat, lon, alt, date_time)
        :ok
    end
  end
  def reset_simulated_port_state() do
    simulators = get_running_simulators()
    case length(simulators) do
      0 -> {:error, :no_simulator_running}
      _ ->
        {sim_pid, :simulate} = Enum.at(simulators, 0)
        XGPS.Port.Supervisor.reset_simulated_port_state(sim_pid)
        :ok
    end
  end
  @doc """
  Will send one GPS report indicating no fix.
  Since this will effectively generate both RMC and GGA sentences, the broadcaster will produce two values
  """
  def send_simulated_no_fix() do
    now = DateTime.utc_now()
    send_simulated_no_fix(now)
  end
  def send_simulated_no_fix(date_time) do
    simulators = get_running_simulators()
    case length(simulators) do
      0 -> {:error, :no_simulator_running}
      _ ->
        {sim_pid, :simulate} = Enum.at(simulators, 0)
        XGPS.Port.Supervisor.send_simulated_no_fix(sim_pid, date_time)
        :ok
    end
  end
  def start_link do
    result = {:ok, pid} = Supervisor.start_link(__MODULE__, :ok, name: __MODULE__)
    start_port_if_defined_in_config(pid)
    result
  end
  defp start_port_if_defined_in_config(pid) do
    case Application.get_env(:xgps, :port_to_start) do
      nil ->
        :ok
      portname_with_args ->
        Supervisor.start_child(pid, [portname_with_args])
    end
  end
  # Callbacks
  def init(:ok) do
    children = [
      supervisor(XGPS.Port.Supervisor, [], restart: :transient)
    ]
    supervise(children, strategy: :simple_one_for_one)
  end
  defp get_running_simulators do
    Supervisor.which_children(__MODULE__)
    |> Enum.map(fn({_, pid, :supervisor, _}) -> pid end)
    |> Enum.map(fn(pid) -> {pid, XGPS.Port.Supervisor.get_port_name(pid)} end)
    |> Enum.filter(fn({_pid, port_name}) -> port_name == :simulate end)
  end
end

# Source: lib/xgps/ports.ex
defmodule PlugSessionMnesia do
  @moduledoc """
  An application for storing and managing Plug sessions with Mnesia.
  This application provides a `Plug.Session.Store` using Mnesia as a back-end,
  and a session cleaner for automatically deleting inactive sessions. It also
  provides helpers for creating the Mnesia table.
  Using Mnesia enables session persistence between application reboots and
  distribution. However, distribution is not yet supported out of the box by the
  table creation helpers. You must create the Mnesia table yourself to use this
  feature.
  ## Setup
  To use it in your app, add this to your dependencies:
      {:plug_session_mnesia, "~> #{Mix.Project.config()[:version]}"}
  Then, add to your configuration:
      config :plug_session_mnesia,
        table: :session,
        max_age: 86_400
  It will store the sessions in a Mnesia table named `session` and discard them
  if they are inactive for more than 1 day. You can also choose to discard
  sessions after a given amount of time, regardless of whether they have been active or
  not. To do this, simply add `timestamp: :fixed` to the configuration.
  By default, `PlugSessionMnesia.Cleaner` checks every minute for outdated
  sessions. You can change this behaviour by setting the `:cleaner_timeout` key
  in the configuration with a value in seconds.
  You must also tell `Plug.Session` that you use this store:
      plug Plug.Session,
        key: "_app_key",
        store: PlugSessionMnesia.Store
  You can then create the Mnesia table:
      $ mix session.setup
  If you want to use a node name or a custom directory for the Mnesia database,
  you can take a look at `Mix.Tasks.Session.Setup`.
  You can also create it directly from Elixir using
  `PlugSessionMnesia.Helpers.setup!/0`. This can be useful to include in a setup
  task to be run in a release environment.
  """
  use Application
  alias PlugSessionMnesia.Cleaner
  @impl true
  def start(_type, _args) do
    Supervisor.start_link(
      [Cleaner],
      strategy: :one_for_one,
      name: PlugSessionMnesia.Supervisor
    )
  end
end

# Source: lib/plug_session_mnesia.ex
defmodule Modbux.Rtu.Slave do
  @moduledoc """
  API for a Modbus RTU Slave device.
  """
  use GenServer, restart: :transient
  alias Modbux.Model.Shared
  alias Modbux.Rtu.{Slave, Framer}
  alias Modbux.Rtu
  alias Circuits.UART
  require Logger
  @timeout 1000
  @speed 115_200
  defstruct model_pid: nil,
            uart_pid: nil,
            tty: nil,
            uart_opts: nil,
            parent_pid: nil
  @doc """
  Starts a Modbus RTU Slave process.
  The following options are available:
    * `tty` - defines the serial port to spawn the Slave.
    * `gen_opts` - defines extra options for the Genserver OTP configuration.
    * `uart_opts` - defines extra options for the UART configuration (defaults:
          [speed: 115200, rx_framing_timeout: 1000]).
    * `model` - defines the DB initial state.
    * `active` - (`true` or `false`) enables/disables DB update notifications (mailbox).
  The messages (when active mode is true) have the following form:
    `{:modbus_rtu, {:slave_request, payload}}`
  or
    `{:modbus_rtu, {:slave_error, payload, reason}}`
  The following are some reasons:
    * `:ecrc`  - corrupted message (invalid crc).
    * `:einval`  - invalid function.
    * `:eaddr`  - invalid memory address requested.
  ## Model (DB)
  The model or data base (DB) defines the slave memory map, the DB is defined by the following syntax:
  ```elixir
  %{slave_id => %{{memory_type, address_number} => value}}
  ```
  where:
    * `slave_id` - specifies a unique unit address from 1 to 247.
    * `memory_type` - specifies the memory between:
         * `:c` - Discrete Output Coils.
         * `:i` - Discrete Input Contacts.
         * `:ir` - Analog Input Registers.
         * `:hr` - Analog Output Registers.
    * `address_number` - specifies the memory address.
    * `value` - the current value from that memory.
  ## Example
  ```elixir
  model = %{80 => %{{:c, 20818} => 0, {:hr, 20818} => 0}}
  Modbux.Rtu.Slave.start_link(model: model, tty: "ttyUSB0")
  ```
  """
  @spec start_link(keyword) :: :ignore | {:error, any} | {:ok, pid}
  def start_link(params) do
    gen_opts = Keyword.get(params, :gen_opts, [])
    GenServer.start_link(__MODULE__, {params, self()}, gen_opts)
  end
  @spec stop(atom | pid | {atom, any} | {:via, atom, any}) :: :ok
  def stop(pid) do
    GenServer.stop(pid)
  end
  @doc """
  Gets the Slave state.
  """
  @spec state(atom | pid | {atom, any} | {:via, atom, any}) :: any
  def state(pid) do
    GenServer.call(pid, :state)
  end
  @doc """
  Updates the state of the Slave DB.
  `cmd` is a 4 elements tuple, as follows:
    - `{:rc, slave, address, count}` read `count` coils.
    - `{:ri, slave, address, count}` read `count` inputs.
    - `{:rhr, slave, address, count}` read `count` holding registers.
    - `{:rir, slave, address, count}` read `count` input registers.
    - `{:fc, slave, address, value}` force single coil.
    - `{:phr, slave, address, value}` preset single holding register.
    - `{:fc, slave, address, values}` force multiple coils.
    - `{:phr, slave, address, values}` preset multiple holding registers.
  """
  @spec request(atom | pid | {atom, any} | {:via, atom, any}, any) :: any
  def request(pid, cmd) do
    GenServer.call(pid, {:request, cmd})
  end
  @doc """
  Gets the current state of the Slave DB.
  """
  @spec get_db(atom | pid | {atom, any} | {:via, atom, any}) :: any
  def get_db(pid) do
    GenServer.call(pid, :get_db)
  end
  @doc """
  Send a raw frame through the serial port.
  """
  @spec raw_write(atom | pid | {atom, any} | {:via, atom, any}, any) :: any
  def raw_write(pid, data) do
    GenServer.call(pid, {:raw_write, data})
  end
  def init({params, parent_pid}) do
    parent_pid = if Keyword.get(params, :active, false), do: parent_pid
    tty = Keyword.fetch!(params, :tty)
    model = Keyword.fetch!(params, :model)
    Logger.debug("(#{__MODULE__}) Starting Modbux Slave at \"#{tty}\"")
    uart_opts = Keyword.get(params, :uart_opts, speed: @speed, rx_framing_timeout: @timeout)
    {:ok, model_pid} = Shared.start_link(model: model)
    {:ok, u_pid} = UART.start_link()
    UART.open(u_pid, tty, [framing: {Framer, behavior: :slave}] ++ uart_opts)
    state = %Slave{
      model_pid: model_pid,
      parent_pid: parent_pid,
      tty: tty,
      uart_pid: u_pid,
      uart_opts: uart_opts
    }
    {:ok, state}
  end
  def terminate(:normal, _state), do: nil
  def terminate(reason, state) do
    Logger.error("(#{__MODULE__}) Error: #{inspect(reason)}, state: #{inspect(state)}")
  end
  def handle_call(:state, _from, state), do: {:reply, state, state}
  def handle_call({:request, cmd}, _from, state) do
    res =
      case Shared.apply(state.model_pid, cmd) do
        {:ok, values} ->
          Logger.debug("(#{__MODULE__}) DB request: #{inspect(cmd)}, #{inspect(values)}")
          values
        nil ->
          Logger.debug("(#{__MODULE__}) DB update: #{inspect(cmd)}")
        error ->
          Logger.debug("(#{__MODULE__}) An error has occur #{inspect(error)}")
          error
      end
    {:reply, res, state}
  end
  def handle_call(:get_db, _from, state) do
    {:reply, Shared.state(state.model_pid), state}
  end
  def handle_call({:raw_write, data}, _from, state) do
    UART.write(state.uart_pid, data)
    {:reply, :ok, state}
  end
  def handle_info({:circuits_uart, device, {:error, reason, bad_frame}}, state) do
    Logger.warn("(#{__MODULE__}) Error with \"#{device}\" received: #{bad_frame}, reason: #{reason}")
    case reason do
      :einval ->
        if valid_slave_id?(state, bad_frame) do
          response = Rtu.pack_res(bad_frame, :einval)
          Logger.debug("(#{__MODULE__}) Sending error code: #{inspect(response)}, reason: #{reason}")
          UART.write(state.uart_pid, response)
        end
      _ ->
        nil
    end
    if !is_nil(state.parent_pid), do: notify(state.parent_pid, reason, bad_frame)
    {:noreply, state}
  end
  def handle_info({:circuits_uart, _device, {:partial, data}}, state) do
    Logger.warn("(#{__MODULE__})  Timeout: #{inspect(data)}")
    {:noreply, state}
  end
  def handle_info({:circuits_uart, device, modbus_frame}, state) do
    Logger.debug("(#{__MODULE__}) Recieved from UART (#{device}): #{inspect(modbus_frame)}")
    cmd = Rtu.parse_req(modbus_frame)
    Logger.debug("(#{__MODULE__}) Received Modbux request: #{inspect(cmd)}")
    case Shared.apply(state.model_pid, cmd) do
      {:ok, values} ->
        response = Rtu.pack_res(cmd, values)
        if !is_nil(state.parent_pid), do: notify(state.parent_pid, nil, cmd)
        UART.write(state.uart_pid, response)
      {:error, reason} ->
        response = Rtu.pack_res(modbus_frame, reason)
        if !is_nil(state.parent_pid), do: notify(state.parent_pid, reason, cmd)
        UART.write(state.uart_pid, response)
        Logger.debug(
          "(#{__MODULE__}) An error has occur for cmd: #{inspect(cmd)}, response #{inspect(response)}"
        )
      nil ->
        nil
    end
    {:noreply, state}
  end
  # Catch all clause
  def handle_info(msg, state) do
    Logger.warn("(#{__MODULE__})  Unknown msg: #{inspect(msg)}")
    {:noreply, state}
  end
  defp valid_slave_id?(state, <<slave_id, _b_tail::binary>>) do
    state.model_pid
    |> Shared.state()
    |> Map.has_key?(slave_id)
  end
  defp notify(pid, nil, cmd) do
    send(pid, {:modbus_rtu, {:slave_request, cmd}})
  end
  defp notify(pid, reason, cmd) do
    send(pid, {:modbus_rtu, {:slave_error, cmd, reason}})
  end
end

# Source: lib/rtu/slave.ex
defmodule Desktop.Menu do
  @moduledoc """
  Menu module used to create and handle menus in Desktop.
  Menus are defined similarly to LiveView, using a callback module and XML:
  ```
    defmodule ExampleMenuBar do
      use Desktop.Menu
      @impl true
      def mount(menu) do
        menu = assign(menu, items: ExampleRepo.all_items())
        {:ok, menu}
      end
      @impl true
      def handle_event(command, menu) do
        case command do
          <<"open">> -> :not_implemented
          <<"quit">> -> Desktop.Window.quit()
          <<"help">> -> :wx_misc.launchDefaultBrowser(\'https://google.com\')
          <<"about">> -> :not_implemented
        end
        {:noreply, menu}
      end
      @impl true
      def render(assigns) do
        ~E\"""
        <menubar>
          <menu label="<%= gettext "File" %>">
              <item onclick="open"><%= gettext "Open" %></item>
              <hr/>
              <item onclick="quit"><%= gettext "Quit" %></item>
          </menu>
          <menu label="<%= gettext "Items" %>">
            <%= for item <- @items do %>
              <item><%= item.name %></item>
            <% end %>
          </menu>
          <menu label="<%= gettext "Help" %>">
              <item onclick="help"><%= gettext "Show Documentation" %></item>
              <item onclick="about"><%= gettext "About" %></item>
          </menu>
        </menubar>
        \"""
      end
    end
  ```
  # Template
  As in LiveView, the template can either be embedded in the `render/1`
  function or side-loaded as a .eex file next to the menu's .ex file.
  # XML Structure
  These items are defined:
  ## `<menubar>...menus...</menubar>`
  For an application (window) menubar this must be the root element; the same
  applies when passing a menubar to the `Desktop.Window` start parameters.
  It has no attributes.
  ## `<menu label="Label">...items...</menu>`
  For an icon menu `menu` must be the root element. Menu elements can contain multiple
  children of type `menu`, `item` and `hr`
  ### Attributes
  * `label` - the label that should be displayed on the menu
  ## `<item ...>Label</item>`
  This is an entry in the menu with a label, a type, and an onclick action.
  ### Attributes
  * `onclick` - an event name that should be fired when this item is clicked. It will cause `handle_event/2` to be called
  * `type`        - the type of the item. The default is `normal`, but it can be one of
    * `normal`    - a normal text item
    * `radio`     - a radio button
    * `checkbox`  - a checkbox item
  * `checked` - whether the `checkbox` or `radio` button should be checked. `nil`, `false` and `0` are treated
  as false values, every other value is treated as true.
  * `disabled` - whether the item should be disabled. `nil`, `false` and `0` are treated
  as false values, every other value is treated as true.
  ## `<hr />`
  A separator item
  """
  use GenServer
  require Logger
  alias Desktop.Menu
  alias Desktop.Menu.{Adapter, Parser}
  defstruct [
    :__adapter__,
    :app,
    :assigns,
    :module,
    :dom,
    :pid,
    :last_render
  ]
  @type t() :: %Menu{
          __adapter__: any(),
          app: nil,
          assigns: %{},
          module: module,
          dom: any(),
          pid: nil | pid(),
          last_render: nil | DateTime.t()
        }
  @callback mount(assigns :: map()) :: {:ok, map()}
  @callback handle_event(event_name :: String.t(), assigns :: map()) :: {:noreply, map()}
  @callback handle_info(any(), assigns :: map()) :: {:noreply, map()}
  @callback render(Keyword.t()) :: String.t()
  @doc false
  defmacro __using__(opts) do
    Module.register_attribute(__CALLER__.module, :is_menu_server, persist: true, accumulate: false)
    Module.put_attribute(__CALLER__.module, :is_menu_server, Keyword.get(opts, :server, true))
    # Phoenix.LiveView.HTMLEngine vs. Phoenix.HTML.Engine
    quote do
      @behaviour Desktop.Menu
      import Desktop.Menu, only: [assign: 2, connected?: 1]
      import Phoenix.HTML, only: [sigil_e: 2, sigil_E: 2]
      import Phoenix.LiveView.Helpers, only: [sigil_L: 2, sigil_H: 2]
      alias Desktop.Menu
      @before_compile Desktop.Menu
    end
  end
  defmacro __before_compile__(env) do
    render? = Module.defines?(env.module, {:render, 1})
    root = Path.dirname(env.file)
    filename = template_filename(env)
    templates = Phoenix.Template.find_all(root, filename)
    case {render?, templates} do
      {true, [template | _]} ->
        IO.warn(
          "ignoring template #{inspect(template)} because the Menu " <>
            "#{inspect(env.module)} defines a render/1 function",
          Macro.Env.stacktrace(env)
        )
        :ok
      {true, []} ->
        :ok
      {false, [template]} ->
        ext = template |> Path.extname() |> String.trim_leading(".") |> String.to_atom()
        engine = Map.fetch!(Phoenix.Template.engines(), ext)
        ast = engine.compile(template, filename)
        quote do
          @file unquote(template)
          @external_resource unquote(template)
          def render(var!(assigns)) when is_map(var!(assigns)) do
            unquote(ast)
          end
        end
      {false, [_ | _]} ->
        IO.warn(
          "multiple templates were found for #{inspect(env.module)}: #{inspect(templates)}",
          Macro.Env.stacktrace(env)
        )
        :ok
      {false, []} ->
        template = Path.join(root, filename <> ".heex")
        message = ~s'''
        render/1 was not implemented for #{inspect(env.module)}.
        Make sure to either explicitly define a render/1 clause with a Menu template:
            def render(assigns) do
              ~H"""
              ...
              """
            end
        Or create a file at #{inspect(template)} with the Menu template.
        '''
        IO.warn(message, Macro.Env.stacktrace(env))
        quote do
          @external_resource unquote(template)
          def render(_assigns) do
            raise unquote(message)
          end
        end
    end
  end
  defp template_filename(env) do
    env.module
    |> Module.split()
    |> List.last()
    |> Macro.underscore()
    |> Kernel.<>(".html")
  end
  def connected?(_menu), do: true
  def assign(menu, properties \\ [])
  def assign(menu, properties) when is_list(properties) do
    assign(menu, Map.new(properties))
  end
  def assign(menu = %Menu{assigns: assigns}, properties) when is_map(properties) do
    %Menu{menu | assigns: Map.merge(assigns, properties)}
  end
  def assign(menu, property, value) when is_atom(property) do
    assign(menu, %{property => value})
  end
  def assign_new(menu = %Menu{assigns: assigns}, property, fun)
      when is_atom(property) and is_function(fun) do
    %Menu{menu | assigns: Map.put_new_lazy(assigns, property, fun)}
  end
  # GenServer implementation
  def start!(init_opts \\ [], opts \\ [])
  def start!(init_opts, opts) do
    case start_link(init_opts, opts) do
      {:ok, pid} -> pid
      {:error, {:already_started, pid}} -> pid
      {:error, reason} -> raise reason
      :ignore -> nil
    end
  end
  @spec start_link(keyword(), keyword()) :: GenServer.on_start()
  def start_link(init_opts \\ [], opts \\ [])
  def start_link(init_opts, opts) do
    GenServer.start_link(Menu, init_opts, opts)
  end
  @impl true
  def init(init_opts) do
    menu_pid = self()
    module = Keyword.get(init_opts, :module)
    dom = Keyword.get(init_opts, :dom, [])
    app = Keyword.get(init_opts, :app, nil)
    adapter_module =
      case Keyword.get(init_opts, :adapter, Adapter.Wx) do
        mod when mod in [Adapter.Wx, Adapter.DBus] -> mod
        _ -> Adapter.Wx
      end
    adapter_opts =
      init_opts
      |> Keyword.drop([:dom, :adapter])
      |> Keyword.put(:menu_pid, menu_pid)
    adapter =
      adapter_opts
      |> adapter_module.new()
      |> Adapter.create(dom)
    menu =
      %Menu{
        __adapter__: adapter,
        app: app,
        module: module,
        dom: dom,
        assigns: %{},
        pid: menu_pid
      }
      |> do_mount()
    if is_module_server?(module) do
      Process.register(menu_pid, module)
    end
    {:ok, menu}
  end
  def trigger_event(menu_pid, event) do
    GenServer.call(menu_pid, {:trigger_event, event})
  end
  def popup_menu(menu_pid) do
    GenServer.call(menu_pid, :popup_menu)
  end
  def menubar(menu_pid) do
    GenServer.call(menu_pid, :menubar)
  end
  def get_icon(%Menu{pid: menu_pid}) when is_pid(menu_pid) do
    get_icon(menu_pid)
  end
  def get_icon(menu_pid) when is_pid(menu_pid) do
    GenServer.call(menu_pid, :get_icon)
  end
  def set_icon(%Menu{pid: menu_pid}, icon) when is_pid(menu_pid) do
    set_icon(menu_pid, icon)
  end
  def set_icon(menu_pid, icon) when is_pid(menu_pid) do
    if menu_pid == self() do
      spawn_link(Menu, :set_icon, [menu_pid, icon])
    else
      GenServer.call(menu_pid, {:set_icon, icon})
    end
  end
  @impl true
  def handle_call(:menubar, _from, menu = %{__adapter__: adapter}) do
    {:reply, Adapter.menubar(adapter), menu}
  end
  @impl true
  def handle_call(:get_icon, _from, menu) do
    {:reply, get_adapter_icon(menu), menu}
  end
  @impl true
  def handle_call({:set_icon, icon}, _from, menu) do
    case set_adapter_icon(menu, icon) do
      {:ok, menu} -> {:reply, get_adapter_icon(menu), menu}
      error -> {:reply, error, menu}
    end
  end
  @impl true
  def handle_call({:trigger_event, event}, _from, menu = %{module: module}) do
    menu =
      with {:ok, {:noreply, menu}} <- invoke_module_func(module, :handle_event, [event, menu]),
           {:ok, _updated?, menu} <- update_dom(menu) do
        menu
      else
        _ -> menu
      end
    {:reply, menu.assigns, menu}
  end
  @impl true
  def handle_cast(:popup_menu, menu = %{__adapter__: adapter}) do
    adapter = Adapter.popup_menu(adapter)
    {:noreply, %{menu | __adapter__: adapter}}
  end
  @impl true
  def handle_cast(:recreate_menu, menu = %{__adapter__: adapter, dom: dom}) do
    # This is called from within the Adapter
    adapter = Adapter.recreate_menu(adapter, dom)
    {:noreply, %{menu | __adapter__: adapter}}
  end
  @impl true
  def handle_cast(:mount, menu) do
    {:noreply, do_mount(menu)}
  end
  @impl true
  def handle_info(event, menu = %{__adapter__: adapter = %{__struct__: adapter_module}})
      when is_tuple(event) and elem(event, 0) == :wx do
    {:noreply, adapter} = adapter_module.handle_info(event, adapter)
    {:noreply, %{menu | __adapter__: adapter}}
  end
  @impl true
  def handle_info(msg, menu) do
    {:noreply, proxy_handle_info(msg, menu)}
  end
  # Private functions
  defp get_adapter_icon(%{__adapter__: adapter}) do
    Adapter.get_icon(adapter)
  end
  defp set_adapter_icon(menu = %{app: app}, {:file, icon}) do
    with {:ok, wx_icon} <- Desktop.Image.new_icon(app, icon),
         ret = {:ok, _menu} <- set_adapter_icon(menu, wx_icon) do
      # Destroy the :wxIcon
      Desktop.Image.destroy(wx_icon)
      # Now return the result
      ret
    end
  end
  defp set_adapter_icon(menu = %{__adapter__: adapter}, icon) do
    with {:ok, adapter} <- Adapter.set_icon(adapter, icon) do
      menu = %{menu | __adapter__: adapter}
      {:ok, menu}
    end
  end
  defp do_mount(menu = %Menu{module: module}) do
    case invoke_module_func(module, :mount, [menu]) do
      {:ok, {:ok, menu}} ->
        case update_dom(menu) do
          {:ok, _updated?, menu} -> menu
          _error -> menu
        end
      _ ->
        menu
    end
  end
  defp proxy_handle_info(msg, menu = %Menu{module: module}) do
    with {:ok, {:noreply, menu}} <- invoke_module_func(module, :handle_info, [msg, menu]),
         {:ok, _updated?, menu} <- update_dom(menu) do
      menu
    else
      _ -> menu
    end
  end
  @spec update_dom(menu :: t()) :: {:ok, updated :: boolean(), menu :: t()} | {:error, binary()}
  defp update_dom(menu = %Menu{__adapter__: adapter, module: module, dom: dom, assigns: assigns}) do
    with {:ok, new_dom} <- invoke_render(module, assigns) do
      if new_dom != dom do
        adapter = Adapter.update_dom(adapter, new_dom)
        {:ok, true, %{menu | __adapter__: adapter, dom: new_dom, last_render: DateTime.utc_now()}}
      else
        {:ok, false, menu}
      end
    end
  end
  @spec invoke_render(module :: module(), assigns :: map()) ::
          {:ok, any()} | {:error, binary()}
  defp invoke_render(module, assigns) do
    with {:ok, str_render} <- invoke_module_func(module, :render, [assigns]) do
      {:ok, Parser.parse(str_render)}
    end
  end
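  # Safely invokes a callback on the user-provided module, logging any raised
  # exception and returning an error tuple instead of crashing the menu server.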
  @spec invoke_module_func(module :: module(), func :: atom(), args :: list(any())) ::
          {:error, binary()} | {:ok, any()}
  defp invoke_module_func(module, func, args) do
    try do
      Kernel.apply(module, func, args)
    rescue
      error ->
        Logger.error(Exception.format(:error, error, __STACKTRACE__))
        {:error, "Failed to invoke #{module}.#{func}/#{Enum.count(args)}"}
    else
      return -> {:ok, return}
    end
  end
  defp is_module_server?(module) do
    try do
      case Keyword.get(module.__info__(:attributes), :is_menu_server, false) do
        [true] -> true
        _ -> false
      end
    rescue
      _error -> false
    end
  end
end | 
	lib/desktop/menu.ex | 0.694924 | 0.54819 | 
	menu.ex | 
	starcoder | 
| 
	defmodule Advent.Day17 do
  defmodule Conway3D do
    def new(input) do
      for {line, y} <- Enum.with_index(String.split(input, "\n", trim: true)),
          {char, x} <- Enum.with_index(String.codepoints(line)),
          char == "#",
          into: MapSet.new(),
          do: {x, y, 0}
    end
    defp active?(actives, {x, y, z}) do
      MapSet.member?(actives, {x, y, z})
    end
    def count_actives(actives), do: Enum.count(actives)
    defp count_active_neighbors(actives, {x, y, z}) do
      neighbors =
        for dx <- -1..1,
            dy <- -1..1,
            dz <- -1..1,
            {dx, dy, dz} != {0, 0, 0},
            do: {x + dx, y + dy, z + dz}
      Enum.count(neighbors, &active?(actives, &1))
    end
    defp coordinate_range(actives) do
      Enum.reduce(actives, {{0, 0}, {0, 0}, {0, 0}}, fn {x, y, z},
                                                        {{min_x, max_x}, {min_y, max_y},
                                                         {min_z, max_z}} ->
        {
          Enum.min_max([min_x, x, max_x]),
          Enum.min_max([min_y, y, max_y]),
          Enum.min_max([min_z, z, max_z])
        }
      end)
    end
    def next_cycle(actives) do
      {{min_x, max_x}, {min_y, max_y}, {min_z, max_z}} = coordinate_range(actives)
      for x <- (min_x - 1)..(max_x + 1),
          y <- (min_y - 1)..(max_y + 1),
          z <- (min_z - 1)..(max_z + 1),
          reduce: MapSet.new() do
        acc ->
          coordinate = {x, y, z}
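          # Conway rules: an active cube stays active with 2 or 3 active
          # neighbors; an inactive cube becomes active with exactly 3.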
          next_active =
            case {active?(actives, coordinate), count_active_neighbors(actives, coordinate)} do
              {true, neighbors} when neighbors in [2, 3] -> true
              {false, 3} -> true
              _ -> false
            end
          if next_active, do: MapSet.put(acc, coordinate), else: acc
      end
    end
  end
  def part_1(input) do
    Enum.reduce(1..6, Conway3D.new(input), fn _, actives -> Conway3D.next_cycle(actives) end)
    |> Conway3D.count_actives()
  end
  defmodule Conway4D do
    def new(input) do
      for {line, y} <- Enum.with_index(String.split(input, "\n", trim: true)),
          {char, x} <- Enum.with_index(String.codepoints(line)),
          char == "#",
          into: MapSet.new(),
          do: {x, y, 0, 0}
    end
    defp active?(actives, {x, y, z, w}) do
      MapSet.member?(actives, {x, y, z, w})
    end
    def count_actives(actives), do: Enum.count(actives)
    defp count_active_neighbors(actives, {x, y, z, w}) do
      neighbors =
        for dx <- -1..1,
            dy <- -1..1,
            dz <- -1..1,
            dw <- -1..1,
            {dx, dy, dz, dw} != {0, 0, 0, 0},
            do: {x + dx, y + dy, z + dz, w + dw}
      Enum.count(neighbors, &active?(actives, &1))
    end
    defp coordinate_range(actives) do
      Enum.reduce(actives, {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, fn {x, y, z, w},
                                                                {{min_x, max_x}, {min_y, max_y},
                                                                 {min_z, max_z},
                                                                 {min_w, max_w}} ->
        {
          Enum.min_max([min_x, x, max_x]),
          Enum.min_max([min_y, y, max_y]),
          Enum.min_max([min_z, z, max_z]),
          Enum.min_max([min_w, w, max_w])
        }
      end)
    end
    def next_cycle(actives) do
      {{min_x, max_x}, {min_y, max_y}, {min_z, max_z}, {min_w, max_w}} = coordinate_range(actives)
      for x <- (min_x - 1)..(max_x + 1),
          y <- (min_y - 1)..(max_y + 1),
          z <- (min_z - 1)..(max_z + 1),
          w <- (min_w - 1)..(max_w + 1),
          reduce: MapSet.new() do
        acc ->
          coordinate = {x, y, z, w}
          next_active =
            case {active?(actives, coordinate), count_active_neighbors(actives, coordinate)} do
              {true, neighbors} when neighbors in [2, 3] -> true
              {false, 3} -> true
              _ -> false
            end
          if next_active, do: MapSet.put(acc, coordinate), else: acc
      end
    end
  end
  def part_2(input) do
    Enum.reduce(1..6, Conway4D.new(input), fn _, actives -> Conway4D.next_cycle(actives) end)
    |> Conway4D.count_actives()
  end
end | 
	shritesh+elixir/lib/advent/day_17.ex | 0.550124 | 0.602646 | 
	day_17.ex | 
	starcoder | 
| 
	defmodule ExCell.Adapters.CellJS do
  @moduledoc """
  The CellJS adapter can be used to output the cells as HTML compatible with
  [cells-js](https://github.com/DefactoSoftware/cells-js). CellsJS was written
  with ExCell in mind.
  Tags are automatically closed when they are part of the
  [void elements](https://stackoverflow.com/questions/4693939/self-closing-tags-void-elements-in-html5)
  specification.
  CellsJS uses two predefined attributes to wire up the JavaScript. First, it
  looks for the `data-cell` attribute and matches it to the Cell defined in
  JavaScript.
  Second, it takes the JSON arguments set on the `data-cell-params` attribute
  and uses them to initialize the cell with user-defined parameters.
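  As a minimal sketch (the cell name and params here are hypothetical), the
  attributes generated for a container look like this:
  ```elixir
  attributes("AvatarCell", "cell-1", [], %{size: 24})
  # => [data: [cell: "AvatarCell", cell_id: "cell-1",
  #            cell_params: Jason.encode!(%{size: 24})]]
  ```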
  """
  @behaviour ExCell.Adapter
  alias Phoenix.HTML
  alias Phoenix.HTML.Tag
  def void_elements, do: [
    "area",
    "base",
    "br",
    "col",
    "command",
    "embed",
    "hr",
    "img",
    "input",
    "keygen",
    "link",
    "meta",
    "param",
    "source",
    "track",
    "wbr"
  ]
  def void_element?(tag) when is_atom(tag), do: void_element?(Atom.to_string(tag))
  def void_element?(tag), do: tag in void_elements()
  @doc """
  The data_attribute function is used to build up the data attributes and set
  the default `data-cell` and `data-cell-params` attributes.
  """
  def data_attribute(name, id, data \\ [], params \\ %{})
  def data_attribute(name, id, nil, params), do: data_attribute(name, id, [], params)
  def data_attribute(name, id, data, params),
    do:
      Keyword.merge(
        data,
        cell: name,
        cell_id: id,
        cell_params: Jason.encode!(params)
      )
  @doc """
  The attributes function is used to auto-fill the attributes for a container
  with the data attributes.
  """
  def attributes(name, id, attributes \\ [], params \\ %{}) do
    Keyword.put(
      attributes,
      :data,
      data_attribute(name, id, Keyword.get(attributes, :data, []), params)
    )
  end
  @doc """
  The container renders HTML with the attributes, class name and data attributes
  prefilled. The HTML is rendered with Phoenix.HTML.Tag.
  """
  def container(%{
    name: name,
    attributes: attributes,
    params: params,
    content: content,
    id: id
  }) do
    {tag, attributes} = Keyword.pop(attributes, :tag, :div)
    attributes = attributes(name, id, attributes, params)
    case void_element?(tag) do
      true -> Tag.tag(tag, attributes)
      false -> Tag.content_tag(tag, content, attributes)
    end
  end
  def container(%{id: id} = options, callback) do
    options
    |> Map.put(:content, callback.(%{element: &(element(id, &1))}))
    |> container()
  end
  def element(id, element) do
    HTML.raw(~s(data-cell-parent-id="#{id}" data-cell-element="#{element}"))
  end
end | 
	lib/ex_cell/adapters/cell_js.ex | 0.797714 | 0.688468 | 
	cell_js.ex | 
	starcoder | 
| 
	defmodule ExthCrypto.Math do
  @moduledoc """
  Helpers for basic math functions.
  """
  @doc """
  Simple function to compute the modulo, working on integers of any sign.
  ## Examples
      iex> ExthCrypto.Math.mod(5, 2)
      1
      iex> ExthCrypto.Math.mod(-5, 1337)
      1332
      iex> ExthCrypto.Math.mod(1337 + 5, 1337)
      5
      iex> ExthCrypto.Math.mod(0, 1337)
      0
  """
  def mod(x, n) when x > 0, do: rem(x, n)
  def mod(x, n) when x < 0, do: rem(n + x, n)
  def mod(0, _n), do: 0
  @doc """
  Simple wrapper function to convert a hex string to a binary.
  ## Examples
      iex> ExthCrypto.Math.hex_to_bin("01020a0d")
      <<0x01, 0x02, 0x0a, 0x0d>>
  """
  @spec hex_to_bin(String.t()) :: binary()
  def hex_to_bin(hex) do
    {:ok, bin} = Base.decode16(hex, case: :lower)
    bin
  end
  @doc """
  Left-pads a given binary to the specified length in bytes.
  This function raises if the binary is already longer than the given length.
  ## Examples
      iex> ExthCrypto.Math.pad(<<1, 2, 3>>, 6)
      <<0x00, 0x00, 0x00, 0x01, 0x02, 0x03>>
      iex> ExthCrypto.Math.pad(<<1, 2, 3>>, 4)
      <<0x00, 0x01, 0x02, 0x03>>
      iex> ExthCrypto.Math.pad(<<1, 2, 3>>, 3)
      <<0x01, 0x02, 0x03>>
      iex> ExthCrypto.Math.pad(<<1, 2, 3>>, 0)
      ** (ArgumentError) argument error
      iex> ExthCrypto.Math.pad(<<>>, 0)
      <<>>
  """
  @spec pad(binary(), non_neg_integer()) :: binary()
  def pad(bin, length) do
    padding_bits = (length - byte_size(bin)) * 8
    <<0x00::size(padding_bits)>> <> bin
  end
  @doc """
  Simple wrapper function to convert a binary to a hex string.
  ## Examples
      iex> ExthCrypto.Math.bin_to_hex(<<0x01, 0x02, 0x0a, 0x0d>>)
      "01020a0d"
  """
  @spec bin_to_hex(binary()) :: String.t()
  def bin_to_hex(bin), do: Base.encode16(bin, case: :lower)
  @doc """
  Generate a random nonce value of specified length.
  ## Examples
      iex> ExthCrypto.Math.nonce(32) |> byte_size
      32
      iex> ExthCrypto.Math.nonce(32) == ExthCrypto.Math.nonce(32)
      false
  """
  @spec nonce(non_neg_integer()) :: binary()
  def nonce(nonce_size) do
    :crypto.strong_rand_bytes(nonce_size)
  end
  @doc """
  Computes the xor between two equal length binaries.
  ## Examples
      iex> ExthCrypto.Math.xor(<<0b10101010>>, <<0b11110000>>)
      <<0b01011010>>
  """
  @spec xor(binary(), binary()) :: binary()
  def xor(a, b) when byte_size(a) == byte_size(b) do
    :crypto.exor(a, b)
  end
end | 
	apps/exth_crypto/lib/math/math.ex | 0.837686 | 0.530541 | 
	math.ex | 
	starcoder | 
| 
	defmodule AttributeRepositoryRiak do
  @moduledoc """
  ## Initializing a bucket type for attribute repository
  ```sh
  $ sudo riak-admin bucket-type create attr_rep '{"props":{"datatype":"map", "backend":"leveldb_mult"}}'
  attr_rep created
  $ sudo riak-admin bucket-type activate attr_rep
  attr_rep has been activated
  ```
  ## Options
  ### Run options (`run_opts`)
  - `:instance`: instance name (an `atom()`)
  - `:bucket_type`: a `String.t()` for the bucket type that must be created beforehand
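  For example (the instance name here is illustrative):
  ```elixir
  run_opts = [instance: :users, bucket_type: "attr_rep"]
  ```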
  """
  require Logger
  alias AttributeRepository.Search.AttributePath
  use AttributeRepository.Read
  use AttributeRepository.Write
  use AttributeRepository.Search
  @behaviour AttributeRepository.Install
  @behaviour AttributeRepository.Read
  @behaviour AttributeRepository.Write
  @behaviour AttributeRepository.Search
  @impl AttributeRepository.Install
  def install(run_opts, _init_opts) do
    :ok =
      Riak.Search.Schema.create(
        schema_name(run_opts),
        (:code.priv_dir(:attribute_repository_riak) ++ '/schema.xml') |> File.read!()
      )
    :ok = Riak.Search.Index.put(index_name(run_opts), schema_name(run_opts))
    :ok =
      Riak.Search.Index.set(
        {run_opts[:bucket_type], bucket_name(run_opts)},
        index_name(run_opts)
      )
  end
  @impl AttributeRepository.Read
  def get(resource_id, result_attributes, run_opts) do
    case Riak.find(run_opts[:bucket_type], bucket_name(run_opts), resource_id) do
      nil ->
        {:error, AttributeRepository.Read.NotFoundError.exception("Entry not found")}
      attribute_list ->
        {
          :ok,
          Enum.reduce(
            Riak.CRDT.Map.value(attribute_list),
            %{},
            fn
              {{attribute_name, _attribute_type}, attribute_value}, acc ->
                {attribute_name, attribute_type} =
                  case Regex.run(~r/^(.*)(_integer|_date|_binarydata)?$/U, attribute_name,
                         capture: :all_but_first
                       ) do
                    [attribute_name, "_integer"] ->
                      {attribute_name, :integer}
                    [attribute_name, "_date"] ->
                      {attribute_name, :date}
                    [attribute_name, "_binarydata"] ->
                      {attribute_name, :binary_data}
                    [attribute_name] ->
                      {attribute_name, :string}
                  end
                if result_attributes == :all or attribute_name in result_attributes do
                  case attribute_type do
                    :string ->
                      Map.put(acc, attribute_name, attribute_value)
                    :date ->
                      Map.put(
                        acc,
                        attribute_name,
                        elem(DateTime.from_iso8601(attribute_value), 1)
                      )
                    :binary_data ->
                      Map.put(acc, attribute_name, {:binary_data, attribute_value})
                    :integer ->
                      {int, _} = Integer.parse(attribute_value)
                      Map.put(acc, attribute_name, int)
                  end
                else
                  acc
                end
            end
          )
        }
    end
  end
  @impl AttributeRepository.Write
  def put(resource_id, resource, run_opts) do
    new_base_obj =
      case Riak.find(run_opts[:bucket_type], bucket_name(run_opts), resource_id) do
        obj when not is_nil(obj) ->
          # FIXME: we may not need to keep the same object in the case of replacement:
          # just deleting it and creating a new one could be enough?
          # There would, however, be a short time with no object
          Enum.reduce(
            Riak.CRDT.Map.keys(obj),
            obj,
            fn
              {key, type}, acc ->
                Riak.CRDT.Map.delete(acc, {key, type})
            end
          )
        nil ->
          Riak.CRDT.Map.new()
      end
    riak_res =
      Enum.reduce(
        resource,
        new_base_obj,
        fn
          {key, value}, acc ->
            pre_insert_map_put(acc, key, value)
        end
      )
    case Riak.update(riak_res, run_opts[:bucket_type], bucket_name(run_opts), resource_id) do
      :ok ->
        {:ok, resource}
      _ ->
        {:error, AttributeRepository.WriteError.exception("Write error")}
    end
  end
  @impl AttributeRepository.Write
  def modify(resource_id, modify_ops, run_opts) do
    case Riak.find(run_opts[:bucket_type], bucket_name(run_opts), resource_id) do
      obj when not is_nil(obj) ->
        modified_obj =
          Enum.reduce(
            modify_ops,
            obj,
            fn
              {:add, attribute_name, attribute_value}, acc ->
                pre_insert_map_put(acc, attribute_name, attribute_value)
              {:replace, attribute_name, value}, acc ->
                try do
                  # sets can only be for strings - so no need to handle date, etc. here
                  Riak.CRDT.Map.update(
                    acc,
                    :set,
                    attribute_name,
                    fn
                      set ->
                        set =
                          Enum.reduce(
                            Riak.CRDT.Set.value(set),
                            set,
                            fn
                              val, acc ->
                                Riak.CRDT.Set.delete(acc, val)
                            end
                          )
                        Riak.CRDT.Set.put(set, value)
                    end
                  )
                rescue
                  _ ->
                    pre_insert_map_put(acc, attribute_name, value)
                end
              {:replace, attribute_name, old_value, new_value}, acc ->
                try do
                  # sets can only be for strings - so no need to handle date, etc. here
                  Riak.CRDT.Map.update(
                    acc,
                    :set,
                    attribute_name,
                    fn
                      set ->
                        set
                        |> Riak.CRDT.Set.delete(old_value)
                        |> Riak.CRDT.Set.put(new_value)
                    end
                  )
                rescue
                  _ ->
                    pre_insert_map_put(acc, attribute_name, new_value)
                end
              {:delete, attribute_name}, acc ->
                case map_entry_data_type_of_key(obj, attribute_name) do
                  data_type when not is_nil(data_type) ->
                    Riak.CRDT.Map.delete(acc, {attribute_name, data_type})
                  nil ->
                    acc
                end
              {:delete, attribute_name, attribute_value}, acc ->
                try do
                  Riak.CRDT.Map.update(
                    acc,
                    :set,
                    attribute_name,
                    fn
                      obj ->
                        Riak.CRDT.Set.delete(obj, attribute_value)
                    end
                  )
                rescue
                  _ ->
                    acc
                end
            end
          )
        case Riak.update(
               modified_obj,
               run_opts[:bucket_type],
               bucket_name(run_opts),
               resource_id
             ) do
          :ok ->
            :ok
          _ ->
            {:error, AttributeRepository.WriteError.exception("Write error")}
        end
      nil ->
        {:error, AttributeRepository.Read.NotFoundError.exception("Entry not found")}
    end
  end
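  # Values are stored in the Riak CRDT map under type-suffixed keys
  # ("_integer", "_date", "_binarydata"; plain strings use the bare name) so
  # that get/3 and search/3 can recover the original Elixir type. Any previous
  # entry for the same attribute under another suffix is removed first.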
  defp pre_insert_map_put(map, attribute_name, value) when is_integer(value) do
    map
    |> crdt_map_delete_if_present(attribute_name)
    |> crdt_map_delete_if_present(attribute_name <> "_binarydata")
    |> crdt_map_delete_if_present(attribute_name <> "_date")
    |> Riak.CRDT.Map.put(attribute_name <> "_integer", to_riak_crdt(value))
  end
  defp pre_insert_map_put(map, attribute_name, %DateTime{} = value) do
    map
    |> crdt_map_delete_if_present(attribute_name)
    |> crdt_map_delete_if_present(attribute_name <> "_integer")
    |> crdt_map_delete_if_present(attribute_name <> "_binarydata")
    |> Riak.CRDT.Map.put(attribute_name <> "_date", to_riak_crdt(value))
  end
  defp pre_insert_map_put(map, attribute_name, {:binary_data, binary_data}) do
    map
    |> crdt_map_delete_if_present(attribute_name)
    |> crdt_map_delete_if_present(attribute_name <> "_integer")
    |> crdt_map_delete_if_present(attribute_name <> "_date")
    |> Riak.CRDT.Map.put(attribute_name <> "_binarydata", to_riak_crdt(binary_data))
  end
  defp pre_insert_map_put(map, attribute_name, value) do
    map
    |> crdt_map_delete_if_present(attribute_name <> "_integer")
    |> crdt_map_delete_if_present(attribute_name <> "_binarydata")
    |> crdt_map_delete_if_present(attribute_name <> "_date")
    |> Riak.CRDT.Map.put(attribute_name, to_riak_crdt(value))
  end
  defp crdt_map_delete_if_present(map, attribute_name) do
    if Riak.CRDT.Map.has_key?(map, attribute_name) do
      Riak.CRDT.Map.delete(map, {attribute_name, :register})
    else
      map
    end
  end
  @impl AttributeRepository.Write
  def delete(resource_id, run_opts) do
    Riak.delete(run_opts[:bucket_type], bucket_name(run_opts), resource_id)
  end
  @impl AttributeRepository.Search
  def search(filter, attributes, run_opts) do
    case Riak.Search.query(index_name(run_opts), build_riak_filter(filter)) do
      {:ok, {:search_results, result_list, _, _}} ->
        for {_index_name, result_attributes} <- result_list do
          {
            id_from_search_result(result_attributes),
            Enum.reduce(
              result_attributes,
              %{},
              fn {attribute_name, attribute_value}, acc ->
                to_search_result_map(acc, attribute_name, attribute_value, attributes)
              end
            )
          }
        end
      {:error, reason} ->
        {:error, AttributeRepository.ReadError.exception(inspect(reason))}
    end
  rescue
    e in AttributeRepository.UnsupportedError ->
      {:error, e}
  end
  defp id_from_search_result(result_attributes) do
    :proplists.get_value("_yz_id", result_attributes)
    |> String.split("*")
    |> Enum.at(3)
  end
  defp to_search_result_map(result_map, attribute_name, attribute_value, attribute_list) do
    res =
      Regex.run(
        ~r/(.*)_(register|flag|counter|set|integer_register|date_register|binarydata_register)/U,
        attribute_name,
        capture: :all_but_first
      )
    if res != nil and (attribute_list == :all or List.first(res) in attribute_list) do
      case res do
        [attribute_name, "register"] ->
          Map.put(result_map, attribute_name, attribute_value)
        [attribute_name, "flag"] ->
          Map.put(result_map, attribute_name, attribute_value == "true")
        [attribute_name, "counter"] ->
          {int, _} = Integer.parse(attribute_value)
          Map.put(result_map, attribute_name, int)
        [attribute_name, "set"] ->
          Map.put(
            result_map,
            attribute_name,
            [attribute_value] ++ (result_map[attribute_name] || [])
          )
        [attribute_name, "integer_register"] ->
          {int, _} = Integer.parse(attribute_value)
          Map.put(result_map, attribute_name, int)
        [attribute_name, "date_register"] ->
          {:ok, date, _} = DateTime.from_iso8601(attribute_value)
          Map.put(result_map, attribute_name, date)
        [attribute_name, "binarydata_register"] ->
          Map.put(result_map, attribute_name, {:binary_data, attribute_value})
        _ ->
          result_map
      end
    else
      result_map
    end
  end
  defp build_riak_filter({:attrExp, attrExp}) do
    build_riak_filter(attrExp)
  end
  defp build_riak_filter({:and, lhs, rhs}) do
    "(" <> build_riak_filter(lhs) <> " AND " <> build_riak_filter(rhs) <> ")"
  end
  defp build_riak_filter({:or, lhs, rhs}) do
    "(" <> build_riak_filter(lhs) <> ") OR (" <> build_riak_filter(rhs) <> ")"
  end
  defp build_riak_filter({:not, filter}) do
    "(*:* NOT " <> build_riak_filter(filter) <> ")"
  end
  defp build_riak_filter(
         {:pr,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }}
       ) do
    attribute <>
      "_register:* OR " <>
      attribute <>
      "_integer_register:* OR " <>
      attribute <>
      "_date_register:* OR " <>
      attribute <>
      "_binarydata_register:* OR " <>
      attribute <> "_flag:* OR " <> attribute <> "_counter:* OR " <> attribute <> "_set:*"
  end
  defp build_riak_filter(
         {:eq,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, value}
       )
       when is_binary(value) do
    # special case to handle equality in sets
    attribute <>
      "_register:" <>
      to_escaped_string(value) <> " OR " <> attribute <> "_set:" <> to_escaped_string(value)
  end
  defp build_riak_filter(
         {:eq,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, value}
       )
       when is_boolean(value) or is_integer(value) do
    riak_attribute_name(attribute, value) <> ":" <> to_string(value)
  end
  defp build_riak_filter(
         {:eq,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, %DateTime{} = value}
       ) do
    riak_attribute_name(attribute, value) <>
      ":[" <> DateTime.to_iso8601(value) <> " TO " <> DateTime.to_iso8601(value) <> "]"
  end
  defp build_riak_filter(
         {:eq,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, {:binary_data, value}}
       ) do
    riak_attribute_name(attribute, value) <> ":" <> to_string(value)
  end
  defp build_riak_filter({:ne, attribute_path, value}) do
    "(*:* NOT " <> build_riak_filter({:eq, attribute_path, value}) <> ")"
  end
  defp build_riak_filter(
         {:ge,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, value}
       )
       when is_binary(value) or is_integer(value) do
    riak_attribute_name(attribute, value) <> ":[" <> to_escaped_string(value) <> " TO *]"
  end
  defp build_riak_filter(
         {:ge,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, %DateTime{} = value}
       ) do
    riak_attribute_name(attribute, value) <> ":[" <> DateTime.to_iso8601(value) <> " TO *]"
  end
  defp build_riak_filter(
         {:le,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, value}
       )
       when is_binary(value) or is_integer(value) do
    riak_attribute_name(attribute, value) <> ":[* TO " <> to_escaped_string(value) <> "]"
  end
  defp build_riak_filter(
         {:le,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, %DateTime{} = value}
       ) do
    riak_attribute_name(attribute, value) <> ":[* TO " <> DateTime.to_iso8601(value) <> "]"
  end
  defp build_riak_filter(
         {:gt,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          } = attribute_path, value}
       )
       when is_binary(value) or is_integer(value) do
    # attribute does exist
    "(" <>
      riak_attribute_name(attribute, value) <>
      ":* AND " <> "(*:* NOT " <> build_riak_filter({:le, attribute_path, value}) <> "))"
  end
  defp build_riak_filter(
         {:gt,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          } = attribute_path, %DateTime{} = value}
       ) do
    # attribute does exist
    "(" <>
      riak_attribute_name(attribute, value) <>
      ":* AND " <> "(*:* NOT " <> build_riak_filter({:le, attribute_path, value}) <> "))"
  end
  defp build_riak_filter(
         {:lt,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          } = attribute_path, value}
       )
       when is_binary(value) or is_integer(value) do
    # attribute does exist
    "(" <>
      riak_attribute_name(attribute, value) <>
      ":* AND " <> "(*:* NOT " <> build_riak_filter({:ge, attribute_path, value}) <> "))"
  end
  defp build_riak_filter(
         {:lt,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          } = attribute_path, %DateTime{} = value}
       ) do
    # attribute does exist
    "(" <>
      riak_attribute_name(attribute, value) <>
      ":* AND " <> "(*:* NOT " <> build_riak_filter({:ge, attribute_path, value}) <> "))"
  end
  defp build_riak_filter(
         {:co,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, value}
       )
       when is_binary(attribute) do
    # special case to handle equality in sets
    attribute <>
      "_register:*" <>
      to_escaped_string(value) <>
      "* OR " <> attribute <> "_set:*" <> to_escaped_string(value) <> "*"
  end
  defp build_riak_filter(
         {:sw,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, value}
       )
       when is_binary(attribute) do
    # special case to handle equality in sets
    attribute <>
      "_register:" <>
      to_escaped_string(value) <>
      "* OR " <> attribute <> "_set:" <> to_escaped_string(value) <> "*"
  end
  defp build_riak_filter(
         {:ew,
          %AttributePath{
            attribute: attribute,
            sub_attribute: nil
          }, value}
       )
       when is_binary(attribute) do
    # special case to handle equality in sets
    attribute <>
      "_register:*" <>
      to_escaped_string(value) <> " OR " <> attribute <> "_set:*" <> to_escaped_string(value)
  end
  defp build_riak_filter({_, _, value}) when is_float(value) or is_nil(value) do
    raise AttributeRepository.UnsupportedError, message: "Unsupported data type"
  end
  defp build_riak_filter({_, _, {:ref, _, _}}) do
    raise AttributeRepository.UnsupportedError, message: "Unsupported data type"
  end
  defp riak_attribute_name(name, value) when is_binary(value), do: name <> "_register"
  defp riak_attribute_name(name, value) when is_integer(value), do: name <> "_integer_register"
  defp riak_attribute_name(name, %DateTime{}), do: name <> "_date_register"
  defp riak_attribute_name(name, {:binary_data, _value}), do: name <> "_binarydata_register"
  defp riak_attribute_name(name, value) when is_boolean(value), do: name <> "_flag"
  @spec to_riak_crdt(AttributeRepository.attribute_data_type()) :: any()
  defp to_riak_crdt(value) when is_binary(value) do
    Riak.CRDT.Register.new(value)
  end
  defp to_riak_crdt(true) do
    Riak.CRDT.Flag.new()
    |> Riak.CRDT.Flag.enable()
  end
  defp to_riak_crdt(false) do
    Riak.CRDT.Flag.new()
    |> Riak.CRDT.Flag.disable()
  end
  defp to_riak_crdt(value) when is_integer(value) do
    value
    |> to_string()
    |> Riak.CRDT.Register.new()
  end
  defp to_riak_crdt(%DateTime{} = value) do
    value
    |> DateTime.to_iso8601()
    |> Riak.CRDT.Register.new()
  end
  defp to_riak_crdt({:binary_data, value}) do
    Riak.CRDT.Register.new(value)
  end
  defp to_riak_crdt(value) when is_list(value) do
    Enum.reduce(
      value,
      Riak.CRDT.Set.new(),
      fn
        list_element, acc ->
          Riak.CRDT.Set.put(acc, list_element)
      end
    )
  end
  defp to_escaped_string(value) do
    value
    |> to_string()
    |> String.replace(" ", "\\ ")
  end
  @spec bucket_name(AttributeRepository.run_opts()) :: String.t()
  defp bucket_name(run_opts), do: "attribute_repository_" <> to_string(run_opts[:instance])
  @spec index_name(AttributeRepository.run_opts()) :: String.t()
  defp index_name(run_opts),
    do: "attribute_repository_" <> to_string(run_opts[:instance]) <> "_index"
  @spec schema_name(AttributeRepository.run_opts()) :: String.t()
  def schema_name(_run_opts), do: "attribute_repository_schema"
  defp map_entry_data_type_of_key(obj, key) do
    keys = Riak.CRDT.Map.keys(obj)
    case Enum.find(
           keys,
           fn
             {^key, _} ->
               true
             _ ->
               false
           end
         ) do
      {^key, type} ->
        type
      _ ->
        nil
    end
  end
end | 
	lib/attribute_repository_riak.ex | 0.630344 | 0.418578 | 
	attribute_repository_riak.ex | 
	starcoder | 
| 
	defmodule Zaryn.OracleChain.Summary do
  @moduledoc false
  alias Zaryn.Crypto
  alias Zaryn.TransactionChain.Transaction
  alias Zaryn.TransactionChain.Transaction.ValidationStamp
  alias Zaryn.TransactionChain.TransactionData
  defstruct [:transactions, :previous_date, :date, :aggregated]
  @type t :: %__MODULE__{
          transactions: list(Transaction.t()) | Enumerable.t(),
          previous_date: DateTime.t() | nil,
          date: DateTime.t() | nil,
          aggregated:
            %{
              DateTime.t() => map()
            }
            | nil
        }
  @doc ~S"""
      Aggregate the oracle chain data into a single map
      ## Examples
      iex> %Summary{ transactions: [
      ...>   %Transaction{validation_stamp: %ValidationStamp{timestamp: ~U[2021-04-29 13:10:00Z]}, data: %TransactionData{content: "{\"zaryn\":{\"eur\":0.02, \"usd\":0.018}}"}},
      ...>   %Transaction{validation_stamp: %ValidationStamp{timestamp: ~U[2021-04-29 13:00:00Z]}, data: %TransactionData{content: "{\"zaryn\":{\"eur\":0.021, \"usd\":0.019}}"}}
      ...> ]}
      ...> |> Summary.aggregate()
      %Summary{
        transactions: [
          %Transaction{validation_stamp: %ValidationStamp{timestamp: ~U[2021-04-29 13:10:00Z]}, data: %TransactionData{content: "{\"zaryn\":{\"eur\":0.02, \"usd\":0.018}}"}},
          %Transaction{validation_stamp: %ValidationStamp{timestamp: ~U[2021-04-29 13:00:00Z]}, data: %TransactionData{content: "{\"zaryn\":{\"eur\":0.021, \"usd\":0.019}}"}}
        ],
        aggregated: %{
          ~U[2021-04-29 13:00:00Z] => %{ "zaryn" => %{ "eur" => 0.021, "usd" => 0.019 }},
          ~U[2021-04-29 13:10:00Z] => %{ "zaryn" => %{ "eur" => 0.02, "usd" => 0.018 }}
        }
      }
  """
  @spec aggregate(t()) :: t()
  def aggregate(summary = %__MODULE__{transactions: transactions}) do
    aggregated =
      transactions
      |> Stream.map(fn %Transaction{
                         data: %TransactionData{content: content},
                         validation_stamp: %ValidationStamp{timestamp: timestamp}
                       } ->
        data = Jason.decode!(content)
        {DateTime.truncate(timestamp, :second), data}
      end)
      |> Enum.into(%{})
    %{summary | aggregated: aggregated}
  end
  @doc ~S"""
  Verify that the aggregated data is correct, given the list of transactions passed
  ## Examples
      iex> %Summary{
      ...>   aggregated: %{
      ...>      ~U[2021-04-29 13:00:00Z] => %{ "zaryn" => %{ "eur" => 0.021, "usd" => 0.019 }},
      ...>      ~U[2021-04-29 13:10:00Z] => %{ "zaryn" => %{ "eur" => 0.02, "usd" => 0.018 }}
      ...>   },
      ...>   transactions: [
      ...>     %Transaction{validation_stamp: %ValidationStamp{timestamp: ~U[2021-04-29 13:10:00Z]}, data: %TransactionData{content: "{\"zaryn\":{\"eur\":0.02, \"usd\":0.018}}"}},
      ...>     %Transaction{validation_stamp: %ValidationStamp{timestamp: ~U[2021-04-29 13:00:00Z]}, data: %TransactionData{content: "{\"zaryn\":{\"eur\":0.021, \"usd\":0.019}}"}}
      ...>   ]
      ...> } |> Summary.verify?()
      true
  """
  @spec verify?(t()) :: boolean()
  def verify?(%__MODULE__{transactions: transactions, aggregated: aggregated}) do
    %__MODULE__{aggregated: transaction_lookup} =
      %__MODULE__{transactions: transactions} |> aggregate()
    Enum.all?(aggregated, fn {timestamp, data} ->
      case Map.get(transaction_lookup, timestamp) do
        ^data ->
          true
        _ ->
          false
      end
    end)
  end
  @doc """
  Build a transaction from the oracle chain's summary
  """
  @spec to_transaction(t()) :: Transaction.t()
  def to_transaction(%__MODULE__{
        aggregated: aggregated_data,
        previous_date: previous_date,
        date: date
      }) do
    {prev_pub, prev_pv} = Crypto.derive_oracle_keypair(previous_date)
    {next_pub, _} = Crypto.derive_oracle_keypair(date)
    Transaction.new(
      :oracle_summary,
      %TransactionData{
        code: """
          # We stop the transaction inheritance by ensuring no other
          # summary transaction will continue on this chain
          condition inherit: [ content: "" ]
        """,
        content:
          aggregated_data
          |> Enum.map(&{DateTime.to_unix(elem(&1, 0)), elem(&1, 1)})
          |> Enum.into(%{})
          |> Jason.encode!()
      },
      prev_pv,
      prev_pub,
      next_pub
    )
  end
end | 
	lib/zaryn/oracle_chain/summary.ex | 0.87672 | 0.423756 | 
	summary.ex | 
	starcoder | 
| 
	defmodule StepFlow.WorkflowDefinitionController do
  use StepFlow, :controller
  use BlueBird.Controller
  import Plug.Conn
  alias StepFlow.Controller.Helpers
  alias StepFlow.WorkflowDefinitions
  alias StepFlow.WorkflowDefinitions.WorkflowDefinition
  require Logger
  action_fallback(StepFlow.FallbackController)
  def index(%Plug.Conn{assigns: %{current_user: user}} = conn, params) do
    params =
      params
      |> Map.put("rights", StepFlow.Map.get_by_key_or_atom(user, :rights, []))
    workflow_definitions = WorkflowDefinitions.list_workflow_definitions(params)
    conn
    |> put_view(StepFlow.WorkflowDefinitionView)
    |> render("index.json", workflow_definitions: workflow_definitions)
  end
  def index(conn, _) do
    conn
    |> put_status(:forbidden)
    |> put_view(StepFlow.WorkflowDefinitionView)
    |> render("error.json",
      errors: %{reason: "Forbidden to view workflows"}
    )
  end
  def show(%Plug.Conn{assigns: %{current_user: user}} = conn, %{"identifier" => identifier}) do
    case WorkflowDefinitions.get_workflow_definition(identifier) do
      nil ->
        conn
        |> put_status(:unprocessable_entity)
        |> put_view(StepFlow.WorkflowDefinitionView)
        |> render("error.json",
          errors: %{reason: "Unable to locate workflow with this identifier"}
        )
      workflow_definition ->
        if Helpers.has_right(workflow_definition, user, "view") do
          conn
          |> put_view(StepFlow.WorkflowDefinitionView)
          |> render("show.json", workflow_definition: workflow_definition)
        else
          conn
          |> put_status(:forbidden)
          |> put_view(StepFlow.WorkflowDefinitionView)
          |> render("error.json",
            errors: %{reason: "Forbidden to access workflow definition with this identifier"}
          )
        end
    end
  end
  def show(conn, _) do
    conn
    |> put_status(:forbidden)
    |> put_view(StepFlow.WorkflowDefinitionView)
    |> render("error.json",
      errors: %{reason: "Forbidden to view workflow with this identifier"}
    )
  end
  def create(conn, _) do
    workflows = WorkflowDefinition.load_workflows_in_database()
    Logger.info("#{inspect(workflows)}")
    json(conn, %{})
  end
end | 
	lib/step_flow/controllers/workflow_definition_controller.ex | 0.504883 | 0.441312 | 
	workflow_definition_controller.ex | 
	starcoder | 
| 
	defmodule Mix.Tasks.Stats do
  @moduledoc false
  use Mix.Task
  def get_datum(module, learn_val, args) do
    to_train = module.new(args)
    role_model = module.new(args)
    {_, info} = NeuralNet.Tester.test_training(to_train, role_model, 100, 10, learn_val, 2, 0, false)
    {info.eval_time, info.iterations}
  end
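  # Reports the sample mean and the Bessel-corrected sample standard
  # deviation (variance divided by n - 1) of the given data.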
  def perform_statistics(str, data) do
    n = length(data)
    mean = Enum.reduce data, 0, fn diff, mean ->
      mean + (diff / n)
    end
    variance = Enum.reduce data, 0, fn datum, var ->
      var + (:math.pow(datum - mean, 2) / (n - 1))
    end
    sd = :math.sqrt(variance)
    digits = 5
    IO.puts "#{str} | Mean: #{Float.round(mean, digits)}, SD: #{Float.round(sd, digits)}, N: #{n}"
  end
  def compare(module1, module2, argfun1, argfun2, sample_size \\ 200) do
    {time_data, its_data} = Enum.reduce 1..sample_size, {[], []}, fn num, {time_data, its_data} ->
      learn_val = 0.5 + :rand.uniform() * 2.5
      IO.write "\rCollecting data sample #{num}/#{sample_size} with learn_val #{Float.round(learn_val, 3)}"
      {time1, iterations1} = get_datum(module1, learn_val, argfun1.())
      {time2, iterations2} = get_datum(module2, learn_val, argfun2.())
      {[time1 - time2 | time_data], [iterations1 - iterations2 | its_data]}
    end
    IO.puts ""
    perform_statistics("Time      ", time_data)
    perform_statistics("Iterations", its_data)
  end
  def get_data(module, argfun, sample_size \\ 200, record \\ false) do
    time_file = if record, do: File.open!("#{inspect(module)}_time.dat", [:write])
    iterations_file = if record, do: File.open!("#{inspect(module)}_iterations.dat", [:write])
    {time_data, its_data} = Enum.reduce 1..sample_size, {[], []}, fn num, {time_data, its_data} ->
      learn_val = 0.5 + :rand.uniform() * 2.5
      IO.write "\rCollecting #{inspect(module)} data sample #{num}/#{sample_size} with learn_val #{Float.round(learn_val, 3)}"
      {time, iterations} = get_datum(module, learn_val, argfun.())
      if record do
        IO.puts(time_file, time)
        IO.puts(iterations_file, iterations)
      end
      {[time | time_data], [iterations | its_data]}
    end
    if record do
      File.close(time_file)
      File.close(iterations_file)
    end
    IO.puts ""
    perform_statistics("#{inspect(module)} Time      ", time_data)
    perform_statistics("#{inspect(module)} Iterations", its_data)
  end
  def run(_) do
    argfun = fn ->
      in_size = 1 + :rand.uniform(9)
      out_size = 1 + :rand.uniform(9)
      mem_size = 1 + :rand.uniform(9)
      %{input_ids: Enum.to_list(1..in_size), output_ids: Enum.to_list(1..out_size), memory_size: mem_size}
    end
    # compare(GRU, GRUM, argfun, argfun)
    get_data(GRU, argfun, 200, true)
    get_data(GRUM, argfun, 200, true)
  end
end | 
	lib/mix/tasks/stats.ex | 0.512693 | 0.498718 | 
	stats.ex | 
	starcoder | 
| 
	defmodule SSTable do
  @moduledoc """
  # Specification of Sorted String Table files
  A Sorted String Table contains zero or more _gzipped key/value chunks_.
  ## GZipped key/value chunks
  A _gzipped key/value chunk_ follows this binary specification:
  1. Four bytes: length of the gzipped chunk
  2. Variable length: gzipped chunk of key/value pairs, with tombstones.
  ## Unzipped key/value chunks
  Once unzipped, each key/value chunk contains zero or more key/value records.
  Each record describes its own length. Some keys may point to
  tombstones.
  ### Value records
  1. Four bytes: Length of key
  2. Four bytes: Length of value
  3. Variable length: Raw key, not escaped
  4. Variable length: Raw value, not escaped
  ### Tombstone records
  1. Four bytes: Length of key in bytes
  2. Four bytes: `2^32 - 1` to indicate tombstone
  3. Variable length: Raw key, not escaped
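  As a sketch derived from the spec above, the record for the pair
  `{"k1", "v"}` and a tombstone for `"k2"` are laid out as:
  ```elixir
  <<2::32, 1::32, "k1", "v">>         # value record
  <<2::32, 4_294_967_295::32, "k2">>  # tombstone record (2^32 - 1)
  ```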
  """
  import SSTable.Settings
  defstruct [:index, :table]
  @doc """
  Query all SSTable files using their associated index file and a key,
  returning a value if present. Deleted entries are not silently dropped:
  this function returns `:tombstone` for them.
  There must be an associated `<timestamp>.idx` file present for each SSTable,
  or the private query function will fail.
  ## Examples
  ### As used during a normal run
  Pass a MapSet of paths to exclude from
  the tables queried.
  ```elixir
  SSTable.query("a", CuckooFilter.eliminate("a"))
  ```
  ### Basic query
  ```elixir
  SSTable.query("a")
  ```
  ### Combined with Memtable
  ```elixir
  Memtable.update("bar","BAZ"); Memtable.delete("foo"); Memtable.flush()
  :ok
  SSTable.query("bar")
  "BAZ"
  SSTable.query("foo")
  :tombstone
  SSTable.query("a")
  :none
  ```
  """
  def query(key, exclude_paths \\ MapSet.new()) do
    sst_files =
      Path.wildcard("*.sst")
      |> Enum.sort()
      |> Enum.reverse()
      |> Enum.filter(&(!MapSet.member?(exclude_paths, &1)))
    query_all(key, sst_files)
  end
  @doc """
  Write a list of key/value pairs to a binary SSTable file (`<timestamp>.sst`).
  Also write a sparse index of offsets (`<timestamp>.idx`).
  ## Example
  ```elixir
  tree = :gb_trees.enter("k3","uuu",:gb_trees.enter("k2","ww",:gb_trees.enter("k1","v",:gb_trees.empty())))
  SSTable.dump(tree)
  ```
  """
  def dump(gb_tree) do
    maybe_kvs =
      for entry <- :gb_trees.to_list(gb_tree) do
        case entry do
          {key, {:value, value, _time}} -> {key, value}
          {key, {:tombstone, _time}} -> {key, :tombstone}
          _ -> nil
        end
      end
    kvs = Enum.filter(maybe_kvs, &(&1 != nil))
    {payload, sparse_index} = SSTable.Zip.zip(kvs)
    time = :erlang.system_time()
    sst_path = new_filename(time)
    idx_path = "#{time}.idx"
    File.write!(sst_path, payload)
    File.write!(idx_path, :erlang.term_to_binary(sparse_index))
    IO.puts("Dumped SSTable to #{sst_path}")
    {sst_path, sparse_index}
  end
  def new_filename(time_name \\ :erlang.system_time()) do
    "#{time_name}.sst"
  end
  defp query_all(_key, []) do
    :none
  end
  defp query_all(key, [sst_file | rest]) do
    case query_file(key, sst_file) do
      :none -> query_all(key, rest)
      :tombstone -> :tombstone
      value -> value
    end
  end
  @tombstone tombstone()
  @gzip_length_bytes SSTable.Settings.gzip_length_bytes()
  defp query_file(key, sst_filename) when is_binary(sst_filename) do
    index = SSTable.Index.fetch(sst_filename)
    nearest_offset =
      case find_nearest_offset(index, key) do
        nil -> :none
        offset -> offset
      end
    case nearest_offset do
      :none ->
        :none
      offset ->
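        # Read the 4-byte length prefix at the index offset, then the gzipped
        # chunk itself, and scan the unzipped chunk sequentially for the key.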
        {:ok, sst} = :file.open(sst_filename, [:read, :raw])
        {:ok, iod} = :file.pread(sst, offset, @gzip_length_bytes)
        <<gzipped_chunk_size::@gzip_length_bytes*8>> = IO.iodata_to_binary(iod)
        {:ok, gzipped_chunk} = :file.pread(sst, offset + @gzip_length_bytes, gzipped_chunk_size)
        :file.close(sst)
        chunk = :zlib.gunzip(IO.iodata_to_binary(gzipped_chunk))
        keep_reading(key, chunk)
    end
  end
  @tombstone SSTable.Settings.tombstone()
  defp keep_reading(key, chunk) do
    case chunk do
      "" ->
        :none
      <<next_key_len::32, next_value_len_tombstone::32, r::binary>> ->
        <<next_key::binary-size(next_key_len), s::binary>> = r
        {value_or_tombstone, next_vt_len} =
          case next_value_len_tombstone do
            t when t == @tombstone ->
              {:tombstone, 0}
            vl ->
              <<next_value::binary-size(vl), _::binary>> = IO.iodata_to_binary(s)
              {next_value, vl}
          end
        if next_key == key do
          value_or_tombstone
        else
          case next_vt_len do
            0 ->
              # no need to skip tombstone
              keep_reading(key, s)
            n ->
              # skip the next value, then keep reading
              <<_::binary-size(n), u::binary>> = s
              keep_reading(key, u)
          end
        end
    end
  end
  defp find_nearest_offset(index, key) do
    Enum.reduce_while(index, 0, fn {next_key, next_offset}, last_offset ->
      case next_key do
        n when n > key -> {:halt, last_offset}
        n when n == key -> {:halt, next_offset}
        _ -> {:cont, next_offset}
      end
    end)
  end
end | 
	lib/august_db/sstable/sstable.ex | 0.886994 | 0.873323 | 
	sstable.ex | 
	starcoder | 
| 
	defmodule Is do
  @moduledoc ~S"""
  Fast, extensible, and easy-to-use data structure validation with support for nested structures.
  """
  alias Is.{AliasType, Validator, Validators}
  @validators_map Validator.to_map(
    Application.get_env(:is, :validators, Validators.get_default())
  )
  @doc ~S"""
  Validate data with schema and return list of errors.
  ## Examples
      iex> validate(true, :boolean)
      []
      iex> validate(true, boolean: true)
      []
      iex> validate(true, boolean: false)
      [{:error, [], "must not be a boolean"}]
      iex> validate(true, or: [:boolean, :atom])
      []
      iex> validate(:ok, or: [:boolean, :atom])
      []
      iex> data = Enum.map(1..10, &(%{
      ...>   a: "a",
      ...>   b: true,
      ...>   c: ["a", "b", false],
      ...>   d: [[1, 2, 3], [4, 5, 6]],
      ...>   index: &1,
      ...> }))
      iex> schema = [list: [map: %{
      ...>   a: :binary,
      ...>   b: :boolean,
      ...>   c: [list: [or: [:binary, :boolean]]],
      ...>   d: [list: [list: :integer]],
      ...>   e: [and: [:optional, :binary]],
      ...>   index: [and: [:integer, in_range: [min: 0]]],
      ...> }]]
      iex> validate(data, schema)
      []
      iex> data = Enum.map(1..2, &(%{
      ...>   a: 1,
      ...>   b: "b",
      ...>   c: {"a", "b", false},
      ...>   d: [[1, 2, "3"], [4, false, 6]],
      ...>   e: -1,
      ...>   f: "1234567891011",
      ...>   index: &1 - 10,
      ...> }))
      iex> schema = [list: [map: %{
      ...>   a: :binary,
      ...>   b: :boolean,
      ...>   c: [list: [or: [:binary, :boolean]]],
      ...>   d: [list: [list: :integer]],
      ...>   e: [and: [:optional, :binary]],
      ...>   index: [and: [:integer, in_range: [min: 0]]],
      ...> }]]
      iex> validate(data, schema)
      [
        {:error, [0, :a], "must be a binary"},
        {:error, [0, :b], "must be a boolean"},
        {:error, [0, :c], "must be a list"},
        {:error, [0, :d, 0, 2], "must be an integer"},
        {:error, [0, :d, 1, 1], "must be an integer"},
        {:error, [0, :e], "must be a binary"},
        {:error, [0, :index], "must at least be 0"},
        {:error, [1, :a], "must be a binary"},
        {:error, [1, :b], "must be a boolean"},
        {:error, [1, :c], "must be a list"},
        {:error, [1, :d, 0, 2], "must be an integer"},
        {:error, [1, :d, 1, 1], "must be an integer"},
        {:error, [1, :e], "must be a binary"},
        {:error, [1, :index], "must at least be 0"},
      ]
      iex> validate(%{}, :unknown)
      {:error, "Validator :unknown does not exist"}
  """
  @spec validate(any, any) :: :break | [{:error, [any], binary}]
  def validate(data, validator) do
    validate(data, validator, [], false)
  end
  @doc """
  Validate data with schema and return a list of errors prepended with path,
  or an internal status if nested.
  The given path is prepended to all errors.
  If nested is true, internal statuses (like `:break`) are returned as-is;
  otherwise normalized errors are always returned.
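  An illustrative sketch (not a doctest):
      validate(1, :boolean, [:root], false)
      #=> [{:error, [:root], "must be a boolean"}]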
  """
  @spec validate(any, any, [any], boolean) :: :break | [{:error, [any], binary}]
  def validate(data, validator, path, nested) when is_atom(validator) do
    validate(data, [{validator, true}], path, nested)
  end
  def validate(data, validator, path, nested) when is_tuple(validator) do
    validate(data, [validator], path, nested)
  end
  def validate(data, [{validator, options}], path, nested) when is_atom(validator) do
    with {:ok, validator} <- get_validator(validator) do
      case apply(validator, :validate, [data, options]) do
        :ok -> []
        :break when nested === true -> :break
        :break when nested === false -> []
        {:error, nested_path, error} -> [{:error, path ++ nested_path, error}]
        {:error, error} -> [{:error, path, error}]
        errors when is_list(errors) -> normalize_errors(errors, path)
      end
    else
      {:error, error} -> {:error, error}
    end
  end
  def validate(data, options, path, nested) do
    case get_alias(options) do
      {:ok, type} -> validate(data, [{type, options}], path, nested)
      {:ok, type, nested_options} ->
        validate(data, [{type, nested_options}], path, nested)
      {:error, error} -> {:error, error}
    end
  end
  @doc ~S"""
  Returns true if data is valid, else false.
  ## Examples
      iex> valid?(true, :boolean)
      true
      iex> valid?(true, boolean: true)
      true
      iex> valid?(true, boolean: false)
      false
      iex> valid?(true, or: [:boolean, :atom])
      true
      iex> valid?(:ok, or: [:boolean, :atom])
      true
      iex> data = Enum.map(1..10, &(%{
      ...>   a: "a",
      ...>   b: true,
      ...>   c: ["a", "b", false],
      ...>   d: [[1, 2, 3], [4, 5, 6]],
      ...>   index: &1,
      ...> }))
      iex> schema = [list: [map: %{
      ...>   a: :binary,
      ...>   b: :boolean,
      ...>   c: [list: [or: [:binary, :boolean]]],
      ...>   d: [list: [list: :integer]],
      ...>   e: [and: [:optional, :binary]],
      ...>   index: [and: [:integer, in_range: [min: 0]]],
      ...> }]]
      iex> valid?(data, schema)
      true
      iex> data = Enum.map(1..2, &(%{
      ...>   a: 1,
      ...>   b: "b",
      ...>   c: {"a", "b", false},
      ...>   d: [[1, 2, "3"], [4, false, 6]],
      ...>   e: -1,
      ...>   f: "1234567891011",
      ...>   index: &1 - 10,
      ...> }))
      iex> schema = [list: [map: %{
      ...>   a: :binary,
      ...>   b: :boolean,
      ...>   c: [list: [or: [:binary, :boolean]]],
      ...>   d: [list: [list: :integer]],
      ...>   e: [and: [:optional, :binary]],
      ...>   index: [and: [:integer, in_range: [min: 0]]],
      ...> }]]
      iex> valid?(data, schema)
      false
      iex> valid?(true, {:boolean, true})
      true
      iex> valid?(%{a: true}, %{a: :boolean})
      true
      iex> valid?(%{}, :unknown)
      {:error, "Validator :unknown does not exist"}
  """
  @spec valid?(any, any) :: boolean | {:error, binary}
  def valid?(data, validator) do
    valid?(data, validator, [], false)
  end
  @spec valid?(any, any, [any], boolean) :: boolean | {:error, binary}
  def valid?(data, validator, path, nested) when is_atom(validator) do
    valid?(data, [{validator, true}], path, nested)
  end
  def valid?(data, validator, path, nested) when is_tuple(validator) do
    valid?(data, [validator], path, nested)
  end
  def valid?(data, [{validator, options}], _path, _nested) when is_atom(validator) do
    with {:ok, validator} <- get_validator(validator) do
      case apply(validator, :validate, [data, options]) do
        :ok -> true
        [] -> true
        :break -> true
        _ -> false
      end
    else
      {:error, error} -> {:error, error}
    end
  end
  def valid?(data, options, path, nested) do
    case get_alias(options) do
      {:ok, type} -> valid?(data, [{type, options}], path, nested)
      {:ok, type, nested_options} ->
        valid?(data, [{type, nested_options}], path, nested)
      {:error, error} -> {:error, error}
    end
  end
  @doc """
  Returns the validator associated with the given id, if any.
  """
  @spec get_validator(atom) :: {:ok, atom} | {:error, any}
  def get_validator(id) do
    Validator.get(@validators_map, id)
  end
  @doc """
  Returns the alias associated with the given value, if any.
  """
  @spec get_alias(any) :: {:ok, atom} | {:ok, atom, boolean | Keyword.t} |
    {:error, any}
  def get_alias(value) do
    AliasType.get(value)
  end
  @doc """
  Returns the given errors in normalized form.
  """
  @spec normalize_errors([any], [any]) :: [{:error, [any], [any]}]
  def normalize_errors(errors, path) when is_list(errors) and is_list(path) do
    Enum.map(errors, fn
      {:error, error} -> {:error, path, error}
      {:error, nested_path, error} -> {:error, path ++ nested_path, error}
      {nested_path, error} -> {:error, path ++ nested_path, error}
    end)
  end
end | 
	lib/is.ex | 0.900696 | 0.658843 | 
	is.ex | 
	starcoder | 
| 
	defmodule Firmata.Board do
  use GenServer
  use Firmata.Protocol.Mixin
  require Logger
  alias Firmata.Protocol.State, as: ProtocolState
  @initial_state %{
    pins: [],
    outbox: [],
    processor_pid: nil,
    parser: {},
    firmware_name: "",
    interface: nil,
    serial: nil
  }
  def start_link(port, opts \\ [], name \\ nil) do
    opts = Keyword.put(opts, :interface, self())
    GenServer.start_link(__MODULE__, {port, opts}, name: name)
  end
  def stop(pid) do
    GenServer.call(pid, :stop)
  end
  def report_analog_channel(board, channel, value) do
    GenServer.call(board, {:report_analog_channel, channel, value})
  end
  def set_pin_mode(board, pin, mode) do
    GenServer.call(board, {:set_pin_mode, pin, mode})
  end
  def pin_state(board, pin) do
    board |> sysex_write(@pin_state_query, <<pin>>)
  end
  def digital_write(board, pin, value) do
    GenServer.call(board, {:digital_write, pin, value})
  end
  def sonar_config(board, trigger, echo, max_distance, ping_interval) do
    set_pin_mode(board, trigger, @sonar)
    set_pin_mode(board, echo, @sonar)
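    # Firmata sysex payloads carry 7-bit bytes, so max_distance is split into
    # two 7-bit halves (e.g. 200 becomes lsb 72, msb 1).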
    max_distance_lsb = max_distance &&& 0x7f
    max_distance_msb = (max_distance >>> 7) &&& 0x7f
    data = <<trigger, echo, max_distance_lsb, max_distance_msb, ping_interval>>
    board |> sysex_write(@sonar_config, data)
  end
  def neopixel_register(board, pin, num_pixels) do
    data = <<pin, num_pixels>>
    board |> sysex_write(@neopixel_register, data)
  end
  def neopixel(board, index, {r, g, b}) do
    data = <<index, r, g, b>>
    board |> sysex_write(@neopixel, data)
  end
  def neopixel_brightness(board, brightness) do
    data = <<brightness>>
    board |> sysex_write(@neopixel_brightness, data)
  end
  def sysex_write(board, cmd, data) do
    GenServer.call(board, {:sysex_write, cmd, data})
  end
  ## Server Callbacks
  def init({port, opts}) do
    speed = opts[:speed] || 57600
    uart_opts = [speed: speed, active: true]
    {:ok, serial} = Nerves.UART.start_link
    :ok = Nerves.UART.open(serial, port, uart_opts)
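    # Assumption based on standard Firmata opcodes: 0xFF requests a system
    # reset and 0xF9 requests a protocol version report.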
    Nerves.UART.write(serial, <<0xFF>>)
    Nerves.UART.write(serial, <<0xF9>>)
    state =
      @initial_state
      |> Map.put(:serial, serial)
      |> Map.put(:interface, opts[:interface])
    {:ok, state}
  end
  def handle_call(:stop, _from, state) do
    Process.exit(state[:processor_pid], :normal)
    {:reply, :ok, state}
  end
  def handle_call({:report_analog_channel, channel, value}, {interface, _}, state) do
    state = state
    |> put_analog_channel(channel, :report, value)
    |> put_analog_channel(channel, :interface, interface)
    send_data(state, <<@report_analog ||| channel, value>>)
    {:reply, :ok, state}
  end
  def handle_call({:set_pin_mode, pin, mode}, _from, state) do
    state = state |> put_pin(pin, :mode, mode)
    send_data(state, <<@pin_mode, pin, mode>>)
    {:reply, :ok, state}
  end
  def handle_call({:digital_write, pin, value}, _from, state) do
    state = state |> put_pin(pin, :value, value)
    signal = state[:pins] |> Firmata.Protocol.digital_write(pin, value)
    send_data(state, signal)
    {:reply, :ok, state}
  end
  def handle_call({:sysex_write, cmd, data}, _from, state) do
    send_data(state, <<@start_sysex, cmd>> <> data <> <<@end_sysex>>)
    {:reply, :ok, state}
  end
  def handle_info({:nerves_uart, _port, data}, state) do
    {outbox, parser} = Enum.reduce(data, {state.outbox, state.parser}, &Firmata.Protocol.parse(&2, &1))
    Enum.each(outbox, &send(self(), &1))
    {:noreply, %{state | outbox: [], parser: parser}}
  end
  def handle_info({:report_version, major, minor}, state) do
    send_data(state, <<@start_sysex, @capability_query, @end_sysex>>)
    state = Map.put(state, :version, {major, minor})
    send_info(state, {:version, major, minor})
    {:noreply, state}
  end
  def handle_info({:firmware_name, name}, state) do
    state = Map.put(state, :firmware_name, name)
    send_info(state, {:firmware_name, state[:firmware_name]})
    {:noreply, state}
  end
  def handle_info({:capability_response, pins}, state) do
    state = Map.put(state, :pins, pins)
    send_data(state, <<@start_sysex, @analog_mapping_query, @end_sysex>>)
    {:noreply, state}
  end
  def handle_info({:analog_mapping_response, mapping}, state) do
    pins = state[:pins]
    |> Enum.zip(mapping)
    |> Enum.map(fn({pin, map})-> Keyword.merge(pin, map) end)
    |> Enum.map(fn pin -> Keyword.merge(pin, [interface: nil]) end)
    state = Map.put(state, :pins, pins)
    send_info(state, {:pin_map, state[:pins]})
    {:noreply, state}
  end
  def handle_info({:analog_read, channel, value }, state) do
    state = state |> put_analog_channel(channel, :value, value, fn(pin) ->
      send_info(state, {:analog_read, pin[:analog_channel], value}, pin[:interface])
    end)
    {:noreply, state}
  end
  def handle_info({:i2c_response, [value: value] }, state) do
    send_info(state, {:i2c_response, value})
    {:noreply, state}
  end
  def handle_info({:string_data, [value: value] }, state) do
    send_info(state, {:string_data, value |> parse_ascii})
    {:noreply, state}
  end
  def handle_info({:sonar_data, [value: value, pin: pin]}, state) do
    send_info(state, {:sonar_data, pin, value})
    {:noreply, state}
  end
  def handle_info({:pin_state, pin, mode, pin_state}, state) do
    send_info(state, {:pin_state, pin, mode, pin_state})
    {:noreply, state}
  end
  def handle_info(unknown, state) do
    Logger.error("Unknown message in #{__MODULE__}: #{inspect unknown}")
    {:noreply, state}
  end
  defp send_data(state, data), do: Nerves.UART.write(state.serial, data)
  defp send_info(state, info, interface \\ nil) do
    case interface do
      nil -> send_to(state[:interface], {:firmata, info})
      _ -> send_to(interface, {:firmata, info})
    end
  end
  defp send_to(interface, message), do: send(interface, message)
  defp put_pin(state, index, key, value, found_callback \\ nil) do
    pins = state[:pins] |> List.update_at(index, fn(pin) ->
      pin = Keyword.put(pin, key, value)
      if found_callback, do: found_callback.(pin)
      pin
    end)
    Map.put(state, :pins, pins)
  end
  defp analog_channel_to_pin_index(state, channel) do
    Enum.find_index(state[:pins], fn(pin) ->
      pin[:analog_channel] === channel
    end)
  end
  defp put_analog_channel(state, channel, key, value, found_callback \\ nil) do
    pin = analog_channel_to_pin_index(state, channel)
    put_pin(state, pin, key, value, found_callback)
  end
  defp parse_ascii(data), do: for n <- data, n != <<0>>, into: "", do: n
end | 
	lib/firmata/board.ex | 0.547948 | 0.409339 | 
	board.ex | 
	starcoder | 
| 
	defmodule Cased.Sensitive.String do
  @moduledoc """
  Used to mask sensitive string values.
  """
  @enforce_keys [:data, :label]
  defstruct [
    :data,
    :label
  ]
  @type t :: %__MODULE__{
          data: String.t(),
          label: nil | atom() | String.t()
        }
  @type new_opts :: [new_opt()]
  @type new_opt :: {:label, String.t() | atom()}
  @doc """
  Create a new `Cased.Sensitive.String` struct.
  ## Example
  ```
  Cased.Sensitive.String.new("<EMAIL>", label: :email)
  ```
  """
  @spec new(raw_string :: String.t(), opts :: new_opts()) :: t()
  def new(raw_string, opts \\ []) do
    %__MODULE__{
      data: raw_string,
      label: Keyword.get(opts, :label, nil)
    }
  end
  @doc """
  Extract all `{begin_offset, end_offset}` values for matches of a given regular expression.
  ## Examples
  ```
  Cased.Sensitive.String.new("Hello @username and @username")
  |> Cased.Sensitive.String.matches(~r/@\w+/)
  # => [{6, 15}, {20, 29}]
  ```
  """
  @spec matches(string :: t(), regex :: Regex.t()) :: [{non_neg_integer(), non_neg_integer()}]
  def matches(string, regex) do
    Regex.scan(regex, string.data, return: :index)
    |> List.flatten()
    |> Enum.map(fn {offset, length} ->
      {offset, offset + length}
    end)
  end
  @doc """
  Check two sensitive strings for equality.
  ## Examples
  Two strings with the same data and label are equivalent:
  ```
  iex> string1 = Cased.Sensitive.String.new("text", label: "username")
  iex> string2 = Cased.Sensitive.String.new("text", label: "username")
  iex> Cased.Sensitive.String.equal?(string1, string2)
  true
  ```
  If the contents are different, two sensitive strings are not equal:
  ```
  iex> string1 = Cased.Sensitive.String.new("text", label: "username")
  iex> string2 = Cased.Sensitive.String.new("txet", label: "username")
  iex> Cased.Sensitive.String.equal?(string1, string2)
  false
  ```
  If the labels are different, two sensitive strings are not equal:
  ```
  iex> string1 = Cased.Sensitive.String.new("text", label: "username")
  iex> string2 = Cased.Sensitive.String.new("text", label: "email")
  iex> Cased.Sensitive.String.equal?(string1, string2)
  false
  ```
  """
  @spec equal?(string1 :: t(), string2 :: t()) :: boolean()
  def equal?(string1, string2) do
    string1.data == string2.data && string1.label == string2.label
  end
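  @doc """
  Builds a `Cased.Sensitive.Range` covering the entire string under the given key,
  spanning from offset 0 to the byte size of the underlying data.
  """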
  @spec to_range(string :: t(), key :: String.t()) :: Cased.Sensitive.Range.t()
  def to_range(string, key) do
    %Cased.Sensitive.Range{
      label: string.label,
      key: key,
      begin_offset: 0,
      end_offset: byte_size(string.data)
    }
  end
end | 
	lib/cased/sensitive/string.ex | 0.915879 | 0.829492 | 
	string.ex | 
	starcoder | 
| 
	defmodule Eml.Compiler do
  @moduledoc """
  Various helper functions for implementing an Eml compiler.
  """
  @type chunk :: String.t | { :safe, String.t } | Macro.t
  # Options helper
  @default_opts [escape: true,
                 transform: nil,
                 fragment: false,
                 compiler: Eml.HTML.Compiler]
  defp new_opts(opts), do: Keyword.merge(@default_opts, opts)
  # API
  @doc """
  Compiles eml to a string, or to a quoted expression when the input
  contains quoted expressions too.
  Accepts the same options as `Eml.render/3`.
  In case of error, raises an Eml.CompileError exception.
  ### Examples:
      iex> Eml.Compiler.compile(body(h1(id: "main-title")))
      {:safe, "<body><h1 id='main-title'></h1></body>"
  """
  @spec compile(Eml.t, Keyword.t) :: { :safe, String.t } | Macro.t
  def compile(eml, opts \\ []) do
    opts = new_opts(opts)
    opts = Keyword.merge(opts[:compiler].opts(), opts)
    compile_node(eml, opts, []) |> to_result(opts)
  end
  @spec precompile(Macro.Env.t, Keyword.t) :: { :safe, String.t } | Macro.t
  def precompile(env \\ %Macro.Env{}, opts) do
    mod = env.module
    mod_opts = if mod && Module.open?(mod),
                 do: Module.get_attribute(mod, :eml_compile) |> Macro.escape(),
                 else: []
    opts = Keyword.merge(mod_opts, opts)
    { file, opts } = Keyword.pop(opts, :file)
    { block, opts } = Keyword.pop(opts, :do)
    ast = if file do
            string = File.read!(file)
            Code.string_to_quoted!(string, file: file, line: 1)
          else
            block
          end |> prewalk(opts[:fragment])
    { expr, _ } = Code.eval_quoted(ast, [], env)
    { opts, _ } = Code.eval_quoted(opts, [], env)
    compile(expr, opts)
  end
  # Content parsing
  @spec compile_node(Eml.t, map, [chunk]) :: [chunk]
  def compile_node(list, opts, chunks) when is_list(list) do
    Enum.reduce(list, chunks, fn node, chunks ->
      compile_node(node, opts, chunks)
    end)
  end
  def compile_node(node, opts, chunks) do
    node = node
    |> maybe_transform(opts)
    |> Eml.Encoder.encode()
    case opts[:compiler].compile_node(node, opts, chunks) do
      :unhandled ->
        default_compile_node(node, opts, chunks)
      s ->
        s
    end
  end
  @spec default_compile_node(Eml.node_primitive, map, [chunk]) :: [chunk]
  defp default_compile_node(node, opts, chunks) when is_binary(node) do
    add_chunk(maybe_escape(node, opts), chunks)
  end
  defp default_compile_node({ :safe, node }, _opts, chunks) when is_binary(node) do
    add_chunk(node, chunks)
  end
  defp default_compile_node(node, _opts, chunks) when is_tuple(node) do
    add_chunk(node, chunks)
  end
  defp default_compile_node(%Eml.Element{template: fun} = node, opts, chunks) when is_function(fun) do
    node |> Eml.Element.apply_template() |> compile_node(opts, chunks)
  end
  defp default_compile_node(nil, _opts, chunks) do
    chunks
  end
  defp default_compile_node(node, _, _) do
    raise Eml.CompileError, message: "Bad node primitive: #{inspect node}"
  end
  # Attributes parsing
  @spec compile_attrs(Eml.Element.attrs, map, [chunk]) :: [chunk]
  def compile_attrs(attrs, opts, chunks) when is_map(attrs) do
    Enum.reduce(attrs, chunks, fn
      { _, nil }, chunks -> chunks
      { k, v }, chunks   -> compile_attr(k, v, opts, chunks)
    end)
  end
  @spec compile_attr(atom, Eml.t, map, [chunk]) :: [chunk]
  def compile_attr(field, value, opts, chunks) do
    opts[:compiler].compile_attr(field, value, opts, chunks)
  end
  @spec compile_attr_value(Eml.t, map, [chunk]) :: [chunk]
  def compile_attr_value(list, opts, chunks) when is_list(list) do
    Enum.reduce(list, chunks, fn value, chunks ->
      compile_attr_value(value, opts, chunks)
    end)
  end
  def compile_attr_value(value, opts, chunks) do
    value = Eml.Encoder.encode(value)
    case opts[:compiler].compile_attr_value(value, opts, chunks) do
      :unhandled ->
        default_compile_node(value, opts, chunks)
      s ->
        s
    end
  end
  # Text escaping
  entity_map = %{"&" => "&amp;",
                 "<" => "&lt;",
                 ">" => "&gt;",
                 "\"" => "&quot;",
                 "'" => "&#39;",
                 "…" => "&hellip;"}
  def escape(eml) do
    Eml.transform(eml, fn
      node when is_binary(node) ->
        escape(node, "")
      node ->
        node
    end)
  end
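  # Generate one escape/2 clause per entity at compile time, so escaping is a
  # single binary-pattern dispatch rather than a runtime map lookup.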
  for {char, entity} <- entity_map do
    defp escape(unquote(char) <> rest, acc) do
      escape(rest, acc <> unquote(entity))
    end
  end
  defp escape(<<char::utf8, rest::binary>>, acc) do
    escape(rest, acc <> <<char::utf8>>)
  end
  defp escape("", acc) do
    acc
  end
  # Create final result.
  defp to_result([{ :safe, string }], _opts) do
    { :safe, string }
  end
  defp to_result(chunks, opts) do
    template = :lists.reverse(chunks)
    if opts[:fragment] do
      template
    else
      quote do
        Eml.Compiler.concat(unquote(template), unquote(Macro.escape(opts)))
      end
    end
  end
  def maybe_transform(node, opts) do
    fun = opts[:transform]
    if is_function(fun), do: fun.(node), else: node
  end
  def maybe_escape(node, opts) do
    if opts[:escape], do: escape(node, ""), else: node
  end
  def add_chunk(chunk, [{:safe, safe_chunk} | rest]) when is_binary(chunk) do
    [{:safe, safe_chunk <> chunk } | rest]
  end
  def add_chunk(chunk, chunks) when is_binary(chunk) do
    [{ :safe, chunk } | chunks]
  end
  def add_chunk(chunk, chunks) do
    [chunk | chunks]
  end
  def concat(buffer, opts) do
    try do
      { :safe, concat(buffer, "", opts) }
    catch
      :throw, { :illegal_quoted, stacktrace } ->
        reraise Eml.CompileError,
        [message: "It's only possible to pass assigns to templates or components when using &"],
        stacktrace
    end
  end
  defp concat({ :safe, chunk }, acc, _opts) do
    acc <> chunk
  end
  defp concat(chunk, acc, opts) when is_binary(chunk) do
    acc <> maybe_escape(chunk, opts)
  end
  defp concat([chunk | rest], acc, opts) do
    concat(rest, concat(chunk, acc, opts), opts)
  end
  defp concat([], acc, _opts) do
    acc
  end
  defp concat(nil, acc, _opts) do
    acc
  end
  defp concat(node, acc, opts) do
    case Eml.Compiler.compile(node, opts) do
      { :safe, chunk } ->
        acc <> chunk
      _ ->
        throw { :illegal_quoted, System.stacktrace() }
    end
  end
  def prewalk(quoted, fragment?) do
    handler = if fragment?,
              do: &handle_fragment/1,
              else: &handle_template/1
    Macro.prewalk(quoted, handler)
  end
  defp handle_fragment({ :@, meta, [{ name, _, atom }] }) when is_atom(name) and is_atom(atom) do
    line = meta[:line] || 0
    Macro.escape(quote line: line do
      Access.get(var!(assigns), unquote(name))
    end)
  end
  defp handle_fragment({ :&, _meta, [{ _fun, _, args }] } = ast) do
    case Macro.prewalk(args, false, &handle_capture_args/2) do
      { _, true } ->
        ast
      { _, false } ->
        raise Eml.CompileError,
        message: "It's not possible to use & inside fragments"
    end
  end
  defp handle_fragment(arg) do
    arg
  end
  defp handle_template({ :&, meta, [{ fun, _, args }] }) do
    case Macro.prewalk(args, false, &handle_capture_args/2) do
      { _, true } ->
        raise Eml.CompileError,
        message: "It's not possible to use & for captures inside templates or components"
      { new_args, false } ->
        line = Keyword.get(meta, :line, 0)
        Macro.escape(quote line: line do
          unquote(fun)(unquote_splicing(List.wrap(new_args)))
        end)
    end
  end
  defp handle_template({ :@, meta, [{ name, _, atom }]}) when is_atom(name) and is_atom(atom) do
    line = Keyword.get(meta, :line, 0)
    Macro.escape(quote line: line do
      Eml.Compiler.get_assign(unquote(name), var!(assigns), var!(funs))
    end)
  end
  defp handle_template(ast) do
    ast
  end
  defp handle_capture_args({ :@, meta, [{ name, _, atom }]}, regular_capure?) when is_atom(name) and is_atom(atom) do
    line = Keyword.get(meta, :line, 0)
    ast = quote line: line do
      Eml.Compiler.get_assign(unquote(name), var!(assigns), var!(funs))
    end
    { ast, regular_capure? }
  end
  defp handle_capture_args({ :&, _meta, [num]} = ast, _regular_capure?) when is_integer(num) do
    { ast, true }
  end
  defp handle_capture_args({ :/, _meta, _args} = ast, _regular_capure?) do
    { ast, true }
  end
  defp handle_capture_args(ast, regular_capure?) do
    { ast, regular_capure? }
  end
  @doc false
  def get_assign(key, assigns, funs) do
    x = if is_map(assigns), do: Map.get(assigns, key), else: Keyword.get(assigns, key)
    case Keyword.get(funs, key) do
      nil -> x
      fun -> fun.(x)
    end
  end
end | 
	lib/eml/compiler.ex | 0.768299 | 0.472623 | 
	compiler.ex | 
	starcoder | 
| 
	defmodule Periodic do
  @moduledoc """
  Periodic job execution.
  ## Quick start
  It is recommended (but not required) to implement the job in a dedicated module. For example:
      defmodule SomeCleanup do
        def child_spec(_arg) do
          Periodic.child_spec(
            id: __MODULE__,
            run: &run/0,
            every: :timer.hours(1)
          )
        end
        defp run(), do: # ...
      end
  With such module implemented, you can place the job somewhere in the supervision tree:
      Supervisor.start_link(
        [
          SomeCleanup,
          # ...
        ],
        # ...
      )
  You can of course start multiple periodic jobs in the system, and they don't have to be the
  children of the same supervisor. You're advised to place the job in the proper part of the
  supervision tree. For example, a database cleanup job should share the ancestor with the
  repo, while a job working with Phoenix channels should share the ancestor with the
  endpoint.
  As mentioned, you don't need to create a dedicated module to run a job. It's also possible to
  provide `{Periodic, opts}` in the supervisor child list. Finally, if you need more runtime
  flexibility, you can also start the job with `start_link/1`.
  ## Process structure
  The process started with `start_link` is called the _scheduler_. This is the process which
  regularly "ticks" in the given interval and executes the _job_. The job is executed in a separate
  one-off process, which is the child of the scheduler. When the job is done, the job process
  stops. Therefore, each job instance is running in a separate process.
  Depending on the overlapping mode (see the `:on_overlap` option), it can happen that multiple
  instances of the same job are running simultaneously.
  ## Options
  - `:run` (required) - Zero arity function or MFA invoked to run the job. This function is
    invoked in a separate one-off process which is a child of the scheduler.
  - `:every` (required) - Time in milliseconds between two consecutive job executions (see
    `:delay_mode` option for details).
  - `:initial_delay` - Time in milliseconds before the first execution of the job. If not provided,
    the default value of `:every` is used. In other words, the first execution will by default
    take place after the `:every` interval has passed.
  - `:delay_mode` - Controls how the `:every` interval is interpreted. Following options are
    possible:
      - `:regular` (default) - `:every` represents the time between two consecutive starts
      - `:shifted` - `:every` represents the time between the termination of the previous and the
        start of the next instance.
    See the "Delay mode" section for more details.
  - `:when` - Function which acts as an additional runtime guard to decide if the job will be
    started. This can be useful for implementing fixed scheduled jobs. See the "Fixed scheduling"
    section for details.
  - `:on_overlap` - Defines the desired behaviour when the job is about to be started while the
    previous instance is still running.
      - `:run` (default) - always start the new job
      - `:ignore` - don't start the new job if the previous instance is still running
      - `:stop_previous` - stop the previous instance before starting the new one
  - `:timeout` - Defines the maximum running time of the job. If the job doesn't finish in the
    given time, it is forcefully terminated. In this case, the job's shutdown specification is
    ignored. Defaults to `:infinity`
  - `:job_shutdown` - Shutdown value of the job process. See the "Shutdown" section
    for details.
  - `:id` - Supervisor child id of the scheduler process. Defaults to `Periodic`. If you plan on
    running multiple periodic jobs under the same supervisor, make sure that they have different
    id values.
  - `:name` - Registered name of the scheduler process. If not provided, the process will not be
    registered.
  - `:telemetry_id` - Id used in telemetry event names. See the "Telemetry" section for more
    details. If not provided, telemetry events won't be emitted.
  - `:mode` - When set to `:manual`, the jobs won't be started automatically. Instead you have to
    manually send tick signals to the scheduler. This should be used only in `:test` mix env. See
    the "Testing" section for details.
  ## Delay mode
  In the `:regular` mode (which is the default), the interval indicates time between two
  consecutive starts. This mode is typically useful if you want to maintain a stable execution
  rate (the number of executions per some time period). It is also a better choice if you're
  implementing fixed scheduling, as advised in the "Fixed scheduling" section.
  In the `:shifted` mode the interval represents the pause time between the end of the job and the
  start of the next one. This mode is likely a better choice if you want to have a fixed "cool off"
  period between two consecutive executions, to reduce the load on the system.
  Internally, Periodic relies on Erlang's monotonic time, which improves rate stability regardless
  of system time changes (see [Time correction](http://erlang.org/doc/apps/erts/time_correction.html#time-correction)).
  Consider using the "Multi-time warp mode" (see [here](http://erlang.org/doc/apps/erts/time_correction.html#time-warp-modes))
  to further improve rate stability in the situations when system time changes.
  In general, the overhead introduced by Periodic as well as job processing will be compensated,
  and you can usually expect stable intervals with very small variations (typically in sub
  milliseconds range), and no steady shift over time. However, in some cases, for example when the
  system is overloaded, the variations might be more significant.
  In the `:shifted` mode the job duration will affect the execution of the next job. In addition,
  Periodic will induce a slight (usually less than 100 microseconds), but a steady skew, due to
  its own internal processing.
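  For example, a job which should always pause one minute between the end of one execution and
  the start of the next could be configured as follows (a sketch; the run function is
  illustrative):
      Periodic.start_link(
        run: &cleanup/0,
        every: :timer.minutes(1),
        delay_mode: :shifted
      )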
  ## Shutdown
  To stop the scheduler, you need to ask its parent supervisor to stop the scheduler using
  [Supervisor.terminate_child](https://hexdocs.pm/elixir/Supervisor.html#terminate_child/2).
  The scheduler process acts as a supervisor, and so it has the same shutdown behaviour. When
  ordered to terminate by its parent, the scheduler will stop currently running job instances
  according to the `:job_shutdown` configuration.
  The default behaviour is to wait 5 seconds for the job to finish. However, in order for this
  waiting to actually happen, you need to invoke `Process.flag(:trap_exit, true)` from the run
  function.
  You can change the waiting time with the `:job_shutdown` option, which has the same semantics as
  in `Supervisor`. See [corresponding Supervisor documentation]
  (https://hexdocs.pm/elixir/Supervisor.html#module-shutdown-values-shutdown) for details.
  ## Fixed scheduling
  Periodic doesn't have explicit support for scheduling jobs at some particular time (e.g. every
  day at midnight). However, you can implement this on top of the provided functionality using
  the `:when` option
      defmodule SomeCleanup do
        def child_spec(_arg) do
          Periodic.child_spec(
            # check every minute if we need to run the cleanup
            every: :timer.minutes(1),
            # start the job only if it's midnight
            when: fn -> match?(%Time{hour: 0, minute: 0}, Time.utc_now()) end,
            # ...
          )
        end
        # ...
      end
  Note that the execution guarantees here are "at most once". If the system is down at the
  scheduled time, the job won't be executed. Stronger guarantees can be obtained by basing the
  conditional logic on some persistence mechanism.
  Note that the `:when` guard is executed in the scheduler process. If the guard execution time is
  larger than the ticking period, time drifts will occur.
  ## Telemetry
  The scheduler optionally emits telemetry events. To configure telemetry you need to provide
  the `:telemetry_id` option. For example:
      Periodic.start_link(telemetry_id: :db_cleanup, ...)
  This will emit various events in the shape of `[Periodic, telemetry_id, event]`. Currently
  supported events are:
  - `:started` - a new job instance is started
  - `:finished` - job instance has finished or crashed (see related metadata for the reason)
  - `:skipped` - new instance hasn't been started because the previous one is still running
  - `:stopped_previous` - previous instance has been stopped because the new one is about to be
    started
  To consume the desired events, install the corresponding telemetry handler.
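  For example, a handler for the `:finished` event of the `:db_cleanup` scheduler might be
  attached like this (the handler id and handler function are illustrative):
      :telemetry.attach(
        "db-cleanup-finished",
        [Periodic, :db_cleanup, :finished],
        fn _event, measurements, metadata, _config ->
          IO.inspect({measurements, metadata})
        end,
        nil
      )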
  ## Logging
  Basic logger is provided in `Periodic.Logger`. To use it, the scheduler needs to be started with
  the `:telemetry_id` option.
  To install logger handlers, you can invoke `Periodic.Logger.install(telemetry_id)`. This function
  should be invoked only once per each scheduler during the system lifetime, preferably before the
  scheduler is started. A convenient place to do it is your application start callback.
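  For example, with the `:db_cleanup` scheduler from above:
      def start(_type, _args) do
        Periodic.Logger.install(:db_cleanup)
        # ...
      end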
  ## Testing
  The scheduler can be deterministically tested by setting the `:mode` option to `:manual`.
  In this mode, the scheduler won't tick on its own, and so it won't start any jobs unless
  instructed to by the client code.
  The `:mode` should be set to `:manual` only in test mix environment. Here's a simple approach
  which doesn't require app env and config files:
      defmodule MyPeriodicJob do
        @mode if Mix.env() != :test, do: :auto, else: :manual
        def child_spec(_arg) do
          Periodic.child_spec(
            mode: @mode,
            name: __MODULE__,
            # ...
          )
        end
        # ...
      end
  Of course, you can alternatively use app env or any other approach you prefer. Just make sure
  to set the mode to manual only in test env.
  Notice that we're also setting the registered name and telemetry id. We'll need both to
  interact with the scheduler.
  With such setup in place, the general shape of the periodic job test would look like this:
      defmodule MyPeriodicJobTest do
        use ExUnit.Case, async: true
        require Periodic.Test
        test "my periodic job" do
          bring_the_system_into_the_desired_state()
          # tick the scheduler
          assert Periodic.Test.sync_tick(MyPeriodicJob) == {:ok, :normal}
          verify_side_effect_of_the_job()
        end
      end
  Note that this won't suffice for fixed schedules. Consider again the cleanup job which runs
  at midnight:
      defmodule SomeCleanup do
        def child_spec(_arg) do
          Periodic.child_spec(
            every: :timer.minutes(1),
            when: fn -> match?(%Time{hour: 0, minute: 0}, Time.utc_now()) end,
            # ...
          )
        end
        # ...
      end
  Manually ticking won't start the job, unless the test is running exactly at midnight. To make
  this module testable, you need to use a different implementation of `:when` in test environment:
      defmodule SomeCleanup do
        def child_spec(_arg) do
          Periodic.child_spec(
            every: :timer.minutes(1),
            when: &should_run?/0
            # ...
          )
        end
        if Mix.env() != :test do
          defp should_run?(), do: match?(%Time{hour: 0, minute: 0}, Time.utc_now())
        else
          defp should_run?(), do: true
        end
        # ...
      end
  ## Comparison to other schedulers
  There are various other abstractions for running periodic jobs in BEAM, such as:
    - the built-in [:timer](https://erlang.org/doc/man/timer.html) module from Erlang stdlib
    - [erlcron](https://github.com/erlware/erlcron)
    - [quantum](https://hexdocs.pm/quantum/readme.html)
    - [Oban](https://hexdocs.pm/oban/Oban.html#module-periodic-cron-jobs)
  Compared to `:timer`, Periodic offers some additional features, such as overlap handling,
  distributed scheduling, and telemetry support.
  Compared to most other third party libraries, Periodic will likely provide much less features
  out of the box. So in some situations, such as database persistence or back-pressure, you might
  need to invest more effort with Periodic. On the plus side Periodic should be simpler to use
  in typical scenarios, and much easier to reason about, while still providing enough flexibility
  to handle arbitrarily complex scenarios.
  For a more detailed discussion, see [this blog post](https://www.theerlangelist.com/article/periodic).
  """
  use Parent.GenServer
  require Logger
  @type opts :: [
          id: term,
          name: GenServer.name(),
          telemetry_id: term,
          mode: :auto | :manual,
          every: pos_integer,
          initial_delay: non_neg_integer,
          delay_mode: :regular | :shifted,
          run: (() -> term) | {module, atom, [term]},
          when: (() -> boolean) | {module, atom, [term]},
          on_overlap: :run | :ignore | :stop_previous,
          timeout: pos_integer | :infinity,
          job_shutdown: :brutal_kill | :infinity | non_neg_integer()
        ]
  @doc "Starts the periodic executor."
  @spec start_link(opts) :: GenServer.on_start()
  def start_link(opts) do
    gen_server_opts = Keyword.take(opts, [:name])
    Parent.GenServer.start_link(__MODULE__, Map.new(opts), gen_server_opts)
  end
  @doc "Builds a child specification for starting the periodic executor."
  @spec child_spec(opts) :: Supervisor.child_spec()
  def child_spec(opts) do
    opts
    |> super()
    |> Supervisor.child_spec(id: Keyword.get(opts, :id, __MODULE__))
  end
  @impl GenServer
  def init(opts) do
    state = Map.merge(defaults(), opts)
    {initial_delay, state} = Map.pop(state, :initial_delay, state.every)
    enqueue_next_tick(state, initial_delay)
    {:ok, state}
  end
  @impl GenServer
  def handle_info({:tick, expected_now}, state) do
    handle_tick(state, now: expected_now)
    {:noreply, state}
  end
  @impl GenServer
  def handle_call({:tick, opts}, from, %{mode: :manual} = state) do
    caller = if Keyword.get(opts, :await_job?), do: from, else: nil
    response = handle_tick(state, caller: caller)
    cond do
      is_nil(caller) -> {:reply, :ok, state}
      match?({:ok, _pid}, response) -> {:noreply, state}
      true -> {:reply, {:error, :job_not_started}, state}
    end
  end
  @impl Parent.GenServer
  def handle_stopped_children(info, state) do
    [info] = Map.values(info)
    with from when not is_nil(from) <- info.meta.caller,
         do: GenServer.reply(from, {:ok, info.exit_reason})
    if state.delay_mode == :shifted, do: enqueue_next_tick(state, state.every)
    duration =
      :erlang.convert_time_unit(
        :erlang.monotonic_time() - info.meta.started_at,
        :native,
        :microsecond
      )
    telemetry(state, :finished, %{job: info.pid, reason: info.exit_reason}, %{time: duration})
    {:noreply, state}
  end
  defp defaults() do
    %{
      telemetry_id: nil,
      mode: :auto,
      delay_mode: :regular,
      on_overlap: :run,
      timeout: :infinity,
      job_shutdown: :timer.seconds(5),
      when: nil
    }
  end
  defp handle_tick(state, opts) do
    if state.delay_mode == :regular, do: enqueue_next_tick(state, state.every, opts)
    if job_guard_satisfied?(state), do: start_job(state, opts)
  end
  defp job_guard_satisfied?(%{when: nil}), do: true
  defp job_guard_satisfied?(%{when: {m, f, a}}), do: apply(m, f, a)
  defp job_guard_satisfied?(%{when: fun}) when is_function(fun), do: fun.()
  defp start_job(%{on_overlap: :run} = state, opts),
    do: start_job_process(state, opts)
  defp start_job(%{on_overlap: :ignore} = state, opts) do
    case previous_instance() do
      {:ok, pid} ->
        telemetry(state, :skipped, %{still_running: pid})
        nil
      :error ->
        start_job_process(state, opts)
    end
  end
  defp start_job(%{on_overlap: :stop_previous} = state, opts) do
    with {:ok, pid} <- previous_instance() do
      Parent.shutdown_all(:kill)
      telemetry(state, :stopped_previous, %{pid: pid})
    end
    start_job_process(state, opts)
  end
  defp start_job_process(state, opts) do
    job = state.run
    {:ok, pid} =
      Parent.start_child(%{
        id: make_ref(),
        start: {Task, :start_link, [fn -> invoke_job(job) end]},
        timeout: state.timeout,
        shutdown: state.job_shutdown,
        restart: :temporary,
        meta: %{started_at: :erlang.monotonic_time(), caller: Keyword.get(opts, :caller)},
        ephemeral?: true
      })
    telemetry(state, :started, %{job: pid})
    {:ok, pid}
  end
  defp invoke_job({mod, fun, args}), do: apply(mod, fun, args)
  defp invoke_job(fun) when is_function(fun, 0), do: fun.()
  defp previous_instance() do
    case Parent.children() do
      [child] -> {:ok, child.pid}
      [] -> :error
    end
  end
  defp enqueue_next_tick(state, delay, opts \\ []) do
    telemetry(state, :next_tick, %{in: delay})
    if state.mode == :auto do
      now = Keyword.get_lazy(opts, :now, fn -> :erlang.monotonic_time(:millisecond) end)
      next_tick_abs_time = now + delay
      Process.send_after(self(), {:tick, next_tick_abs_time}, next_tick_abs_time, abs: true)
    end
  end
  defp telemetry(state, event, data, measurements \\ %{})
  if Mix.env() != :test do
    defp telemetry(_state, :next_tick, _data, _measurements), do: :ok
  end
  defp telemetry(%{telemetry_id: nil}, _event, _data, _measurements), do: :ok
  defp telemetry(state, event, data, measurements) do
    :telemetry.execute(
      [__MODULE__, state.telemetry_id, event],
      measurements,
      Map.merge(data, %{scheduler: self()})
    )
  end
end | 
	lib/periodic.ex | 0.895517 | 0.745375 | 
	periodic.ex | 
	starcoder | 
| 
	defmodule LogiStd.Sink.File do
  @moduledoc """
  A sink which writes log messages to a file.
  ## Note
  The sink has no overload protections,
  so it is recommended to use it together with (for example) `Logi.Sink.Flowlimiter`
  in a production environment.
  (See also `LogiStd.Sink.Ha`)
  ## Examples
  ```
  iex> sink = LogiStd.Sink.File.new(:sample_file, "sample.log")
  iex> {:ok, _} = Logi.Channel.install_sink(sink, :info)
  iex> require Logi
  iex> Logi.info "hello world"
  iex> :file.read_file "sample.log"
  {:ok, "2016-12-05 23:30:39.810 [info] nonode@nohost <0.159.0> nil:nil:49 [] hello world\\n"}
  ```
  ## Memo (TODO: rewrite)
  Log file rotation is handled by the configured implementation of `Logi.File.Rotator`
  (e.g., rotation by date or size, deletion of old log files, compression on rotation, etc.).
  The writer process periodically checks the output path and reopens the log file if it no
  longer exists. Note, however, that if the file (i.e., the i-node on Unix) changes while the
  path stays the same, as when the file is overwritten, the change is not detected and log
  messages will continue to be written to the replaced file.
  If the log file cannot be opened or written to (disk full, permission error, etc.),
  an `alert` level error is reported using the logger specified when the writer was started.
  (See also `LogiStd.Sink.Ha`)
  """
  @typedoc "A log file path."
  @type filepath :: binary
  @typedoc """
  Options for this sink.
  ### layout
  - The layout instance used by the sink
  - Default: `LogiStd.Sink.File.default_layout`
  ### logger
  - The logger instance which is used to report internal events of the sink process
  - Default: `Logi.default_logger`
  ### rotator
  - The rotator instance used by the sink
  - Default: `LogiStd.File.Rotator.Noop.new`
  ### open_opt
  - Log file open options (i.e., the second argument of `:file:open/2`)
  - Default: `[:append, :raw, :delayed_write]`
  """
  @type options :: [
    layout: Logi.Layout.layout,
    logger: Logi.logger,
    rotator: LogiStd.File.Rotator.rotator,
    open_opt: open_options
  ]
  @typedoc """
  Log file open options.
  See [file:mode/0](http://www.erlang.org/doc/man/file.html#type-mode) for more details.
  """
  @type open_options :: list
  @doc "Creates a new sink."
  @spec new(Logi.Sink.id, :file.name_all, options) :: Logi.Sink.sink
  def new(sink_id, filepath, options \\ []) do
    :logi_sink_file.new sink_id, filepath, options
  end
  @doc """
  Default layout.
  Returns `LogiStd.Layout.Default.new |> LogiStd.Layout.Limit.new |> LogiStd.Layout.Newline.new`.
  """
  @spec default_layout :: Logi.Layout.layout
  def default_layout do
    :logi_sink_file.default_layout
  end
end | 
	lib/logi_std/sink/file.ex | 0.743727 | 0.670864 | 
	file.ex | 
	starcoder | 
| 
	defmodule Xxo.State do
  @moduledoc """
  This module is used to track game state and declare when a user has won/lost.
  """
  require Logger
  #   alias Xxo.Game
  @doc """
  Checks if there are 3 symbols in a row
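  For example, with a board represented as a map keyed by `{row, column}` tuples and `:x`
  as an illustrative symbol, a board whose top row is all `:x` yields `{:winner, :x}`,
  while a board with no complete line yields `{:nowinner}`.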
  """
  def winner?(symbol, state) do
    won(symbol, state.board)
  end
  ### Top row win
  defp won(
         player,
         %{
           {0, 0} => player,
           {0, 1} => player,
           {0, 2} => player,
           {1, 0} => _,
           {1, 1} => _,
           {1, 2} => _,
           {2, 0} => _,
           {2, 1} => _,
           {2, 2} => _
         } = _board
       ),
       do: {:winner, player}
  # Middle row win
  defp won(
         player,
         %{
           {0, 0} => _,
           {0, 1} => _,
           {0, 2} => _,
           {1, 0} => player,
           {1, 1} => player,
           {1, 2} => player,
           {2, 0} => _,
           {2, 1} => _,
           {2, 2} => _
         } = _board
       ),
       do: {:winner, player}
  # Bottom row win
  defp won(
         player,
         %{
           {0, 0} => _,
           {0, 1} => _,
           {0, 2} => _,
           {1, 0} => _,
           {1, 1} => _,
           {1, 2} => _,
           {2, 0} => player,
           {2, 1} => player,
           {2, 2} => player
          } = _board
       ),
       do: {:winner, player}
  # Left column win
  defp won(
         player,
         %{
           {0, 0} => player,
           {0, 1} => _,
           {0, 2} => _,
           {1, 0} => player,
           {1, 1} => _,
           {1, 2} => _,
           {2, 0} => player,
           {2, 1} => _,
           {2, 2} => _
          } = _board
       ),
       do: {:winner, player}
  # Middle column win
  defp won(
         player,
         %{
           {0, 0} => _,
           {0, 1} => player,
           {0, 2} => _,
           {1, 0} => _,
           {1, 1} => player,
           {1, 2} => _,
           {2, 0} => _,
           {2, 1} => player,
           {2, 2} => _
          } = _board
       ),
       do: {:winner, player}
  # Right column win
  defp won(
         player,
         %{
           {0, 0} => _,
           {0, 1} => _,
           {0, 2} => player,
           {1, 0} => _,
           {1, 1} => _,
           {1, 2} => player,
           {2, 0} => _,
           {2, 1} => _,
           {2, 2} => player
          } = _board
       ),
       do: {:winner, player}
  # Diagonal win, top left -> bottom right
  defp won(
         player,
         %{
           {0, 0} => player,
           {0, 1} => _,
           {0, 2} => _,
           {1, 0} => _,
           {1, 1} => player,
           {1, 2} => _,
           {2, 0} => _,
           {2, 1} => _,
           {2, 2} => player
          } = _board
       ),
       do: {:winner, player}
  # Diagonal win, bottom left -> top right
  defp won(
         player,
         %{
           {0, 0} => _,
           {0, 1} => _,
           {0, 2} => player,
           {1, 0} => _,
           {1, 1} => player,
           {1, 2} => _,
           {2, 0} => player,
           {2, 1} => _,
           {2, 2} => _
          } = _board
       ),
       do: {:winner, player}
  # No other function matches - game is still in progress
  defp won(_player, _board), do: {:nowinner}
end | 
	lib/xxo/state.ex | 0.586523 | 0.616719 | 
	state.ex | 
	starcoder | 
| 
	defmodule Kademlia do
  @moduledoc """
    Kademlia.ex is in fact a K* implementation. K* is a modified version of Kademlia
    using the same KBuckets scheme to keep track of which nodes to remember, but instead
    of the XOR metric it uses geometric distance on a ring as the node distance.
    Node distance is symmetric on the ring.
  """
  use GenServer
  alias Network.PeerHandler, as: Client
  alias Object.Server, as: Server
  alias Model.KademliaSql
  @k 3
  @relay_factor 3
  @broadcast_factor 50
  defstruct tasks: %{}, network: nil, cache: Lru.new(1024)
  @type t :: %Kademlia{tasks: Map.t(), network: KBuckets.t(), cache: Lru.t()}
  def start_link(_opts) do
    GenServer.start_link(__MODULE__, :ok, name: __MODULE__, hibernate_after: 5_000)
  end
  @spec ping(any) :: any
  def ping(node_id) do
    rpc(find_node(node_id), [Client.ping()])
  end
  @doc """
    broadcast is used to broadcast self generated blocks/transactions through the network
  """
  def broadcast(msg) do
    msg = [Client.publish(), msg]
    list = KBuckets.to_list(network())
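    # Target √(network size) random peers, but at least @broadcast_factor
    # (capped by the actual network size via take_random/2).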
    max = length(list) |> :math.sqrt() |> trunc
    num = if max > @broadcast_factor, do: max, else: @broadcast_factor
    list
    |> Enum.reject(fn a -> KBuckets.is_self(a) end)
    |> Enum.take_random(num)
    |> Enum.each(fn item -> rpcast(item, msg) end)
  end
  @doc """
    relay is used to forward NEW received blocks/transactions further through the network
  """
  def relay(msg) do
    msg = [Client.publish(), msg]
    KBuckets.next_n(network(), @relay_factor)
    |> Enum.each(fn item -> rpcast(item, msg) end)
  end
  @doc """
    store/1 is the same as store/2 but uses Object.key/1 and Object.encode!/1
  """
  @spec store(tuple()) :: any()
  def store(object) when is_tuple(object) do
    key = Object.key(object)
    value = Object.encode!(object)
    store(key, value)
  end
  @doc """
    store() stores the given key-value pair in the @k nodes
    that are closest to the key
  """
  @spec store(binary(), binary()) :: any()
  def store(key, value) when is_binary(value) do
    nodes = find_nodes(key)
    key = hash(key)
    nearest = KBuckets.nearest_n(nodes, key, @k)
    # :io.format("Storing #{value} at ~p as #{Base16.encode(key)}~n", [Enum.map(nearest, &port/1)])
    rpc(nearest, [Client.store(), key, value])
  end
  @doc """
    find_value() is different from store() in that it might return
    an earlier result
  """
  @spec find_value(binary()) :: any()
  def find_value(key) do
    key = hash(key)
    case do_node_lookup(key) do
      {_, nearest} ->
        nodes = do_find_nodes(key, nearest, KBuckets.k(), Client.find_value())
        case nodes do
          {:value, value, visited} ->
            result = KBuckets.nearest_n(visited, key, KBuckets.k())
            call(fn _from, state ->
              network = KBuckets.insert_items(state.network, visited)
              cache = Lru.insert(state.cache, key, result)
              {:reply, :ok, %Kademlia{state | network: network, cache: cache}}
            end)
            # Kademlia logic: Writing found result to second nearest node
            case Enum.at(result, 1) do
              nil -> :nothing
              second_nearest -> rpcast(second_nearest, [Client.store(), key, value])
            end
            value
          visited ->
            call(fn _from, state ->
              network = KBuckets.insert_items(state.network, visited)
              {:reply, :ok, %Kademlia{state | network: network}}
            end)
            nil
        end
    end
  end
  @spec find_node(Wallet.address()) :: nil | KBuckets.Item.t()
  def find_node(address) do
    case find_nodes(address) do
      [] ->
        nil
      [first | _] ->
        case Wallet.address!(first.node_id) do
          ^address -> first
          _ -> nil
        end
    end
  end
  @spec find_nodes(any()) :: [KBuckets.Item.t()]
  def find_nodes(key) do
    key = hash(key)
    case do_node_lookup(key) do
      {:cached, result} ->
        result
      {:network, nearest} ->
        visited = do_find_nodes(key, nearest, KBuckets.k(), Client.find_node())
        result = KBuckets.nearest_n(visited, key, KBuckets.k())
        call(fn _from, state ->
          network = KBuckets.insert_items(state.network, visited)
          cache = Lru.insert(state.cache, key, result)
          {:reply, :ok, %Kademlia{state | network: network, cache: cache}}
        end)
        result
    end
  end
  @spec find_node_lookup(any()) :: [KBuckets.item()]
  def find_node_lookup(key) do
    {_, nodes} = do_node_lookup(key)
    nodes
  end
  def network() do
    call(fn _from, state -> {:reply, state.network, state} end)
  end
  def handle_call({:call, fun}, from, state) do
    fun.(from, state)
  end
  def handle_call({:append, key, value, _store_self}, _from, queue) do
    KademliaSql.append!(key, value)
    {:reply, :ok, queue}
  end
  def handle_call(:get_network, _from, state) do
    {:reply, state.network, state}
  end
  def handle_call({:register_node, node_id, server}, _from, state) do
    {:reply, :ok, register_node(state, node_id, server)}
  end
  def handle_info(:save, state) do
    spawn(fn -> Chain.store_file(Diode.data_dir("kademlia.etf"), state) end)
    Process.send_after(self(), :save, 60_000)
    {:noreply, state}
  end
  def handle_info(:contact_seeds, state) do
    for seed <- Diode.seeds() do
      %URI{userinfo: node_id, host: address, port: port} = URI.parse(seed)
      id =
        case node_id do
          nil -> Wallet.new()
          str -> Wallet.from_address(Base16.decode(str))
        end
      Network.Server.ensure_node_connection(Network.PeerHandler, id, address, port)
    end
    Process.send_after(self(), :contact_seeds, 60_000)
    {:noreply, state}
  end
  def handle_continue(:seed, state) do
    Process.send_after(self(), :save, 60_000)
    handle_info(:contact_seeds, state)
    {:noreply, state}
  end
  # Private call used by PeerHandler when connections are established
  def handle_cast({:register_node, node_id, server}, state) do
    {:noreply, register_node(state, node_id, server)}
  end
  # Private call used by PeerHandler when is stable for 10 msgs and 30 seconds
  def handle_cast({:stable_node, node_id, server}, state) do
    case KBuckets.item(state.network, node_id) do
      nil ->
        {:noreply, register_node(state, node_id, server)}
      node ->
        network = KBuckets.update_item(state.network, %{node | retries: 0})
        if node.retries > 0, do: redistribute(network, node)
        {:noreply, %{state | network: network}}
    end
  end
  # Private call used by PeerHandler when connections fail
  def handle_cast({:failed_node, node}, state) do
    case KBuckets.item(state.network, node) do
      nil -> {:noreply, state}
      item -> {:noreply, %{state | network: do_failed_node(item, state.network)}}
    end
  end
  defp register_node(state = %Kademlia{network: network}, node_id, server) do
    node = %KBuckets.Item{
      node_id: node_id,
      object: server,
      last_seen: System.os_time(:second)
    }
    network =
      if KBuckets.member?(network, node_id) do
        KBuckets.update_item(network, node)
      else
        KBuckets.insert_item(network, node)
      end
    # Because of bucket size limit, the new node might not get stored
    if KBuckets.member?(network, node_id) do
      redistribute(network, node)
    end
    %{state | network: network}
  end
  def rpc(nodes, call) when is_list(nodes) do
    me = self()
    ref = make_ref()
    Enum.map(nodes, fn node ->
      spawn_link(fn ->
        send(me, {ref, rpc(node, call)})
      end)
    end)
    |> Enum.map(fn _pid ->
      receive do
        {^ref, ret} ->
          ret
      end
    end)
  end
  def rpc(%KBuckets.Item{node_id: node_id} = node, call) do
    pid = ensure_node_connection(node)
    try do
      GenServer.call(pid, {:rpc, call}, 2000)
    rescue
      error ->
        IO.puts("Failed to get a result from #{Wallet.printable(node_id)} #{inspect(error)}")
        []
    catch
      :exit, {:timeout, _} ->
        IO.puts("Timeout while getting a result from #{Wallet.printable(node_id)}")
        # TODO: This *always* happens when a node is still syncing. How to handle this better?
        # Process.exit(pid, :timeout)
        []
      any, what ->
        IO.puts(
          "Failed(2) to get a result from #{Wallet.printable(node_id)} #{inspect({any, what})}"
        )
        []
    end
  end
  def rpcast(%KBuckets.Item{} = node, call) do
    GenServer.cast(ensure_node_connection(node), {:rpc, call})
  end
  @doc """
    redistribute resends all key/values that are nearer to the given node to
    that node
  """
  @max_key 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
  def redistribute(network, node) do
    node = %KBuckets.Item{} = KBuckets.item(network, node)
    # IO.puts("redistribute(#{inspect(node)})")
    previ =
      case KBuckets.prev_n(network, node, 1) do
        [prev] -> KBuckets.integer(prev)
        [] -> KBuckets.integer(node)
      end
    nodei = KBuckets.integer(node)
    nexti =
      case KBuckets.next_n(network, node, 1) do
        [next] -> KBuckets.integer(next)
        [] -> KBuckets.integer(node)
      end
    range_start = rem(div(previ + nodei, 2), @max_key)
    range_end = rem(div(nexti + nodei, 2), @max_key)
    objs = KademliaSql.objects(range_start, range_end)
    # IO.puts("redistribute() -> #{length(objs)}")
    Enum.each(objs, fn {key, value} -> rpcast(node, [Client.store(), key, value]) end)
  end
  # -------------------------------------------------------------------------------------
  # Helpers calls
  # -------------------------------------------------------------------------------------
  def port(nil) do
    nil
  end
  def port(node) do
    if is_atom(node.object), do: node.object, else: Object.Server.edge_port(node.object)
  end
  def init(:ok) do
    kb =
      Chain.load_file(Diode.data_dir("kademlia.etf"), fn ->
        %Kademlia{network: KBuckets.new(Diode.miner())}
      end)
    {:ok, kb, {:continue, :seed}}
  end
  @doc "Method used for testing"
  def reset() do
    call(fn _from, _state ->
      {:reply, :ok, %Kademlia{network: KBuckets.new(Diode.miner())}}
    end)
  end
  def append(key, value, store_self \\ false) do
    GenServer.call(__MODULE__, {:append, key, value, store_self})
  end
  # -------------------------------------------------------------------------------------
  # Private calls
  # -------------------------------------------------------------------------------------
  defp ensure_node_connection(%KBuckets.Item{node_id: node_id, object: :self}) do
    Network.Server.ensure_node_connection(
      Network.PeerHandler,
      node_id,
      "localhost",
      Diode.peer_port()
    )
  end
  defp ensure_node_connection(%KBuckets.Item{node_id: node_id, object: server}) do
    host = Server.host(server)
    port = Server.peer_port(server)
    Network.Server.ensure_node_connection(Network.PeerHandler, node_id, host, port)
  end
  defp do_failed_node(%{object: :self}, network) do
    network
  end
  defp do_failed_node(item, network) do
    now = System.os_time(:second)
    case item.retries do
      0 ->
        KBuckets.update_item(network, %{item | retries: 1, last_seen: now + 5})
      failures when failures > 10 ->
        # With
        # 5 + 5×5 + 5×5×5 + 5×5×5×5 + 5×5×5×5×5 +
        # 5x (5×5×5×5×5×5)
        # This will delete an item after 24h of failures
        IO.puts("Deleting node #{Wallet.printable(item.node_id)} after 10 retries")
        KBuckets.delete_item(network, item)
      failures ->
        factor = min(failures, 5)
        next = now + round(:math.pow(5, factor))
        KBuckets.update_item(network, %{item | retries: failures + 1, last_seen: next})
    end
  end
  defp do_find_nodes(key, nearest, k, cmd) do
    # :io.format("KademliaSearch.find_nodes(key=#{Base16.encode(key)}, nearest=~p, k=#{k}, cmd=#{cmd})~n", [Enum.map(nearest, &port/1)])
    KademliaSearch.find_nodes(key, nearest, k, cmd)
  end
  # Retrieves for the target key either the last cached values or
  # the nearest k entries from the KBuckets store
  @spec do_node_lookup(any()) :: {:network | :cached, [KBuckets.item()]}
  defp do_node_lookup(key) do
    call(fn _from, state ->
      # case Lru.get(state.cache, key) do
      # nil ->
      nodes = {:network, KBuckets.nearest_n(state.network, key, KBuckets.k())}
      # cached -> {:cached, cached}
      # end
      # :io.format("do_node_lookup(key) -> ~p, ~p~n", [elem(nodes, 0), length(elem(nodes, 1))])
      {:reply, nodes, state}
    end)
  end
  defp call(fun) do
    GenServer.call(__MODULE__, {:call, fun})
  end
  def hash(binary) do
    Diode.hash(binary)
  end
end | 
	lib/kademlia.ex | 0.839767 | 0.437763 | 
	kademlia.ex | 
	starcoder | 
| 
	defmodule Zaryn.BeaconChain.Subset do
  @moduledoc """
  Represents a beacon chain subset running inside a process,
  waiting to receive transactions to register in the current beacon slot
  """
  alias Zaryn.BeaconChain.Slot
  alias Zaryn.BeaconChain.Slot.EndOfNodeSync
  alias Zaryn.BeaconChain.Slot.TransactionSummary
  alias Zaryn.BeaconChain.SlotTimer
  alias Zaryn.BeaconChain.Summary
  alias Zaryn.BeaconChain.SummaryTimer
  alias __MODULE__.P2PSampling
  alias Zaryn.BeaconChain.SubsetRegistry
  alias Zaryn.Crypto
  alias Zaryn.Election
  alias Zaryn.P2P
  alias Zaryn.P2P.Message.ReplicateTransaction
  alias Zaryn.TransactionChain
  alias Zaryn.TransactionChain.Transaction
  alias Zaryn.TransactionChain.Transaction.ValidationStamp
  alias Zaryn.TransactionChain.TransactionData
  alias Zaryn.Utils
  use GenServer
  require Logger
  def start_link(opts) do
    subset = Keyword.get(opts, :subset)
    GenServer.start_link(__MODULE__, [subset], name: via_tuple(subset))
  end
  @doc """
  Add transaction summary to the current slot for the given subset
  """
  @spec add_transaction_summary(subset :: binary(), TransactionSummary.t()) :: :ok
  def add_transaction_summary(subset, tx_summary = %TransactionSummary{})
      when is_binary(subset) do
    GenServer.cast(via_tuple(subset), {:add_transaction_summary, tx_summary})
  end
  @doc """
  Add an end of synchronization to the current slot for the given subset
  """
  @spec add_end_of_node_sync(subset :: binary(), EndOfNodeSync.t()) :: :ok
  def add_end_of_node_sync(subset, end_of_node_sync = %EndOfNodeSync{}) when is_binary(subset) do
    GenServer.cast(via_tuple(subset), {:add_end_of_node_sync, end_of_node_sync})
  end
  @doc """
  Add the beacon slot proof for validation
  """
  @spec add_slot(Slot.t(), Crypto.key(), binary()) :: :ok
  def add_slot(slot = %Slot{subset: subset}, node_public_key, signature)
      when is_binary(node_public_key) and is_binary(signature) do
    GenServer.cast(via_tuple(subset), {:add_slot, slot, node_public_key, signature})
  end
  @doc """
  Get the current slot
  """
  @spec get_current_slot(binary()) :: Slot.t()
  def get_current_slot(subset) when is_binary(subset) do
    GenServer.call(via_tuple(subset), :get_current_slot)
  end
  defp via_tuple(subset) do
    {:via, Registry, {SubsetRegistry, subset}}
  end
  def init([subset]) do
    {:ok,
     %{
       node_public_key: Crypto.first_node_public_key(),
       subset: subset,
       current_slot: %Slot{subset: subset, slot_time: SlotTimer.next_slot(DateTime.utc_now())}
     }}
  end
  def handle_cast(
        {:add_transaction_summary,
         tx_summary = %TransactionSummary{address: address, type: type}},
        state = %{current_slot: current_slot, subset: subset}
      ) do
    if Slot.has_transaction?(current_slot, address) do
      {:noreply, state}
    else
      current_slot = Slot.add_transaction_summary(current_slot, tx_summary)
      Logger.info("Transaction #{type}@#{Base.encode16(address)} added to the beacon chain",
        beacon_subset: Base.encode16(subset)
      )
      # Request the P2P view sampling if not performed within the last 3 seconds
      case Map.get(state, :sampling_time) do
        nil ->
          new_state =
            state
            |> Map.put(:current_slot, add_p2p_view(current_slot))
            |> Map.put(:sampling_time, DateTime.utc_now())
          {:noreply, new_state}
        time ->
          if DateTime.diff(DateTime.utc_now(), time) > 3 do
            new_state =
              state
              |> Map.put(:current_slot, add_p2p_view(current_slot))
              |> Map.put(:sampling_time, DateTime.utc_now())
            {:noreply, new_state}
          else
            {:noreply, %{state | current_slot: current_slot}}
          end
      end
    end
  end
  def handle_cast(
        {:add_end_of_node_sync, end_of_sync = %EndOfNodeSync{public_key: node_public_key}},
        state = %{current_slot: current_slot, subset: subset}
      ) do
    Logger.info(
      "Node #{Base.encode16(node_public_key)} synchronization ended added to the beacon chain",
      beacon_subset: Base.encode16(subset)
    )
    current_slot = Slot.add_end_of_node_sync(current_slot, end_of_sync)
    {:noreply, %{state | current_slot: current_slot}}
  end
  def handle_info(
        {:create_slot, time},
        state = %{subset: subset, node_public_key: node_public_key, current_slot: current_slot}
      ) do
    if beacon_slot_node?(subset, time, node_public_key) do
      handle_slot(time, current_slot, node_public_key)
      if summary_time?(time) and beacon_summary_node?(subset, time, node_public_key) do
        handle_summary(time, subset)
      end
    end
    {:noreply, next_state(state, time)}
  end
  defp handle_slot(
         time,
         current_slot = %Slot{subset: subset},
         node_public_key
       ) do
    current_slot = ensure_p2p_view(current_slot)
    # Avoid storing or dispatching an empty beacon slot
    unless Slot.empty?(current_slot) do
      beacon_transaction = create_beacon_transaction(current_slot)
      summary_time =
        if summary_time?(time) do
          time
        else
          SummaryTimer.next_summary(time)
        end
      # The local summary node pre-stores the transaction
      if beacon_summary_node?(subset, summary_time, node_public_key) do
        chain_address = beacon_summary_address(subset, summary_time)
        TransactionChain.write_transaction(beacon_transaction, chain_address)
      end
      # Before the summary time, we dispatch the transaction to the other summary nodes
      unless summary_time?(time) do
        next_time = SlotTimer.next_slot(time)
        broadcast_beacon_transaction(subset, next_time, beacon_transaction, node_public_key)
      end
    end
  end
  defp next_state(state = %{subset: subset}, time) do
    next_time = SlotTimer.next_slot(time)
    Map.put(
      state,
      :current_slot,
      %Slot{subset: subset, slot_time: next_time}
    )
  end
  defp broadcast_beacon_transaction(subset, next_time, transaction, node_public_key) do
    %Slot{subset: subset, slot_time: next_time}
    |> Slot.involved_nodes()
    |> Enum.reject(&(&1.first_public_key == node_public_key))
    |> P2P.broadcast_message(%ReplicateTransaction{
      transaction: transaction,
      welcome_node_public_key: Crypto.last_node_public_key()
    })
  end
  defp handle_summary(time, subset) do
    chain_address = beacon_summary_address(subset, time)
    case TransactionChain.get(chain_address, data: [:content]) do
      [] ->
        :ok
      beacon_chain ->
        beacon_chain
        |> create_summary_transaction(subset, time)
        |> TransactionChain.write_transaction()
    end
  end
  defp summary_time?(time) do
    SummaryTimer.match_interval?(DateTime.truncate(time, :millisecond))
  end
  defp beacon_summary_address(subset, time) do
    Crypto.derive_beacon_chain_address(subset, time, true)
  end
  defp beacon_slot_node?(subset, slot_time, node_public_key) do
    %Slot{subset: subset, slot_time: slot_time}
    |> Slot.involved_nodes()
    |> Utils.key_in_node_list?(node_public_key)
  end
  defp beacon_summary_node?(subset, summary_time, node_public_key) do
    node_list =
      Enum.filter(
        P2P.authorized_nodes(),
        &(DateTime.compare(&1.authorization_date, summary_time) == :lt)
      )
    Election.beacon_storage_nodes(
      subset,
      summary_time,
      node_list,
      Election.get_storage_constraints()
    )
    |> Utils.key_in_node_list?(node_public_key)
  end
  defp add_p2p_view(current_slot = %Slot{subset: subset}) do
    p2p_views = P2PSampling.get_p2p_views(P2PSampling.list_nodes_to_sample(subset))
    Slot.add_p2p_view(current_slot, p2p_views)
  end
  defp ensure_p2p_view(slot = %Slot{p2p_view: %{availabilities: <<>>}}) do
    add_p2p_view(slot)
  end
  defp ensure_p2p_view(slot = %Slot{}), do: slot
  defp create_beacon_transaction(slot = %Slot{subset: subset, slot_time: slot_time}) do
    {prev_pub, prev_pv} = Crypto.derive_beacon_keypair(subset, SlotTimer.previous_slot(slot_time))
    {next_pub, _} = Crypto.derive_beacon_keypair(subset, slot_time)
    tx =
      Transaction.new(
        :beacon,
        %TransactionData{content: Slot.serialize(slot) |> Utils.wrap_binary()},
        prev_pv,
        prev_pub,
        next_pub
      )
    previous_address = Transaction.previous_address(tx)
    prev_tx =
      case TransactionChain.get_transaction(previous_address) do
        {:error, :transaction_not_exists} ->
          nil
        {:ok, prev_tx} ->
          prev_tx
      end
    stamp = create_validation_stamp(tx, prev_tx, slot_time)
    %{tx | validation_stamp: stamp}
  end
  defp create_summary_transaction(beacon_chain, subset, summary_time) do
    {prev_pub, prev_pv} = Crypto.derive_beacon_keypair(subset, summary_time)
    {pub, _} = Crypto.derive_beacon_keypair(subset, summary_time, true)
    previous_slots =
      Enum.map(beacon_chain, fn %Transaction{
                                  data: %TransactionData{content: content}
                                } ->
        {slot, _} = Slot.deserialize(content)
        slot
      end)
    tx_content =
      %Summary{subset: subset, summary_time: summary_time}
      |> Summary.aggregate_slots(previous_slots)
      |> Summary.serialize()
    tx =
      Transaction.new(
        :beacon_summary,
        %TransactionData{content: tx_content |> Utils.wrap_binary()},
        prev_pv,
        prev_pub,
        pub
      )
    stamp = create_validation_stamp(tx, Enum.at(beacon_chain, 0), summary_time)
    %{tx | validation_stamp: stamp}
  end
  defp create_validation_stamp(tx = %Transaction{}, prev_tx, time = %DateTime{}) do
    poi = [tx, prev_tx] |> Enum.filter(& &1) |> TransactionChain.proof_of_integrity()
    %ValidationStamp{
      proof_of_work: Crypto.first_node_public_key(),
      proof_of_integrity: poi,
      proof_of_election: <<0::size(512)>>,
      timestamp: time
    }
    |> ValidationStamp.sign()
  end
end | 
	lib/zaryn/beacon_chain/subset.ex | 0.864139 | 0.418905 | 
	subset.ex | 
	starcoder | 
| 
	defmodule Day25 do
  @moduledoc """
  Assembunny code interpreter, with output command
  """
  @clk_count_enough 10000
  defmodule State do
    defstruct pc: 0, a: 0, init_a: 0, b: 0, c: 1, d: 0, clk_last: 1, clk_count: 0, clk_error: false
  end
  def evaluate_file(path, state \\ %State{}) do
    path
    |> File.read!
    |> evaluate(state)
  end
  @doc """
  Evaluate the given instruction set
  """
  def evaluate(str, state \\ %State{}) do
    str
    |> parse_commands
    |> search(state)
  end
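  # Usage sketch (hypothetical input path): returns the lowest initial value
  # of register `a` for which the program emits an alternating 0,1,0,1,...
  # signal for at least @clk_count_enough transitions:
  #
  #   Day25.evaluate_file("priv/input.txt")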
  defp search(instructions, %State{a: a}) do
    new_state = %State{a: a, init_a: a}
    # IO.puts "Testing a = #{inspect a}"
    result = run(instructions, new_state)
    if !result.clk_error do
      result.init_a
    else
      search(instructions, %State{new_state | a: new_state.init_a + 1})
    end
  end
  defp run(instructions, state = %State{pc: pc}) do
    instruction = Enum.at(instructions, pc)
    # IO.puts "Eval: #{inspect instruction}"
    new_state = eval(instruction, state)
    # IO.inspect new_state
    if new_state.pc >= length(instructions) do
      new_state
    else
      run(instructions, new_state)
    end
  end
  defp eval({:cpy, value, target}, state) when is_atom(value) do
    state
    |> Map.put(target, Map.get(state, value))
    |> update_pc(1)
  end
  defp eval({:cpy, value, target}, state) when is_integer(value) do
    state
    |> Map.put(target, value)
    |> update_pc(1)
  end
  defp eval({:inc, target}, state) do
    state
    |> Map.update!(target, &(&1 + 1))
    |> update_pc(1)
  end
  defp eval({:dec, target}, state) do
    state
    |> Map.update!(target, &(&1 - 1))
    |> update_pc(1)
  end
  defp eval({:jnz, test, offset}, state) when is_atom(test) do
    val = Map.get(state, test)
    if 0 == val do
      update_pc(state, 1)
    else
      update_pc(state, offset)
    end
  end
  defp eval({:jnz, test, offset}, state) when is_integer(test) do
    if 0 == test do
      update_pc(state, 1)
    else
      update_pc(state, offset)
    end
  end
  defp eval({:out, target}, state) when is_atom(target) do
    val = Map.get(state, target)
    check_clk(val, state)
  end
  defp eval({:out, target}, state) when is_integer(target) do
    check_clk(target, state)
  end
  defp check_clk(a, state = %State{clk_last: a}) do
    # IO.puts "Matched, got: #{inspect a}, last: #{inspect a}, cnt: #{state.clk_count}"
    %State{state | clk_error: true, pc: 99999999}
  end
  defp check_clk(a, state = %State{clk_last: b, clk_count: cnt}) when a != b and cnt >= @clk_count_enough do
    # IO.puts "Got enough chars"
    %State{state | clk_error: false, pc: 99999999}
  end
  defp check_clk(a, state = %State{clk_last: b}) when a != b do
    # IO.puts "Ok, got: #{inspect a}, last: #{inspect b}"
    new = %State{state | clk_last: a, clk_count: state.clk_count + 1}
    update_pc(new, 1)
  end
  defp update_pc(state, offset) do
    Map.update!(state, :pc, &(&1 + offset))
  end
  defp parse_commands(str) do
    str
    |> String.trim
    |> String.split("\n")
    |> Enum.map(&String.trim/1)
    |> Enum.map(&convert_command/1)
  end
  defp convert_command(<<"inc ", rest :: binary>>) do
    {:inc, string_to_register(rest)}
  end
  defp convert_command(<<"dec ", rest :: binary>>) do
    {:dec, string_to_register(rest)}
  end
  defp convert_command(<<"cpy ", rest :: binary>>) do
    [value, register] = String.split(rest)
    {:cpy, convert_value(value), string_to_register(register)}
  end
  defp convert_command(<<"jnz ", rest :: binary>>) do
    [test, offset] = String.split(rest)
    {:jnz, convert_value(test), convert_value(offset)}
  end
  defp convert_command(<<"out ", rest :: binary>>) do
    {:out, convert_value(rest)}
  end
  defp convert_value(str) do
    list = String.to_charlist(str)
    if Enum.at(list, 0) >= ?a do
      string_to_register(str)
    else
      String.to_integer(str)
    end
  end
  defp string_to_register(str) do
    String.to_atom(str)
  end
end | 
	day25/lib/day25.ex | 0.527073 | 0.558086 | 
	day25.ex | 
	starcoder | 
| 
	defmodule RDF.XSD.Decimal do
  @moduledoc """
  `RDF.XSD.Datatype` for XSD decimals.
  """
  @type valid_value :: Decimal.t()
  use RDF.XSD.Datatype.Primitive,
    name: "decimal",
    id: RDF.Utils.Bootstrapping.xsd_iri("decimal")
  alias RDF.XSD
  alias Elixir.Decimal, as: D
  def_applicable_facet XSD.Facets.MinInclusive
  def_applicable_facet XSD.Facets.MaxInclusive
  def_applicable_facet XSD.Facets.MinExclusive
  def_applicable_facet XSD.Facets.MaxExclusive
  def_applicable_facet XSD.Facets.TotalDigits
  def_applicable_facet XSD.Facets.FractionDigits
  def_applicable_facet XSD.Facets.Pattern
  @doc false
  def min_inclusive_conform?(min_inclusive, value, _lexical) do
    not (D.cmp(value, D.new(min_inclusive)) == :lt)
  end
  @doc false
  def max_inclusive_conform?(max_inclusive, value, _lexical) do
    not (D.cmp(value, D.new(max_inclusive)) == :gt)
  end
  @doc false
  def min_exclusive_conform?(min_exclusive, value, _lexical) do
    D.cmp(value, D.new(min_exclusive)) == :gt
  end
  @doc false
  def max_exclusive_conform?(max_exclusive, value, _lexical) do
    D.cmp(value, D.new(max_exclusive)) == :lt
  end
  @doc false
  def total_digits_conform?(total_digits, value, _lexical) do
    do_digit_count(to_string(value)) <= total_digits
  end
  @doc false
  def fraction_digits_conform?(fraction_digits, value, _lexical) do
    do_fraction_digit_count(to_string(value)) <= fraction_digits
  end
  @doc false
  def pattern_conform?(pattern, _value, lexical) do
    XSD.Facets.Pattern.conform?(pattern, lexical)
  end
  @impl XSD.Datatype
  def lexical_mapping(lexical, opts) do
    if String.contains?(lexical, ~w[e E]) do
      @invalid_value
    else
      case D.parse(lexical) do
        {:ok, decimal} -> elixir_mapping(decimal, opts)
        :error -> @invalid_value
      end
    end
  end
  @impl XSD.Datatype
  @spec elixir_mapping(valid_value | integer | float | any, Keyword.t()) :: value
  def elixir_mapping(value, _)
  def elixir_mapping(%D{coef: coef}, _) when coef in ~w[qNaN sNaN inf]a,
    do: @invalid_value
  def elixir_mapping(%D{} = decimal, _),
    do: canonical_decimal(decimal)
  def elixir_mapping(value, opts) when is_integer(value),
    do: value |> D.new() |> elixir_mapping(opts)
  def elixir_mapping(value, opts) when is_float(value),
    do: value |> D.from_float() |> elixir_mapping(opts)
  def elixir_mapping(_, _), do: @invalid_value
  @doc false
  @spec canonical_decimal(valid_value) :: valid_value
  def canonical_decimal(decimal)
  def canonical_decimal(%D{coef: 0} = decimal),
    do: %{decimal | exp: -1}
  def canonical_decimal(%D{coef: coef, exp: 0} = decimal),
    do: %{decimal | coef: coef * 10, exp: -1}
  def canonical_decimal(%D{coef: coef, exp: exp} = decimal)
      when exp > 0,
      do: canonical_decimal(%{decimal | coef: coef * 10, exp: exp - 1})
  def canonical_decimal(%D{coef: coef} = decimal)
      when Kernel.rem(coef, 10) != 0,
      do: decimal
  def canonical_decimal(%D{coef: coef, exp: exp} = decimal),
    do: canonical_decimal(%{decimal | coef: Kernel.div(coef, 10), exp: exp + 1})
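  # A quick sanity sketch: trailing zeroes are dropped and at least one
  # fractional digit is kept, so "1.00" canonicalizes to "1.0" and "0" to "0.0":
  #
  #   iex> D.to_string(RDF.XSD.Decimal.canonical_decimal(D.new("1.00")))
  #   "1.0"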
  @impl XSD.Datatype
  @spec canonical_mapping(valid_value) :: String.t()
  def canonical_mapping(value)
  def canonical_mapping(%D{sign: sign, coef: :qNaN}),
    do: if(sign == 1, do: "NaN", else: "-NaN")
  def canonical_mapping(%D{sign: sign, coef: :sNaN}),
    do: if(sign == 1, do: "sNaN", else: "-sNaN")
  def canonical_mapping(%D{sign: sign, coef: :inf}),
    do: if(sign == 1, do: "Infinity", else: "-Infinity")
  def canonical_mapping(%D{} = decimal),
    do: D.to_string(decimal, :normal)
  @impl RDF.Literal.Datatype
  def do_cast(value)
  def do_cast(%XSD.String{} = xsd_string) do
    xsd_string.value |> new() |> canonical()
  end
  def do_cast(literal) do
    cond do
      XSD.Boolean.datatype?(literal) ->
        case literal.value do
          false -> new(0.0)
          true -> new(1.0)
        end
      XSD.Integer.datatype?(literal) ->
        new(literal.value)
      # we're catching XSD.Floats with this too
      is_float(literal.value) and XSD.Double.datatype?(literal) ->
        new(literal.value)
      true ->
        super(literal)
    end
  end
  @impl RDF.Literal.Datatype
  def do_equal_value_same_or_derived_datatypes?(left, right),
    do: XSD.Numeric.do_equal_value?(left, right)
  @impl RDF.Literal.Datatype
  def do_equal_value_different_datatypes?(left, right),
    do: XSD.Numeric.do_equal_value?(left, right)
  @impl RDF.Literal.Datatype
  def do_compare(left, right), do: XSD.Numeric.do_compare(left, right)
  @doc """
  The number of digits in the XML Schema canonical form of the literal value.
  """
  @spec digit_count(RDF.Literal.t()) :: non_neg_integer | nil
  def digit_count(literal)
  def digit_count(%RDF.Literal{literal: datatype_literal}), do: digit_count(datatype_literal)
  def digit_count(%datatype{} = literal) do
    cond do
      XSD.Integer.datatype?(datatype) ->
        XSD.Integer.digit_count(literal)
      datatype?(datatype) and datatype.valid?(literal) ->
        literal
        |> datatype.canonical_lexical()
        |> do_digit_count()
      true ->
        nil
    end
  end
  @doc false
  def do_digit_count(decimal_string) do
    decimal_string
    |> String.replace(".", "")
    |> String.replace("-", "")
    |> String.length()
  end
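  # For example (both the sign and the decimal point are ignored):
  #
  #   iex> RDF.XSD.Decimal.do_digit_count("-12.345")
  #   5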
  @doc """
  The number of digits to the right of the decimal point in the XML Schema canonical form of the literal value.
  """
  @spec fraction_digit_count(RDF.Literal.t()) :: non_neg_integer | nil
  def fraction_digit_count(%RDF.Literal{literal: datatype_literal}),
    do: fraction_digit_count(datatype_literal)
  def fraction_digit_count(%datatype{} = literal) do
    cond do
      XSD.Integer.datatype?(literal) ->
        0
      datatype?(literal) and datatype.valid?(literal) ->
        literal
        |> datatype.canonical_lexical()
        |> do_fraction_digit_count()
      true ->
        nil
    end
  end
  @doc false
  def do_fraction_digit_count(decimal_string) do
    [_, fraction] = String.split(decimal_string, ".")
    String.length(fraction)
  end
end | 
	lib/rdf/xsd/datatypes/decimal.ex | 0.886211 | 0.594963 | 
	decimal.ex | 
	starcoder | 
| 
	defmodule WaveshareHat.SMS do
  @moduledoc """
  Includes helper functions for sending and receiving SMS.
  """
  import WaveshareHat.Utils
  @doc """
  Set the number from which SMS messages are sent.
  """
  def set_local_number(pid, number), do: write(pid, "AT+CSCA=\"#{number}\"")
  @doc """
  Set the format of SMS messages to Text (1) or PDU (0) mode.
  """
  def set_sms_mode(pid, mode) when mode in [0, 1] do
    write(pid, "AT+CMGF=#{mode}")
  end
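  # Usage sketch (assumes `pid` is a serial/UART process accepted by
  # WaveshareHat.Utils.write/2): put the modem into text mode before
  # composing a message:
  #
  #   WaveshareHat.SMS.set_sms_mode(pid, 1)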
  @doc """
  Set the TE character set to GSM or UCS2.
  """
  def set_te_character_set(pid, set) when set in ["GSM", "UCS2"] do
    write(pid, "AT+CSCS=#{set}")
  end
  @doc """
  Set the SMS text mode parameters.
  The parameters must be a list in the following order: `[fo, vp, pid, dcs]`.
  Every value but the `fo` parameter is optional.
  Below is a list of possible parameters and their meaning.
  ## fo
  Depending on the command or result code:
  * first octet of GSM 03.40 SMS-DELIVER
  * SMS-SUBMIT (default `17`)
  * SMS-STATUS-REPORT
  * SMS-COMMAND (default `2`) in integer format.
  > SMS status report is supported under text mode if `<fo>` is set to `49`.
  ## vp
  Depending on SMS-SUBMIT `<fo>` setting:
  GSM 03.40 TP-Validity-Period either in
  * integer format (default `167`) or
  * in time-string format (refer `<dt>`)
  ## pid
  GSM 03.40 TP-Protocol-Identifier in integer format (default `0`).
  ## dcs
  GSM 03.38 SMS Data Coding Scheme in Integer format.
  """
  def set_sms_mode_params(pid, params) when is_list(params) do
    write(pid, "AT+CSMP=#{Enum.join(params, ",")}")
  end
  def set_sms_mode_params(pid, param) when is_integer(param) do
    set_sms_mode_params(pid, [param])
  end
  @doc """
  Set the SMS message text body.
  Can be used multiple times for multiple lines.
  Don't forget to finish the text input with an `end_mark/1`.
  """
  def set_sms_body(pid, body) when is_binary(body) do
    write(pid, body)
  end
  @doc """
  Reads an SMS message at a given position of the inbox.
  """
  def read_sms(pid, position), do: write(pid, "AT+CMGR=#{position}")
  @doc """
  Sends a previously entered message to a given number.
  This is the last command necessary for sending an SMS.
  Make sure to write a message first, like this:
      iex> WaveshareHat.set_local_number(pid, "YOUR_NUMBER")
      iex> WaveshareHat.set_sms_body(pid, "Hello there, friend!")
      iex> WaveshareHat.set_receiver(pid, "YOUR_FRIENDS_NUMBER")
      iex> WaveshareHat.end_mark(pid)
  """
  def set_receiver(pid, to_number), do: write(pid, "AT+CMGS=\"#{to_number}\"")
  # Configuration
  @doc """
  Set the ATE echo mode On (1) or Off (0).
  """
  def set_echo_mode(pid, mode) when mode in [0, 1] do
    write(pid, "ATE#{mode}")
  end
  @doc """
  Enable (1) or disable (0) COLP notifications.
  """
  def set_colp_notification(pid, status) when status in [0, 1] do
    write(pid, "AT+COLP=#{status}")
  end
  @doc """
  Set the new SMS message indication parameters.
  The parameters must be a list in the following order: `[mode, mt, bm, ds, bfr]`.
  Every value but the `mode` parameter is optional.
  Below is a list of possible parameters and their meaning.
  ## Mode
    * `0` -  Buffer unsolicited result codes in the TA. If TA result
    code buffer is full, indications can be buffered in some other place or the
    oldest indications may be discarded and replaced with the new received
    indications.
    * `1` - Discard indication and reject new received message
    unsolicited result codes when TA-TE link is reserved (e.g. in on-line data
    mode). Otherwise forward them directly to the TE.
    * `2` - Buffer unsolicited result codes in the TA when TA-TE
    link is reserved (e.g. in on-line data mode) and flush them to the TE after
    reservation. Otherwise forward them directly to the TE.
    * `3` - Forward unsolicited result codes directly to the TE.
    TA-TE link specific inband technique used to embed result codes and data
    when TA is in on-line data mode.
  ## mt
    * `0` - No SMS-DELIVER indications are routed to the TE.
    * `1` - If SMS-DELIVER is stored into ME/TA, indication of the memory location
    is routed to the TE using unsolicited result code: `+CMTI: <mem>,<index>`
    * `2` - SMS-DELIVERs (except class 2) are routed directly to the TE using unsolicited result code:
    `+CMT: [<alpha>],<length><CR><LF><pdu>` (PDU mode enabled) or
    `+CMT: <oa>,[<alpha>],<scts> [,<tooa>,<fo>,<pid>,<dcs>,<sca>,<tosca>,<length>]<CR><LF><data>`
      Class 2 messages result in indication as defined in <mt>=1.
    * `3` - Class 3 SMS-DELIVERs are routed directly to TE using unsolicited result codes defined in `<mt>=2`.
    Messages of other classes result in indication as defined in `<mt>=1`.
  ## bm
  > The rules for storing received CBMs depend on its data coding scheme (refer GSM 03.38 [2]), the setting of Select CBM Types (+CSCB) and this value:
  * `0` - No CBM indications are routed to the TE.
  * `2` - New CBMs are routed directly to the TE using unsolicited result code:
  `+CBM: <length><CR><LF><pdu>` (PDU mode enabled) or
  `+CBM: <sn>,<mid>,<dcs>,<page>,<pages><CR><LF><data>` (text mode enabled).
  ## ds
  * `0` - No SMS-STATUS-REPORTs are routed to the TE.
  * `1` - SMS-STATUS-REPORTs are routed to the TE using unsolicited result code:
  `+CDS:<length><CR><LF><pdu>` (PDU mode enabled) or
  `+CDS: <fo>,<mr>[,<ra>][,<tora>],<scts>,<dt>,<st>` (text mode enabled)
  ## bfr
  * `0` - TA buffer of unsolicited result codes defined within this Command is flushed to the TE when `<mode> 1...3` is entered (OK response shall be given before flushing the codes).
  * `1` - TA buffer of unsolicited result codes defined within this command is cleared when `<mode> 1...3` is entered
  """
  def set_new_sms_indicator(pid, modes) when is_list(modes) do
    write(pid, "AT+CNMI=#{Enum.join(modes, ",")}")
  end
  def set_new_sms_indicator(pid, mode) when is_integer(mode) do
    set_new_sms_indicator(pid, [mode])
  end
end | 
	lib/waveshare/sms.ex | 0.792906 | 0.5144 | 
	sms.ex | 
	starcoder | 
| 
	defmodule Ockam.Session.Handshake do
  @moduledoc """
  Session handshake behaviour.
  Used in `Ockam.Session.Pluggable` and `Ockam.Session.Separate` modules
  """
  @type message() :: Ockam.Message.t()
  @typedoc """
  State passed to the callbacks, can be modified, but following fields are required:
  `init_route` - (initiator only) session initial route to send initial handshake to
  `worker_address` - inner address of the data worker
  `handshake_address` - inner address of the handshake worker
  For pluggable sessions, `worker_address` and `handshake_address` are the same
  """
  @type handshake_state() :: %{
          :init_route => Ockam.Address.route(),
          :worker_address => Ockam.Address.t(),
          :handshake_address => Ockam.Address.t(),
          any() => any()
        }
  @doc """
  Create initial handshake message using handshake options and state
  Called by session initiator
  """
  @callback init(options :: Keyword.t(), handshake_state()) ::
              {:next, message(), handshake_state()} | {:error, any()}
  ## TODO: error result
  @doc """
  Handle handshake message on session initiator
  Arguments:
  `handshake_options` - options set in `handshake_options` for session worker
  `message` - handshake message received
  `handshake_state` - additional worker state, see type `handshake_state/0`
  Returns:
  `{:next, handshake_state()}` - wait for the next handshake message
  `{:next, message, handshake_state()}` - send a handshake message and wait for next one
  `{:ready, options, handshake_state()}` - start data worker with options and switch to data mode
  `{:ready, message, options, handshake_state()}` - start data worker and send handshake message
  """
  @callback handle_initiator(
              handshake_options :: Keyword.t(),
              message(),
              handshake_state()
            ) ::
              {:next, handshake_state()}
              | {:next, message(), handshake_state()}
              | {:ready, worker_options :: Keyword.t(), handshake_state()}
              | {:ready, message(), worker_options :: Keyword.t(), handshake_state()}
  @doc """
  Handle handshake on session responder
  see `handle_initiator/3`
  """
  @callback handle_responder(
              handshake_options :: Keyword.t(),
              message :: message(),
              state :: handshake_state()
            ) ::
              {:next, handshake_state()}
              | {:next, message(), handshake_state()}
              | {:ready, worker_options :: Keyword.t(), handshake_state()}
              | {:ready, message(), worker_options :: Keyword.t(), handshake_state()}
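  # A minimal implementation sketch of this behaviour (the module name and
  # payloads are hypothetical; messages are shown as plain maps with the usual
  # `onward_route`/`return_route`/`payload` fields):
  #
  #   defmodule MyHandshake do
  #     @behaviour Ockam.Session.Handshake
  #
  #     def init(_options, state) do
  #       msg = %{
  #         onward_route: state.init_route,
  #         return_route: [state.handshake_address],
  #         payload: "syn"
  #       }
  #       {:next, msg, state}
  #     end
  #
  #     # initiator: the responder replied, start the data worker as-is
  #     def handle_initiator(_options, _message, state), do: {:ready, [], state}
  #
  #     # responder: acknowledge and start the data worker
  #     def handle_responder(_options, message, state) do
  #       reply = %{
  #         onward_route: message.return_route,
  #         return_route: [state.handshake_address],
  #         payload: "ack"
  #       }
  #       {:ready, reply, [], state}
  #     end
  #   end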
end | 
	implementations/elixir/ockam/ockam/lib/ockam/session/handshake.ex | 0.773901 | 0.452173 | 
	handshake.ex | 
	starcoder | 
| 
	defmodule BNO055 do
  use BNO055.SensorInterface
  @moduledoc """
  This module is used to create commands for interacting with a Bosch BNO055 sensor. This
  module is intended to be an unopinionated collection of functions to created data for
  communicating with the sensor, but does not handle actual communication.
  Set functions return tuple with address and data to be written to that address
  ```elixir
  iex> BNO055.set_mode(:config)
  {0x3D, <<0x00>>}
  ```
  Get functions return tuple with address and number of bytes to read
  ```elixir
  iex> BNO055.get_chip_address
  {0x00, 1}
  ```
  `write` functions take the get or set results and return a binary for writing to the
  device based on the protocol type
  ```elixir
  iex> BNO055.set_mode(:config) |> BNO055.IO.I2c.write_data
  {:write, <<0x3D, 0x00>>}
  iex> BNO055.set_mode(:config) |> BNO055.IO.Serial.write_data
  <<0xAA, 0x00, 0x3D, 0x01, 0x00>>
  ```
  Decode functions take the data returned from get functions and return formatted results
  """
  @type register_address :: 0..0x6A
  @type set_result :: {register_address, binary}
  @type get_result :: {register_address, pos_integer}
  @type operational_modes :: :config | :acconly | :magonly | :gyroonly | :accmag |
    :accgyro | :maggyro | :amg | :imu | :compass | :m4g | :ndof_fmc_off | :ndof
  @type power_mode :: :normal | :lowpower | :suspend
  @type axis_remap :: :x_axis|:y_axis|:z_axis
  @type axis_sign :: :positive | :negative
  @doc """
  Sets the operational mode of the BNO055 sensor.
  
  ## Valid Modes:
   - :config - Used to configure the sensor
  ** Non-fusion modes **
   - :acconly - Only accelerometer sensor on
   - :magonly - Only magnetometer sensor on
   - :gyroonly - Only gyroscope sensor on
   - :accmag - Both accelerometer & magnetometer sensors on
   - :accgyro - Both accelerometer & gyroscope sensors on
   - :maggyro - Both magnetometer & gyroscope sensors on
   - :amg - All three sensors on, but no fusion data generated
  ** Fusion modes **
   - :imu - the relative orientation of the BNO055 in space is calculated from the accelerometer and gyroscope data. The calculation is fast (i.e. high output data rate)
   - :compass - the absolute orientation of the BNO055 is given. (requires calibration. see datasheet)
   - :m4g - Magnet for Gyroscope - similar to IMU, but uses magnetometer to detect rotation
   - :ndof_fmc_off - same as NDOF, but with Fast Magnetometer Calibration turned off
   - :ndof - Fusion mode with 9 degrees of freedom where the fused absolute orientation data is calculate from all three sensors.
  See section 3.3 of the datasheet for more detailed information
  on the operational modes.
  """
  @spec set_mode(operational_modes) :: set_result
  def set_mode(:config), do: {@opr_mode_addr, <<0x00>>}
  def set_mode(:acconly), do: {@opr_mode_addr, <<0x01>>}
  def set_mode(:magonly), do: {@opr_mode_addr, <<0x02>>}
  def set_mode(:gyroonly), do: {@opr_mode_addr, <<0x03>>}
  def set_mode(:accmag), do: {@opr_mode_addr, <<0x04>>}
  def set_mode(:accgyro), do: {@opr_mode_addr, <<0x05>>}
  def set_mode(:maggyro), do: {@opr_mode_addr, <<0x06>>}
  def set_mode(:amg), do: {@opr_mode_addr, <<0x07>>}
  def set_mode(:imu), do: {@opr_mode_addr, <<0x08>>}
  def set_mode(:compass), do: {@opr_mode_addr, <<0x09>>}
  def set_mode(:m4g), do: {@opr_mode_addr, <<0x0A>>}
  def set_mode(:ndof_fmc_off), do: {@opr_mode_addr, <<0x0B>>}
  def set_mode(:ndof), do: {@opr_mode_addr, <<0x0C>>}
  def set_mode(inv_mode), do: raise ArgumentError, "Invalid mode #{inv_mode} given!"
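  # For example, selecting the 9-degrees-of-freedom fusion mode yields the
  # register/value pair that the IO layer turns into a bus write:
  #
  #   iex> BNO055.set_mode(:ndof)
  #   {0x3D, <<0x0C>>}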
  @doc """
  Sets if an external crystal is attached to the sensor.
  ** Sensor must be in config mode before receiving this command
  """
  @spec set_external_crystal(true|false) :: set_result
  def set_external_crystal(true), do: {@sys_trigger_addr, <<0x80>>}
  def set_external_crystal(false), do: {@sys_trigger_addr, <<0x00>>}
  @doc """
  Set the sensor calibration offsets by sending previously generated
  calibration data received from `get_calibration/0`, or a decoded map
  of calibration data.
  ```
  %{
    accel: %{
      x: acc_x,
      y: acc_y,
      z: acc_z,
      radius: acc_radius
    },
    mag: %{
      x: mag_x,
      y: mag_y,
      z: mag_z,
      radius: mag_radius
    },
    gyro: %{
      x: gyro_x,
      y: gyro_y,
      z: gyro_z
    }
  }
  ```
  See section 3.6.4 of datasheet for detailed information about the valid
  values for sensor configuration.
  """
  @spec set_calibration(binary) :: set_result
  def set_calibration(data) when is_binary(data) and byte_size(data) == 22 do
    {@accel_offset_x_lsb_addr, data}
  end
  def set_calibration(%{accel: %{x: acc_x, y: acc_y, z: acc_z, radius: acc_radius}, mag: %{x: mag_x, y: mag_y, z: mag_z, radius: mag_radius}, gyro: %{x: gyro_x, y: gyro_y, z: gyro_z}}) do
    {@accel_offset_x_lsb_addr, <<
      acc_x :: size(16)-signed-little,
      acc_y :: size(16)-signed-little,
      acc_z :: size(16)-signed-little,
      mag_x :: size(16)-signed-little,
      mag_y :: size(16)-signed-little,
      mag_z :: size(16)-signed-little,
      gyro_x :: size(16)-signed-little,
      gyro_y :: size(16)-signed-little,
      gyro_z :: size(16)-signed-little,
      acc_radius :: size(16)-signed-little,
      mag_radius :: size(16)-signed-little
    >>}
  end
  @doc """
  Sets the power mode of the BNO055.
  ## Valid Modes
   - :normal - All sensors for selected operational mode are turned on
   - :lowpower - If no motion is detected for a set period of time (default 5 seconds), then the BNO055 enters a low power mode where only the accelerometer is active.
   - :suspend - All sensors and microcontroller are put into sleep mode.
  See section 3.2 of datasheet for more detailed information on power modes.
  """
  @spec set_power_mode(power_mode) :: set_result
  def set_power_mode(:normal), do: {@pwr_mode_addr, <<0x00>>}
  def set_power_mode(:lowpower), do: {@pwr_mode_addr, <<0x01>>}
  def set_power_mode(:suspend), do: {@pwr_mode_addr, <<0x02>>}
  def set_power_mode(inv_mode), do: raise ArgumentError, "Invalid power mode #{inv_mode} given!"
  @doc """
  Sets the current register page for the BNO055. Valid pages are 0 or 1
  """
  @spec set_page(0|1) :: set_result
  def set_page(0), do:  {@page_id_addr, <<0>>}
  def set_page(1), do:  {@page_id_addr, <<1>>}
  def set_page(inv_page), do: raise ArgumentError, "Invalid page #{inv_page} given!"
  @doc """
  Sets the output units for orientation mode, temperature, Euler angles,
  gyroscope, and acceleration.
  ## Orientation Mode
  :windows
  :android
  ## Temperature
  :celsius
  :fahrenheit
  ## Euler Angles
  :degrees
  :radians
  ## Gyroscope angular rate units
  :dps
  :rps
  ## Acceleration units
  :ms2
  :mg
  See section 3.6.1 of the datasheet for more details on output units
  """
  @spec set_output_units(:windows|:android, :celsius|:fahrenheit, :degrees|:radians, :dps|:rps, :ms2|:mg) :: set_result
  def set_output_units(orientation, temp, euler, gyro, acc) do
    orientation_val = case orientation do
      :windows -> 0
      :android -> 1
      _ -> raise ArgumentError, "Invalid orientation mode #{orientation} given!"
    end
    temp_val = case temp do
      :celsius -> 0
      :fahrenheit -> 1
      _ -> raise ArgumentError, "Invalid temperature units #{temp} given!"
    end
    euler_val = case euler do
      :degrees -> 0
      :radians -> 1
      _ -> raise ArgumentError, "Invalid euler units #{euler} given!"
    end
    gyro_val = case gyro do
      :dps -> 0
      :rps -> 1
      _ -> raise ArgumentError, "Invalid gyro #{gyro} given!"
    end
    acc_val = case acc do
      :ms2 -> 0
      :mg -> 1
      _ -> raise ArgumentError, "Invalid acceleration units #{acc} given!"
    end
    {
      @unit_sel_addr,
      <<
        orientation_val::size(1),
        0::size(2),
        temp_val::size(1),
        0::size(1),
        euler_val::size(1),
        gyro_val::size(1),
        acc_val::size(1)
      >>
    }
  end
  @doc """
  Sets the axis remap for each of the 3 axes, as well as the sign for each axis as :positive or :negative (inverted).
  Valid axis remap values - :x_axis, :y_axis, :z_axis
  Valid axis sign values - :positive, :negative
  Note: two axes cannot be mapped to the same axis remap value.
  See section 3.4 of the datasheet for more information.
  """
  @spec set_axis_mapping(axis_remap, axis_remap, axis_remap, axis_sign, axis_sign, axis_sign) :: set_result
  def set_axis_mapping(x, y, z, x_sign, y_sign, z_sign) do
    x_val = case x do
      :x_axis -> 0
      :y_axis -> 1
      :z_axis -> 2
      _ -> raise ArgumentError, "Invalid x axis mapping x: #{x} given!"
    end
    y_val = case y do
      :x_axis -> 0
      :y_axis -> 1
      :z_axis -> 2
      _ -> raise ArgumentError, "Invalid y axis mapping y: #{y} given!"
    end
    z_val = case z do
      :x_axis -> 0
      :y_axis -> 1
      :z_axis -> 2
      _ -> raise ArgumentError, "Invalid z axis mapping z: #{z} given!"
    end
    case {x,y,z} do
      {_, ^x, _} -> raise ArgumentError, "Invalid axis mappings given, axis mappings must be mutually exclusive. x == y"
      {_, _, ^x} -> raise ArgumentError, "Invalid axis mappings given, axis mappings must be mutually exclusive. x == z"
      {_, _, ^y} -> raise ArgumentError, "Invalid axis mappings given, axis mappings must be mutually exclusive. y == z"
      _ -> true
    end
    x_sign_val = case x_sign do
      :positive -> 0
      :negative -> 1
      _ -> raise ArgumentError, "Invalid x axis sign mapping #{x_sign} given!"
    end
    y_sign_val = case y_sign do
      :positive -> 0
      :negative -> 1
      _ -> raise ArgumentError, "Invalid y axis sign mapping #{y_sign} given!"
    end
    z_sign_val = case z_sign do
      :positive -> 0
      :negative -> 1
      _ -> raise ArgumentError, "Invalid z axis sign mapping #{z_sign} given!"
    end
    data = <<
      0 :: size(2),
      z_val :: size(2),
      y_val :: size(2),
      x_val :: size(2),
      0 :: size(5),
      x_sign_val :: size(1),
      y_sign_val :: size(1),
      z_sign_val :: size(1)
    >>
    {@axis_map_config_addr, data}
  end
  @spec set_axis_mapping(map) :: set_result
  def set_axis_mapping(%{x_axis: x, y_axis: y, z_axis: z, x_sign: x_sign, y_sign: y_sign, z_sign: z_sign}) do
    set_axis_mapping(
      x,
      y,
      z,
      x_sign,
      y_sign,
      z_sign
    )
  end
  @doc """
  Resets the BNO055, rebooting the microcontroller and clearing the current configuration.
  The sensor will be unavailable while resetting, so your app should sleep before executing
  the next command.
  """
  @spec reset() :: set_result
  def reset(), do: {@sys_trigger_addr, <<0x20>>}
  @doc """
  Resets the system trigger back to 0x00. All bits off.
  """
  @spec reset_system_trigger() :: set_result
  def reset_system_trigger(), do: {@sys_trigger_addr, <<0x00 :: size(8)>>}
  @doc """
  Command to get the sensor chip address
  """
  @spec get_chip_address() :: get_result
  def get_chip_address, do:  {@chip_id_addr, 1}
  @doc """
  Command to get system status
  """
  @spec get_system_status() :: get_result
  def get_system_status, do: {@sys_stat_addr, 1}
  @doc """
  Command to get last sensor self test result
  """
  @spec get_self_test_result() :: get_result
  def get_self_test_result, do: {@selftest_result_addr, 1}
  @doc """
  Command to get system error data
  """
  @spec get_system_error_data() :: get_result
  def get_system_error_data, do: {@sys_err_addr, 1}
  @doc """
  Command to get sensor revision information
  """
  @spec get_revision_info() :: get_result
  def get_revision_info, do: {@accel_rev_id_addr, 6}
  @doc """
  Command to get calibration status
  """
  @spec get_calibration_status() :: get_result
  def get_calibration_status, do: {@calib_stat_addr, 1}
  @doc """
  Command to get sensor calibration data
  """
  @spec get_calibration() :: get_result
  def get_calibration, do: {@accel_offset_x_lsb_addr, 22}
  @doc """
  Command to get sensor axis remapping
  """
  @spec get_axis_mapping() :: get_result
  def get_axis_mapping, do: {@axis_map_config_addr, 2}
  @doc """
  Command to read latest euler angles from fusion mode
  """
  @spec get_euler_reading() :: get_result
  def get_euler_reading, do: {@euler_h_lsb_addr, 6}
  @doc """
  Command to read latest magnetometer values
  """
  @spec get_magnetometer_reading() :: get_result
  def get_magnetometer_reading, do: {@mag_data_x_lsb_addr, 6}
  @doc """
  Command to read latest gyroscope values
  """
  @spec get_gyroscope_reading() :: get_result
  def get_gyroscope_reading, do: {@gyro_data_x_lsb_addr, 6}
  @doc """
  Command to read latest accelerometer values
  """
  @spec get_accelerometer_reading() :: get_result
  def get_accelerometer_reading, do: {@accel_data_x_lsb_addr, 6}
  @doc """
  Command to read latest linear acceleration values
  """
  @spec get_linear_acceleration_reading() :: get_result
  def get_linear_acceleration_reading, do: {@linear_accel_data_x_lsb_addr, 6}
  @doc """
  Command to read latest gravity values
  """
  @spec get_gravity_reading() :: get_result
  def get_gravity_reading, do: {@gravity_data_x_lsb_addr, 6}
  @doc """
  Command to read latest quaternion values
  """
  @spec get_quaternion_reading() :: get_result
  def get_quaternion_reading, do: {@quaternion_data_w_lsb_addr, 8}
  @doc """
  Command to read latest temperature value
  """
  @spec get_temperature_reading() :: get_result
  def get_temperature_reading, do: {@temp_addr, 1}
  @doc """
  Takes binary data returned from sensor system status and returns decoded string
  """
  @spec decode_system_status(binary) :: String.t
  def decode_system_status(data) when is_binary(data) do    
    case data do
      <<0>> -> "Idle"
      <<1>> -> "System Error"
      <<2>> -> "Initializing Peripherals"
      <<3>> -> "System Iniitalization"
      <<4>> -> "Executing Self-Test"
      <<5>> -> "Sensor fusion algorithm running"
      <<6>> -> "System running without fusion algorithms"
      _ -> "Unknown status: #{data}"
    end
  end
  @doc """
  Takes binary data returned from sensor self test and returns decoded data in a map
  %{
    mcu: "Pass",
    gyro: "Pass",
    mag: "Fail",
    accel: "Fail"
  }
  """
  @spec decode_self_test_result(binary) :: map
  def decode_self_test_result(data) when is_binary(data) do
    <<
      _ :: size(4),
      mcu_st :: size(1),
      gyro_st :: size(1),
      mag_st :: size(1),
      acc_st :: size(1)
    >> = data
    %{
      mcu: (if mcu_st == 1, do: "Pass", else: "Fail"),
      gyro: (if gyro_st == 1, do: "Pass", else: "Fail"),
      mag: (if mag_st == 1, do: "Pass", else: "Fail"),
      accel: (if acc_st == 1, do: "Pass", else: "Fail")
    }
  end
  @doc """
  Takes binary data returned from sensor error data and returns decoded string
  """
  @spec decode_system_error_data(binary) :: String.t
  def decode_system_error_data(data) when is_binary(data) do
    case data do
      <<0x00>> -> "No error"
      <<0x01>> -> "Peripheral initialization error"
      <<0x02>> -> "System initialization error"
      <<0x03>> -> "Self test result failed"
      <<0x04>> -> "Register map value out of range"
      <<0x05>> -> "Register map address out of range"
      <<0x06>> -> "Register map write error"
      <<0x07>> -> "BNO low power mode not available for selected operation mode"
      <<0x08>> -> "Accelerometer power mode not available"
      <<0x09>> -> "Fusion algorithm configuration error"
      <<0x0A>> -> "Sensor configuration error"
      _ -> "Unknown system error value: #{data}"
    end
  end
  @doc """
  Takes binary data returned from sensor revision info and returns decoded map
  %{
    accel: 0,
    mag: 0,
    gyro: 0,
    bl: 0,
    sw: 0
  }
  """
  @spec decode_revision_info(binary) :: map
  def decode_revision_info(data) when is_binary(data) do
    <<
      accel_rev::size(8),
      mag_rev::size(8),
      gyro_rev::size(8),
      sw_rev::size(16),
      bl_rev::size(8)
    >> = data
    %{
      accel: accel_rev,
      mag: mag_rev,
      gyro: gyro_rev,
      bl: bl_rev,
      sw: sw_rev
    }
  end
  @doc """
  Takes binary data returned from sensor calibration status and returns decoded map
  %{
    system: :not_calibrated,
    gyro: :fully_calibrated,
    accel: :fully_calibrated,
    mag: :not_calibrated
  }
  """
  @spec decode_calibration_status(binary) :: map
  def decode_calibration_status(data) do
    <<
      sys_stat::size(2),
      gyr_stat::size(2),
      acc_stat::size(2),
      mag_stat::size(2)
    >> = data
    %{
      system: (if (sys_stat == 3), do: :fully_calibrated, else: :not_calibrated),
      gyro: (if (gyr_stat == 3), do: :fully_calibrated, else: :not_calibrated),
      accel: (if (acc_stat == 3), do: :fully_calibrated, else: :not_calibrated),
      mag: (if (mag_stat == 3), do: :fully_calibrated, else: :not_calibrated)
    }
  end
  @doc """
  Takes binary data returned from sensor calibration and returns decoded map
  %{
    %{
      accel: %{
        x: 0,
        y: 0,
        z: 0,
        radius: 0
      },
      mag: %{
        x: 0,
        y: 0,
        z: 0,
        radius: 0
      },
      gyro: %{
        x: 0,
        y: 0,
        z: 0
      }
    }
  }
  """
  @spec decode_calibration(binary) :: map
  def decode_calibration(data) when is_binary(data) and byte_size(data) == 22 do
    <<
      acc_x :: size(16)-signed-little,
      acc_y :: size(16)-signed-little,
      acc_z :: size(16)-signed-little,
      mag_x :: size(16)-signed-little,
      mag_y :: size(16)-signed-little,
      mag_z :: size(16)-signed-little,
      gyro_x :: size(16)-signed-little,
      gyro_y :: size(16)-signed-little,
      gyro_z :: size(16)-signed-little,
      acc_radius :: size(16)-signed-little,
      mag_radius :: size(16)-signed-little
    >> = data
    %{
      accel: %{
        x: acc_x,
        y: acc_y,
        z: acc_z,
        radius: acc_radius
      },
      mag: %{
        x: mag_x,
        y: mag_y,
        z: mag_z,
        radius: mag_radius
      },
      gyro: %{
        x: gyro_x,
        y: gyro_y,
        z: gyro_z
      }
    }
  end
  @doc """
  Takes binary data returned from sensor axis remapping and returns decoded map
  %{
      x_axis: :x_axis,
      y_axis: :y_axis,
      z_axis: :z_axis,
      x_sign: :positive,
      y_sign: :negative,
      z_sign: :positive
    }
  """
  @spec decode_axis_mapping(binary) :: map
  def decode_axis_mapping(data) when is_binary(data) do
    <<
      _ :: size(2),
      z :: size(2),
      y :: size(2),
      x :: size(2),
      _ :: size(5),
      x_sign :: size(1),
      y_sign :: size(1),
      z_sign :: size(1)
    >> = data
    %{
      x_axis: get_axis_mapping_from_val(x),
      y_axis: get_axis_mapping_from_val(y),
      z_axis: get_axis_mapping_from_val(z),
      x_sign: get_axis_sign_from_val(x_sign),
      y_sign: get_axis_sign_from_val(y_sign),
      z_sign: get_axis_sign_from_val(z_sign)
    }
  end
  defp get_axis_mapping_from_val(0), do: :x_axis
  defp get_axis_mapping_from_val(1), do: :y_axis
  defp get_axis_mapping_from_val(2), do: :z_axis
  defp get_axis_sign_from_val(0), do: :positive
  defp get_axis_sign_from_val(1), do: :negative
  @degrees_factor 16.0
  @radians_factor 900.0
  @ms2_factor 100.0
  @mg_factor 1.0
  @doc """
  Decodes the absolute orientation as a map with heading, roll and pitch angles. Supports both
  degrees and radians, default is degrees.
  """
  @spec decode_euler_reading(binary, :degrees|:radians) :: map | :no_data
  def decode_euler_reading(data, units \\ :degrees)
  def decode_euler_reading(<<>>, _), do: :no_data
  def decode_euler_reading(data, :degrees), do: _decode_euler(data, @degrees_factor)
  def decode_euler_reading(data, :radians), do: _decode_euler(data, @radians_factor)
  defp _decode_euler(data, unit_factor) do
    <<
  	  heading_rdg :: size(16)-signed-little,
  	  roll_rdg :: size(16)-signed-little,
  	  pitch_rdg :: size(16)-signed-little
  	>> = data
  	heading = heading_rdg / unit_factor
  	roll = roll_rdg / unit_factor
  	pitch = pitch_rdg / unit_factor
  	%{
  	  heading: heading,
  	  roll: roll,
  	  pitch: pitch,
  	}
  end
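  # For instance, a raw 6-byte reading with a heading of 1440 LSB
  # (1440 / 16.0 = 90.0 degrees) and zero roll/pitch decodes as (a sketch):
  #
  #   iex> BNO055.decode_euler_reading(<<1440::size(16)-signed-little, 0::size(16), 0::size(16)>>)
  #   %{heading: 90.0, roll: 0.0, pitch: 0.0}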
  @doc """
  Decodes the magnetometer reading returning a map of the current values for each
  axis in micro-Teslas
  """
  @spec decode_magnetometer_reading(binary) :: map | :no_data
  def decode_magnetometer_reading(<<>>), do: :no_data
  # the magnetometer scale is 16 LSB per micro-Tesla, the same factor as degrees
  def decode_magnetometer_reading(data), do: _decode_vector(data, @degrees_factor)
  
  @doc """
  Decodes the gyroscope (angular velocity) reading returning a map of the current values for each
  axis in either degrees per second or radians per second. Default is degrees per second.
  """
  @spec decode_gyroscope_reading(binary, :dps|:rps) :: map | :no_data
  def decode_gyroscope_reading(data, units \\ :dps)
  def decode_gyroscope_reading(<<>>, _), do: :no_data
  def decode_gyroscope_reading(data, :dps), do: _decode_vector(data, @degrees_factor)
  def decode_gyroscope_reading(data, :rps), do: _decode_vector(data, @radians_factor)
  @doc """
  Decodes the accelerometer reading returning a map of the current values for each
  axis in meters/second^2 or milli-g. Default is meters/second^2
  """
  @spec decode_accelerometer_reading(binary, :ms2|:mg) :: map | :no_data
  def decode_accelerometer_reading(data, units \\ :ms2)
  def decode_accelerometer_reading(<<>>, _), do: :no_data
  def decode_accelerometer_reading(data, :ms2), do: _decode_vector(data, @ms2_factor)
  def decode_accelerometer_reading(data, :mg), do: _decode_vector(data, @mg_factor)
  @doc """
  Decodes the linear acceleration (acceleration from movement, not from gravity) reading 
  returning a map of the current values for each axis in meters/second^2 or milli-g. 
  Default is meters/second^2
  """
  @spec decode_linear_acceleration_reading(binary, :ms2|:mg) :: map | :no_data
  def decode_linear_acceleration_reading(data, units \\ :ms2)
  def decode_linear_acceleration_reading(<<>>, _), do: :no_data
  def decode_linear_acceleration_reading(data, :ms2), do: _decode_vector(data, @ms2_factor)
  def decode_linear_acceleration_reading(data, :mg), do: _decode_vector(data, @mg_factor)
  @doc """
  Decodes the gravity acceleration reading returning a map of the current values for each
  axis in meters/second^2 or milli-g. Default is meters/second^2
  """
  @spec decode_gravity_reading(binary, :ms2|:mg) :: map | :no_data
  def decode_gravity_reading(data, units \\ :ms2)
  def decode_gravity_reading(<<>>, _), do: :no_data
  def decode_gravity_reading(data, :ms2), do: _decode_vector(data, @ms2_factor)
  def decode_gravity_reading(data, :mg), do: _decode_vector(data, @mg_factor)
  @quaternion_scale (1.0 / :math.pow(2, 14))
  @doc """
  Decodes the orientation returning a map of the X, Y, Z, & W quaternion values
  """
  @spec decode_quaternion_reading(binary) :: map | :no_data
  def decode_quaternion_reading(<<>>), do: :no_data
  def decode_quaternion_reading(data) do
    <<
      w_raw :: size(16)-signed-little,
      x_raw :: size(16)-signed-little,
      y_raw :: size(16)-signed-little,
      z_raw :: size(16)-signed-little
    >> = data
    x_val = x_raw * @quaternion_scale
    y_val = y_raw * @quaternion_scale
    z_val = z_raw * @quaternion_scale
    w_val = w_raw * @quaternion_scale
    %{
      x: x_val,
      y: y_val,
      z: z_val,
      w: w_val
    }
  end
  defp _decode_vector(data, unit_factor) do
    <<
      x_raw :: size(16)-signed-little,
      y_raw :: size(16)-signed-little,
      z_raw :: size(16)-signed-little
    >> = data
    x_val = x_raw / unit_factor
    y_val = y_raw / unit_factor
    z_val = z_raw / unit_factor
    %{
      x: x_val,
      y: y_val,
      z: z_val
    }
  end  
end | 
	lib/bno055.ex | 0.928384 | 0.872836 | 
	bno055.ex | 
	starcoder | 
| 
	defmodule CSSEx.RGBA do
  @moduledoc """
  Struct and helper functions for generating RGBA values.
  """
  @colors CSSEx.Helpers.Colors.colors_tuples()
  alias CSSEx.Unit
  defstruct r: 0, g: 0, b: 0, a: 1
  @type t() :: %CSSEx.RGBA{
          r: non_neg_integer,
          g: non_neg_integer,
          b: non_neg_integer,
          a: number
        }
  @doc """
  Accepts any value in the form of a binary `"hsla(0, 10%, 20%, 0.5)"` or `"hsl(0, 10%, 20%)"`, any hexadecimal representation in binary in the form of `"#xxx"`, `"#xxxx"`, `"#xxxxxx"` or `"#xxxxxxxx"`, rgb/a as `"rgba(100,100,100,0.1)"` or `"rgb(10,20,30)"`, or any literal color name defined as web colors (CSSEx.Colors) - returns a `%CSSEx.RGBA{}` struct.
  """
  def new_rgba(<<"rgba", values::binary>>) do
    case Regex.run(~r/\((.+),(.+),(.+),(.+)\)/, values) do
      [_, r, g, b, a] ->
        new(
          String.trim(r),
          String.trim(g),
          String.trim(b),
          String.trim(a)
        )
      _ ->
        {:error, :invalid}
    end
  end
  def new_rgba(<<"rgb", values::binary>>) do
    case Regex.run(~r/\((.+),(.+),(.+)\)/, values) do
      [_, r, g, b] ->
        new(
          String.trim(r),
          String.trim(g),
          String.trim(b),
          "1"
        )
      _ ->
        {:error, :invalid}
    end
  end
  def new_rgba(<<"hsla", _::binary>> = full) do
    case CSSEx.HSLA.new_hsla(full) do
      {:ok, %CSSEx.HSLA{} = hsla} -> from_hsla(hsla)
      error -> error
    end
  end
  def new_rgba(<<"hsl", _::binary>> = full) do
    case CSSEx.HSLA.new_hsla(full) do
      {:ok, %CSSEx.HSLA{} = hsla} -> from_hsla(hsla)
      error -> error
    end
  end
  def new_rgba(<<"#", hex::binary>>) do
    case hex do
      <<r::binary-size(2), g::binary-size(2), b::binary-size(2), a::binary-size(2)>> ->
        new(r, g, b, a, 16)
      <<r::binary-size(2), g::binary-size(2), b::binary-size(2)>> ->
        new(r, g, b, "100", 16)
      <<r::binary-size(1), g::binary-size(1), b::binary-size(1), a::binary-size(1)>> ->
        new(r, g, b, a, 16)
      <<r::binary-size(1), g::binary-size(1), b::binary-size(1)>> ->
        new(r, g, b, "100", 16)
      _ ->
        {:error, :invalid}
    end
  end
  Enum.each(@colors, fn [color, rgba] ->
    def new_rgba(unquote(color)), do: new_rgba(unquote(rgba))
  end)
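  # A couple of parsing sketches (web color names resolve through the same
  # rgba path):
  #
  #   iex> CSSEx.RGBA.new_rgba("rgb(128, 64, 32)")
  #   {:ok, %CSSEx.RGBA{r: 128, g: 64, b: 32, a: 1.0}}
  #
  #   iex> CSSEx.RGBA.new_rgba("#808080")
  #   {:ok, %CSSEx.RGBA{r: 128, g: 128, b: 128, a: 1.0}}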
  @doc """
  Converts an existing `%CSSEx.HSLA{}` struct into a `%CSSEx.RGBA{}` struct.
  Taken from https://www.niwa.nu/2013/05/math-behind-colorspace-conversions-rgb-hsl
  """
  def from_hsla(%CSSEx.HSLA{s: %Unit{value: 0}, l: %Unit{value: l}, a: a}) do
    gray = round(l / 100 * 255)
    {:ok, %CSSEx.RGBA{r: gray, g: gray, b: gray, a: a}}
  end
  def from_hsla(%CSSEx.HSLA{h: %Unit{value: h}, s: %Unit{value: s}, l: %Unit{value: l}, a: a}) do
    n_l = (l / 100) |> Float.round(3)
    n_s = (s / 100) |> Float.round(3)
    convert_val_1 =
      case n_l >= 0.5 do
        true -> n_l + n_s - n_l * n_s
        _ -> n_l * (1 + n_s)
      end
      |> Float.round(3)
    convert_val_2 = (2 * n_l - convert_val_1) |> Float.round(3)
    hue_norm = (h / 360) |> Float.round(3)
    r = hue_norm + 0.333
    r_1 = if(r >= 0, do: if(r > 1, do: r - 1, else: r), else: r + 1)
    g = hue_norm
    g_1 = if(g >= 0, do: if(g > 1, do: g - 1, else: g), else: g + 1)
    b = hue_norm - 0.333
    b_1 = if(b >= 0, do: if(b > 1, do: b - 1, else: b), else: b + 1)
    red = convert_color_chan(convert_val_1, convert_val_2, r_1) * 255
    green = convert_color_chan(convert_val_1, convert_val_2, g_1) * 255
    blue = convert_color_chan(convert_val_1, convert_val_2, b_1) * 255
    {:ok,
     %__MODULE__{
       r: round(red),
       g: round(green),
       b: round(blue),
       a: a
     }}
  end
  @doc """
  Generates a `%CSSEx.RGBA{}` wrapped in an :ok tuple, from the values of r, g, b, and alpha. All values are treated as decimal by default but another base can be provided as an optional argument.
  """
  def new(r, g, b, a, base \\ 10),
    do: {
      :ok,
      %__MODULE__{
        r: color_value(r, base),
        g: color_value(g, base),
        b: color_value(b, base),
        a: alpha_value(a, base)
      }
    }
  @doc false
  def color_value(val, base) when is_binary(val) do
    case Integer.parse(val, base) do
      {parsed, _} -> valid_rgb_val(parsed)
      :error -> 0
    end
  end
  @doc false
  def alpha_value(val, 10) do
    case Float.parse(val) do
      {parsed, _} -> valid_alpha_val(parsed)
      :error -> 1
    end
  end
  def alpha_value(val, 16) do
    case Integer.parse(val, 16) do
      # a hex alpha of "ff" (255) maps to fully opaque 1.0
      {parsed, _} -> valid_alpha_val(parsed / 255)
      :error -> 1
    end
  end
  @doc false
  def valid_rgb_val(n) when n <= 255 and n >= 0, do: n
  def valid_rgb_val(n) when n > 255, do: 255
  def valid_rgb_val(n) when n < 0, do: 0
  @doc false
  def valid_alpha_val(n) when n > 0 and n <= 1, do: n
  def valid_alpha_val(_n), do: 1
  @doc false
  def convert_color_chan(pass_1, pass_2, temp_color) do
    case {6 * temp_color < 1, 2 * temp_color < 1, 3 * temp_color < 2} do
      {true, _, _} -> pass_2 + (pass_1 - pass_2) * 6 * temp_color
      {_, true, _} -> pass_1
      {_, _, true} -> pass_2 + (pass_1 - pass_2) * (0.666 - temp_color) * 6
      _ -> pass_2
    end
  end
end
defimpl String.Chars, for: CSSEx.RGBA do
  def to_string(%CSSEx.RGBA{r: r, g: g, b: b, a: a}), do: "rgba(#{r},#{g},#{b},#{a})"
end | 
	lib/structs/rgba.ex | 0.84241 | 0.502502 | 
	rgba.ex | 
	starcoder | 
| 
	defmodule Advent.Y2021.D11 do
  @moduledoc """
  https://adventofcode.com/2021/day/11
  """
  @typep point :: {non_neg_integer(), non_neg_integer()}
  @typep grid :: %{point => non_neg_integer()}
  @doc """
  Given the starting energy levels of the dumbo octopuses in your cavern,
  simulate 100 steps. How many total flashes are there after 100 steps?
  """
  @spec part_one(Enumerable.t(), non_neg_integer()) :: non_neg_integer()
  def part_one(input, steps) do
    input
    |> parse_input()
    |> Stream.unfold(&step/1)
    |> Stream.take(steps)
    |> Enum.sum()
  end
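  # The small grid from the puzzle statement: after 2 steps a total of 9
  # flashes have occurred:
  #
  #   iex> Advent.Y2021.D11.part_one(["11111", "19991", "19191", "19991", "11111"], 2)
  #   9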
  @doc """
  What is the first step during which all octopuses flash?
  """
  @spec part_two(Enumerable.t()) :: non_neg_integer()
  def part_two(input) do
    grid = parse_input(input)
    grid_size = map_size(grid)
    grid
    |> Stream.unfold(&step/1)
    |> Stream.with_index(1)
    |> Enum.find(fn {flashes, _step} -> flashes == grid_size end)
    |> elem(1)
  end
  @spec parse_input(Enumerable.t()) :: grid()
  defp parse_input(input) do
    input
    |> Stream.with_index()
    |> Stream.flat_map(fn {line, row} ->
      line
      |> String.graphemes()
      |> Enum.map(&String.to_integer/1)
      |> Enum.with_index()
      |> Enum.map(fn {energy, col} ->
        {{col, row}, energy}
      end)
    end)
    |> Map.new()
  end
  @spec step(grid()) :: {non_neg_integer(), grid()}
  defp step(grid) do
    grid = Map.map(grid, fn {_, e} -> rem(e + 1, 10) end)
    {flash_points, num_flashes} =
      Enum.reduce(grid, {[], 0}, fn
        {point, 0}, {acc, n} -> {[point | acc], n + 1}
        _, {acc, n} -> {acc, n}
      end)
    do_step(grid, flash_points, num_flashes)
  end
  @spec do_step(grid(), [point()], non_neg_integer()) :: {non_neg_integer(), grid()}
  defp do_step(grid, [], num_flashes), do: {num_flashes, grid}
  defp do_step(grid, flash_points, total_flashes) do
    {grid, flash_points, num_flashes} =
      flash_points
      |> Enum.flat_map(&neighbors(grid, &1))
      |> Enum.reduce({grid, [], 0}, fn point, {grid, acc, n} ->
        {old, grid} =
          Map.get_and_update(grid, point, fn
            0 -> {0, 0}
            e -> {e, rem(e + 1, 10)}
          end)
        if old == 9 do
          {grid, [point | acc], n + 1}
        else
          {grid, acc, n}
        end
      end)
    do_step(grid, flash_points, total_flashes + num_flashes)
  end
  @spec neighbors(grid(), point()) :: [point()]
  defp neighbors(grid, {col, row}) do
    # > Multiple generators can also be used to calculate the cartesian product
    # > of two lists...
    # -- https://elixir-lang.org/getting-started/comprehensions.html#generators-and-filters
    for(x <- -1..1, y <- -1..1, {x, y} != {0, 0}, do: {col + x, row + y})
    |> Enum.filter(&Map.has_key?(grid, &1))
  end
end | 
	lib/advent/y2021/d11.ex | 0.82994 | 0.660662 | 
	d11.ex | 
	starcoder | 
| 
	defmodule GingerbreadShop.Service.Store.Model do
    use Ecto.Schema
    import Ecto
    import Ecto.Changeset
    import Protecto
    @moduledoc """
      A model representing a store.
      ## Fields
      ### :id
      Is the unique reference to the store entry. Is an `integer`.
      ### :entity
      Is the entity the store belongs to. Is a `UUID`.
      ### :public
      Whether the store is publicly listed or private. Is a `boolean`.
      ### :status
      Is the current operating status of the store. Is a `GingerbreadShop.Service.Store.StatusEnum`.
      ### :name
      Is the name of the store. Is a `string`.
      ### :phone
      Is the phone number of the store. Is a `string`.
      ### :address
      Is the address of the store. Is a `map`.
      ### :place
      Is the place/building complex the store is located at. Is a `string`.
      ### :geo
      Is the longitude/latitude of where the store is located. Is a `geometry`.
      ### :services
      Are the services the store offers. Is a `map`.
      ### :assets
      Are the image assets that have been associated with the store. Is a `map`.
    """
    schema "stores" do
        field :entity, Ecto.UUID
        field :public, :boolean
        field :status, GingerbreadShop.Service.Store.StatusEnum
        field :name, :string
        field :phone, :string
        field :address, :map
        field :place, :string
        field :geo, Geo.Point
        field :services, :map
        field :assets, :map
        timestamps()
    end
    @doc """
      Builds a changeset for insertion based on the `struct` and `params`.
      Enforces:
      * `entity` field is required
      * `public` field is required
      * `status` field is required
      * `name` field is required
      * `phone` field is required
      * `address` field is required
      * `geo` field is required
      * checks uniqueness of the entity
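      ## Example
      A sketch of usage (the `:status` value and the address/geo shapes shown
      here are assumptions, not taken from the schema itself):
          %GingerbreadShop.Service.Store.Model{}
          |> GingerbreadShop.Service.Store.Model.insert_changeset(%{
            entity: Ecto.UUID.generate(),
            public: true,
            status: :open,
            name: "Example Store",
            phone: "+15555550123",
            address: %{"street" => "1 Example St"},
            geo: %Geo.Point{coordinates: {151.2, -33.9}}
          })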
    """
    def insert_changeset(struct, params \\ %{}) do
        struct
        |> cast(params, [:entity, :public, :status, :name, :phone, :address, :place, :geo, :services, :assets])
        |> validate_required([:entity, :public, :status, :name, :phone, :address, :geo])
        |> validate_phone_number(:phone)
        |> unique_constraint(:entity)
    end
    @doc """
      Builds a changeset for update based on the `struct` and `params`.
      Enforces:
      * `entity` field is not empty
      * `public` field is not empty
      * `status` field is not empty
      * `name` field is not empty
      * `phone` field is not empty
      * `address` field is not empty
      * `geo` field is not empty
      * checks uniqueness of the entity
    """
    def update_changeset(struct, params \\ %{}) do
        struct
        |> cast(params, [:entity, :public, :status, :name, :phone, :address, :place, :geo, :services, :assets])
        |> validate_emptiness(:entity)
        |> validate_emptiness(:public)
        |> validate_emptiness(:status)
        |> validate_emptiness(:name)
        |> validate_emptiness(:phone)
        |> validate_emptiness(:address)
        |> validate_emptiness(:geo)
        |> validate_phone_number(:phone)
        |> unique_constraint(:entity)
    end
end | 
	apps/gingerbread_shop_service/lib/gingerbread_shop.service/store/model.ex | 0.831793 | 0.604807 | 
	model.ex | 
	starcoder | 
| 
	defmodule Google.Protobuf.Compiler.Version do
  @moduledoc false
  alias Pbuf.Decoder
  import Bitwise, only: [bsr: 2, band: 2]
  @derive Jason.Encoder
  defstruct [
    major: nil,
    minor: nil,
    patch: nil,
    suffix: nil
  ]
  @type t :: %__MODULE__{
    major: integer,
    minor: integer,
    patch: integer,
    suffix: String.t
  }
  @spec new(Enum.t) :: t
  def new(data) do
    struct(__MODULE__, data)
  end
  @spec encode_to_iodata!(t | map) :: iodata
  def encode_to_iodata!(data) do
    alias Elixir.Pbuf.Encoder
    [
      Encoder.field(:int32, data.major, <<8>>),
      Encoder.field(:int32, data.minor, <<16>>),
      Encoder.field(:int32, data.patch, <<24>>),
      Encoder.field(:string, data.suffix, <<34>>),
    ]
  end
  @spec encode!(t | map) :: binary
  def encode!(data) do
    :erlang.iolist_to_binary(encode_to_iodata!(data))
  end
  @spec decode!(binary) :: t
  def decode!(data) do
    Decoder.decode!(__MODULE__, data)
  end
  @spec decode(binary) :: {:ok, t} | :error
  def decode(data) do
    Decoder.decode(__MODULE__, data)
  end
  
  def decode(acc, <<8, data::binary>>) do
    Decoder.field(:int32, :major, acc, data)
  end
  
  def decode(acc, <<16, data::binary>>) do
    Decoder.field(:int32, :minor, acc, data)
  end
  
  def decode(acc, <<24, data::binary>>) do
    Decoder.field(:int32, :patch, acc, data)
  end
  
  def decode(acc, <<34, data::binary>>) do
    Decoder.field(:string, :suffix, acc, data)
  end
  # failed to decode, either this is an unknown tag (which we can skip), or
  # it is a wrong type (which is an error)
  def decode(acc, data) do
    {prefix, data} = Decoder.varint(data)
    tag = bsr(prefix, 3)
    type = band(prefix, 7)
    case tag in [1,2,3,4] do
      false -> {acc, Decoder.skip(type, data)}
      true ->
        err = %Decoder.Error{
          tag: tag,
          module: __MODULE__,
          message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
        }
        {:error, err}
    end
  end
  def __finalize_decode__(args) do
    struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
                  {k, v}, acc -> Map.put(acc, k, v)
    end)
    struct
  end
end
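# Round-trip sketch (illustrative; assumes the Pbuf runtime is available):
#
#     version = Google.Protobuf.Compiler.Version.new(major: 3, minor: 21, patch: 12)
#     binary = Google.Protobuf.Compiler.Version.encode!(version)
#     {:ok, decoded} = Google.Protobuf.Compiler.Version.decode(binary)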
defmodule Google.Protobuf.Compiler.CodeGeneratorRequest do
  @moduledoc false
  alias Pbuf.Decoder
  import Bitwise, only: [bsr: 2, band: 2]
  @derive Jason.Encoder
  defstruct [
    file_to_generate: [],
    parameter: nil,
    compiler_version: nil,
    proto_file: []
  ]
  @type t :: %__MODULE__{
    file_to_generate: [String.t],
    parameter: String.t,
    compiler_version: Google.Protobuf.Compiler.Version.t,
    proto_file: [Google.Protobuf.FileDescriptorProto.t]
  }
  @spec new(Enum.t) :: t
  def new(data) do
    struct(__MODULE__, data)
  end
  @spec encode_to_iodata!(t | map) :: iodata
  def encode_to_iodata!(data) do
    alias Elixir.Pbuf.Encoder
    [
      Encoder.repeated_unpacked_field(:string, data.file_to_generate, <<10>>),
      Encoder.field(:string, data.parameter, <<18>>),
      Encoder.field(:struct, data.compiler_version, <<26>>),
      Encoder.repeated_unpacked_field(:struct, data.proto_file, <<122>>),
    ]
  end
  @spec encode!(t | map) :: binary
  def encode!(data) do
    :erlang.iolist_to_binary(encode_to_iodata!(data))
  end
  @spec decode!(binary) :: t
  def decode!(data) do
    Decoder.decode!(__MODULE__, data)
  end
  @spec decode(binary) :: {:ok, t} | :error
  def decode(data) do
    Decoder.decode(__MODULE__, data)
  end
  
  def decode(acc, <<10, data::binary>>) do
    Decoder.field(:string, :file_to_generate, acc, data)
  end
  
  def decode(acc, <<18, data::binary>>) do
    Decoder.field(:string, :parameter, acc, data)
  end
  
  def decode(acc, <<26, data::binary>>) do
    Decoder.struct_field(Google.Protobuf.Compiler.Version, :compiler_version, acc, data)
  end
  
  def decode(acc, <<122, data::binary>>) do
    Decoder.struct_field(Google.Protobuf.FileDescriptorProto, :proto_file, acc, data)
  end
  # failed to decode, either this is an unknown tag (which we can skip), or
  # it is a wrong type (which is an error)
  def decode(acc, data) do
    {prefix, data} = Decoder.varint(data)
    tag = bsr(prefix, 3)
    type = band(prefix, 7)
    case tag in [1,2,3,15] do
      false -> {acc, Decoder.skip(type, data)}
      true ->
        err = %Decoder.Error{
          tag: tag,
          module: __MODULE__,
          message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
        }
        {:error, err}
    end
  end
  def __finalize_decode__(args) do
    struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
      
      {:proto_file, v}, acc -> Map.update(acc, :proto_file, [v], fn e -> [v | e] end)
      {:file_to_generate, v}, acc -> Map.update(acc, :file_to_generate, [v], fn e -> [v | e] end)
            {k, v}, acc -> Map.put(acc, k, v)
    end)
    struct = Map.put(struct, :proto_file, Elixir.Enum.reverse(struct.proto_file))
    struct = Map.put(struct, :file_to_generate, Elixir.Enum.reverse(struct.file_to_generate))
    struct
  end
end
defmodule Google.Protobuf.Compiler.CodeGeneratorResponse do
  @moduledoc false
  alias Pbuf.Decoder
  import Bitwise, only: [bsr: 2, band: 2]
  @derive Jason.Encoder
  defstruct [
    error: nil,
    file: []
  ]
  @type t :: %__MODULE__{
    error: String.t,
    file: [Google.Protobuf.Compiler.CodeGeneratorResponse.File.t]
  }
  @spec new(Enum.t) :: t
  def new(data) do
    struct(__MODULE__, data)
  end
  @spec encode_to_iodata!(t | map) :: iodata
  def encode_to_iodata!(data) do
    alias Elixir.Pbuf.Encoder
    [
      Encoder.field(:string, data.error, <<10>>),
      Encoder.repeated_unpacked_field(:struct, data.file, <<122>>),
    ]
  end
  @spec encode!(t | map) :: binary
  def encode!(data) do
    :erlang.iolist_to_binary(encode_to_iodata!(data))
  end
  @spec decode!(binary) :: t
  def decode!(data) do
    Decoder.decode!(__MODULE__, data)
  end
  @spec decode(binary) :: {:ok, t} | :error
  def decode(data) do
    Decoder.decode(__MODULE__, data)
  end
  
  def decode(acc, <<10, data::binary>>) do
    Decoder.field(:string, :error, acc, data)
  end
  
  def decode(acc, <<122, data::binary>>) do
    Decoder.struct_field(Google.Protobuf.Compiler.CodeGeneratorResponse.File, :file, acc, data)
  end
  # failed to decode, either this is an unknown tag (which we can skip), or
  # it is a wrong type (which is an error)
  def decode(acc, data) do
    {prefix, data} = Decoder.varint(data)
    tag = bsr(prefix, 3)
    type = band(prefix, 7)
    case tag in [1,15] do
      false -> {acc, Decoder.skip(type, data)}
      true ->
        err = %Decoder.Error{
          tag: tag,
          module: __MODULE__,
          message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
        }
        {:error, err}
    end
  end
  def __finalize_decode__(args) do
    struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
      
      {:file, v}, acc -> Map.update(acc, :file, [v], fn e -> [v | e] end)
            {k, v}, acc -> Map.put(acc, k, v)
    end)
    struct = Map.put(struct, :file, Elixir.Enum.reverse(struct.file))
    struct
  end
end
defmodule Google.Protobuf.Compiler.CodeGeneratorResponse.File do
  @moduledoc false
  alias Pbuf.Decoder
  import Bitwise, only: [bsr: 2, band: 2]
  @derive Jason.Encoder
  defstruct [
    name: nil,
    insertion_point: nil,
    content: nil
  ]
  @type t :: %__MODULE__{
    name: String.t,
    insertion_point: String.t,
    content: String.t
  }
  @spec new(Enum.t) :: t
  def new(data) do
    struct(__MODULE__, data)
  end
  @spec encode_to_iodata!(t | map) :: iodata
  def encode_to_iodata!(data) do
    alias Elixir.Pbuf.Encoder
    [
      Encoder.field(:string, data.name, <<10>>),
      Encoder.field(:string, data.insertion_point, <<18>>),
      Encoder.field(:string, data.content, <<122>>),
    ]
  end
  @spec encode!(t | map) :: binary
  def encode!(data) do
    :erlang.iolist_to_binary(encode_to_iodata!(data))
  end
  @spec decode!(binary) :: t
  def decode!(data) do
    Decoder.decode!(__MODULE__, data)
  end
  @spec decode(binary) :: {:ok, t} | :error
  def decode(data) do
    Decoder.decode(__MODULE__, data)
  end
  
  def decode(acc, <<10, data::binary>>) do
    Decoder.field(:string, :name, acc, data)
  end
  
  def decode(acc, <<18, data::binary>>) do
    Decoder.field(:string, :insertion_point, acc, data)
  end
  
  def decode(acc, <<122, data::binary>>) do
    Decoder.field(:string, :content, acc, data)
  end
  # failed to decode, either this is an unknown tag (which we can skip), or
  # it is a wrong type (which is an error)
  def decode(acc, data) do
    {prefix, data} = Decoder.varint(data)
    tag = bsr(prefix, 3)
    type = band(prefix, 7)
    case tag in [1,2,15] do
      false -> {acc, Decoder.skip(type, data)}
      true ->
        err = %Decoder.Error{
          tag: tag,
          module: __MODULE__,
          message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
        }
        {:error, err}
    end
  end
  def __finalize_decode__(args) do
    struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
                  {k, v}, acc -> Map.put(acc, k, v)
    end)
    struct
  end
end | 
	lib/protoc/google/protobuf/compiler/plugin.pb.ex | 0.794704 | 0.522872 | 
	plugin.pb.ex | 
	starcoder | 
| 
	defmodule Sippet.Proxy do
  @moduledoc """
  Defines very basic operations commonly used in SIP Proxies.
  """
  alias Sippet.Message, as: Message
  alias Sippet.Message.RequestLine, as: RequestLine
  alias Sippet.Message.StatusLine, as: StatusLine
  alias Sippet.URI, as: URI
  alias Sippet.Transactions, as: Transactions
  alias Sippet.Transports, as: Transports
  @type client_key :: Transactions.Client.Key.t
  @type request :: Message.request
  @type on_request_sent ::
      {:ok, client_key, request} |
      {:error, reason :: term} |
      no_return
  @type on_request_sent_stateless ::
      {:ok, request} |
      {:error, reason :: term} |
      no_return
  @type on_response_sent ::
      :ok |
      {:error, reason :: term} |
      no_return
  @doc """
  Adds a Record-Route header to the request.
  When a proxy wishes to remain on the path of future requests in a dialog
  created by this request (assuming the request creates a dialog), it inserts a
  Record-Route header field value in the request, before forwarding.
  The `hop` parameter indicates the destination where requests
  and responses in a dialog should pass. The `hop` SIP-URI will get a `"lr"`
  parameter, if it does not have one, and will be placed as the first header
  value.
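  ## Example
  A sketch (the `hop` URI fields shown here are illustrative assumptions):
      hop = %Sippet.URI{scheme: "sip", host: "proxy.example.com", port: 5060}
      request = Sippet.Proxy.add_record_route(request, hop)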
  """
  @spec add_record_route(Message.request, URI.t) :: Message.request
  def add_record_route(%Message{start_line: %RequestLine{}} = request,
      %URI{} = hop) do
    parameters =
      if hop.parameters == nil do
        ";lr"
      else
        hop.parameters
        |> URI.decode_parameters()
        |> Map.put("lr", nil)
        |> URI.encode_parameters()
      end
    record_route = {"", %{hop | parameters: parameters}, %{}}
    request |> Message.update_header(:record_route, [record_route],
      fn list -> [record_route | list] end)
  end
  @doc """
  Adds a Via header to the request.
  A proxy must insert a Via header field value before the existing request Via
  header field values. A `"branch"` parameter will be randomly computed as
  being a 72-bit random string starting with the magic cookie `"z9hG4bK"`.
  """
  @spec add_via(Message.request, Message.protocol, host :: String.t,
                dport :: integer) :: Message.request
  def add_via(%Message{start_line: %RequestLine{}} = request,
      protocol, host, port) do
    add_via(request, protocol, host, port, Message.create_branch())
  end
  @doc """
  Adds a Via header to the request with a supplied `branch`.
  A proxy must insert a Via header field value before the existing request Via
  header field values. If the `branch` parameter does not start with the magic
  cookie `"z9hG4bK"`, one will be added.
  """
  @spec add_via(Message.request, Message.protocol, host :: String.t,
                dport :: integer, branch :: String.t) :: Message.request
  def add_via(%Message{start_line: %RequestLine{}} = request,
      protocol, host, port, branch) do
    branch =
      if branch |> String.starts_with?(Sippet.Message.magic_cookie) do
        branch
      else
        Sippet.Message.magic_cookie <> branch
      end
    params = %{"branch" => branch}
    new_via = {{2, 0}, protocol, {host, port}, params}
    request |> Message.update_header(:via, [new_via],
      fn list -> [new_via | list] end)
  end
  @doc """
  Returns a binary representing a textual branch identifier obtained from the
  topmost Via header of the request.
  The derived branch has the property of being the same whenever the topmost
  Via header of the `request` is the same, as in the case of retransmissions.
  This operation is usually performed for stateless proxying, like in the case
  of ACK requests, and contains the magic cookie. In order to correctly derive
  the branch, the input `request` must not have been modified after reception.
  """
  @spec derive_branch(request) :: binary
  def derive_branch(%Message{start_line: %RequestLine{}} = request) do
    [{_, _, _, %{"branch" => branch}} | _] = request.headers.via
    input =
      if branch |> String.starts_with?(Message.magic_cookie) do
        branch
      else
        request_uri = URI.to_string(request.start_line.request_uri)
        [{_, protocol, {address, port}, params} | _] = request.headers.via
        {_, _, %{"tag" => from_tag}} = request.headers.from
        call_id = request.headers.call_id
        {sequence, _method} = request.headers.cseq
        to_tag =
          case request.headers.to do
            {_, _, %{"tag" => to_tag}} ->
              to_tag
            _other ->
              ""
          end
        via_params =
          Map.to_list(params)
          |> Enum.reduce([], fn {k, v}, acc -> [k, v | acc] end)
        [request_uri, to_string(protocol), address, to_string(port),
         call_id, from_tag, to_tag, to_string(sequence), via_params]
      end
    hash =
      :crypto.hmac(:ripemd160, "sippet", input)
      |> Base.url_encode64(padding: false)
    Message.magic_cookie <> hash
  end
  @doc """
  Forwards the request statefully.
  The request is sent using a client transaction. If it cannot be sent using
  one, it will raise an exception.
  This function will honor the start line `request_uri`.
  """
  @spec forward_request(Message.request) :: on_request_sent
  def forward_request(%Message{start_line: %RequestLine{}} = request) do
    request =
      request
      |> do_handle_max_forwards()
      |> do_maybe_handle_route()
    case request |> Transactions.send_request() do
      {:ok, client_key} -> {:ok, client_key, request}
      other -> other
    end
  end
  defp do_handle_max_forwards(message) do
    if message |> Message.has_header?(:max_forwards) do
      max_fws = message.headers.max_forwards
      if max_fws <= 0 do
        raise ArgumentError, "invalid :max_forwards => #{inspect max_fws}"
      else
        message |> Message.put_header(:max_forwards, max_fws - 1)
      end
    else
      message |> Message.put_header(:max_forwards, 70)
    end
  end
  defp do_maybe_handle_route(%Message{start_line: %RequestLine{}} = request) do
    {is_strict, target_uri} =
      if request |> Message.has_header?(:route) do
        {_, target_uri, _} = hd(request.headers.route)
        if target_uri.parameters == nil do
          {false, nil}
        else
          case URI.decode_parameters(target_uri.parameters) do
            %{"lr" => _} -> {false, nil}
            _no_lr -> {true, target_uri}
          end
        end
      else
        {false, nil}
      end
    if is_strict do
      # strict-routing requirements
      request_uri = request.start_line.request_uri
      request =
        request
        |> Message.put_header_back(:route, {"", request_uri, %{}})
        |> Message.delete_header_front(:route)
      
      %{request | start_line:
        %{request.start_line | request_uri: target_uri}}
    else
      request  # no change
    end
  end
  @doc """
  Forwards the request to a given `request_uri`.
  If the method is `:ack`, the request will be sent directly to the network transport.
  Otherwise, a new client transaction will be created.
  This function will override the start line `request_uri` with the supplied one.
  """
  @spec forward_request(Message.request, URI.t) :: on_request_sent
  def forward_request(%Message{start_line: %RequestLine{}} = request,
      %URI{} = request_uri) do
    %{request | start_line: %{request.start_line | request_uri: request_uri}}
    |> forward_request()
  end
  @doc """
  Forwards the request statelessly.
  The request will be sent directly to the network transport.
  """
  @spec stateless_forward_request(request) :: on_request_sent_stateless
  def stateless_forward_request(
      %Message{start_line: %RequestLine{}} = request) do
    request =
      request
      |> do_handle_max_forwards()
      |> do_maybe_handle_route()
    request |> Transports.send_message(nil)
    {:ok, request}
  end
  @doc """
  Forwards a response.
  You should check and remove the topmost Via before calling this function.
  The response will find its way back to an existing server transaction, if one
  exists, or will be sent directly to the network transport otherwise.
  """
  @spec forward_response(Message.response) :: on_response_sent
  def forward_response(%Message{start_line: %StatusLine{}} = response) do
    response
    |> fallback_to_transport(&Transactions.send_response/1)
  end
  defp fallback_to_transport(message, fun) do
    case fun.(message) do
      {:error, :no_transaction} ->
        message |> Transports.send_message()
      other ->
        other
    end
  end
  @doc """
  Forwards a response using an existing server transaction key.
  See `forward_response/1`.
  """
  @spec forward_response(Message.response, Transactions.Server.Key.t)
                         :: on_response_sent
  def forward_response(%Message{start_line: %StatusLine{}} = response,
                       %Transactions.Server.Key{} = server_key) do
    response
    |> fallback_to_transport(&Transactions.send_response(&1, server_key))
  end
end | 
	lib/sippet/proxy.ex | 0.869853 | 0.400749 | 
	proxy.ex | 
	starcoder | 
| 
	defmodule Dion.Lexers.RST.Inline do
  @moduledoc """
  Provides function used to perform lexical analysis, searching for
  inline reStructuredText (RST) elements.
  """
  @moduledoc since: "0.1.0"
  @doc """
  Performs inline lexical analysis on the provided text.
  The `lineno` argument is set on the return elements for reference and
  error reporting purposes.
  """
  def lex(text, lineno) do
    text
    |> String.split(~r/ +/)
    |> Enum.map(
      fn word -> format(word, lineno) end
    )
    |> Stream.map(&Task.async(Dion.Lexers.RST.Inline, :analyze_init, [&1]))
    |> Enum.map(&Task.await(&1))
    |> List.flatten
    |> Stream.map(&Task.async(Dion.Lexers.RST.Inline, :analyze_term, [&1]))
    |> Enum.map(&Task.await(&1))
    |> List.flatten
  end
  @doc """
  Formats the given string into a map, setting the line number, type, raw text,
  text, and the forward and reversed grapheme lists.
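  ## Example
      iex> Dion.Lexers.RST.Inline.format("cat", 1)
      %{chars: ["c", "a", "t"], lineno: 1, rawtext: "cat", rchars: ["t", "a", "c"], text: "cat", type: :string}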
  """
  @doc since: "0.1.0"
  def format(text, lineno, type) do
    graph = String.graphemes(text)
    data = %{
      lineno: lineno,
      rawtext: text,
      text: text,
      chars: graph,
      rchars: Enum.reverse(graph),
      type: :string
    }
    if type == nil do
      data
    else
      Map.put(data, :type, type)
    end
  end
  def format(text, lineno) do
    format(text, lineno, nil)
  end
  @doc """
  Removes initial characters and reanalyzes the start of the string.
  """
  @doc since: "0.1.0"
  def strip_init(word, chars) do
    format(
      String.slice(word[:text], chars..-1),
      word[:lineno]
    )
    |> analyze_init
  end
  @doc """
  Removes terminal characters and reanalyzes the end of the string.
  """
  @doc since: "0.1.0"
  def strip_term(word, chars) do
    format(
      String.slice(word[:text], 0..chars),
      word[:lineno]
    )
    |> analyze_term
  end
  @doc """
  Performs lexical analysis on the start of the string.
  """
  @doc since: "0.1.0"
  def analyze_init(word) do
    case word[:chars] do
      # Bold
      ["*", "*" | _] -> [:open_bold, strip_init(word, 2)]
      ["*" | _] -> [:open_italic, strip_init(word, 1)]
      ["`", "`" | _] -> [:open_code, strip_init(word, 2)]
      ["`" | _] -> [:open_literal, strip_init(word, 1)]
      # an opening quote is a single character, so strip only one
      ["\"" | _] -> [:open_quote, strip_init(word, 1)]
      [":" | _] -> [:open_colon, strip_init(word, 1)]
      ["_" | _] -> [:open_score, strip_init(word, 1)]
      _ -> [word]
    end
  end
  @doc """
  Evaluates the given word, returning atoms without further consideration and
  passing maps on for lexical analysis of the end of the string.
  """
  @doc since: "0.1.0"
  def analyze_term(word) do
    if is_atom(word) do
      word
    else
      analyze_term_content(word)
    end
  end
  @doc """
  Performs lexical analysis on the end of the string.
  """
  @doc since: "0.1.0"
  def analyze_term_content(word) do
    term = Enum.at(word[:rchars], 0)
    cond do
      # Punctuation
      term in [".", ",", ";", "'", "?", "-", "]", ")", "}"] ->
        [strip_term(word, -2), format(term, word[:lineno], :punct)]
      true ->
        case word[:rchars] do
          ["*", "*" | _] -> [strip_term(word, -3), :close_bold]
          ["*" | _] -> [strip_term(word, -2), :close_italic]
          ["`", "`" | _] -> [strip_term(word, -3), :close_code]
          ["`" | _] -> [strip_term(word, -2), :close_literal]
          ["\"" | _] -> [strip_term(word, -2), :close_quote]
          [":" | _] -> [strip_term(word, -2), :close_colon]
          ["_" | _] -> [strip_term(word, -2), :close_score]
          _ -> [word]
        end
    end
  end
end | 
	lib/dion/lexers/rst/inline.ex | 0.725649 | 0.498779 | 
	inline.ex | 
	starcoder | 
| 
	defmodule Samples do
  def extract(contents, type) do
    contents
    |> extract_table_parts
    |> (fn {vars, _type, keyword_lists} -> {vars, type, keyword_lists} end).()
    |> process_table_parts
  end
  def extract(contents) do
    contents
    |> extract_table_parts
    |> process_table_parts
  end
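  # Sketch of the kind of `do` block extract/1 expects (the macro that captures
  # and passes this block is assumed to be defined elsewhere):
  #
  #     name    | age
  #     "Alice" | 30
  #     "Bob"   | 40
  #
  # With no type given, each row becomes a quoted map, e.g. %{name: "Alice", age: 30}.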
  defp extract_table_parts(contents) do
    {vars, type, fields, fields_values} =
      contents
      |> normalize_contents
      |> contents_to_table
      |> slice_table
    keyword_lists = zip_fields_and_values(fields, fields_values)
    {vars, type, keyword_lists}
  end
  defp process_table_parts({[], type, keyword_lists}) do
    to_typed_list(keyword_lists, type)
  end
  defp process_table_parts({vars, type, keyword_lists}) do
    to_assignments(vars, type, keyword_lists)
  end
  defp slice_table(table) do
    [header | rows] = extract_header_rows(table)
    {type, fields} = extract_type_and_fields(header)
    {vars, fields_values} = extract_vars_and_fields_values(type, rows)
    {vars, type, fields, fields_values}
  end
  defp to_assignments(vars, type, keyword_lists) do
    vars
    |> Enum.zip(keyword_lists)
    |> Enum.map(fn {var_name, value} ->
      var = Macro.var(var_name, nil)
      quote do
        unquote(var) = unquote(replace_value(type, value))
      end
    end)
  end
  defp to_typed_list(contents, nil) do
    to_typed_list(contents, {:%{}, [], []})
  end
  defp to_typed_list(contents, type) do
    Enum.map(contents, fn item ->
      replace_value(type, item)
    end)
  end
  defp extract_header_rows([]), do: [[nil]]
  defp extract_header_rows(table), do: table
  def extract_type_and_fields([type = {atom, _, []} | fields]) when atom == :%{} do
    {type, fields}
  end
  def extract_type_and_fields([{:__aliases__, _, [_]} = type | fields]) do
    {type, fields}
  end
  def extract_type_and_fields(fields = [{field, [_], _} | _]) when is_atom(field) do
    {nil, Enum.map(fields, fn {field, [_], _} -> field end)}
  end
  def extract_type_and_fields(fields = [field | _]) when is_atom(field) do
    {nil, fields}
  end
  def extract_type_and_fields([type | fields]) do
    {type, fields}
  end
  def extract_vars_and_fields_values(nil, rows) do
    {[], rows}
  end
  def extract_vars_and_fields_values(_type, rows) do
    rows
    |> Enum.map(fn [{var, [line: _line], _} | fields_values] -> {var, fields_values} end)
    |> :lists.unzip()
  end
  defp zip_fields_and_values(fields, rows) do
    Enum.map(rows, fn row ->
      Enum.zip(fields, row)
    end)
  end
  # As structs by module name
  defp replace_value({:__aliases__, [counter: _, line: _], [module]}, value) do
    {:%, [], [{:__aliases__, [], [module]}, {:%{}, [], value}]}
  end
  defp replace_value({:__aliases__, [line: _], [module]}, value) do
    {:%, [], [{:__aliases__, [], [module]}, {:%{}, [], value}]}
  end
  # As structs
  defp replace_value({:%, meta, [lhs, {:%{}, _, _value}]}, value) do
    {:%, meta, [lhs, {:%{}, [], value}]}
  end
  # As maps
  defp replace_value({:%{}, meta, []}, value) do
    {:%{}, meta, value}
  end
  # As keyword list
  defp replace_value([], value) do
    value
  end
  defp contents_to_table(contents) do
    case contents do
      [do: nil] -> []
      nil -> []
      _ -> extract_rows(contents)
    end
  end
  defp extract_rows(contents) do
    contents |> Enum.map(&extract_row(&1))
  end
  defp extract_row([row]) do
    row |> extract_row
  end
  defp extract_row(row) do
    row |> extract_cells([]) |> Enum.reverse()
  end
  defp extract_cells({:|, _, [lhs, rhs]}, values) do
    rhs |> extract_cells([lhs | values])
  end
  defp extract_cells(value, values) do
    [value | values]
  end
  defp normalize_contents(contents) do
    case contents do
      [do: {:__block__, _, code}] -> code
      [do: code] -> [code]
    end
  end
end | 
	lib/samples.ex | 0.581065 | 0.458349 | 
	samples.ex | 
	starcoder | 
| 
	defmodule GitGud.OAuth2.Provider do
  @moduledoc """
  OAuth2.0 provider schema and helper functions.
  """
  use Ecto.Schema
  alias GitGud.DB
  alias GitGud.Account
  import Ecto.Changeset
  schema "oauth2_providers" do
    belongs_to :account, Account
    field :provider, :string
    field :provider_id, :integer
    field :token, :string
    timestamps()
  end
  @type t :: %__MODULE__{
    id: pos_integer,
    account_id: pos_integer,
    account: Account.t,
    provider: binary,
    provider_id: pos_integer,
    token: binary,
    inserted_at: NaiveDateTime.t,
    updated_at: NaiveDateTime.t
  }
  @doc """
  Creates a new OAuth2.0 provider with the given `params`.
  ```elixir
  {:ok, provider} = GitGud.OAuth2.Provider.create(account_id: user.account.id, provider: "github", provider_id: 12345, token: "<PASSWORD>")
  ```
  This function validates the given `params` using `changeset/2`.
  """
  @spec create(map|keyword) :: {:ok, t} | {:error, Ecto.Changeset.t}
  def create(params) do
    DB.insert(changeset(%__MODULE__{}, Map.new(params)))
  end
  @doc """
  Similar to `create/1`, but raises an `Ecto.InvalidChangesetError` if an error occurs.
  """
  @spec create!(map|keyword) :: t
  def create!(params) do
    DB.insert!(changeset(%__MODULE__{}, Map.new(params)))
  end
  @doc """
  Deletes the given OAuth2.0 `provider`.
  """
  @spec delete(t) :: {:ok, t} | {:error, Ecto.Changeset.t}
  def delete(%__MODULE__{} = provider) do
    DB.delete(provider)
  end
  @doc """
  Similar to `delete/1`, but raises an `Ecto.InvalidChangesetError` if an error occurs.
  """
  @spec delete!(t) :: t
  def delete!(%__MODULE__{} = provider) do
    DB.delete!(provider)
  end
  @doc """
  Returns an authentication provider changeset for the given `params`.
  """
  @spec changeset(t, map) :: Ecto.Changeset.t
  def changeset(%__MODULE__{} = provider, params \\ %{}) do
    provider
    |> cast(params, [:account_id, :provider, :provider_id, :token])
    |> validate_required([:provider, :provider_id, :token])
    |> assoc_constraint(:account)
    |> unique_constraint(:provider_id, name: :authentication_providers_provider_provider_id_index)
  end
end | 
	apps/gitgud/lib/gitgud/schemas/oauth2_provider.ex | 0.839405 | 0.695015 | 
	oauth2_provider.ex | 
	starcoder | 
| 
	defmodule TelemetryMetricsPrometheus.Core do
  @moduledoc """
  Prometheus Reporter for [`Telemetry.Metrics`](https://github.com/beam-telemetry/telemetry_metrics) definitions.
  Provide a list of metric definitions to the `child_spec/1` function. It's recommended to
  add this to your supervision tree.
      def start(_type, _args) do
        # List all child processes to be supervised
        children = [
          {TelemetryMetricsPrometheus.Core, [
            metrics: [
              counter("http.request.count"),
              sum("http.request.payload_size", unit: :byte),
              last_value("vm.memory.total", unit: :byte)
            ]
          ]}
        ]
        opts = [strategy: :one_for_one, name: ExampleApp.Supervisor]
        Supervisor.start_link(children, opts)
      end
  Note that aggregations for distributions (histogram) only occur at scrape time.
  These aggregations only have to process events that have occurred since the last
  scrape, so it's recommended at this time to keep an eye on scrape durations if
  you're reporting a large number of distributions or you have a high tag cardinality.
  ## Telemetry.Metrics to Prometheus Equivalents
  Metric types:
    * Counter - Counter
    * Distribution - Histogram
    * LastValue - Gauge
    * Sum - Counter
    * Summary - Summary (Not supported)
  ### Units
  Prometheus recommends the usage of base units for compatibility - [Base Units](https://prometheus.io/docs/practices/naming/#base-units).
  This is simple to do with `:telemetry` and `Telemetry.Metrics` as all memory
  related measurements in the BEAM are reported in bytes and Metrics provides
  automatic time unit conversions.
  Note that the measurement unit should be used as part of the reported name in
  the case of histograms and gauges reported to Prometheus. As such, it is
  important to explicitly define the unit of measure for these types when the
  unit is time or memory related.
  It is suggested to not mix units, e.g. seconds with milliseconds.
  It is required to define your buckets according to the final converted unit,
  since measurements are converted at the time the event is handled, prior
  to bucketing.
  #### Memory
  Report memory as `:byte`.
  #### Time
  Report durations as `:second`. The BEAM and `:telemetry` events use `:native` time
  units. Converting to seconds is as simple as adding the conversion tuple for
  the unit - `{:native, :second}`
  ### Naming
  `Telemetry.Metrics` definition names do not translate easily to Prometheus naming
  conventions. By default, the name provided when creating your definition uses parts
  of the provided name to determine what event to listen to and which event measurement
  to use.
  For example, `"http.request.duration"` results in listening for `[:http, :request]`
  events and using `:duration` from the event measurements. Prometheus would recommend
  a name of `http_request_duration_seconds` as a good name.
  It is therefore recommended to use the name in your definition to reflect the name
  you wish to see reported, e.g. `http.request.duration.seconds` or `[:http, :request, :duration, :seconds]` and use the `:event_name` override and `:measurement` options in your definition.
  Example:
      Metrics.distribution(
        "http.request.duration.seconds",
        event_name: [:http, :request, :complete],
        measurement: :duration,
        unit: {:native, :second},
        reporter_options: [
          buckets: [0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1]
        ]
      )
  The exporter sanitizes names to Prometheus' requirements ([Metric Naming](https://prometheus.io/docs/instrumenting/writing_exporters/#naming)) and joins the event name parts with an underscore.
  ### Labels
  Labels in Prometheus are referred to as `:tags` in `Telemetry.Metrics` - see the docs
  for more information on tag usage.
  **Important: Each tag + value results in a separate time series. For distributions, this
  is further complicated as a time series is created for each bucket plus one for measurements
  exceeding the limit of the last bucket - `+Inf`.**
  It is recommended, but not required, to abide by Prometheus' best practices regarding labels -
  [Label Best Practices](https://prometheus.io/docs/practices/naming/#labels)
  ### Missing or Invalid Measurements and Tags
  If a measurement value is missing or non-numeric, the error is logged at the `debug` level
  and the event is not recorded. Events with missing tags are also logged and skipped.
  """
  alias Telemetry.Metrics
  alias TelemetryMetricsPrometheus.Core.{Aggregator, Exporter, Registry}
  require Logger
  @type metrics :: [Metrics.t()]
  @type prometheus_option ::
          {:metrics, metrics()}
          | {:name, atom()}
  @type prometheus_options :: [prometheus_option()]
  @doc """
  Reporter's child spec.
  This function allows you to start the reporter under a supervisor like this:
      children = [
        {TelemetryMetricsPrometheus.Core, options}
      ]
  See `start_link/1` for options.
  """
  @spec child_spec(prometheus_options()) :: Supervisor.child_spec()
  def child_spec(options) do
    opts = ensure_options(options)
    id =
      case Keyword.get(opts, :name, :prometheus_metrics) do
        name when is_atom(name) -> name
        {:global, name} -> name
        {:via, _, name} -> name
      end
    spec = %{
      id: id,
      start: {Registry, :start_link, [opts]}
    }
    Supervisor.child_spec(spec, [])
  end
  @doc """
  Start the `TelemetryMetricsPrometheus.Core.Supervisor`
  Available options:
  * `:name` - name of the reporter instance. Defaults to `:prometheus_metrics`
  * `:metrics` - a list of metrics to track.
  """
  @spec start_link(prometheus_options()) :: GenServer.on_start()
  def start_link(options) do
    opts = ensure_options(options)
    Registry.start_link(opts)
  end
  @doc """
  Returns a metrics scrape in Prometheus exposition format for the given reporter
  name - defaults to `:prometheus_metrics`.
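  ## Example
      # output is illustrative; the real scrape depends on the registered metrics
      TelemetryMetricsPrometheus.Core.scrape()
      #=> "# HELP http_request_count ..."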
  """
  @spec scrape(name :: atom()) :: String.t()
  def scrape(name \\ :prometheus_metrics) do
    config = Registry.config(name)
    metrics = Registry.metrics(name)
    :ok = Aggregator.aggregate(metrics, config.aggregates_table_id, config.dist_table_id)
    Aggregator.get_time_series(config.aggregates_table_id)
    |> Exporter.export(metrics)
  end
  @spec ensure_options(prometheus_options()) :: prometheus_options()
  defp ensure_options(options) do
    Keyword.merge(default_options(), options)
  end
  @spec default_options() :: prometheus_options()
  defp default_options() do
    [
      name: :prometheus_metrics
    ]
  end
end | 
	lib/core.ex | 0.935236 | 0.632716 | 
	core.ex | 
	starcoder | 
| 
	defmodule ToyRobot.Api do
  @moduledoc """
  The Api module contains the common higher level functions
  that are called from the Command Line `ToyRobot.Cli`
  and File Driven `ToyRobot.FromFile` interfaces.
  This includes functions to:
  - start and stop the `ToyRobot.Server`
  - run commands
  - get server state
  - load a command file
  - and check if command text is valid
  Aliased Modules
      ToyRobot.Server
      ToyRobot.Parser
      ToyRobot.Logic
  """
  alias ToyRobot.Server
  alias ToyRobot.Parser
  alias ToyRobot.Logic
  @doc """
  Starts a supervised gen_server to manage state
  for the current position of the toy robot.
  See: `ToyRobot.Server`
  ## Examples
      iex> start_server()
      {:ok, server_pid}
  """
  def start_server() do
    Server.start_link()
  end
  @doc """
  Parses a command via `ToyRobot.Parser.parse_command/1`,
  which converts the command to a map in the form of `%{cmd: cmd, x: x, y: y, face: face}`,
  then pipes it to `process_cmd/2`.
  Valid but un-runnable command, such as a "MOVE" that would cause the ToyRobot to fall off the table will be ignored.
  ## Arguments
  cmd:        String
  server_pid: PID (Process ID)
  ## Examples
      iex> ToyRobot.Api.run_cmd("MOVE", server_pid)
      :ok
      iex> ToyRobot.Api.run_cmd(10, server_pid)
      nil
  """
  def run_cmd(cmd, server_pid) do
    if is_binary(cmd) do
      Parser.parse_command(cmd)
      |> process_cmd(server_pid)
    end
  end
  @doc """
  Processes commands to update the server's state.
  Takes a parsed command map and the `ToyRobot.Server` PID and delegates
  valid commands to command calls in the `ToyRobot.Server`.
  Invalid commands will print: "invalid command: command_name".
  """
  def process_cmd(map, server_pid) do
    # IO.puts("processing cmd: #{inspect(map)}")
    if (map.cmd in Parser.cmds()) do
      cond do
        map.cmd == "MOVE"   -> Server.move(server_pid)
        map.cmd == "LEFT"   -> Server.left(server_pid)
        map.cmd == "RIGHT"  -> Server.right(server_pid)
        map.cmd == "PLACE"  -> Server.place(server_pid, [map.x, map.y, map.face])
        map.cmd == "REPORT" -> cur_state = get_server_state(server_pid)
                               Logic.report(cur_state)
        true -> IO.puts("invalid command: #{map.cmd}")
      end
    end
  end
  @doc """
  Gets the current state of the ToyRobot.
  Delegates to `ToyRobot.Server.current_state(server_pid)`.
  Returns a ToyRobot.Logic struct in the form of: `%ToyRobot.Logic{face: :n, x: 0, y: 0}`
  """
  def get_server_state(server_pid) do
    Server.current_state(server_pid)
  end
  @doc """
  Stops the server by calling `Process.exit(server_pid, :normal)`.
  """
  def stop_server(server_pid) do
    Process.exit(server_pid, :normal)
  end
  @doc """
  Returns true if `command text (cmd_txt)` is valid, returns false otherwise.
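  ## Examples
      # assumes "MOVE" is a command recognized by ToyRobot.Parser
      valid_cmd?("MOVE") #=> true
      valid_cmd?("")     #=> false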
  """
  def valid_cmd?(cmd_txt) do
    if cmd_txt != "" do
      [cmd, _args] =
        Parser.split_cmd_txt(cmd_txt)
        |> Parser.validate_cmd()
      !is_nil(cmd)
    else
      false
    end
  end
end | 
	lib/toy_robot/api.ex | 0.726329 | 0.568566 | 
	api.ex | 
	starcoder | 
| 
	defmodule Kalevala.Character.Controller do
  @moduledoc ~S"""
  A `Kalevala.Character.Controller` is the largest building block of handling
  text. When starting the foreman, an initial controller is given. This
  controller is initialized and used from then on. The callbacks required will
  be called at the appropriate time with a new `Conn`.
  Controllers act as a simple state machine, only allowing a transition to the
  next one you set in the `Conn`. For instance, you can contain all login logic
  in a `LoginController`, and handle game commands in its own controller, any
  paging can be handled in a `PagerController` which can suppress any outgoing
  text to prevent scrolling while reading, etc.
  ## Example Controller
  ```elixir
    defmodule Kantele.Character.LoginController do
      use Kalevala.Character.Controller
      # ... code 
      
      @impl true
      def init(conn) do
        conn
        |> put_session(:login_state, :username)
        |> render(LoginView, "welcome")
        |> prompt(LoginView, "name")
      end
      @impl true
      def recv_event(conn, event) do
        case event.topic do
          "Login" ->
            conn
            |> process_username(event.data["username"])
            |> process_password(event.data["password"])
          _ ->
            conn
        end
      end
      @impl true
      def recv(conn, ""), do: conn
      def recv(conn, data) do
        data = String.trim(data)
        case get_session(conn, :login_state) do
          :username ->
            process_username(conn, data)
          :password ->
            process_password(conn, data)
          :registration ->
            process_registration(conn, data)
        end
      end
      defp process_username(conn, username) do
        case username do
          "" ->
            prompt(conn, LoginView, "name")
          <<4>> ->
            conn
            |> prompt(QuitView, "goodbye")
            |> halt()
          "quit" ->
            conn
            |> prompt(QuitView, "goodbye")
            |> halt()
          username ->
            case Accounts.exists?(username) do
              true ->
                conn
                |> put_session(:login_state, :password)
                |> put_session(:username, username)
                |> send_option(:echo, true)
                |> prompt(LoginView, "password")
              false ->
                conn
                |> put_session(:login_state, :registration)
                |> put_session(:username, username)
                |> prompt(LoginView, "check-registration")
            end
        end
      end
      # ... code ...
    end
  ```
  ## Managing State (assigns, session, and flash)
  Controller state is managed in one of three different ways: `session`, `assigns`, and
  `flash`. These states are made available to the `Views`, which can utilize them as
  variables as needed.
  ### Session
  The `session` maintains state for the lifetime of the player connection. Session state
  can be set using `put_session/3` and variables can be retrieved using `get_session/2`.
  ### Flash
  The `flash` maintains state for the duration of a player's interaction with a single
  controller. Switching between controllers will cause the flash to be reset.
  ### Assigns
  Assigns are temporary storage that allow variables to be made available
  to `Views`.
  ## Prompts and Render
  A prompt is text that is sent followed by a newline character. The above code
  `prompt(LoginView, "check-registration")` will render the
  `"check-registration"` prompt of the `LoginView` followed by a newline.
  `render`, on the other hand, outputs the text but is not followed by a newline. That
  means contiguous calls to `render` will append the output to the same line
  as the previous.
  ## Switching Controllers
  Switching controllers is done by calling the
  `Kalevala.Character.Conn.put_controller/2` function. This will immediately switch
  to the provided controller and call its `init/1` function.
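  For example (the target controller module here is hypothetical):
  ```elixir
  def recv(conn, "play") do
    put_controller(conn, Kantele.Character.CommandController)
  end
  ```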
  """
  alias Kalevala.Character.Conn
  alias Kalevala.Character.Event
  @doc """
  Called when the controller is first switched to
  """
  @callback init(Conn.t()) :: Conn.t()
  @doc """
  Called when text is received from the player
  """
  @callback recv(Conn.t(), String.t()) :: Conn.t()
  @doc """
  Called when the connection receives an event (e.g. incoming GMCP)
  """
  @callback recv_event(Conn.t(), any()) :: Conn.t()
  @doc """
  Called when a `Kalevala.Character.Event` is sent to the foreman process
  """
  @callback event(Conn.t(), Event.t()) :: Conn.t()
  @doc """
  Called when a `Kalevala.Character.Event.Display` is sent to the foreman process
  """
  @callback display(Conn.t(), Event.t()) :: Conn.t()
  @doc """
  Marks the module as a controller and imports controller functions
  """
  defmacro __using__(_opts) do
    quote do
      @behaviour unquote(__MODULE__)
      import Kalevala.Character.Conn
      require Logger
      alias Kalevala.Character.Event
      @impl true
      def recv_event(conn, event) do
        Logger.debug("Received event - #{inspect(event)}")
        conn
      end
      @impl true
      def event(conn, event) do
        Logger.debug("Received event - #{inspect(event)}")
        conn
      end
      @impl true
      def display(conn, event) do
        conn
        |> Map.put(:options, event.options)
        |> Map.put(:output, event.output)
      end
      defoverridable display: 2, event: 2, recv_event: 2
    end
  end
end | 
	lib/kalevala/character/controller.ex | 0.889156 | 0.836955 | 
	controller.ex | 
	starcoder | 
| 
	defmodule Pixie.Monitor do
  use Timex
  @moduledoc """
  Allows you to monitor various events within Pixie.
  Internally Pixie.Monitor is implemented using GenEvent, so you're free to
  bypass using the Pixie.Monitor behaviour and use your own GenEvent if the
  need arises.
  Usage example:
  ```elixir
  defmodule MyMonitor do
    use Pixie.Monitor
    def created_channel channel_name, at do
      Logger.info "Channel \#\{channel_name} created at \#\{format at}"
    end
    def destroyed_channel channel_name, at do
      Logger.info "Channel \#\{channel_name} destroyed at \#\{format at}"
    end
    defp format timestamp do
      timestamp
        |> Date.from(:timestamp)
        |> DateFormat.format!("{UNIX}")
    end
  end
  ```
  """
  defmacro __using__(_opts) do
    quote do
      use GenEvent
      use Timex
      def handle_event({fun, args}, state) when is_atom(fun) and is_list(args) do
        apply __MODULE__, fun, args
        {:ok, state}
      end
      def created_client(_client_id, _at), do: :ok
      def destroyed_client(_client_id, _reason, _at), do: :ok
      def created_channel(_channel_name, _at), do: :ok
      def destroyed_channel(_channel_name, _at), do: :ok
      def client_subscribed(_client_id, _channel_name, _at), do: :ok
      def client_unsubscribed(_client_id, _channel_name, _at), do: :ok
      def received_message(_client_id, _message_id, _at), do: :ok
      def delivered_message(_client_id, _message_id, _at), do: :ok
      defoverridable [
        created_client: 2,
        destroyed_client: 3,
        created_channel: 2,
        destroyed_channel: 2,
        client_subscribed: 3,
        client_unsubscribed: 3,
        received_message: 3,
        delivered_message: 3
      ]
    end
  end
  @doc """
  Called when a new client is created during protocol handshake.
  """
  @callback created_client(client_id :: binary, at :: {megasecs :: integer, seconds :: integer, microsecs :: integer}) :: atom
  @doc """
  Called when a client is destroyed - either by an explicit disconnect request
  from the client, or by a system generated timeout.
  """
  @callback destroyed_client(client_id :: binary, reason :: binary | atom, at :: {megasecs :: integer, seconds :: integer, microsecs :: integer}) :: atom
  @doc """
  Called when a new channel is created - this happens when a client subscribes
  to it for the first time.
  """
  @callback created_channel(channel_name :: binary, at :: {megasecs :: integer, seconds :: integer, microsecs :: integer}) :: atom
  @doc """
  Called when a channel is destroyed - this happens when the last client
  unsubscribes from it.
  """
  @callback destroyed_channel(channel_name :: binary, at :: {megasecs :: integer, seconds :: integer, microsecs :: integer}) :: atom
  @doc """
  Called when a client subscribes to a channel.
  """
  @callback client_subscribed(client_id :: binary, channel_name :: binary, at :: {megasecs :: integer, seconds :: integer, microsecs :: integer}) :: atom
  @doc """
  Called when a client unsubscribes from a channel.
  """
  @callback client_unsubscribed(client_id :: binary, channel_name :: binary, at :: {megasecs :: integer, seconds :: integer, microsecs :: integer}) :: atom
  @doc """
  Called when a message is received with the ID of the message.
  Some caveats:
    - This function is only called when a publish message is received, not when
      any protocol messages, such as connect or subscribe are received.
    - Message IDs are only unique per client, not globally.
    - If the message was generated on the server (ie via `Pixie.publish/2`) then
      the Client ID is likely to be `nil`.
  """
  @callback received_message(client_id :: binary, message_id :: binary, at :: {megasecs :: integer, seconds :: integer, microsecs :: integer}) :: atom
  @doc """
  Called when a message is delivered to a client.
  Some caveats:
    - This function is only called when a publish message is delivered, not when
      any protocol messages, such as connect or subscribe are.
    - Message IDs are only unique per client, not globally.
    - The Client ID is that of the *sender*, not the receiver.
    - If the message was generated on the server (ie via `Pixie.publish/2`) then
      the Client ID is likely to be `nil`.
    - You will likely receive a lot of delivered calls for each received call
      as one message published to a channel may be relayed to thousands of
      receivers.
  """
  @callback delivered_message(client_id :: binary, message_id :: binary, at :: {megasecs :: integer, seconds :: integer, microsecs :: integer}) :: atom
  def start_link handlers do
    {:ok, pid} = GenEvent.start_link name: __MODULE__
    Enum.each handlers, fn
      {handler, args} ->
        add_handler handler, args
      handler ->
        add_handler handler, []
    end
    {:ok, pid}
  end
  @doc """
  Allows you to add a `Pixie.Monitor` or any other `GenEvent` handler to the
  event stream.  Expects the name of your handler module and any args which
  you wish to be provided to your module's `init/1` callback.
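  ## Example
      # MyMonitor as defined in the module documentation above
      Pixie.Monitor.add_handler MyMonitor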
  """
  def add_handler handler, args \\ [] do
    GenEvent.add_handler __MODULE__, handler, args
  end
  @doc """
  Called by the backend when a new client is created either by protocol
  handshake, or via `Pixie.subscribe/2`
  """
  def created_client client_id do
    GenEvent.notify __MODULE__, {:created_client, [client_id, Time.now]}
  end
  @doc """
  Called by the backend when a client is destroyed, either by an explicit
  protocol disconnect or for a system generated reason, such as a timeout.
  """
  def destroyed_client client_id, reason \\ "Unknown reason" do
    GenEvent.notify __MODULE__, {:destroyed_client, [client_id, reason, Time.now]}
  end
  @doc """
  Called by the backend when a new channel is created.
  New channels are created when the first client subscribes to them.
  """
  def created_channel channel_name do
    GenEvent.notify __MODULE__, {:created_channel, [channel_name, Time.now]}
  end
  @doc """
  Called by the backend when a channel is destroyed.
  Channels are destroyed when the last client unsubscribes from them.
  """
  def destroyed_channel channel_name do
    GenEvent.notify __MODULE__, {:destroyed_channel, [channel_name, Time.now]}
  end
  @doc """
  Called by the backend when a client subscribes to a channel.
  """
  def client_subscribed client_id, channel_name do
    GenEvent.notify __MODULE__, {:client_subscribed, [client_id, channel_name, Time.now]}
  end
  @doc """
  Called by the backend when a client unsubscribes from a channel.
  """
  def client_unsubscribed client_id, channel_name do
    GenEvent.notify __MODULE__, {:client_unsubscribed, [client_id, channel_name, Time.now]}
  end
  @doc """
  Called whenever a new message is received for publication.
  This includes server-generated messages using `Pixie.publish/2`
  """
  def received_message %Pixie.Message.Publish{client_id: client_id, id: message_id} do
    GenEvent.notify __MODULE__, {:received_message, [client_id, message_id, Time.now]}
  end
  def received_message(_), do: :ok
  @doc """
  Called by adapters when a message is finally delivered to a client.
  """
  def delivered_message %Pixie.Message.Publish{client_id: client_id, id: message_id} do
    GenEvent.notify __MODULE__, {:delivered_message, [client_id, message_id, Time.now]}
  end
  def delivered_message(_), do: :ok
end | 
	lib/pixie/monitor.ex | 0.839537 | 0.506103 | 
	monitor.ex | 
	starcoder | 
| 
	defmodule Dirwalk do
  @moduledoc """
  A simple-to-use module to help traverse directories. Interface inspired by Python's `os.walk`.
  `Dirwalk` enables you to walk directories lazily or greedily. Lazy traversal means that the minimum
  amount of work is needed to get the next result, and each next step has to be done explicitly.
  You must provide a startpoint, which is a path on the filesystem. `Dirwalk` will then
  recursively walk across and down subdirectories.
  Symlink and error handling are included. See `Dirwalk.walk` options for alternatives to the
  top-down, depth-first walk done by default.
  The data structure used is a triple / 3-tuple consisting of the current directory, and the
  subdirectories and files in that directory.
  In the most raw form, you can use `Dirwalk.walk`, and manually call the continuation function
  when you want to consume the next result. This gives you control of how much to do.
  ## Using `walk` (see `testdirs` structure in this repo as an example)
      iex> {{"testdirs", ["dogs", "cats", "felines"], []}, next} = Dirwalk.walk("testdirs")
      iex> {{"testdirs/dogs", ["wild", "domestic"], []}, _next} = next.()
  You can also use the struct based approach to simplify this a bit.
  ## Using helper functions
      iex> dirwalk = Dirwalk.new("testdirs") |> Dirwalk.next()
      iex> Dirwalk.results(dirwalk)
      [{"testdirs", ["dogs", "cats", "felines"], []}]
  But because `Dirwalk` implements `Enumerable`, it is probably easier to use `Enum` functions.
  This allows for greedy traversal too.
  ## Using `Enum` functions
      iex> Dirwalk.new("testdirs") |> Enum.take(1)
      [{"testdirs", ["dogs", "cats", "felines"], []}]
  """
  @type path :: String.t()
  @type dirs :: [String.t()]
  @type files :: [String.t()]
  @type dirlist :: {path, dirs, files}
  @type opts :: []
  @type t :: %__MODULE__{}
  defstruct [:next, results: [], done: false]
  @doc """
  Initialises a `Dirwalk` struct. Options are passed through and are the same as in `Dirwalk.walk`
  """
  @spec new(binary, list) :: Dirwalk.t()
  def new(root, opts \\ []) when is_binary(root) do
    %Dirwalk{next: fn -> Dirwalk.walk(root, opts) end}
  end
  @doc """
  Does the next traversal in the file tree. Stores result and handles completion
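  ## Example
      iex> dirwalk = Dirwalk.new("testdirs") |> Dirwalk.next()
      iex> Dirwalk.last(dirwalk)
      {"testdirs", ["dogs", "cats", "felines"], []}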
  """
  @spec next(Dirwalk.t()) :: Dirwalk.t()
  def next(%Dirwalk{next: next, results: results} = dirwalk) do
    case next.() do
      :done -> %Dirwalk{dirwalk | done: true}
      {dirlist, next} -> %Dirwalk{dirwalk | next: next, results: [dirlist | results]}
    end
  end
  @doc """
  Returns whether traversal has finished.
  """
  @spec done?(Dirwalk.t()) :: boolean
  def done?(%Dirwalk{done: done}), do: done
  @doc """
  Returns accumulated results from the traversal.
  """
  @spec results(Dirwalk.t()) :: [dirlist]
  def results(%Dirwalk{results: results}), do: Enum.reverse(results)
  @doc """
  Returns last accumulated result.
  """
  @spec last(Dirwalk.t()) :: nil | dirlist
  def last(%Dirwalk{results: []}), do: nil
  def last(%Dirwalk{results: [head | _tail]}), do: head
  defimpl Enumerable, for: __MODULE__ do
    def count(_dirwalk), do: {:error, __MODULE__}
    def member?(_dirwalk, _value), do: {:error, __MODULE__}
    def slice(_dirwalk), do: {:error, __MODULE__}
    def reduce(_dirwalk, {:halt, acc}, _fun), do: {:halted, acc}
    def reduce(%Dirwalk{} = dirwalk, {:suspend, acc}, fun) do
      {:suspended, acc, &reduce(dirwalk, &1, fun)}
    end
    def reduce(%Dirwalk{} = dirwalk, {:cont, acc}, fun) do
      dirwalk = Dirwalk.next(dirwalk)
      if Dirwalk.done?(dirwalk) do
        {:done, acc}
      else
        last = Dirwalk.last(dirwalk)
        reduce(dirwalk, fun.(last, acc), fun)
      end
    end
  end
  @doc """
  `walk` takes a directory path and lazily and recursively traverses directories from that root.
  It returns a tuple, consisting of a triple of `{path, directories, files}`, and a `next` function
  to be invoked when the next traversal needs to be done. When there are no more subdirectories
  to handle, `:done` is returned.
  The default behaviour is a depth-first, top-down walk - this can be configured.
  By default errors are silently ignored, though an optional handler can be passed in.
  ## Options:
  - `:on_error`: optional 1- or 2-arity callback that is invoked with either `path` and `error`
    or a tuple of `{path, error}` when an error occurs
  - `:depth_first`: the walk is depth-first unless set to `false`, in which case it is breadth-first
  - `:top_down`: the traversal is top-down unless set to `false`, in which case it is bottom-up
  - `:follow_symlinks`: whether symlinked directories are followed; defaults to `false`
  ## Examples (see `testdirs` structure)
      # Top-down, depth-first
      iex> {{"testdirs", ["dogs", "cats", "felines"], []}, next} = Dirwalk.walk("testdirs")
      iex> {{"testdirs/dogs", ["wild", "domestic"], []}, _next} = next.()
      # Bottom-up
      iex> {{"testdirs/dogs/wild", [], ["coyote.txt", "wolf.txt"]}, next} = \
            Dirwalk.walk("testdirs", top_down: false)
      iex> {{"testdirs/dogs/domestic", [], ["dog.txt"]}, _next} = next.()
      # Breadth-first
      iex> {{"testdirs", ["dogs", "cats", "felines"], []}, next} = Dirwalk.walk("testdirs", depth_first: false)
      iex> {{"testdirs/dogs", ["wild", "domestic"], []}, next} = next.()
      iex> {{"testdirs/cats", ["wild", "domestic"], []}, _next} = next.()
  """
  @spec walk(path, opts) :: {dirlist, (() -> any())} | :done
  def walk(path, opts \\ []) do
    on_error = Keyword.get(opts, :on_error)
    depth_first = !!Keyword.get(opts, :depth_first, true)
    top_down = !!Keyword.get(opts, :top_down, true)
    follow_symlinks = !!Keyword.get(opts, :follow_symlinks, false)
    opts = %{
      top_down: top_down,
      depth_first: depth_first,
      follow_symlinks: follow_symlinks,
      on_error: on_error
    }
    do_walk([path], opts, fn -> :done end)
  end
  defp do_walk([], _opts, next), do: next.()
  defp do_walk(
         [path | remaining_dirs],
         %{on_error: on_error, follow_symlinks: follow_symlinks} = opts,
         next
       ) do
    if should_list?(path, follow_symlinks) do
      case get_dirs_and_files(path, on_error) do
        {:ok, {dirs, files}} ->
          child_dirs = build_child_paths(dirs, path)
          {next_dirs, next_fun} =
            prepare_continuation({path, dirs, files}, child_dirs, remaining_dirs, opts, next)
          do_walk(next_dirs, opts, next_fun)
        :error ->
          do_walk(remaining_dirs, opts, next)
      end
    else
      do_walk(remaining_dirs, opts, next)
    end
  end
  defp should_list?(_path, _follow_symlinks = true), do: true
  defp should_list?(path, _follow_symlinks = false), do: not symlink?(path)
  defp get_dirs_and_files(path, on_error) do
    case partition_files(path) do
      {:ok, results} ->
        {:ok, results}
      {:error, reason} ->
        maybe_call_on_error(on_error, path, reason)
        :error
    end
  end
  defp partition_files(path) do
    path
    |> File.ls()
    |> case do
      {:ok, files} ->
        {:ok, Enum.split_with(files, fn f -> path |> Path.join(f) |> File.dir?() end)}
      {:error, _reason} = error ->
        error
    end
  end
  defp build_child_paths(dirs, path), do: Enum.map(dirs, &Path.join(path, &1))
  defp prepare_continuation(
         dirlist,
         child_dirs,
         remaining_dirs,
         %{top_down: true, depth_first: true} = opts,
         next
       ) do
    # Top-down: yield this directory listing first, before recursing on children and siblings
    next_fun = fn ->
      {dirlist, fn -> do_walk(child_dirs ++ remaining_dirs, opts, next) end}
    end
    {[], next_fun}
  end
  defp prepare_continuation(
         dirlist,
         child_dirs,
         remaining_dirs,
         %{top_down: true, depth_first: false} = opts,
         next
       ) do
    next_fun = fn ->
      {dirlist, fn -> do_walk(remaining_dirs ++ child_dirs, opts, next) end}
    end
    {[], next_fun}
  end
  defp prepare_continuation(dirlist, child_dirs, remaining_dirs, %{top_down: false} = opts, next) do
    # Bottom-up: recurse on children dirs first, before yielding this directory's results
    # and only then recurse on siblings
    next_fun = fn ->
      {dirlist,
       fn ->
         do_walk(remaining_dirs, opts, next)
       end}
    end
    {child_dirs, next_fun}
  end
  defp maybe_call_on_error(on_error, path, reason) when is_function(on_error, 2) do
    on_error.(path, reason)
  end
  defp maybe_call_on_error(on_error, path, reason) when is_function(on_error, 1) do
    on_error.({path, reason})
  end
  defp maybe_call_on_error(_on_error, _path, _reason), do: nil
  defp symlink?(path) do
    case File.lstat(path) do
      {:ok, %File.Stat{type: :symlink}} ->
        true
      {:ok, _file_stat} ->
        false
      {:error, _reason} ->
        # Error handling will have already been done
        false
    end
  end
end | 
	lib/dirwalk.ex | 0.921313 | 0.589716 | 
	dirwalk.ex | 
	starcoder | 
| 
	defmodule Mix.Tasks.Format do
  use Mix.Task
  @shortdoc "Formats the given files/patterns"
  @moduledoc """
  Formats the given files and patterns.
      mix format mix.exs "lib/**/*.{ex,exs}" "test/**/*.{ex,exs}"
  Formatting is done with the `Code.format_string!/2` function.
  ## Options
    * `--check-formatted` - check that the file is already formatted.
      This is useful in pre-commit hooks and CI scripts if you want to
      reject contributions with unformatted code. However, keep in mind,
      that the formatting output may differ between Elixir versions as
      improvements and fixes are applied to the formatter.
    * `--check-equivalent` - check if the file after formatting has the
      same AST. If the ASTs are not equivalent, it is a bug in the code
      formatter. This option is recommended if you are automatically
      formatting files.
    * `--dry-run` - do not save files after formatting.
    * `--print` - write formatted files to stdout instead of saving to disk.
    * `--dot-formatter` - the file with formatter configuration.
      Defaults to `.formatter.exs` if one is available, see next section.
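  For example, a CI step that rejects unformatted code might run (illustrative):
      mix format --check-formatted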
  If any of the `--check-*` flags are given and a check fails, the formatted
  contents won't be written to disk or printed to stdout (even if the `--print`
  flag is given).
  ## .formatter.exs
  The formatter will read a `.formatter.exs` in the current directory for
  formatter configuration. It should return a keyword list with any of the
  options supported by `Code.format_string!/2`.
  The `.formatter.exs` also supports an `:inputs` field which specifies the
  default inputs to be used by this task.
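  For example, a minimal `.formatter.exs` might look like this (the patterns below
  are illustrative):
      [
        inputs: ["mix.exs", "{config,lib,test}/**/*.{ex,exs}"],
        line_length: 98
      ]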
  ## When to format code
  We recommend that developers format code directly in their editors, either
  automatically on save or via an explicit command/key binding. If such an option
  is not yet available in your editor of choice, adding the required integration
  is relatively simple, as it is a matter of invoking
      cd $project && mix format $file
  where `$file` refers to the current file and `$project` is the root of your
  project.
  It is also possible to format code across the whole project by passing a list
  of patterns and files to `mix format`, as shown at the top of this task
  documentation. This list can also be set in the `.formatter.exs` under the
  `:inputs` key.
  """
  @switches [
    check_equivalent: :boolean,
    check_formatted: :boolean,
    dot_formatter: :string,
    dry_run: :boolean,
    print: :boolean
  ]
  def run(args) do
    {opts, args} = OptionParser.parse!(args, strict: @switches)
    formatter_opts = eval_dot_formatter(opts)
    args
    |> expand_args(formatter_opts)
    |> Enum.reduce({[], []}, &format_file(&1, &2, opts, formatter_opts))
    |> check!()
  end
  defp eval_dot_formatter(opts) do
    case dot_formatter(opts) do
      {:ok, dot_formatter} ->
        {formatter_opts, _} = Code.eval_file(dot_formatter)
        unless Keyword.keyword?(formatter_opts) do
          Mix.raise(
            "Expected #{inspect(dot_formatter)} to return a keyword list, " <>
              "got: #{inspect(formatter_opts)}"
          )
        end
        formatter_opts
      :error ->
        []
    end
  end
  defp dot_formatter(opts) do
    cond do
      dot_formatter = opts[:dot_formatter] -> {:ok, dot_formatter}
      File.regular?(".formatter.exs") -> {:ok, ".formatter.exs"}
      true -> :error
    end
  end
  defp expand_args([], formatter_opts) do
    if inputs = formatter_opts[:inputs] do
      expand_files_and_patterns(List.wrap(inputs), ".formatter.exs")
    else
      Mix.raise(
        "Expected one or more files/patterns to be given to mix format " <>
          "or for a .formatter.exs to exist with an :inputs key"
      )
    end
  end
  defp expand_args(files_and_patterns, _formatter_opts) do
    expand_files_and_patterns(files_and_patterns, "command line")
  end
  defp expand_files_and_patterns(files_and_patterns, context) do
    files_and_patterns
    |> Enum.flat_map(fn file_or_pattern ->
         files = Path.wildcard(file_or_pattern)
         cond do
           files != [] ->
             files
           File.regular?(file_or_pattern) ->
             [file_or_pattern]
           true ->
             Mix.raise(
               "Pattern #{inspect(file_or_pattern)} from #{context} " <>
                 "does not expand to any existing file"
             )
         end
       end)
    |> Enum.uniq()
  end
  defp format_file(file, {not_equivalent, not_formatted}, task_opts, formatter_opts) do
    input = File.read!(file)
    output = [Code.format_string!(input, formatter_opts), ?\n]
    check_equivalent? = Keyword.get(task_opts, :check_equivalent, false)
    check_formatted? = Keyword.get(task_opts, :check_formatted, false)
    dry_run? = Keyword.get(task_opts, :dry_run, false)
    print? = Keyword.get(task_opts, :print, false)
    {valid?, not_equivalent, not_formatted} =
      if check_equivalent? or check_formatted? do
        output_string = IO.iodata_to_binary(output)
        cond do
          check_equivalent? and not equivalent?(input, output_string) ->
            {false, [file | not_equivalent], not_formatted}
          check_formatted? and input != output_string ->
            {false, not_equivalent, [file | not_formatted]}
          true ->
            {not check_formatted?, not_equivalent, not_formatted}
        end
      else
        {true, not_equivalent, not_formatted}
      end
    cond do
      not valid? or dry_run? -> :ok
      print? -> IO.write(output)
      true -> File.write!(file, output)
    end
    {not_equivalent, not_formatted}
  end
  defp check!({[], []}) do
    :ok
  end
  defp check!({[], not_formatted}) do
    Mix.raise("""
    mix format failed due to --check-formatted.
    The following files were not formatted:
    #{to_bullet_list(not_formatted)}
    """)
  end
  defp check!({not_equivalent, []}) do
    Mix.raise("""
    mix format failed due to --check-equivalent.
    The following files were not equivalent:
    #{to_bullet_list(not_equivalent)}
    Please report this bug with the input files at github.com/elixir-lang/elixir/issues
    """)
  end
  defp to_bullet_list(files) do
    Enum.map_join(files, "\n", &["  * ", &1])
  end
  defp equivalent?(input, output) do
    Code.Formatter.equivalent(input, output) == :ok
  end
end | 
	lib/mix/lib/mix/tasks/format.ex | 0.804598 | 0.445288 | 
	format.ex | 
	starcoder | 
| 
	defmodule JID do
  @moduledoc """
  Jabber Identifiers (JIDs) uniquely identify individual entities in an XMPP
  network.
  A JID often resembles an email address with a user@host form, but there's
  a bit more to it. A JID is composed of up to three parts: a local (user)
  part, a server part, and a resource part.
  The server part is mandatory for all JIDs, and can even stand alone
  (e.g., as the address for a server).
  The combination of a local (user) part and a server is called a "bare JID",
  and it is used to identify a particular account on a server.
  A JID that includes a resource is called a "full JID", and it is used to
  identify a particular client connection (i.e., a specific connection for the
  associated "bare JID" account).
  This module implements `to_string/1` for the `String.Chars` protocol,
  returning a binary string representation of a `JID` struct.
  ## Examples
      iex> to_string(%JID{user: "romeo", server: "montague.lit", resource: "chamber"})
      "[email protected]/chamber"
      iex> to_string(%JID{user: "romeo", server: "montague.lit"})
      "<EMAIL>"
      iex> to_string(%JID{server: "montague.lit"})
      "montague.lit"
  """
  alias JID
  defmodule JIDParsingError do
    @moduledoc false
    defexception [:message]
    def exception(msg) do
      %JIDParsingError{message: "JID parsing failed with #{inspect msg}"}
    end
  end
  @type t :: %__MODULE__{}
  @derive Jason.Encoder
  defstruct user: "", server: "", resource: "", full: ""
  defimpl String.Chars, for: JID do
    def to_string(%JID{user: "", server: server, resource: ""}), do: server
    def to_string(%JID{user: user, server: server, resource: ""}) do
      user <> "@" <> server
    end
    def to_string(%JID{user: user, server: server, resource: resource}) do
      user <> "@" <> server <> "/" <> resource
    end
  end
  @doc """
  Returns a binary JID without a resource.
  ## Examples
      iex> JID.bare(%JID{user: "romeo", server: "montague.lit", resource: "chamber"})
      "<EMAIL>"
      iex> JID.bare("[email protected]/chamber")
      "<EMAIL>"
  """
  @spec bare(jid :: binary | JID.t) :: binary
  def bare(jid) when is_binary(jid), do: parse(jid) |> bare
  def bare(%JID{} = jid), do: to_string(%JID{jid | resource: ""})
  @spec user(jid :: binary | JID.t) :: binary
  def user(jid) when is_binary(jid), do: parse(jid).user
  def user(%JID{user: user}), do: user
  @spec server(jid :: binary | JID.t) :: binary
  def server(jid) when is_binary(jid), do: parse(jid).server
  def server(%JID{server: server}), do: server
  @spec resource(jid :: binary | JID.t) :: binary
  def resource(jid) when is_binary(jid), do: parse(jid).resource
  def resource(%JID{resource: resource}), do: resource
  @spec parse(jid :: nil) :: no_return
  def parse(nil), do: raise JIDParsingError, message: nil
  @doc """
  Parses a binary string JID into a JID struct.
  ## Examples
      iex> JID.parse("<EMAIL>[email protected]/chamber")
      %JID{user: "romeo", server: "montague.lit", resource: "chamber", full: "[email protected]/chamber"}
      iex> JID.parse("<EMAIL>")
      %JID{user: "romeo", server: "montague.lit", resource: "", full: "<EMAIL>"}
  """
  @spec parse(jid :: binary) :: JID.t
  def parse(string) do
    case String.split(string, ["@", "/"], parts: 3) do
      [user, server, resource] ->
        %JID{user: user, server: server, resource: resource, full: string}
      [user, server] ->
        %JID{user: user, server: server, full: string}
      [server] ->
        %JID{server: server, full: string}
    end
  end
end | 
	lib/jid.ex | 0.864982 | 0.61315 | 
	jid.ex | 
	starcoder | 
| 
	defmodule Sanity.Components.PortableText do
  @moduledoc ~S'''
  For rendering [Sanity CMS portable text](https://www.sanity.io/docs/presenting-block-text).
  ## Examples
  ### Basic example
      use Phoenix.Component
      # ...
      assigns = %{
        portable_text: [
          %{
            _key: "f71173c80e3a",
            _type: "block",
            children: [%{_key: "<KEY>", _type: "span", marks: [], text: "Test paragraph."}],
            mark_defs: [],
            style: "normal"
          }
        ]
      }
      ~H"<Sanity.Components.PortableText.portable_text value={@portable_text} />"
  ### Custom rendering
      defmodule CustomBlock do
        use Phoenix.Component
        use Sanity.Components.PortableText
        @impl true
        def block(%{value: %{style: "normal"}} = assigns) do
          ~H"""
          <div class="custom-normal"><%= render_slot(@inner_block) %></div>
          """
        end
        def block(assigns), do: super(assigns)
      end
  Then render the component like:
      ~H"<Sanity.Components.PortableText.portable_text mod={CustomBlock} value={@portable_text} />"
  Similarly, marks and types can be customized by defining `mark/1` and `type/1` functions in the module.
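  For example, a custom mark module might look like this (an illustrative sketch;
  the `"em"` override and class name are assumptions):
      defmodule CustomMark do
        use Phoenix.Component
        use Sanity.Components.PortableText
        @impl true
        def mark(%{mark_type: "em"} = assigns) do
          ~H"""
          <em class="fancy-em"><%= render_slot(@inner_block) %></em>
          """
        end
        def mark(assigns), do: super(assigns)
      end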
  '''
  use Phoenix.Component
  require Logger
  defmodule Behaviour do
    @moduledoc false
    @callback block(map()) :: Phoenix.LiveView.Rendered.t()
    @callback mark(map()) :: Phoenix.LiveView.Rendered.t()
    @callback type(map()) :: Phoenix.LiveView.Rendered.t()
  end
  @behaviour Behaviour
  defmacro __using__([]) do
    quote do
      @behaviour Sanity.Components.PortableText.Behaviour
      def block(assigns), do: Sanity.Components.PortableText.block(assigns)
      def mark(assigns), do: Sanity.Components.PortableText.mark(assigns)
      def type(assigns), do: Sanity.Components.PortableText.type(assigns)
      defoverridable Sanity.Components.PortableText.Behaviour
    end
  end
  @doc """
  Renders Sanity CMS portable text. See module doc for examples.
  """
  def portable_text(assigns) do
    mod = Map.get(assigns, :mod, __MODULE__)
    ~H"""
    <%= for group <- blocks_to_nested_lists(@value) do %><.blocks_or_list mod={mod} value={group} /><% end %>
    """
  end
  defp blocks_to_nested_lists(blocks) do
    blocks
    |> Enum.chunk_by(fn block -> block[:list_item] end)
    |> Enum.map(fn
      [%{list_item: list_item} | _] = items when not is_nil(list_item) ->
        nest_list(items, %{type: list_item, level: 1, items: []})
      [%{} | _] = blocks ->
        %{type: "blocks", items: blocks}
    end)
  end
  defp nest_list([], acc) do
    update_in(acc.items, &Enum.reverse/1)
  end
  defp nest_list([%{level: level} = item | rest], %{level: level} = acc) do
    nest_list(rest, prepend_to_list(item, acc))
  end
  defp nest_list([%{level: level, list_item: list_item} | _] = items, acc)
       when level > acc.level do
    {deeper_items, rest} = Enum.split_while(items, fn i -> i.level > acc.level end)
    sub_list = nest_list(deeper_items, %{type: list_item, level: acc.level + 1, items: []})
    acc =
      case acc do
        %{items: [last_item | acc_rest]} ->
          put_in(acc.items, [Map.put(last_item, :sub_list, sub_list) | acc_rest])
        %{items: []} ->
          empty_list_block(%{level: acc.level + 1, list_item: acc.type})
          |> Map.put(:sub_list, sub_list)
          |> prepend_to_list(acc)
      end
    nest_list(rest, acc)
  end
  defp empty_list_block(%{level: level, list_item: list_item}) do
    %{
      _key: :crypto.strong_rand_bytes(6) |> Base.encode16(case: :lower),
      _type: "block",
      children: [],
      level: level,
      list_item: list_item,
      mark_defs: [],
      style: "normal"
    }
  end
  defp prepend_to_list(item, %{items: items} = list), do: %{list | items: [item | items]}
  defp render_with(assigns) do
    {func, assigns} = Map.pop!(assigns, :func)
    apply(assigns.mod, func, [assigns])
  end
  defp shared_props(assigns), do: Map.take(assigns, [:mod, :value])
  defp blocks_or_list(%{value: %{type: "blocks"}} = assigns) do
    ~H"""
    <%= for block <- @value.items do %>
      <.render_with mod={@mod} func={:type} value={block} />
    <% end %>
    """
  end
  defp blocks_or_list(%{value: %{type: "bullet"}} = assigns) do
    ~H"""
    <ul>
      <%= for item <- @value.items do %>
        <.list_item mod={@mod} value={item} />
      <% end %>
    </ul>
    """
  end
  defp blocks_or_list(%{value: %{type: "number"}} = assigns) do
    ~H"""
    <ol>
      <%= for item <- @value.items do %>
        <.list_item mod={@mod} value={item} />
      <% end %>
    </ol>
    """
  end
  defp list_item(assigns) do
    ~H"""
    <li>
      <.children {shared_props(assigns)} />
      <%= if @value[:sub_list] do %><.blocks_or_list mod={@mod} value={@value.sub_list} /><% end %>
    </li>
    """
  end
  defp children(assigns) do
    ~H"""
    <%= for child <- @value.children do %><.marks marks={child.marks} {shared_props(assigns)}><%= child.text %></.marks><% end %>
    """
  end
  @doc false
  @impl true
  def type(%{value: %{_type: "block"}} = assigns) do
    ~H"""
    <.render_with func={:block} {shared_props(assigns)}>
      <.children {shared_props(assigns)} />
    </.render_with>
    """
  end
  def type(%{value: %{_type: type}} = assigns) do
    Logger.error("unknown type: #{inspect(type)}")
    ~H""
  end
  @doc false
  @impl true
  def block(%{value: %{_type: "block", style: style}} = assigns)
      when style in ["blockquote", "h1", "h2", "h3", "h4", "h5", "h6"] do
    ~H"""
    <%= Phoenix.HTML.Tag.content_tag style do %><%= render_slot(@inner_block) %><% end %>
    """
  end
  def block(%{value: %{_type: "block", style: "normal"}} = assigns) do
    ~H"""
    <p><%= render_slot(@inner_block) %></p>
    """
  end
  def block(%{value: %{_type: "block", style: style}} = assigns) do
    Logger.error("unknown block style: #{inspect(style)}")
    ~H"""
    <p><%= render_slot(@inner_block) %></p>
    """
  end
  defp marks(%{marks: []} = assigns) do
    ~H"""
    <%= render_slot(@inner_block) %>
    """
  end
  defp marks(%{marks: [mark | remaining_marks]} = assigns) do
    mark_props =
      case Enum.find(assigns.value.mark_defs, &(&1._key == mark)) do
        nil ->
          %{
            mark_key: mark,
            mark_type: mark,
            value: nil
          }
        %{_type: type} = mark_def ->
          %{
            mark_key: mark,
            mark_type: type,
            value: mark_def
          }
      end
    ~H"""
    <.render_with mod={@mod} func={:mark} {mark_props}><.marks marks={remaining_marks} {shared_props(assigns)}><%= render_slot(@inner_block) %></.marks></.render_with>
    """
  end
  @doc false
  @impl true
  def mark(%{mark_type: "em"} = assigns) do
    ~H"""
    <em><%= render_slot(@inner_block) %></em>
    """
  end
  def mark(%{mark_type: "strong"} = assigns) do
    ~H"""
    <strong><%= render_slot(@inner_block) %></strong>
    """
  end
  def mark(%{mark_type: "link", value: value} = assigns) do
    ~H"""
    <a href={value.href}><%= render_slot(@inner_block) %></a>
    """
  end
  def mark(%{mark_type: mark_type} = assigns) do
    Logger.error("unknown mark type: #{inspect(mark_type)}")
    ~H"""
    <%= render_slot(@inner_block) %>
    """
  end
end | 
	lib/sanity/components/portable_text.ex | 0.754915 | 0.506897 | 
	portable_text.ex | 
	starcoder | 
| 
	defmodule CRUDimentary.Absinthe.Generator.Endpoint do
  @moduledoc """
  This module defines generators for Absinthe GraphQL CRUD fields.
  By calling one of the macros you can generate multiple queries or mutations with generic resolver callbacks, middleware, and error handlers.
  """
  import CRUDimentary.Absinthe.Generator.ResultType
  @query_types [:index, :show]
  @mutation_types [:create, :update, :destroy]
  @error_handler Confex.get_env(CRUDimentary.MixProject.project()[:app], :error_handler)
  @doc """
  Generates Absinthe schema query CRUD (index and show) fields based upon the given options.
  ```
  query do
    CRUDimentary.Absinthe.EndpointGenerator.generic_query(
      :account,
      Project.API.Resolvers.Account,
      [
        error_handler: ErrorHandler,
        index: [
          middleware:
            [
              before: [Middleware, Middleware],
              after: [Middleware]
            ]
        ]
      ])
  end
  ```
  This results in generated fields:
  ```
  RootQueryType{
    account(id: ID!): AccountSingleResult
    accounts(
      filter: [AccountFilter]
      pagination: PaginationInput
      sorting: AccountSorting): AccountListResult
  }
  ```
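  Each field resolves by naming convention: the generator calls
  `Module.concat(base_module, Action).call/3` for each action, so the index query
  above is roughly equivalent to writing (illustrative expansion):
  ```
  resolve(&Project.API.Resolvers.Account.Index.call/3)
  ```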
  """
  defmacro generic_query(name, base_module, options \\ %{}) do
    for query_type <- @query_types do
      if included?(query_type, options) do
        quote do
          unquote(__MODULE__).generic_schema_field(
            unquote(query_type),
            unquote(name),
            nil,
            String.to_atom("#{unquote(name)}_filter"),
            String.to_atom("#{unquote(name)}_sorting"),
            unquote(base_module),
            unquote(options)
          )
        end
      end
    end
  end
  @doc """
  Generates Absinthe schema mutation CRUD (create, update, destroy) fields based upon the given options.
  ```
  mutation do
    CRUDimentary.Absinthe.EndpointGenerator.generic_mutation(
      :account,
      Project.API.Resolvers.Account,
      [
        exclude: [:update]
      ])
  end
  ```
  This results in generated fields:
  ```
  RootMutationType{
    createAccount(input: AccountInput!): AccountSingleResult
    destroyAccount(id: ID!): AccountSingleResult
  }
  ```
  """
  defmacro generic_mutation(name, base_module, options \\ %{}) do
    for mutation_type <- @mutation_types do
      if included?(mutation_type, options) do
        quote do
          unquote(__MODULE__).generic_schema_field(
            unquote(mutation_type),
            unquote(name),
            String.to_atom("#{unquote(name)}_input"),
            nil,
            nil,
            unquote(base_module),
            unquote(options)
          )
        end
      end
    end
  end
  @doc false
  defmacro generic_schema_field(
             action_type,
             name,
             input_type,
             filter_type,
             sort_type,
             base_module,
             options
           ) do
    error_handler = options[:error_handler] || @error_handler
    quote do
      @desc unquote(generate_description(name, action_type))
      field(
        unquote(action_name(name, action_type, options)),
        unquote(
          case action_type do
            :index ->
              result_name(name, :list)
            _ ->
              result_name(name, :single)
          end
        )
      ) do
        unquote(
          case action_type do
            :index ->
              quote do
                arg(:filter, list_of(unquote(filter_type)))
                arg(:sorting, unquote(sort_type))
                arg(:pagination, :pagination_input)
              end
            :create ->
              quote do
                arg(:input, non_null(unquote(input_type)))
              end
            :update ->
              quote do
                arg(:id, non_null(:id))
                arg(:input, non_null(unquote(input_type)))
              end
            _ ->
              quote do
                arg(:id, non_null(:id))
              end
          end
        )
        unquote(
          for mw <- extract_middleware(action_type, :before, options) do
            quote do
              middleware(unquote(mw))
            end
          end
        )
        resolve(
          &Module.concat(unquote(base_module), unquote(capitalize_atom(action_type))).call/3
        )
        unquote(
          if error_handler do
            quote do
              middleware(unquote(error_handler))
            end
          end
        )
        unquote(
          for mw <- extract_middleware(action_type, :after, options) do
            quote do
              middleware(unquote(mw))
            end
          end
        )
      end
    end
  end
  @doc false
  def generate_description(name, :index),
    do: "Fetches filtered and sorted list of #{name} resources"
  def generate_description(name, :show), do: "Fetches single #{name} resource by id"
  def generate_description(name, :create), do: "Creates new #{name} resource"
  def generate_description(name, :update), do: "Updates existing #{name} resource by id"
  def generate_description(name, :destroy), do: "Deletes #{name} resource by id"
  @doc false
  def included?(action, options) do
    !excluded?(action, options)
  end
  @doc false
  def excluded?(action, options) do
    exclusions = options[:except] || options[:exclude] || []
    included = options[:only] || []
    Enum.member?(exclusions, action) || (Enum.any?(included) && !Enum.member?(included, action))
  end
  @doc false
  def extract_middleware(action, position, options) do
    (options[action][:middleware][position] || []) ++ (options[:middleware][position] || [])
  end
  @doc false
  def filter_name(name) do
    String.to_atom("#{name}_filter")
  end
  @doc false
  def index_name(name) do
    "#{name}"
    |> Inflex.pluralize()
    |> String.to_atom()
  end
  @doc false
  def action_name(name, :show, options) do
    extract_action_name(:show, options) || name
  end
  def action_name(name, :index, options) do
    extract_action_name(:index, options) || index_name(name)
  end
  def action_name(name, action, options) do
    extract_action_name(action, options) || String.to_atom("#{action}_#{name}")
  end
  @doc false
  def extract_action_name(action, options) do
    options[:name][action]
  end
  @doc false
  def capitalize_atom(atom) do
    atom
    |> Atom.to_string()
    |> String.capitalize()
    |> String.to_atom()
  end
end | 
	lib/crudimentary/absinthe/generators/endpoint.ex | 0.728941 | 0.612339 | 
	endpoint.ex | 
	starcoder | 
| 
	defmodule Plymio.Fontais.Option do
  @moduledoc ~S"""
  Functions for Managing Keyword Options ("opts")
  See `Plymio.Fontais` for overview and other documentation terms.
  ## Documentation Terms
  ### *key*
  A *key* is an `Atom`.
  ### *key list*
  A *key list* is a list of *key*s.
  ### *key spec*
  A *key spec* is usually a *key list*.
  Alternatively a `Map` with `Atom` keys or a `Keyword` can be given and the (unique) keys will be used.
  ### *key alias dict*
  A *key alias dict* is usually a `Map` with `Atom` keys and values used for canonicalising keys (e.g. as the 2nd argument to `opts_canonical_keys/2`).
  Alternatively a `Keyword` with `Atom` values can be given and will be converted on the fly.
  ### *key dict*
  A *key dict* is usually a `Map` with `Atom` keys.
  Alternatively a `Keyword` with `Atom` values can be given and will be converted on the fly.
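  For example, a *key alias dict* canonicalising the alias `:alpha` to `:a`:
      %{a: :a, alpha: :a, b: :b}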
  """
  use Plymio.Fontais.Attribute
  @type key :: Plymio.Fontais.key()
  @type keys :: Plymio.Fontais.keys()
  @type opts :: Plymio.Fontais.opts()
  @type opzioni :: Plymio.Fontais.opzioni()
  @type error :: Plymio.Fontais.error()
  @type result :: Plymio.Fontais.result()
  @type aliases_tuples :: Plymio.Fontais.aliases_tuples()
  @type aliases_kvs :: Plymio.Fontais.aliases_kvs()
  @type aliases_dict :: Plymio.Fontais.aliases_dict()
  import Plymio.Fontais.Error,
    only: [
      new_error_result: 1,
      new_argument_error_result: 1,
      new_key_error_result: 2,
      new_bad_key_error_result: 2
    ]
  import Plymio.Fontais.Utility,
    only: [
      validate_key: 1
    ]
  import Plymio.Fontais.Option.Utility,
    only: [
      normalise_key_alias_dict: 1,
      normalise_key_list: 1,
      normalise_key_dict: 1
    ]
  @doc ~S"""
  `opts_normalise/1` expects a *derivable opts* and returns `{:ok, opts}`.
  Any other argument causes `{:error, error}` to be returned.
  ## Examples
      iex> [] |> opts_normalise
      {:ok, []}
      iex> %{a: 1, b: 2, c: 3} |> opts_normalise
      {:ok, [a: 1, b: 2, c: 3]}
      iex> {:error, error} = %{"a" => 1, :b => 2, :c => 3} |> opts_normalise
      ...> error |> Exception.message
      "bad key \"a\" for: %{:b => 2, :c => 3, \"a\" => 1}"
      iex> {:error, error} = 42 |> opts_normalise
      ...> error |> Exception.message
      "opts not derivable, got: 42"
      iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_normalise
      {:ok, [a: nil, b: [:b1], c: [:c1, :c2, :c3]]}
  """
  @since "0.1.0"
  @spec opts_normalise(any) :: {:ok, opts} | {:error, error}
  def opts_normalise(value) do
    cond do
      Keyword.keyword?(value) ->
        {:ok, value}
      is_map(value) ->
        value
        |> Map.to_list()
        |> (fn tuples ->
              tuples
              |> Keyword.keyword?()
              |> case do
                true ->
                  {:ok, tuples}
                _ ->
                  tuples
                  |> Keyword.keys()
                  |> Enum.reject(&is_atom/1)
                  |> new_bad_key_error_result(value)
              end
            end).()
      true ->
        new_error_result(m: @plymio_fontais_error_message_opts_not_derivable, v: value)
    end
  end
  @doc ~S"""
  `opts_validate/1` returns `{:ok, opts}` if the argument is an *opts*.
  Any other argument causes `{:error, error}` to be returned.
  ## Examples
      iex> [] |> opts_validate
      {:ok, []}
      iex> %{a: 1, b: 2, c: 3} |> opts_validate
      {:error, %ArgumentError{message: "opts invalid, got: %{a: 1, b: 2, c: 3}"}}
      iex> %{"a" => 1, :b => 2, :c => 3} |> opts_validate
      {:error, %ArgumentError{message: "opts invalid, got: %{:b => 2, :c => 3, \"a\" => 1}"}}
      iex> 42 |> opts_validate
      {:error, %ArgumentError{message: "opts invalid, got: 42"}}
      iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_validate
      {:ok, [a: nil, b: [:b1], c: [:c1, :c2, :c3]]}
  """
  @since "0.1.0"
  @spec opts_validate(any) :: {:ok, opts} | {:error, error}
  def opts_validate(value) do
    case Keyword.keyword?(value) do
      true -> {:ok, value}
      _ -> new_error_result(m: @plymio_fontais_error_message_opts_invalid, v: value)
    end
  end
  @doc ~S"""
  `opts_merge/1` takes one or more *derivable opts*, merges them and returns `{:ok, opts}`.
  Any other argument causes `{:error, error}` to be returned.
  ## Examples
      iex> [] |> opts_merge
      {:ok, []}
      iex> [a: 1, b: 2, c: 3] |> opts_merge
      {:ok, [a: 1, b: 2, c: 3]}
      iex> [[a: 1], [b: 2], [c: 3]] |> opts_merge
      {:ok, [a: 1, b: 2, c: 3]}
      iex> %{a: 1, b: 2, c: 3} |> opts_merge
      {:ok, [a: 1, b: 2, c: 3]}
      iex> [%{a: 1, b: 2, c: 3}, [d: 4]] |> opts_merge
      {:ok, [a: 1, b: 2, c: 3, d: 4]}
      iex> {:error, error} = [[d: 4], %{"a" => 1, :b => 2, :c => 3}] |> opts_merge
      ...> error |> Exception.message
      "bad key \"a\" for: %{:b => 2, :c => 3, \"a\" => 1}"
      iex> {:error, error} = 42 |> opts_merge
      ...> error |> Exception.message
      "opts not derivable, got: 42"
  """
  @since "0.1.0"
  @spec opts_merge(any) :: {:ok, opts} | {:error, error}
  def opts_merge(value) do
    cond do
      Keyword.keyword?(value) ->
        {:ok, value}
      is_map(value) ->
        value |> opts_normalise
      is_list(value) ->
        value
        |> Enum.reduce_while(
          [],
          fn opts, collated_opts ->
            with {:ok, new_opts} <- opts |> opts_normalise do
              {:cont, collated_opts ++ new_opts}
            else
              {:error, %{__struct__: _}} = result -> {:halt, result}
            end
          end
        )
        |> case do
          {:error, %{__struct__: _}} = result -> result
          opts -> {:ok, opts}
        end
      true ->
        new_error_result(m: @plymio_fontais_error_message_opts_not_derivable, v: value)
    end
  end
  @doc ~S"""
  `opts_canonical_keys/2` takes a *derivable opts*, together with a *key alias dict*.
  Each key in the `opts` is replaced with its (canonical) value from the dictionary, returning `{:ok, canon_opts}`.
  If there are any unknown keys, `{:error, error}`, where `error` is a `KeyError`, will be returned.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_canonical_keys(%{a: :x, b: :y, c: :z})
      {:ok, [x: 1, y: 2, z: 3]}
      iex> [a: 1, b: 2, c: 3] |> opts_canonical_keys([a: :x, b: :y, c: :z])
      {:ok, [x: 1, y: 2, z: 3]}
      iex> [a: 11, p: 1, b: 22, q: 2, c: 33, r: 3] |> opts_canonical_keys(%{a: :x, b: :y, c: :z})
      {:error, %KeyError{key: [:p, :q, :r], term: %{a: :x, b: :y, c: :z}}}
      iex> [a: 1, b: 2, c: 3] |> opts_canonical_keys([a_canon: :a, b_canon: [:b], c_canon: [:c, :cc]])
      {:error, %ArgumentError{message: "expected valid key alias dictionary, got: %{a_canon: :a, b_canon: [:b], c_canon: [:c, :cc]}"}}
  """
  @since "0.1.0"
  @spec opts_canonical_keys(any, any) :: {:ok, opts} | {:error, error}
  def opts_canonical_keys(opts, dict)
  def opts_canonical_keys([], _dict) do
    {:ok, []}
  end
  def opts_canonical_keys(opts, dict) do
    with {:ok, opts} <- opts |> opts_normalise,
         {:ok, dict} <- dict |> normalise_key_alias_dict do
      opts
      # reject known_keys
      |> Enum.reject(fn {k, _v} -> Map.has_key?(dict, k) end)
      |> case do
        # no unknown keys
        [] ->
          canon_tuples =
            opts
            |> Enum.map(fn {k, v} -> {Map.get(dict, k), v} end)
          {:ok, canon_tuples}
        unknown_tuples ->
          unknown_tuples |> new_key_error_result(dict)
      end
    else
      {:error, _} = result -> result
    end
  end
  @doc ~S"""
  `opts_maybe_canonical_keys/2` takes a *derivable opts*, together with a *key alias dict*.
  If an *opts* key exists in the dictionary, it is replaced with its (canonical) value. Otherwise the key is unchanged.
  `{:ok, opts}` is returned.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_maybe_canonical_keys(%{a: :x, b: :y, c: :z})
      {:ok, [x: 1, y: 2, z: 3]}
      iex> [a: 11, p: 1, b: 22, q: 2, c: 33, r: 3]
      ...> |> opts_maybe_canonical_keys(%{a: :x, b: :y, c: :z})
      {:ok, [x: 11, p: 1, y: 22, q: 2, z: 33, r: 3]}
  """
  @since "0.1.0"
  @spec opts_maybe_canonical_keys(any, any) :: {:ok, opts} | {:error, error}
  def opts_maybe_canonical_keys(opts, dict) do
    with {:ok, opts} <- opts |> opts_normalise,
         {:ok, dict} <- dict |> normalise_key_alias_dict do
      opts =
        opts
        |> Enum.map(fn {k, v} -> {Map.get(dict, k, k), v} end)
      {:ok, opts}
    else
      {:error, _} = result -> result
    end
  end
  @doc ~S"""
  `opts_take_canonical_keys/2` takes a *derivable opts*, together with a *key alias dict*.
  It first calls `opts_maybe_canonical_keys/2` to convert all known
  keys to their canonical values, and then takes only the canonical keys returning `{:ok, opts}`.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_take_canonical_keys(%{a: :x, b: :y, c: :z})
      {:ok, [x: 1, y: 2, z: 3]}
      iex> [a: 11, p: 1, b: 22, q: 2, c: 33, r: 3]
      ...> |> opts_take_canonical_keys(%{a: :x, b: :y, c: :z})
      {:ok, [x: 11, y: 22, z: 33]}
  """
  @since "0.1.0"
  @spec opts_take_canonical_keys(any, any) :: {:ok, opts} | {:error, error}
  def opts_take_canonical_keys(opts, dict) do
    with {:ok, dict} <- dict |> normalise_key_alias_dict,
         {:ok, opts} <- opts |> opts_maybe_canonical_keys(dict) do
      {:ok, opts |> Keyword.take(dict |> Map.values())}
    else
      {:error, _} = result -> result
    end
  end
  @doc ~S"""
  `canonical_keys/2` takes a *key list* and *key alias dict* and replaces each key with its canonical value from the dictionary, returning `{:ok, canonical_keys}`.
  If there are any unknown keys, `{:error, error}`, where `error` is a `KeyError`, will be returned.
  ## Examples
      iex> [:a, :b, :c] |> canonical_keys(%{a: :p, b: :q, c: :r})
      {:ok, [:p,:q,:r]}
      iex> [:a, :b, :c] |> canonical_keys(%{a: 1, b: 2, c: 3})
      {:ok, [1,2,3]}
      iex> [:a, :x, :b, :y, :c, :z] |> canonical_keys(%{a: 1, b: 2, c: 3})
      {:error, %KeyError{key: [:x, :y, :z], term: %{a: 1, b: 2, c: 3}}}
  """
  @spec canonical_keys(any, any) :: {:ok, keys} | {:error, error}
  def canonical_keys(keys, dict) do
    with {:ok, keys} <- keys |> normalise_key_list,
         {:ok, dict} <- dict |> normalise_key_dict do
      keys
      |> Enum.reject(fn k -> Map.has_key?(dict, k) end)
      |> case do
        # no unknown keys
        [] ->
          canon_keys = keys |> Enum.map(fn k -> dict |> Map.get(k) end)
          {:ok, canon_keys}
        unknown_keys ->
          unknown_keys |> new_key_error_result(dict)
      end
    else
      {:error, _} = result -> result
    end
  end
  @doc ~S"""
  `canonical_key/2` takes a key together with a *key dict* and replaces the key with its canonical value from the dictionary, returning `{:ok, canonical_key}`.
  If the key is unknown, `{:error, error}`, where `error` is a `KeyError`, will be returned.
  ## Examples
      iex> :b |> canonical_key(%{a: :p, b: :q, c: :r})
      {:ok, :q}
      iex> :a |> canonical_key(%{a: 1, b: 2, c: 3})
      {:ok, 1}
      iex> :x |> canonical_key(%{a: 1, b: 2, c: 3})
      {:error, %KeyError{key: :x, term: %{a: 1, b: 2, c: 3}}}
  """
  @spec canonical_key(any, any) :: {:ok, key} | {:error, error}
  def canonical_key(key, dict) do
    with {:ok, key} <- key |> validate_key,
         {:ok, keys} <- [key] |> canonical_keys(dict) do
      {:ok, keys |> hd}
    else
      {:error, %KeyError{} = error} -> {:error, error |> struct!(key: key)}
      {:error, _} = result -> result
    end
  end
  @doc ~S"""
  `opzioni_normalise/1` takes a value and tries to normalise it into an *opzioni*, returning `{:ok, opzioni}`.
  Any other argument causes `{:error, error}` to be returned.
  ## Examples
      iex> [] |> opzioni_normalise
      {:ok, []}
      iex> [a: 1, b: 2, c: 3] |> opzioni_normalise
      {:ok, [[a: 1, b: 2, c: 3]]}
      iex> %{a: 1, b: 2, c: 3} |> opzioni_normalise
      {:ok, [[a: 1, b: 2, c: 3]]}
      iex> [ [a: 1, b: 2, c: 3], %{x: 10, y: 11, z: 12}] |> opzioni_normalise
      {:ok, [[a: 1, b: 2, c: 3], [x: 10, y: 11, z: 12]]}
      iex> {:error, error} = %{"a" => 1, :b => 2, :c => 3} |> opzioni_normalise
      ...> error |> Exception.message
      "bad key \"a\" for: %{:b => 2, :c => 3, \"a\" => 1}"
      iex> {:error, error} = 42 |> opzioni_normalise
      ...> error |> Exception.message
      "opzioni invalid, got: 42"
      iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opzioni_normalise
      {:ok, [[a: nil, b: [:b1], c: [:c1, :c2, :c3]]]}
  """
  @since "0.1.0"
  @spec opzioni_normalise(any) :: {:ok, opts} | {:error, error}
  def opzioni_normalise(opzioni \\ [])
  def opzioni_normalise([]) do
    {:ok, []}
  end
  def opzioni_normalise(opzioni) do
    cond do
      Keyword.keyword?(opzioni) ->
        {:ok, [opzioni]}
      is_list(opzioni) ->
        opzioni
        |> Enum.reduce_while([], fn
          [], opzioni ->
            {:cont, opzioni}
          item, opzioni ->
            with {:ok, new_opzioni} <- item |> opzioni_normalise do
              {:cont, opzioni ++ new_opzioni}
            else
              {:error, %{__struct__: _}} = result -> {:halt, result}
            end
        end)
        |> case do
          {:error, %{__struct__: _}} = result ->
            result
          opzioni ->
            {:ok, opzioni}
        end
      is_map(opzioni) ->
        with {:ok, opts} <- opzioni |> opts_normalise do
          {:ok, [opts]}
        else
          {:error, %{__exception__: true}} = result -> result
        end
      true ->
        new_error_result(m: @plymio_fontais_error_message_opzioni_invalid, v: opzioni)
    end
  end
  @doc ~S"""
  `opzioni_validate/1` takes a value and validates it is an *opzioni*, returning `{:ok, opzioni}`.
  Any other argument causes `{:error, error}` to be returned.
  ## Examples
      iex> [] |> opzioni_validate
      {:ok, []}
      iex> [[a: 1, b: 2, c: 3]] |> opzioni_validate
      {:ok, [[a: 1, b: 2, c: 3]]}
      iex> {:error, error} = [a: 1, b: 2, c: 3] |> opzioni_validate
      ...> error |> Exception.message
      "opts invalid, got: {:a, 1}"
      iex> {:error, error} = %{a: 1, b: 2, c: 3} |> opzioni_validate
      ...> error |> Exception.message
      "opzioni invalid, got: %{a: 1, b: 2, c: 3}"
      iex> {:error, error} = [[a: 1, b: 2, c: 3], %{x: 10, y: 11, z: 12}] |> opzioni_validate
      ...> error |> Exception.message
      "opts invalid, got: %{x: 10, y: 11, z: 12}"
      iex> {:error, error} = 42 |> opzioni_validate
      ...> error |> Exception.message
      "opzioni invalid, got: 42"
  """
  @since "0.1.0"
  @spec opzioni_validate(any) :: {:ok, opts} | {:error, error}
  def opzioni_validate(opzioni \\ [])
  def opzioni_validate(opzioni) when is_list(opzioni) do
    opzioni
    |> Enum.reduce_while(
      [],
      fn opts, opzioni ->
        opts
        |> opts_validate
        |> case do
          {:ok, opts} ->
            {:cont, [opts | opzioni]}
          {:error, %{__struct__: _}} = result ->
            {:halt, result}
        end
      end
    )
    |> case do
      {:error, %{__exception__: true}} = result -> result
      opzioni -> {:ok, opzioni |> Enum.reverse()}
    end
  end
  def opzioni_validate(opzioni) do
    new_argument_error_result("opzioni invalid, got: #{inspect(opzioni)}")
  end
  @doc ~S"""
  `opzioni_merge/1` takes one or more *opzioni*, normalises each one and merges them
  to return `{:ok, opzioni}`.
  Empty *opts* are removed.
  Any other argument causes `{:error, error}` to be returned.
  ## Examples
      iex> [] |> opzioni_merge
      {:ok, []}
      iex> [a: 1, b: 2, c: 3] |> opzioni_merge
      {:ok, [[a: 1, b: 2, c: 3]]}
      iex> [[a: 1], [b: 2], [c: 3]] |> opzioni_merge
      {:ok, [[a: 1], [b: 2], [c: 3]]}
      iex> [[[a: 1], [b: 2]], [c: 3], [[d: 4]]] |> opzioni_merge
      {:ok, [[a: 1], [b: 2], [c: 3], [d: 4]]}
      iex> [[a: 1], [], [b: 2], [], [c: 3]] |> opzioni_merge
      {:ok, [[a: 1], [b: 2], [c: 3]]}
      iex> %{a: 1, b: 2, c: 3} |> opzioni_merge
      {:ok, [[a: 1, b: 2, c: 3]]}
      iex> [%{a: 1, b: 2, c: 3}, [d: 4]] |> opzioni_merge
      {:ok, [[a: 1, b: 2, c: 3], [d: 4]]}
      iex> {:error, error} = [[d: 4], %{"a" => 1, :b => 2, :c => 3}] |> opzioni_merge
      ...> error |> Exception.message
      "bad key \"a\" for: %{:b => 2, :c => 3, \"a\" => 1}"
      iex> {:error, error} = 42 |> opzioni_merge
      ...> error |> Exception.message
      "opzioni invalid, got: 42"
  """
  @since "0.1.0"
  @spec opzioni_merge(any) :: {:ok, opzioni} | {:error, error}
  def opzioni_merge(opzioni)
  def opzioni_merge(value) when is_list(value) do
    value
    |> opzioni_normalise
    |> case do
      {:ok, opzioni} ->
        opzioni =
          opzioni
          |> Enum.filter(fn
            [] -> false
            _ -> true
          end)
        {:ok, opzioni}
      _ ->
        value
        |> Enum.reduce_while(
          [],
          fn opzioni, opzionis ->
            with {:ok, opzioni} <- opzioni |> opzioni_normalise do
              {:cont, [opzioni | opzionis]}
            else
              {:error, %{__struct__: _}} = result -> {:halt, result}
            end
          end
        )
        |> case do
          {:error, %{__struct__: _}} = result ->
            result
          opzionis ->
            opzioni =
              opzionis
              |> Enum.reverse()
              |> Enum.reduce([], fn v, s -> s ++ v end)
              |> Enum.filter(fn
                [] -> false
                _ -> true
              end)
            {:ok, opzioni}
        end
    end
  end
  def opzioni_merge(value) when is_map(value) do
    value |> Map.to_list() |> opzioni_merge
  end
  def opzioni_merge(opzioni) do
    opzioni |> opzioni_validate
  end
  @doc ~S"""
  `opzioni_flatten/1` takes a value, calls `opzioni_normalise/1` and then merges all the individual *opts* into a single *opts*.
  ## Examples
      iex> [] |> opzioni_flatten
      {:ok, []}
      iex> [a: 1, b: 2, c: 3] |> opzioni_flatten
      {:ok, [a: 1, b: 2, c: 3]}
      iex> [[a: 1], [b: 2], [c: 3]] |> opzioni_flatten
      {:ok, [a: 1, b: 2, c: 3]}
      iex> [[a: 1], [[b: 2], [c: 3]]] |> opzioni_flatten
      {:ok, [a: 1, b: 2, c: 3]}
      iex> %{a: 1, b: 2, c: 3} |> opzioni_flatten
      {:ok, [a: 1, b: 2, c: 3]}
      iex> {:ok, opts} = [[a: 1, b: 2, c: 3], %{x: 10, y: 11, z: 12}] |> opzioni_flatten
      ...> opts |> Enum.sort
      [a: 1, b: 2, c: 3, x: 10, y: 11, z: 12]
      iex> {:error, error} = %{"a" => 1, :b => 2, :c => 3} |> opzioni_flatten
      ...> error |> Exception.message
      "bad key \"a\" for: %{:b => 2, :c => 3, \"a\" => 1}"
      iex> {:error, error} = 42 |> opzioni_flatten
      ...> error |> Exception.message
      "opzioni invalid, got: 42"
      iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opzioni_flatten
      {:ok, [a: nil, b: [:b1], c: [:c1, :c2, :c3]]}
  """
  @since "0.1.0"
  @spec opzioni_flatten(any) :: {:ok, opts} | {:error, error}
  def opzioni_flatten(opzioni \\ [])
  def opzioni_flatten([]) do
    {:ok, []}
  end
  def opzioni_flatten(opzioni) do
    opzioni
    |> Keyword.keyword?()
    |> case do
      true ->
        {:ok, opzioni}
      _ ->
        with {:ok, opzioni} <- opzioni |> opzioni_normalise do
          {:ok, opzioni |> Enum.flat_map(& &1)}
        else
          {:error, %{__exception__: true}} = result -> result
        end
    end
  end
  @doc ~S"""
  `opts_get/3` takes a *derivable opts*, *key* and default and returns
  the *last* value for the key (or the default) as `{:ok, value_or_default}`.
  Note this is different from `Keyword.get/3`, which returns the *first* value.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_get(:a)
      {:ok, 1}
      iex> [a: 11, b: 21, c: 31, a: 12, b: 22, c: 32, a: 13, b: 23, c: 33] |> opts_get(:c)
      {:ok, 33}
      iex> [a: 1, b: 2, c: 3] |> opts_get(:d, 4)
      {:ok, 4}
      iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_get("a")
      ...> error |> Exception.message
      "key invalid, got: a"
      iex> {:error, error} = 42 |> opts_get(:a)
      ...> error |> Exception.message
      "opts not derivable, got: 42"
      iex> {:error, error} = [{:a, 1}, {:b, 2}, {"c", 3}] |> opts_get(:a)
      ...> error |> Exception.message
      "opts not derivable, got: [{:a, 1}, {:b, 2}, {\"c\", 3}]"
  """
  @since "0.1.0"
  @spec opts_get(any, any, any) :: {:ok, any} | {:error, error}
  def opts_get(opts, key, default \\ nil) do
    with {:ok, opts} <- opts |> opts_normalise,
         {:ok, key} <- key |> validate_key do
      {:ok, opts |> Enum.reverse() |> Keyword.get(key, default)}
    else
      {:error, %{__exception__: true}} = result -> result
    end
  end
  @doc ~S"""
  `opts_get_values/3` takes a *derivable opts*, *key* and default and,
  if the derived opts has the *key*, returns the values
  (`Keyword.get_values/2`).
  Otherwise the "listified" (`List.wrap/1`) default is returned.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_get_values(:a)
      {:ok, [1]}
      iex> [a: 11, b: 21, c: 31, a: 12, b: 22, c: 32, a: 13, b: 23, c: 33] |> opts_get_values(:c)
      {:ok, [31, 32, 33]}
      iex> [a: 1, b: 2, c: 3] |> opts_get_values(:d, 4)
      {:ok, [4]}
      iex> [a: 1, b: 2, c: 3] |> opts_get_values(:d, [41, 42, 43])
      {:ok, [41, 42, 43]}
      iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_get_values("a")
      ...> error |> Exception.message
      "key invalid, got: a"
      iex> {:error, error} = 42 |> opts_get_values(:a)
      ...> error |> Exception.message
      "opts not derivable, got: 42"
  """
  @since "0.1.0"
  @spec opts_get_values(any, any, any) :: {:ok, list} | {:error, error}
  def opts_get_values(opts, key, default \\ nil) do
    with {:ok, opts} <- opts |> opts_normalise,
         {:ok, key} <- key |> validate_key do
      opts
      |> Keyword.has_key?(key)
      |> case do
        true ->
          {:ok, opts |> Keyword.get_values(key)}
        _ ->
          {:ok, default |> List.wrap()}
      end
    else
      {:error, %{__exception__: true}} = result -> result
    end
  end
  @doc ~S"""
  `opts_fetch/2` takes a *derivable opts* and *key* and returns
  the *last* value for the key as `{:ok, value}`.
  Note this is different from `Keyword.fetch/2`, which returns the *first* value.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_fetch(:a)
      {:ok, 1}
      iex> [a: 11, b: 21, c: 31, a: 12, b: 22, c: 32, a: 13, b: 23, c: 33] |> opts_fetch(:c)
      {:ok, 33}
      iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_fetch(:d)
      ...> error |> Exception.message
      "key :d not found in: [a: 1, b: 2, c: 3]"
      iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_fetch("a")
      ...> error |> Exception.message
      "key invalid, got: a"
      iex> {:error, error} = 42 |> opts_fetch(:a)
      ...> error |> Exception.message
      "opts not derivable, got: 42"
      iex> {:error, error} = [{:a, 1}, {:b, 2}, {"c", 3}] |> opts_fetch(:a)
      ...> error |> Exception.message
      "opts not derivable, got: [{:a, 1}, {:b, 2}, {\"c\", 3}]"
  """
  @since "0.1.0"
  @spec opts_fetch(any, any) :: {:ok, any} | {:error, error}
  def opts_fetch(opts, key) do
    with {:ok, norm_opts} <- opts |> opts_normalise,
         {:ok, key} <- key |> validate_key do
      norm_opts
      |> Enum.reverse()
      |> Keyword.fetch(key)
      |> case do
        {:ok, _} = result ->
          result
        :error ->
          new_key_error_result(key, opts)
      end
    else
      {:error, %{__exception__: true}} = result -> result
    end
  end
  @doc ~S"""
  `opts_put/3` takes a *derivable opts*, a *key* and a value, and *appends* the `{key, value}` tuple, returning `{:ok, opts}`.
  Note this is different from `Keyword.put/3`, which prepends the new `{key, value}` tuple and drops all the others for the same key.
  ## Examples
      iex> [a: 11, b: 2, c: 3] |> opts_put(:a, 12)
      {:ok, [a: 11, b: 2, c: 3, a: 12]}
      iex> [a: 1, b: 2, c: 3] |> opts_put(:d, 4)
      {:ok, [a: 1, b: 2, c: 3, d: 4]}
      iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_put("a", 99)
      ...> error |> Exception.message
      "key invalid, got: a"
      iex> {:error, error} = 42 |> opts_put(:a, nil)
      ...> error |> Exception.message
      "opts not derivable, got: 42"
      iex> {:error, error} = [{:a, 1}, {:b, 2}, {"c", 3}] |> opts_put(:a, nil)
      ...> error |> Exception.message
      "opts not derivable, got: [{:a, 1}, {:b, 2}, {\"c\", 3}]"
  """
  @since "0.1.0"
  @spec opts_put(any, any, any) :: {:ok, any} | {:error, error}
  def opts_put(opts, key, value) do
    with {:ok, opts} <- opts |> opts_normalise,
         {:ok, key} <- key |> validate_key do
      {:ok, opts ++ [{key, value}]}
    else
      {:error, %{__exception__: true}} = result -> result
    end
  end
  @doc ~S"""
  `opts_put_new/3` takes a *derivable opts*, a *key* and a value.
  If the *key* already exists in the derived opts, they are returned unchanged as `{:ok, opts}`.
  Otherwise `opts_put/3` is called to *append* the new `{key,value}`, again returning `{:ok, opts}`.
  ## Examples
      iex> [a: 11, b: 2, c: 3] |> opts_put_new(:a, 12)
      {:ok, [a: 11, b: 2, c: 3]}
      iex> [a: 1, b: 2, c: 3] |> opts_put_new(:d, 4)
      {:ok, [a: 1, b: 2, c: 3, d: 4]}
      iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_put_new("a", 99)
      ...> error |> Exception.message
      "key invalid, got: a"
      iex> {:error, error} = 42 |> opts_put_new(:a)
      ...> error |> Exception.message
      "opts not derivable, got: 42"
      iex> {:error, error} = [{:a, 1}, {:b, 2}, {"c", 3}] |> opts_put_new(:a)
      ...> error |> Exception.message
      "opts not derivable, got: [{:a, 1}, {:b, 2}, {\"c\", 3}]"
  """
  @since "0.1.0"
  @spec opts_put_new(any, any, any) :: {:ok, any} | {:error, error}
  def opts_put_new(opts, key, value \\ nil) do
    with {:ok, opts} <- opts |> opts_normalise,
         {:ok, key} <- key |> validate_key do
      opts
      |> Keyword.has_key?(key)
      |> case do
        true ->
          {:ok, opts}
        _ ->
          opts |> opts_put(key, value)
      end
    else
      {:error, %{__exception__: true}} = result -> result
    end
  end
  @doc ~S"""
  `opts_drop/2` takes a *derivable opts* and a *key list* and deletes *all* occurrences of those keys, returning `{:ok, opts}`.
  It essentially wraps `Keyword.drop/2`.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_drop(:a)
      {:ok, [b: 2, c: 3]}
      iex> [a: 11, b: 21, c: 31, a: 12, b: 22, c: 32, a: 13, b: 23, c: 33] |> opts_drop([:a, :c])
      {:ok, [b: 21, b: 22, b: 23]}
      iex> {:error, error} = [a: 1, b: 2, c: 3] |> opts_drop([:b, "a"])
      ...> error |> Exception.message
      "bad key \"a\" for: [:b, \"a\"]"
      iex> {:error, error} = 42 |> opts_drop(:a)
      ...> error |> Exception.message
      "opts not derivable, got: 42"
      iex> {:error, error} = [{:a, 1}, {:b, 2}, {"c", 3}] |> opts_drop(:a)
      ...> error |> Exception.message
      "opts not derivable, got: [{:a, 1}, {:b, 2}, {\"c\", 3}]"
  """
  @since "0.1.0"
  @spec opts_drop(any, any) :: {:ok, opts} | {:error, error}
  def opts_drop(opts, keys) do
    with {:ok, opts} <- opts |> opts_normalise,
         {:ok, keys} <- keys |> normalise_key_list do
      {:ok, opts |> Keyword.drop(keys)}
    else
      {:error, %{__exception__: true}} = result -> result
    end
  end
  @doc ~S"""
  `opts_reduce/1` takes a *derivable opts*, realises the derived opts, and calls `Keyword.new/1` to keep the *last* `{key, value}` tuple for each key, returning `{:ok, reduced_opts}`.
  ## Examples
      iex> [a: 1, b: 2, c: 3] |> opts_reduce
      {:ok, [a: 1, b: 2, c: 3]}
      iex> {:ok, opts} = %{a: 1, b: 2, c: 3} |> opts_reduce
      ...> opts |> Enum.sort
      [a: 1, b: 2, c: 3]
      iex> [a: 11, b: 21, c: 31, a: 12, b: 22, c: 32, a: 13, b: 23, c: 33] |> opts_reduce
      {:ok, [a: 13, b: 23, c: 33]}
      iex> {:error, error} = 42 |> opts_reduce
      ...> error |> Exception.message
      "opts not derivable, got: 42"
      iex> {:error, error} = [{:a, 1}, {:b, 2}, {"c", 3}] |> opts_reduce
      ...> error |> Exception.message
      "opts not derivable, got: [{:a, 1}, {:b, 2}, {\"c\", 3}]"
  """
  @since "0.1.0"
  @spec opts_reduce(any) :: {:ok, opts} | {:error, error}
  def opts_reduce(opts \\ [])
  def opts_reduce([]) do
    {:ok, []}
  end
  def opts_reduce(opts) do
    with {:ok, opts} <- opts |> opts_normalise do
      {:ok, opts |> Keyword.new()}
    else
      {:error, %{__exception__: true}} = result -> result
    end
  end
  @doc ~S"""
  `opts_create_aliases_tuples/1` takes an *opts* where the keys are the canonical key names, and each value is zero (`nil`), one, or more aliases for the canonical key.
  A `Keyword` is returned where each key is an alias and its value the canonical key.
  The canonical key also has an entry for itself with the same value.
  ## Examples
      iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_create_aliases_tuples
      [a: :a, b: :b, b1: :b, c: :c, c1: :c, c2: :c, c3: :c]
  """
  @since "0.1.0"
  @spec opts_create_aliases_tuples(aliases_kvs) :: aliases_tuples
  def opts_create_aliases_tuples(aliases) do
    aliases
    |> Enum.map(fn
      {k, nil} ->
        {k, k}
      {k, a} ->
        [k | a |> List.wrap()]
        |> Enum.uniq()
        |> Enum.map(fn a -> {a, k} end)
    end)
    |> List.flatten()
  end
  @doc ~S"""
  `opts_create_aliases_dict/1` does the same job as `opts_create_aliases_tuples/1` but returns a *key alias dict*.
  ## Examples
      iex> [a: nil, b: [:b1], c: [:c1, :c2, :c3]] |> opts_create_aliases_dict
      %{a: :a, b: :b, b1: :b, c: :c, c1: :c, c2: :c, c3: :c}
  """
  @since "0.1.0"
  @spec opts_create_aliases_dict(aliases_kvs) :: aliases_dict
  def opts_create_aliases_dict(aliases) do
    aliases
    |> opts_create_aliases_tuples
    |> Enum.into(%{})
  end
end | 
	lib/fontais/option/option.ex | 0.889996 | 0.641338 | 
	option.ex | 
	starcoder | 
| 
	defmodule Blake2 do
  import Bitwise
  @moduledoc """
  BLAKE2 hash functions
  Implementing "Blake2b" and "Blake2s" as described in [RFC7693](https://tools.ietf.org/html/rfc7693)
  Note that, at present, this only supports full message hashing and no OPTIONAL features
  of BLAKE2.
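  A minimal usage sketch (the digest is returned as a raw binary; `hash2b/1`
  defaults to a 64-byte Blake2b-512 digest):
      digest = Blake2.hash2b("hello world")
      byte_size(digest) # => 64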
  """
  defp modulo(n, 64), do: n |> rem(18_446_744_073_709_551_616)
  defp modulo(n, 32), do: n |> rem(4_294_967_296)
  defp rotations(64), do: {32, 24, 16, 63}
  defp rotations(32), do: {16, 12, 8, 7}
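  # The G mixing function from RFC 7693, Section 3.1, parameterized by word size.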
  defp mix(v, i, [x, y], bc) do
    [a, b, c, d] = extract_elements(v, i, [])
    {r1, r2, r3, r4} = rotations(bc)
    a = modulo(a + b + x, bc)
    d = rotr(bxor(d, a), r1, bc)
    c = modulo(c + d, bc)
    b = rotr(bxor(b, c), r2, bc)
    a = modulo(a + b + y, bc)
    d = rotr(bxor(d, a), r3, bc)
    c = modulo(c + d, bc)
    b = rotr(bxor(b, c), r4, bc)
    update_elements(v, [a, b, c, d], i)
  end
  defp rotr(x, n, b), do: modulo(bxor(x >>> n, x <<< (b - n)), b)
  defp compress(h, m, t, f, b) do
    v = (h ++ iv(b)) |> List.to_tuple()
    rounds =
      case b do
        32 -> 10
        _ -> 12
      end
    v
    |> update_elements(
      [
        bxor(elem(v, 12), modulo(t, b)),
        bxor(elem(v, 13), t >>> b),
        if(f, do: bxor(elem(v, 14), mask(b)), else: elem(v, 14))
      ],
      [12, 13, 14]
    )
    |> mix_rounds(m, rounds, rounds, b)
    |> update_state(h)
  end
  defp mask(64), do: 0xFFFFFFFFFFFFFFFF
  defp mask(32), do: 0xFFFFFFFF
  defp update_state(v, h), do: update_state_list(v, h, 0, [])
  defp update_state_list(_v, [], _i, acc), do: acc |> Enum.reverse()
  defp update_state_list(v, [h | t], i, acc),
    do: update_state_list(v, t, i + 1, [h |> bxor(elem(v, i)) |> bxor(elem(v, i + 8)) | acc])
  defp mix_rounds(v, _m, 0, _c, _b), do: v
  defp mix_rounds(v, m, n, c, b) do
    s = sigma(c - n)
    msg_word_pair = fn x -> [elem(m, elem(s, 2 * x)), elem(m, elem(s, 2 * x + 1))] end
    v
    |> mix([0, 4, 8, 12], msg_word_pair.(0), b)
    |> mix([1, 5, 9, 13], msg_word_pair.(1), b)
    |> mix([2, 6, 10, 14], msg_word_pair.(2), b)
    |> mix([3, 7, 11, 15], msg_word_pair.(3), b)
    |> mix([0, 5, 10, 15], msg_word_pair.(4), b)
    |> mix([1, 6, 11, 12], msg_word_pair.(5), b)
    |> mix([2, 7, 8, 13], msg_word_pair.(6), b)
    |> mix([3, 4, 9, 14], msg_word_pair.(7), b)
    |> mix_rounds(m, n - 1, c, b)
  end
  @doc """
  Blake2b hashing
  Note that the `output_size` is in bytes, not bits
  - 64 => Blake2b-512 (default)
  - 48 => Blake2b-384
  - 32 => Blake2b-256
  Per the specification, any `output_size` between 1 and 64 bytes is supported.
  """
  @spec hash2b(binary, pos_integer, binary) :: binary | :error
  def hash2b(m, output_size \\ 64, secret_key \\ ""), do: hash(m, 64, output_size, secret_key)
  @doc """
  Blake2s hashing
  Note that the `output_size` is in bytes, not bits
  - 32 => Blake2s-256 (default)
  - 24 => Blake2s-192
  - 16 => Blake2s-128
  Per the specification, any `output_size` between 1 and 32 bytes is supported.
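  A keyed (MAC-style) sketch; here the `secret_key` may be at most 32 bytes:
      mac = Blake2.hash2s("message", 32, "secret key")
      byte_size(mac) # => 32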
  """
  @spec hash2s(binary, pos_integer, binary) :: binary | :error
  def hash2s(m, output_size \\ 32, secret_key \\ ""), do: hash(m, 32, output_size, secret_key)
  defp hash(m, b, output_size, secret_key)
       when byte_size(secret_key) <= b and output_size <= b and output_size >= 1 do
    ll = byte_size(m)
    kk = byte_size(secret_key)
    key =
      case {ll, kk} do
        {0, 0} -> <<0>>
        _ -> secret_key
      end
    key
    |> pad(b * 2)
    |> (&(&1 <> m)).()
    |> pad(b * 2)
    |> block_msg(b)
    |> msg_hash(ll, kk, output_size, b)
  end
  # Wrong-sized stuff
  defp hash(_m, _b, _output_size, _secret_key), do: :error
  defp pad(b, n) when b |> byte_size |> rem(n) == 0, do: b
  defp pad(b, n), do: pad(b <> <<0>>, n)
  defp block_msg(m, bs), do: break_blocks(m, {}, [], bs)
  defp break_blocks(<<>>, {}, blocks, _bs), do: blocks |> Enum.reverse()
  defp break_blocks(to_break, block_tuple, blocks, bs) do
    <<i::unsigned-little-integer-size(bs), rest::binary>> = to_break
    {block_tuple, blocks} =
      case tuple_size(block_tuple) do
        15 -> {{}, [Tuple.insert_at(block_tuple, 15, i) | blocks]}
        n -> {Tuple.insert_at(block_tuple, n, i), blocks}
      end
    break_blocks(rest, block_tuple, blocks, bs)
  end
  defp msg_hash(blocks, ll, kk, nn, b) do
    [h0 | hrest] = iv(b)
    [h0 |> bxor(0x01010000) |> bxor(kk <<< 8) |> bxor(nn) | hrest]
    |> process_blocks(blocks, kk, ll, 1, b)
    |> list_to_binary(<<>>, b)
    |> binary_part(0, nn)
  end
  defp list_to_binary([], bin, _b), do: bin
  defp list_to_binary([h | t], bin, b),
    do: list_to_binary(t, bin <> (h |> :binary.encode_unsigned(:little) |> pad(div(b, 8))), b)
  defp process_blocks(h, [final_block], kk, ll, _n, b) when kk == 0,
    do: compress(h, final_block, ll, true, b)
  defp process_blocks(h, [final_block], kk, ll, _n, b) when kk != 0,
    do: compress(h, final_block, ll + b * 2, true, b)
  defp process_blocks(h, [d | rest], kk, ll, n, b),
    do: process_blocks(compress(h, d, n * b * 2, false, b), rest, kk, ll, n + 1, b)
  defp extract_elements(_v, [], a), do: a |> Enum.reverse()
  defp extract_elements(v, [this | rest], a), do: extract_elements(v, rest, [elem(v, this) | a])
  defp update_elements(v, [], []), do: v
  defp update_elements(v, [n | m], [i | j]),
    do: v |> Tuple.delete_at(i) |> Tuple.insert_at(i, n) |> update_elements(m, j)
  # Initialization vector
  defp iv(64),
    do: [
      0x6A09E667F3BCC908,
      0xBB67AE8584CAA73B,
      0x3C6EF372FE94F82B,
      0xA54FF53A5F1D36F1,
      0x510E527FADE682D1,
      0x9B05688C2B3E6C1F,
      0x1F83D9ABFB41BD6B,
      0x5BE0CD19137E2179
    ]
  defp iv(32),
    do: [
      0x6A09E667,
      0xBB67AE85,
      0x3C6EF372,
      0xA54FF53A,
      0x510E527F,
      0x9B05688C,
      0x1F83D9AB,
      0x5BE0CD19
    ]
  # Word schedule permutations
  defp sigma(0), do: {00, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
  defp sigma(1), do: {14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3}
  defp sigma(2), do: {11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4}
  defp sigma(3), do: {07, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8}
  defp sigma(4), do: {09, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13}
  defp sigma(5), do: {02, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9}
  defp sigma(6), do: {12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11}
  defp sigma(7), do: {13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10}
  defp sigma(8), do: {06, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5}
  defp sigma(9), do: {10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0}
  defp sigma(10), do: {00, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
  defp sigma(11), do: {14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3}
end | 
	lib/blake2.ex | 0.746509 | 0.627666 | 
	blake2.ex | 
	starcoder | 
| 
	defmodule Talan.Counter do
  @moduledoc """
  Linear probabilistic counter implementation with **concurrent accessibility**,
  powered by [:atomics](http://erlang.org/doc/man/atomics.html) module for cardinality estimation.
  Cardinality is the count of unique elements.
  For more info about linear probabilistic counting:
  [linear probabilistic counting](https://www.waitingforcode.com/big-data-algorithms/cardinality-estimation-linear-probabilistic-counting/read)
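  The estimate returned by `cardinality/1` uses the standard linear-counting
  formula `n ≈ -m * ln(V / m)`, where `m` is the total number of bits in the
  filter and `V` is the number of still-unset bits.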
  """
  @enforce_keys [:atomics_ref, :filter_length, :hash_function]
  defstruct [:atomics_ref, :filter_length, :hash_function]
  @type t :: %__MODULE__{
          atomics_ref: reference,
          filter_length: non_neg_integer,
          hash_function: function
        }
  alias Talan.Counter
  @doc """
  Returns a new `%Talan.Counter{}` struct.
  `expected_cardinality` is the maximum number of unique items the counter can
  handle with approximately a 1% error rate.
  ## Options
    * `hash_function` - defaults to `Murmur.hash_x64_128/1`
  ## Examples
      iex> c = Talan.Counter.new(10_000)
      iex> c |> Talan.Counter.put(["you", :can, Hash, {"any", "elixir", "term"}])
      iex> c |> Talan.Counter.put("more")
      iex> c |> Talan.Counter.put("another")
      iex> c |> Talan.Counter.cardinality()
      3
  """
  @spec new(non_neg_integer, list) :: t
  def new(expected_cardinality, options \\ []) do
    hash_function = options |> Keyword.get(:hash_function, &Murmur.hash_x64_128/1)
    # good defaults
    required_size = max(1, round(Float.floor(expected_cardinality * 10 / 64)))
    %Counter{
      atomics_ref: :atomics.new(required_size, signed: false),
      filter_length: required_size * 64,
      hash_function: hash_function
    }
  end
  @doc """
  Hashes `term` and sets a bit to mark it has been seen.
  Doesn't store the `term` so it's space efficient.
  Uses `:atomics` so it's mutable & highly concurrent.
  Returns `:ok`.
  ## Examples
      iex> c = Talan.Counter.new(10_000)
      iex> c |> Talan.Counter.put(["you", :can, Hash, {"any", "elixir", "term"}])
      :ok
  """
  @spec put(t, any) :: :ok
  def put(%Counter{} = counter, term) do
    hash = rem(counter.hash_function.(term), counter.filter_length)
    Abit.set_bit_at(counter.atomics_ref, hash, 1)
    :ok
  end
  @doc """
  Returns the estimated cardinality for the given
  `%Talan.Counter{}` struct.
  ## Examples
      iex> c = Talan.Counter.new(10_000)
      iex> c |> Talan.Counter.put(["you", :can, Hash, {"any", "elixir", "term"}])
      iex> c |> Talan.Counter.put(["you", :can, Hash, {"any", "elixir", "term"}])
      iex> c |> Talan.Counter.cardinality()
      1
      iex> c |> Talan.Counter.put("more")
      iex> c |> Talan.Counter.cardinality()
      2
  """
  @spec cardinality(t) :: non_neg_integer
  def cardinality(%Counter{atomics_ref: atomics_ref}) do
    bit_count = Abit.bit_count(atomics_ref)
    set_bit_count = Abit.set_bits_count(atomics_ref)
    unset_bit_count = bit_count - set_bit_count
    round(-bit_count * :math.log(unset_bit_count / bit_count))
  end
end | 
	lib/talan/counter.ex | 0.927256 | 0.689619 | 
	counter.ex | 
	starcoder | 
| 
	defmodule Grapex.Model.Logicenn do
  import Grapex.TupleUtils
  # alias Grapex.IOutils, as: IO_
  require Axon
  defp relation_embeddings(%Axon{output_shape: parent_shape} = x, n_relations, opts \\ []) do
    n_hidden_units = last(parent_shape) - 1
    output_shape = parent_shape
                   |> delete_last
                   |> Tuple.append(last(parent_shape) - 1)
                   |> Tuple.append(n_relations + 1) # there will be a coefficient for every relation + coefficient for the observer relation
    kernel_shape = {n_hidden_units, n_relations}
    kernel_initializer = opts[:kernel_initializer]
    kernel_regularizer = opts[:kernel_regularizer]
    kernel = Axon.param("kernel", kernel_shape, initializer: kernel_initializer, regularizer: kernel_regularizer)
    Axon.layer(
      x,
      fn input, params ->
        input_shape = Nx.shape(input)
        n_hidden_units = (Nx.shape(input) |> elem(2)) - 1
        observed_relation_indices = input
                                    |> Nx.slice_axis(n_hidden_units, 1, 2) # take indices of the observed relations from the join input tensor
                                    |> Nx.new_axis(-1, :relationship_dimension)
                                    |> Nx.tile([1, 1, n_hidden_units, 1]) # align to the shape of input, last dimension has just one value since there is one observer relation per triple
        tiled_input = input
                      |> Nx.slice_axis(0, n_hidden_units, 2) # discard the last vectors from the 2nd axis which contain indices of observed relations
                      |> Nx.new_axis(-1, :relationship_dimension)
                      |> Nx.tile(
                        (for _ <- 1..tuple_size(Nx.shape(input)), do: 1) ++ [n_relations]
                      )
        batch_size = input_shape 
                     |> delete_last
                     |> Tuple.to_list
        result = params["kernel"]
                 |> Grapex.NxUtils.new_axes(batch_size)
                 |> Nx.multiply(tiled_input)
        observed_relations = Nx.take_along_axis(result, Nx.as_type(observed_relation_indices, {:s, 64}), axis: 3)
        Nx.concatenate([result, observed_relations], axis: 3)
      end,
      output_shape, 
      %{"kernel" => kernel},
      "logicenn_scoring"
    )
  end
  defp inner_product(%Axon{output_shape: parent_shape} = x, units, opts) do
    activation = opts[:activation]
    enable_bias = Keyword.get(opts, :enable_bias, true)
    
    parent_shape_without_first_element = delete_first(parent_shape) # delete variable batch size 
    param_shape = parent_shape_without_first_element
                 |> delete_first # delete constant batch size
                 |> Tuple.append(units) # add number of units in layer
    output_shape =
      parent_shape
      |> delete_last(2) # delete entity embedding size and number of entities per triple from the parent node output shape
      |> Tuple.append(units)
    kernel_initializer = opts[:kernel_initializer]
    kernel_regularizer = opts[:kernel_regularizer]
    bias_initializer = unless enable_bias, do: nil, else: opts[:bias_initializer]
    bias_regularizer = unless enable_bias, do: nil, else: opts[:bias_regularizer]
    kernel = Axon.param("kernel", param_shape, initializer: kernel_initializer, regularizer: kernel_regularizer)
    bias = unless enable_bias, do: nil, else: Axon.param("bias", param_shape, initializer: bias_initializer, regularizer: bias_regularizer)
    node = Axon.layer(
      x,
      fn input, params ->
        bias = unless enable_bias, do: nil, else: params["bias"]
        kernel = params["kernel"]
        kernel_shape = Nx.shape(kernel)
        # align input to number of units in the hidden layer
        tiled_input =
          input
          |> Nx.new_axis(-1)
          |> Nx.tile(
            (for _ <- 1..tuple_size(Nx.shape(input)), do: 1) ++ [last(kernel_shape)] 
          )
        # align bias to batch size
        tiled_bias = unless enable_bias, do: nil, else: Grapex.NxUtils.new_axes(
          bias,
          elems(
            Nx.shape(input),
            [
              0, # variable batch size
              1 # constant batch size
            ]
          )
        )
        # align kernel to batch size
        tiled_kernel = Grapex.NxUtils.new_axes(
          kernel,
          elems(
            Nx.shape(input),
            [
              0, # n elements per triple ( = 2 )
              1 # constant batch size
            ]
          )
        )
        tiled_input
        |> Nx.multiply(tiled_kernel)
        |> (
          fn(x) ->
            unless enable_bias do
              x
            else
              x
              |> Nx.add(tiled_bias)
            end
          end
        ).()
        # |> Nx.add(tiled_bias)
        # |> Nx.max(0) # relu
        # |> Nx.multiply(tiled_bias)
        |> Nx.sum(axes: [-2, -3]) # eliminate dimensions which correspond to the entity embedding size and number of entities per triple
      end,
      output_shape,
      (unless enable_bias, do: %{"kernel" => kernel}, else: %{"kernel" => kernel, "bias" => bias}),
      "logicenn_inner_product"
    )
    if activation do
      Axon.activation(node, activation)
    else
      node
    end
  end
   
  def model(%Grapex.Init{entity_dimension: entity_embedding_size, input_size: batch_size, hidden_size: hidden_size, enable_bias: enable_bias}) do
    product = Axon.input({nil, batch_size, 2})
              |> Axon.embedding(Grapex.Meager.n_entities, entity_embedding_size)
              # |> Axon.layer_norm
              |> inner_product(hidden_size, activation: :relu, enable_bias: enable_bias)
    score = product
            |> Axon.concatenate(
              Axon.input({nil, batch_size, 1}),
              axis: 2
            )
            |> relation_embeddings(Grapex.Meager.n_relations)
    Axon.concatenate(
      product
      |> Axon.reshape({1, batch_size, hidden_size, 1})
      |> Axon.pad([{0, 0}, {0, 0}, {0, Grapex.Meager.n_relations}]),
      score
      |> Axon.reshape({1, batch_size, hidden_size, Grapex.Meager.n_relations + 1}),
      axis: 1
    ) # |> IO.inspect
    # Resulting dimensions:
    # - variable batch size
    # - generated value kinds ( = 2, the first contains values of f_ht functions which are independent of relations and the other one contains the same values multiplied by relations)
    # - constant batch size
    # - hidden size (number of units per hidden layer)
    # - relations + 1 (one is reserved for tracking the observed relation)
  end
  defp fix_shape(x, first_dimension) do
    case {x, first_dimension} do
      {%{shape: {_, _, _, _}}, 1} -> 
        Nx.new_axis(x, 0)
      {%{shape: {_, _, _, _}}, _} -> 
        Nx.new_axis(x, 0)
        |> Nx.tile([first_dimension, 1, 1, 1, 1])
      _ -> x
    end
  end
  def compute_score(x) do
    x
    # |> Grapex.IOutils.inspect_shape("original x")
    |> Nx.slice_axis(1, 1, 1) # Drop intermediate results of inner products calculation
    # |> Nx.slice_axis(0, 1, 1) # Drop intermediate results of inner products calculation
    # |> Nx.slice_axis(elem(Nx.shape(x), tuple_size(Nx.shape(x)) - 1) - 1, 1, -1) # Drop padding values in the last dimension which represents number of relations, last values correspond to the observed relations
    |> Nx.slice_axis(last(Nx.shape(x)) - 1, 1, -1) # Drop padding values in the last dimension which represents number of relations, last values correspond to the observed relations
    # |> Nx.slice_axis(last(Nx.shape(x)) - 1, 1, tuple_size(Nx.shape(x)) - 1) # Drop padding values in the last dimension which represents number of relations, last values correspond to the observed relations
    # |> Nx.slice_axis(0, 1, tuple_size(Nx.shape(x)) - 1) # Drop padding values in the last dimension which represents number of relations, last values correspond to the observed relations
    |> Nx.slice_axis(0, 1, -1) # Drop padding values in the last dimension which represents number of relations, last values correspond to the observed relations
    # |> Grapex.IOutils.inspect_shape("reshaped x")
    |> Nx.squeeze(axes: [1, -1])
    |> Nx.sum(axes: [-1]) # Sum up values corresponding to different values of L for the same (observed) relation
  end 
  def compute_loss_component(x, opts \\ []) do
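    # Softplus loss: log(1 + exp(-multiplier * score)) computed per triple.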
    multiplier = Keyword.get(opts, :multiplier)
    x
    |> compute_score
    |> Nx.multiply(if multiplier == nil, do: 1, else: multiplier)
    |> Nx.multiply(-1)
    |> Nx.exp
    |> Nx.add(1)
    |> Nx.log
  end 
  def take_triples(x, index, inserted_dimension \\ 2) do
    fixed_x = 
      fix_shape(x, inserted_dimension)
    
    Nx.reshape(
      fixed_x,
      Nx.shape(fixed_x)
      |> delete_first
      |> Tuple.insert_at(0, 2)
      |> Tuple.insert_at(0, :auto)
    )
    |> (
      fn(y) ->
        if index >= 0 do
          Nx.slice_axis(y, index, 1, 0) # last dimension corresponds to the observed triples
        else
          Nx.slice_axis(y, (Nx.shape(y) |> elem(0)) + index, 1, 0)
        end
      end
    ).()
    |> Nx.squeeze(axes: [0])
  end
  def compute_loss(x, opts \\ []) do # when pattern == :symmetric do
    pattern = Keyword.get(opts, :pattern, nil)
    lambda = Keyword.get(opts, :lambda, nil)
    # enable_regularization = Keyword.get(opts, :enable_regularization, true)
    # fixed_x = 
    #   fix_shape(x, 2)
    # 
    # fixed_x =
    #   Nx.reshape(
    #     fixed_x,
    #     Nx.shape(fixed_x)
    #     |> delete_first
    #     |> Tuple.insert_at(0, 2)
    #     |> Tuple.insert_at(0, :auto)
    #   )
    #   |> Nx.slice_axis(-1, 1, 0) # last dimension corresponds to the observed triples
    #   |> Nx.squeeze(axes: [0])
    fixed_x = 
      x
      |> take_triples(-1)
     Nx.concatenate(
      [
        Nx.slice_axis(fixed_x, 0, 1, 0) # positive_triples
        |> compute_loss_component
        |> (
          fn(positive_loss_component) ->
            unless lambda == nil do
              compute_regularization(x, pattern, opts)
              |> Nx.multiply(lambda)
              |> Nx.add(positive_loss_component)
            else
              positive_loss_component
            end
          end
        ).(),
        # |> IO_.inspect_shape("Positive triples shape")
        Nx.slice_axis(fixed_x, 1, 1, 0) # negative triples
        |> compute_loss_component(multiplier: -1)
        # |> IO_.inspect_shape("Negative triples shape")
      ]
    )
    |> Nx.flatten
  end 
  def compute_regularization(x, pattern, opts \\ []) do
    case pattern do
      binary_pattern when binary_pattern == :symmetric or binary_pattern == :inverse ->
        margin = Keyword.get(opts, :margin, 0)
        x
        |> take_triples(0, 4) # forward triples 
        |> Nx.slice_axis(0, 1, 0) # positive_triples
        |> Nx.slice_axis(1, 1, 1) # Drop intermediate results of inner products calculation
        |> Nx.slice_axis(last(Nx.shape(x)) - 1, 1, -1) # Drop padding values in the last dimension which represents number of relations, last values correspond to the observed relations
        |> Nx.squeeze(axes: [1, -1])
        |> Nx.subtract(
          x
          |> take_triples(1, 4) # backward triples
          |> Nx.slice_axis(0, 1, 0) # positive_triples
          |> Nx.slice_axis(1, 1, 1) # Drop intermediate results of inner products calculation
          |> Nx.slice_axis(last(Nx.shape(x)) - 1, 1, -1) # Drop padding values in the last dimension which represents number of relations, last values correspond to the observed relations
          |> Nx.squeeze(axes: [1, -1])
        )
        |> Nx.abs
        |> Nx.subtract(margin)
        |> Nx.max(0)
        |> Nx.sum(axes: [-1]) # Sum up values corresponding to different values of L for the same (observed) relation
        |> Nx.flatten
        # |> IO_.inspect_shape("Shape of symmetric regularization")
      _ -> 
        IO.puts "No regularization-specific loss components are defined for pattern #{pattern}"
        0
    end
  end
end | 
	lib/grapex/models/logicenn.ex | 0.75274 | 0.589126 | 
	logicenn.ex | 
	starcoder | 
| 
	defmodule Affine do
  @moduledoc """
  This library performs affine transforms for multiple dimensions. The
  implementation is simple in this initial version allowing for translation,
  scaling, shear and rotation.
  This library uses the Matrix library available on Hex. It is automatically included when using this library as a dep in your application.
  ## Using The Affine Library
  The capabilities of the library can be accessed through either a low level api or a higher level one.
  ### Low Level API
  The transform library can be accessed at it's lowest level giving the best
  performance and full control to the developer. An example of using the API at
  this level is:
      t_translate = Affine.Transforms.translate(3.0, 4.0, 5.0)
      point = Affine.transform(t_translate, [1.0, 2.0, 3.0])
      assert point == [4.0, 6.0, 8.0]
  And to add a transform to the first one:
      t_scale = Affine.Transforms.scale(2.0, 2.0, 2.0)
      t_scale_then_translate = Affine.multiply(t_translate, t_scale)
      point = Affine.transform(t_scale_then_translate, [1.0, 2.0, 3.0])
      assert point == [5.0, 8.0, 11.0]
  Keep in mind that the order individual transforms are provided to the multiply
  function is important since transforms are not commutative. With the same
  example as above but with t_translate and t_scale reversed, the resulting point is different:
      t_translate_then_scale = Affine.multiply(t_scale, t_translate)
      point = Affine.transform(t_translate_then_scale, [1.0, 2.0, 3.0])
      assert point == [8.0, 12.0, 16.0]
  The last transform, t_translate, in this case will be the first to be done. Of course,
  the beauty of Affine transforms is that all multiplied transforms are done
  simultaneously but logically, the last transform multiplied is the first to be
  applied.
  ### High Level API
  The easier API for creating and using Affine transforms uses the flexibility
  provided in Elixir to more elegantly define the transforms. This requires a bit more processing but generally would not be a burden to the application unless
  many transforms are being created. Here's an example:
      t_translate = Affine.create([type: :translate, dimensions: 3, x: 3.0, y: 4.0, z: 5.0])
      point = Affine.transform(t_translate, [1.0, 2.0, 3.0])
      assert point == [4.0, 6.0, 8.0]
  So the create function takes a parameter list and generates the correct
  transform. The create function can also take a list of parameter lists and
  generate a single transform from those parameter lists. For example, to create,
  t_translate_then_scale with a single call to create, the following can be done:
      point =
        [ [type: :translate, dimensions: 3, x: 3.0, y: 4.0, z: 5.0],
          [type: :scale, dimensions: 3, x: 2.0, y: 2.0, z: 2.0] ]
        |> Affine.create
        |> Affine.transform([1.0, 2.0, 3.0])
      assert point == [ 8.0, 12.0, 16.0 ]
  Note the ordering: the first transform in the parameter list is multiplied into the final transform last, so logically it is the first one applied when the final transform is used.
  Of course, the above is only useful for a one time point transformation since
  the generate transform is not saved. So the following is likely to be more
  useful:
      t_translate_then_scale =
        [ [type: :translate, dimensions: 3, x: 3.0, y: 4.0, z: 5.0],
          [type: :scale, dimensions: 3, x: 2.0, y: 2.0, z: 2.0] ]
        |> Affine.create
      point = t_translate_then_scale
        |> Affine.transform([1.0, 2.0, 3.0])
      assert point == [ 8.0, 12.0, 16.0 ]
  ### Linear Maps
  Generating 2D graphics, either for charting, design or other reasons, can require reassignment of a space on the drawing canvas for a part of the graphic.
  For instance, a chart x-axis whose data runs from 0 to 21 but which is drawn from pixel 143 to pixel 200 on the canvas can use a transform to convert easily from data space to canvas space.
  A special type of 'create' parameter list can be used to generate the transform for the very example just stated. Here's how it looks:
      t =
        [type: :linear_map, x1_in: 0.0, x1_out: 143.0, x2_in: 21.0, x2_out: 200.0]
        |> Affine.create
  This code generates a 1D transform with translation and scaling such that an input of 0 produces 143 and an input of 21 produces 200.
      point = Affine.map(t, 0.0)
      assert point == 143
      point = Affine.map(t, 21.0)
      assert point == 200
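  Under the hood this is a plain 1D affine transform: the scale factor is
  (x2_out - x1_out) / (x2_in - x1_in) = (200 - 143) / (21 - 0) ≈ 2.714 and the
  offset is x1_out - scale * x1_in = 143.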
  """
  @type matrix :: [[number]]
  @type point :: [number]
  @type list_of_specs :: [[]]
  @spec create(list_of_specs) :: matrix
  defdelegate create(parameters), to: Affine.Generator, as: :create
  @spec m(matrix,matrix) :: matrix
  defdelegate m(matrix1,matrix2), to: Affine.Operations, as: :multiply
  @spec multiply(matrix,matrix) :: matrix
  defdelegate multiply(matrix1,matrix2), to: Affine.Operations, as: :multiply
  @spec t(matrix,point) :: point
  defdelegate t(matrix,point), to: Affine.Operations, as: :transform
  @spec transform(matrix,point) :: point
  defdelegate transform(matrix,point), to: Affine.Operations, as: :transform
  @spec map(matrix,number) :: number
  defdelegate map(matrix,value), to: Affine.LinearMap, as: :map
end | 
	lib/affine.ex | 0.942109 | 0.898633 | 
	affine.ex | 
	starcoder | 
| 
	defmodule Advent.Y2021.D10 do
  @moduledoc """
  https://adventofcode.com/2021/day/10
  """
  @doc """
  Find the first illegal character in each corrupted line of the navigation
  subsystem. What is the total syntax error score for those errors?
  """
  @spec part_one(Enumerable.t()) :: non_neg_integer()
  def part_one(input) do
    input
    |> parse_input()
    |> Stream.map(fn
      {:corrupted, score} -> score
      _ -> 0
    end)
    |> Enum.sum()
  end
  @doc """
  Find the completion string for each incomplete line, score the completion
  strings, and sort the scores. What is the middle score?
  """
  @spec part_two(Enumerable.t()) :: non_neg_integer()
  def part_two(input) do
    input
    |> parse_input()
    |> Enum.reduce([], fn
      {:incomplete, score}, acc -> [score | acc]
      _, acc -> acc
    end)
    |> Enum.sort()
    # Get middle value - assumed to be odd length (median)
    |> (&Enum.at(&1, div(length(&1), 2))).()
  end
  @spec parse_input(Enumerable.t()) :: Enumerable.t()
  defp parse_input(input) do
    input
    |> Stream.map(&String.graphemes/1)
    |> Stream.map(&parse_chunks/1)
  end
  @spec parse_chunks([String.grapheme()]) ::
          :legal | {:corrupted, non_neg_integer()} | {:incomplete, non_neg_integer()}
  defp parse_chunks(chunks) do
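    # Push opening delimiters onto a stack; each closing delimiter must match
    # the stack head, otherwise we halt with that closer's syntax-error score.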
    Enum.reduce_while(chunks, [], fn
      ")", ["(" | tail] -> {:cont, tail}
      "]", ["[" | tail] -> {:cont, tail}
      "}", ["{" | tail] -> {:cont, tail}
      ">", ["<" | tail] -> {:cont, tail}
      ")", _stack -> {:halt, 3}
      "]", _stack -> {:halt, 57}
      "}", _stack -> {:halt, 1197}
      ">", _stack -> {:halt, 25_137}
      open, stack -> {:cont, [open | stack]}
    end)
    |> case do
      [] -> :legal
      res when is_list(res) -> {:incomplete, score_incomplete(res)}
      res when is_integer(res) -> {:corrupted, res}
    end
  end
  @spec score_incomplete([String.grapheme()]) :: non_neg_integer()
  defp score_incomplete(incomplete) do
    Enum.reduce(incomplete, 0, fn chunk, acc ->
      acc * 5 +
        case chunk do
          "(" -> 1
          "[" -> 2
          "{" -> 3
          "<" -> 4
        end
    end)
  end
end | 
	lib/advent/y2021/d10.ex | 0.755457 | 0.565959 | 
	d10.ex | 
	starcoder | 
| 
	defmodule Dune.Parser do
  @moduledoc false
  alias Dune.{AtomMapping, Success, Failure, Opts}
  alias Dune.Parser.{CompileEnv, StringParser, Sanitizer, SafeAst, UnsafeAst}
  @typep previous_session :: %{
           atom_mapping: AtomMapping.t(),
           compile_env: Dune.Parser.CompileEnv.t()
         }
  @spec parse_string(String.t(), Opts.t(), previous_session | nil) :: SafeAst.t() | Failure.t()
  def parse_string(string, opts = %Opts{}, previous_session \\ nil) when is_binary(string) do
    compile_env = get_compile_env(opts, previous_session)
    string
    |> do_parse_string(opts, previous_session)
    |> Sanitizer.sanitize(compile_env)
  end
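  # Enforce the configured max code length before handing the string to the parser.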
  defp do_parse_string(string, opts = %{max_length: max_length}, previous_session) do
    case String.length(string) do
      length when length > max_length ->
        %Failure{type: :parsing, message: "max code length exceeded: #{length} > #{max_length}"}
      _ ->
        StringParser.parse_string(string, opts, previous_session)
    end
  end
  @spec parse_quoted(Macro.t(), Opts.t(), previous_session | nil) :: SafeAst.t()
  def parse_quoted(quoted, opts = %Opts{}, previous_session \\ nil) do
    compile_env = get_compile_env(opts, previous_session)
    quoted
    |> unsafe_quoted()
    |> Sanitizer.sanitize(compile_env)
  end
  def unsafe_quoted(ast) do
    %UnsafeAst{ast: ast, atom_mapping: AtomMapping.new()}
  end
  defp get_compile_env(opts, nil) do
    CompileEnv.new(opts.allowlist)
  end
  defp get_compile_env(opts, %{compile_env: compile_env}) do
    %{compile_env | allowlist: opts.allowlist}
  end
  @spec string_to_quoted(String.t(), Opts.t()) :: Success.t() | Failure.t()
  def string_to_quoted(string, opts) do
    with unsafe = %UnsafeAst{} <- StringParser.parse_string(string, opts, nil, false) do
      inspected = inspect(unsafe.ast, pretty: opts.pretty)
      inspected = AtomMapping.replace_in_string(unsafe.atom_mapping, inspected)
      %Success{
        value: unsafe.ast,
        inspected: inspected,
        stdio: ""
      }
    end
  end
end | 
	lib/dune/parser.ex | 0.741861 | 0.421641 | 
	parser.ex | 
	starcoder | 
| 
	defmodule LineBot.Message.Imagemap do
  use LineBot.Message
  @moduledoc """
  Represents an [Imagemap message](https://developers.line.biz/en/reference/messaging-api/#imagemap-message).
  """
  @type t :: %__MODULE__{
          baseUrl: String.t(),
          altText: String.t(),
          baseSize: %{width: integer(), height: integer()},
          video: LineBot.Message.Imagemap.Video.t() | nil,
          actions: [
            LineBot.Message.Imagemap.Action.Message.t() | LineBot.Message.Imagemap.Action.URI.t()
          ],
          type: :imagemap,
          quickReply: LineBot.Message.QuickReply.t() | nil
        }
  @enforce_keys [:baseUrl, :altText, :baseSize, :actions]
  defstruct [:baseUrl, :altText, :baseSize, :video, :actions, :quickReply, type: :imagemap]
end
defmodule LineBot.Message.Imagemap.Video do
  use LineBot.Message
  @moduledoc """
  Represents the video component of a `t:LineBot.Message.Imagemap.t/0`.
  """
  @type t :: %__MODULE__{
          originalContentUrl: String.t(),
          previewImageUrl: String.t(),
          area: %{x: integer(), y: integer(), width: integer(), height: integer()},
          externalLink: %{linkUri: String.t(), label: String.t()} | nil
        }
  @enforce_keys [:originalContentUrl, :previewImageUrl, :area]
  defstruct [:originalContentUrl, :previewImageUrl, :area, :externalLink]
end
defmodule LineBot.Message.Imagemap.Action.Message do
  use LineBot.Message
  @moduledoc """
  Represents an [Imagemap Message action object](https://developers.line.biz/en/reference/messaging-api/#imagemap-message-action-object).
  """
  @type t :: %__MODULE__{
          label: String.t() | nil,
          text: String.t(),
          area: %{x: integer(), y: integer(), width: integer(), height: integer()},
          type: :message
        }
  @enforce_keys [:text, :area]
  defstruct [:label, :text, :area, type: :message]
end
defmodule LineBot.Message.Imagemap.Action.URI do
  use LineBot.Message
  @moduledoc """
  Represents an [Imagemap URI action object](https://developers.line.biz/en/reference/messaging-api/#imagemap-uri-action-object).
  """
  @type t :: %__MODULE__{
          label: String.t() | nil,
          linkUri: String.t(),
          area: %{x: integer(), y: integer(), width: integer(), height: integer()},
          type: :uri
        }
  @enforce_keys [:linkUri, :area]
  defstruct [:label, :linkUri, :area, type: :uri]
end | 
	lib/line_bot/message/image_map.ex | 0.880733 | 0.430656 | 
	image_map.ex | 
	starcoder | 
| 
	defmodule Day22 do
  def part1(input, moves \\ 10000) do
    grid = parse(input)
    |> Map.new
    state = {{0, 0}, 0, 0, grid}
    res = Stream.iterate(state, &next_state_part1/1)
    |> Stream.drop(moves)
    |> Enum.take(1)
    |> hd
    {_, _, infections, _} = res
    infections
  end
  def part2(input, moves \\ 10000000) do
    grid = parse(input)
    |> Map.new
    state = {{0, 0}, 0, 0, grid}
    res = Stream.iterate(state, &next_state_part2/1)
    |> Stream.drop(moves)
    |> Enum.take(1)
    |> hd
    {_, _, infections, _} = res
    infections
  end
  defp next_state_part1({location, direction, infections, grid}) do
    case Map.get(grid, location, :clean) do
      :infected ->
        direction = turn_right(direction)
        grid = Map.put(grid, location, :clean)
        location = move(location, direction)
        {location, direction, infections, grid}
      :clean ->
        direction = turn_left(direction)
        grid = Map.put(grid, location, :infected)
        infections = infections + 1
        location = move(location, direction)
        {location, direction, infections, grid}
    end
  end
  defp next_state_part2({location, direction, infections, grid}) do
    case Map.get(grid, location, :clean) do
      :infected ->
        direction = turn_right(direction)
        grid = Map.put(grid, location, :flagged)
        location = move(location, direction)
        {location, direction, infections, grid}
      :clean ->
        direction = turn_left(direction)
        grid = Map.put(grid, location, :weakened)
        location = move(location, direction)
        {location, direction, infections, grid}
      :flagged ->
        direction = direction |> turn_left |> turn_left
        grid = Map.put(grid, location, :clean)
        location = move(location, direction)
        {location, direction, infections, grid}
      :weakened ->
        grid = Map.put(grid, location, :infected)
        infections = infections + 1
        location = move(location, direction)
        {location, direction, infections, grid}
    end
  end
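  # Directions are encoded as 0 = up, 1 = right, 2 = down, 3 = left (see move/2);
  # turning left or right is modular arithmetic on that encoding.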
  defp turn_left(direction) do
    rem(4 + direction - 1, 4)
  end
  defp turn_right(direction) do
    rem(4 + direction + 1, 4)
  end
  defp move({x, y}, direction) do
    case direction do
      0 -> {x, y - 1}
      2 -> {x, y + 1}
      1 -> {x + 1, y}
      3 -> {x - 1, y}
    end
  end
  defp parse(input) do
    middle = div(length(input), 2)
    input
    |> Enum.with_index(-middle)
    |> Enum.flat_map(fn {line, y} ->
      String.to_charlist(line)
      |> Enum.with_index(-middle)
      |> Enum.map(fn {char, x} ->
        location = {x, y}
        state = case char do
                  ?\# -> :infected
                  ?\. -> :clean
                end
        {location, state}
      end)
    end)
  end
end | 
	day22/lib/day22.ex | 0.654343 | 0.622832 | 
	day22.ex | 
	starcoder | 
| 
	defmodule MetarMap do
  @moduledoc """
  MetarMap keeps the contexts that define your domain
  and business logic.
  Contexts are also responsible for managing your data, regardless
  if it comes from the database, an external API or others.
  """
  alias MetarMap.Display.Color
  @doc """
  Naively blends two colors
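  For example (a sketch; `Color` carries `r`/`g`/`b`/`w` channels):
      red = %MetarMap.Display.Color{r: 200, g: 0, b: 0, w: 0}
      off = %MetarMap.Display.Color{r: 0, g: 0, b: 0, w: 0}
      MetarMap.blend(red, off, 0.5)
      # => %MetarMap.Display.Color{r: 100, g: 0, b: 0, w: 0}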
  """
  def blend(from_color, to_color, to_factor) do
    from_factor = 1.0 - to_factor
    %Color{
      r: trunc(from_color.r * from_factor + to_color.r * to_factor),
      g: trunc(from_color.g * from_factor + to_color.g * to_factor),
      b: trunc(from_color.b * from_factor + to_color.b * to_factor),
      w: trunc(from_color.w * from_factor + to_color.w * to_factor)
    }
  end
  @doc """
  Naively blends two colors, mapping `value` within the given range to a blend factor
  """
  def blend(from_color, to_color, %Range{} = range, value) do
    blend(from_color, to_color, {range.first, range.last}, value)
  end
  def blend(from_color, to_color, {first, last}, value) do
    blend(from_color, to_color, normalize(first, last, value))
  end
  @spec blend_gradient([{number(), %Color{}}], number, term) :: %Color{} | term
  def blend_gradient(_, nil, default), do: default
  def blend_gradient(gradient, value, _default) when is_list(gradient) do
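    # Stops are sorted by value; values outside the gradient's range clamp to
    # the end colors, otherwise we blend between the two surrounding stops.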
    gradient = Enum.sort(gradient)
    {first_value, first_color} = hd(gradient)
    {last_value, last_color} = List.last(gradient)
    cond do
      value <= first_value ->
        first_color
      value >= last_value ->
        last_color
      true ->
        pairs = Enum.zip(Enum.slice(gradient, 0..-2), Enum.slice(gradient, 1..-1))
        Enum.reduce_while(pairs, nil, fn
          {{min_value, _}, _}, _ when min_value > value ->
            {:cont, nil}
          {{min_value, min_color}, {max_value, max_color}}, _ ->
            {:halt, blend(min_color, max_color, {min_value, max_value}, value)}
        end)
    end
  end
  @doc """
  Changes a color's brightness.
  """
  def brighten(color, rate) do
    %Color{
      r: (color.r * rate) |> min(255) |> max(0) |> trunc(),
      g: (color.g * rate) |> min(255) |> max(0) |> trunc(),
      b: (color.b * rate) |> min(255) |> max(0) |> trunc(),
      w: (color.w * rate) |> min(255) |> max(0) |> trunc()
    }
  end
  @doc """
  Normalize a value from 0.0 to 1.0.
  """
  def normalize(min, max, value) do
    (value - min) / (max - min)
  end
end | 
	lib/metar_map.ex | 0.858704 | 0.555194 | 
	metar_map.ex | 
	starcoder | 
| 
	defmodule Bolt.Sips.Internals.PackStream.EncoderV1 do
  @moduledoc false
  alias Bolt.Sips.Internals.PackStream.EncoderHelper
  use Bolt.Sips.Internals.PackStream.Markers
  @doc """
  Encode an atom into Bolt binary format.
  Encoding:
  `Marker`
  with
  | Value | Marker |
  | ------- | -------- |
  | nil | `0xC0` |
  | false | `0xC2` |
  | true | `0xC3` |
  Other atoms are converted to string before encoding.
  ## Example
      iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
      iex> :erlang.iolist_to_binary(EncoderV1.encode_atom(nil, 1))
      <<0xC0>>
      iex> :erlang.iolist_to_binary(EncoderV1.encode_atom(true, 1))
      <<0xC3>>
      iex> :erlang.iolist_to_binary(EncoderV1.encode_atom(:guten_tag, 1))
      <<0x89, 0x67, 0x75, 0x74, 0x65, 0x6E, 0x5F, 0x74, 0x61, 0x67>>
  """
  @spec encode_atom(atom(), integer()) :: Bolt.Sips.Internals.PackStream.value()
  def encode_atom(atom , bolt_version), do: EncoderHelper.call_encode(:atom, atom, bolt_version) 
  @doc """
  Encode a string into Bolt binary format.
  Encoding:
  `Marker` `Size` `Content`
  with
  | Marker | Size | Max data size |
  |--------|------|---------------|
  | `0x80`..`0x8F` | None (contained in marker) | 15 bytes |
  | `0xD0` | 8-bit integer | 255 bytes |
  | `0xD1` | 16-bit integer | 65_535 bytes |
  | `0xD2` | 32-bit integer | 4_294_967_295 bytes |
  ## Example
      iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
      iex> :erlang.iolist_to_binary(EncoderV1.encode_string("guten tag", 1))
      <<0x89, 0x67, 0x75, 0x74, 0x65, 0x6E, 0x20, 0x74, 0x61, 0x67>>
  """
  @spec encode_string(String.t(), integer()) :: Bolt.Sips.Internals.PackStream.value()
  def encode_string(string, bolt_version), do: EncoderHelper.call_encode(:string, string, bolt_version)
  @doc """
  Encode an integer into Bolt binary format.
  Encoding:
  `Marker` `Value`
  with
  |   | Marker |
  |---|--------|
  | tiny int | `0x2A` |
  | int8 | `0xC8` |
  | int16 | `0xC9` |
  | int32 | `0xCA` |
  | int64 | `0xCB` |
  ## Example
      iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
      iex> :erlang.iolist_to_binary(EncoderV1.encode_integer(74, 1))
      <<0x4A>>
      iex> :erlang.iolist_to_binary(EncoderV1.encode_integer(-74_789, 1))
      <<0xCA, 0xFF, 0xFE, 0xDB, 0xDB>>
  """
  @spec encode_integer(integer(), integer()) :: Bolt.Sips.Internals.PackStream.value()
  def encode_integer(integer, bolt_version), do: EncoderHelper.call_encode(:integer, integer, bolt_version)
  @doc """
  Encode a float into Bolt binary format.
  Encoding: `Marker` `8 byte Content`.
  Marker: `0xC1`
  Formatted according to the IEEE 754 floating-point "double format" bit layout.
  ## Example
      iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
      iex> :erlang.iolist_to_binary(EncoderV1.encode_float(42.42, 1))
      <<0xC1, 0x40, 0x45, 0x35, 0xC2, 0x8F, 0x5C, 0x28, 0xF6>>
  """
  @spec encode_float(float(), integer()) :: Bolt.Sips.Internals.PackStream.value()
  def encode_float(number, bolt_version), do: EncoderHelper.call_encode(:float, number, bolt_version) 
  @doc """
  Encode a list into Bolt binary format.
  Encoding:
  `Marker` `Size` `Content`
  with
  | Marker | Size | Max list size |
  |--------|------|---------------|
  | `0x90`..`0x9F` | None (contained in marker) | 15 items |
  | `0xD4` | 8-bit integer | 255 items |
  | `0xD5` | 16-bit integer | 65_535 items |
  | `0xD6` | 32-bit integer | 4_294_967_295 items |
  ## Example
      iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
      iex> :erlang.iolist_to_binary(EncoderV1.encode_list(["hello", "world"], 1))
      <<0x92, 0x85, 0x68, 0x65, 0x6C, 0x6C, 0x6F, 0x85, 0x77, 0x6F, 0x72, 0x6C, 0x64>>
  """
  @spec encode_list(list(), integer()) :: Bolt.Sips.Internals.PackStream.value()
  def encode_list(list, bolt_version), do: EncoderHelper.call_encode(:list, list, bolt_version)
  @doc """
  Encode a map into Bolt binary format.
  Note that Elixir structs are converted to map for encoding purpose.
  Encoding:
  `Marker` `Size` `Content`
  with
  | Marker | Size | Max map size |
  |--------|------|---------------|
  | `0xA0`..`0xAF` | None (contained in marker) | 15 entries |
  | `0xD8` | 8-bit integer | 255 entries |
  | `0xD9` | 16-bit integer | 65_535 entries |
  | `0xDA` | 32-bit integer | 4_294_967_295 entries |
  ## Example
      iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
      iex> :erlang.iolist_to_binary(EncoderV1.encode_map(%{id: 345, value: "hello world"}, 1))
      <<0xA2, 0x82, 0x69, 0x64, 0xC9, 0x1, 0x59, 0x85, 0x76, 0x61, 0x6C, 0x75,
      0x65, 0x8B, 0x68, 0x65, 0x6C, 0x6C, 0x6F, 0x20, 0x77, 0x6F, 0x72, 0x6C, 0x64>>
  """
  @spec encode_map(map(), integer()) :: Bolt.Sips.Internals.PackStream.value()
  def encode_map(map, bolt_version), do: EncoderHelper.call_encode(:map, map, bolt_version)
  @doc """
  Encode a struct into Bolt binary format.
  This concerns Bolt Structs as defined in the PackStream specification.
  Elixir structs are just converted to regular maps before encoding
  Encoding:
  `Marker` `Size` `Signature` `Content`
  with
  | Marker | Size | Max structure size |
  |--------|------|---------------|
  | `0xB0`..`0xBF` | None (contained in marker) | 15 fields |
  | `0xDC` | 8-bit integer | 255 fields |
  | `0xDD` | 16-bit integer | 65_535 fields |
  ## Example
      iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
      iex> :erlang.iolist_to_binary(EncoderV1.encode_struct({0x01, ["two", "params"]}, 1))
      <<0xB2, 0x1, 0x83, 0x74, 0x77, 0x6F, 0x86, 0x70, 0x61, 0x72, 0x61, 0x6D, 0x73>>
  """
  @spec encode_struct({integer(), list()}, integer()) :: Bolt.Sips.Internals.PackStream.value()
  def encode_struct(struct, bolt_version) , do: EncoderHelper.call_encode(:struct, struct, bolt_version)
end | 
	lib/bolt_sips/internals/pack_stream/encoder_v1.ex | 0.847936 | 0.659193 | 
	encoder_v1.ex | 
	starcoder | 
| 
	defmodule EtsQuery do
  @moduledoc """
    EtsQuery gives you convenient functions to work with ETS tables
  """
  alias :ets, as: Ets
  import Ets
  @doc """
    Looks up every row in the given ETS table, calling `func` on each.
    `type` indicates whether the traversal starts from the first or the last
    row of the table, which only matters if the table is an `:ordered_set`.
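    A minimal usage sketch (assumes a public table created with `:ets.new/2`):
        tab = :ets.new(:demo, [:ordered_set, :public])
        :ets.insert(tab, [{1, :a}, {2, :b}])
        EtsQuery.traversal(tab, fn {k, v} -> IO.inspect({k, v}) end, :first)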
  """
  def traversal(tab, func, type) do
    traversal(tab, first_key(tab, type), func, type)
  end
  @doc """
    Looks up every row in the given ETS table, calling `func` on each.
    `type` indicates whether the traversal starts from the first or the last
    row of the table, which only matters if the table is an `:ordered_set`.
  """
  @spec traversal(
    atom | :ets.tid(),
    any | :'$end_of_table',
    fun,
    :first | :last
  ) :: :'$end_of_table'
  def traversal(_tab, :'$end_of_table', _func, _type), do: :'$end_of_table'
  def traversal(tab, key, func, type) do
    [outcome] = lookup(tab, key)
    func.(outcome)
    traversal(tab,
      next_key(tab, type, key),
      func,
      type
    )
  end
  defp next_key(tab, type, key) do
    case type do
      :first -> :ets.next(tab, key)
      :last -> :ets.prev(tab, key)
    end
  end
  defp first_key(tab, type) do
    case type do
      :first -> :ets.first(tab)
      :last -> :ets.last(tab)
    end
  end
  @doc """
    Appends an element to a row of an ETS table whose value is expected to be a list.
    Args: the ETS table (ref or atom), the key of the row, and the element you want to append.
  """
  @spec new_row(atom | :ets.tid(), any, any) :: true
  def new_row(tab, key, value) do
    append(tab, key, value)
  end
  @spec new_row(atom | :ets.tid(), {any, any}) :: true
  def new_row(tab, tuple) do
    append(tab, tuple)
  end
  defp value2list(value) when not is_list(value), do: [value]
  defp value2list(value), do: value
  @doc """
    Alias for `new_row`.
  """
  @spec append(atom | :ets.tid(), any, any) :: true
  def append(tab, key, value) do
    append(tab, {key, value})
  end
  @spec append(atom | :ets.tid(), {any, any}) :: true
  def append(tab, {key, value}) do
    new_value =
      case lookup(tab, key) do
        [] -> value2list(value)
        [{_, existing_value}] ->
          [value | existing_value]
      end
    insert(tab, {key, new_value})
  end
  @doc """
    The row is expected to have a list as a value.
    This function removes elements from that list: any element for which
    `match_func` does not return `true` is dropped; returns `:unremoved` if
    nothing changed.
  """
  @spec remove_element(atom | :ets.tid(), any, (any -> boolean)) :: true | :unremoved
  def remove_element(tab, key, match_func) do
    [{_, value}] = lookup(tab, key)
    filtered = filter_element(value, match_func)
    len = length(value)
    case length(filtered) do
      ^len -> :unremoved
      _ -> insert(tab, {key, filtered})
    end
  end
  defp filter_element(to_check, func, filtered \\ [])
  defp filter_element([h | t], func, filtered) do
    filtered =
      case func.(h) do
        true -> [h | filtered]
        _ -> filtered
      end
    filter_element(t, func, filtered)
  end
  defp filter_element(_to_check, _func, filtered) do
    filtered
  end
  @spec remove(atom | :ets.tid(), any, (any -> boolean)) :: true | :unremoved
  def remove(tab, key, match_func) do
    remove_element(tab, key, match_func)
  end
  @spec fetch(any, :map | :list) :: any
  def fetch(tab, merge_type \\ :map) do
    {:ok, pid} = Agent.start_link(fn  -> %{} end)
    try do
      traversal(tab, fn {key, list} ->
        Agent.update(pid, &
          case merge_type do
            :map ->
                Map.merge(&1, %{ key => list })
            :list ->
                list ++ &1
          end
        )
      end, :first)
      data = Agent.get(pid, & &1)
      Agent.stop(pid)
      data
    rescue _ ->
      Agent.stop(pid)
    end
  end
end | 
	lib/ets_query.ex | 0.657758 | 0.706861 | 
	ets_query.ex | 
	starcoder | 
| 
	defmodule Rambla.Amqp do
  @moduledoc """
  Default connection implementation for 🐰 Rabbit.
  `publish/2` accepts the following options:
  - `exchange` [`binary()`, **mandatory**] the exchange to publish to
  - `queue` [`binary()`, **optional**] if passed, the queue will be created
    and bound to the exchange; this slows down publishing, but is safer
    for a cold RabbitMQ installation
  - `declare?`[`boolean()`, **optional**, _default_: `true`] if false
    is passed, the exchange would not be declared; use it if the exchange
    already surely exists to speed up the publishing
  - `routing_key` [`binary()`, **optional**, _default_: `""`] if passed,
    used as a routing key
  - `options` [`keyword()`, **optional**, _default_: `[]`] the options
    to be passed as is to call to `AMQP.Basic.publish/5`
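  An illustrative set of options for `publish/2` (the exchange/queue names and
  values below are made-up examples, not defaults):
  ```
  %{exchange: "events", queue: "events", routing_key: "user.created", options: [persistent: true]}
  ```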
  ---
  Since `v0.6.0` provides two `mix` tasks:
  - `mix rambla.rabbit.exchange` Operations with exchanges in RabbitMQ
  - `mix rambla.rabbit.queue`    Operations with queues in RabbitMQ
  Tasks support arguments to be passed to RabbitMQ instance. Usage example:
  ```
  mix rambla.rabbit.queue declare foo -o durable:true
  ```
  """
  defmodule ChannelPool do
    @moduledoc false
    @amqp_pool_size Application.get_env(:rambla, :amqp_pool_size, 32)
    use Tarearbol.Pool, pool_size: @amqp_pool_size, pickup: :hashring
    @spec publish(%Rambla.Connection.Config{}, binary() | map() | list()) ::
            {:ok | :replace, Rambla.Connection.Config.t()}
    def publish(%Rambla.Connection.Config{conn: conn} = cfg, message) do
      id = Enum.random(1..workers_slice())
      do_publish({id, conn}, cfg, message)
    end
    defsynch do_publish(
               {id, conn},
               %Rambla.Connection.Config{conn: conn, opts: opts} = cfg,
               message
             ) do
      message =
        case message do
          message when is_binary(message) -> message
          %{} = message -> Jason.encode!(message)
          [{_, _} | _] = message -> message |> Map.new() |> Jason.encode!()
          message -> inspect(message)
        end
      {_, %{chan: %{__struct__: AMQP.Channel} = chan}} =
        reply =
        case payload!() do
          %{conn: ^conn, chan: %{__struct__: AMQP.Channel}} = cfg ->
            {:ok, cfg}
          %{} = cfg ->
            if not is_nil(cfg[:chan]), do: apply(AMQP.Channel, :close, [cfg[:chan]])
            {:replace, %{conn: conn, chan: AMQP.Channel |> apply(:open, [conn]) |> elem(1)}}
        end
      with %{exchange: exchange} <- opts,
           declare? <- Map.get(opts, :declare?, true),
           if(declare?, do: apply(AMQP.Exchange, :declare, [chan, exchange])),
           :ok <- queue!(chan, opts),
           do:
             apply(AMQP.Basic, :publish, [
               chan,
               exchange,
               Map.get(opts, :routing_key, ""),
               message,
               Map.get(opts, :options, [])
             ])
      reply
    end
    @spec queue!(chan :: AMQP.Channel.t(), map()) :: :ok
    defp queue!(chan, %{queue: queue, exchange: exchange}) do
      with {:ok, %{consumer_count: _, message_count: _, queue: ^queue}} <-
             apply(AMQP.Queue, :declare, [chan, queue]),
           do: apply(AMQP.Queue, :bind, [chan, queue, exchange])
    end
    defp queue!(_, %{exchange: _exchange}), do: :ok
    @spec workers_slice :: pos_integer()
    defp workers_slice,
      do: Application.get_env(:rambla, __MODULE__) || invalidate_workers_slice()
    @spec invalidate_workers_slice :: pos_integer()
    defp invalidate_workers_slice do
      poolboy =
        Rambla.ConnectionPool
        |> DynamicSupervisor.which_children()
        |> Enum.reduce_while(nil, fn
          {_, pid, :worker, [:poolboy]}, nil -> {:halt, pid}
          _, nil -> {:cont, nil}
        end)
      with pid when is_pid(pid) <- poolboy,
           {:ready, num, _, _} when num >= 0 <- :poolboy.status(pid) do
        num = div(@amqp_pool_size, num + 1)
        Application.put_env(:rambla, __MODULE__, num)
        num
      else
        _ -> @amqp_pool_size
      end
    end
  end
  @with_amqp match?({:module, _}, Code.ensure_compiled(AMQP.Channel))
  @behaviour Rambla.Connection
  @impl Rambla.Connection
  def connect(params) when is_list(params) do
    if not @with_amqp or is_nil(params[:host]),
      do:
        raise(Rambla.Exceptions.Connection,
          value: params,
          expected: "🐰 configuration with :host key"
        )
    case ChannelPool.start_link() do
      {:ok, pool} -> Process.link(pool)
      {:error, {:already_started, pool}} -> Process.link(pool)
    end
    maybe_amqp(params)
  end
  @impl Rambla.Connection
  def publish(%Rambla.Connection.Config{} = conn, message)
      when is_binary(message) or is_list(message) or is_map(message),
      do: ChannelPool.publish(conn, message)
  if @with_amqp do
    defp maybe_amqp(params) do
      case AMQP.Connection.open(params) do
        {:ok, conn} ->
          Process.link(conn.pid)
          %Rambla.Connection{
            conn: %Rambla.Connection.Config{conn: conn},
            conn_type: __MODULE__,
            conn_pid: conn.pid,
            conn_params: params,
            errors: []
          }
        error ->
          %Rambla.Connection{
            conn: %Rambla.Connection.Config{},
            conn_type: __MODULE__,
            conn_pid: nil,
            conn_params: params,
            errors: [error]
          }
      end
    end
  else
    defp maybe_amqp(params) do
      error =
        Rambla.Exceptions.Connection.exception(
          source: __MODULE__,
          info: params,
          reason: "🐰 AMQP should be explicitly included to use this functionality"
        )
      %Rambla.Connection{
        conn: %Rambla.Connection.Config{},
        conn_type: __MODULE__,
        conn_pid: nil,
        conn_params: params,
        errors: [error]
      }
    end
  end
end | 
	lib/rambla/connections/amqp.ex | 0.881596 | 0.820685 | 
	amqp.ex | 
	starcoder | 
| 
	defmodule AfterGlow.ColumnValueController do
  use AfterGlow.Web, :controller
  alias AfterGlow.ColumnValue
  alias JaSerializer.Params
  alias AfterGlow.Plugs.Authorization
  plug Authorization
  plug :authorize!, ColumnValue
  plug :scrub_params, "data" when action in [:create, :update]
  plug :verify_authorized
  def index(conn, %{"filter" => %{"id" => ids}}) do
    ids = ids |> String.split(",")
    column_values = Repo.all(from t in ColumnValue, where: t.id in ^ids)
    render(conn, :index, data: column_values)
  end
  def create(conn, %{"data" => data = %{"type" => "column_value", "attributes" => _column_value_params}}) do
    changeset = ColumnValue.changeset(%ColumnValue{}, Params.to_attributes(data))
    case Repo.insert(changeset) do
      {:ok, column_value} ->
        conn
        |> put_status(:created)
        |> put_resp_header("location", column_value_path(conn, :show, column_value))
        |> render("show.json-api", data: column_value)
      {:error, changeset} ->
        conn
        |> put_status(:unprocessable_entity)
        |> render(:errors, data: changeset)
    end
  end
  def show(conn, %{"id" => id}) do
    column_value = Repo.get!(ColumnValue, id)
    render(conn, :show, data: column_value)
  end
  def update(conn, %{"id" => id, "data" => data = %{"type" => "column_value", "attributes" => _column_value_params}}) do
    column_value = Repo.get!(ColumnValue, id)
    changeset = ColumnValue.changeset(column_value, Params.to_attributes(data))
    case Repo.update(changeset) do
      {:ok, column_value} ->
        render(conn, "show.json-api", data: column_value)
      {:error, changeset} ->
        conn
        |> put_status(:unprocessable_entity)
        |> render(:errors, data: changeset)
    end
  end
  def delete(conn, %{"id" => id}) do
    column_value = Repo.get!(ColumnValue, id)
    # Here we use delete! (with a bang) because we expect
    # it to always work (and if it does not, it will raise).
    Repo.delete!(column_value)
    send_resp(conn, :no_content, "")
  end
end | 
	web/controllers/column_values_controller.ex | 0.536556 | 0.415166 | 
	column_values_controller.ex | 
	starcoder | 
| 
	defmodule Logger.ErrorHandler do
  @moduledoc false
  use GenEvent
  require Logger
  def init({otp?, sasl?, threshold}) do
    # We store the logger PID in the state because when we are shutting
    # down the Logger application, the Logger process may be terminated
    # and then trying to reach it will lead to crashes. So we send a
    # message to a PID, instead of named process, to avoid crashes on
    # send since this handler will be removed soon by the supervisor.
    {:ok, %{otp: otp?, sasl: sasl?, threshold: threshold,
            logger: Process.whereis(Logger), last_length: 0,
            last_time: :os.timestamp, dropped: 0}}
  end
  ## Handle event
  def handle_event({_type, gl, _msg}, state) when node(gl) != node() do
    {:ok, state}
  end
  def handle_event(event, state) do
    state = check_threshold(state)
    log_event(event, state)
    {:ok, state}
  end
  ## Helpers
  defp log_event({:error, _gl, {pid, format, data}}, %{otp: true} = state),
    do: log_event(:error, :format, pid, {format, data}, state)
  defp log_event({:error_report, _gl, {pid, :std_error, format}}, %{otp: true} = state),
    do: log_event(:error, :report, pid, {:std_error, format}, state)
  defp log_event({:error_report, _gl, {pid, :supervisor_report, data}}, %{sasl: true} = state),
    do: log_event(:error, :report, pid, {:supervisor_report, data}, state)
  defp log_event({:error_report, _gl, {pid, :crash_report, data}}, %{sasl: true} = state),
    do: log_event(:error, :report, pid, {:crash_report, data}, state)
  defp log_event({:warning_msg, _gl, {pid, format, data}}, %{otp: true} = state),
    do: log_event(:warn, :format, pid, {format, data}, state)
  defp log_event({:warning_report, _gl, {pid, :std_warning, format}}, %{otp: true} = state),
    do: log_event(:warn, :report, pid, {:std_warning, format}, state)
  defp log_event({:info_msg, _gl, {pid, format, data}}, %{otp: true} = state),
    do: log_event(:info, :format, pid, {format, data}, state)
  defp log_event({:info_report, _gl, {pid, :std_info, format}}, %{otp: true} = state),
    do: log_event(:info, :report, pid, {:std_info, format}, state)
  defp log_event({:info_report, _gl, {pid, :progress, data}}, %{sasl: true} = state),
    do: log_event(:info, :report, pid, {:progress, data}, state)
  defp log_event(_, _state),
    do: :ok
  defp log_event(level, kind, pid, {type, _} = data, state) do
    %{level: min_level, truncate: truncate,
      utc_log: utc_log?, translators: translators} = Logger.Config.__data__
    with log when log != :lt <- Logger.compare_levels(level, min_level),
         {:ok, message} <- translate(translators, min_level, level, kind, data, truncate) do
      message = Logger.Utils.truncate(message, truncate)
      # Mode is always async to avoid clogging the error_logger
      meta = [pid: ensure_pid(pid), error_logger: ensure_type(type)]
      GenEvent.notify(state.logger,
        {level, Process.group_leader(),
          {Logger, message, Logger.Utils.timestamp(utc_log?), meta}})
    end
    :ok
  end
  defp ensure_type(type) when is_atom(type), do: type
  defp ensure_type(_), do: :format
  defp ensure_pid(pid) when is_pid(pid), do: pid
  defp ensure_pid(_), do: self()
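  # Timestamps are compared at one-second granularity via the {megasecs, secs, _}
  # tuple from :os.timestamp/0: if the message queue grows by more than
  # `threshold` entries within the same second, pending :notify events for that
  # second are drained and counted, and a single warning is logged once the
  # second rolls over.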
  defp check_threshold(%{last_time: last_time, last_length: last_length,
                         dropped: dropped, threshold: threshold} = state) do
    {m, s, _} = current_time = :os.timestamp
    current_length = message_queue_length()
    cond do
      match?({^m, ^s, _}, last_time) and current_length - last_length > threshold ->
        count = drop_messages(current_time, 0)
        %{state | dropped: dropped + count, last_length: message_queue_length()}
      match?({^m, ^s, _}, last_time) ->
        state
      true ->
        _ = if dropped > 0 do
          Logger.warn "Logger dropped #{dropped} OTP/SASL messages as it " <>
                      "exceeded the amount of #{threshold} messages/second"
        end
        %{state | dropped: 0, last_time: current_time, last_length: current_length}
    end
  end
  defp message_queue_length() do
    {:message_queue_len, len} = Process.info(self(), :message_queue_len)
    len
  end
  defp drop_messages({m, s, _} = last_time, count) do
    case :os.timestamp do
      {^m, ^s, _} ->
        receive do
          {:notify, _event} -> drop_messages(last_time, count + 1)
        after
          0 -> count
        end
      _ ->
        count
    end
  end
  defp translate([{mod, fun} | t], min_level, level, kind, data, truncate) do
    case apply(mod, fun, [min_level, level, kind, data]) do
      {:ok, chardata} -> {:ok, chardata}
      :skip -> :skip
      :none -> translate(t, min_level, level, kind, data, truncate)
    end
  end
  defp translate([], _min_level, _level, :format, {format, args}, truncate) do
    {format, args} = Logger.Utils.inspect(format, args, truncate)
    {:ok, :io_lib.format(format, args)}
  end
  defp translate([], _min_level, _level, :report, {_type, data}, _truncate) do
    {:ok, Kernel.inspect(data)}
  end
end | 
	lib/logger/lib/logger/error_handler.ex | 0.562898 | 0.448245 | 
	error_handler.ex | 
	starcoder | 
| 
	defmodule Slipstream.Socket do
  @moduledoc """
  A data structure representing a potential websocket client connection
  This structure closely resembles `t:Phoenix.Socket.t/0`, but is not
  compatible with its functions. All documented functions from this module
  are imported by `use Slipstream`.
  """
  import Kernel, except: [send: 2, pid: 1]
  alias Slipstream.{TelemetryHelper, Socket.Join}
  alias Slipstream.Events
  if Version.match?(System.version(), ">= 1.8.0") do
    @derive {Inspect, only: [:assigns]}
  end
  defstruct [
    :channel_pid,
    :socket_pid,
    :channel_config,
    :response_headers,
    metadata: %{},
    reconnect_counter: 0,
    joins: %{},
    assigns: %{}
  ]
  @typedoc """
  A socket data structure representing a potential websocket client connection
  """
  @typedoc since: "0.1.0"
  @type t :: %__MODULE__{
          channel_pid: nil | pid(),
          socket_pid: pid(),
          channel_config: Slipstream.Configuration.t() | nil,
          metadata: %{atom() => String.t() | %{String.t() => String.t()}},
          reconnect_counter: non_neg_integer(),
          assigns: map(),
          joins: %{String.t() => %Join{}}
        }
  @doc false
  @spec new() :: t()
  def new do
    %__MODULE__{
      socket_pid: self(),
      metadata: %{
        socket_id: TelemetryHelper.trace_id(),
        joins: %{}
      }
    }
  end
  @doc """
  Adds key-value pairs to socket assigns
  Behaves the same as `Phoenix.Socket.assign/3`
  ## Examples
      iex> assign(socket, :key, :value)
      iex> assign(socket, key: :value)
  """
  # and indeed the implementation is just about the same as well.
  # we can't defdelegate/2 though because the socket module is different
  # (hence the struct doesn't match)
  @doc since: "0.1.0"
  @spec assign(t(), Keyword.t()) :: t()
  @spec assign(t(), key :: atom(), value :: any()) :: t()
  def assign(%__MODULE__{} = socket, key, value) when is_atom(key) do
    assign(socket, [{key, value}])
  end
  def assign(%__MODULE__{} = socket, attrs)
      when is_list(attrs) or is_map(attrs) do
    %__MODULE__{socket | assigns: Map.merge(socket.assigns, Map.new(attrs))}
  end
  @doc """
  Updates an existing key in the socket assigns
  Raises a `KeyError` if the key is not present in `socket.assigns`.
  `func` should be an 1-arity function which takes the existing value at assign
  `key` and updates it to a new value. The new value will take the old value's
  place in `socket.assigns[key]`.
  This function is a useful alternative to `assign/3` when the key is already
  present in assigns and is a list, map, or similarly malleable data structure.
  ## Examples
      @impl Slipstream
      def handle_cast({:join, topic}, socket) do
        socket =
          socket
          |> update(:topics, &[topic | &1])
          |> join(topic)
        {:noreply, socket}
      end
      @impl Slipstream
      def handle_call({:join, topic}, from, socket) do
        socket =
          socket
          |> update(:join_requests, &Map.put(&1, topic, from))
          |> join(topic)
        # note: not replying here so we can provide a synchronous call to a
        # topic being joined
        {:noreply, socket}
      end
      @impl Slipstream
      def handle_join(topic, response, socket) do
        case Map.fetch(socket.assigns.join_requests, topic) do
          {:ok, from} -> GenServer.reply(from, {:ok, response})
          :error -> :ok
        end
        {:ok, socket}
      end
  """
  # again, can't defdelegate/2 because of the socket module being different
  # but see the `Phoenix.LiveView.update/3` implementation for the original
  # source
  @doc since: "0.5.0"
  @spec update(t(), key :: atom(), func :: (value :: any() -> value :: any())) ::
          t()
  def update(%__MODULE__{assigns: assigns} = socket, key, func)
      when is_atom(key) and is_function(func, 1) do
    case Map.fetch(assigns, key) do
      {:ok, value} -> assign(socket, [{key, func.(value)}])
      :error -> raise KeyError, key: key, term: assigns
    end
  end
  @doc """
  Checks if a channel is currently joined
  ## Examples
      iex> joined?(socket, "room:lobby")
      true
  """
  @doc since: "0.1.0"
  @spec joined?(t(), topic :: String.t()) :: boolean()
  def joined?(%__MODULE__{} = socket, topic) when is_binary(topic) do
    join_status(socket, topic) == :joined
  end
  @doc """
  Checks the status of a join request
  When a join is requested with `Slipstream.join/3`, the join request is
  considered to be in the `:requested` state. Once the topic is successfully
  joined, it is considered `:joined` until closed. If there is a failure to
  join the topic, if the topic crashes, or if the topic is left after being
  joined, the status of the join is considered `:closed`. Finally, if a topic
  has not been requested in a join so far for a socket, the status is `nil`.
  Notably, the status of a join will not automatically change to `:joined` once
  the remote server replies with successful join. Either the join must be
  awaited with `Slipstream.await_join/2` or the status may be checked later
  in the `c:Slipstream.handle_join/3` callback.
  ## Examples
      iex> socket = join(socket, "room:lobby")
      iex> join_status(socket, "room:lobby")
      :requested
      iex> {:ok, socket, _join_response} = await_join(socket, "room:lobby")
      iex> join_status(socket, "room:lobby")
      :joined
  """
  @doc since: "0.1.0"
  @spec join_status(t(), topic :: String.t()) ::
          :requested | :joined | :closed | nil
  def join_status(%__MODULE__{} = socket, topic) when is_binary(topic) do
    case Map.fetch(socket.joins, topic) do
      {:ok, %Join{status: status}} -> status
      :error -> nil
    end
  end
  @doc """
  Checks if a socket is connected to a remote websocket host
  ## Examples
      iex> socket = connect(socket, uri: "ws://example.org")
      iex> socket = await_connect!(socket)
      iex> connected?(socket)
      true
  """
  @doc since: "0.1.0"
  @spec connected?(t()) :: boolean()
  def connected?(%__MODULE__{} = socket),
    do: socket |> channel_pid() |> is_pid()
  @doc """
  Gets the process ID of the connection
  The slipstream implementor module is not the same process as the GenServer
  which interfaces with the remote server for websocket communication. This
  other process, the Slipstream.Connection process, interfaces with `:gun`
  and communicates with the implementor module by passing messages (mostly
  with `Kernel.send/2`).
  It can be useful to have access to this pid for niche purposes, like
  sending a fake disconnect message or for debugging (e.g. with
  `:sys.get_state/1`)
  ## Examples
      iex> Slipstream.Socket.channel_pid(socket)
      #PID<0.1.2>
  """
  @doc since: "0.1.0"
  @spec channel_pid(t()) :: pid() | nil
  def channel_pid(%__MODULE__{channel_pid: pid}) do
    if is_pid(pid) and Process.alive?(pid), do: pid, else: nil
  end
  ## helper functions for implementing Slipstream
  @doc false
  def send(%__MODULE__{} = socket, message) do
    if pid = channel_pid(socket), do: Kernel.send(pid, message)
    socket
  end
  @doc false
  def call(%__MODULE__{} = socket, message, timeout) do
    if pid = channel_pid(socket) do
      {:ok, GenServer.call(pid, message, timeout)}
    else
      {:error, :not_connected}
    end
  end
  @doc false
  def put_join_config(%__MODULE__{} = socket, topic, params) do
    join = Join.new(topic, params)
    %__MODULE__{socket | joins: Map.put_new(socket.joins, topic, join)}
  end
  # potentially changes a socket by applying an event to it
  @doc false
  @spec apply_event(t(), struct()) :: t()
  def apply_event(socket, event)
  def apply_event(socket, %Events.ChannelConnected{} = event) do
    socket = TelemetryHelper.conclude_connect(socket, event)
    %__MODULE__{
      socket
      | channel_pid: event.pid,
        channel_config: event.config || socket.channel_config,
        reconnect_counter: 0
    }
  end
  def apply_event(socket, %Events.TopicJoinSucceeded{topic: topic} = event) do
    socket
    |> TelemetryHelper.conclude_join(event)
    |> put_in([Access.key(:joins), topic, Access.key(:status)], :joined)
    |> put_in([Access.key(:joins), topic, Access.key(:rejoin_counter)], 0)
  end
  def apply_event(socket, %event{topic: topic})
      when event in [
             Events.TopicLeft,
             Events.TopicJoinFailed,
             Events.TopicJoinClosed
           ] do
    put_in(socket, [Access.key(:joins), topic, Access.key(:status)], :closed)
  end
  def apply_event(socket, %Events.ChannelClosed{}) do
    %__MODULE__{
      socket
      | channel_pid: nil,
        joins:
          Enum.into(socket.joins, %{}, fn {topic, join} ->
            {topic, %Join{join | status: :closed}}
          end)
    }
  end
  def apply_event(socket, _event), do: socket
  @doc false
  @spec next_reconnect_time(t()) :: {non_neg_integer(), t()}
  def next_reconnect_time(%__MODULE__{} = socket) do
    socket = update_in(socket, [Access.key(:reconnect_counter)], &(&1 + 1))
    time =
      retry_time(
        socket.channel_config.reconnect_after_msec,
        socket.reconnect_counter - 1
      )
    {time, socket}
  end
  @doc false
  @spec next_rejoin_time(t(), String.t()) :: {non_neg_integer(), t()}
  def next_rejoin_time(socket, topic) do
    socket =
      update_in(
        socket,
        [Access.key(:joins), topic, Access.key(:rejoin_counter)],
        &(&1 + 1)
      )
    time =
      retry_time(
        socket.channel_config.rejoin_after_msec,
        socket.joins[topic].rejoin_counter - 1
      )
    {time, socket}
  end
  defp retry_time(backoff_times, try_number) do
    # when we hit the end of the list, we repeat the last value in the list
    default = Enum.at(backoff_times, -1)
    Enum.at(backoff_times, try_number, default)
  end
end | 
	lib/slipstream/socket.ex | 0.902734 | 0.483405 | 
	socket.ex | 
	starcoder | 
| 
	defmodule ShEx.ShapeMap.Decoder do
  @moduledoc !"""
             Decoder for standard representation format for ShapeMaps specified in <https://shexspec.github.io/shape-map/>.
             """
  import ShEx.Utils
  alias RDF.{IRI, BlankNode, Literal}
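  # Usage sketch (IRIs illustrative); a standard ShapeMap associates a node with
  # a shape label using `node@shape` syntax:
  #
  #     {:ok, shape_map} =
  #       ShEx.ShapeMap.Decoder.decode("<http://example.org/Alice>@<http://example.org/UserShape>")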
  def decode(content, opts \\ []) do
    with {:ok, tokens, _} <- tokenize(content),
         {:ok, ast} <- parse(tokens) do
      build_shape_map(ast, opts)
    else
      {:error, {error_line, :shape_map_lexer, error_descriptor}, _error_line_again} ->
        {:error,
         "ShapeMap scanner error on line #{error_line}: #{error_description(error_descriptor)}"}
      {:error, {error_line, :shape_map_parser, error_descriptor}} ->
        {:error,
         "ShapeMap parser error on line #{error_line}: #{error_description(error_descriptor)}"}
    end
  end
  defp error_description(error_descriptor) when is_list(error_descriptor) do
    error_descriptor
    |> Stream.map(&to_string/1)
    |> Enum.join("")
  end
  defp error_description(error_descriptor), do: inspect(error_descriptor)
  defp tokenize(content), do: content |> to_charlist |> :shape_map_lexer.string()
  defp parse([]), do: {:ok, []}
  defp parse(tokens), do: tokens |> :shape_map_parser.parse()
  defp build_shape_map(shape_associations_ast, opts) do
    with {:ok, associations} <-
           map(shape_associations_ast, &build_association/2, opts) do
      {:ok, ShEx.ShapeMap.new(associations)}
    end
  end
  defp build_association({{:node, node_ast}, shape_ast}, opts) do
    with {:ok, node} <- build_node(node_ast, opts),
         {:ok, shape} <- build_shape(shape_ast, opts) do
      {:ok, ShEx.ShapeMap.Association.new(node, shape)}
    end
  end
  defp build_association({{:triple_pattern, triple_pattern_ast}, shape_ast}, opts) do
    with {:ok, triple_pattern} <- build_triple_pattern(triple_pattern_ast, opts),
         {:ok, shape} <- build_shape(shape_ast, opts) do
      {:ok, ShEx.ShapeMap.Association.new(triple_pattern, shape)}
    end
  end
  defp build_shape(:start, _opts), do: {:ok, :start}
  defp build_shape(node, opts), do: build_node(node, opts)
  defp build_node(%IRI{} = iri, _opts), do: {:ok, iri}
  defp build_node(%BlankNode{} = bnode, _opts), do: {:ok, bnode}
  defp build_node(%Literal{} = literal, _opts), do: {:ok, literal}
  defp build_node({{:string_literal_quote, _line, value}, {:datatype, datatype}}, opts) do
    with {:ok, datatype} <- build_node(datatype, opts) do
      {:ok, RDF.literal(value, datatype: datatype)}
    end
  end
  defp build_triple_pattern({subject_ast, predicate_ast, object_ast}, opts) do
    with {:ok, subject_node_pattern} <- build_node_pattern(subject_ast, opts),
         {:ok, predicate_node_pattern} <- build_predicate_pattern(predicate_ast, opts),
         {:ok, object_node_pattern} <- build_node_pattern(object_ast, opts) do
      {:ok, {subject_node_pattern, predicate_node_pattern, object_node_pattern}}
    end
  end
  defp build_node_pattern(keyword, _opts) when is_atom(keyword), do: {:ok, keyword}
  defp build_node_pattern(node_pattern_ast, opts), do: build_node(node_pattern_ast, opts)
  defp build_predicate_pattern(:rdf_type, _opts), do: {:ok, RDF.type()}
  defp build_predicate_pattern(iri, _opts), do: {:ok, iri}
end | 
	lib/shex/shape_map/decoder.ex | 0.773559 | 0.564038 | 
	decoder.ex | 
	starcoder | 
| 
	defmodule Poison.SyntaxError do
  defexception [:message, :token]
  def exception(opts) do
    message = if token = opts[:token] do
      "Unexpected token: #{token}"
    else
      "Unexpected end of input"
    end
    %Poison.SyntaxError{message: message, token: token}
  end
end
defmodule Poison.Parser do
  @moduledoc """
  An ECMA 404 conforming JSON parser.
  See: http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf
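  For example, parsing an object with atom keys:
      iex> Poison.Parser.parse!(~s({"answer": 42}), keys: :atoms)
      %{answer: 42}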
  """
  if Application.get_env(:poison, :native) do
    @compile :native
  end
  use Bitwise
  alias Poison.SyntaxError
  @type t :: nil | true | false | list | float | integer | String.t | map
  @spec parse(iodata, Keyword.t) :: {:ok, t} | {:error, :invalid}
    | {:error, {:invalid, String.t}}
  def parse(iodata, options \\ []) do
    string = IO.iodata_to_binary(iodata)
    {value, rest} = value(skip_whitespace(string), options[:keys])
    case skip_whitespace(rest) do
      "" -> {:ok, value}
      other -> syntax_error(other)
    end
  catch
    :invalid ->
      {:error, :invalid}
    {:invalid, token} ->
      {:error, {:invalid, token}}
  end
  @spec parse!(iodata, Keyword.t) :: t
  def parse!(iodata, options \\ []) do
    case parse(iodata, options) do
      {:ok, value} ->
        value
      {:error, :invalid} ->
        raise SyntaxError
      {:error, {:invalid, token}} ->
        raise SyntaxError, token: token
    end
  end
  defp value("\"" <> rest, _keys),    do: string_continue(rest, [])
  defp value("{" <> rest, keys),      do: object_pairs(skip_whitespace(rest), keys, [])
  defp value("[" <> rest, keys),      do: array_values(skip_whitespace(rest), keys, [])
  defp value("null" <> rest, _keys),  do: {nil, rest}
  defp value("true" <> rest, _keys),  do: {true, rest}
  defp value("false" <> rest, _keys), do: {false, rest}
  defp value(<<char, _ :: binary>> = string, _keys) when char in '-0123456789' do
    number_start(string)
  end
  defp value(other, _keys), do: syntax_error(other)
  ## Objects
  defp object_pairs("\"" <> rest, keys, acc) do
    {name, rest} = string_continue(rest, [])
    {value, rest} = case skip_whitespace(rest) do
      ":" <> rest -> value(skip_whitespace(rest), keys)
      other -> syntax_error(other)
    end
    acc = [{object_name(name, keys), value} | acc]
    case skip_whitespace(rest) do
      "," <> rest -> object_pairs(skip_whitespace(rest), keys, acc)
      "}" <> rest -> {:maps.from_list(acc), rest}
      other -> syntax_error(other)
    end
  end
  defp object_pairs("}" <> rest, _, []) do
    {:maps.new, rest}
  end
  defp object_pairs(other, _, _), do: syntax_error(other)
  defp object_name(name, :atoms),  do: String.to_atom(name)
  defp object_name(name, :atoms!), do: String.to_existing_atom(name)
  defp object_name(name, _keys),   do: name
  ## Arrays
  defp array_values("]" <> rest, _, []) do
    {[], rest}
  end
  defp array_values(string, keys, acc) do
    {value, rest} = value(string, keys)
    acc = [value | acc]
    case skip_whitespace(rest) do
      "," <> rest -> array_values(skip_whitespace(rest), keys, acc)
      "]" <> rest -> {:lists.reverse(acc), rest}
      other -> syntax_error(other)
    end
  end
  ## Numbers
  defp number_start("-" <> rest) do
    case rest do
      "0" <> rest -> number_frac(rest, ["-0"])
      rest -> number_int(rest, [?-])
    end
  end
  defp number_start("0" <> rest) do
    number_frac(rest, [?0])
  end
  defp number_start(string) do
    number_int(string, [])
  end
  defp number_int(<<char, _ :: binary>> = string, acc) when char in '123456789' do
    {digits, rest} = number_digits(string)
    number_frac(rest, [acc, digits])
  end
  defp number_int(other, _), do: syntax_error(other)
  defp number_frac("." <> rest, acc) do
    {digits, rest} = number_digits(rest)
    number_exp(rest, true, [acc, ?., digits])
  end
  defp number_frac(string, acc) do
    number_exp(string, false, acc)
  end
  defp number_exp(<<e>> <> rest, frac, acc) when e in 'eE' do
    e = if frac, do: ?e, else: ".0e"
    case rest do
      "-" <> rest -> number_exp_continue(rest, [acc, e, ?-])
      "+" <> rest -> number_exp_continue(rest, [acc, e])
      rest -> number_exp_continue(rest, [acc, e])
    end
  end
  defp number_exp(string, frac, acc) do
    {number_complete(acc, frac), string}
  end
  defp number_exp_continue(rest, acc) do
    {digits, rest} = number_digits(rest)
    {number_complete([acc, digits], true), rest}
  end
  defp number_complete(iolist, false) do
    IO.iodata_to_binary(iolist) |> String.to_integer
  end
  defp number_complete(iolist, true) do
    IO.iodata_to_binary(iolist) |> String.to_float
  end
  defp number_digits(<<char>> <> rest = string) when char in '0123456789' do
    count = number_digits_count(rest, 1)
    <<digits :: binary-size(count), rest :: binary>> = string
    {digits, rest}
  end
  defp number_digits(other), do: syntax_error(other)
  defp number_digits_count(<<char>> <> rest, acc) when char in '0123456789' do
    number_digits_count(rest, acc + 1)
  end
  defp number_digits_count(_, acc), do: acc
  ## Strings
  defp string_continue("\"" <> rest, acc) do
    {IO.iodata_to_binary(acc), rest}
  end
  defp string_continue("\\" <> rest, acc) do
    string_escape(rest, acc)
  end
  defp string_continue("", _), do: throw(:invalid)
  defp string_continue(string, acc) do
    n = string_chunk_size(string, 0)
    <<chunk :: binary-size(n), rest :: binary>> = string
    string_continue(rest, [acc, chunk])
  end
  for {seq, char} <- Enum.zip('"\\ntr/fb', '"\\\n\t\r/\f\b') do
    defp string_escape(<<unquote(seq)>> <> rest, acc) do
      string_continue(rest, [acc, unquote(char)])
    end
  end
  # http://www.ietf.org/rfc/rfc2781.txt
  # http://perldoc.perl.org/Encode/Unicode.html#Surrogate-Pairs
  # http://mathiasbynens.be/notes/javascript-encoding#surrogate-pairs
  defp string_escape(<<?u, a1, b1, c1, d1, "\\u", a2, b2, c2, d2>> <> rest, acc)
    when a1 in 'dD' and a2 in 'dD'
    and (b1 in '89abAB')
    and (b2 in ?c..?f or b2 in ?C..?F) \
  do
    hi = List.to_integer([a1, b1, c1, d1], 16)
    lo = List.to_integer([a2, b2, c2, d2], 16)
    codepoint = 0x10000 + ((hi &&& 0x03FF) <<< 10) + (lo &&& 0x03FF)
    string_continue(rest, [acc, <<codepoint :: utf8>>])
  end
  defp string_escape(<<?u, seq :: binary-size(4)>> <> rest, acc) do
    string_continue(rest, [acc, <<String.to_integer(seq, 16) :: utf8>> ])
  end
  defp string_escape(other, _), do: syntax_error(other)
  defp string_chunk_size("\"" <> _, acc), do: acc
  defp string_chunk_size("\\" <> _, acc), do: acc
  defp string_chunk_size(<<char>> <> rest, acc) when char < 0x80 do
    string_chunk_size(rest, acc + 1)
  end
  defp string_chunk_size(<<codepoint :: utf8>> <> rest, acc) do
    string_chunk_size(rest, acc + string_codepoint_size(codepoint))
  end
  defp string_chunk_size(other, _), do: syntax_error(other)
  defp string_codepoint_size(codepoint) when codepoint < 0x800,   do: 2
  defp string_codepoint_size(codepoint) when codepoint < 0x10000, do: 3
  defp string_codepoint_size(_),                                  do: 4
  ## Whitespace
  defp skip_whitespace(<<char>> <> rest) when char in '\s\n\t\r' do
    skip_whitespace(rest)
  end
  defp skip_whitespace(string), do: string
  ## Errors
  defp syntax_error(<<token :: utf8>> <> _) do
    throw({:invalid, <<token>>})
  end
  defp syntax_error(_) do
    throw(:invalid)
  end
end | 
	issues/deps/poison/lib/poison/parser.ex | 0.648689 | 0.517449 | 
	parser.ex | 
	starcoder | 
| 
	defmodule OpenIDConnect do
  @moduledoc """
  Handles a majority of the life-cycle concerns with [OpenID Connect](http://openid.net/connect/)
  """
  @typedoc """
  URI as a string
  """
  @type uri :: String.t()
  @typedoc """
  JSON Web Token
  See: https://jwt.io/introduction/
  """
  @type jwt :: String.t()
  @typedoc """
  The provider name as an atom
  Example: `:google`
  This atom should match what you've used in your application config
  """
  @type provider :: atom
  @typedoc """
  The payload of user data from the provider
  """
  @type claims :: map
  @typedoc """
  The name of the genserver
  This is optional and will default to `:openid_connect` unless overridden
  """
  @type name :: atom
  @typedoc """
  Query param map
  """
  @type params :: map
  @typedoc """
  The success tuple
  The 2nd element will be the relevant value to work with
  """
  @type success(value) :: {:ok, value}
  @typedoc """
  A string reason for an error failure
  """
  @type reason :: String.t() | %HTTPoison.Error{} | %HTTPoison.Response{}
  @typedoc """
  An error tuple
  The 2nd element will indicate which function failed
  The 3rd element will give details of the failure
  """
  @type error(name) :: {:error, name, reason}
  @typedoc """
  A provider's documents
  * discovery_document: the provider's discovery document for OpenID Connect
  * jwk: the provider's certificates converted into a JOSE JSON Web Key
  * remaining_lifetime: how long the provider's JWK is valid for
  """
  @type documents :: %{
          discovery_document: map,
          jwk: JOSE.JWK.t(),
          remaining_lifetime: integer | nil
        }
  @spec authorization_uri(provider, params, name) :: uri
  @doc """
  Builds the authorization URI according to the spec in the providers discovery document
  The `params` option can be used to add additional query params to the URI
  Example:
      OpenIDConnect.authorization_uri(:google, %{"hd" => "dockyard.com"})
  > It is *highly suggested* that you add the `state` param for security reasons. Your
  > OpenID Connect provider should have more information on this topic.
  """
  def authorization_uri(provider, params \\ %{}, name \\ :openid_connect) do
    document = discovery_document(provider, name)
    config = config(provider, name)
    uri = Map.get(document, "authorization_endpoint")
    params =
      Map.merge(
        %{
          client_id: client_id(config),
          redirect_uri: redirect_uri(config),
          response_type: response_type(provider, config, name),
          scope: normalize_scope(provider, config[:scope])
        },
        params
      )
    build_uri(uri, params)
  end
  @spec fetch_tokens(provider, params, name) :: success(map) | error(:fetch_tokens)
  @doc """
  Fetches the authentication tokens from the provider
  The `params` option should at least include the key/value pairs of the `response_type` that
  was requested during authorization. `params` may also include any one-off overrides for token
  fetching.
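  Example (provider and params are illustrative):
      OpenIDConnect.fetch_tokens(:google, %{code: code})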
  """
  def fetch_tokens(provider, params, name \\ :openid_connect)
  def fetch_tokens(provider, code, name) when is_binary(code) do
    IO.warn(
      "Deprecation: `OpenIDConnect.fetch_tokens/3` no longer takes a binary as the 2nd argument. Please refer to the docs for the new API."
    )
    fetch_tokens(provider, %{code: code}, name)
  end
  def fetch_tokens(provider, params, name) do
    uri = access_token_uri(provider, name)
    config = config(provider, name)
    form_body =
      Map.merge(
        %{
          client_id: client_id(config),
          client_secret: client_secret(config),
          grant_type: "authorization_code",
          redirect_uri: redirect_uri(config)
        },
        params
      )
      |> Map.to_list()
    headers = [{"Content-Type", "application/x-www-form-urlencoded"}]
    with {:ok, %HTTPoison.Response{status_code: status_code} = resp} when status_code in 200..299 <-
           http_client().post(uri, {:form, form_body}, headers, http_client_options()),
         {:ok, json} <- Jason.decode(resp.body),
         {:ok, json} <- assert_json(json) do
      {:ok, json}
    else
      {:ok, resp} -> {:error, :fetch_tokens, resp}
      {:error, reason} -> {:error, :fetch_tokens, reason}
    end
  end
  @spec verify(provider, jwt, name) :: success(claims) | error(:verify)
  @doc """
  Verifies the validity of the JSON Web Token (JWT)
  This verification will assert the token's encryption against the provider's
  JSON Web Key (JWK)
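  Example (illustrative):
      {:ok, claims} = OpenIDConnect.verify(:google, jwt)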
  """
  def verify(provider, jwt, name \\ :openid_connect) do
    jwk = jwk(provider, name)
    with {:ok, protected} <- peek_protected(jwt),
         {:ok, decoded_protected} <- Jason.decode(protected),
         {:ok, token_alg} <- Map.fetch(decoded_protected, "alg"),
         {true, claims, _jwk} <- do_verify(jwk, token_alg, jwt) do
      Jason.decode(claims)
    else
      {:error, %Jason.DecodeError{}} ->
        {:error, :verify, "token claims did not contain a JSON payload"}
      {:error, :peek_protected} ->
        {:error, :verify, "invalid token format"}
      :error ->
        {:error, :verify, "no `alg` found in token"}
      {false, _claims, _jwk} ->
        {:error, :verify, "verification failed"}
      _ ->
        {:error, :verify, "verification error"}
    end
  end
  @spec update_documents(list) :: success(documents) | error(:update_documents)
  @doc """
  Requests updated documents from the provider
  This function is used by `OpenIDConnect.Worker` for document updates
  according to the lifetime returned by the provider
  """
  def update_documents(config) do
    uri = discovery_document_uri(config)
    with {:ok, discovery_document, _} <- fetch_resource(uri),
         {:ok, certs, remaining_lifetime} <- fetch_resource(discovery_document["jwks_uri"]),
         {:ok, jwk} <- from_certs(certs) do
      {:ok,
       %{
         discovery_document: normalize_discovery_document(discovery_document),
         jwk: jwk,
         remaining_lifetime: remaining_lifetime
       }}
    else
      {:error, reason} -> {:error, :update_documents, reason}
    end
  end
  @doc false
  def normalize_discovery_document(discovery_document) do
    # claims_supported may be missing as it is marked RECOMMENDED by the spec, default to an empty list
    sorted_claims_supported =
      discovery_document
      |> Map.get("claims_supported", [])
      |> Enum.sort()
    # response_types_supported's presence is REQUIRED by the spec, crash when missing
    sorted_response_types_supported =
      discovery_document
      |> Map.get("response_types_supported")
      |> Enum.map(fn response_type ->
        response_type
        |> String.split()
        |> Enum.sort()
        |> Enum.join(" ")
      end)
    Map.merge(discovery_document, %{
      "claims_supported" => sorted_claims_supported,
      "response_types_supported" => sorted_response_types_supported
    })
  end
  defp peek_protected(jwt) do
    try do
      {:ok, JOSE.JWS.peek_protected(jwt)}
    rescue
      _ -> {:error, :peek_protected}
    end
  end
  defp do_verify(%JOSE.JWK{keys: {:jose_jwk_set, jwks}}, token_alg, jwt) do
    Enum.find_value(jwks, {false, "{}", jwt}, fn jwk ->
      jwk
      |> JOSE.JWK.from()
      |> do_verify(token_alg, jwt)
      |> case do
        {false, _claims, _jwt} -> false
        verified_claims -> verified_claims
      end
    end)
  end
  defp do_verify(%JOSE.JWK{} = jwk, token_alg, jwt),
    do: JOSE.JWS.verify_strict(jwk, [token_alg], jwt)
  defp from_certs(certs) do
    try do
      {:ok, JOSE.JWK.from(certs)}
    rescue
      _ ->
        {:error, "certificates bad format"}
    end
  end
  defp discovery_document(provider, name) do
    GenServer.call(name, {:discovery_document, provider})
  end
  defp jwk(provider, name) do
    GenServer.call(name, {:jwk, provider})
  end
  defp config(provider, name) do
    GenServer.call(name, {:config, provider})
  end
  defp access_token_uri(provider, name) do
    Map.get(discovery_document(provider, name), "token_endpoint")
  end
  defp client_id(config) do
    Keyword.get(config, :client_id)
  end
  defp client_secret(config) do
    Keyword.get(config, :client_secret)
  end
  defp redirect_uri(config) do
    Keyword.get(config, :redirect_uri)
  end
  defp response_type(provider, config, name) do
    response_type =
      config
      |> Keyword.get(:response_type)
      |> normalize_response_type(provider)
    response_types_supported = response_types_supported(provider, name)
    cond do
      response_type in response_types_supported ->
        response_type
      true ->
        raise ArgumentError,
          message: """
          Requested response type (#{response_type}) not supported by provider (#{provider}).
          Supported types:
          #{Enum.join(response_types_supported, "\n")}
          """
    end
  end
  defp normalize_response_type(response_type, provider)
       when is_nil(response_type) or response_type == [] do
    raise ArgumentError, "no response_type has been defined for provider `#{provider}`"
  end
  defp normalize_response_type(response_type, provider) when is_binary(response_type) do
    response_type
    |> String.split()
    |> normalize_response_type(provider)
  end
  defp normalize_response_type(response_type, _provider) when is_list(response_type) do
    response_type
    |> Enum.sort()
    |> Enum.join(" ")
  end
  defp response_types_supported(provider, name) do
    provider
    |> discovery_document(name)
    |> Map.get("response_types_supported")
  end
  defp discovery_document_uri(config) do
    Keyword.get(config, :discovery_document_uri)
  end
  defp fetch_resource(uri) do
    with {:ok, %HTTPoison.Response{status_code: status_code} = resp} when status_code in 200..299 <-
           http_client().get(uri, [], http_client_options()),
         {:ok, json} <- Jason.decode(resp.body),
         {:ok, json} <- assert_json(json) do
      {:ok, json, remaining_lifetime(resp.headers)}
    else
      {:ok, resp} -> {:error, resp}
      error -> error
    end
  end
  defp build_uri(uri, params) do
    query = URI.encode_query(params)
    uri
    |> URI.merge("?#{query}")
    |> URI.to_string()
  end
  defp assert_json(%{"error" => reason}), do: {:error, reason}
  defp assert_json(json), do: {:ok, json}
  @spec remaining_lifetime([{String.t(), String.t()}]) :: integer | nil
  defp remaining_lifetime(headers) do
    with headers = Enum.into(headers, %{}),
         {:ok, max_age} <- find_max_age(headers),
         {:ok, age} <- find_age(headers) do
      max_age - age
    else
      _ -> nil
    end
  end
  defp normalize_scope(provider, scopes) when is_nil(scopes) or scopes == [] do
    raise ArgumentError, "no scopes have been defined for provider `#{provider}`"
  end
  defp normalize_scope(_provider, scopes) when is_binary(scopes), do: scopes
  defp normalize_scope(_provider, scopes) when is_list(scopes), do: Enum.join(scopes, " ")
  defp find_max_age(headers) when is_map(headers) do
    case Regex.run(~r"(?<=max-age=)\d+", Map.get(headers, "Cache-Control", "")) do
      [max_age] -> {:ok, String.to_integer(max_age)}
      _ -> :error
    end
  end
  defp find_age(headers) when is_map(headers) do
    case Map.get(headers, "Age") do
      nil -> :error
      age -> {:ok, String.to_integer(age)}
    end
  end
  defp http_client do
    Application.get_env(:openid_connect, :http_client, HTTPoison)
  end
  defp http_client_options do
    Application.get_env(:openid_connect, :http_client_options, [])
  end
end | 
	lib/openid_connect.ex | 0.897558 | 0.495484 | 
	openid_connect.ex | 
	starcoder | 
| 
	defmodule Sentix.Cache do
  @moduledoc """
  This module just provides a cache interface which can back multiple Sentix
  watchers, to make sure that we have some form of persistence (rather than
  relying on the developer to remember to reconnect on crashes).
  Currently the only provided functions are based around subscriber storage (in
  order to persist across crashes), and paths of binaries to avoid having to look
  for them on every execution.
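  A quick usage sketch (assuming the backing `Sentix` cache has been started):
      iex> subs = Sentix.Cache.get_subscribers(:watcher)
      iex> Sentix.Cache.add_subscriber(:watcher, subs, self())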
  """
  @doc """
  Adds a subscriber against the provided Sentix name.
  This is a convenience method to avoid having to check whether the list includes
  the provided subscriber every time we wish to add a subscriber (because we
  don't want to duplicate messages to the subscriber).
  """
  @spec add_subscriber(name :: atom, subs :: [ pid ], sub :: pid) :: subs :: [ pid ]
  def add_subscriber(name, subs, sub) do
    sub_list = if Enum.member?(subs, sub) do
      subs
    else
      [ sub | subs ]
    end
    set_subscribers(name, sub_list)
  end
  @doc """
  Retrieves the list of subscribers for a provided Sentix name.
  This simply hits the backing cache for the provided name, and returns an empty
  list if the name does not exist inside the cache.
  """
  @spec get_subscribers(name :: atom) :: subscribers :: [ pid ]
  def get_subscribers(name) do
    Cachex.get!(Sentix, name) || []
  end
  @doc """
  Sets the subscribers for a given Sentix watcher.
  This will write the subscribers into the cache, and then return the list of
  persisted subscribers for convenience.
  """
  @spec set_subscribers(name :: atom, subscribers :: [ pid ]) :: subscribers :: [ pid ]
  def set_subscribers(name, subscribers) do
    Cachex.set!(Sentix, name, subscribers) && subscribers
  end
  @doc """
  Locates a binary on the host system, if possible.
  Once the binary has been located, it's stored inside the cache to speed up future
  lookups. We use `:os.find_executable/1` under the hood to locate the binary.
  """
  @spec find_binary(name :: [ char ]) ::
        { :ok, path :: binary } |
        { :error, reason :: binary }
  def find_binary(name) do
    case Cachex.get!(Sentix, name, fallback: &do_find_binary/1) do
      nil -> { :error, :missing_binary }
      val -> { :ok, val }
    end
  end
  # Internal fallback function for use when a binary has no entry inside the
  # cache. This function is used to generate the initial entry for the cache.
  defp do_find_binary(key) do
    case :os.find_executable(key) do
      false -> nil
      value -> to_string(value)
    end
  end
end | 
	lib/sentix/cache.ex | 0.774583 | 0.503418 | 
	cache.ex | 
	starcoder | 
| 
	defmodule Timber.Plug.Event do
  @moduledoc """
  Automatically logs metadata information about HTTP requests
  and responses in Plug-based frameworks like Phoenix.
  Whether you use Plug by itself or as part of a framework like Phoenix,
  adding this plug to your pipeline will automatically create events
  for incoming HTTP requests and responses for your log statements.
  Note: If you're using `Timber.Plug.HTTPContext`, that plug should come before
  `Timber.Plug.Event` in any pipeline. This will give you the best results.
  ## Adding the Plug
  `Timber.Plug.Event` can be added to your plug pipeline using the standard
  `Plug.Builder.plug/2` macro. The point at which you place it determines
  what state Timber will receive the connection in, therefore it's
  recommended you place it as close to the origin of the request as
  possible.
  ### Plug (Standalone or Plug.Router)
  If you are using Plug without a framework, your setup will vary depending
  on your architecture. The call to `plug Timber.Plug.Event` should be grouped
  with any other plugs you call prior to performing business logic.
  Timber expects query parameters to have already been fetched on the
  connection using `Plug.Conn.fetch_query_params/2`.
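  A minimal standalone sketch (module and route are illustrative):
  ```elixir
  defmodule MyApp.Router do
    use Plug.Router

    plug :fetch_query
    plug Timber.Plug.Event
    plug :match
    plug :dispatch

    get "/" do
      send_resp(conn, 200, "ok")
    end

    defp fetch_query(conn, _opts), do: Plug.Conn.fetch_query_params(conn)
  end
  ```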
  ### Phoenix
  Phoenix's flexibility means there are multiple points in the plug pipeline
  where the `Timber.Plug.Event` can be inserted. The recommended place is in
  a `:logging` pipeline in your router, but if you have more complex needs
  you can also place the plug in an endpoint or a controller.
  ```elixir
  defmodule MyApp.Router do
    use MyApp.Web, :router
    pipeline :logging do
      plug Timber.Plug.Event
    end
    scope "/api", MyApp do
      pipe_through :logging
    end
  end
  ```
  If you place the plug call in your endpoint, you will need to make sure
  that it appears after `Plug.RequestId` (if you are using it) but before
  the call to your router.
  ## Issues with Plug.ErrorHandler
  If you are using `Plug.ErrorHandler`, you will not see a response
  event if an exception is raised. This is because of how the error
  handler works in practice. In order to capture information about the
  response, Timber registers a callback to be used before Plug actually
  sends the response. Plug stores this information on the
  connection struct. When an exception is raised, the methodology used
  by the error handler resets the conn to the state in which it was first
  accepted by the router.
  """
  @behaviour Plug
  require Logger
  alias Timber.JSON
  alias Timber.Timer
  @doc false
  @impl true
  def init(opts) do
    opts
  end
  @doc false
  @impl true
  def call(conn, opts) do
    timer = Timer.start()
    log_level = Keyword.get(opts, :log_level, :info)
    request_id_header_name = Keyword.get(opts, :request_id_header, "x-request-id")
    request_id_header = Timber.Plug.get_request_id(conn, request_id_header_name)
    request_id = request_id_from_header(request_id_header)
    method = conn.method
    host = conn.host
    port = conn.port
    scheme = conn.scheme
    path = conn.request_path
    headers = List.flatten([request_id_header | conn.req_headers])
    headers_json = headers_to_headers_json(headers)
    query_string = conn.query_string
    event = %{
      http_request_received: %{
        headers_json: headers_json,
        host: host,
        method: method,
        path: path,
        port: port,
        query_string: query_string,
        request_id: request_id,
        scheme: scheme
      }
    }
    message =
      if path do
        ["Received ", method, " ", path]
      else
        ["Received ", method]
      end
    Logger.log(log_level, message, event: event)
    conn
    |> Plug.Conn.put_private(:timber_opts, opts)
    |> Plug.Conn.put_private(:timber_timer, timer)
    |> Plug.Conn.register_before_send(&log_response_event/1)
  end
  @spec log_response_event(Plug.Conn.t()) :: Plug.Conn.t()
  defp log_response_event(conn) do
    duration_ms = Timber.duration_ms(conn.private.timber_timer)
    opts = conn.private.timber_opts
    log_level = Keyword.get(opts, :log_level, :info)
    status = Plug.Conn.Status.code(conn.status)
    request_id_header_name = Keyword.get(opts, :request_id_header, "x-request-id")
    request_id_header = Timber.Plug.get_request_id(conn, request_id_header_name)
    # The response body typing is iodata; it should not be assumed
    # to be a binary
    bytes = body_bytes(conn.resp_body)
    headers = [
      {"content-length", Integer.to_string(bytes)}
      | conn.resp_headers
    ]
    headers = List.flatten([request_id_header | headers])
    headers_json = headers_to_headers_json(headers)
    request_id = request_id_from_header(request_id_header)
    event = %{
      http_response_sent: %{
        headers_json: headers_json,
        request_id: request_id,
        status: status,
        duration_ms: duration_ms
      }
    }
    message = [
      "Sent ",
      Integer.to_string(status),
      " response in ",
      Timber.format_duration_ms(duration_ms)
    ]
    Logger.log(log_level, message, event: event)
    conn
  end
  defp body_bytes(nil), do: 0
  defp body_bytes(body), do: IO.iodata_length(body)
  defp request_id_from_header(request_id_header) do
    case request_id_header do
      [{_, request_id}] -> request_id
      [] -> nil
    end
  end
  # Constructs a full URL from the given parts
  def full_url(scheme, host, path, port, query_string) do
    %URI{scheme: scheme, host: host, path: path, port: port, query: query_string}
    |> URI.to_string()
  end
  @spec headers_to_headers_json(Keyword.t()) :: String.t()
  def headers_to_headers_json(headers) do
    Map.new(headers)
    |> JSON.encode_to_binary!()
  end
end | 
	lib/timber_plug/event.ex | 0.906978 | 0.74382 | 
	event.ex | 
	starcoder | 
| 
	
defmodule AWS.Kinesis do
  @moduledoc """
  Amazon Kinesis Streams Service API Reference
  Amazon Kinesis Streams is a managed service that scales elastically for
  real time processing of streaming big data.
  """
  @doc """
  Adds or updates tags for the specified Amazon Kinesis stream. Each stream
  can have up to 10 tags.
  If tags have already been assigned to the stream, `AddTagsToStream`
  overwrites any existing tags that correspond to the specified tag keys.
  """
  def add_tags_to_stream(client, input, options \\ []) do
    request(client, "AddTagsToStream", input, options)
  end
  @doc """
  Creates an Amazon Kinesis stream. A stream captures and transports data
  records that are continuously emitted from different data sources or
  *producers*. Scale-out within a stream is explicitly supported by means of
  shards, which are uniquely identified groups of data records in a stream.
  You specify and control the number of shards that a stream is composed of.
  Each shard can support reads up to 5 transactions per second, up to a
  maximum data read total of 2 MB per second. Each shard can support writes
  up to 1,000 records per second, up to a maximum data write total of 1 MB
  per second. You can add shards to a stream if the amount of data input
  increases and you can remove shards if the amount of data input decreases.
  The stream name identifies the stream. The name is scoped to the AWS
  account used by the application. It is also scoped by region. That is, two
  streams in two different accounts can have the same name, and two streams
  in the same account, but in two different regions, can have the same name.
  `CreateStream` is an asynchronous operation. Upon receiving a
  `CreateStream` request, Amazon Kinesis immediately returns and sets the
  stream status to `CREATING`. After the stream is created, Amazon Kinesis
  sets the stream status to `ACTIVE`. You should perform read and write
  operations only on an `ACTIVE` stream.
  You receive a `LimitExceededException` when making a `CreateStream` request
  if you try to do one of the following:
  <ul> <li>Have more than five streams in the `CREATING` state at any point
  in time.
  </li> <li>Create more shards than are authorized for your account.
  </li> </ul> For the default shard limit for an AWS account, see [Streams
  Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
  in the *Amazon Kinesis Streams Developer Guide*. If you need to increase
  this limit, [contact AWS
  Support](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
  You can use `DescribeStream` to check the stream status, which is returned
  in `StreamStatus`.
  `CreateStream` has a limit of 5 transactions per second per account.
  """
  def create_stream(client, input, options \\ []) do
    request(client, "CreateStream", input, options)
  end
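  # Usage sketch (client construction depends on your aws-elixir setup; the
  # input keys follow the Kinesis CreateStream API):
  #
  #     AWS.Kinesis.create_stream(client, %{"StreamName" => "my-stream",
  #                                         "ShardCount" => 2})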
  @doc """
  Decreases the Amazon Kinesis stream's retention period, which is the length
  of time data records are accessible after they are added to the stream. The
  minimum value of a stream's retention period is 24 hours.
  This operation may result in lost data. For example, if the stream's
  retention period is 48 hours and is decreased to 24 hours, any data already
  in the stream that is older than 24 hours is inaccessible.
  """
  def decrease_stream_retention_period(client, input, options \\ []) do
    request(client, "DecreaseStreamRetentionPeriod", input, options)
  end
  @doc """
  Deletes an Amazon Kinesis stream and all its shards and data. You must shut
  down any applications that are operating on the stream before you delete
  the stream. If an application attempts to operate on a deleted stream, it
  will receive the exception `ResourceNotFoundException`.
  If the stream is in the `ACTIVE` state, you can delete it. After a
  `DeleteStream` request, the specified stream is in the `DELETING` state
  until Amazon Kinesis completes the deletion.
  **Note:** Amazon Kinesis might continue to accept data read and write
  operations, such as `PutRecord`, `PutRecords`, and `GetRecords`, on a
  stream in the `DELETING` state until the stream deletion is complete.
  When you delete a stream, any shards in that stream are also deleted, and
  any tags are dissociated from the stream.
  You can use the `DescribeStream` operation to check the state of the
  stream, which is returned in `StreamStatus`.
  `DeleteStream` has a limit of 5 transactions per second per account.
  """
  def delete_stream(client, input, options \\ []) do
    request(client, "DeleteStream", input, options)
  end
  @doc """
  Describes the shard limits and usage for the account.
  If you update your account limits, the old limits might be returned for a
  few minutes.
  This operation has a limit of 1 transaction per second per account.
  """
  def describe_limits(client, input, options \\ []) do
    request(client, "DescribeLimits", input, options)
  end
  @doc """
  Describes the specified Amazon Kinesis stream.
  The information returned includes the stream name, Amazon Resource Name
  (ARN), creation time, enhanced metric configuration, and shard map. The
  shard map is an array of shard objects. For each shard object, there is the
  hash key and sequence number ranges that the shard spans, and the IDs of
  any earlier shards that played a role in creating the shard. Every
  record ingested in the stream is identified by a sequence number, which is
  assigned when the record is put into the stream.
  You can limit the number of shards returned by each call. For more
  information, see [Retrieving Shards from a
  Stream](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html)
  in the *Amazon Kinesis Streams Developer Guide*.
  There are no guarantees about the chronological order of the shards returned. To
  process shards in chronological order, use the ID of the parent shard to
  track the lineage to the oldest shard.
  This operation has a limit of 10 transactions per second per account.
  """
  def describe_stream(client, input, options \\ []) do
    request(client, "DescribeStream", input, options)
  end
  @doc """
  Disables enhanced monitoring.
  """
  def disable_enhanced_monitoring(client, input, options \\ []) do
    request(client, "DisableEnhancedMonitoring", input, options)
  end
  @doc """
  Enables enhanced Amazon Kinesis stream monitoring for shard-level metrics.
  """
  def enable_enhanced_monitoring(client, input, options \\ []) do
    request(client, "EnableEnhancedMonitoring", input, options)
  end
  @doc """
  Gets data records from an Amazon Kinesis stream's shard.
  Specify a shard iterator using the `ShardIterator` parameter. The shard
  iterator specifies the position in the shard from which you want to start
  reading data records sequentially. If there are no records available in the
  portion of the shard that the iterator points to, `GetRecords` returns an
  empty list. Note that it might take multiple calls to get to a portion of
  the shard that contains records.
  You can scale by provisioning multiple shards per stream while considering
  service limits (for more information, see [Streams
  Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
  in the *Amazon Kinesis Streams Developer Guide*). Your application should
  have one thread per shard, each reading continuously from its stream. To
  read from a stream continually, call `GetRecords` in a loop. Use
  `GetShardIterator` to get the shard iterator to specify in the first
  `GetRecords` call. `GetRecords` returns a new shard iterator in
  `NextShardIterator`. Specify the shard iterator returned in
  `NextShardIterator` in subsequent calls to `GetRecords`. Note that if the
  shard has been closed, the shard iterator can't return more data and
  `GetRecords` returns `null` in `NextShardIterator`. You can terminate the
  loop when the shard is closed, or when the shard iterator reaches the
  record with the sequence number or other attribute that marks it as the
  last record to process.
  Each data record can be up to 1 MB in size, and each shard can read up to 2
  MB per second. You can ensure that your calls don't exceed the maximum
  supported size or throughput by using the `Limit` parameter to specify the
  maximum number of records that `GetRecords` can return. Consider your
  average record size when determining this limit.
  The size of the data returned by `GetRecords` varies depending on the
  utilization of the shard. The maximum size of data that `GetRecords` can
  return is 10 MB. If a call returns this amount of data, subsequent calls
  made within the next 5 seconds throw
  `ProvisionedThroughputExceededException`. If there is insufficient
  provisioned throughput on the shard, subsequent calls made within the next
  1 second throw `ProvisionedThroughputExceededException`. Note that
  `GetRecords` won't return any data when it throws an exception. For this
  reason, we recommend that you wait one second between calls to
  `GetRecords`; however, it's possible that the application will get
  exceptions for longer than 1 second.
  To detect whether the application is falling behind in processing, you can
  use the `MillisBehindLatest` response attribute. You can also monitor the
  stream using CloudWatch metrics and other mechanisms (see
  [Monitoring](http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html)
  in the *Amazon Kinesis Streams Developer Guide*).
  Each Amazon Kinesis record includes a value, `ApproximateArrivalTimestamp`,
  that is set when a stream successfully receives and stores a record. This
  is commonly referred to as a server-side timestamp, whereas a client-side
  timestamp is set when a data producer creates or sends the record to a
  stream (a data producer is any data source putting data records into a
  stream, for example with `PutRecords`). The timestamp has millisecond
  precision. There are no guarantees about the timestamp accuracy, or that
  the timestamp is always increasing. For example, records in a shard or
  across a stream might have timestamps that are out of order.
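  ## Example
  A minimal sketch of the polling loop described above; `client` is assumed
  to be a configured client struct and `process/1` is a placeholder:
      defp read_loop(client, iterator) do
        {:ok, %{"Records" => records, "NextShardIterator" => next}, _response} =
          get_records(client, %{"ShardIterator" => iterator, "Limit" => 100})
        process(records)
        if next, do: read_loop(client, next)
      end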
  """
  def get_records(client, input, options \\ []) do
    request(client, "GetRecords", input, options)
  end
  @doc """
  Gets an Amazon Kinesis shard iterator. A shard iterator expires five
  minutes after it is returned to the requester.
  A shard iterator specifies the shard position from which to start reading
  data records sequentially. The position is specified using the sequence
  number of a data record in a shard. A sequence number is the identifier
  associated with every record ingested in the stream, and is assigned when a
  record is put into the stream. Each stream has one or more shards.
  You must specify the shard iterator type. For example, you can set the
  `ShardIteratorType` parameter to read exactly from the position denoted by
  a specific sequence number by using the `AT_SEQUENCE_NUMBER` shard iterator
  type, or right after the sequence number by using the
  `AFTER_SEQUENCE_NUMBER` shard iterator type, using sequence numbers
  returned by earlier calls to `PutRecord`, `PutRecords`, `GetRecords`, or
  `DescribeStream`. In the request, you can specify the shard iterator type
  `AT_TIMESTAMP` to read records from an arbitrary point in time,
  `TRIM_HORIZON` to cause `ShardIterator` to point to the last untrimmed
  record in the shard in the system (the oldest data record in the shard), or
  `LATEST` so that you always read the most recent data in the shard.
  When you read repeatedly from a stream, use a `GetShardIterator` request to
  get the first shard iterator for use in your first `GetRecords` request and
  for subsequent reads use the shard iterator returned by the `GetRecords`
  request in `NextShardIterator`. A new shard iterator is returned by every
  `GetRecords` request in `NextShardIterator`, which you use in the
  `ShardIterator` parameter of the next `GetRecords` request.
  If a `GetShardIterator` request is made too often, you receive a
  `ProvisionedThroughputExceededException`. For more information about
  throughput limits, see `GetRecords`, and [Streams
  Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
  in the *Amazon Kinesis Streams Developer Guide*.
  If the shard is closed, `GetShardIterator` returns a valid iterator for the
  last sequence number of the shard. Note that a shard can be closed as a
  result of using `SplitShard` or `MergeShards`.
  `GetShardIterator` has a limit of 5 transactions per second per account per
  open shard.
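  ## Example
  A minimal sketch requesting an iterator at the oldest untrimmed record;
  the stream name and shard ID are illustrative:
      {:ok, %{"ShardIterator" => iterator}, _response} =
        get_shard_iterator(client, %{
          "StreamName" => "my-stream",
          "ShardId" => "shardId-000000000000",
          "ShardIteratorType" => "TRIM_HORIZON"
        })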
  """
  def get_shard_iterator(client, input, options \\ []) do
    request(client, "GetShardIterator", input, options)
  end
  @doc """
  Increases the Amazon Kinesis stream's retention period, which is the length
  of time data records are accessible after they are added to the stream. The
  maximum value of a stream's retention period is 168 hours (7 days).
  Upon choosing a longer stream retention period, this operation increases
  the time period during which records that have not yet expired remain
  accessible.
  However, it will not make previous data that has expired (older than the
  stream's previous retention period) accessible after the operation has been
  called. For example, if a stream's retention period is set to 24 hours and
  is increased to 168 hours, any data that is older than 24 hours will remain
  inaccessible to consumer applications.
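  ## Example
  A minimal sketch; the retention period is given in hours:
      {:ok, _body, _response} =
        increase_stream_retention_period(client, %{
          "StreamName" => "my-stream",
          "RetentionPeriodHours" => 72
        })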
  """
  def increase_stream_retention_period(client, input, options \\ []) do
    request(client, "IncreaseStreamRetentionPeriod", input, options)
  end
  @doc """
  Lists your Amazon Kinesis streams.
  The number of streams may be too large to return from a single call to
  `ListStreams`. You can limit the number of returned streams using the
  `Limit` parameter. If you do not specify a value for the `Limit` parameter,
  Amazon Kinesis uses the default limit, which is currently 10.
  You can detect if there are more streams available to list by using the
  `HasMoreStreams` flag from the returned output. If there are more streams
  available, you can request more streams by using the name of the last
  stream returned by the `ListStreams` request in the
  `ExclusiveStartStreamName` parameter in a subsequent request to
  `ListStreams`. The group of stream names returned by the subsequent request
  is then added to the list. You can continue this process until all the
  stream names have been collected in the list.
  `ListStreams` has a limit of 5 transactions per second per account.
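  ## Example
  A sketch of the pagination flow described above, collecting all stream
  names; start it with `all_streams(client, nil, [])`:
      defp all_streams(client, start, acc) do
        input = if start, do: %{"ExclusiveStartStreamName" => start}, else: %{}
        {:ok, %{"StreamNames" => names, "HasMoreStreams" => more?}, _response} =
          list_streams(client, input)
        if more?,
          do: all_streams(client, List.last(names), acc ++ names),
          else: acc ++ names
      end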
  """
  def list_streams(client, input, options \\ []) do
    request(client, "ListStreams", input, options)
  end
  @doc """
  Lists the tags for the specified Amazon Kinesis stream.
  """
  def list_tags_for_stream(client, input, options \\ []) do
    request(client, "ListTagsForStream", input, options)
  end
  @doc """
  Merges two adjacent shards in an Amazon Kinesis stream and combines them
  into a single shard to reduce the stream's capacity to ingest and transport
  data. Two shards are considered adjacent if the union of the hash key
  ranges for the two shards forms a contiguous set with no gaps. For example,
  if you have two shards, one with a hash key range of 276...381 and the
  other with a hash key range of 382...454, then you could merge these two
  shards into a single shard that would have a hash key range of 276...454.
  After the merge, the single child shard receives data for all hash key
  values covered by the two parent shards.
  `MergeShards` is called when there is a need to reduce the overall capacity
  of a stream because of excess capacity that is not being used. You must
  specify the shard to be merged and the adjacent shard for a stream. For
  more information about merging shards, see [Merge Two
  Shards](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html)
  in the *Amazon Kinesis Streams Developer Guide*.
  If the stream is in the `ACTIVE` state, you can call `MergeShards`. If a
  stream is in the `CREATING`, `UPDATING`, or `DELETING` state, `MergeShards`
  returns a `ResourceInUseException`. If the specified stream does not exist,
  `MergeShards` returns a `ResourceNotFoundException`.
  You can use `DescribeStream` to check the state of the stream, which is
  returned in `StreamStatus`.
  `MergeShards` is an asynchronous operation. Upon receiving a `MergeShards`
  request, Amazon Kinesis immediately returns a response and sets the
  `StreamStatus` to `UPDATING`. After the operation is completed, Amazon
  Kinesis sets the `StreamStatus` to `ACTIVE`. Read and write operations
  continue to work while the stream is in the `UPDATING` state.
  You use `DescribeStream` to determine the shard IDs that are specified in
  the `MergeShards` request.
  If you try to operate on too many streams in parallel using `CreateStream`,
  `DeleteStream`, `MergeShards` or `SplitShard`, you will receive a
  `LimitExceededException`.
  `MergeShards` has a limit of 5 transactions per second per account.
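  ## Example
  A minimal sketch; the shard IDs are illustrative and must be adjacent:
      {:ok, _body, _response} =
        merge_shards(client, %{
          "StreamName" => "my-stream",
          "ShardToMerge" => "shardId-000000000000",
          "AdjacentShardToMerge" => "shardId-000000000001"
        })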
  """
  def merge_shards(client, input, options \\ []) do
    request(client, "MergeShards", input, options)
  end
  @doc """
  Writes a single data record into an Amazon Kinesis stream. Call `PutRecord`
  to send data into the stream for real-time ingestion and subsequent
  processing, one record at a time. Each shard can support writes up to 1,000
  records per second, up to a maximum data write total of 1 MB per second.
  You must specify the name of the stream that captures, stores, and
  transports the data; a partition key; and the data blob itself.
  The data blob can be any type of data; for example, a segment from a log
  file, geographic/location data, website clickstream data, and so on.
  The partition key is used by Amazon Kinesis to distribute data across
  shards. Amazon Kinesis segregates the data records that belong to a stream
  into multiple shards, using the partition key associated with each data
  record to determine which shard a given data record belongs to.
  Partition keys are Unicode strings, with a maximum length limit of 256
  characters for each key. An MD5 hash function is used to map partition keys
  to 128-bit integer values and to map associated data records to shards
  using the hash key ranges of the shards. You can override hashing the
  partition key to determine the shard by explicitly specifying a hash value
  using the `ExplicitHashKey` parameter. For more information, see [Adding
  Data to a
  Stream](http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
  in the *Amazon Kinesis Streams Developer Guide*.
  `PutRecord` returns the shard ID of where the data record was placed and
  the sequence number that was assigned to the data record.
  Sequence numbers increase over time and are specific to a shard within a
  stream, not across all shards within a stream. To guarantee strictly
  increasing ordering, write serially to a shard and use the
  `SequenceNumberForOrdering` parameter. For more information, see [Adding
  Data to a
  Stream](http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
  in the *Amazon Kinesis Streams Developer Guide*.
  If a `PutRecord` request cannot be processed because of insufficient
  provisioned throughput on the shard involved in the request, `PutRecord`
  throws `ProvisionedThroughputExceededException`.
  Data records are accessible for only 24 hours from the time that they are
  added to a stream.
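  ## Example
  A minimal sketch; the Kinesis API expects the `Data` blob to be
  Base64-encoded:
      {:ok, %{"ShardId" => shard_id, "SequenceNumber" => seq}, _response} =
        put_record(client, %{
          "StreamName" => "my-stream",
          "PartitionKey" => "user-42",
          "Data" => Base.encode64("hello")
        })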
  """
  def put_record(client, input, options \\ []) do
    request(client, "PutRecord", input, options)
  end
  @doc """
  Writes multiple data records into an Amazon Kinesis stream in a single call
  (also referred to as a `PutRecords` request). Use this operation to send
  data into the stream for data ingestion and processing.
  Each `PutRecords` request can support up to 500 records. Each record in the
  request can be as large as 1 MB, up to a limit of 5 MB for the entire
  request, including partition keys. Each shard can support writes up to
  1,000 records per second, up to a maximum data write total of 1 MB per
  second.
  You must specify the name of the stream that captures, stores, and
  transports the data; and an array of request `Records`, with each record in
  the array requiring a partition key and data blob. The record size limit
  applies to the total size of the partition key and data blob.
  The data blob can be any type of data; for example, a segment from a log
  file, geographic/location data, website clickstream data, and so on.
  The partition key is used by Amazon Kinesis as input to a hash function
  that maps the partition key and associated data to a specific shard. An MD5
  hash function is used to map partition keys to 128-bit integer values and
  to map associated data records to shards. As a result of this hashing
  mechanism, all data records with the same partition key map to the same
  shard within the stream. For more information, see [Adding Data to a
  Stream](http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
  in the *Amazon Kinesis Streams Developer Guide*.
  Each record in the `Records` array may include an optional parameter,
  `ExplicitHashKey`, which overrides the partition key to shard mapping. This
  parameter allows a data producer to determine explicitly the shard where
  the record is stored. For more information, see [Adding Multiple Records
  with
  PutRecords](http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords)
  in the *Amazon Kinesis Streams Developer Guide*.
  The `PutRecords` response includes an array of response `Records`. Each
  record in the response array directly correlates with a record in the
  request array using natural ordering, from the top to the bottom of the
  request and response. The response `Records` array always includes the same
  number of records as the request array.
  The response `Records` array includes both successfully and unsuccessfully
  processed records. Amazon Kinesis attempts to process all records in each
  `PutRecords` request. A single record failure does not stop the processing
  of subsequent records.
  A successfully processed record includes `ShardId` and `SequenceNumber`
  values. The `ShardId` parameter identifies the shard in the stream where
  the record is stored. The `SequenceNumber` parameter is an identifier
  assigned to the put record, unique to all records in the stream.
  An unsuccessfully processed record includes `ErrorCode` and `ErrorMessage`
  values. `ErrorCode` reflects the type of error and can be one of the
  following values: `ProvisionedThroughputExceededException` or
  `InternalFailure`. `ErrorMessage` provides more detailed information about
  the `ProvisionedThroughputExceededException` exception including the
  account ID, stream name, and shard ID of the record that was throttled. For
  more information about partially successful responses, see [Adding Multiple
  Records with
  PutRecords](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords)
  in the *Amazon Kinesis Streams Developer Guide*.
  By default, data records are accessible for only 24 hours from the time
  that they are added to an Amazon Kinesis stream. This retention period can
  be modified using the `DecreaseStreamRetentionPeriod` and
  `IncreaseStreamRetentionPeriod` operations.
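  ## Example
  A sketch of a batched put followed by a check for partially failed
  records, as described above:
      records =
        for i <- 1..3 do
          %{"PartitionKey" => "key-" <> Integer.to_string(i),
            "Data" => Base.encode64("payload")}
        end
      {:ok, %{"Records" => results}, _response} =
        put_records(client, %{"StreamName" => "my-stream", "Records" => records})
      failed = Enum.filter(results, &Map.has_key?(&1, "ErrorCode"))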
  """
  def put_records(client, input, options \\ []) do
    request(client, "PutRecords", input, options)
  end
  @doc """
  Removes tags from the specified Amazon Kinesis stream. Removed tags are
  deleted and cannot be recovered after this operation successfully
  completes.
  If you specify a tag that does not exist, it is ignored.
  """
  def remove_tags_from_stream(client, input, options \\ []) do
    request(client, "RemoveTagsFromStream", input, options)
  end
  @doc """
  Splits a shard into two new shards in the Amazon Kinesis stream to increase
  the stream's capacity to ingest and transport data. `SplitShard` is called
  when there is a need to increase the overall capacity of a stream because
  of an expected increase in the volume of data records being ingested.
  You can also use `SplitShard` when a shard appears to be approaching its
  maximum utilization; for example, the producers sending data into the
  specific shard are suddenly sending more than previously anticipated. You
  can also call `SplitShard` to increase stream capacity, so that more Amazon
  Kinesis applications can simultaneously read data from the stream for
  real-time processing.
  You must specify the shard to be split and the new hash key, which is the
  position in the shard where the shard gets split in two. In many cases, the
  new hash key might simply be the average of the beginning and ending hash
  key, but it can be any hash key value in the range being mapped into the
  shard. For more information about splitting shards, see [Split a
  Shard](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html)
  in the *Amazon Kinesis Streams Developer Guide*.
  You can use `DescribeStream` to determine the shard ID and hash key values
  for the `ShardToSplit` and `NewStartingHashKey` parameters that are
  specified in the `SplitShard` request.
  `SplitShard` is an asynchronous operation. Upon receiving a `SplitShard`
  request, Amazon Kinesis immediately returns a response and sets the stream
  status to `UPDATING`. After the operation is completed, Amazon Kinesis sets
  the stream status to `ACTIVE`. Read and write operations continue to work
  while the stream is in the `UPDATING` state.
  You can use `DescribeStream` to check the status of the stream, which is
  returned in `StreamStatus`. If the stream is in the `ACTIVE` state, you can
  call `SplitShard`. If a stream is in the `CREATING`, `UPDATING`, or
  `DELETING` state, `SplitShard` returns a `ResourceInUseException`.
  If the specified stream does not exist, `SplitShard` returns a
  `ResourceNotFoundException`. If you try to create more shards than are
  authorized for your account, you receive a `LimitExceededException`.
  For the default shard limit for an AWS account, see [Streams
  Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
  in the *Amazon Kinesis Streams Developer Guide*. If you need to increase
  this limit, [contact AWS
  Support](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
  If you try to operate on too many streams simultaneously using
  `CreateStream`, `DeleteStream`, `MergeShards`, and/or `SplitShard`, you
  receive a `LimitExceededException`.
  `SplitShard` has a limit of 5 transactions per second per account.
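  ## Example
  A minimal sketch; the new starting hash key may be any value within the
  parent shard's hash key range (here, the midpoint of the full MD5 range):
      {:ok, _body, _response} =
        split_shard(client, %{
          "StreamName" => "my-stream",
          "ShardToSplit" => "shardId-000000000000",
          "NewStartingHashKey" => "170141183460469231731687303715884105728"
        })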
  """
  def split_shard(client, input, options \\ []) do
    request(client, "SplitShard", input, options)
  end
  @doc """
  Updates the shard count of the specified stream to the specified number of
  shards.
  Updating the shard count is an asynchronous operation. Upon receiving the
  request, Amazon Kinesis returns immediately and sets the status of the
  stream to `UPDATING`. After the update is complete, Amazon Kinesis sets the
  status of the stream back to `ACTIVE`. Depending on the size of the stream,
  the scaling action could take a few minutes to complete. You can continue
  to read and write data to your stream while its status is `UPDATING`.
  To update the shard count, Amazon Kinesis performs splits and merges on
  individual shards. This can cause short-lived shards to be created, in
  addition to the final shards. We recommend that you double or halve the
  shard count, as this results in the fewest number of splits or merges.
  This operation has a rate limit of twice per rolling 24 hour period. You
  cannot scale above double your current shard count, scale below half your
  current shard count, or exceed the shard limits for your account.
  For the default limits for an AWS account, see [Streams
  Limits](http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
  in the *Amazon Kinesis Streams Developer Guide*. If you need to increase a
  limit, [contact AWS
  Support](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html).
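  ## Example
  A minimal sketch; `UNIFORM_SCALING` is the scaling type the API accepts:
      {:ok, _body, _response} =
        update_shard_count(client, %{
          "StreamName" => "my-stream",
          "TargetShardCount" => 4,
          "ScalingType" => "UNIFORM_SCALING"
        })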
  """
  def update_shard_count(client, input, options \\ []) do
    request(client, "UpdateShardCount", input, options)
  end
  @spec request(map(), binary(), map(), list()) ::
    {:ok, Poison.Parser.t | nil, Poison.Response.t} |
    {:error, Poison.Parser.t} |
    {:error, HTTPoison.Error.t}
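  # Builds the JSON-1.1 request for the given action, signs it with AWS
  # Signature Version 4, and decodes the response: a 200 with a body parses
  # as JSON, a 200 with an empty body yields `nil`, and any other status is
  # surfaced as `{:error, {exception_type, message}}`.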
  defp request(client, action, input, options) do
    client = %{client | service: "kinesis"}
    host = get_host("kinesis", client)
    url = get_url(host, client)
    headers = [{"Host", host},
               {"Content-Type", "application/x-amz-json-1.1"},
               {"X-Amz-Target", "Kinesis_20131202.#{action}"}]
    payload = Poison.Encoder.encode(input, [])
    headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
    case HTTPoison.post(url, payload, headers, options) do
      {:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
        {:ok, nil, response}
      {:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
        {:ok, Poison.Parser.parse!(body), response}
      {:ok, _response=%HTTPoison.Response{body: body}} ->
        error = Poison.Parser.parse!(body)
        exception = error["__type"]
        message = error["message"]
        {:error, {exception, message}}
      {:error, %HTTPoison.Error{reason: reason}} ->
        {:error, %HTTPoison.Error{reason: reason}}
    end
  end
  defp get_host(endpoint_prefix, client) do
    if client.region == "local" do
      "localhost"
    else
      "#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
    end
  end
  defp get_url(host, %{proto: proto, port: port}) do
    "#{proto}://#{host}:#{port}/"
  end
end | 
	lib/aws/kinesis.ex | 0.934724 | 0.775817 | 
	kinesis.ex | 
	starcoder | 
| 
	defmodule Valet.Error.TypeMismatch do
  @enforce_keys [:trail, :value, :expected]
  defstruct @enforce_keys
  def new(trail, value, expected),
    do: %__MODULE__{trail: trail, value: value, expected: expected}
end
defmodule Valet.Error.NotInSet do
  @enforce_keys [:trail, :value, :valid]
  defstruct @enforce_keys
  def new(trail, value, valid),
    do: %__MODULE__{trail: trail, value: value, valid: valid}
end
defmodule Valet.Error.RegexDoesNotMatch do
  @enforce_keys [:trail, :value, :regex]
  defstruct @enforce_keys
  def new(trail, value, regex),
    do: %__MODULE__{trail: trail, value: value, regex: regex}
end
defmodule Valet.Error.KeyIsMissing do
  @enforce_keys [:trail, :in_value, :key, :val_schema]
  defstruct @enforce_keys
  def new(trail, in_value, key, val_schema),
    do: %__MODULE__{trail: trail, in_value: in_value, key: key, val_schema: val_schema}
end
defmodule Valet.Error.KeysUnknown do
  @enforce_keys [:trail, :value, :keys, :valid]
  defstruct @enforce_keys
  def new(trail, value, keys, valid),
    do: %__MODULE__{trail: trail, value: value, keys: keys, valid: valid}
end
defmodule Valet.Error.NotAtLeast do
  @enforce_keys [:trail, :value, :min]
  defstruct @enforce_keys
  def new(trail, value, min),
    do: %__MODULE__{trail: trail, value: value, min: min}
end
defmodule Valet.Error.NotAtMost do
  @enforce_keys [:trail, :value, :max]
  defstruct @enforce_keys
  def new(trail, value, max),
    do: %__MODULE__{trail: trail, value: value, max: max}
end
defmodule Valet.Error.NotBetween do
  @enforce_keys [:trail, :value, :min, :max]
  defstruct @enforce_keys
  def new(trail, value, min, max),
    do: %__MODULE__{trail: trail, value: value, min: min, max: max}
end
defmodule Valet.Error.LengthNot do
  @enforce_keys [:trail, :value, :expected]
  defstruct @enforce_keys
  def new(trail, value, expected),
    do: %__MODULE__{trail: trail, value: value, expected: expected}
end
defmodule Valet.Error.LengthNotAtLeast do
  @enforce_keys [:trail, :value, :min]
  defstruct @enforce_keys
  def new(trail, value, min),
    do: %__MODULE__{trail: trail, value: value, min: min}
end
defmodule Valet.Error.LengthNotAtMost do
  @enforce_keys [:trail, :value, :max]
  defstruct @enforce_keys
  def new(trail, value, max),
    do: %__MODULE__{trail: trail, value: value, max: max}
end
defmodule Valet.Error.LengthNotBetween do
  @enforce_keys [:trail, :value, :min, :max]
  defstruct @enforce_keys
  def new(trail, value, min, max),
    do: %__MODULE__{trail: trail, value: value, min: min, max: max}
end
defmodule Valet.Error.Disunion do
  @enforce_keys [:trail, :value, :errors]
  defstruct @enforce_keys
  
  def new(trail, value, errors),
    do: %__MODULE__{trail: trail, value: value, errors: errors}
end | 
	lib/errors.ex | 0.782912 | 0.743401 | 
	errors.ex | 
	starcoder | 
| 
	defmodule AisFront.Units.ROT do
  alias __MODULE__
  alias AisFront.Protocols.Convertible
  defstruct value: %Decimal{}, unit: :rad_sec
  @si_unit :rad_sec
  @unit_si_ratio %{
    rad_sec: 1,
    deg_sec: :math.pi |> Decimal.cast |> Decimal.div(180),
    deg_min: :math.pi |> Decimal.cast |> Decimal.div(180*60)
  }
  @possible_units Map.keys(@unit_si_ratio) ++ [:ais]
  def si_unit(), do: @si_unit
  def unit_si_ratio(), do: @unit_si_ratio
  def possible_units(), do: @possible_units
  def new(value, unit \\ @si_unit) when unit in @possible_units do
    %ROT{value: Decimal.cast(value), unit: unit}
  end
  def to_rad_sec(rot), do: Convertible.convert(rot, :rad_sec)
  def to_deg_sec(rot), do: Convertible.convert(rot, :deg_sec)
  def to_deg_min(rot), do: Convertible.convert(rot, :deg_min)
  def to_ais(rot), do: Convertible.convert(rot, :ais)
  defimpl Convertible do
    def possible_units(_rot), do: ROT.possible_units
    def si_unit(_rot), do: ROT.si_unit()
    def si?(rot), do: rot.unit == ROT.si_unit
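    # AIS encodes rate of turn as 4.733 * sqrt(rot), with rot in deg/min and
    # the sign preserved, so the inverse squares value/4.733 before applying
    # the deg/min-to-SI ratio.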
    def to_si(%ROT{value: value, unit: :ais}) do
      v = Decimal.div(value, "4.733")
      deg_min_si_ratio = ROT.unit_si_ratio[:deg_min]
      %ROT{
        value: Decimal.mult(v,v) |> Decimal.mult(deg_min_si_ratio),
        unit: ROT.si_unit
      }
    end
    def to_si(%ROT{value: value, unit: unit}) do
      unit_si_ratio = ROT.unit_si_ratio[unit]
      %ROT{
        value: Decimal.mult(value, unit_si_ratio),
        unit: ROT.si_unit
      }
    end
    def convert(%ROT{unit: unit} = rot, unit), do: rot
    def convert(rot, :ais) do
      %ROT{value: value} = to_si(rot)
      deg_min_si_ratio = ROT.unit_si_ratio[:deg_min]
      positive? = Decimal.positive?(value)
      value = Decimal.div(value, deg_min_si_ratio) 
              |> Decimal.abs
              |> Decimal.sqrt
              |> Decimal.mult("4.733")
      value = case positive? do
        true -> value
        false -> Decimal.mult(-1, value)
      end
      %ROT{
        value: value,
        unit: :ais
      }
    end
    def convert(rot, to_unit) do
      %ROT{value: value} = to_si(rot)
      unit_si_ratio = ROT.unit_si_ratio[to_unit]
      %ROT{
        value: Decimal.div(value, unit_si_ratio),
        unit: to_unit
      }
    end
  end
  defimpl String.Chars do
    @unit_repr %{rad_sec: "rad/s", deg_sec: "°/s", deg_min: "°/min"}
    @unit_precision %{rad_sec: 6, deg_sec: 4, deg_min: 2, ais: 0}
    def to_string(%ROT{value: value, unit: :ais} = rot) do
      rounded = Decimal.round(value, @unit_precision[:ais]) |> Decimal.to_integer
      case rounded do
        127 -> ">5°/30s"
        -127 -> "<-5°/30s"
        128 -> "No turn information"
        _ ->
          deg_min = rot |> ROT.to_deg_min
          "#{deg_min}"
      end
    end
    def to_string(%ROT{value: value, unit: unit}) do
      rounded = Decimal.round(value, @unit_precision[unit])
      case unit do
        :rad_sec -> "#{rounded} #{@unit_repr[unit]}"
        _ -> "#{rounded}#{@unit_repr[unit]}"
      end
    end
  end
end | 
	lib/ais_front/units/rot.ex | 0.643217 | 0.681899 | 
	rot.ex | 
	starcoder | 
| 
	defmodule Guardian.Plug.Backdoor do
  @moduledoc """
  This plug allows you to bypass authentication in acceptance tests by passing
  the token needed to load the current resource directly to your Guardian module
  via a query string parameter.
  ## Installation
  Add the following to your Phoenix router before other Guardian plugs.
  ```
  if Mix.env() == :test do
    plug Guardian.Plug.Backdoor, module: MyApp.Guardian
  end
  plug Guardian.Plug.VerifySession
  ```
  NOTE: This plug is designed for acceptance testing and should never be added
  to a production environment.
  ## Usage
  Now that `Guardian.Plug.Backdoor` is installed, it's time to sign in. Pass
  your claims as `claims` in the query string of your route.
  ```
  conn = get(conn, "/", claims: %{sub: "User:1"})
  resource = MyApp.Guardian.Plug.current_resource(conn)
  %{"sub" => "User:1"} = MyApp.Guardian.Plug.current_claims(conn)
  ```
  When the `Guardian.Plug.Backdoor` plug runs, it fetches the resource from your
  Guardian implementation with those claims and signs in.
  Alternatively, encode your claims into a token and pass that as `token` in the
  query string instead.
  ```
  {:ok, token, _claims} = MyApp.Guardian.encode_and_sign(resource)
  conn = get(conn, "/", token: token)
  resource = MyApp.Guardian.Plug.current_resource(conn)
  ```
  ## Options
    `:module` - Your app's `Guardian` implementation module. Required.
  """
  import Plug.Conn
  @doc false
  def init(opts) do
    Enum.into(opts, %{})
  end
  @doc false
  def call(conn, %{module: module}) do
    conn = fetch_query_params(conn)
    case resource_from_params(module, conn.params) do
      {:ok, resource, claims} ->
        module = Module.concat(module, Plug)
        module.sign_in(conn, resource, claims)
      _ ->
        conn
    end
  end
  defp resource_from_params(module, %{"token" => token}) do
    case module.resource_from_token(token) do
      {:ok, resource, claims} -> {:ok, resource, claims}
      error -> error
    end
  end
  defp resource_from_params(module, %{"claims" => claims}) do
    case module.resource_from_claims(claims) do
      {:ok, resource} -> {:ok, resource, claims}
      error -> error
    end
  end
  defp resource_from_params(_module, _params), do: :no_resource_found
end | 
	lib/guardian/plug/backdoor.ex | 0.698432 | 0.76769 | 
	backdoor.ex | 
	starcoder | 
| 
	defmodule Redix.Protocol do
  @moduledoc """
  This module provides functions to work with the [Redis binary
  protocol](http://redis.io/topics/protocol).
  """
  defmodule ParseError do
    @moduledoc """
    Error in parsing data according to the
    [RESP](http://redis.io/topics/protocol) protocol.
    """
    defexception [:message]
  end
  @type redis_value :: binary | integer | nil | Redix.Error.t() | [redis_value]
  @type on_parse(value) :: {:ok, value, binary} | {:continuation, (binary -> on_parse(value))}
  @crlf "\r\n"
  @crlf_iodata [?\r, ?\n]
  @doc ~S"""
  Packs a list of Elixir terms to a Redis (RESP) array.
  This function returns an iodata (instead of a binary) because the packed
  result is usually sent to Redis through `:gen_tcp.send/2` or similar. It can
  be converted to a binary with `IO.iodata_to_binary/1`.
  All elements of `elems` are converted to strings with `to_string/1`, hence
  this function supports encoding everything that implements `String.Chars`.
  ## Examples
      iex> iodata = Redix.Protocol.pack(["SET", "mykey", 1])
      iex> IO.iodata_to_binary(iodata)
      "*3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$1\r\n1\r\n"
  """
  @spec pack([binary]) :: iodata
  def pack(items) when is_list(items) do
    pack(items, [], 0)
  end
  defp pack([item | rest], acc, count) do
    item = to_string(item)
    new_acc = [acc, [?$, Integer.to_string(byte_size(item)), @crlf_iodata, item, @crlf_iodata]]
    pack(rest, new_acc, count + 1)
  end
  defp pack([], acc, count) do
    [?*, Integer.to_string(count), @crlf_iodata, acc]
  end
  @doc ~S"""
  Parses a RESP-encoded value from the given `data`.
  Returns `{:ok, value, rest}` if a value is parsed successfully, or a
  continuation in the form `{:continuation, fun}` if the data is incomplete.
  ## Examples
      iex> Redix.Protocol.parse("+OK\r\ncruft")
      {:ok, "OK", "cruft"}
      iex> Redix.Protocol.parse("-ERR wrong type\r\n")
      {:ok, %Redix.Error{message: "ERR wrong type"}, ""}
      iex> {:continuation, fun} = Redix.Protocol.parse("+OK")
      iex> fun.("\r\n")
      {:ok, "OK", ""}
  """
  @spec parse(binary) :: on_parse(redis_value)
  def parse(data)
  def parse("+" <> rest), do: parse_simple_string(rest)
  def parse("-" <> rest), do: parse_error(rest)
  def parse(":" <> rest), do: parse_integer(rest)
  def parse("$" <> rest), do: parse_bulk_string(rest)
  def parse("*" <> rest), do: parse_array(rest)
  def parse(""), do: {:continuation, &parse/1}
  def parse(<<byte>> <> _),
    do: raise(ParseError, message: "invalid type specifier (#{inspect(<<byte>>)})")
  @doc ~S"""
  Parses `n` RESP-encoded values from the given `data`.
  Each element is parsed as described in `parse/1`. If an element can't be fully
  parsed or there are fewer than `n` elements encoded in `data`, then a
  continuation in the form of `{:continuation, fun}` is returned. Otherwise,
  `{:ok, values, rest}` is returned. If there's an error in decoding, a
  `Redix.Protocol.ParseError` exception is raised.
  ## Examples
      iex> Redix.Protocol.parse_multi("+OK\r\n+COOL\r\n", 2)
      {:ok, ["OK", "COOL"], ""}
      iex> {:continuation, fun} = Redix.Protocol.parse_multi("+OK\r\n", 2)
      iex> fun.("+OK\r\n")
      {:ok, ["OK", "OK"], ""}
  """
  @spec parse_multi(binary, non_neg_integer) :: on_parse([redis_value])
  def parse_multi(data, nelems)
  # We treat the case when we have just one element to parse differently as it's
  # a very common case since single commands are treated as pipelines with just
  # one command in them.
  def parse_multi(data, 1) do
    resolve_cont(parse(data), &{:ok, [&1], &2})
  end
  def parse_multi(data, n) do
    take_elems(data, n, [])
  end
  # Type parsers
  defp parse_simple_string(data) do
    until_crlf(data)
  end
  defp parse_error(data) do
    data
    |> until_crlf()
    |> resolve_cont(&{:ok, %Redix.Error{message: &1}, &2})
  end
  defp parse_integer(""), do: {:continuation, &parse_integer/1}
  defp parse_integer("-" <> rest),
    do: resolve_cont(parse_integer_without_sign(rest), &{:ok, -&1, &2})
  defp parse_integer(bin), do: parse_integer_without_sign(bin)
  defp parse_integer_without_sign("") do
    {:continuation, &parse_integer_without_sign/1}
  end
  defp parse_integer_without_sign(<<digit, _::binary>> = bin) when digit in ?0..?9 do
    resolve_cont(parse_integer_digits(bin, 0), fn i, rest ->
      resolve_cont(until_crlf(rest), fn
        "", rest ->
          {:ok, i, rest}
        <<char, _::binary>>, _rest ->
          raise ParseError, message: "expected CRLF, found: #{inspect(<<char>>)}"
      end)
    end)
  end
  defp parse_integer_without_sign(<<non_digit, _::binary>>) do
    raise ParseError, message: "expected integer, found: #{inspect(<<non_digit>>)}"
  end
  defp parse_integer_digits(<<digit, rest::binary>>, acc) when digit in ?0..?9,
    do: parse_integer_digits(rest, acc * 10 + (digit - ?0))
  defp parse_integer_digits(<<_non_digit, _::binary>> = rest, acc), do: {:ok, acc, rest}
  defp parse_integer_digits(<<>>, acc), do: {:continuation, &parse_integer_digits(&1, acc)}
  defp parse_bulk_string(rest) do
    resolve_cont(parse_integer(rest), fn
      -1, rest ->
        {:ok, nil, rest}
      size, rest ->
        parse_string_of_known_size(rest, size)
    end)
  end
  defp parse_string_of_known_size(data, size) do
    case data do
      <<str::bytes-size(size), @crlf, rest::binary>> ->
        {:ok, str, rest}
      _ ->
        {:continuation, &parse_string_of_known_size(data <> &1, size)}
    end
  end
  defp parse_array(rest) do
    resolve_cont(parse_integer(rest), fn
      -1, rest ->
        {:ok, nil, rest}
      size, rest ->
        take_elems(rest, size, [])
    end)
  end
  defp until_crlf(data, acc \\ "")
  defp until_crlf(<<@crlf, rest::binary>>, acc), do: {:ok, acc, rest}
  defp until_crlf(<<>>, acc), do: {:continuation, &until_crlf(&1, acc)}
  defp until_crlf(<<?\r>>, acc), do: {:continuation, &until_crlf(<<?\r, &1::binary>>, acc)}
  defp until_crlf(<<byte, rest::binary>>, acc), do: until_crlf(rest, <<acc::binary, byte>>)
  defp take_elems(data, 0, acc) do
    {:ok, Enum.reverse(acc), data}
  end
  defp take_elems(<<_, _::binary>> = data, n, acc) when n > 0 do
    resolve_cont(parse(data), fn elem, rest ->
      take_elems(rest, n - 1, [elem | acc])
    end)
  end
  defp take_elems(<<>>, n, acc) do
    {:continuation, &take_elems(&1, n, acc)}
  end
  defp resolve_cont({:ok, val, rest}, ok) when is_function(ok, 2), do: ok.(val, rest)
  defp resolve_cont({:continuation, cont}, ok),
    do: {:continuation, fn new_data -> resolve_cont(cont.(new_data), ok) end}
end | 
	lib/redix/protocol.ex | 0.888124 | 0.564639 | 
	protocol.ex | 
	starcoder | 
| 
	import Kernel, except: [to_string: 1]
defmodule Macro do
  @moduledoc """
  Conveniences for working with macros.
  """
  @typedoc "Abstract Syntax Tree (AST)"
  @type t :: expr | { t, t } | atom | number | binary | pid | fun | [t]
  @typedoc "Expr node (remaining ones are literals)"
  @type expr :: { expr | atom, Keyword.t, atom | [t] }
  @binary_ops [:===, :!==,
    :==, :!=, :<=, :>=,
    :&&, :||, :<>, :++, :--, :\\, :::, :<-, :.., :|>, :=~,
    :<, :>, :->,
    :+, :-, :*, :/, :=, :|, :.,
    :and, :or, :xor, :when, :in, :inlist, :inbits,
    :<<<, :>>>, :|||, :&&&, :^^^, :~~~]
  @doc false
  defmacro binary_ops, do: @binary_ops
  @unary_ops [:!, :@, :^, :not, :+, :-, :~~~, :&]
  @doc false
  defmacro unary_ops, do: @unary_ops
  @spec binary_op_props(atom) :: { :left | :right, precedence :: integer }
  defp binary_op_props(o) do
    case o do
      o when o in [:<-, :inlist, :inbits, :\\, :::]             -> {:left,  40}
      :|                                                        -> {:right, 50}
      :when                                                     -> {:right, 70}
      :=                                                        -> {:right, 80}
      o when o in [:||, :|||, :or, :xor]                        -> {:left, 130}
      o when o in [:&&, :&&&, :and]                             -> {:left, 140}
      o when o in [:==, :!=, :<, :<=, :>=, :>, :=~, :===, :!==] -> {:left, 150}
      o when o in [:|>, :<<<, :>>>]                             -> {:right, 160}
      :in                                                       -> {:left, 170}
      o when o in [:++, :--, :.., :<>]                          -> {:right, 200}
      o when o in [:+, :-]                                      -> {:left, 210}
      o when o in [:*, :/]                                      -> {:left, 220}
      :^^^                                                      -> {:left, 250}
      :.                                                        -> {:left, 310}
    end
  end
  @doc """
  Breaks a pipeline expression into a list.
  Raises if the pipeline is ill-formed.
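  For example, `unpipe(quote do: 1 |> foo |> bar)` returns the quoted
  expressions `[1, foo, bar]`.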
  """
  @spec unpipe(Macro.t) :: [Macro.t]
  def unpipe({ :|> , _, [left, right] }) do
    [left|unpipe(right)]
  end
  def unpipe(other) do
    [other]
  end
  @doc """
  Pipes `expr` into the `call_expr` as the
  argument in the given `position`.
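  For example, piping the expression `1` into the call `foo(2)` at position
  `0` yields the quoted call `foo(1, 2)`.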
  """
  @spec pipe(Macro.t, Macro.t, integer) :: Macro.t | no_return
  def pipe(expr, call_args, integer \\ 0)
  def pipe(expr, { :&, _, _ } = call_args, _integer) do
    raise ArgumentError, message: "cannot pipe #{to_string expr} into #{to_string call_args}"
  end
  def pipe(expr, { call, line, atom }, integer) when is_atom(atom) do
    { call, line, List.insert_at([], integer, expr) }
  end
  def pipe(expr, { call, line, args }, integer) when is_list(args) do
    { call, line, List.insert_at(args, integer, expr) }
  end
  def pipe(expr, call_args, _integer) do
    raise ArgumentError,
      message: "cannot pipe #{to_string expr} into #{to_string call_args}"
  end
  @doc """
  Recurs the quoted expression applying the given function to
  each metadata node.
  This is often useful to remove information like lines and
  hygienic counters from the expression for either storage or
  comparison.
  ## Examples
      iex> quoted = quote line: 10, do: sample()
      {:sample, [line: 10], []}
      iex> Macro.update_meta(quoted, &Keyword.delete(&1, :line))
      {:sample, [], []}
  """
  @spec update_meta(t, (Keyword.t -> Keyword.t)) :: t
  def update_meta(quoted, fun)
  def update_meta({ left, meta, right }, fun) when is_list(meta) do
    { update_meta(left, fun), fun.(meta), update_meta(right, fun) }
  end
  def update_meta({ left, right }, fun) do
    { update_meta(left, fun), update_meta(right, fun) }
  end
  def update_meta(list, fun) when is_list(list) do
    for x <- list, do: update_meta(x, fun)
  end
  def update_meta(other, _fun) do
    other
  end
  @doc """
  Decomposes a local or remote call into its remote part (when provided),
  function name and argument list.
  Returns `:error` when an invalid call syntax is provided.
  ## Examples
      iex> Macro.decompose_call(quote do: foo)
      { :foo, [] }
      iex> Macro.decompose_call(quote do: foo())
      { :foo, [] }
      iex> Macro.decompose_call(quote do: foo(1, 2, 3))
      { :foo, [1, 2, 3] }
      iex> Macro.decompose_call(quote do: Elixir.M.foo(1, 2, 3))
      { { :__aliases__, [], [:Elixir, :M] }, :foo, [1, 2, 3] }
      iex> Macro.decompose_call(quote do: 42)
      :error
  """
  @spec decompose_call(Macro.t) :: { atom, [Macro.t] } | { Macro.t, atom, [Macro.t] } | :error
  def decompose_call({ { :., _, [remote, function] }, _, args }) when is_tuple(remote) or is_atom(remote),
    do: { remote, function, args }
  def decompose_call({ name, _, args }) when is_atom(name) and is_atom(args),
    do: { name, [] }
  def decompose_call({ name, _, args }) when is_atom(name) and is_list(args),
    do: { name, args }
  def decompose_call(_),
    do: :error
  @doc """
  Recursively escapes a value so it can be inserted
  into a syntax tree.
  One may pass `unquote: true` to `escape/2`
  which leaves `unquote` statements unescaped, effectively
  unquoting the contents on escape.
  ## Examples
      iex> Macro.escape(:foo)
      :foo
      iex> Macro.escape({ :a, :b, :c })
      { :{}, [], [:a, :b, :c] }
      iex> Macro.escape({ :unquote, [], [1] }, unquote: true)
      1
  """
  @spec escape(term) :: Macro.t
  @spec escape(term, Keyword.t) :: Macro.t
  def escape(expr, opts \\ []) do
    elem(:elixir_quote.escape(expr, Keyword.get(opts, :unquote, false)), 0)
  end
  @doc ~S"""
  Unescape the given chars.
  This is the unescaping behaviour used by default in Elixir
  single- and double-quoted strings. Check `unescape_string/2`
  for information on how to customize the escaping map.
  In this setup, Elixir will escape the following: `\a`, `\b`,
  `\d`, `\e`, `\f`, `\n`, `\r`, `\s`, `\t` and `\v`. Octals are
  also escaped according to the latin1 set they represent.
  This function is commonly used on sigil implementations
  (like `~r`, `~s` and others) which receive a raw, unescaped
  string.
  ## Examples
      iex> Macro.unescape_string("example\\n")
      "example\n"
  In the example above, we pass a string with `\n` escaped
  and return a version with it unescaped.
  """
  @spec unescape_string(String.t) :: String.t
  def unescape_string(chars) do
    :elixir_interpolation.unescape_chars(chars)
  end
  @doc ~S"""
  Unescape the given chars according to the map given.
  Check `unescape_string/1` if you want to use the same map
  as Elixir single- and double-quoted strings.
  ## Map
  The map must be a function. The function receives an integer
  representing the codepoint of the character it wants to unescape.
  Here is the default mapping function implemented by Elixir:
      def unescape_map(?a), do: ?\a
      def unescape_map(?b), do: ?\b
      def unescape_map(?d), do: ?\d
      def unescape_map(?e), do: ?\e
      def unescape_map(?f), do: ?\f
      def unescape_map(?n), do: ?\n
      def unescape_map(?r), do: ?\r
      def unescape_map(?s), do: ?\s
      def unescape_map(?t), do: ?\t
      def unescape_map(?v), do: ?\v
      def unescape_map(e),  do: e
  If the `unescape_map` function returns `false`, the char is
  not escaped and `\` is kept in the char list.
  ## Octals
  Octals will by default be escaped unless the map function
  returns `false` for `?0`.
  ## Hex
  Hexadecimals will by default be escaped unless the map function
  returns `false` for `?x`.
  ## Examples
  Using the `unescape_map` function defined above is easy:
      Macro.unescape_string "example\\n", &unescape_map(&1)
  """
  @spec unescape_string(String.t, (non_neg_integer -> non_neg_integer | false)) :: String.t
  def unescape_string(chars, map) do
    :elixir_interpolation.unescape_chars(chars, map)
  end
  @doc """
  Unescape the given tokens according to the default map.
  Check `unescape_string/1` and `unescape_string/2` for more
  information about unescaping.
  Only tokens that are binaries are unescaped, all others are
  ignored. This function is useful when implementing your own
  sigils. Check the implementation of `Kernel.sigil_s/2`
  for examples.
  """
  @spec unescape_tokens([Macro.t]) :: [Macro.t]
  def unescape_tokens(tokens) do
    :elixir_interpolation.unescape_tokens(tokens)
  end
  @doc """
  Unescape the given tokens according to the given map.
  Check `unescape_tokens/1` and `unescape_string/2` for more information.
  """
  @spec unescape_tokens([Macro.t], (non_neg_integer -> non_neg_integer | false)) :: [Macro.t]
  def unescape_tokens(tokens, map) do
    :elixir_interpolation.unescape_tokens(tokens, map)
  end
  @doc """
  Converts the given expression to a binary.
  ## Examples
      iex> Macro.to_string(quote do: foo.bar(1, 2, 3))
      "foo.bar(1, 2, 3)"
  """
  @spec to_string(Macro.t) :: String.t
  @spec to_string(Macro.t, (Macro.t, String.t -> String.t)) :: String.t
  def to_string(tree, fun \\ fn(_ast, string) -> string end)
  # Variables
  def to_string({ var, _, atom } = ast, fun) when is_atom(atom) do
    fun.(ast, atom_to_binary(var))
  end
  # Aliases
  def to_string({ :__aliases__, _, refs } = ast, fun) do
    fun.(ast, Enum.map_join(refs, ".", &call_to_string(&1, fun)))
  end
  # Blocks
  def to_string({ :__block__, _, [expr] } = ast, fun) do
    fun.(ast, to_string(expr, fun))
  end
  def to_string({ :__block__, _, _ } = ast, fun) do
    block = adjust_new_lines block_to_string(ast, fun), "\n  "
    fun.(ast, "(\n  " <> block <> "\n)")
  end
  # Bits containers
  def to_string({ :<<>>, _, args } = ast, fun) do
    fun.(ast, case Enum.map_join(args, ", ", &to_string(&1, fun)) do
      "<" <> rest -> "<< <" <> rest  <> " >>"
      rest -> "<<" <> rest <> ">>"
    end)
  end
  # Tuple containers
  def to_string({ :{}, _, args } = ast, fun) do
    tuple = "{" <> Enum.map_join(args, ", ", &to_string(&1, fun)) <> "}"
    fun.(ast, tuple)
  end
  # Map containers
  def to_string({ :%{}, _, args } = ast, fun) do
    map = "%{" <> map_to_string(args, fun) <> "}"
    fun.(ast, map)
  end
  def to_string({ :%, _, [structname, map] } = ast, fun) do
    { :%{}, _, args } = map
    struct = "%" <> to_string(structname, fun) <> "{" <> map_to_string(args, fun) <> "}"
    fun.(ast, struct)
  end
  # Fn keyword
  def to_string({ :fn, _, [{ :->, _, [_, tuple] }] = arrow } = ast, fun)
      when not is_tuple(tuple) or elem(tuple, 0) != :__block__ do
    fun.(ast, "fn " <> arrow_to_string(arrow, fun) <> " end")
  end
  def to_string({ :fn, _, [{ :->, _, _ }] = block } = ast, fun) do
    fun.(ast, "fn " <> block_to_string(block, fun) <> "\nend")
  end
  def to_string({ :fn, _, block } = ast, fun) do
    block = adjust_new_lines block_to_string(block, fun), "\n  "
    fun.(ast, "fn\n  " <> block <> "\nend")
  end
  # left -> right
  def to_string([{ :->, _, _ }|_] = ast, fun) do
    fun.(ast, "(" <> arrow_to_string(ast, fun, true) <> ")")
  end
  # left when right
  def to_string({ :when, _, [left, right] } = ast, fun) do
    if right != [] and Keyword.keyword?(right) do
      right = kw_list_to_string(right, fun)
    else
      right = fun.(ast, op_to_string(right, fun, :when, :right))
    end
    fun.(ast, op_to_string(left, fun, :when, :left) <> " when " <> right)
  end
  # Binary ops
  def to_string({ op, _, [left, right] } = ast, fun) when op in unquote(@binary_ops) do
    fun.(ast, op_to_string(left, fun, op, :left) <> " #{op} " <> op_to_string(right, fun, op, :right))
  end
  # Splat when
  def to_string({ :when, _, args } = ast, fun) do
    { left, right } = :elixir_utils.split_last(args)
    fun.(ast, "(" <> Enum.map_join(left, ", ", &to_string(&1, fun)) <> ") when " <> to_string(right, fun))
  end
  # Unary ops
  def to_string({ unary, _, [{ binary, _, [_, _] } = arg] } = ast, fun)
      when unary in unquote(@unary_ops) and binary in unquote(@binary_ops) do
    fun.(ast, atom_to_binary(unary) <> "(" <> to_string(arg, fun) <> ")")
  end
  def to_string({ :not, _, [arg] } = ast, fun)  do
    fun.(ast, "not " <> to_string(arg, fun))
  end
  def to_string({ op, _, [arg] } = ast, fun) when op in unquote(@unary_ops) do
    fun.(ast, atom_to_binary(op) <> to_string(arg, fun))
  end
  # Access
  def to_string({ { :., _, [Kernel, :access] }, _, [left, right] } = ast, fun) do
    fun.(ast, to_string(left, fun) <> to_string(right, fun))
  end
  # All other calls
  def to_string({ target, _, args } = ast, fun) when is_list(args) do
    { list, last } = :elixir_utils.split_last(args)
    fun.(ast, case kw_blocks?(last) do
      true  -> call_to_string_with_args(target, list, fun) <> kw_blocks_to_string(last, fun)
      false -> call_to_string_with_args(target, args, fun)
    end)
  end
  # Two-item tuples
  def to_string({ left, right }, fun) do
    to_string({ :{}, [], [left, right] }, fun)
  end
  # Lists
  def to_string(list, fun) when is_list(list) do
    fun.(list, cond do
      list == [] ->
        "[]"
      :io_lib.printable_list(list) ->
        "'" <> Inspect.BitString.escape(String.from_char_list!(list), ?') <> "'"
      Keyword.keyword?(list) ->
        "[" <> kw_list_to_string(list, fun) <> "]"
      true ->
        "[" <> Enum.map_join(list, ", ", &to_string(&1, fun)) <> "]"
    end)
  end
  # All other structures
  def to_string(other, fun), do: fun.(other, inspect(other, records: false))
  # Block keywords
  @kw_keywords [:do, :catch, :rescue, :after, :else]
  defp kw_blocks?([_|_] = kw) do
    Enum.all?(kw, &match?({x, _} when x in unquote(@kw_keywords), &1))
  end
  defp kw_blocks?(_), do: false
  defp module_to_string(atom, _fun) when is_atom(atom), do: inspect(atom, records: false)
  defp module_to_string(other, fun), do: call_to_string(other, fun)
  defp call_to_string(atom, _fun) when is_atom(atom), do: atom_to_binary(atom)
  defp call_to_string({ :., _, [arg] }, fun),         do: module_to_string(arg, fun) <> "."
  defp call_to_string({ :., _, [left, right] }, fun), do: module_to_string(left, fun) <> "." <> call_to_string(right, fun)
  defp call_to_string(other, fun),                    do: to_string(other, fun)
  defp call_to_string_with_args(target, args, fun) do
    target = call_to_string(target, fun)
    args = args_to_string(args, fun)
    target <> "(" <> args <> ")"
  end
  defp args_to_string(args, fun) do
    { list, last } = :elixir_utils.split_last(args)
    if last != [] and Keyword.keyword?(last) do
      args = Enum.map_join(list, ", ", &to_string(&1, fun))
      if list != [], do: args = args <> ", "
      args <> kw_list_to_string(last, fun)
    else
      Enum.map_join(args, ", ", &to_string(&1, fun))
    end
  end
  defp kw_blocks_to_string(kw, fun) do
    Enum.reduce(@kw_keywords, " ", fn(x, acc) ->
      case Keyword.has_key?(kw, x) do
        true  -> acc <> kw_block_to_string(x, Keyword.get(kw, x), fun)
        false -> acc
      end
    end) <> "end"
  end
  defp kw_block_to_string(key, value, fun) do
    block = adjust_new_lines block_to_string(value, fun), "\n  "
    atom_to_binary(key) <> "\n  " <> block <> "\n"
  end
  defp block_to_string([{ :->, _, _ }|_] = block, fun) do
    Enum.map_join(block, "\n", fn({ :->, _, [left, right] }) ->
      left = comma_join_or_empty_paren(left, fun, false)
      left <> "->\n  " <> adjust_new_lines block_to_string(right, fun), "\n  "
    end)
  end
  defp block_to_string({ :__block__, _, exprs }, fun) do
    Enum.map_join(exprs, "\n", &to_string(&1, fun))
  end
  defp block_to_string(other, fun), do: to_string(other, fun)
  defp map_to_string([{:|, _, [update_map, update_args]}], fun) do
    to_string(update_map, fun) <> " | " <> map_to_string(update_args, fun)
  end
  defp map_to_string(list, fun) do
    cond do
      Keyword.keyword?(list) -> kw_list_to_string(list, fun)
      true -> map_list_to_string(list, fun)
    end
  end
  defp kw_list_to_string(list, fun) do
    Enum.map_join(list, ", ", fn { key, value } ->
      atom_to_binary(key) <> ": " <> to_string(value, fun)
    end)
  end
  defp map_list_to_string(list, fun) do
    Enum.map_join(list, ", ", fn { key, value } ->
      to_string(key, fun) <> " => " <> to_string(value, fun)
    end)
  end
  defp parenthise(expr, fun) do
    "(" <> to_string(expr, fun) <> ")"
  end
  defp op_to_string({ op, _, [_, _] } = expr, fun, parent_op, side) when op in unquote(@binary_ops) do
    { parent_assoc, parent_prec } = binary_op_props(parent_op)
    { _, prec }                   = binary_op_props(op)
    cond do
      parent_prec < prec -> to_string(expr, fun)
      parent_prec > prec -> parenthise(expr, fun)
      true ->
        # parent_prec == prec, so look at associativity.
        if parent_assoc == side do
          to_string(expr, fun)
        else
          parenthise(expr, fun)
        end
    end
  end
  defp op_to_string(expr, fun, _, _), do: to_string(expr, fun)
  defp arrow_to_string(pairs, fun, paren \\ false) do
    Enum.map_join(pairs, "; ", fn({ :->, _, [left, right] }) ->
      left = comma_join_or_empty_paren(left, fun, paren)
      left <> "-> " <> to_string(right, fun)
    end)
  end
  defp comma_join_or_empty_paren([], _fun, true),  do: "() "
  defp comma_join_or_empty_paren([], _fun, false), do: ""
  defp comma_join_or_empty_paren(left, fun, _) do
    Enum.map_join(left, ", ", &to_string(&1, fun)) <> " "
  end
  defp adjust_new_lines(block, replacement) do
    for <<x <- block>>, into: "" do
      case x == ?\n do
        true  -> replacement
        false -> <<x>>
      end
    end
  end
  @doc """
  Receives an AST node and expands it once.
  The following contents are expanded:
  * Macros (local or remote);
  * Aliases are expanded (if possible) and return atoms;
  * Pseudo-variables (`__ENV__`, `__MODULE__` and `__DIR__`);
  * Module attributes reader (`@foo`);
  If the expression cannot be expanded, it returns the expression
  itself. Notice that `expand_once/2` performs the expansion just
  once and it is not recursive. Check `expand/2` for expansion
  until the node can no longer be expanded.
  ## Examples
  In the example below, we have a macro that generates a module
  with a function named `name_length` that returns the length
  of the module name. The value of this function will be calculated
  at compilation time and not at runtime.
  Consider the implementation below:
      defmacro defmodule_with_length(name, do: block) do
        length = length(atom_to_list(name))
        quote do
          defmodule unquote(name) do
            def name_length, do: unquote(length)
            unquote(block)
          end
        end
      end
  When invoked like this:
      defmodule_with_length My.Module do
        def other_function, do: ...
      end
  The compilation will fail because `My.Module` when quoted
  is not an atom, but a syntax tree as follows:
      {:__aliases__, [], [:My, :Module] }
  That said, we need to expand the aliases node above to an
  atom, so we can retrieve its length. Expanding the node is
  not straightforward because we also need to expand the
  caller aliases. For example:
      alias MyHelpers, as: My
      defmodule_with_length My.Module do
        def other_function, do: ...
      end
  The final module name will be `MyHelpers.Module` and not
  `My.Module`. With `Macro.expand/2`, such aliases are taken
  into consideration. Local and remote macros are also
  expanded. We could rewrite our macro above to use this
  function as:
      defmacro defmodule_with_length(name, do: block) do
        expanded = Macro.expand(name, __CALLER__)
        length   = length(atom_to_list(expanded))
        quote do
          defmodule unquote(name) do
            def name_length, do: unquote(length)
            unquote(block)
          end
        end
      end
  """
  def expand_once(ast, env) do
    elem(do_expand_once(ast, env), 0)
  end
  defp do_expand_once({ :__aliases__, _, _ } = original, env) do
    case :elixir_aliases.expand(original, env.aliases, env.macro_aliases, env.lexical_tracker) do
      receiver when is_atom(receiver) ->
        :elixir_lexical.record_remote(receiver, env.lexical_tracker)
        { receiver, true }
      aliases ->
        aliases = for alias <- aliases, do: elem(do_expand_once(alias, env), 0)
        case :lists.all(&is_atom/1, aliases) do
          true ->
            receiver = :elixir_aliases.concat(aliases)
            :elixir_lexical.record_remote(receiver, env.lexical_tracker)
            { receiver, true }
          false ->
            { original, false }
        end
    end
  end
  # Expand @ calls
  defp do_expand_once({ :@, _, [{ name, _, args }] } = original, env) when is_atom(args) or args == [] do
    case (module = env.module) && Module.open?(module) do
      true  -> { Module.get_attribute(module, name), true }
      false -> { original, false }
    end
  end
  # Expand pseudo-variables
  defp do_expand_once({ :__MODULE__, _, atom }, env) when is_atom(atom),
    do: { env.module, true }
  defp do_expand_once({ :__DIR__, _, atom }, env)    when is_atom(atom),
    do: { :filename.dirname(env.file), true }
  defp do_expand_once({ :__ENV__, _, atom }, env)    when is_atom(atom),
    do: { { :{}, [], tuple_to_list(env) }, true }
  defp do_expand_once({ { :., _, [{ :__ENV__, _, atom }, field] }, _, [] } = original, env) when
      is_atom(atom) and is_atom(field) do
    case :erlang.function_exported(Macro.Env, field, 1) do
      true  -> { apply(env, field, []), true }
      false -> { original, false }
    end
  end
  # Expand possible macro import invocation
  defp do_expand_once({ atom, meta, context } = original, env)
      when is_atom(atom) and is_list(meta) and is_atom(context) do
    if :lists.member({ atom, Keyword.get(meta, :counter, context) }, env.vars) do
      { original, false }
    else
      case do_expand_once({ atom, meta, [] }, env) do
        { _, true } = exp -> exp
        { _, false }      -> { original, false }
      end
    end
  end
  defp do_expand_once({ atom, meta, args } = original, env)
      when is_atom(atom) and is_list(args) and is_list(meta) do
    arity = length(args)
    if :elixir_import.special_form(atom, arity) do
      { original, false }
    else
      module = env.module
      extra  = if function_exported?(module, :__info__, 1) do
        [{ module, module.__info__(:macros) }]
      else
        []
      end
      expand = :elixir_dispatch.expand_import(meta, { atom, length(args) }, args,
                                              :elixir_env.ex_to_env(env), extra)
      case expand do
        { :ok, receiver, quoted } ->
          next = :elixir_counter.next
          { :elixir_quote.linify_with_context_counter(0, { receiver, next }, quoted), true }
        { :ok, _receiver } ->
          { original, false }
        :error ->
          { original, false }
      end
    end
  end
  # Expand possible macro require invocation
  defp do_expand_once({ { :., _, [left, right] }, meta, args } = original, env) when is_atom(right) do
    { receiver, _ } = do_expand_once(left, env)
    case is_atom(receiver) do
      false -> { original, false }
      true  ->
        expand = :elixir_dispatch.expand_require(meta, receiver, { right, length(args) },
          args, :elixir_env.ex_to_env(env))
        case expand do
          { :ok, receiver, quoted } ->
            next = :elixir_counter.next
            { :elixir_quote.linify_with_context_counter(0, { receiver, next }, quoted), true }
          :error ->
            { original, false }
        end
    end
  end
  # Anything else is just returned
  defp do_expand_once(other, _env), do: { other, false }
  @doc """
  Receives an AST node and expands it until it can no longer
  be expanded.
  This function uses `expand_once/2` under the hood. Check
  `expand_once/2` for more information and examples.
  """
  def expand(tree, env) do
    expand_until({ tree, true }, env)
  end
  defp expand_until({ tree, true }, env) do
    expand_until(do_expand_once(tree, env), env)
  end
  defp expand_until({ tree, false }, _env) do
    tree
  end
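  # A hedged usage sketch (illustrative, not part of the original module):
  # `expand_once/2` performs a single expansion step, while `expand/2` keeps
  # going until a fixed point is reached.
  #
  #     env = __ENV__
  #     ast = quote do: __MODULE__
  #     Macro.expand_once(ast, env) #=> the caller's module atom
  #     Macro.expand(ast, env)      #=> same result; one step suffices here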
  @doc """
  Recursively traverses the quoted expression checking if all sub-terms are
  safe.
  Terms are considered safe if they represent data structures and don't actually
  evaluate code. Returns `:ok` unless a given term is unsafe,
  which is returned as `{ :unsafe, term }`.
  """
  def safe_term(terms) do
    do_safe_term(terms) || :ok
  end
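  # Hedged examples (illustrative, not part of the original module):
  #
  #     Macro.safe_term(quote do: {:ok, [1, 2, 3]})
  #     #=> :ok
  #     Macro.safe_term(quote do: System.cmd("rm", ["-rf", "/"]))
  #     #=> {:unsafe, ast} where ast is the remote call node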
  defp do_safe_term({ local, _, terms }) when local in [:{}, :%{}, :__aliases__] do
    do_safe_term(terms)
  end
  defp do_safe_term({ unary, _, [term] }) when unary in [:+, :-] do
    do_safe_term(term)
  end
  defp do_safe_term({ left, right }), do: do_safe_term(left) || do_safe_term(right)
  defp do_safe_term(terms) when is_list(terms),  do: Enum.find_value(terms, &do_safe_term(&1))
  defp do_safe_term(terms) when is_tuple(terms), do: { :unsafe, terms }
  defp do_safe_term(_), do: nil
end | 
	lib/elixir/lib/macro.ex | 0.7413 | 0.538923 | 
	macro.ex | 
	starcoder | 
| 
	defmodule Extatus.Metric.Histogram do
  @moduledoc """
  This module defines a wrapper over `Prometheus.Metric.Histogram` functions to
  be compatible with the `Extatus` way of handling metrics.
  """
  alias Extatus.Settings
  @metric Settings.extatus_histogram_mod()
  @doc """
  Creates a histogram using the `name` of a metric.
  """
  defmacro new(name) do
    module = __MODULE__
    caller = __CALLER__.module()
    metric = @metric
    quote do
      require Prometheus.Metric.Histogram
      name = unquote(name)
      case unquote(caller).get_spec(name) do
        {unquote(module), spec} ->
          unquote(metric).new(spec)
        _ ->
          raise %Prometheus.UnknownMetricError{registry: nil, name: name}
      end
    end
  end
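  # Hedged usage sketch: assumes a caller module that implements `get_spec/1`
  # returning `{Extatus.Metric.Histogram, spec}` for the given metric name
  # (the metric name below is made up for illustration):
  #
  #     require Extatus.Metric.Histogram, as: Histogram
  #     Histogram.new(:request_duration_seconds)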
  @doc """
  Creates a histogram using the `name` of a metric. If the histogram already
  exists, returns `false`.
  """
  defmacro declare(name) do
    module = __MODULE__
    caller = __CALLER__.module()
    metric = @metric
    quote do
      require Prometheus.Metric.Histogram
      name = unquote(name)
      case unquote(caller).get_spec(name) do
        {unquote(module), spec} ->
          unquote(metric).declare(spec)
        _ ->
          raise %Prometheus.UnknownMetricError{registry: nil, name: name}
      end
    end
  end
  @doc """
  Observes the given `amount` for the histogram identified by `name` and
  `values` (keyword list with the correspondence between labels and values).
  """
  defmacro observe(name, values, amount \\ 1) do
    module = __MODULE__
    caller = __CALLER__.module()
    metric = @metric
    quote do
      require Prometheus.Metric.Histogram
      name = unquote(name)
      case unquote(caller).gen_spec(name, unquote(values)) do
        {unquote(module), spec} ->
          unquote(metric).observe(spec, unquote(amount))
        _ ->
          raise %Prometheus.UnknownMetricError{registry: nil, name: name}
      end
    end
  end
  @doc """
  Observes the given `amount` for the histogram identified by `name` and
  `values` (keyword list with the correspondence between labels and values). If
  `amount` has ever been a float (even once!), you shouldn't use
  `observe/3` after `dobserve/3`.
  """
  defmacro dobserve(name, values, amount \\ 1) do
    module = __MODULE__
    caller = __CALLER__.module()
    metric = @metric
    quote do
      require Prometheus.Metric.Histogram
      name = unquote(name)
      case unquote(caller).gen_spec(name, unquote(values)) do
        {unquote(module), spec} ->
          unquote(metric).dobserve(spec, unquote(amount))
        _ ->
          raise %Prometheus.UnknownMetricError{registry: nil, name: name}
      end
    end
  end
  @doc """
  Observes, for the histogram identified by `name` and `values` (keyword list
  with the correspondence between labels and values), the amount of time spent
  executing `function`.
  """
  defmacro observe_duration(name, values, function) do
    module = __MODULE__
    caller = __CALLER__.module()
    metric = @metric
    quote do
      require Prometheus.Metric.Histogram
      name = unquote(name)
      case unquote(caller).gen_spec(name, unquote(values)) do
        {unquote(module), spec} ->
          unquote(metric).observe_duration(spec, unquote(function))
        _ ->
          raise %Prometheus.UnknownMetricError{registry: nil, name: name}
      end
    end
  end
  @doc """
  Removes histogram series identified by `name` and `values` (keyword list with
  the correspondence between labels and values).
  """
  defmacro remove(name, values) do
    module = __MODULE__
    caller = __CALLER__.module()
    metric = @metric
    quote do
      require Prometheus.Metric.Histogram
      name = unquote(name)
      case unquote(caller).gen_spec(name, unquote(values)) do
        {unquote(module), spec} ->
          unquote(metric).remove(spec)
        _ ->
          raise %Prometheus.UnknownMetricError{registry: nil, name: name}
      end
    end
  end
  @doc """
  Resets the value of the histogram identified by `name` and `values`
  (keyword list with the correspondence between labels and values).
  """
  defmacro reset(name, values) do
    module = __MODULE__
    caller = __CALLER__.module()
    metric = @metric
    quote do
      require Prometheus.Metric.Histogram
      name = unquote(name)
      case unquote(caller).gen_spec(name, unquote(values)) do
        {unquote(module), spec} ->
          unquote(metric).reset(spec)
        _ ->
          raise %Prometheus.UnknownMetricError{registry: nil, name: name}
      end
    end
  end
  @doc """
  Returns the value of the histogram identified by `name` and `values`
  (keyword list with the correspondence between labels and values).
  """
  defmacro value(name, values) do
    module = __MODULE__
    caller = __CALLER__.module()
    metric = @metric
    quote do
      require Prometheus.Metric.Histogram
      name = unquote(name)
      case unquote(caller).gen_spec(name, unquote(values)) do
        {unquote(module), spec} ->
          unquote(metric).value(spec)
        _ ->
          raise %Prometheus.UnknownMetricError{registry: nil, name: name}
      end
    end
  end
end | 
	lib/extatus/metric/histogram.ex | 0.896 | 0.655164 | 
	histogram.ex | 
	starcoder | 
| 
	defmodule Snitch.Data.Model.Promotion.Applicability do
  @moduledoc """
  Exposes functions related to promotion level checks.
  """
  use Snitch.Data.Model
  alias Snitch.Data.Schema.Promotion
  @errors %{
    not_found: "promotion not found",
    inactive: "promotion is not active",
    expired: "promotion has expired"
  }
  @success %{
    promo_active: "promotion active",
    has_actions: "has actions"
  }
  @doc """
  Checks if a valid coupon exists for the supplied coupon_code.
  Besides matching the `code`, the function also checks that the coupon is not
  `archived`.
  """
  @spec valid_coupon_check(String.t()) :: {:ok, Promotion.t()} | {:error, String.t()}
  def valid_coupon_check(coupon_code) do
    case Repo.get_by(Promotion, code: coupon_code, archived_at: 0) do
      nil ->
        {:error, @errors.not_found}
      promotion ->
        {:ok, promotion}
    end
  end
  def promotion_active(promotion) do
    if promotion.active? do
      {true, @success.promo_active}
    else
      {false, @errors.inactive}
    end
  end
  def promotion_actions_exist(promotion) do
    promotion = Repo.preload(promotion, :actions)
    case promotion.actions do
      [] ->
        {false, @errors.inactive}
      _ ->
        {true, @success.has_actions}
    end
  end
  @doc """
  Checks the `starts_at` date for the promotion.
  If `starts_at` is in the past, `true` is returned. Otherwise, if `starts_at`
  is in the future, the promotion has not started yet and it is reported as
  not active.
  """
  def starts_at_check(promotion) do
    if DateTime.compare(DateTime.utc_now(), promotion.starts_at) == :gt do
      {true, @success.promo_active}
    else
      {false, @errors.inactive}
    end
  end
  @doc """
  Checks the `expires_at` date for the promotion.
  If `expires_at` is in the past, the coupon has expired; otherwise it is
  still active.
  """
  def expires_at_check(promotion) do
    if DateTime.compare(DateTime.utc_now(), promotion.expires_at) == :lt do
      {true, @success.promo_active}
    else
      {false, @errors.expired}
    end
  end
  @doc """
  Checks the `usage_limit` for the promotion.
  If the usage limit has been reached, returns `{false, message}` and the
  coupon code is considered expired; otherwise returns `{true, message}`.
  ### Note
  If the `usage_limit` property of the promotion is set to zero, it is
  assumed that the store keeper wants the usage limit to be infinite.
  """
  def usage_limit_check(promotion) do
    cond do
      promotion.usage_limit == 0 ->
        {true, @success.promo_active}
      promotion.usage_limit > promotion.current_usage_count ->
        {true, @success.promo_active}
      true ->
        {false, @errors.expired}
    end
  end
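  # Hedged examples (plain maps stand in for promotion structs):
  #
  #     usage_limit_check(%{usage_limit: 0, current_usage_count: 100})
  #     #=> {true, "promotion active"}
  #     usage_limit_check(%{usage_limit: 5, current_usage_count: 5})
  #     #=> {false, "promotion has expired"}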
end | 
	apps/snitch_core/lib/core/data/model/promotion/promotion_applicability.ex | 0.828106 | 0.414247 | 
	promotion_applicability.ex | 
	starcoder | 
| 
	defmodule Vow.Pat do
  @moduledoc """
  This module provides a vow for wrapping a pattern and the `Vow.Pat.pat/1`
  macro for conveniently wrapping the pattern and packaging it in `Vow.Pat.t`.
  ## Note
  Installation of the `Expat` package is recommended if using this module as
  `Expat` provides excellent utilities for defining and reusing patterns.
    ```
    def deps do
      [{:expat, "~> 1.0"}]
    end
    ```
  """
  use Vow.Utils.AccessShortcut,
    type: :passthrough,
    passthrough_key: :pat
  import Kernel, except: [match?: 2]
  defstruct [:pat]
  @type t :: %__MODULE__{
          pat: Macro.t()
        }
  @doc false
  @spec new(Macro.t()) :: t | no_return
  def new(pattern) do
    %__MODULE__{pat: pattern}
  end
  @doc """
  Wraps a pattern and stores it in `Vow.Pat.t` for later matching.
  ## Examples
    ```
    iex> import Vow.Pat
    ...> p = pat({:ok, _})
    ...> Vow.conform(p, {:ok, :foo})
    {:ok, {:ok, :foo}}
    ```
  """
  @spec pat(Macro.t()) :: Macro.t()
  defmacro pat(pat) do
    quote do
      Vow.Pat.new(unquote(Macro.escape(Macro.expand(pat, __ENV__))))
    end
  end
  @doc """
  A convenience function that checks whether the right side (an expression)
  matches the left side (a `Vow.Pat`).
  """
  @spec match?(t, expr :: term) :: boolean | no_return
  def match?(%__MODULE__{pat: pat}, expr) do
    {result, _bindings} =
      Code.eval_quoted(
        quote do
          Kernel.match?(unquote(pat), unquote(expr))
        end
      )
    result
  end
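  # Hedged example (illustrative, mirrors the doctest style above):
  #
  #     iex> import Vow.Pat
  #     ...> Vow.Pat.match?(pat({:ok, _}), {:ok, 1})
  #     true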
  defimpl Vow.Conformable do
    @moduledoc false
    import Vow.FunctionWrapper, only: [wrap: 2]
    alias Vow.ConformError
    @impl Vow.Conformable
    def conform(vow, path, via, route, val) do
      if @for.match?(vow, val) do
        {:ok, val}
      else
        pred = wrap(&@for.match?(vow, &1), vow: vow)
        {:error, [ConformError.new_problem(pred, path, via, route, val)]}
      end
    rescue
      error ->
        msg = Exception.message(error)
        {:error, [ConformError.new_problem(vow, path, via, route, val, msg)]}
    end
    @impl Vow.Conformable
    def unform(_vow, val) do
      {:ok, val}
    end
    @impl Vow.Conformable
    def regex?(_vow), do: false
  end
  defimpl Inspect do
    @moduledoc false
    @impl Inspect
    def inspect(%@for{pat: pat}, _opts) do
      Macro.to_string(pat)
    end
  end
  if Code.ensure_loaded?(StreamData) do
    defimpl Vow.Generatable do
      @moduledoc false
      alias Vow.Utils
      import StreamData
      @impl Vow.Generatable
      def gen(vow, opts) do
        ignore_warn? = Keyword.get(opts, :ignore_warn?, false)
        _ = Utils.no_override_warn(vow, ignore_warn?)
        {:ok, filter(term(), &@for.match?(vow, &1))}
      end
    end
  end
end | 
	lib/vow/pat.ex | 0.905706 | 0.835953 | 
	pat.ex | 
	starcoder | 
| 
	defmodule AtomTweaks.Tweaks.Tweak do
  @moduledoc """
  Represents a tweak.
  ## Fields
  * `code` - Source code of the tweak
  * `description` - Markdown description of what the tweak does
  * `title` - Title of the tweak
  * `type` - The type of the tweak
  ### Associations
  Must be preloaded before they can be used.
  * `forked_from` - The `AtomTweaks.Tweaks.Tweak` this tweak was forked from
  * `forks` - The `AtomTweaks.Tweaks.Tweak` records that have been forked from this tweak
  * `stargazers` - The `AtomTweaks.Accounts.User` records that have starred this tweak
  * `user` - The `AtomTweaks.Accounts.User` that created or forked this tweak
  """
  use Ecto.Schema
  import Ecto.Changeset
  import Ecto.Query
  alias Ecto.Changeset
  alias AtomTweaks.Accounts.User
  alias AtomTweaks.Ecto.Markdown
  alias AtomTweaks.Tweaks.Star
  alias AtomTweaks.Tweaks.Tweak
  alias AtomTweaksWeb.PageMetadata.Metadata
  @type t :: %__MODULE__{}
  @changeset_keys ~w{code created_by description parent title type}a
  @primary_key {:id, :binary_id, autogenerate: true}
  schema "tweaks" do
    field(:code, :string)
    field(:description, Markdown)
    field(:title, :string)
    field(:type, :string)
    belongs_to(:forked_from, Tweak, foreign_key: :parent, type: :binary_id)
    belongs_to(:user, User, foreign_key: :created_by, type: :binary_id)
    has_many(:forks, Tweak, foreign_key: :parent)
    many_to_many(
      :stargazers,
      User,
      join_through: Star,
      on_replace: :delete,
      on_delete: :delete_all
    )
    timestamps()
  end
  @doc false
  def changeset(struct, params \\ %{}) do
    struct
    |> cast(params, @changeset_keys)
    |> validate_required([:title, :code, :created_by, :type])
    |> validate_inclusion(:type, ["init", "style"])
  end
  @doc """
  Filters `query` to include only tweaks of `type`.
  If `nil` is given for the type, the query is not filtered. This allows for easily building the
  query in a pipeline.
  """
  def filter_by_type(query, nil), do: query
  def filter_by_type(query, type), do: from(t in query, where: t.type == ^type)
  @doc """
  Filters `query` to include only tweaks that were created by `user`.
  If `nil` is given for the user, the query is not filtered. This allows for easily building the
  query in a pipeline.
  """
  def filter_by_user(query, nil), do: query
  def filter_by_user(query, user = %User{}), do: from(t in query, where: t.created_by == ^user.id)
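  # Hedged usage sketch: composing the filters above in a query pipeline
  # (assumes a `Repo` module and a `user` struct in scope):
  #
  #     Tweak
  #     |> Tweak.filter_by_type("init")
  #     |> Tweak.filter_by_user(user)
  #     |> Repo.all()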
  def fork_params(tweak, user) do
    tweak
    |> copy_params(@changeset_keys)
    |> Map.merge(%{created_by: user.id, parent: tweak.id})
  end
  def include_forks(query, true), do: query
  def include_forks(query, _), do: from(t in query, where: is_nil(t.parent))
  @doc """
  Validates that the person forking the tweak is different from the original author of the tweak.
  """
  @spec validate_fork_by_different_user(Changeset.t(), t() | binary) :: Changeset.t()
  def validate_fork_by_different_user(changeset, original_tweak)
  def validate_fork_by_different_user(changeset, %Tweak{created_by: created_by}) do
    validate_fork_by_different_user(changeset, created_by)
  end
  def validate_fork_by_different_user(changeset, original_id) when is_binary(original_id) do
    validate_change(changeset, :created_by, fn _field, creator_id ->
      if creator_id == original_id do
        [{:created_by, "cannot fork your own tweak"}]
      else
        []
      end
    end)
  end
  defimpl Metadata do
    def to_metadata(tweak) do
      [
        [property: "og:title", content: tweak.title],
        [property: "og:description", content: tweak.code]
      ]
    end
  end
  # Copy only `keys` out of `map` into a new map
  defp copy_params(map, keys) do
    Enum.reduce(keys, %{}, fn key, acc -> Map.put(acc, key, Map.fetch!(map, key)) end)
  end
end | 
	lib/atom_tweaks/tweaks/tweak.ex | 0.79854 | 0.514095 | 
	tweak.ex | 
	starcoder | 
| 
	defmodule ExStoneOpenbank.TeslaHelper do
  @moduledoc """
  Helper functions for using Mox with Tesla adapters.
  """
  import Mox
  import Tesla.Mock, only: [json: 2]
  import ExUnit.Assertions
  alias ExStoneOpenbank.TeslaMock
  defmodule RequestMatchError do
    defexception [:message]
    def exception(request: request) do
      message = """
      A Tesla mock request could not be matched in an `expect_call`.
      This usually happens when the fun head can't match on an incoming request. Please,
      check if URL, method and other request data can match the expectation.
      Incoming request: #{inspect(request)}
      """
      %__MODULE__{message: message}
    end
  end
  @doc "Helper for expecting a Tesla call"
  @spec expect_call(n :: integer(), call_fun :: (Tesla.Env.t() -> {:ok | :error, Tesla.Env.t()})) ::
          term()
  def expect_call(n \\ 1, call_fun) do
    unless Function.info(call_fun, :arity) == {:arity, 1} do
      raise "expect_call must receive a function with arity 1"
    end
    TeslaMock
    |> expect(:call, n, fn request, _opts ->
      try do
        call_fun.(request)
      rescue
        FunctionClauseError ->
          reraise RequestMatchError, [request: request], __STACKTRACE__
      end
    end)
  end
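  # Hedged usage sketch (the URL and response body are made up for
  # illustration):
  #
  #     expect_call(fn %{method: :get, url: "https://example.com/ping"} ->
  #       json_response(%{"pong" => true})
  #     end)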
  @doc "Helper for building a JSON Tesla.Env response"
  @spec json_response(body :: map() | String.t() | integer() | boolean(), status :: pos_integer()) ::
          {:ok, Tesla.Env.t()}
  def json_response(body, status \\ 200) do
    {:ok, json(body, status: status)}
  end
  @doc "Expects an authenticate call"
  @spec expect_authentication(client_id :: String.t(), {:ok, Tesla.Env.t()}) :: term()
  def expect_authentication(client_id, response \\ nil) do
    expect_call(1, fn
      %{headers: [{"content-type", "application/x-www-form-urlencoded"}]} = env ->
        assert_authentication(client_id, response, env)
    end)
  end
  def assert_authentication(client_id, response, env) do
    assert env.body =~ "client_id=#{client_id}"
    assert env.body =~ "grant_type=client_credentials"
    assert env.body =~
             "client_assertion_type=#{
               URI.encode_www_form("urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
             }"
    if response,
      do: response,
      else: json_response(%{access_token: "token"})
  end
end | 
	test/support/tesla_helper.ex | 0.736401 | 0.438785 | 
	tesla_helper.ex | 
	starcoder | 
| 
	defmodule BowlingGame do
  defstruct scores: List.duplicate([0, 0], 12),
            current_frame: 1,
            roll_in_frame: 1
end
defmodule Bowling do
  def start do
    %BowlingGame{}
  end
  def roll(_, score) when score < 0 do
    {:error, "Negative roll is invalid"}
  end
  def roll(_, score) when score > 10 do
    {:error, "Pin count exceeds pins on the lane"}
  end
  def roll(game, score) do
    updates = update_score(game.roll_in_frame, game, score)
    cond do
      too_many_frames?(updates) ->
        {:error, "Cannot roll after game is over"}
      valid_updates?(updates) ->
        {:ok, updates}
      true ->
        {:error, "Pin count exceeds pins on the lane"}
    end
  end
  defp update_score(1, game, score) do
    current_frame = game.current_frame
    scores = List.replace_at(game.scores, current_frame - 1, [score, 0])
    cond do
      score == 10 ->
        %{game | current_frame: current_frame + 1, scores: scores}
      true ->
        %{game | roll_in_frame: 2, scores: scores}
    end
  end
  defp update_score(2, game, score) do
    current_frame = game.current_frame
    old_scores = game.scores
    old_frame_values = Enum.at(old_scores, current_frame - 1)
    new_frame_values = List.replace_at(old_frame_values, 1, score)
    new_scores = List.replace_at(old_scores, current_frame - 1, new_frame_values)
    %{game | scores: new_scores, roll_in_frame: 1, current_frame: current_frame + 1}
  end
  defp valid_updates?(updates) do
    Enum.all?(updates.scores, fn frame -> Enum.sum(frame) <= 10 end)
  end
  def score(game) do
    cond do
      game.current_frame < 10 ->
        {:error, "Score cannot be taken until the end of the game"}
      bonus_roll_remaining?(game) ->
        {:error, "Score cannot be taken until the end of the game"}
      true ->
        {:ok, parse_scores(game.scores)}
    end
  end
  defp bonus_roll_remaining?(game) do
    final_frame = Enum.at(game.scores, 9)
    cond do
      strike?(final_frame) ->
        cond do
          strike?(Enum.at(game.scores, 10)) ->
            game.current_frame == 12 && game.roll_in_frame == 1
          true ->
            game.current_frame < 12
        end
      spare?(final_frame) ->
        game.current_frame == 11 && game.roll_in_frame == 1
      true ->
        false
    end
  end
  defp too_many_frames?(game) do
    final_frame = Enum.at(game.scores, 9)
    bonus_frame_1 = Enum.at(game.scores, 10)
    bonus_frame_2 = Enum.at(game.scores, 11)
    cond do
      # If last frame is all strikes:
      strike?(final_frame) && strike?(bonus_frame_1) && strike?(bonus_frame_2) ->
        game.current_frame == 13 && game.roll_in_frame == 2
      # If last frame has two strikes:
      strike?(final_frame) && strike?(bonus_frame_1) && not strike?(bonus_frame_2) ->
        game.current_frame == 13 && game.roll_in_frame == 1
      # If last frame has one strike and some other combination
      strike?(final_frame) && not strike?(bonus_frame_1) ->
        game.current_frame == 12 && game.roll_in_frame == 2
      # If last frame has a spare, and one strike
      spare?(final_frame) && strike?(bonus_frame_1) ->
        game.current_frame == 12 && game.roll_in_frame == 2
      # If last frame has a spare and some other combination
      spare?(final_frame) && not strike?(bonus_frame_1) ->
        game.current_frame == 12 && game.roll_in_frame == 1
      # All of the others
      true ->
        game.current_frame == 11 && game.roll_in_frame == 2
    end
  end
  defp parse_scores(scores) do
    scores
    |> score_frames
    |> Enum.sum()
  end
  defp score_frames(scores) do
    Enum.map(0..9, fn idx ->
      current_frame = Enum.at(scores, idx)
      next_frame = Enum.at(scores, idx + 1, [0, 0])
      cond do
        strike?(current_frame) ->
          strike(current_frame, next_frame, scores, idx)
        spare?(current_frame) ->
          10 + hd(next_frame)
        true ->
          Enum.sum(current_frame)
      end
    end)
  end
  defp strike?(frame) do
    frame == [10, 0]
  end
  defp strike(current_frame, next_frame, scores, idx) do
    if strike?(next_frame) do
      Enum.sum(current_frame) + Enum.sum(next_frame) +
        (Enum.at(scores, idx + 2, [0, 0]) |> Enum.at(0))
    else
      Enum.sum(Enum.at(scores, idx)) + Enum.sum(Enum.at(scores, idx + 1, [0, 0]))
    end
  end
  defp spare?(frame) do
    Enum.sum(frame) == 10
  end
end | 
	exercises/practice/bowling/.meta/example.ex | 0.695131 | 0.610424 | 
	example.ex | 
	starcoder | 
| 
	defmodule ExWire.Message do
  @moduledoc """
  Defines a behavior for messages so that they can be
  easily encoded and decoded.
  """
  defmodule UnknownMessageError do
    defexception [:message]
  end
  @type t ::
          ExWire.Message.Ping.t()
          | ExWire.Message.Pong.t()
          | ExWire.Message.FindNeighbours.t()
          | ExWire.Message.Neighbours.t()
  @type handlers ::
          ExWire.Message.Ping
          | ExWire.Message.Pong
          | ExWire.Message.FindNeighbours
          | ExWire.Message.Neighbours
  @type message_id :: integer()
  @callback message_id() :: message_id
  @callback encode(t) :: binary()
  @callback to(t) :: ExWire.Endpoint.t() | nil
  @message_types %{
    0x01 => ExWire.Message.Ping,
    0x02 => ExWire.Message.Pong,
    0x03 => ExWire.Message.FindNeighbours,
    0x04 => ExWire.Message.Neighbours
  }
  @doc """
  Decodes a message of the given `type` based on the encoded
  data. Effectively reverses the `encode/1` function.
  ## Examples
      iex> ExWire.Message.decode(0x01, <<210, 1, 199, 132, 1, 2, 3, 4, 128, 5, 199, 132, 5, 6, 7, 8, 6, 128, 4>>)
      %ExWire.Message.Ping{
        version: 1,
        from: %ExWire.Struct.Endpoint{ip: {1, 2, 3, 4}, tcp_port: 5, udp_port: nil},
        to: %ExWire.Struct.Endpoint{ip: {5, 6, 7, 8}, tcp_port: nil, udp_port: 6},
        timestamp: 4
      }
      iex> ExWire.Message.decode(0x02, <<202, 199, 132, 5, 6, 7, 8, 6, 128, 2, 3>>)
      %ExWire.Message.Pong{
        to: %ExWire.Struct.Endpoint{ip: {5, 6, 7, 8}, tcp_port: nil, udp_port: 6},
        hash: <<2>>,
        timestamp: 3
      }
      iex> ExWire.Message.decode(0x99, <<>>)
      ** (ExWire.Message.UnknownMessageError) Unknown message type: 0x99
  """
  @spec decode(integer(), binary()) :: t
  def decode(type, data) do
    case @message_types[type] do
      nil -> raise UnknownMessageError, "Unknown message type: #{inspect(type, base: :hex)}"
      mod -> mod.decode(data)
    end
  end
  @doc """
  Encodes a message by prepending its `message_id` to
  the encoded data of the message itself.
  ## Examples
      iex> ExWire.Message.encode(
      ...>   %ExWire.Message.Ping{
      ...>     version: 1,
      ...>     from: %ExWire.Struct.Endpoint{ip: {1, 2, 3, 4}, tcp_port: 5, udp_port: nil},
      ...>     to: %ExWire.Struct.Endpoint{ip: {5, 6, 7, 8}, tcp_port: nil, udp_port: 6},
      ...>     timestamp: 4
      ...>   }
      ...> )
      <<1, 214, 1, 201, 132, 1, 2, 3, 4, 128, 130, 0, 5, 201, 132, 5, 6, 7, 8, 130, 0, 6, 128, 4>>
      iex> ExWire.Message.encode(%ExWire.Message.Pong{to: %ExWire.Struct.Endpoint{ip: {5, 6, 7, 8}, tcp_port: nil, udp_port: 6}, hash: <<2>>, timestamp: 3})
      <<2, 204, 201, 132, 5, 6, 7, 8, 130, 0, 6, 128, 2, 3>>
  """
  @spec encode(t) :: binary()
  def encode(message) do
    <<message.__struct__.message_id()>> <> message.__struct__.encode(message)
  end
end | 
	apps/ex_wire/lib/ex_wire/message.ex | 0.860838 | 0.408247 | 
	message.ex | 
	starcoder | 
| 
	defmodule Membrane.Element.Base do
  @moduledoc """
  Module defining behaviour common to all elements.
  When used declares behaviour implementation, provides default callback definitions
  and imports macros.
  # Elements
  Elements are units that produce, process or consume data. They can be linked
  with `Membrane.Pipeline`, and thus form a pipeline able to perform complex data
  processing. Each element defines a set of pads, through which it can be linked
  with other elements. During playback, pads can either send (output pads) or
  receive (input pads) data. For more information on pads, see
  `Membrane.Pad`.
  To implement an element, one of base modules (`Membrane.Source`,
  `Membrane.Filter`, `Membrane.Sink`)
  has to be `use`d, depending on the element type:
  - source, producing buffers (contain only output pads),
  - filter, processing buffers (contain both input and output pads),
  - sink, consuming buffers (contain only input pads).
  For more information on each element type, check documentation for appropriate
  base module.
  ## Behaviours
  Element-specific behaviours are specified in modules:
  - `Membrane.Element.Base` - this module, behaviour common to all
  elements,
  - `Membrane.Element.WithOutputPads` - behaviour common to sources
  and filters,
  - `Membrane.Element.WithInputPads` - behaviour common to sinks and
  filters,
  - Base modules (`Membrane.Source`, `Membrane.Filter`,
  `Membrane.Sink`) - behaviours specific to each element type.
  ## Callbacks
  Modules listed above provide specifications of callbacks that define elements
  lifecycle. All of these callbacks have names with the `handle_` prefix.
  They are used to define reaction to certain events that happen during runtime,
  and indicate what actions framework should undertake as a result, besides
  executing element-specific code.
  For actions that can be returned by each callback, see `Membrane.Element.Action`
  module.
  """
  use Bunch
  alias Membrane.{Element, Event, Pad}
  alias Membrane.Core.OptionsSpecs
  alias Membrane.Element.{Action, CallbackContext}
  @typedoc """
  Type that defines all valid return values from most callbacks.
  In case of error, a callback is supposed to return `{:error, any}` if it is not
  passed state, and `{{:error, any}, state}` otherwise.
  """
  @type callback_return_t ::
          {:ok | {:ok, [Action.t()]} | {:error, any}, Element.state_t()} | {:error, any}
  @doc """
  Automatically implemented callback returning specification of pads exported
  by the element.
  Generated by `Membrane.Element.WithInputPads.def_input_pad/2`
  and `Membrane.Element.WithOutputPads.def_output_pad/2` macros.
  """
  @callback membrane_pads() :: [{Pad.name_t(), Pad.description_t()}]
  @doc """
  Automatically implemented callback used to determine if module is a membrane element.
  """
  @callback membrane_element? :: true
  @doc """
  Automatically implemented callback used to determine whether element exports clock.
  """
  @callback membrane_clock? :: true
  @doc """
  Automatically implemented callback determining whether element is a source,
  a filter or a sink.
  """
  @callback membrane_element_type :: Element.type_t()
  @doc """
  Callback invoked on initialization of element process. It should parse options
  and initialize element internal state. Internally it is invoked inside
  `c:GenServer.init/1` callback.
  """
  @callback handle_init(options :: Element.options_t()) ::
              {:ok, Element.state_t()}
              | {:error, any}
  @doc """
  Callback invoked when element goes to `:prepared` state from state `:stopped` and should get
  ready to enter `:playing` state.
  Usually most resources used by the element are allocated here.
  For example, if element opens a file, this is the place to try to actually open it
  and return error if that has failed. Such resources should be released in `c:handle_prepared_to_stopped/2`.
  """
  @callback handle_stopped_to_prepared(
              context :: CallbackContext.PlaybackChange.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @doc """
  Callback invoked when element goes to `:prepared` state from state `:playing` and should get
  ready to enter `:stopped` state.
  All resources allocated in `c:handle_prepared_to_playing/2` callback should be released here, and no more buffers or
  demands should be sent.
  """
  @callback handle_playing_to_prepared(
              context :: CallbackContext.PlaybackChange.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @doc """
  Callback invoked when element is supposed to start playing (goes from state `:prepared` to `:playing`).
  This is the moment when initial demands are sent and the first buffers are
  generated, if there are any pads in the push mode.
  """
  @callback handle_prepared_to_playing(
              context :: CallbackContext.PlaybackChange.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @doc """
  Callback invoked when element is supposed to stop (goes from state `:prepared` to `:stopped`).
  Usually this is the place for releasing all remaining resources
  used by the element. For example, if the element opened a file in `c:handle_stopped_to_prepared/2`,
  this is the place to close it.
  """
  @callback handle_prepared_to_stopped(
              context :: CallbackContext.PlaybackChange.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @callback handle_stopped_to_terminating(
              context :: CallbackContext.PlaybackChange.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @doc """
  Callback invoked when element receives a message that is not recognized
  as an internal membrane message.
  Useful for receiving ticks from timer, data sent from NIFs or other stuff.
  """
  @callback handle_other(
              message :: any(),
              context :: CallbackContext.Other.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @doc """
  Callback that is called when a new pad has been added to the element. Executed
  ONLY for dynamic pads.
  """
  @callback handle_pad_added(
              pad :: Pad.ref_t(),
              context :: CallbackContext.PadAdded.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @doc """
  Callback that is called when some pad of the element has been removed. Executed
  ONLY for dynamic pads.
  """
  @callback handle_pad_removed(
              pad :: Pad.ref_t(),
              context :: CallbackContext.PadRemoved.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @doc """
  Callback that is called when event arrives.
  Events may arrive from both sinks and sources. In filters, by default, the
  event is forwarded to all sources or sinks, respectively.
  """
  @callback handle_event(
              pad :: Pad.ref_t(),
              event :: Event.t(),
              context :: CallbackContext.Event.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @doc """
  Callback invoked upon each timer tick. A timer can be started with `Membrane.Element.Action.start_timer_t`
  action.
  """
  @callback handle_tick(
              timer_id :: any,
              context :: CallbackContext.Tick.t(),
              state :: Element.state_t()
            ) :: callback_return_t
  @doc """
  Callback invoked when element is shutting down just before process is exiting.
  Internally called in `c:GenServer.terminate/2` callback.
  """
  @callback handle_shutdown(reason, state :: Element.state_t()) :: :ok
            when reason: :normal | :shutdown | {:shutdown, any} | term()
  @optional_callbacks membrane_clock?: 0,
                      handle_init: 1,
                      handle_stopped_to_prepared: 2,
                      handle_prepared_to_playing: 2,
                      handle_playing_to_prepared: 2,
                      handle_prepared_to_stopped: 2,
                      handle_other: 3,
                      handle_pad_added: 3,
                      handle_pad_removed: 3,
                      handle_event: 4,
                      handle_tick: 3,
                      handle_shutdown: 2
  @doc """
  Macro defining options that parametrize element.
  It automatically generates appropriate struct and documentation.
  #{OptionsSpecs.options_doc()}
  """
  defmacro def_options(options) do
    OptionsSpecs.def_options(__CALLER__.module, options, :element)
  end
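  # Hedged usage sketch (the option name and keys below are assumptions for
  # illustration; see `Membrane.Core.OptionsSpecs` for the supported keys):
  #
  #     def_options location: [
  #                   type: :string,
  #                   description: "Path of the file to read"
  #                 ]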
  @doc """
  Defines that element exports a clock to pipeline.
  Exporting clock allows pipeline to choose it as the pipeline clock, enabling other
  elements to synchronize with it. Element's clock is accessible via `clock` field,
  while pipeline's one - via `parent_clock` field in callback contexts. Both of
  them can be used for starting timers.
  """
  defmacro def_clock(doc \\ "") do
    quote do
      @membrane_element_has_clock true
      Module.put_attribute(__MODULE__, :membrane_clock_moduledoc, """
      ## Clock
      This element provides a clock to its parent.
      #{unquote(doc)}
      """)
      @impl true
      def membrane_clock?, do: true
    end
  end
  @doc false
  defmacro __before_compile__(env) do
    Membrane.Core.Child.generate_moduledoc(env.module, :element)
  end
  @doc """
  Brings common stuff needed to implement an element. Used by
  `Membrane.Source.__using__/1`, `Membrane.Filter.__using__/1`
  and `Membrane.Sink.__using__/1`.
  Options:
    - `:bring_pad?` - if true (default) requires and aliases `Membrane.Pad`
  """
  defmacro __using__(options) do
    bring_pad =
      if options |> Keyword.get(:bring_pad?, true) do
        quote do
          require Membrane.Pad
          alias Membrane.Pad
        end
      end
    quote location: :keep do
      @behaviour unquote(__MODULE__)
      @before_compile unquote(__MODULE__)
      alias Membrane.Element.CallbackContext, as: Ctx
      import unquote(__MODULE__), only: [def_clock: 0, def_clock: 1, def_options: 1]
      unquote(bring_pad)
      @impl true
      def membrane_element?, do: true
      @impl true
      def handle_init(%_{} = options), do: {:ok, options |> Map.from_struct()}
      def handle_init(options), do: {:ok, options}
      @impl true
      def handle_stopped_to_prepared(_context, state), do: {:ok, state}
      @impl true
      def handle_prepared_to_playing(_context, state), do: {:ok, state}
      @impl true
      def handle_playing_to_prepared(_context, state), do: {:ok, state}
      @impl true
      def handle_prepared_to_stopped(_context, state), do: {:ok, state}
      @impl true
      def handle_stopped_to_terminating(_context, state), do: {:ok, state}
      @impl true
      def handle_other(_message, _context, state), do: {:ok, state}
      @impl true
      def handle_pad_added(_pad, _context, state), do: {:ok, state}
      @impl true
      def handle_pad_removed(_pad, _context, state), do: {:ok, state}
      @impl true
      def handle_event(_pad, _event, _context, state), do: {:ok, state}
      @impl true
      def handle_shutdown(_reason, _state), do: :ok
      defoverridable handle_init: 1,
                     handle_stopped_to_prepared: 2,
                     handle_playing_to_prepared: 2,
                     handle_prepared_to_playing: 2,
                     handle_prepared_to_stopped: 2,
                     handle_other: 3,
                     handle_pad_added: 3,
                     handle_pad_removed: 3,
                     handle_event: 4,
                     handle_shutdown: 2
    end
  end
end | 
	lib/membrane/element/base.ex | 0.922787 | 0.653182 | 
	base.ex | 
	starcoder | 
| 
	defmodule ExDoc.Markdown do
  @moduledoc """
  Transform a given document in Markdown to HTML
  ExDoc supports the following Markdown parsers:
    * [Hoedown][]
    * [Earmark][]
    * [Cmark][]
  If you don't specify a parser in `config/config.exs`, ExDoc will try to
  find one of the Markdown parsers from the list above in top-down fashion.
  Otherwise, ExDoc will raise an exception.
  [Hoedown]: https://github.com/hoedown/hoedown
  [Earmark]: http://github.com/pragdave/earmark
  [Cmark]: https://github.com/asaaki/cmark.ex
  """
  @markdown_processors [
    ExDoc.Markdown.Hoedown,
    ExDoc.Markdown.Earmark,
    ExDoc.Markdown.Cmark
  ]
  @markdown_processor_key :markdown_processor
  @doc """
  Converts the given markdown document to HTML.
  """
  def to_html(text, opts \\ []) when is_binary(text) do
    pretty_codeblocks(get_markdown_processor().to_html(text, opts))
  end
  @doc """
  Helper to handle plain code blocks (```...```), with and without a
  language specification, as well as indented code blocks.
  """
  def pretty_codeblocks(bin) do
    bin = Regex.replace(~r/<pre><code(\s+class=\"\")?>\s*iex>/,
                        # Add "elixir" class for now, until we have support for
                        # "iex" in highlight.js
                        bin, ~S(<pre><code class="iex elixir">iex>))
    bin = Regex.replace(~r/<pre><code(\s+class=\"\")?>/,
                        bin, ~S(<pre><code class="elixir">))
    bin
  end
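  # Hedged example (illustrative input/output):
  #
  #     iex> ExDoc.Markdown.pretty_codeblocks("<pre><code>iex> 1 + 1</code></pre>")
  #     ~S(<pre><code class="iex elixir">iex> 1 + 1</code></pre>)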
  defp get_markdown_processor do
    case Application.fetch_env(:ex_doc, @markdown_processor_key) do
      {:ok, processor} ->
        processor
      :error ->
        processor = find_markdown_processor() || raise_no_markdown_processor()
        Application.put_env(:ex_doc, @markdown_processor_key, processor)
        processor
    end
  end
  defp find_markdown_processor do
    Enum.find @markdown_processors, fn module ->
      Code.ensure_loaded?(module) && module.available?
    end
  end
  defp raise_no_markdown_processor do
    raise """
    Could not find a markdown processor to be used by ex_doc.
    You can either:
    * Add {:earmark, ">= 0.0.0"} to your mix.exs deps
      to use an Elixir-based markdown processor
    * Add {:markdown, github: "devinus/markdown"} to your mix.exs deps
      to use a C-based markdown processor
    * Add {:cmark, ">= 0.5"} to your mix.exs deps
      to use another C-based markdown processor
    """
  end
end | 
	lib/ex_doc/markdown.ex | 0.765856 | 0.662669 | 
	markdown.ex | 
	starcoder | 
| 
	defmodule HL7.Composite.Default.XCN do
  @moduledoc """
  2.9.52 XCN - extended composite ID number and name for persons
  Components:
    - `id_number` (ST)
    - `family_name` (FN)
    - `given_name` (ST)
    - `second_name` (ST)
    - `suffix` (ST)
    - `prefix` (ST)
    - `degree` (IS)
    - `source_table` (IS)
    - `assigning_authority` (HD)
    - `name_type_code` (ID)
    - `check_digit` (ST)
    - `check_digit_scheme` (ID)
    - `id_type` (IS)
    - `assigning_facility` (HD)
    - `name_representation_code` (ID)
    - `name_context` (CE)
    - `name_validity` (DR)
    - `name_assembly_order` (ID)
  Subcomponents of `family_name`:
    - `surname` (ST)
    - `own_surname_prefix` (ST)
    - `own_surname` (ST)
    - `surname_prefix_from_partner` (ST)
    - `surname_from_partner` (ST)
  Subcomponents of `assigning_authority`:
    - `namespace_id` (IS)
    - `universal_id` (ST)
    - `universal_id_type` (ID)
  Subcomponents of `assigning_facility`:
    - `namespace_id` (IS)
    - `universal_id` (ST)
    - `universal_id_type` (ID)
  Subcomponents of `name_context`:
    - `id` (ST)
    - `text` (ST)
    - `coding_system` (IS)
    - `alt_id` (ST)
    - `alt_text` (ST)
    - `alt_coding_system` (IS)
  Subcomponents of `name_validity`:
    - `start_datetime` (TS)
    - `end_datetime` (TS)
  This data type is used extensively, appearing in the PV1, ORC, RXO, RXE, OBR
  and SCH segments, as well as others, where there is a need to specify the
  ID number and name of a person.
  ## Examples
    Neither an assigning authority nor an assigning facility is present in the
    example:
      |1234567^Smith^John^J^III^DR^PHD^ADT01^^L^4^M11^MR|
  """
  use HL7.Composite.Spec
  require HL7.Composite.Default.CE, as: CE
  require HL7.Composite.Default.DR, as: DR
  require HL7.Composite.Default.HD, as: HD
  composite do
    component :id_number,                type: :string
    component :family_name,              type: :string
    component :given_name,               type: :string
    component :second_name,              type: :string
    component :suffix,                   type: :string
    component :prefix,                   type: :string
    component :degree,                   type: :string
    component :source_table,             type: :string
    component :assigning_authority,      type: HD
    component :name_type_code,           type: :string
    component :check_digit,              type: :string
    component :check_digit_scheme,       type: :string
    component :id_type,                  type: :string
    component :assigning_facility,       type: HD
    component :name_representation_code, type: :string
    component :name_context,             type: CE
    component :name_validity,            type: DR
    component :name_assembly_order,      type: :string
    component :effective_date,           type: :datetime
    component :expiration_date,          type: :datetime
    component :professional_suffix,      type: :string
  end
end | 
	lib/ex_hl7/composite/default/xcn.ex | 0.800458 | 0.619788 | 
	xcn.ex | 
	starcoder | 
| 
	
defmodule AWS.SSM do
  @moduledoc """
  Amazon Web Services Systems Manager is a collection of capabilities that helps
  you automate management tasks such as collecting system inventory, applying
  operating system (OS) patches, automating the creation of Amazon Machine Images
  (AMIs), and configuring operating systems (OSs) and applications at scale.
  Systems Manager lets you remotely and securely manage the configuration of your
  managed nodes. A *managed node* is any Amazon Elastic Compute Cloud (Amazon EC2)
  instance, edge device, or on-premises server or virtual machine (VM) that has
  been configured for Systems Manager.
  With support for IoT Greengrass Version 2 devices, the phrase *managed instance*
  has been changed to *managed node* in most of the Systems Manager documentation.
  The Systems Manager console, API calls, error messages, and SSM documents still
  use the term instance.
  This reference is intended to be used with the [Amazon Web Services Systems Manager User
  Guide](https://docs.aws.amazon.com/systems-manager/latest/userguide/).
  To get started, verify prerequisites and configure managed nodes. For more
  information, see [Setting up Amazon Web Services Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  ## Related resources
    * For information about how to use a Query API, see [Making API requests](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/making-api-requests.html).
    * For information about other API operations you can perform on EC2
  instances, see the [Amazon EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/).
    * For information about AppConfig, a capability of Systems Manager,
  see the [AppConfig User Guide](https://docs.aws.amazon.com/appconfig/latest/userguide/) and the
  [AppConfig API Reference](https://docs.aws.amazon.com/appconfig/2019-10-09/APIReference/).
    * For information about Incident Manager, a capability of Systems
  Manager, see the [Incident Manager User Guide](https://docs.aws.amazon.com/incident-manager/latest/userguide/) and the
  [Incident Manager API Reference](https://docs.aws.amazon.com/incident-manager/latest/APIReference/).
  """
  alias AWS.Client
  alias AWS.Request
  def metadata do
    %AWS.ServiceMetadata{
      abbreviation: "Amazon SSM",
      api_version: "2014-11-06",
      content_type: "application/x-amz-json-1.1",
      credential_scope: nil,
      endpoint_prefix: "ssm",
      global?: false,
      protocol: "json",
      service_id: "SSM",
      signature_version: "v4",
      signing_name: "ssm",
      target_prefix: "AmazonSSM"
    }
  end
  @doc """
  Adds or overwrites one or more tags for the specified resource.
  Tags are metadata that you can assign to your documents, managed nodes,
  maintenance windows, Parameter Store parameters, and patch baselines. Tags
  enable you to categorize your resources in different ways, for example, by
  purpose, owner, or environment. Each tag consists of a key and an optional
  value, both of which you define. For example, you could define a set of tags for
  your account's managed nodes that helps you track each node's owner and stack
  level. For instance:
    * `Key=Owner,Value=DbAdmin`
    * `Key=Owner,Value=SysAdmin`
    * `Key=Owner,Value=Dev`
    * `Key=Stack,Value=Production`
    * `Key=Stack,Value=Pre-Production`
    * `Key=Stack,Value=Test`
  Each resource can have a maximum of 50 tags.
  We recommend that you devise a set of tag keys that meets your needs for each
  resource type. Using a consistent set of tag keys makes it easier for you to
  manage your resources. You can search and filter the resources based on the tags
  you add. Tags don't have any semantic meaning and are interpreted strictly as
  a string of characters.
  For more information about using tags with Amazon Elastic Compute Cloud (Amazon
  EC2) instances, see [Tagging your Amazon EC2 resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
  in the *Amazon EC2 User Guide*.
  """
  def add_tags_to_resource(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "AddTagsToResource", input, options)
  end
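  # Hedged usage sketch (the client constructor and all values below are
  # assumptions for illustration; `input` follows the AddTagsToResource API):
  #
  #     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
  #     input = %{
  #       "ResourceType" => "ManagedInstance",
  #       "ResourceId" => "mi-0123456789abcdef0",
  #       "Tags" => [%{"Key" => "Owner", "Value" => "DbAdmin"}]
  #     }
  #     AWS.SSM.add_tags_to_resource(client, input)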
  @doc """
  Associates a related item to a Systems Manager OpsCenter OpsItem.
  For example, you can associate an Incident Manager incident or analysis with an
  OpsItem. Incident Manager and OpsCenter are capabilities of Amazon Web Services
  Systems Manager.
  """
  def associate_ops_item_related_item(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "AssociateOpsItemRelatedItem", input, options)
  end
  @doc """
  Attempts to cancel the command specified by the Command ID.
  There is no guarantee that the command will be terminated and the underlying
  process stopped.
  """
  def cancel_command(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CancelCommand", input, options)
  end
  @doc """
  Stops a maintenance window execution that is already in progress and cancels any
  tasks in the window that haven't already started running.
  Tasks already in progress will continue to completion.
  """
  def cancel_maintenance_window_execution(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CancelMaintenanceWindowExecution", input, options)
  end
  @doc """
  Generates an activation code and activation ID you can use to register your
  on-premises servers, edge devices, or virtual machine (VM) with Amazon Web
  Services Systems Manager.
  Registering these machines with Systems Manager makes it possible to manage them
  using Systems Manager capabilities. You use the activation code and ID when
  installing SSM Agent on machines in your hybrid environment. For more
  information about requirements for managing on-premises machines using Systems
  Manager, see [Setting up Amazon Web Services Systems Manager for hybrid environments](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and
  on-premises servers and VMs that are configured for Systems Manager are all
  called *managed nodes*.
  """
  def create_activation(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateActivation", input, options)
  end
  @doc """
  A State Manager association defines the state that you want to maintain on your
  managed nodes.
  For example, an association can specify that anti-virus software must be
  installed and running on your managed nodes, or that certain ports must be
  closed. For static targets, the association specifies a schedule for when the
  configuration is reapplied. For dynamic targets, such as an Amazon Web Services
  resource group or an Amazon Web Services autoscaling group, State Manager, a
  capability of Amazon Web Services Systems Manager, applies the configuration when
  new managed nodes are added to the group. The association also specifies actions
  to take when applying the configuration. For example, an association for
  anti-virus software might run once a day. If the software isn't installed, then
  State Manager installs it. If the software is installed, but the service isn't
  running, then the association might instruct State Manager to start the service.
  """
  def create_association(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateAssociation", input, options)
  end
  @doc """
  Associates the specified Amazon Web Services Systems Manager document (SSM
  document) with the specified managed nodes or targets.
  When you associate a document with one or more managed nodes using IDs or tags,
  Amazon Web Services Systems Manager Agent (SSM Agent) running on the managed
  node processes the document and configures the node as specified.
  If you associate a document with a managed node that already has an associated
  document, the system returns the AssociationAlreadyExists exception.
  """
  def create_association_batch(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateAssociationBatch", input, options)
  end
  @doc """
  Creates an Amazon Web Services Systems Manager (SSM) document.
  An SSM document defines the actions that Systems Manager performs on your
  managed nodes. For more information about SSM documents, including information
  about supported schemas, features, and syntax, see [Amazon Web Services Systems Manager
  Documents](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-ssm-docs.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  """
  def create_document(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateDocument", input, options)
  end
  @doc """
  Creates a new maintenance window.
  The value you specify for `Duration` determines the specific end time for the
  maintenance window based on the time it begins. No maintenance window tasks are
  permitted to start after the resulting end time minus the number of hours you
  specify for `Cutoff`. For example, if the maintenance window starts at 3 PM, the
  duration is three hours, and the value you specify for `Cutoff` is one hour, no
  maintenance window tasks can start after 5 PM.
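  ## Example
  A sketch matching the `Duration`/`Cutoff` behavior described above; the field
  names follow the `CreateMaintenanceWindow` API and the values are
  illustrative:
      input = %{
        "Name" => "weekly-patching",
        "Schedule" => "cron(0 15 ? * TUE *)",
        # Window stays open three hours; no new tasks start in the final hour
        "Duration" => 3,
        "Cutoff" => 1,
        "AllowUnassociatedTargets" => false
      }
      {:ok, %{"WindowId" => window_id}, _http} = create_maintenance_window(client, input)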
  """
  def create_maintenance_window(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateMaintenanceWindow", input, options)
  end
  @doc """
  Creates a new OpsItem.
  You must have permission in Identity and Access Management (IAM) to create a new
  OpsItem. For more information, see [Getting started with OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  Operations engineers and IT professionals use Amazon Web Services Systems
  Manager OpsCenter to view, investigate, and remediate operational issues
  impacting the performance and health of their Amazon Web Services resources. For
  more information, see [Amazon Web Services Systems Manager OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  """
  def create_ops_item(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateOpsItem", input, options)
  end
  @doc """
  If you create a new application in Application Manager, Amazon Web Services
  Systems Manager calls this API operation to specify information about the new
  application, including the application type.
  """
  def create_ops_metadata(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateOpsMetadata", input, options)
  end
  @doc """
  Creates a patch baseline.
  For information about valid key-value pairs in `PatchFilters` for each supported
  operating system type, see `PatchFilter`.
  """
  def create_patch_baseline(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreatePatchBaseline", input, options)
  end
  @doc """
  A resource data sync helps you view data from multiple sources in a single
  location.
  Amazon Web Services Systems Manager offers two types of resource data sync:
  `SyncToDestination` and `SyncFromSource`.
  You can configure Systems Manager Inventory to use the `SyncToDestination` type
  to synchronize Inventory data from multiple Amazon Web Services Regions to a
  single Amazon Simple Storage Service (Amazon S3) bucket. For more information,
  see [Configuring resource data sync for Inventory](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-datasync.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  You can configure Systems Manager Explorer to use the `SyncFromSource` type to
  synchronize operational work items (OpsItems) and operational data (OpsData)
  from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This
  type can synchronize OpsItems and OpsData from multiple Amazon Web Services
  accounts and Amazon Web Services Regions or `EntireOrganization` by using
  Organizations. For more information, see [Setting up Systems Manager Explorer to display data from multiple accounts and
  Regions](https://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resource-data-sync.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  A resource data sync is an asynchronous operation that returns immediately.
  After a successful initial sync is completed, the system continuously syncs
  data. To check the status of a sync, use the `ListResourceDataSync` operation.
  By default, data isn't encrypted in Amazon S3. We strongly recommend that you
  enable encryption in Amazon S3 to ensure secure data storage. We also recommend
  that you secure access to the Amazon S3 bucket by creating a restrictive bucket
  policy.
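  ## Example
  A `SyncToDestination` sketch that sends Inventory data to an S3 bucket. The
  shape follows the `CreateResourceDataSync` API; the bucket name is
  hypothetical:
      input = %{
        "SyncName" => "inventory-to-s3",
        "S3Destination" => %{
          "BucketName" => "my-inventory-bucket",
          "Region" => "us-east-1",
          "SyncFormat" => "JsonSerDe"
        }
      }
      {:ok, _body, _http} = create_resource_data_sync(client, input)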
  """
  def create_resource_data_sync(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "CreateResourceDataSync", input, options)
  end
  @doc """
  Deletes an activation.
  You aren't required to delete an activation. If you delete an activation, you
  can no longer use it to register additional managed nodes. Deleting an
  activation doesn't de-register managed nodes. You must manually de-register
  managed nodes.
  """
  def delete_activation(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteActivation", input, options)
  end
  @doc """
  Disassociates the specified Amazon Web Services Systems Manager document (SSM
  document) from the specified managed node.
  If you created the association by using the `Targets` parameter, then you must
  delete the association by using the association ID.
  When you disassociate a document from a managed node, it doesn't change the
  configuration of the node. To change the configuration state of a managed node
  after you disassociate a document, you must create a new document with the
  desired configuration and associate it with the node.
  """
  def delete_association(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteAssociation", input, options)
  end
  @doc """
  Deletes the Amazon Web Services Systems Manager document (SSM document) and all
  managed node associations to the document.
  Before you delete the document, we recommend that you use `DeleteAssociation` to
  disassociate all managed nodes that are associated with the document.
  """
  def delete_document(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteDocument", input, options)
  end
  @doc """
  Delete a custom inventory type or the data associated with a custom inventory
  type.
  Deleting a custom inventory type is also referred to as deleting a custom
  inventory schema.
  """
  def delete_inventory(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteInventory", input, options)
  end
  @doc """
  Deletes a maintenance window.
  """
  def delete_maintenance_window(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteMaintenanceWindow", input, options)
  end
  @doc """
  Delete OpsMetadata related to an application.
  """
  def delete_ops_metadata(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteOpsMetadata", input, options)
  end
  @doc """
  Delete a parameter from the system.
  After deleting a parameter, wait at least 30 seconds before creating a
  parameter with the same name.
  """
  def delete_parameter(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteParameter", input, options)
  end
  @doc """
  Delete a list of parameters.
  After deleting a parameter, wait at least 30 seconds before creating a
  parameter with the same name.
  """
  def delete_parameters(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteParameters", input, options)
  end
  @doc """
  Deletes a patch baseline.
  """
  def delete_patch_baseline(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeletePatchBaseline", input, options)
  end
  @doc """
  Deletes a resource data sync configuration.
  After the configuration is deleted, changes to data on managed nodes are no
  longer synced to or from the target. Deleting a sync configuration doesn't
  delete data.
  """
  def delete_resource_data_sync(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeleteResourceDataSync", input, options)
  end
  @doc """
  Removes the server or virtual machine from the list of registered servers.
  You can reregister the node again at any time. If you don't plan to use Run
  Command on the server, we suggest uninstalling SSM Agent first.
  """
  def deregister_managed_instance(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DeregisterManagedInstance", input, options)
  end
  @doc """
  Removes a patch group from a patch baseline.
  """
  def deregister_patch_baseline_for_patch_group(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DeregisterPatchBaselineForPatchGroup",
      input,
      options
    )
  end
  @doc """
  Removes a target from a maintenance window.
  """
  def deregister_target_from_maintenance_window(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DeregisterTargetFromMaintenanceWindow",
      input,
      options
    )
  end
  @doc """
  Removes a task from a maintenance window.
  """
  def deregister_task_from_maintenance_window(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DeregisterTaskFromMaintenanceWindow",
      input,
      options
    )
  end
  @doc """
  Describes details about the activation, such as the date and time the activation
  was created, its expiration date, the Identity and Access Management (IAM) role
  assigned to the managed nodes in the activation, and the number of nodes
  registered by using this activation.
  """
  def describe_activations(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeActivations", input, options)
  end
  @doc """
  Describes the association for the specified target or managed node.
  If you created the association by using the `Targets` parameter, then you must
  retrieve the association by using the association ID.
  """
  def describe_association(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeAssociation", input, options)
  end
  @doc """
  Views information about a specific execution of a specific association.
  """
  def describe_association_execution_targets(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DescribeAssociationExecutionTargets",
      input,
      options
    )
  end
  @doc """
  Views all executions for a specific association ID.
  """
  def describe_association_executions(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeAssociationExecutions", input, options)
  end
  @doc """
  Provides details about all active and terminated Automation executions.
  """
  def describe_automation_executions(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeAutomationExecutions", input, options)
  end
  @doc """
  Provides information about all active and terminated step executions in an Automation
  workflow.
  """
  def describe_automation_step_executions(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeAutomationStepExecutions", input, options)
  end
  @doc """
  Lists all patches eligible to be included in a patch baseline.
  """
  def describe_available_patches(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeAvailablePatches", input, options)
  end
  @doc """
  Describes the specified Amazon Web Services Systems Manager document (SSM
  document).
  """
  def describe_document(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeDocument", input, options)
  end
  @doc """
  Describes the permissions for an Amazon Web Services Systems Manager document
  (SSM document).
  If you created the document, you are the owner. If a document is shared, it can
  either be shared privately (by specifying a user's Amazon Web Services account
  ID) or publicly (*All*).
  """
  def describe_document_permission(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeDocumentPermission", input, options)
  end
  @doc """
  Lists all associations for the specified managed node or nodes.
  """
  def describe_effective_instance_associations(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DescribeEffectiveInstanceAssociations",
      input,
      options
    )
  end
  @doc """
  Retrieves the current effective patches (the patch and the approval state) for
  the specified patch baseline.
  Applies to patch baselines for Windows only.
  """
  def describe_effective_patches_for_patch_baseline(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DescribeEffectivePatchesForPatchBaseline",
      input,
      options
    )
  end
  @doc """
  Describes the status of the associations for the specified managed node or nodes.
  """
  def describe_instance_associations_status(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeInstanceAssociationsStatus", input, options)
  end
  @doc """
  Describes one or more of your managed nodes, including information about the
  operating system platform, the version of SSM Agent installed on the managed
  node, node status, and so on.
  If you specify one or more managed node IDs, it returns information for those
  managed nodes. If you don't specify node IDs, it returns information for all
  your managed nodes. If you specify a node ID that isn't valid or a node that you
  don't own, you receive an error.
  The `IamRole` field for this API operation is the Identity and Access Management
  (IAM) role assigned to on-premises managed nodes. This call doesn't return the
  IAM role for EC2 instances.
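  ## Example
  A sketch that filters on a single node ID; the filter key comes from the
  `DescribeInstanceInformation` API and the ID is a placeholder:
      input = %{
        "Filters" => [%{"Key" => "InstanceIds", "Values" => ["i-0123456789abcdef0"]}]
      }
      {:ok, %{"InstanceInformationList" => nodes}, _http} =
        describe_instance_information(client, input)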
  """
  def describe_instance_information(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeInstanceInformation", input, options)
  end
  @doc """
  Retrieves the high-level patch state of one or more managed nodes.
  """
  def describe_instance_patch_states(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeInstancePatchStates", input, options)
  end
  @doc """
  Retrieves the high-level patch state for the managed nodes in the specified
  patch group.
  """
  def describe_instance_patch_states_for_patch_group(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DescribeInstancePatchStatesForPatchGroup",
      input,
      options
    )
  end
  @doc """
  Retrieves information about the patches on the specified managed node and their
  state relative to the patch baseline being used for the node.
  """
  def describe_instance_patches(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeInstancePatches", input, options)
  end
  @doc """
  Describes a specific delete inventory operation.
  """
  def describe_inventory_deletions(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeInventoryDeletions", input, options)
  end
  @doc """
  Retrieves the individual task executions (one per target) for a particular task
  run as part of a maintenance window execution.
  """
  def describe_maintenance_window_execution_task_invocations(
        %Client{} = client,
        input,
        options \\ []
      ) do
    Request.request_post(
      client,
      metadata(),
      "DescribeMaintenanceWindowExecutionTaskInvocations",
      input,
      options
    )
  end
  @doc """
  For a given maintenance window execution, lists the tasks that were run.
  """
  def describe_maintenance_window_execution_tasks(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DescribeMaintenanceWindowExecutionTasks",
      input,
      options
    )
  end
  @doc """
  Lists the executions of a maintenance window.
  This includes information about when the maintenance window was scheduled to be
  active, and information about tasks registered and run with the maintenance
  window.
  """
  def describe_maintenance_window_executions(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DescribeMaintenanceWindowExecutions",
      input,
      options
    )
  end
  @doc """
  Retrieves information about upcoming executions of a maintenance window.
  """
  def describe_maintenance_window_schedule(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeMaintenanceWindowSchedule", input, options)
  end
  @doc """
  Lists the targets registered with the maintenance window.
  """
  def describe_maintenance_window_targets(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeMaintenanceWindowTargets", input, options)
  end
  @doc """
  Lists the tasks in a maintenance window.
  For maintenance window tasks without a specified target, you can't supply values
  for `--max-errors` and `--max-concurrency`. Instead, the system inserts a
  placeholder value of `1`, which may be reported in the response to this command.
  These values don't affect the running of your task and can be ignored.
  """
  def describe_maintenance_window_tasks(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeMaintenanceWindowTasks", input, options)
  end
  @doc """
  Retrieves the maintenance windows in an Amazon Web Services account.
  """
  def describe_maintenance_windows(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeMaintenanceWindows", input, options)
  end
  @doc """
  Retrieves information about the maintenance window targets or tasks that a
  managed node is associated with.
  """
  def describe_maintenance_windows_for_target(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "DescribeMaintenanceWindowsForTarget",
      input,
      options
    )
  end
  @doc """
  Query a set of OpsItems.
  You must have permission in Identity and Access Management (IAM) to query a list
  of OpsItems. For more information, see [Getting started with OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  Operations engineers and IT professionals use Amazon Web Services Systems
  Manager OpsCenter to view, investigate, and remediate operational issues
  impacting the performance and health of their Amazon Web Services resources. For
  more information, see
  [OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  """
  def describe_ops_items(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeOpsItems", input, options)
  end
  @doc """
  Get information about a parameter.
  Request results are returned on a best-effort basis. If you specify `MaxResults`
  in the request, the response includes information up to the limit specified. The
  number of items returned, however, can be between zero and the value of
  `MaxResults`. If the service reaches an internal limit while processing the
  results, it stops the operation and returns the matching values up to that point
  and a `NextToken`. You can specify the `NextToken` in a subsequent call to get
  the next set of results.
  If you change the KMS key alias for the KMS key used to encrypt a parameter,
  then you must also update the key alias the parameter uses to reference KMS.
  Otherwise, `DescribeParameters` retrieves whatever the original key alias was
  referencing.
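  ## Example
  A paginated sketch; the filter keys come from the `DescribeParameters` API,
  and `MaxResults`/`NextToken` behave as described above:
      input = %{
        "ParameterFilters" => [
          %{"Key" => "Type", "Option" => "Equals", "Values" => ["SecureString"]}
        ],
        "MaxResults" => 50
      }
      {:ok, %{"Parameters" => params} = body, _http} = describe_parameters(client, input)
      # Pass body["NextToken"], when present, in a follow-up call for the next page.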
  """
  def describe_parameters(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeParameters", input, options)
  end
  @doc """
  Lists the patch baselines in your Amazon Web Services account.
  """
  def describe_patch_baselines(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribePatchBaselines", input, options)
  end
  @doc """
  Returns high-level aggregated patch compliance state information for a patch
  group.
  """
  def describe_patch_group_state(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribePatchGroupState", input, options)
  end
  @doc """
  Lists all patch groups that have been registered with patch baselines.
  """
  def describe_patch_groups(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribePatchGroups", input, options)
  end
  @doc """
  Lists the properties of available patches organized by product, product family,
  classification, severity, and other properties.
  You can use the reported properties in the filters you specify in requests for
  operations such as `CreatePatchBaseline`, `UpdatePatchBaseline`,
  `DescribeAvailablePatches`, and `DescribePatchBaselines`.
  The following section lists the properties that can be used in filters for each
  major operating system type:
  ## Definitions
  ### AMAZON_LINUX
  Valid properties: `PRODUCT` | `CLASSIFICATION` | `SEVERITY`
  ### AMAZON_LINUX_2
  Valid properties: `PRODUCT` | `CLASSIFICATION` | `SEVERITY`
  ### CENTOS
  Valid properties: `PRODUCT` | `CLASSIFICATION` | `SEVERITY`
  ### DEBIAN
  Valid properties: `PRODUCT` | `PRIORITY`
  ### MACOS
  Valid properties: `PRODUCT` | `CLASSIFICATION`
  ### ORACLE_LINUX
  Valid properties: `PRODUCT` | `CLASSIFICATION` | `SEVERITY`
  ### REDHAT_ENTERPRISE_LINUX
  Valid properties: `PRODUCT` | `CLASSIFICATION` | `SEVERITY`
  ### SUSE
  Valid properties: `PRODUCT` | `CLASSIFICATION` | `SEVERITY`
  ### UBUNTU
  Valid properties: `PRODUCT` | `PRIORITY`
  ### WINDOWS
  Valid properties: `PRODUCT` | `PRODUCT_FAMILY` | `CLASSIFICATION` |
  `MSRC_SEVERITY`
  """
  def describe_patch_properties(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribePatchProperties", input, options)
  end
  @doc """
  Retrieves a list of all active sessions (both connected and disconnected) or
  terminated sessions from the past 30 days.
  """
  def describe_sessions(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DescribeSessions", input, options)
  end
  @doc """
  Deletes the association between an OpsItem and a related item.
  For example, this API operation can delete an Incident Manager incident from an
  OpsItem. Incident Manager is a capability of Amazon Web Services Systems
  Manager.
  """
  def disassociate_ops_item_related_item(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "DisassociateOpsItemRelatedItem", input, options)
  end
  @doc """
  Get detailed information about a particular Automation execution.
  """
  def get_automation_execution(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetAutomationExecution", input, options)
  end
  @doc """
  Gets the state of an Amazon Web Services Systems Manager change calendar at the
  current time or a specified time.
  If you specify a time, `GetCalendarState` returns the state of the calendar at
  that specific time, and returns the next time that the change calendar state
  will transition. If you don't specify a time, `GetCalendarState` uses the
  current time. Change Calendar entries have two possible states: `OPEN` or
  `CLOSED`.
  If you specify more than one calendar in a request, the command returns the
  status of `OPEN` only if all calendars in the request are open. If one or more
  calendars in the request are closed, the status returned is `CLOSED`.
  For more information about Change Calendar, a capability of Amazon Web Services
  Systems Manager, see [Amazon Web Services Systems Manager Change Calendar](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-change-calendar.html)
  in the *Amazon Web Services Systems Manager User Guide*.
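  ## Example
  A sketch that checks two calendars at a specific time; the calendar document
  names are hypothetical, and `AtTime` uses ISO 8601 as in the
  `GetCalendarState` API:
      input = %{
        "CalendarNames" => ["change-freeze", "business-hours"],
        "AtTime" => "2022-07-01T00:00:00Z"
      }
      # "OPEN" only if every calendar in the request is open at that time
      {:ok, %{"State" => state}, _http} = get_calendar_state(client, input)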
  """
  def get_calendar_state(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetCalendarState", input, options)
  end
  @doc """
  Returns detailed information about command execution for an invocation or
  plugin.
  `GetCommandInvocation` only gives the execution status of a plugin in a
  document. To get the command execution status on a specific managed node, use
  `ListCommandInvocations`. To get the command execution status across managed
  nodes, use `ListCommands`.
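  ## Example
  A sketch that checks a single plugin's status on one node; the IDs are
  placeholders and the fields follow the `GetCommandInvocation` API:
      input = %{
        "CommandId" => "11111111-2222-3333-4444-555555555555",
        "InstanceId" => "i-0123456789abcdef0",
        "PluginName" => "aws:runShellScript"
      }
      {:ok, %{"Status" => status}, _http} = get_command_invocation(client, input)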
  """
  def get_command_invocation(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetCommandInvocation", input, options)
  end
  @doc """
  Retrieves the Session Manager connection status for a managed node to determine
  whether it is running and ready to receive Session Manager connections.
  """
  def get_connection_status(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetConnectionStatus", input, options)
  end
  @doc """
  Retrieves the default patch baseline.
  Amazon Web Services Systems Manager supports creating multiple default patch
  baselines. For example, you can create a default patch baseline for each
  operating system.
  If you don't specify an operating system value, the default patch baseline for
  Windows is returned.
  """
  def get_default_patch_baseline(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetDefaultPatchBaseline", input, options)
  end
  @doc """
  Retrieves the current snapshot for the patch baseline the managed node uses.
  This API is primarily used by the `AWS-RunPatchBaseline` Systems Manager
  document (SSM document).
  If you run the command locally, such as with the Command Line Interface (CLI),
  the system attempts to use your local Amazon Web Services credentials and the
  operation fails. To avoid this, you can run the command in the Amazon Web
  Services Systems Manager console. Use Run Command, a capability of Amazon Web
  Services Systems Manager, with an SSM document that enables you to target a
  managed node with a script or command. For example, run the command using the
  `AWS-RunShellScript` document or the `AWS-RunPowerShellScript` document.
  """
  def get_deployable_patch_snapshot_for_instance(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "GetDeployablePatchSnapshotForInstance",
      input,
      options
    )
  end
  @doc """
  Gets the contents of the specified Amazon Web Services Systems Manager document
  (SSM document).
  """
  def get_document(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetDocument", input, options)
  end
  @doc """
  Query inventory information.
  This includes managed node status, such as `Stopped` or `Terminated`.
  """
  def get_inventory(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetInventory", input, options)
  end
  @doc """
  Return a list of inventory type names for the account, or return a list of
  attribute names for a specific Inventory item type.
  """
  def get_inventory_schema(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetInventorySchema", input, options)
  end
  @doc """
  Retrieves a maintenance window.
  """
  def get_maintenance_window(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetMaintenanceWindow", input, options)
  end
  @doc """
  Retrieves details about a specific maintenance window execution.
  """
  def get_maintenance_window_execution(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetMaintenanceWindowExecution", input, options)
  end
  @doc """
  Retrieves the details about a specific task run as part of a maintenance window
  execution.
  """
  def get_maintenance_window_execution_task(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetMaintenanceWindowExecutionTask", input, options)
  end
  @doc """
  Retrieves information about a specific task running on a specific target.
  """
  def get_maintenance_window_execution_task_invocation(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "GetMaintenanceWindowExecutionTaskInvocation",
      input,
      options
    )
  end
  @doc """
  Retrieves the details of a task assigned to a maintenance window.
  For maintenance window tasks without a specified target, you can't supply values
  for `--max-errors` and `--max-concurrency`. Instead, the system inserts a
  placeholder value of `1`, which may be reported in the response to this command.
  These values don't affect the running of your task and can be ignored.
  """
  def get_maintenance_window_task(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetMaintenanceWindowTask", input, options)
  end
  @doc """
  Get information about an OpsItem by using the ID.
  You must have permission in Identity and Access Management (IAM) to view
  information about an OpsItem. For more information, see [Getting started with OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  Operations engineers and IT professionals use Amazon Web Services Systems
  Manager OpsCenter to view, investigate, and remediate operational issues
  impacting the performance and health of their Amazon Web Services resources. For
  more information, see
  [OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  """
  def get_ops_item(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetOpsItem", input, options)
  end
  @doc """
  View operational metadata related to an application in Application Manager.
  """
  def get_ops_metadata(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetOpsMetadata", input, options)
  end
  @doc """
  View a summary of operations metadata (OpsData) based on specified filters and
  aggregators.
  OpsData can include information about Amazon Web Services Systems Manager
  OpsCenter operational workitems (OpsItems) as well as information about any
  Amazon Web Services resource or service configured to report OpsData to Amazon
  Web Services Systems Manager Explorer.
  """
  def get_ops_summary(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetOpsSummary", input, options)
  end
  @doc """
  Get information about a single parameter by specifying the parameter name.
  To get information about more than one parameter at a time, use the
  `GetParameters` operation.
  """
  def get_parameter(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetParameter", input, options)
  end
  @doc """
  Retrieves the history of all changes to a parameter.
  If you change the KMS key alias for the KMS key used to encrypt a parameter,
  then you must also update the key alias the parameter uses to reference KMS.
  Otherwise, `GetParameterHistory` retrieves whatever the original key alias was
  referencing.
  """
  def get_parameter_history(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetParameterHistory", input, options)
  end
  @doc """
  Get information about one or more parameters by specifying multiple parameter
  names.
  To get information about a single parameter, you can use the `GetParameter`
  operation instead.
  """
  def get_parameters(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetParameters", input, options)
  end
  @doc """
  Retrieve information about one or more parameters in a specific hierarchy.
  Request results are returned on a best-effort basis. If you specify `MaxResults`
  in the request, the response includes information up to the limit specified. The
  number of items returned, however, can be between zero and the value of
  `MaxResults`. If the service reaches an internal limit while processing the
  results, it stops the operation and returns the matching values up to that point
  and a `NextToken`. You can specify the `NextToken` in a subsequent call to get
  the next set of results.
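  ## Example
  A recursive, decrypting walk of one hierarchy; the path is hypothetical and
  the flags come from the `GetParametersByPath` API:
      input = %{
        "Path" => "/prod/db",
        "Recursive" => true,
        "WithDecryption" => true,
        "MaxResults" => 10
      }
      {:ok, %{"Parameters" => params}, _http} = get_parameters_by_path(client, input)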
  """
  def get_parameters_by_path(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetParametersByPath", input, options)
  end
  @doc """
  Retrieves information about a patch baseline.
  """
  def get_patch_baseline(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetPatchBaseline", input, options)
  end
  @doc """
  Retrieves the patch baseline that should be used for the specified patch group.
  """
  def get_patch_baseline_for_patch_group(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetPatchBaselineForPatchGroup", input, options)
  end
  @doc """
  `ServiceSetting` is an account-level setting for an Amazon Web Services service.
  This setting defines how a user interacts with or uses a service or a feature of
  a service. For example, if an Amazon Web Services service charges money to the
  account based on feature or service usage, then the Amazon Web Services service
  team might create a default setting of `false`. This means the user can't use
  this feature unless they change the setting to `true` and intentionally opt in
  for a paid feature.
  Services map a `SettingId` object to a setting value. Amazon Web Services
  services teams define the default value for a `SettingId`. You can't create a
  new `SettingId`, but you can overwrite the default value if you have the
  `ssm:UpdateServiceSetting` permission for the setting. Use the
  `UpdateServiceSetting` API operation to change the default setting. Or use the
  `ResetServiceSetting` to change the value back to the original value defined by
  the Amazon Web Services service team.
  Query the current service setting for the Amazon Web Services account.
  """
  def get_service_setting(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "GetServiceSetting", input, options)
  end
  @doc """
  A parameter label is a user-defined alias to help you manage different versions
  of a parameter.
  When you modify a parameter, Amazon Web Services Systems Manager automatically
  saves a new version and increments the version number by one. A label can help
  you remember the purpose of a parameter when there are multiple versions.
  Parameter labels have the following requirements and restrictions.
    * A version of a parameter can have a maximum of 10 labels.
    * You can't attach the same label to different versions of the same
  parameter. For example, if version 1 has the label Production, then you can't
  attach Production to version 2.
    * You can move a label from one version of a parameter to another.
    * You can't create a label when you create a new parameter. You must
  attach a label to a specific version of a parameter.
    * If you no longer want to use a parameter label, then you can
  either delete it or move it to a different version of a parameter.
    * A label can have a maximum of 100 characters.
    * Labels can contain letters (case sensitive), numbers, periods (.),
  hyphens (-), or underscores (_).
    * Labels can't begin with a number, "`aws`" or "`ssm`" (not case
  sensitive). If a label fails to meet these requirements, then the label isn't
  associated with a parameter and the system displays it in the list of
  InvalidLabels.
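  ## Example
  A sketch that labels version 2 of a parameter; the name is a placeholder and
  the shape follows the `LabelParameterVersion` API:
      input = %{
        "Name" => "/prod/db/password",
        "ParameterVersion" => 2,
        "Labels" => ["Production"]
      }
      # Labels that break the rules above are returned under "InvalidLabels"
      {:ok, %{"InvalidLabels" => invalid}, _http} = label_parameter_version(client, input)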
  """
  def label_parameter_version(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "LabelParameterVersion", input, options)
  end
  @doc """
  Retrieves all versions of an association for a specific association ID.
  """
  def list_association_versions(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListAssociationVersions", input, options)
  end
  @doc """
  Returns all State Manager associations in the current Amazon Web Services
  account and Amazon Web Services Region.
  You can limit the results to a specific State Manager association document or
  managed node by specifying a filter. State Manager is a capability of Amazon Web
  Services Systems Manager.
  """
  def list_associations(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListAssociations", input, options)
  end
  @doc """
  An invocation is a copy of a command sent to a specific managed node.
  A command can apply to one or more managed nodes. A command invocation applies
  to one managed node. For example, if a user runs `SendCommand` against three
  managed nodes, then a command invocation is created for each requested managed
  node ID. `ListCommandInvocations` provides status about command execution.
  """
  def list_command_invocations(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListCommandInvocations", input, options)
  end
  @doc """
  Lists the commands requested by users of the Amazon Web Services account.
  """
  def list_commands(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListCommands", input, options)
  end
  @doc """
  For a specified resource ID, this API operation returns a list of compliance
  statuses for different resource types.
  Currently, you can only specify one resource ID per call. List results depend on
  the criteria specified in the filter.
  """
  def list_compliance_items(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListComplianceItems", input, options)
  end
  @doc """
  Returns a summary count of compliant and non-compliant resources for a
  compliance type.
  For example, this call can return State Manager associations, patches, or custom
  compliance types according to the filter criteria that you specify.
  """
  def list_compliance_summaries(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListComplianceSummaries", input, options)
  end
  @doc """
  Returns information about approval reviews for a version of a change template in Change
  Manager.
  """
  def list_document_metadata_history(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListDocumentMetadataHistory", input, options)
  end
  @doc """
  List all versions for a document.
  """
  def list_document_versions(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListDocumentVersions", input, options)
  end
  @doc """
  Returns all Systems Manager (SSM) documents in the current Amazon Web Services
  account and Amazon Web Services Region.
  You can limit the results of this request by using a filter.
  """
  def list_documents(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListDocuments", input, options)
  end
  @doc """
  Returns a list of inventory entries for a managed node.
  """
  def list_inventory_entries(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListInventoryEntries", input, options)
  end
  @doc """
  Returns a list of all OpsItem events in the current Amazon Web Services Region
  and Amazon Web Services account.
  You can limit the results to events associated with specific OpsItems by
  specifying a filter.
  """
  def list_ops_item_events(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListOpsItemEvents", input, options)
  end
  @doc """
  Lists all related-item resources associated with a Systems Manager OpsCenter
  OpsItem.
  OpsCenter is a capability of Amazon Web Services Systems Manager.
  """
  def list_ops_item_related_items(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListOpsItemRelatedItems", input, options)
  end
  @doc """
  Amazon Web Services Systems Manager calls this API operation when displaying all
  Application Manager OpsMetadata objects or blobs.
  """
  def list_ops_metadata(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListOpsMetadata", input, options)
  end
  @doc """
  Returns a resource-level summary count.
  The summary includes information about compliant and non-compliant statuses and
  detailed compliance-item severity counts, according to the filter criteria you
  specify.
  """
  def list_resource_compliance_summaries(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListResourceComplianceSummaries", input, options)
  end
  @doc """
  Lists your resource data sync configurations.
  Includes information about the last time a sync attempted to start, the last
  sync status, and the last time a sync successfully completed.
  The number of sync configurations might be too large to return using a single
  call to `ListResourceDataSync`. You can limit the number of sync configurations
  returned by using the `MaxResults` parameter. To determine whether there are
  more sync configurations to list, check the value of `NextToken` in the output.
  If there are more sync configurations to list, you can request them by passing
  the `NextToken` value returned by this call in the `NextToken` parameter of a
  subsequent call.
  """
  def list_resource_data_sync(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListResourceDataSync", input, options)
  end
  @doc """
  Returns a list of the tags assigned to the specified resource.
  For information about the ID format for each supported resource type, see
  `AddTagsToResource`.
  """
  def list_tags_for_resource(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ListTagsForResource", input, options)
  end
  @doc """
  Shares an Amazon Web Services Systems Manager document (SSM document) publicly or
  privately.
  If you share a document privately, you must specify the Amazon Web Services user
  account IDs for those people who can use the document. If you share a document
  publicly, you must specify *All* as the account ID.
  """
  def modify_document_permission(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ModifyDocumentPermission", input, options)
  end
  @doc """
  Registers a compliance type and other compliance details on a designated
  resource.
  This operation lets you register custom compliance details with a resource. This
  call overwrites existing compliance information on the resource, so you must
  provide a full list of compliance items each time that you send the request.
  ComplianceType can be one of the following:
    * ExecutionId: The execution ID when the patch, association, or
  custom compliance item was applied.
    * ExecutionType: Specify patch, association, or Custom:`string`.
    * ExecutionTime: The time the patch, association, or custom
  compliance item was applied to the managed node.
    * Id: The patch, association, or custom compliance ID.
    * Title: A title.
    * Status: The status of the compliance item. For example, `approved`
  for patches, or `Failed` for associations.
    * Severity: A patch severity. For example, `critical`.
    * DocumentName: An SSM document name. For example,
  `AWS-RunPatchBaseline`.
    * DocumentVersion: An SSM document version number. For example, 4.
    * Classification: A patch classification. For example, `security
  updates`.
    * PatchBaselineId: A patch baseline ID.
    * PatchSeverity: A patch severity. For example, `Critical`.
    * PatchState: A patch state. For example,
  `InstancesWithFailedPatches`.
    * PatchGroup: The name of a patch group.
    * InstalledTime: The time the association, patch, or custom
  compliance item was applied to the resource. Specify the time by using the
  following format: yyyy-MM-dd'T'HH:mm:ss'Z'
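  ## Example
  A sketch that records one custom compliance item on a managed node; the IDs
  and type values are placeholders, and `ExecutionTime` is assumed to be an
  epoch-seconds timestamp:
      input = %{
        "ResourceId" => "i-0123456789abcdef0",
        "ResourceType" => "ManagedInstance",
        "ComplianceType" => "Custom:Backup",
        "ExecutionSummary" => %{"ExecutionTime" => 1_650_000_000},
        # Remember: this call overwrites existing items, so send the full list
        "Items" => [
          %{"Id" => "backup-check", "Severity" => "CRITICAL", "Status" => "COMPLIANT"}
        ]
      }
      {:ok, _body, _http} = put_compliance_items(client, input)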
  """
  def put_compliance_items(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "PutComplianceItems", input, options)
  end
  @doc """
  Bulk update custom inventory items on one or more managed nodes.
  The request adds an inventory item, if it doesn't already exist, or updates an
  inventory item, if it does exist.
  """
  def put_inventory(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "PutInventory", input, options)
  end
  @doc """
  Add a parameter to the system.
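  ## Example
  A minimal sketch; the name and value are placeholders and the fields follow
  the `PutParameter` API:
      input = %{
        "Name" => "/prod/db/password",
        "Value" => "correct-horse-battery-staple",
        "Type" => "SecureString",
        "Overwrite" => true
      }
      {:ok, %{"Version" => version}, _http} = put_parameter(client, input)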
  """
  def put_parameter(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "PutParameter", input, options)
  end
  @doc """
  Defines the default patch baseline for the relevant operating system.
  To reset the Amazon Web Services-predefined patch baseline as the default,
  specify the full patch baseline Amazon Resource Name (ARN) as the baseline ID
  value. For example, for CentOS, specify
  `arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed` instead
  of `pb-0574b43a65ea646ed`.
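  ## Example
  A sketch that resets the predefined CentOS baseline as the default, using the
  full ARN as described above:
      input = %{
        "BaselineId" =>
          "arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed"
      }
      {:ok, %{"BaselineId" => _}, _http} = register_default_patch_baseline(client, input)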
  """
  def register_default_patch_baseline(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "RegisterDefaultPatchBaseline", input, options)
  end
  @doc """
  Registers a patch baseline for a patch group.
  """
  def register_patch_baseline_for_patch_group(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "RegisterPatchBaselineForPatchGroup", input, options)
  end
  @doc """
  Registers a target with a maintenance window.
  """
  def register_target_with_maintenance_window(%Client{} = client, input, options \\ []) do
    Request.request_post(
      client,
      metadata(),
      "RegisterTargetWithMaintenanceWindow",
      input,
      options
    )
  end
  @doc """
  Adds a new task to a maintenance window.
  """
  def register_task_with_maintenance_window(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "RegisterTaskWithMaintenanceWindow", input, options)
  end
  @doc """
  Removes tag keys from the specified resource.
  """
  def remove_tags_from_resource(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "RemoveTagsFromResource", input, options)
  end
  @doc """
  `ServiceSetting` is an account-level setting for an Amazon Web Services service.
  This setting defines how a user interacts with or uses a service or a feature of
  a service. For example, if an Amazon Web Services service charges money to the
  account based on feature or service usage, then the Amazon Web Services service
  team might create a default setting of "false". This means the user can't use
  this feature unless they change the setting to "true" and intentionally opt in
  for a paid feature.
  Services map a `SettingId` object to a setting value. Amazon Web Services
  services teams define the default value for a `SettingId`. You can't create a
  new `SettingId`, but you can overwrite the default value if you have the
  `ssm:UpdateServiceSetting` permission for the setting. Use the
  `GetServiceSetting` API operation to view the current value. Use the
  `UpdateServiceSetting` API operation to change the default setting.
  Reset the service setting for the account to the default value as provisioned by
  the Amazon Web Services service team.
  """
  def reset_service_setting(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ResetServiceSetting", input, options)
  end
  @doc """
  Reconnects a session to a managed node after it has been disconnected.
  Connections can be resumed for disconnected sessions, but not terminated
  sessions.
  This command is primarily for use by client machines to automatically reconnect
  during intermittent network issues. It isn't intended for any other use.
  """
  def resume_session(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "ResumeSession", input, options)
  end
  @doc """
  Sends a signal to an Automation execution to change the current behavior or
  status of the execution.
  """
  def send_automation_signal(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "SendAutomationSignal", input, options)
  end
  @doc """
  Runs commands on one or more managed nodes.
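  ## Example
  A sketch that runs a shell command on two nodes with the `AWS-RunShellScript`
  document; the node IDs are placeholders and the shape follows the
  `SendCommand` API:
      input = %{
        "DocumentName" => "AWS-RunShellScript",
        "InstanceIds" => ["i-0123456789abcdef0", "i-0fedcba9876543210"],
        "Parameters" => %{"commands" => ["uptime"]}
      }
      {:ok, %{"Command" => %{"CommandId" => command_id}}, _http} =
        send_command(client, input)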
  """
  def send_command(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "SendCommand", input, options)
  end
  @doc """
  Runs an association immediately and only one time.
  This operation can be helpful when troubleshooting associations.
  """
  def start_associations_once(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "StartAssociationsOnce", input, options)
  end
  @doc """
  Initiates execution of an Automation runbook.
  """
  def start_automation_execution(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "StartAutomationExecution", input, options)
  end
  @doc """
  Creates a change request for Change Manager.
  The Automation runbooks specified in the change request run only after all
  required approvals for the change request have been received.
  """
  def start_change_request_execution(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "StartChangeRequestExecution", input, options)
  end
  @doc """
  Initiates a connection to a target (for example, a managed node) for a Session
  Manager session.
  Returns a URL and token that can be used to open a WebSocket connection for
  sending input and receiving outputs.
  Amazon Web Services CLI usage: `start-session` is an interactive command that
  requires the Session Manager plugin to be installed on the client machine making
  the call. For information, see [Install the Session Manager plugin for the Amazon Web Services
  CLI](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  Amazon Web Services Tools for PowerShell usage: Start-SSMSession isn't currently
  supported by Amazon Web Services Tools for PowerShell on Windows local machines.
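  ## Example
  A sketch that opens a session to one node; the target ID is a placeholder and
  the shape follows the `StartSession` API:
      input = %{"Target" => "i-0123456789abcdef0"}
      # The returned URL and token are what the Session Manager plugin uses to
      # open its WebSocket connection.
      {:ok, %{"SessionId" => _, "StreamUrl" => _, "TokenValue" => _}, _http} =
        start_session(client, input)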
  """
  def start_session(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "StartSession", input, options)
  end
  @doc """
  Stop an Automation that is currently running.
  """
  def stop_automation_execution(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "StopAutomationExecution", input, options)
  end
  @doc """
  Permanently ends a session and closes the data connection between the Session
  Manager client and SSM Agent on the managed node.
  A terminated session can't be resumed.
  """
  def terminate_session(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "TerminateSession", input, options)
  end
  @doc """
  Remove a label or labels from a parameter.
  """
  def unlabel_parameter_version(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UnlabelParameterVersion", input, options)
  end
  @doc """
  Updates an association.
  You can update the association name and version, the document version, schedule,
  parameters, and Amazon Simple Storage Service (Amazon S3) output.
  In order to call this API operation, your Identity and Access Management (IAM)
  user account, group, or role must be configured with permission to call the
  `DescribeAssociation` API operation. If you don't have permission to call
  `DescribeAssociation`, then you receive the following error: `An error occurred
  (AccessDeniedException) when calling the UpdateAssociation operation: User:
  <user_arn> isn't authorized to perform: ssm:DescribeAssociation on resource:
  <resource_arn>`
  When you update an association, the association immediately runs against the
  specified targets.
  """
  def update_association(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateAssociation", input, options)
  end
  @doc """
  Updates the status of the Amazon Web Services Systems Manager document (SSM
  document) associated with the specified managed node.
  `UpdateAssociationStatus` is primarily used by the Amazon Web Services Systems
  Manager Agent (SSM Agent) to report status updates about your associations and
  is only used for associations created with the `InstanceId` legacy parameter.
  """
  def update_association_status(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateAssociationStatus", input, options)
  end
  @doc """
  Updates one or more values for an SSM document.
  """
  def update_document(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateDocument", input, options)
  end
  @doc """
  Set the default version of a document.
  """
  def update_document_default_version(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateDocumentDefaultVersion", input, options)
  end
  @doc """
  Updates information related to approval reviews for a specific version of a
  change template in Change Manager.
  """
  def update_document_metadata(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateDocumentMetadata", input, options)
  end
  @doc """
  Updates an existing maintenance window.
  Only specified parameters are modified.
  The value you specify for `Duration` determines the specific end time for the
  maintenance window based on the time it begins. No maintenance window tasks are
  permitted to start after the resulting end time minus the number of hours you
  specify for `Cutoff`. For example, if the maintenance window starts at 3 PM, the
  duration is three hours, and the value you specify for `Cutoff` is one hour, no
  maintenance window tasks can start after 5 PM.
  """
  def update_maintenance_window(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateMaintenanceWindow", input, options)
  end
  @doc """
  Modifies the target of an existing maintenance window.
  You can change the following:
    * Name
    * Description
    * Owner
    * IDs for an ID target
    * Tags for a Tag target
    * From any supported tag type to another. The three supported tag
  types are ID target, Tag target, and resource group. For more information, see
  `Target`.
  If a parameter is null, then the corresponding field isn't modified.
  """
  def update_maintenance_window_target(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateMaintenanceWindowTarget", input, options)
  end
  @doc """
  Modifies a task assigned to a maintenance window.
  You can't change the task type, but you can change the following values:
    * `TaskARN`. For example, you can change a `RUN_COMMAND` task from
  `AWS-RunPowerShellScript` to `AWS-RunShellScript`.
    * `ServiceRoleArn`
    * `TaskInvocationParameters`
    * `Priority`
    * `MaxConcurrency`
    * `MaxErrors`
  One or more targets must be specified for maintenance window Run Command-type
  tasks. Depending on the task, targets are optional for other maintenance window
  task types (Automation, Lambda, and Step Functions). For more information about
  running tasks that don't specify targets, see [Registering maintenance window tasks without
  targets](https://docs.aws.amazon.com/systems-manager/latest/userguide/maintenance-windows-targetless-tasks.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  If the value for a parameter in `UpdateMaintenanceWindowTask` is null, then the
  corresponding field isn't modified. If you set `Replace` to true, then all
  fields required by the `RegisterTaskWithMaintenanceWindow` operation are
  required for this request. Optional fields that aren't specified are set to
  null.
  When you update a maintenance window task that has options specified in
  `TaskInvocationParameters`, you must provide again all the
  `TaskInvocationParameters` values that you want to retain. The values you don't
  specify again are removed. For example, suppose that when you registered a Run
  Command task, you specified `TaskInvocationParameters` values for `Comment`,
  `NotificationConfig`, and `OutputS3BucketName`. If you update the maintenance
  window task and specify only a different `OutputS3BucketName` value, the values
  for `Comment` and `NotificationConfig` are removed.
  """
  def update_maintenance_window_task(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateMaintenanceWindowTask", input, options)
  end
  @doc """
  Changes the Identity and Access Management (IAM) role that is assigned to the
  on-premises server, edge device, or virtual machine (VM).
  IAM roles are first assigned to these hybrid nodes during the activation
  process. For more information, see `CreateActivation`.
  """
  def update_managed_instance_role(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateManagedInstanceRole", input, options)
  end
  @doc """
  Edit or change an OpsItem.
  You must have permission in Identity and Access Management (IAM) to update an
  OpsItem. For more information, see [Getting started with OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  Operations engineers and IT professionals use Amazon Web Services Systems
  Manager OpsCenter to view, investigate, and remediate operational issues
  impacting the performance and health of their Amazon Web Services resources. For
  more information, see
  [OpsCenter](https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
  in the *Amazon Web Services Systems Manager User Guide*.
  """
  def update_ops_item(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateOpsItem", input, options)
  end
  @doc """
  Amazon Web Services Systems Manager calls this API operation when you edit
  OpsMetadata in Application Manager.
  """
  def update_ops_metadata(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateOpsMetadata", input, options)
  end
  @doc """
  Modifies an existing patch baseline.
  Fields not specified in the request are left unchanged.
  For information about valid key-value pairs in `PatchFilters` for each supported
  operating system type, see `PatchFilter`.
  """
  def update_patch_baseline(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdatePatchBaseline", input, options)
  end
  @doc """
  Update a resource data sync.
  After you create a resource data sync for a Region, you can't change the account
  options for that sync. For example, if you create a sync in the us-east-2 (Ohio)
  Region and you choose the `Include only the current account` option, you can't
  edit that sync later and choose the `Include all accounts from my Organizations
  configuration` option. Instead, you must delete the first resource data sync,
  and create a new one.
  This API operation only supports a resource data sync that was created with a
  SyncFromSource `SyncType`.
  """
  def update_resource_data_sync(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateResourceDataSync", input, options)
  end
  @doc """
  `ServiceSetting` is an account-level setting for an Amazon Web Services service.
  This setting defines how a user interacts with or uses a service or a feature of
  a service. For example, if an Amazon Web Services service charges money to the
  account based on feature or service usage, then the Amazon Web Services service
  team might create a default setting of "false". This means the user can't use
  this feature unless they change the setting to "true" and intentionally opt in
  for a paid feature.
  Services map a `SettingId` object to a setting value. Amazon Web Services
  services teams define the default value for a `SettingId`. You can't create a
  new `SettingId`, but you can overwrite the default value if you have the
  `ssm:UpdateServiceSetting` permission for the setting. Use the
  `GetServiceSetting` API operation to view the current value. Or, use the
  `ResetServiceSetting` to change the value back to the original value defined by
  the Amazon Web Services service team.
  Update the service setting for the account.
  """
  def update_service_setting(%Client{} = client, input, options \\ []) do
    Request.request_post(client, metadata(), "UpdateServiceSetting", input, options)
  end
end | 
	lib/aws/generated/ssm.ex | 0.902295 | 0.557905 | 
	ssm.ex | 
	starcoder | 
| 
	defmodule CyberSourceSDK.Helper do
  @moduledoc """
  Small utility functions
  """
  @doc """
  Convert a Map that have the keys as strings to atoms
  ## Examples
      iex> CyberSourceSDK.Helper.convert_map_to_key_atom(%{"a" => 3, "b" => 5})
      %{a: 3, b: 5}
  """
  def convert_map_to_key_atom(string_key_map) when is_map(string_key_map) do
    for {key, val} <- string_key_map, into: %{}, do: {String.to_atom(key), convert_map_to_key_atom(val)}
  end
  def convert_map_to_key_atom(list_maps) when is_list(list_maps) do
    Enum.map(list_maps, fn (map) -> convert_map_to_key_atom(map) end)
  end
  def convert_map_to_key_atom(string_key_map) when is_number(string_key_map) or is_nil(string_key_map) do
    string_key_map
  end
  def convert_map_to_key_atom(value) do
    if String.valid?(value) do
      value
    else
      Kernel.inspect(value) # Convert to string
    end
  end
  @doc """
  Decode Base64 string to JSON structure
  ## Examples
      iex> CyberSourceSDK.Helper.json_from_base64("eyJhIjogMiwgImIiOiAzfQ==")
      {:ok, %{a: 2, b: 3}}
  """
  def json_from_base64(base64_string) do
    case Base.decode64(base64_string) do
      {:ok, json} ->
        case Poison.Parser.parse(json) do
          {:ok, json} -> {:ok, convert_map_to_key_atom(json)}
          {:error, reason} -> {:error, reason}
        end
      _ -> {:error, :bad_base64_encoding}
    end
  end
  @doc """
  Check which type of payment a payload represents: Android Pay or Apple Pay
  ## Results
  - `{:ok, :apple_pay}`
  - `{:ok, :android_pay}`
  - `{:ok, :not_found_payment_type}`
  - `{:error, :invalid_base64_or_json}`
  """
  def check_payment_type(encrypted_payload) do
    case json_from_base64(encrypted_payload) do
      {:ok, data} ->
        header = Map.get(data, :header)
        signature = Map.get(data, :signature)
        publicKeyHash = Map.get(data, :publicKeyHash)
        cond do
          !is_nil(header) && !is_nil(signature) -> {:ok, :apple_pay}
          !is_nil(publicKeyHash) -> {:ok, :android_pay}
          true -> {:ok, :not_found_payment_type}
        end
      {:error, _reason} -> {:error, :invalid_base64_or_json}
    end
  end
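  # Illustrative call with a hypothetical Apple Pay payload: a Base64-encoded
  # JSON object carrying `header` and `signature` keys is classified as Apple
  # Pay by the `cond` above.
  #
  #     payload = Base.encode64(~s({"header": {}, "signature": "...", "data": "..."}))
  #     CyberSourceSDK.Helper.check_payment_type(payload)
  #     #=> {:ok, :apple_pay}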
  @doc """
  Invalid configuration message error
  """
  def invalid_merchant_configuration() do
    {:error, "Invalid merchant configuration"}
  end
end | 
	lib/cybersource-sdk/helper.ex | 0.771843 | 0.517144 | 
	helper.ex | 
	starcoder | 
| 
	defmodule Trifolium.Genus do
  @moduledoc """
    Module to be used to interact with Trefle [Genus](https://docs.trefle.io/reference/#tag/Genus) related endpoints.
  """
  alias Trifolium.Config
  alias Trifolium.API
  @endpoint_path "api/v1/genus/"
  @http_client Config.http_client()
  @doc """
    List every possible `Genus`.
    This endpoint IS paginated, using an optional `page` keyword parameter. By default, page 1 is returned.
  """
  @spec all(page: non_neg_integer(), filter: map(), order: map()) :: API.response()
  def all(opts \\ []) do
    @http_client.get(
      get_path(),
      [],
      params: API.build_query_params(opts)
    )
    |> API.parse_response()
  end
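  # Illustrative calls (the `name` order field is an assumption about the
  # Trefle API, not something this module defines):
  #
  #     Trifolium.Genus.all()                               # page 1
  #     Trifolium.Genus.all(page: 2, order: %{name: :asc})  # page 2, ordered by name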
  @doc """
    Find a specific `Genus` according to its `id` or `slug`.
  """
  @spec find(non_neg_integer() | String.t()) :: API.response()
  def find(id) do
    @http_client.get(
      get_path("#{id}"),
      [],
      params: API.build_query_params()
    )
    |> API.parse_response()
  end
  @doc """
    Lists all available `Plant`s for a specific `Genus` according to its `id` or `slug`.
    You can paginate this endpoint, and also filter it, as explained on Trefle documentation.
    You can use a `filter` or a `filter_not` like so:
    ```
    iex()> Trifolium.Genus.plants(id, filter: %{year: year})
    ```
    The same applies to the `order` and `range` parameters: just pass a map and
    it will be correctly parsed into the query parameters.
  """
  @spec plants(
          non_neg_integer() | String.t(),
          filter: map,
          filter_not: map,
          order: map,
          range: map,
          page: non_neg_integer()
        ) :: API.response()
  def plants(id, opts \\ []) do
    @http_client.get(
      get_path("#{id}/plants"),
      [],
      params: API.build_query_params(opts)
    )
    |> API.parse_response()
  end
  @doc """
    Lists all available `Specie`s for a specific `Genus` according to its `id` or `slug`.
    You can paginate this endpoint, and also filter it, as explained on Trefle documentation.
    You can use a `filter` or a `filter_not` like so:
    ```
    iex()> Trifolium.Genus.species(id, filter: %{year: year})
    ```
    The same applies to the `order` and `range` parameters: just pass a map and
    it will be correctly parsed into the query parameters.
  """
  @spec species(
          non_neg_integer() | String.t(),
          filter: map,
          filter_not: map,
          order: map,
          range: map,
          page: non_neg_integer()
        ) :: API.response()
  def species(id, opts \\ []) do
    @http_client.get(
      get_path("#{id}/species"),
      [],
      params: API.build_query_params(opts)
    )
    |> API.parse_response()
  end
  @spec get_path(String.t()) :: String.t()
  defp get_path(url \\ "") do
    Config.base_url() <> @endpoint_path <> url
  end
end | 
	lib/trifolium/endpoints/genus.ex | 0.833968 | 0.696139 | 
	genus.ex | 
	starcoder | 
| 
	defmodule SanbaseWeb.Graphql.Helpers.Utils do
  import Sanbase.DateTimeUtils, only: [round_datetime: 2, str_to_sec: 1]
  def selector_args_to_opts(args) when is_map(args) do
    opts = [aggregation: Map.get(args, :aggregation, nil)]
    selector = args[:selector]
    opts =
      if is_map(selector) do
        opts
        |> maybe_add_field(:additional_filters, selector)
        |> maybe_add_field(:source, selector)
      else
        opts
      end
    {:ok, opts}
  end
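  # A sketch of the transformation performed above (the selector fields are
  # illustrative). Note how the plural `owners` is renamed to the singular
  # `owner` inside `additional_filters`:
  #
  #     selector_args_to_opts(%{selector: %{source: "dex", owners: ["uniswap"]}})
  #     #=> {:ok, [source: "dex", additional_filters: [owner: ["uniswap"]], aggregation: nil]}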
  @doc ~s"""
  Works when the result is a list of elements that contain a datetime and the query arguments
  have a `from` argument. In that case leading elements whose `datetime`, rounded to
  the query interval, is before the similarly rounded `from` are dropped.
  This is used when a query to InfluxDB is made. InfluxDB can return a timestamp
  that's outside the `from` - `to` interval due to its inner workings with buckets
  """
  def fit_from_datetime([%{datetime: _} | _] = data, %{from: from, interval: interval}) do
    interval_sec = str_to_sec(interval)
    from = round_datetime(from, second: interval_sec)
    result =
      Enum.drop_while(data, fn %{datetime: datetime} ->
        datetime = round_datetime(datetime, second: interval_sec)
        DateTime.compare(datetime, from) == :lt
      end)
    {:ok, result}
  end
  def fit_from_datetime(result, _args), do: {:ok, result}
  @doc ~s"""
  Extract the arguments passed to the root query from subfield resolution
  """
  def extract_root_query_args(resolution, root_query_name) do
    root_query_camelized = Absinthe.Utils.camelize(root_query_name, lower: true)
    resolution.path
    |> Enum.find(fn x -> is_map(x) && x.name == root_query_camelized end)
    |> Map.get(:argument_data)
  end
  @doc ~s"""
  Transform the UserTrigger structure to be more easily consumed by the API.
  This is done by propagating the tags and the UserTrigger id into the Trigger
  structure
  """
  def transform_user_trigger(%Sanbase.Alert.UserTrigger{trigger: trigger, tags: tags} = ut) do
    ut = Map.from_struct(ut)
    trigger = Map.from_struct(trigger)
    %{
      ut
      | trigger: trigger |> Map.put(:tags, tags) |> Map.put(:id, ut.id)
    }
  end
  def replace_user_trigger_with_trigger(data) when is_map(data) do
    case data do
      %{user_trigger: ut} = elem when not is_nil(ut) ->
        elem
        |> Map.drop([:__struct__, :user_trigger])
        |> Map.put(:trigger, Map.get(transform_user_trigger(ut), :trigger))
      elem ->
        elem
    end
  end
  def replace_user_trigger_with_trigger(data) when is_list(data) do
    data |> Enum.map(&replace_user_trigger_with_trigger/1)
  end
  @spec requested_fields(%Absinthe.Resolution{}) :: MapSet.t()
  def requested_fields(%Absinthe.Resolution{} = resolution) do
    resolution.definition.selections
    |> Enum.map(fn %{name: name} -> Inflex.camelize(name, :lower) end)
    |> MapSet.new()
  end
  def requested_fields(_), do: MapSet.new([])
  # Private functions
  @fields [:owner, :label, :label_fqn, :label_fqns, :blockchain, :owners, :labels]
  defp maybe_add_field(opts, :additional_filters, selector) do
    case Map.split(selector, @fields) do
      {map, _rest} when map_size(map) > 0 ->
        # Rename the plurals to singulars. This is done to simplify the
        # SQL generation
        map =
          map
          |> maybe_rename_field(:owners, :owner)
          |> maybe_rename_field(:labels, :label)
          |> maybe_rename_field(:label_fqns, :label_fqn)
        [additional_filters: Keyword.new(map)] ++ opts
      _ ->
        opts
    end
  end
  defp maybe_add_field(opts, field, selector) when is_atom(field) do
    case Map.has_key?(selector, field) do
      true -> [{field, Map.fetch!(selector, field)}] ++ opts
      false -> opts
    end
  end
  defp maybe_rename_field(map, old_key, new_key) do
    case Map.has_key?(map, old_key) do
      true ->
        value = Map.get(map, old_key)
        map |> Map.delete(old_key) |> Map.put(new_key, value)
      false ->
        map
    end
  end
end | 
	lib/sanbase_web/graphql/helpers/utils.ex | 0.732592 | 0.460228 | 
	utils.ex | 
	starcoder | 
| 
	defmodule Strava.Deserializer do
  @moduledoc """
  Helper functions for deserializing responses into models.
  """
  @doc """
  Update the provided model with a deserialization of a nested value.
  """
  @spec deserialize(
          struct(),
          atom(),
          :list | :struct | :map | :date | :datetime,
          module(),
          map()
        ) :: any()
  def deserialize(model, field, type, mod, options)
  def deserialize(model, field, :list, mod, options) do
    Map.update!(
      model,
      field,
      &transform(&1, Map.put(options, :as, [struct(mod)]))
    )
  end
  def deserialize(model, field, :struct, mod, options) do
    Map.update!(
      model,
      field,
      &transform(&1, Map.put(options, :as, struct(mod)))
    )
  end
  def deserialize(model, field, :map, mod, options) do
    Map.update!(
      model,
      field,
      &Map.new(&1, fn {key, val} ->
        {key, transform(val, Map.put(options, :as, struct(mod)))}
      end)
    )
  end
  def deserialize(model, field, :date, _mod, _options) do
    with value when is_binary(value) <- Map.get(model, field),
         {:ok, date} <- Date.from_iso8601(value) do
      Map.put(model, field, date)
    else
      _ -> model
    end
  end
  def deserialize(model, field, :datetime, _mod, _options) do
    with value when is_binary(value) <- Map.get(model, field),
         {:ok, datetime, _offset} <- DateTime.from_iso8601(value) do
      Map.put(model, field, datetime)
    else
      _ -> model
    end
  end
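  # Usage sketch (the map and field name are hypothetical): a model holding an
  # ISO 8601 string gets the field replaced by a parsed `DateTime`.
  #
  #     %{start_date: "2018-02-16T14:52:54Z"}
  #     |> deserialize(:start_date, :datetime, nil, %{})
  #     #=> %{start_date: ~U[2018-02-16 14:52:54Z]}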
  if Code.ensure_loaded?(Poison.Decode) do
    cond do
      function_exported?(Poison.Decode, :transform, 2) ->
        def transform(value, options) do
          Poison.Decode.transform(value, options)
        end
      function_exported?(Poison.Decode, :decode, 2) ->
        def transform(value, options) do
          Poison.Decode.decode(value, options)
        end
      true ->
        raise "No suitable `Poison.Decode.transform/2` or `Poison.Decode.decode/2` function found"
    end
  end
end | 
	lib/strava/deserializer.ex | 0.795261 | 0.439086 | 
	deserializer.ex | 
	starcoder | 
| 
	defmodule OliWeb.Common.Table.SortableTableModel do
  @moduledoc """
  The model for the sortable table LiveComponent.
  The model consists of the `rows` that the table will display.  This must be in the form of an enumeration of
  either maps or structs.
  The `column_specs` are the specifications of the columns that the table will render. This does not need to be a
  one-to-one mapping with the attributes present in the rows, and in fact is designed to allow omitting
  columns and implementing derived columns.
  The `selected` item stores the currently selected item - if there is one. A client LiveView can simply choose
  to ignore `selected` events, thus effectively leaving that sortable table with rows that cannot be selected.
  The `sort_by_spec` and `sort_order` attributes power the sorting of the table.  The `sort_by_spec` is
  a reference to the current column that the rows are sorted by.  Given that column specs can supply an
  arbitrary sort function, a complex sorting implementation is possible where primary, secondary, and
  even tertiary columns contribute to the sort order.  The `sort_by_spec` cannot be nil.
  The `event_suffix` is a string suffix to append to the event names that the table component will dispatch. This
  is used to allow a parent LiveView to differentiate between multiple tables in situations where multiple tables
  exist in the LiveView.  This suffix is also appended to URI parameter names when the attributes of a sortable
  table model are placed in the browser URL for live navigation.
  The `id_field` is the field name from the row items that uniquely identifies a row item.  This will be used
  when emitting events related to a particular row item.
  """
  alias OliWeb.Common.Table.ColumnSpec
  # the items to display
  defstruct rows: [],
            # the columns to display
            column_specs: [],
            # the selected row
            selected: nil,
            # the column that is being sorted by
            sort_by_spec: nil,
            # the sort order, :asc or :desc
            sort_order: :asc,
            event_suffix: "",
            # the field used to identify uniquely a row item
            id_field: nil
  def new(rows: rows, column_specs: column_specs, event_suffix: event_suffix, id_field: id_field) do
    model =
      %__MODULE__{
        rows: rows,
        column_specs: column_specs,
        event_suffix: event_suffix,
        id_field: id_field,
        sort_by_spec: hd(column_specs)
      }
      |> sort
    {:ok, model}
  end
  def new(
        rows: rows,
        column_specs: column_specs,
        event_suffix: event_suffix,
        id_field: id_field,
        sort_by_spec: sort_by_spec
      ) do
    model =
      %__MODULE__{
        rows: rows,
        column_specs: column_specs,
        event_suffix: event_suffix,
        id_field: id_field,
        sort_by_spec: sort_by_spec
      }
      |> sort
    {:ok, model}
  end
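  # Illustrative construction (assumes the module is aliased and that `users`
  # is a list of maps with `:id`, `:name`, and `:email` keys):
  #
  #     {:ok, model} =
  #       SortableTableModel.new(
  #         rows: users,
  #         column_specs: [%ColumnSpec{name: :name}, %ColumnSpec{name: :email}],
  #         event_suffix: "_users",
  #         id_field: :id
  #       )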
  def update_selection(%__MODULE__{rows: rows, id_field: id_field} = struct, selected_id) do
    Map.put(
      struct,
      :selected,
      Enum.find(rows, fn row -> Map.get(row, id_field) == selected_id end)
    )
  end
  def update_sort_params(%__MODULE__{sort_order: sort_order} = struct, column_name) do
    current_spec_name = struct.sort_by_spec.name
    case Enum.find(struct.column_specs, fn spec -> spec.name == column_name end) do
      %{name: ^current_spec_name} ->
        Map.put(
          struct,
          :sort_order,
          if sort_order == :asc do
            :desc
          else
            :asc
          end
        )
      spec ->
        Map.put(struct, :sort_by_spec, spec)
    end
  end
  def update_sort_params_and_sort(%__MODULE__{} = struct, column_name) do
    update_sort_params(struct, column_name)
    |> sort
  end
  def sort(%__MODULE__{rows: rows, sort_by_spec: sort_by_spec, sort_order: sort_order} = struct) do
    sort_fn =
      case sort_by_spec.sort_fn do
        nil -> ColumnSpec.default_sort_fn(sort_order, sort_by_spec)
        func -> func.(sort_order, sort_by_spec)
      end
    struct
    |> Map.put(:rows, Enum.sort(rows, sort_fn))
  end
  def to_params(%__MODULE__{} = struct) do
    Map.put(%{}, "sort_by" <> struct.event_suffix, struct.sort_by_spec.name)
    |> Map.put("sort_order" <> struct.event_suffix, struct.sort_order)
    |> Map.put(
      "selected" <> struct.event_suffix,
      if struct.selected != nil do
        Map.get(struct.selected, struct.id_field)
      else
        nil
      end
    )
  end
  def update_from_params(%__MODULE__{} = struct, params) do
    column_names =
      Enum.reduce(struct.column_specs, %{}, fn spec, m ->
        Map.put(m, Atom.to_string(spec.name), spec)
      end)
    sort_by =
      case Map.get(column_names, params["sort_by" <> struct.event_suffix]) do
        nil -> struct.sort_by_spec
        spec -> spec
      end
    sort_order =
      case params["sort_order" <> struct.event_suffix] do
        sort_order when sort_order in ~w(asc desc) -> String.to_existing_atom(sort_order)
        _ -> struct.sort_order
      end
    selected = params["selected" <> struct.event_suffix]
    Map.put(struct, :sort_by_spec, sort_by)
    |> Map.put(:sort_order, sort_order)
    |> update_selection(selected)
    |> sort
  end
end | 
	lib/oli_web/live/common/table/sortable_table_model.ex | 0.861305 | 0.798854 | 
	sortable_table_model.ex | 
	starcoder | 
| 
	defmodule PersistentVector do
  @moduledoc """
  `PersistentVector` is an array-like collection of values indexed by contiguous `0`-based integer index
  and optimized for growing/shrinking at the end.
  `PersistentVector` optimizes the following operations:
  * Get element count
  * Lookup element by index
  * Update element by index
  * Adding new element to the end
  * Removing element from the end
  * Enumeration
  Get count operation is `O(1)`, most others are `O(log32(N))`.
  `PersistentVector` is implemented as a trie with 32-way branching at each level and uses *structural sharing* for updates.
  All ideas are borrowed directly from Clojure, yet the implementation (and all the bugs) are my own.
  ### Supported protocols
  `PersistentVector` implements the following protocols/behaviors:
  * `Access`
  * `Collectable`
  * `Enumerable`
  * `Inspect`
  ## Usage example
      iex> v = new(1..3)
      #PersistentVector<count: 3, [1, 2, 3]>
      iex> get(v, 0)
      1
      iex> v[1]
      2
      iex> set(v, 1, :two)
      #PersistentVector<count: 3, [1, :two, 3]>
      iex> v # but v remains unchanged
      #PersistentVector<count: 3, [1, 2, 3]>
      iex> append(v, 4)
      #PersistentVector<count: 4, [1, 2, 3, 4]>
      iex> remove_last(v)
      #PersistentVector<count: 2, [1, 2]>
  ## Efficiency
  Creating big vectors is OK both CPU-wise and memory-wise.
  For a `100_000`-element vector the trie depth is 4
  *(because `log32(100_000) ≈ 3.3`)*, leading to fast lookup in 4 hops:
      iex> big = new(100_000..0)
      iex> big[70_000]
      30_000
  Update is also fast and efficient as it needs to build only 4 new trie nodes.
  Apart from that `big1` and `big2` share majority of the elements, leading to efficient memory usage:
      iex> big1 = new(100_000..0)
      iex> big2 = set(big1, 70_000, "thirty thousand")
      iex> big2[70_000]
      "thirty thousand"
  """
  use Bitwise
  @shift if Mix.env() == :test, do: 2, else: 5
  @block 1 <<< @shift
  @mask @block - 1
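  # Index arithmetic sketch: with the production @shift of 5, each trie level
  # consumes 5 bits of the index. For i = 70_000 and a root shift of 15:
  #
  #     (70_000 >>> 15) &&& 31  #=> 2   (child slot at the root)
  #     (70_000 >>> 10) &&& 31  #=> 4   (next level down)
  #     (70_000 >>> 5)  &&& 31  #=> 11
  #     70_000 &&& 31           #=> 16  (slot in the leaf tuple)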
  @state __MODULE__
  @typep shift :: pos_integer
  @typedoc "Integer >= 0 for indexing elements."
  @type index :: non_neg_integer
  @typedoc "Stored values."
  @type value :: any
  @typedoc "The `PersistentVector` itself."
  @type t :: %__MODULE__{count: index, shift: shift, root: tuple, tail: tuple}
  defstruct(
    count: 0,
    shift: @shift,
    root: {},
    tail: {}
  )
  @compile {:inline, tail_start: 1}
  @spec tail_start(t) :: index
  defp tail_start(v = %@state{}), do: v.count - tuple_size(v.tail)
  @doc "Returns empty `PersistentVector`, same as `new/0`."
  @spec empty() :: t
  def empty(), do: %@state{}
  @doc "Creates new empty `PersistentVector`, same as `empty/0`."
  @spec new() :: t
  def new(), do: %@state{}
  @doc "Returns `PersistentVector` with elements from `enumerable`."
  @spec new(Enumerable.t) :: t
  def new(enumerable), do: enumerable |> Enum.reduce(empty(), &(&2 |> append(&1)))
  @doc "Returns `true` if `v` is empty and `false` otherwise."
  @spec empty?(t) :: boolean
  def empty?(v)
  def empty?(%@state{count: 0}), do: true
  def empty?(%@state{}), do: false
  @doc "Returns element count in `v`."
  @spec count(t) :: index
  def count(v)
  def count(%@state{count: count}), do: count
  @doc """
  Returns value of element in `v` at `0`-based `index`.
  Index must be an integer and satisfy condition `0 <= index < count(v)` or `ArgumentError` will be raised.
  **Note** that since `PersistentVector` implements `Access` behavior, a shorter syntax `v[i]` can be used.
  *See also:* `get/3`
  ## Examples:
      iex> v = new([:a, :b, :c])
      #PersistentVector<count: 3, [:a, :b, :c]>
      iex> get(v, 0)
      :a
      iex> v[1]
      :b
      iex> get(v, 10)
      ** (ArgumentError) Attempt to get index 10 for vector of size 3
      iex> v[10]
      nil
  """
  @spec get(t, index) :: value | no_return
  def get(v = %@state{count: count}, index)
    when is_integer(index) and index >= 0 and index < count
  do
    fast_get(v, index)
  end
  def get(%@state{count: count}, i) do
    raise ArgumentError, "Attempt to get index #{inspect i} for vector of size #{count}"
  end
  @doc """
  Returns value of element in `v` at `0`-based `index` or `default` if `index >= count(v)`.
  Index must be an integer and satisfy condition `0 <= index` or `ArgumentError` will be raised.
  *See also:* `get/2`
  ## Examples:
      iex> v = new([:a, :b, :c])
      #PersistentVector<count: 3, [:a, :b, :c]>
      iex> get(v, 0, :not_found)
      :a
      iex> get(v, 10, :not_found)
      :not_found
      iex> get(v, :bad_index, :not_found)
      ** (ArgumentError) Attempt to get index :bad_index for vector of size 3
  """
  @impl Access
  @spec get(t, index, value) :: value | no_return
  def get(v = %@state{count: count}, index, _default)
    when is_integer(index) and index >= 0 and index < count
  do
    fast_get(v, index)
  end
  def get(%@state{count: count}, index, default)
    when is_integer(index) and index >= count
  do
    default
  end
  def get(%@state{count: count}, i, _default) do
    raise ArgumentError, "Attempt to get index #{inspect i} for vector of size #{count}"
  end
  @doc false
  @compile {:inline, fast_get: 2}
  @spec fast_get(t, index) :: value
  def fast_get(v, i) do
    if i >= tail_start(v) do
      v.tail
    else
      do_get(v.root, v.shift, i)
    end
    |> elem(i &&& @mask)
  end
  @spec do_get(tuple, shift, index) :: tuple
  defp do_get(arr, level, i)
    when level > 0
  do
    arr |> elem((i >>> level) &&& @mask) |> do_get(level - @shift, i)
  end
  defp do_get(arr, _level, _i)
  # when level == 0
  do
    arr
  end
  @doc """
  Returns last element in `v`, or raises `ArgumentError` if `v` is empty.
  *See also:* `last/2`
  ## Examples:
      iex> v = new(1..3)
      iex> last(v)
      3
      iex> last(empty())
      ** (ArgumentError) last/1 called for empty vector
  """
  @spec last(t) :: value | no_return
  def last(v = %@state{count: count})
    when count > 0
  do
    v |> fast_get(count - 1)
  end
  def last(%@state{})
  # when count == 0
  do
    raise ArgumentError, "last/1 called for empty vector"
  end
  @doc """
  Returns last element in `v`, or `default` if `v` is empty.
  *See also:* `last/1`
  ## Examples:
      iex> v = new(1..3)
      iex> last(v, nil)
      3
      iex> last(empty(), nil)
      nil
      iex> last(empty(), 0)
      0
  """
  @spec last(t, value) :: value | nil
  def last(v = %@state{count: count}, _default)
    when count > 0
  do
    v |> fast_get(count - 1)
  end
  def last(%@state{}, default)
  # when count == 0
  do
    default
  end
  @doc """
  Returns updated `v` with element at `0`-based `index` set to `new_value`.
  Index must be an integer and satisfy condition `0 <= index <= count(v)` or `ArgumentError` will be raised.
  **Note** that setting `index` equal to `count(v)` is allowed and behaves as `append/2`.
  ## Examples:
      iex> v = new([:a, :b, :c])
      #PersistentVector<count: 3, [:a, :b, :c]>
      iex> get(v, 1)
      :b
      iex> set(v, 1, :new_value)
      #PersistentVector<count: 3, [:a, :new_value, :c]>
      iex> set(v, 3, :append)
      #PersistentVector<count: 4, [:a, :b, :c, :append]>
      iex> set(v, 10, :wrong_index)
      ** (ArgumentError) Attempt to set index 10 for vector of size 3
  """
  @spec set(t, index, value) :: t | no_return
  def set(v = %@state{count: count}, index, new_value)
    when is_integer(index) and index >= 0 and index < count
  do
    if index >= tail_start(v) do
      new_tail = v.tail |> put_elem(index &&& @mask, new_value)
      %{v | tail: new_tail}
    else
      new_root = v.root |> do_set(v.shift, index, new_value)
      %{v | root: new_root}
    end
  end
  def set(v = %@state{count: count}, index, new_value)
    when is_integer(index) and index == count
  do
    v |> append(new_value)
  end
  def set(%@state{count: count}, index, _new_value) do
    raise ArgumentError, "Attempt to set index #{inspect index} for vector of size #{count}"
  end
  @spec do_set(tuple, shift, index, value) :: tuple
  defp do_set(arr, level, i, val)
    when level > 0
  do
    child_index = (i >>> level) &&& @mask
    new_child = arr |> elem(child_index) |> do_set(level - @shift, i, val)
    arr |> put_elem(child_index, new_child)
  end
  defp do_set(arr, _level, i, val)
  # when level == 0
  do
    arr |> put_elem(i &&& @mask, val)
  end
  @doc """
  Appends `new_value` to the end of `v`.
  ## Examples:
      iex> v = append(empty(), 1)
      #PersistentVector<count: 1, [1]>
      iex> append(v, 2)
      #PersistentVector<count: 2, [1, 2]>
  """
  @spec append(t, value) :: t
  def append(v = %@state{tail: tail}, new_value)
    when tuple_size(tail) < @block
  do
    new_tail = tail |> Tuple.append(new_value)
    %{v | count: v.count + 1, tail: new_tail}
  end
  def append(v = %@state{}, new_value) do
    new_count = v.count + 1
    new_tail = {new_value}
    case v.root |> append_block(v.shift, v.tail) do
      {:ok, new_root} ->
        %{v | count: new_count, root: new_root, tail: new_tail}
      {:overflow, tail_path} ->
        new_root = {v.root, tail_path}
        %{v | count: new_count, root: new_root, tail: new_tail, shift: v.shift + @shift}
    end
  end
  @spec append_block(tuple, shift, tuple) :: {:ok | :overflow, tuple}
  defp append_block(arr, level, tail)
    when level > @shift
  do
    last_child_index = tuple_size(arr) - 1
    case arr |> elem(last_child_index) |> append_block(level - @shift, tail) do
      {:ok, new_child} ->
        {:ok, arr |> put_elem(last_child_index, new_child)}
      {:overflow, tail_path} ->
        arr |> append_block_here(tail_path)
    end
  end
  defp append_block(arr, _level, tail)
  # when level == @shift
  do
    arr |> append_block_here(tail)
  end
  @compile {:inline, append_block_here: 2}
  @spec append_block_here(tuple, tuple) :: {:ok | :overflow, tuple}
  defp append_block_here(arr, tail_path) do
    if tuple_size(arr) < @block do
      {:ok, arr |> Tuple.append(tail_path)}
    else
      {:overflow, {tail_path}}
    end
  end
  @doc """
  Removes last element from `v` or raises `ArgumentError` if `v` is empty.
  ## Examples:
      iex> v = new(1..3)
      #PersistentVector<count: 3, [1, 2, 3]>
      iex> remove_last(v)
      #PersistentVector<count: 2, [1, 2]>
      iex> remove_last(empty())
      ** (ArgumentError) Cannot remove_last from empty vector
  """
  @spec remove_last(t) :: t | no_return
  def remove_last(v = %@state{tail: tail})
    when tuple_size(tail) > 1
  do
    new_tail = tail |> tuple_delete_last()
    %{v | count: v.count - 1, tail: new_tail}
  end
  def remove_last(v = %@state{count: count})
    when count > 1 # and tuple_size(tail) == 1
  do
    new_count = v.count - 1
    {new_root, new_tail} = remove_last_block(v.root, v.shift)
    if tuple_size(new_root) == 1 && v.shift > @shift do
      {new_root} = new_root # remove topmost tree level
      %{v | count: new_count, root: new_root, shift: v.shift - @shift, tail: new_tail}
    else
      %{v | count: new_count, root: new_root, tail: new_tail}
    end
  end
  def remove_last(%@state{count: count})
    when count == 1
  do
    empty()
  end
  def remove_last(%@state{})
  # when count == 0
  do
    raise ArgumentError, "Cannot remove_last from empty vector"
  end
  @spec remove_last_block(tuple, shift) :: {tuple, tuple}
  defp remove_last_block(arr, level)
    when level > @shift
  do
    last_child_index = tuple_size(arr) - 1
    case remove_last_block(arr |> elem(last_child_index), level - @shift) do
      {{}, last_block} ->
        {arr |> Tuple.delete_at(last_child_index), last_block}
      {new_child, last_block} ->
        {arr |> put_elem(last_child_index, new_child), last_block}
    end
  end
  defp remove_last_block(arr, _level)
  # when level == @shift
  do
    last_child_index = tuple_size(arr) - 1
    last_block = arr |> elem(last_child_index)
    new_path = arr |> Tuple.delete_at(last_child_index)
    {new_path, last_block}
  end
  @compile {:inline, tuple_delete_last: 1}
  @spec tuple_delete_last(tuple) :: tuple
  defp tuple_delete_last(tuple) do
    tuple |> Tuple.delete_at(tuple_size(tuple) - 1)
  end
  @doc """
  Converts `PersistentVector` `v` to `List`.
  This function is more efficient than an `Enum.into(v, [])` call
  because it builds the list in the correct order right away and
  does not require a `:lists.reverse/1` call at the end.
  ## Examples:
      iex> to_list(new(1..3))
      [1, 2, 3]
      iex> to_list(empty())
      []
  """
  @spec to_list(t) :: [value]
  def to_list(v = %@state{root: root, shift: shift})
  do
    acc = Tuple.to_list(v.tail)
    if root == {} do
      acc
    else
      big? = shift > 2 * @shift
      to_list(root, shift, tuple_size(root)-1, big?, acc)
    end
  end
  defp to_list(arr, level, i, big?, acc)
    when level > 0
  do
    child = elem(arr, i)
    acc = to_list(child, level - @shift, tuple_size(child)-1, big?, acc)
    if i > 0 do
      to_list(arr, level, i-1, big?, acc)
    else
      acc
    end
  end
  defp to_list(arr, _level, i, big?, acc)
  # when level == 0
  do
    if big? do
      to_list_leaf(arr, i-1, [elem(arr, i) | acc])
    else
      Tuple.to_list(arr) ++ acc
    end
  end
  defp to_list_leaf(arr, i, acc) do
    acc = [elem(arr, i) | acc]
    if i > 0 do
      to_list_leaf(arr, i-1, acc)
    else
      acc
    end
  end
  @doc false # "See `Enumerable.reduce/3`"
  @spec reduce(t, Enumerable.acc, Enumerable.reducer) :: Enumerable.result
  def reduce(v = %@state{}, acc, fun) do
    reduce_root(v.root, v.tail, v.shift, 0, acc, fun)
  end
  @spec reduce_root(tuple, tuple, shift, index, Enumerable.acc | Enumerable.result, Enumerable.reducer) :: Enumerable.result
  defp reduce_root(arr, tail, level, i, acc = {:cont, _}, fun)
    when i < tuple_size(arr)
  do
    reduce_root(arr, tail, level, i+1, reduce_node(elem(arr, i), level - @shift, 0, acc, fun), fun)
  end
  defp reduce_root(_arr, tail, _level, _i, acc = {:cont, _}, fun)
  # when i == tuple_size(arr)
  do
    reduce_tail(tail, 0, acc, fun)
  end
  defp reduce_root(_arr, _tail, _level, _i, acc = {:halted, _}, _fun) do
    acc
  end
  defp reduce_root(_arr, _tail, _level, _i, {:halt, acc}, _fun) do
    {:halted, acc}
  end
  defp reduce_root(arr, tail, level, i, {:suspended, acc, cont_fn}, fun) do
    {:suspended, acc, &reduce_root(arr, tail, level, i, cont_fn.(&1), fun)}
  end
  @spec reduce_tail(tuple, index, Enumerable.acc, Enumerable.reducer) :: Enumerable.result
  defp reduce_tail(arr, i, {:cont, acc}, fun)
    when i < tuple_size(arr)
  do
    reduce_tail(arr, i+1, fun.(elem(arr, i), acc), fun)
  end
  defp reduce_tail(_arr, _i, {:cont, acc}, _fun)
  # when i == tuple_size(arr)
  do
    {:done, acc}
  end
  defp reduce_tail(_arr, _i, {:halt, acc}, _fun) do
    {:halted, acc}
  end
  defp reduce_tail(arr, i, {:suspend, acc}, fun) do
    {:suspended, acc, &reduce_tail(arr, i, &1, fun)}
  end
  @spec reduce_node(tuple, shift, index, Enumerable.acc | Enumerable.result, Enumerable.reducer) :: Enumerable.result
  defp reduce_node(arr, level, i, acc = {:cont, _}, fun)
    when level > 0 and i < tuple_size(arr)
  do
    reduce_node(arr, level, i+1, reduce_node(elem(arr, i), level - @shift, 0, acc, fun), fun)
  end
  defp reduce_node(arr, level, i, {:cont, acc}, fun)
    when i < tuple_size(arr) # and level == 0
  do
    reduce_node(arr, level, i+1, fun.(elem(arr, i), acc), fun)
  end
  defp reduce_node(_arr, _level, _i, acc = {:cont, _}, _fun)
  # when i == tuple_size(arr)
  do
    acc
  end
  defp reduce_node(_arr, 0, _i, {:halt, acc}, _fun) do
    {:halted, acc}
  end
  defp reduce_node(_arr, _level, _i, acc = {:halted, _}, _fun) do
    acc
  end
  defp reduce_node(arr, 0, i, {:suspend, acc}, fun) do
    {:suspended, acc, &reduce_node(arr, 0, i, &1, fun)}
  end
  defp reduce_node(arr, level, i, {:suspended, acc, cont_fn}, fun) do
    {:suspended, acc, &reduce_node(arr, level, i, cont_fn.(&1), fun)}
  end
  @behaviour Access
  # `get/3` is implemented above
  @impl Access
  def fetch(v = %@state{count: count}, key)
    when is_integer(key) and key >= 0 and key < count
  do
    {:ok, v |> fast_get(key)}
  end
  def fetch(%@state{}, _key) do
    :error
  end
  @impl Access
  @spec get_and_update(t, index, fun) :: no_return
  def get_and_update(%@state{}, _key, _function) do
    raise UndefinedFunctionError
  end
  @impl Access
  @spec pop(t, index) :: no_return
  def pop(%@state{}, _key) do
    raise UndefinedFunctionError
  end
  defimpl Enumerable do
    def count(v), do: {:ok, @for.count(v)}
    def member?(%@for{}, _element), do: {:error, __MODULE__}
    def reduce(v, acc, fun), do: @for.reduce(v, acc, fun)
  end
  defimpl Collectable do
    def into(original) do
      collector_fun = fn
        v, {:cont, val} -> v |> @for.append(val)
        v, :done -> v
        _, :halt -> :ok
      end
      {original, collector_fun}
    end
  end
  defimpl Inspect do
    import Inspect.Algebra
    @prefix "#" <> inspect(@for) <> "<count: "
    def inspect(v, opts) do
      concat [@prefix <> Integer.to_string(v.count) <> ", ", to_doc(v |> Enum.take(opts.limit + 1), opts), ">"]
    end
  end
end | 
	lib/PersistentVector.ex | 0.926935 | 0.846133 | 
	PersistentVector.ex | 
	starcoder | 
| 
	defmodule SortedSet do
  alias RedBlackTree
  @moduledoc """
    A Set implementation that always remains sorted.
    This variant stores `{key, value}` pairs ordered by key. SortedSet
    guarantees that no key appears more than once and that enumerating over
    members happens in their sorted key order.
  """
  @behaviour Set
  @default_comparator &RedBlackTree.compare_terms/2
  # Define the type as opaque
  @opaque t :: %__MODULE__{members: RedBlackTree.t, size: non_neg_integer}
  @doc false
  defstruct members: RedBlackTree.new, size: 0
  @doc ~S"""
  Returns a new `SortedSet`, initialized with the `{key, value}` pairs of
  `members`, stored uniquely by key in sorted order.
  ## Options
    - `:comparator` function taking two terms and deciding their order. Passed
    on to the underlying data structure, in this case a Red-Black tree. The
    default is to compare based on standard Erlang term comparison. To learn
    more about this option, see the examples given for
    [RedBlackTree](https://github.com/SenecaSystems/red_black_tree)
  ## Examples
      iex> SortedSet.new()
      #SortedSet<[]>
      iex> SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      #SortedSet<[{1, :a}, {3, :c}, {5, :e}]>
      iex> SortedSet.new([{1, :a}, {2, :b}, {3, :c}], comparator: fn (key1, key2) ->
      ...>   RedBlackTree.compare_terms(key1, key2) * -1
      ...> end)
      #SortedSet<[{3, :c}, {2, :b}, {1, :a}]>
  """
  def new(members \\ [], options \\ [])
  def new(members, options) do
    comparator = :proplists.get_value(:comparator, options, @default_comparator)
    new_set = %SortedSet{
      members: RedBlackTree.new([], comparator: comparator)
    }
    Enum.reduce(members, new_set, fn({key, member}, set) ->
      put(set, key, member)
    end)
  end
  @doc ~S"""
  Returns the number of elements in a `SortedSet`.
  ## Examples
      iex> SortedSet.size SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      3
  """
  def size(%SortedSet{size: size}) do
    size
  end
  @doc ~S"""
  Returns a `List` with all of the members of `set`.
  ## Examples
      iex> SortedSet.to_list SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      [{1, :a}, {3, :c}, {5, :e}]
  """
  def to_list(%SortedSet{members: members}) do
    Enum.reduce(members, [], fn ({key, value}, acc) ->
      [{key, value} | acc]
    end) |> Enum.reverse
  end
  @doc ~S"""
  Returns a `SortedSet` with all of the members of `set` plus the `{key, element}` pair.
  ## Examples
      iex> set = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> SortedSet.to_list SortedSet.put(set, 1, :a)
      [{1, :a}, {3, :c}, {5, :e}]
      iex> set = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> SortedSet.to_list SortedSet.put(set, 2, :b)
      [{1, :a}, {2, :b}, {3, :c}, {5, :e}]
  """
  def put(set = %SortedSet{}, key, element) do
    new_tree = RedBlackTree.insert set.members, key, element
    %SortedSet{members: new_tree, size: new_tree.size}
  end
  @doc ~S"""
  Returns a `SortedSet` with all of the members of `set` except for the entry under `key`.
  ## Examples
      iex> set = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> SortedSet.to_list SortedSet.delete(set, 1)
      [{3, :c}, {5, :e}]
      iex> set = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> SortedSet.to_list SortedSet.delete(set, 2)
      [{1, :a}, {3, :c}, {5, :e}]
      iex> set = SortedSet.new([])
      iex> SortedSet.to_list SortedSet.delete(set, 2)
      []
  """
  def delete(%SortedSet{members: members}, key) do
    new_tree = RedBlackTree.delete members, key
    %SortedSet{members: new_tree, size: new_tree.size}
  end
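  # Because SortedSet implements Collectable and Enumerable, sets can also be
  # built from and consumed as collections of `{key, value}` pairs:
  #
  #     Enum.into([{2, :b}, {1, :a}], SortedSet.new())
  #     #=> #SortedSet<[{1, :a}, {2, :b}]>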
  ## SortedSet predicate methods
  @doc ~S"""
  Returns `true` if `set` contains an entry under `key`
  ## Examples
      iex> set = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> SortedSet.member?(set, 1)
      true
      iex> set = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> SortedSet.member?(set, 0)
      false
  """
  def member?(%SortedSet{members: tree}, key) do
    RedBlackTree.has_key? tree, key
  end
  # If the sizes are not equal, no need to check members
  def equal?(%SortedSet{size: size1}, %SortedSet{size: size2}) when size1 != size2 do
    false
  end
  @doc ~S"""
  Returns `true` if all keys in `set1` are in `set2` and all keys in
  `set2` are in `set1`
  ## Examples
      iex> set1 = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> set2 = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> SortedSet.equal?(set1, set2)
      true
      iex> set1 = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> set2 = SortedSet.new([{1, :a}, {2, :b}, {3, :c}, {4, :d}, {5, :e}])
      iex> SortedSet.equal?(set1, set2)
      false
  """
  def equal?(%SortedSet{}=set1, %SortedSet{}=set2) do
    Enum.all?(to_list(set1), fn({key, _value}) ->
      member? set2, key
    end)
  end
  # If set1 is larger than set2, it cannot be a subset of it
  def subset?(%SortedSet{size: size1}, %SortedSet{size: size2}) when size1 > size2 do
    false
  end
  @doc ~S"""
  Returns `true` if all keys in `set1` are in `set2`
  ## Examples
      iex> set1 = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> set2 = SortedSet.new([{1, :a}, {2, :b}, {3, :c}, {4, :d}, {5, :e}])
      iex> SortedSet.subset?(set1, set2)
      true
      iex> set1 = SortedSet.new([{1, :a}, {2, :b}, {3, :c}, {4, :d}, {5, :e}])
      iex> set2 = SortedSet.new([{1, :a}, {3, :c}, {5, :e}])
      iex> SortedSet.subset?(set1, set2)
      false
  """
  def subset?(%SortedSet{}=set1, %SortedSet{}=set2) do
    Enum.all?(to_list(set1), fn({key, _value}) ->
      member? set2, key
    end)
  end
  @doc ~S"""
  Returns `true` if no key of `set1` is in `set2`. Otherwise returns
  `false`.
  ## Examples
      iex> set1 = SortedSet.new([{1, :a}, {2, :b}, {3, :c}, {4, :d}])
      iex> set2 = SortedSet.new([{5, :e}, {6, :f}, {7, :g}, {8, :h}])
      iex> SortedSet.disjoint?(set1, set2)
      true
      iex> set1 = SortedSet.new([{1, :a}, {2, :b}, {3, :c}, {4, :d}])
      iex> set2 = SortedSet.new([{4, :d}, {5, :e}, {6, :f}, {7, :g}])
      iex> SortedSet.disjoint?(set1, set2)
      false
  """
  def disjoint?(%SortedSet{size: size1}=set1, %SortedSet{size: size2}=set2) when size1 <= size2 do
    not Enum.any?(to_list(set1), fn({key, _value}) ->
      member?(set2, key)
    end)
  end
  def disjoint?(%SortedSet{}=set1, %SortedSet{}=set2) do
    disjoint?(set2, set1)
  end
  ## SortedSet Operations
  @doc ~S"""
  Returns a `SortedSet` containing the items of both `set1` and `set2`.
  ## Examples
      iex> set1 = SortedSet.new([{1, :a}, {3, :c}])
      iex> set2 = SortedSet.new([{2, :b}, {4, :d}])
      iex> SortedSet.to_list SortedSet.union(set1, set2)
      [{1, :a}, {2, :b}, {3, :c}, {4, :d}]
  """
  def union(%SortedSet{size: size1}=set1, %SortedSet{size: size2}=set2) when size1 <= size2  do
    Enum.reduce(to_list(set1), set2, fn({key, member}, new_set) ->
      put(new_set, key, member)
    end)
  end
  def union(%SortedSet{}=set1, %SortedSet{}=set2) do
    union(set2, set1)
  end
  # If either set is empty, the intersection is the empty set
  def intersection(%SortedSet{size: 0}=set1, _) do
    set1
  end
  # If either set is empty, the intersection is the empty set
  def intersection(_, %SortedSet{size: 0}=set2) do
    set2
  end
  @doc ~S"""
  Returns a `SortedSet` containing the items contained in both `set1` and
  `set2`.
  ## Examples
      iex> set1 = SortedSet.new([{1, :a}, {3, :c}, {5, :e}, {7, :g}])
      iex> set2 = SortedSet.new([{0, :z}, {2, :b}, {3, :c}, {4, :d}, {5, :e}])
      iex> SortedSet.to_list SortedSet.intersection(set1, set2)
      [{3, :c}, {5, :e}]
  """
  def intersection(%SortedSet{size: size1}=set1, %SortedSet{size: size2}=set2) when size1 <= size2 do
    Enum.reduce(to_list(set1), SortedSet.new, fn({set1_key, set1_member}, new_set) ->
      if SortedSet.member?(set2, set1_key) do
        SortedSet.put(new_set, set1_key, set1_member)
      else
        new_set
      end
    end)
  end
  def intersection(%SortedSet{}=set1, %SortedSet{}=set2) do
    intersection(set2, set1)
  end
  @doc ~S"""
  Returns a `SortedSet` containing the entries of `set1` whose keys are not in `set2`.
  ## Examples
      iex> set1 = SortedSet.new([{1, :a}, {2, :b}, {3, :c}, {4, :d}])
      iex> set2 = SortedSet.new([{2, :x}, {4, :y}, {6, :z}, {8, :w}])
      iex> SortedSet.to_list SortedSet.difference(set1, set2)
      [{1, :a}, {3, :c}]
  """
  def difference(%SortedSet{size: size1}=set1, %SortedSet{size: size2}=set2) when size1 > 0 and size2 > 0 do
    Enum.reduce(to_list(set1), set1, fn({key, _value}, new_set) ->
      if SortedSet.member?(set2, key) do
        delete(new_set, key)
      else
        new_set
      end
    end)
  end
  # When the first set is empty, the difference is the empty set
  def difference(%SortedSet{size: 0}=empty_set, _) do
    empty_set
  end
  # When the other set is empty, the difference is the first set
  def difference(%SortedSet{}=set1, %SortedSet{size: 0}) do
    set1
  end
end
defimpl Enumerable, for: SortedSet do
  def count(%SortedSet{size: size}), do: {:ok, size}
  def member?(%SortedSet{}=set, element), do: {:ok, SortedSet.member?(set, element)}
  def reduce(%SortedSet{}=set, acc, fun) do
    SortedSet.to_list(set)
    |> Enumerable.List.reduce(acc, fun)
  end
end
defimpl Collectable, for: SortedSet do
  def into(original) do
    {original, fn
      set, {:cont, {new_key, new_member}} -> SortedSet.put(set, new_key, new_member)
      set, :done -> set
      _, :halt -> :ok
    end}
  end
end
# We want our own inspect so that it will hide the underlying :members and :size
# fields. Otherwise users may try to play with them directly.
defimpl Inspect, for: SortedSet do
  import Inspect.Algebra
  def inspect(set, opts) do
    concat ["#SortedSet<", Inspect.List.inspect(SortedSet.to_list(set), opts), ">"]
  end
end | 
	lib/sorted_set.ex | 0.878822 | 0.542015 | 
	sorted_set.ex | 
	starcoder | 
| 
	defmodule SanbaseWeb.Graphql.Helpers.Utils do
  alias Sanbase.DateTimeUtils
  def calibrate_interval(
        module,
        measurement,
        from,
        to,
        interval,
        min_interval_seconds \\ 300,
        data_points_count \\ 500
      )
  def calibrate_interval(
        module,
        measurement,
        from,
        to,
        "",
        min_interval_seconds,
        data_points_count
      ) do
    with {:ok, first_datetime} <- module.first_datetime(measurement) do
      first_datetime = first_datetime || from
      from =
        max(
          DateTime.to_unix(from, :second),
          DateTime.to_unix(first_datetime, :second)
        )
      interval =
        max(
          div(DateTime.to_unix(to, :second) - from, data_points_count),
          min_interval_seconds
        )
      {:ok, DateTime.from_unix!(from), to, "#{interval}s"}
    end
  end
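  # Worked example of the calibration above: a 10-day range is 864_000 seconds,
  # so with the default 500 data points div(864_000, 500) == 1728 and the
  # computed interval becomes "1728s". A 1-hour range (3_600 s) would give
  # div(3_600, 500) == 7, which is clamped to the 300-second minimum ("300s").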
  def calibrate_interval(
        _module,
        _measurement,
        from,
        to,
        interval,
        _min_interval,
        _data_points_count
      ) do
    {:ok, from, to, interval}
  end
  def calibrate_interval(
        module,
        metric,
        slug,
        from,
        to,
        "",
        min_interval_seconds,
        data_points_count
      ) do
    {:ok, first_datetime} = module.first_datetime(metric, slug)
    first_datetime = first_datetime || from
    from =
      max(
        DateTime.to_unix(from, :second),
        DateTime.to_unix(first_datetime, :second)
      )
    interval =
      max(
        div(DateTime.to_unix(to, :second) - from, data_points_count),
        min_interval_seconds
      )
    {:ok, DateTime.from_unix!(from), to, "#{interval}s"}
  end
  def calibrate_interval(
        _module,
        _metric,
        _slug,
        from,
        to,
        interval,
        _min_interval,
        _data_points_count
      ) do
    {:ok, from, to, interval}
  end
  def calibrate_interval_with_ma_interval(
        module,
        measurement,
        from,
        to,
        interval,
        min_interval,
        ma_base,
        data_points_count \\ 500
      ) do
    {:ok, from, to, interval} =
      calibrate_interval(module, measurement, from, to, interval, min_interval, data_points_count)
    ma_interval =
      max(
        div(
          DateTimeUtils.str_to_sec(ma_base),
          DateTimeUtils.str_to_sec(interval)
        ),
        2
      )
    {:ok, from, to, interval, ma_interval}
  end
  def calibrate_incomplete_data_params(true, _module, _metric, from, to) do
    {:ok, from, to}
  end
  def calibrate_incomplete_data_params(false, module, metric, from, to) do
    case module.has_incomplete_data?(metric) do
      true -> rewrite_params_incomplete_data(from, to)
      false -> {:ok, from, to}
    end
  end
  defp rewrite_params_incomplete_data(from, to) do
    start_of_day = Timex.beginning_of_day(Timex.now())
    case DateTime.compare(from, start_of_day) != :lt do
      true ->
        {:error,
         """
         The time range provided [#{from} - #{to}] is contained in today. The metric
         requested could have incomplete data as it's calculated since the beginning
         of the day and not for the last 24 hours. If you still want to see this
         data you can pass the flag `includeIncompleteData: true` in the
         `timeseriesData` arguments
         """}
      false ->
        to = if DateTime.compare(to, start_of_day) == :gt, do: start_of_day, else: to
        {:ok, from, to}
    end
  end
  def error_details(changeset) do
    changeset
    |> Ecto.Changeset.traverse_errors(&format_error/1)
  end
  @doc ~s"""
  Works when the result is a list of elements that contain a datetime and the query arguments
  have a `from` argument. In that case leading elements whose `datetime` is before
  the query's `from` are dropped.
  This is used when a query to InfluxDB is made. InfluxDB can return a timestamp
  that's outside the `from` - `to` interval due to its inner workings with buckets
  """
  def fit_from_datetime([%{datetime: _} | _] = data, %{from: from}) do
    result =
      data
      |> Enum.drop_while(fn %{datetime: datetime} ->
        DateTime.compare(datetime, from) == :lt
      end)
    {:ok, result}
  end
  def fit_from_datetime(result, _args), do: {:ok, result}
  @doc ~s"""
  Extract the arguments passed to the root query from subfield resolution
  """
  def extract_root_query_args(resolution, root_query_name) do
    root_query_camelized = Absinthe.Utils.camelize(root_query_name, lower: true)
    resolution.path
    |> Enum.find(fn x -> is_map(x) && x.name == root_query_camelized end)
    |> Map.get(:argument_data)
  end
  @doc ~s"""
  Transform the UserTrigger structure to be more easily consumed by the API.
  This is done by propagating the tags and the UserTrigger id into the Trigger
  structure
  """
  def transform_user_trigger(%Sanbase.Signal.UserTrigger{trigger: trigger, tags: tags} = ut) do
    ut = Map.from_struct(ut)
    trigger = Map.from_struct(trigger)
    %{
      ut
      | trigger: trigger |> Map.put(:tags, tags) |> Map.put(:id, ut.id)
    }
  end
  def replace_user_trigger_with_trigger(data) when is_map(data) do
    case data do
      %{user_trigger: ut} = elem when not is_nil(ut) ->
        elem
        |> Map.drop([:__struct__, :user_trigger])
        |> Map.put(:trigger, Map.get(transform_user_trigger(ut), :trigger))
      elem ->
        elem
    end
  end
  def replace_user_trigger_with_trigger(data) when is_list(data) do
    data |> Enum.map(&replace_user_trigger_with_trigger/1)
  end
  @spec requested_fields(%Absinthe.Resolution{}) :: MapSet.t()
  def requested_fields(%Absinthe.Resolution{} = resolution) do
    resolution.definition.selections
    |> Enum.map(fn %{name: name} -> Inflex.camelize(name, :lower) end)
    |> MapSet.new()
  end
  # Private functions
  @spec format_error(Ecto.Changeset.error()) :: String.t()
  defp format_error({msg, opts}) do
    Enum.reduce(opts, msg, fn {key, value}, acc ->
      String.replace(acc, "%{#{key}}", to_string(inspect(value)))
    end)
  end
end | 
	lib/sanbase_web/graphql/helpers/utils.ex | 0.828523 | 0.409988 | 
	utils.ex | 
	starcoder | 
| 
	defmodule ServerUtils.SentryLogger do
  @moduledoc """
    Logger wrapper that handles `warn` and `error` logging and sends a Sentry report. Both logging and Sentry calls will be executed asynchronously.
    Sentry will need to be configured in the project that uses this dependency.
    The integration with Sentry can be disabled by setting the system variable *DISABLE_SENTRY* (to any value).
  """
  require Logger
  @doc """
  Logs a debug message.
  Returns `:ok` or an `{:error, reason}` tuple.
  """
  @spec debug(String.t()) :: atom()
  def debug(message) do
    Logger.debug(fn -> message end)
  end
  @doc """
  Logs an info message.
  Returns `:ok` or an `{:error, reason}` tuple.
  """
  @spec info(String.t()) :: atom()
  def info(message) do
    Logger.info(fn -> message end)
  end
  @doc """
  Logs a warn message.
  Unless the system variable `DISABLE_SENTRY` is set, it will send the logged message at warning level to Sentry.
  Returns `:ok` or an `{:error, reason}` tuple if Sentry is **disabled**.
  Returns `{:ok, pid}` (the result of `Task.start/1`) if Sentry is **enabled**.
  """
  @spec warn(String.t(), keyword()) :: atom() | {:ok, pid()}
  def warn(message, opts \\ []) do
    if System.get_env("DISABLE_SENTRY") do
      Logger.warn(fn -> message end)
    else
      opts =
        opts ++
          [
            tags: %{
              env: System.get_env("ENVIRONMENT")
            },
            level: "warning"
          ]
      Logger.warn(fn -> message end)
      Task.start(fn -> Sentry.capture_message(message, opts) end)
    end
  end
  @doc """
  Logs an error message.
  Unless the system variable `DISABLE_SENTRY` is set, it will send the logged message at error level to Sentry.
  Returns `:ok` or an `{:error, reason}` tuple if Sentry is **disabled**.
  Returns `{:ok, pid}` (the result of `Task.start/1`) if Sentry is **enabled**.
  """
  @spec error(String.t(), keyword()) :: atom() | {:ok, pid()}
  def error(message, opts \\ []) do
    if System.get_env("DISABLE_SENTRY") do
      Logger.error(fn -> message end)
    else
      opts =
        opts ++
          [
            tags: %{
              environment: System.get_env("ENVIRONMENT")
            },
            level: "error"
          ]
      Logger.error(fn -> message end)
      Task.start(fn -> Sentry.capture_message(message, opts) end)
    end
  end
end | 
	lib/logger/logger.ex | 0.885055 | 0.484868 | 
	logger.ex | 
	starcoder | 
| 
	defmodule Rihanna.Migration do
  @max_32_bit_signed_integer (:math.pow(2, 31) |> round) - 1
  @moduledoc """
  A set of tools for creating the Rihanna jobs table.
  Rihanna stores jobs in a table in your database. The default table name is
  "rihanna_jobs". The name is configurable by either passing it as an argument
  to the functions below or setting `:jobs_table_name` in Rihanna's config.
  #### Using Ecto
  The easiest way to create the table is with Ecto. Run `mix ecto.gen.migration create_rihanna_jobs` and make your migration look like this:
  ```elixir
  defmodule MyApp.CreateRihannaJobs do
    use Rihanna.Migration
  end
  ```
  Now you can run `mix ecto.migrate`.
  #### Without Ecto
  Ecto is not required to run Rihanna. If you want to create the table yourself, without Ecto, take a look at either `statements/0` or `sql/0`.
  """
  defmacro __using__(opts) do
    table_name = Keyword.get(opts, :table_name, Rihanna.Config.jobs_table_name()) |> to_string
    quote do
      use Ecto.Migration
      def up() do
        Enum.each(Rihanna.Migration.statements(unquote(table_name)), fn statement ->
          execute(statement)
        end)
      end
      def down() do
        execute("""
        DROP TABLE #{unquote(table_name)};
        """)
      end
    end
  end
  @doc """
  Returns a list of SQL statements that will create the Rihanna jobs table if
  executed sequentially.
  By default it takes the name of the table from the application config.
  You may optionally supply a table name as an argument if you want to override
  this.
  ## Examples
      > Rihanna.Migration.statements
      [...]
      > Rihanna.Migration.statements("my_alternative_table_name")
      [...]
  """
  @spec statements() :: [String.t()]
  @spec statements(String.t() | atom) :: [String.t()]
  def statements(table_name \\ Rihanna.Config.jobs_table_name())
      when is_binary(table_name) or is_atom(table_name) do
    [
      """
      CREATE TABLE #{table_name} (
        id int NOT NULL,
        term bytea NOT NULL,
        enqueued_at timestamp with time zone NOT NULL,
        failed_at timestamp with time zone,
        fail_reason text,
        CONSTRAINT failed_at_required_fail_reason CHECK((failed_at IS NOT NULL AND fail_reason IS NOT NULL) OR (failed_at IS NULL and fail_reason IS NULL))
      );
      """,
      """
      COMMENT ON CONSTRAINT failed_at_required_fail_reason ON #{table_name} IS 'When setting failed_at you must also set a fail_reason';
      """,
      """
      CREATE SEQUENCE #{table_name}_id_seq
      START WITH 1
      INCREMENT BY 1
      MINVALUE 1
      MAXVALUE #{@max_32_bit_signed_integer}
      CACHE 1
      CYCLE;
      """,
      """
      ALTER SEQUENCE #{table_name}_id_seq OWNED BY #{table_name}.id;
      """,
      """
      ALTER TABLE ONLY #{table_name} ALTER COLUMN id SET DEFAULT nextval('#{table_name}_id_seq'::regclass);
      """,
      """
      ALTER TABLE ONLY #{table_name}
      ADD CONSTRAINT #{table_name}_pkey PRIMARY KEY (id);
      """
    ]
  end
  @doc """
  Returns a string of semi-colon-terminated SQL statements that you can execute
  directly to create the Rihanna jobs table.
  """
  @spec sql() :: String.t()
  @spec sql(String.t() | atom) :: String.t()
  def sql(table_name \\ Rihanna.Config.jobs_table_name()) do
    Enum.join(statements(table_name), "\n")
  end
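  # A minimal "without Ecto" sketch (hedged: assumes a running Postgrex
  # connection bound to `conn`; Postgrex is not a dependency of this module):
  #
  #     for statement <- Rihanna.Migration.statements() do
  #       Postgrex.query!(conn, statement, [])
  #     end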
end | 
	lib/rihanna/migration.ex | 0.869396 | 0.72189 | 
	migration.ex | 
	starcoder | 
| 
	defmodule Ivy.Package do
  alias Ivy.{Constraint, Domain, Logical, LSubst, LVar, Unifyable}
  @behaviour Access
  # s: substitution map, d: variable domains, c: constraint store,
  # vs: tracked vars (`ext_no_check/3` prepends newly bound vars when this is a
  # list), oc: occurs-check flag, meta: arbitrary user metadata.
  defstruct [s: %{}, d: %{}, c: [], vs: nil, oc: true, meta: %{}]
  @type t :: %__MODULE__{
    s: %{LVar.t => LSubst.t | LVar.t},
    d: %{LVar.t => Domain.t},
    c: [Constraint.t],
    vs: [LVar.t] | nil,
    oc: boolean,
    meta: map()
  }
  def new() do
    %__MODULE__{}
  end
  @impl Access
  def fetch(package, key) do
    Map.fetch(package, key)
  end
  @impl Access
  def pop(package, key) do
    Map.pop(package, key)
  end
  @impl Access
  def get_and_update(package, key, fun) do
    Map.get_and_update(package, key, fun)
  end
  # Extends the substitution, binding `u` to `v`. Returns nil when the occurs
  # check is enabled and `u` occurs inside `v` (binding would create a cycle).
  def ext(s, u, v) do
    vv = if match?(%LSubst{}, v), do: v.v, else: v
    if s.oc and occurs?(s, u, vv) do
      nil
    else
      ext_no_check(s, u, v)
    end
  end
  def ext_no_check(s, u, v) do
    put_in(s, [:s, u], v)
    |> Map.update(:vs, nil, fn
      vs when is_list(vs) -> [u | vs]
      vs -> vs
    end)
  end
  def unify(s, u, u), do: s
  def unify(s, u, v) do
    case {walk(s, u), walk(s, v)} do
      {%LVar{} = u, u} -> s
      {%LVar{} = u, v} -> Unifyable.unify(u, v, s)
      {u, %LVar{} = v} -> Unifyable.unify(v, u, s)
      {u, v} -> Unifyable.unify(u, v, s)
    end
  end
  # Resolves a logic variable through the substitution map, returning its bound
  # value or the last unbound variable in the chain.
  def walk(%__MODULE__{s: s}, %LVar{} = v),
    do: walk_impl(v, Map.get(s, v), s)
  def walk(_, v), do: v
  defp walk_impl(lv, nil, _), do: lv
  defp walk_impl(_, %LSubst{v: sv}, _), do: sv
  defp walk_impl(_, %LVar{} = vp, s) do
    walk_impl(vp, Map.get(s, vp), s)
  end
  defp walk_impl(_, vp, _), do: vp
  def reify(s, v) do
    Logical.reify(walk(s, v), s)
  end
  def occurs?(s, u, v) do
    Logical.occurs?(walk(s, v), u, s)
  end
  def build(s, u) do
    Logical.build(u, s)
  end
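  # A minimal sketch of unify/3 and walk/2 (hypothetical: assumes `Ivy.LVar.new/1`
  # builds a fresh logic variable and that plain terms implement `Ivy.Unifyable`):
  #
  #     s = Ivy.Package.new()
  #     x = Ivy.LVar.new(:x)
  #     s = Ivy.Package.unify(s, x, 1)
  #     Ivy.Package.walk(s, x)
  #     #=> 1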
  defimpl Ivy.Bindable do
    def bind(a, g) do
      g.(a)
    end
    def mplus(a, f) do
      %Ivy.Choice{a: a, f: f}
    end
    def take(a) do
      a
    end
  end
  defimpl Ivy.IfA do
    alias Ivy.Bindable
    def ifa(%@for{s: s} = b, gs, _) do
      %{b | s: Enum.reduce(gs, s, &Bindable.bind(&2, &1))}
    end
  end
  defimpl Ivy.IfU do
    alias Ivy.Bindable
    def ifu(%@for{s: s} = b, gs, _) do
      %{b | s: Enum.reduce(gs, s, &Bindable.bind(&2, &1))}
    end
  end
end | 
	archive/ivy/query/package.ex | 0.734786 | 0.456107 | 
	package.ex | 
	starcoder | 
| 
	defmodule Calcinator.Resources.Sort do
  @moduledoc """
  Sort in `Calcinator.Resources.query_options`
  """
  alias Alembic.{Document, Error, Fetch.Includes, Source}
  alias Calcinator.Resources
  import Resources, only: [attribute_to_field: 2]
  # Struct
  defstruct field: nil,
            direction: :ascending,
            association: nil
  # Types
  @typedoc """
  Keyword list of association path used for Ecto preloading
  """
  @type association :: Keyword.t()
  @typedoc """
  The direction to sort.  Defaults to `:ascending` per the JSONAPI spec.  Can be `:descending` when the dot-separated
  attribute path is prefixed with `-`.
  """
  @type direction :: :ascending | :descending
  @typedoc """
  Name of a field in an `Ecto.Schema.t`
  """
  @type field_name :: atom
  @typedoc """
  * `:association` - Keyword list of nested associations.  `nil` when the `:field` is directly on the primary data.
  * `:direction` - the direction to sort `:field`
  * `:field` - name of the field to sort
  """
  @type t :: %__MODULE__{
          association: nil | association,
          direction: direction,
          field: field_name
        }
  @typedoc """
  Used to convert includes used by JSONAPI to the corresponding association in Ecto.
  This map does not need to be a simple conversion of the nested map of strings `Alembic.Fetch.Includes.t` to
  the `Keyword.t` of `associations`, but can include completely different names or associations that the JSONAPI doesn't
  even expose, so that deprecated relationships can be mapped to newer associations.
  """
  @type associations_by_include :: %{Alembic.Fetch.Includes.t() => association}
  @typedoc """
  * `:associations_by_include` - maps the `Alembic.Fetch.Includes.t` to Keyword.t of associations.
  * `:ecto_schema_module` - primary Ecto.Schema module for checking if attribute is an existent field after applying
    associations.
  """
  @type from_alembic_fetch_sort_options :: %{
          required(:associations_by_include) => associations_by_include,
          required(:ecto_schema_module) => module
        }
  @typedoc """
  Used to convert associations used in Ecto to JSONAPI includes.
  This map need not be the inverse of `associations_by_include` if the JSONAPI incoming relationships are not the same
  as the outgoing relationships.
  """
  @type include_by_associations :: %{association => Alembic.Fetch.Includes.t()}
  # Functions
  @doc """
  Maps `Alembic.Fetch.Sort.t` `attribute` to `t` `field` and `Alembic.Fetch.Sort.t` `relationships` to
  `t` `associations`.
  When there are no `relationships`, there are no associations
      iex> Calcinator.Resources.Sort.from_alembic_fetch_sort(
      ...>   %Alembic.Fetch.Sort{
      ...>     attribute: "inserted-at"
      ...>   },
      ...>   %{
      ...>     associations_by_include: %{},
      ...>     ecto_schema_module: Calcinator.Resources.TestPost
      ...>   }
      ...> )
      {
        :ok,
        %Calcinator.Resources.Sort{field: :inserted_at}
      }
  When there is `relationship` it is converted to association using `:associations_by_include`
      iex> Calcinator.Resources.Sort.from_alembic_fetch_sort(
      ...>   %Alembic.Fetch.Sort{
      ...>     attribute: "inserted-at",
      ...>     relationship: "posts"
      ...>   },
      ...>   %{
      ...>     associations_by_include: %{"posts" => :posts},
      ...>     ecto_schema_module: Calcinator.Resources.TestAuthor
      ...>   }
      ...> )
      {
        :ok,
        %Calcinator.Resources.Sort{
          association: :posts,
          direction: :ascending,
          field: :inserted_at
        }
      }
  The relationship can also be nested and it will be converted using `:associations_by_include` too
      iex> Calcinator.Resources.Sort.from_alembic_fetch_sort(
      ...>   %Alembic.Fetch.Sort{
      ...>     attribute: "inserted-at",
      ...>     relationship: %{
      ...>       "posts" => "comments"
      ...>     }
      ...>   },
      ...>   %{
      ...>     associations_by_include: %{
      ...>       %{
      ...>         "posts" => "comments"
      ...>       } => [posts: :comments]
      ...>     },
      ...>     ecto_schema_module: Calcinator.Resources.TestAuthor
      ...>   }
      ...> )
      {
        :ok,
        %Calcinator.Resources.Sort{
          association: [posts: :comments],
          direction: :ascending,
          field: :inserted_at
        }
      }
  ## Errors
  If the `Alembic.Fetch.Sort.t` `relationship` is not in `:associations_by_include`, then an error is returned
      iex> Calcinator.Resources.Sort.from_alembic_fetch_sort(
      ...>   %Alembic.Fetch.Sort{
      ...>     attribute: "inserted-at",
      ...>     relationship: "author"
      ...>   },
      ...>   %{
      ...>     associations_by_include: %{},
      ...>     ecto_schema_module: Calcinator.Resources.TestPost
      ...>   }
      ...> )
      {
        :error,
        %Alembic.Document{
          errors: [
            %Alembic.Error{
              detail: "`author` is an unknown relationship path",
              meta: %{
                "relationship_path" => "author"
              },
              source: %Alembic.Source{
                parameter: "include"
              },
              title: "Unknown relationship path"
            }
          ]
        }
      }
  If the `Alembic.Fetch.Sort.t` `attribute` is not on `:ecto_schema_module` when there is no `relationship`, then an
  error is returned with only the `attribute` in it
      iex> Calcinator.Resources.Sort.from_alembic_fetch_sort(
      ...>   %Alembic.Fetch.Sort{
      ...>     attribute: "likes",
      ...>     relationship: nil
      ...>   },
      ...>   %{
      ...>     associations_by_include: %{},
      ...>     ecto_schema_module: Calcinator.Resources.TestPost
      ...>   }
      ...> )
      {
        :error,
        %Alembic.Document{
          errors: [
            %Alembic.Error{
              detail: "Does not have `likes` attribute",
              meta: %{
                "attribute" => "likes"
              },
              source: %Alembic.Source{
                parameter: "sort"
              },
              title: "Unknown attribute"
            }
          ]
        }
      }
  If the `Alembic.Fetch.Sort.t` `attribute` is not on the associated `Ecto.Schema` module, then an error is returned
  with both the `relationship` and `attribute` in it.
      iex> Calcinator.Resources.Sort.from_alembic_fetch_sort(
      ...>   %Alembic.Fetch.Sort{
      ...>     attribute: "title",
      ...>     relationship: "author"
      ...>   },
      ...>   %{
      ...>     associations_by_include: %{
      ...>       "author" => :author
      ...>     },
      ...>     ecto_schema_module: Calcinator.Resources.TestPost
      ...>   }
      ...> )
      {
        :error,
        %Alembic.Document{
          errors: [
            %Alembic.Error{
              detail: "`author` does not have a `title` attribute",
              meta: %{
                "attribute" => "title",
                "relationship_path" => "author"
              },
              source: %Alembic.Source{
                parameter: "sort"
              },
              title: "Unknown attribute"
            }
          ]
        }
      }
  If the relationship is nested, then the whole relationship path is shown in the error
      iex> Calcinator.Resources.Sort.from_alembic_fetch_sort(
      ...>   %Alembic.Fetch.Sort{
      ...>     attribute: "likes",
      ...>     relationship: %{
      ...>       "posts" => "comments"
      ...>     }
      ...>   },
      ...>   %{
      ...>     associations_by_include: %{
      ...>       %{
      ...>         "posts" => "comments"
      ...>       } => [posts: :comments]
      ...>     },
      ...>     ecto_schema_module: Calcinator.Resources.TestAuthor
      ...>   }
      ...> )
      {
        :error,
        %Alembic.Document{
          errors: [
            %Alembic.Error{
              detail: "`posts.comments` does not have a `likes` attribute",
              meta: %{
                "attribute" => "likes",
                "relationship_path" => "posts.comments"
              },
              source: %Alembic.Source{
                parameter: "sort"
              },
              title: "Unknown attribute"
            }
          ]
        }
      }
  """
  @spec from_alembic_fetch_sort(Alembic.Fetch.Sort.t(), from_alembic_fetch_sort_options) ::
          {:ok, t} | {:error, Document.t()}
  def from_alembic_fetch_sort(sort = %Alembic.Fetch.Sort{direction: direction, relationship: relationship}, %{
        associations_by_include: associations_by_include,
        ecto_schema_module: ecto_schema_module
      }) do
    with {:ok, association} <- association(relationship, associations_by_include),
         {:ok, field} <- field(%{association: association, ecto_schema_module: ecto_schema_module, sort: sort}) do
      {:ok, %__MODULE__{association: association, direction: direction, field: field}}
    end
  end
  @doc """
  Maps `t` `field` to `Alembic.Fetch.Sort.t` `attribute` and `t` `associations` to `Alembic.Fetch.Sort.t`
  `relationships`.
  """
  @spec to_alembic_fetch_sort(t, Resources.t()) :: {:ok, Alembic.Fetch.Sort.t()} | {:error, Document.t()}
  def to_alembic_fetch_sort(%__MODULE__{association: association, direction: direction, field: field}, module) do
    {
      :ok,
      %Alembic.Fetch.Sort{
        attribute: attribute(field),
        direction: direction,
        relationship: relationship(module, association)
      }
    }
  end
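  # A minimal round-trip sketch for `to_alembic_fetch_sort/2` (`MyApp.Posts` is
  # hypothetical; with no association, the module's `association_to_include/1`
  # callback is never invoked):
  #
  #     Calcinator.Resources.Sort.to_alembic_fetch_sort(
  #       %Calcinator.Resources.Sort{direction: :descending, field: :inserted_at},
  #       MyApp.Posts
  #     )
  #     #=> {:ok, %Alembic.Fetch.Sort{attribute: "inserted-at", direction: :descending,
  #     #=>       relationship: nil}}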
  ## Private Functions
  defp association(nil, _), do: {:ok, nil}
  defp association(relationship, associations_by_include) when is_binary(relationship) or is_map(relationship) do
    with {:ok, [association]} <- Includes.to_preloads([relationship], associations_by_include) do
      {:ok, association}
    end
  end
  defp attribute(field) do
    field
    |> to_string()
    |> String.replace("_", "-")
  end
  defp attribute_error(%Error{detail: detail, meta: meta}) do
    %Error{
      detail: detail,
      meta: meta,
      source: %Source{
        parameter: "sort"
      },
      title: "Unknown attribute"
    }
  end
  defp attribute_error(%Alembic.Fetch.Sort{attribute: attribute, relationship: nil}) do
    attribute_error(%Error{
      detail: "Does not have `#{attribute}` attribute",
      meta: %{
        "attribute" => attribute
      }
    })
  end
  defp attribute_error(%Alembic.Fetch.Sort{attribute: attribute, relationship: relationship}) do
    relationship_path = Includes.to_string([relationship])
    attribute_error(%Error{
      detail: "`#{relationship_path}` does not have a `#{attribute}` attribute",
      meta: %{
        "attribute" => attribute,
        "relationship_path" => relationship_path
      }
    })
  end
  defp attribute_error_document(sort), do: %Document{errors: [attribute_error(sort)]}
  defp attribute_error_result(sort), do: {:error, attribute_error_document(sort)}
  defp field(%{
         association: nil,
         ecto_schema_module: ecto_schema_module,
         sort:
           sort = %Alembic.Fetch.Sort{
             attribute: attribute
           }
       }) do
    attribute
    |> attribute_to_field(ecto_schema_module)
    |> case do
      {:ok, field} ->
        {:ok, field}
      {:error, ^attribute} ->
        attribute_error_result(sort)
    end
  end
  defp field(%{
         association: association,
         ecto_schema_module: ecto_schema_module,
         sort: sort
       })
       when is_atom(association) do
    # Does not produce a JSON error because a wrong association is a programmer
    # error: associations_by_include contains a bad association
    %{related: related_ecto_schema_module} = ecto_schema_module.__schema__(:association, association)
    field(%{association: nil, ecto_schema_module: related_ecto_schema_module, sort: sort})
  end
  defp field(%{
         association: [{current_association, child_association}],
         ecto_schema_module: ecto_schema_module,
         sort: sort
       }) do
    # Does not produce a JSON error because a wrong association is a programmer
    # error: associations_by_include contains a bad association
    %{related: related_ecto_schema_module} = ecto_schema_module.__schema__(:association, current_association)
    field(%{association: child_association, ecto_schema_module: related_ecto_schema_module, sort: sort})
  end
  defp relationship(_, nil), do: nil
  defp relationship(module, association), do: module.association_to_include(association)
end | 
	lib/calcinator/resources/sort.ex | 0.903655 | 0.497559 | 
	sort.ex | 
	starcoder | 
| 
	defmodule Pointers.Mixin do
  @moduledoc """
  If a Pointer represents an object, mixins represent data about the object. Mixins collate optional
  additional information about an object. Different types of object will typically make use of
  different mixins. You can see these as aspects of the data if you like.
  A mixin table starts with an `id` column which references `Pointer` and forms the default primary
  key. It is up to the user to choose which other fields go in the table, and thus what the mixin is for.
  Use of a mixin is typically through `has_one`:
  ```
  has_one :my_mixin, MyMixin, foreign_key: :id, references: :id
  ```
  Sometimes, the user may wish to add fields to the primary key by using the `primary_key: true`
  option to `add` in their migrations. This is permitted, and in such a case we call the resulting
  mixin a `multimixin`. Use then becomes `has_many`:
  ```
  has_many :my_mixin, MyMixin, foreign_key: :id, references: :id
  ```
  Thus the choice of single or multi comes down to how many times you want to store that data for
  the object. A user's profile naturally lends itself to a regular `single` mixin, whereas an
  object's appearance in a feed would naturally lend itself to being a multimixin since the object
  may appear in many feeds.
  ### Declaring a mixin table type
  ```
  defmodule My.Mixin do
    use Pointers.Mixin,
      otp_app: :my_app,
      source: "postgres_table_name"
    mixin_schema do
      field :is_awesome, :boolean
    end
  end
  ```
  """
  # alias Ecto.Changeset
  alias Pointers.{ULID, Util}
  defmacro __using__(options), do: using(__CALLER__.module, options)
  @must_be_in_module "Pointers.Mixin may only be used inside a defmodule!"
  def using(nil, _options), do: raise RuntimeError, message: @must_be_in_module
  def using(module, options) do
    otp_app = Util.get_otp_app(options)
    Util.get_source(options)
    config = Application.get_env(otp_app, module, [])
    Module.put_attribute(module, __MODULE__, options)
    pointers = emit_pointers(config ++ options)
    quote do
      use Ecto.Schema
      require Pointers.Changesets
      import Flexto
      import Pointers.Mixin
      # This is an attempt to help mix notice that we use the configuration at
      # compile time. In Flexto, for reasons of its own, we already had to use
      # Application.get_env.
      _dummy_compile_env = Application.compile_env(unquote(otp_app), unquote(module))
      unquote_splicing(pointers)
    end
  end
  @must_use "You must use Pointers.Mixin before calling mixin_schema/1"
  defmacro mixin_schema([do: body]) do
    module = __CALLER__.module
    schema_check_attr(Module.get_attribute(module, __MODULE__), module, body)
  end
  
  @timestamps_opts [type: :utc_datetime_usec]
  @foreign_key_type ULID
  defp schema_check_attr(options, module, body) when is_list(options) do
    otp_app = Util.get_otp_app(options)
    config = Application.get_env(otp_app, module, [])
    source = Util.get_source(config ++ options)
    foreign_key = Module.get_attribute(module, :foreign_key_type, @foreign_key_type)
    timestamps_opts = Module.get_attribute(module, :timestamps_opts, @timestamps_opts)
    quote do
      @primary_key false
      @foreign_key_type unquote(foreign_key)
      @timestamps_opts unquote(timestamps_opts)
      schema(unquote(source)) do
        belongs_to :pointer, Pointers.Pointer,
          foreign_key: :id,
          on_replace: :update,
          primary_key: true,
          type: Pointers.ULID
        unquote(body)
        Flexto.flex_schema(unquote(otp_app))
      end
    end
  end
  defp schema_check_attr(_, _, _), do: raise ArgumentError, message: @must_use
  # defines __pointers__
  defp emit_pointers(config) do
    otp_app = Keyword.fetch!(config, :otp_app)
    [ Util.pointers_clause(:role, :mixin),
      Util.pointers_clause(:otp_app, otp_app)
    ]
  end
end | 
	lib/mixin.ex | 0.892621 | 0.875361 | 
	mixin.ex | 
	starcoder | 