add support for generic webhooks #183
@@ -0,0 +1,130 @@
```scala
package nelson
package notifications

import Manifest._
import argonaut._, Argonaut._

sealed trait NotificationEvent

final case class DeployEvent(
  unit: UnitDef,
  actionConfig: ActionConfig
) extends NotificationEvent

final case class DecommissionEvent(
  d: Datacenter.Deployment
) extends NotificationEvent

object NotificationEvent {

  def deploy(unit: UnitDef @@ Versioned, actionConfig: ActionConfig): DeployEvent = {
    DeployEvent(Versioned.unwrap(unit), actionConfig)
  }

  def decommission(d: Datacenter.Deployment): DecommissionEvent = DecommissionEvent(d)

  /**
   * Encodes the final web hook payload by matching on the event class.
   *
   * The notification model here is influenced heavily by the Github event model, wherein
   * a causal event is a name as well as an event class, the latter of which dictates its
   * actual structure. This provides greater flexibility for the consumer of the payload
   * who can then react contextually, e.g. act on initial deploy but not redeploy.
   *
   * This behavior is not currently type-encoded but should be if the number of event types is expanded.
   */
  implicit def encodeEventDetail: EncodeJson[NotificationEvent] = EncodeJson {
    case d: DeployEvent =>
      ("action" := "deploy") ->:
      ("deployed" := encodeDeploy(d)) ->:
      jEmptyObject
    case d: DecommissionEvent =>
      ("action" := "decommission") ->:
      ("decommissioned" := encodeDecommission(d)) ->:
      jEmptyObject
  }

  implicit def encodeDeploy: EncodeJson[DeployEvent] = EncodeJson { ev =>
    val ns = ev.actionConfig.namespace
    val dc = ev.actionConfig.datacenter
    val plan = ev.actionConfig.plan
    val env = plan.environment
    val unit = ev.unit

    ("namespace" := ns.name.asString) ->:
```
Member: More a note for the future than a review comment: since we're moving to Protobuf (#159), we may want these as protocols as well, especially since this is quite a bit of information. But fine for now :)
```scala
    ("datacenter" := dc.name) ->:
    ("plan" :=
      ("name" := plan.name) ->:
      ("schedule" := env.schedule.flatMap(_.toCron())) ->:
      ("health_checks" := env.healthChecks.map(mkHealthCheck)) ->:
      ("constraints" := env.constraints.map(_.fieldName)) ->:
      ("bindings" := env.bindings.map(b => b.name -> b.value)) ->:
      ("resources" := env.resources.mapValues(_.toString)) ->: jEmptyObject
    ) ->:
    ("unit" :=
      ("name" := unit.name) ->:
      ("description" := unit.description) ->:
      ("artifact" := unit.deployable.map(mkDeployable)) ->:
      ("ports" := unit.ports.map(mkPorts)) ->:
      ("dependencies" := unit.dependencies.mapValues(_.toString)) ->:
      ("resources" := unit.resources.map(_.name)) ->: jEmptyObject
    ) ->:
    jEmptyObject
  }

  implicit def encodeDecommission: EncodeJson[DecommissionEvent] = EncodeJson { ev =>
    val deployment = ev.d
    val unit = deployment.unit

    ("namespace" := deployment.namespace.name.asString) ->:
    ("datacenter" := deployment.namespace.datacenter) ->:
    ("deployment" :=
      ("id" := deployment.id) ->:
      ("hash" := deployment.hash) ->:
      ("guid" := deployment.guid) ->:
      ("plan" := deployment.plan) ->:
      ("stack" := deployment.stackName.toString) ->:
      ("deploy_time" := deployment.deployTime.toString) ->:
      ("workflow" := deployment.workflow) ->: jEmptyObject
    ) ->:
    ("unit" :=
      ("name" := unit.name) ->:
      ("description" := unit.description) ->:
      ("ports" := unit.ports.map(mkPort)) ->:
      ("dependencies" := unit.dependencies.map(sn => (sn.serviceType, sn.version.toString))) ->:
      ("resources" := unit.resources) ->: jEmptyObject
    ) ->:
    jEmptyObject
  }

  private def mkDeployable(d: Manifest.Deployable) =
    ("name" := d.name) ->:
    ("version" := d.version.toString) ->:
    ("deployable" := (d.output match { case Deployable.Container(i) => i })) ->:
    jEmptyObject

  private def mkHealthCheck(h: Manifest.HealthCheck) =
    ("name" := h.name) ->:
    ("path" := h.path) ->:
    ("port" := h.portRef) ->:
    ("protocol" := h.protocol) ->:
    ("interval" := h.interval.toMillis) ->:
    ("timeout" := h.timeout.toMillis) ->:
    jEmptyObject

  private def mkPort(p: Manifest.Port) =
    ("default" := p.isDefault) ->:
    ("ref" := p.ref) ->:
    ("port" := p.port) ->:
    ("protocol" := p.protocol) ->:
    jEmptyObject

  private def mkPort(p: Datacenter.Port) =
    ("ref" := p.name) ->:
    ("port" := p.port) ->:
    ("protocol" := p.protocol) ->:
    jEmptyObject

  private def mkPorts(ps: Ports) = ps.nel.map(mkPort).toList
}
```
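For orientation, the encoders above produce a GitHub-style envelope: an `action` name alongside an event-specific body. Below is a rough, abridged sketch of what a decommission payload might look like; it is built with plain argonaut literals rather than the encoders in this PR, and every field value is invented for illustration.

```scala
// Illustrative only: approximate shape of a "decommission" payload.
// All values below are made up, not taken from a real deployment.
import argonaut._, Argonaut._

val examplePayload: Json = Json(
  "action" := "decommission",
  "decommissioned" := Json(
    "namespace"  := "dev",
    "datacenter" := "us-east-1",
    "deployment" := Json(
      "id"          := 1234L,
      "stack"       := "howdy-http--1-0-286--6fq4qfh3",
      "deploy_time" := "2018-03-01T00:00:00Z",
      "workflow"    := "magnetar"
    ),
    "unit" := Json(
      "name"        := "howdy-http",
      "description" := "example unit"
    )
  )
)
```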
@@ -0,0 +1,41 @@
```scala
package nelson
package notifications

import cats.~>
import cats.effect.IO
import cats.free.Free
import cats.implicits._

import org.http4s.Method.POST
import org.http4s.Request
import org.http4s.argonaut._
import org.http4s.client.Client

sealed abstract class WebHookOp[A] extends Product with Serializable

object WebHookOp {

  type WebHookOpF[A] = Free[WebHookOp, A]

  final case class SendWebHookNotification(subscribers: List[WebHookSubscription], ev: NotificationEvent) extends WebHookOp[Unit]

  def send(subscribers: List[WebHookSubscription], ev: NotificationEvent): WebHookOpF[Unit] =
    Free.liftF(SendWebHookNotification(subscribers, ev))
}

final class WebHookHttp(client: Client[IO]) extends (WebHookOp ~> IO) {
  import argonaut._, Argonaut._
  import WebHookOp._

  def apply[A](op: WebHookOp[A]): IO[A] = op match {
    case SendWebHookNotification(subscriptions, ev) =>
      subscriptions.traverse_(s => send(s, ev))
  }

  def send(s: WebHookSubscription, ev: NotificationEvent): IO[Unit] =
    client.expect[String](Request[IO](
```
Member: Double checking, does

Contributor (Author): This is a great point, this should be fire and forget. We really shouldn't care about the response body, only the response code, and that should only drive different logging behaviors imo.
```scala
      method = POST,
      uri = s.params.foldLeft(s.uri)((u, p) => u.withQueryParam(p._1, p._2)),
      headers = s.headers)
      .withBody(ev.asJson)).void
}
```
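Picking up the fire-and-forget suggestion from the review thread above, a variant of `send` could inspect only the response status and let failures drive logging rather than decoding the body. The sketch below is not part of this PR: the class name `FireAndForgetWebHookHttp` is hypothetical, the `println` stands in for whatever logger the project uses, and it assumes the same http4s 0.18-era client API already used in this file.

```scala
// Sketch only: a fire-and-forget webhook sender that ignores the response body
// and merely logs non-successful status codes.
import cats.effect.IO
import argonaut._, Argonaut._
import org.http4s.Method.POST
import org.http4s.Request
import org.http4s.argonaut._
import org.http4s.client.Client

final class FireAndForgetWebHookHttp(client: Client[IO]) {
  def send(s: WebHookSubscription, ev: NotificationEvent): IO[Unit] =
    Request[IO](
      method = POST,
      uri = s.params.foldLeft(s.uri)((u, p) => u.withQueryParam(p._1, p._2)),
      headers = s.headers
    ).withBody(ev.asJson).flatMap { req =>
      client.status(req).flatMap { status =>
        if (status.isSuccess) IO.unit
        // a real implementation would use the project's logger rather than println
        else IO(println(s"webhook POST to ${s.uri} returned $status"))
      }
    }
}
```

Either way, a program built with the smart constructor would be run through whichever interpreter is wired in, e.g. `WebHookOp.send(subscribers, ev).foldMap(new WebHookHttp(client))`.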
@@ -197,3 +197,6 @@ notifications:
```diff
     channels:
       - development
       - general
+  webhook:
+    subscribers:
+      - uri: https://localhost:80/
```
So it looks like for webhooks you're sending big deploy and destroy blobs to the endpoint - I assume the intention is for (presumably custom) endpoints to then take that blob and do something useful with it? Put differently, this isn't specific to any particular kind of webhook for some service, right?

Precisely, the goal is to provide real-time context for events occurring within Nelson to any desired consumer.
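To make that last point concrete, a subscribing endpoint can branch on the `action` field of the posted payload and react contextually, e.g. only to deploys. The following is a hypothetical consumer-side sketch, not part of this PR; the object and method names are invented and the `println` calls stand in for whatever the consumer actually wants to do.

```scala
// Hypothetical consumer: dispatch on the "action" field of an incoming webhook body.
import argonaut._, Argonaut._

object ExampleWebhookConsumer {
  def handle(body: String): Unit =
    Parse.parseOption(body).flatMap(_.field("action")).flatMap(_.string) match {
      case Some("deploy")       => println("a stack was deployed")        // e.g. kick off smoke tests
      case Some("decommission") => println("a deployment was torn down")  // e.g. clean up dashboards
      case _                    => ()                                     // ignore unknown events
    }
}
```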