diff --git a/builtin/providers/newrelic/config.go b/builtin/providers/newrelic/config.go new file mode 100644 index 0000000000..da96c64475 --- /dev/null +++ b/builtin/providers/newrelic/config.go @@ -0,0 +1,29 @@ +package newrelic + +import ( + "log" + + "github.com/hashicorp/terraform/helper/logging" + newrelic "github.com/paultyng/go-newrelic/api" +) + +// Config contains New Relic provider settings +type Config struct { + APIKey string + APIURL string +} + +// Client returns a new client for accessing New Relic +func (c *Config) Client() (*newrelic.Client, error) { + nrConfig := newrelic.Config{ + APIKey: c.APIKey, + Debug: logging.IsDebugOrHigher(), + BaseURL: c.APIURL, + } + + client := newrelic.New(nrConfig) + + log.Printf("[INFO] New Relic client configured") + + return &client, nil +} diff --git a/builtin/providers/newrelic/data_source_newrelic_application.go b/builtin/providers/newrelic/data_source_newrelic_application.go new file mode 100644 index 0000000000..e76a787824 --- /dev/null +++ b/builtin/providers/newrelic/data_source_newrelic_application.go @@ -0,0 +1,65 @@ +package newrelic + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + newrelic "github.com/paultyng/go-newrelic/api" +) + +func dataSourceNewRelicApplication() *schema.Resource { + return &schema.Resource{ + Read: dataSourceNewRelicApplicationRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "instance_ids": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeInt}, + Computed: true, + }, + "host_ids": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeInt}, + Computed: true, + }, + }, + } +} + +func dataSourceNewRelicApplicationRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + log.Printf("[INFO] Reading New Relic applications") + + applications, err := client.ListApplications() + if err != nil { + return err + } + + var 
application *newrelic.Application + name := d.Get("name").(string) + + for _, a := range applications { + if a.Name == name { + application = &a + break + } + } + + if application == nil { + return fmt.Errorf("The name '%s' does not match any New Relic applications.", name) + } + + d.SetId(strconv.Itoa(application.ID)) + d.Set("name", application.Name) + d.Set("instance_ids", application.Links.InstanceIDs) + d.Set("host_ids", application.Links.HostIDs) + + return nil +} diff --git a/builtin/providers/newrelic/data_source_newrelic_application_test.go b/builtin/providers/newrelic/data_source_newrelic_application_test.go new file mode 100644 index 0000000000..21a85a35b9 --- /dev/null +++ b/builtin/providers/newrelic/data_source_newrelic_application_test.go @@ -0,0 +1,50 @@ +package newrelic + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccNewRelicApplication_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNewRelicApplicationConfig(), + Check: resource.ComposeTestCheckFunc( + testAccNewRelicApplication("data.newrelic_application.app"), + ), + }, + }, + }) +} + +func testAccNewRelicApplication(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + r := s.RootModule().Resources[n] + a := r.Primary.Attributes + + if a["id"] == "" { + return fmt.Errorf("Expected to get an application from New Relic") + } + + if a["name"] != testAccExpectedApplicationName { + return fmt.Errorf("Expected the application name to be: %s, but got: %s", testAccExpectedApplicationName, a["name"]) + } + + return nil + } +} + +// The test application for this data source is created in provider_test.go +func testAccNewRelicApplicationConfig() string { + return fmt.Sprintf(` +data "newrelic_application" "app" { + name = "%s" +} 
+`, testAccExpectedApplicationName) +} diff --git a/builtin/providers/newrelic/helpers.go b/builtin/providers/newrelic/helpers.go new file mode 100644 index 0000000000..18f49135b4 --- /dev/null +++ b/builtin/providers/newrelic/helpers.go @@ -0,0 +1,37 @@ +package newrelic + +import ( + "fmt" + "strconv" + "strings" +) + +func parseIDs(serializedID string, count int) ([]int, error) { + rawIDs := strings.SplitN(serializedID, ":", count) + if len(rawIDs) != count { + return []int{}, fmt.Errorf("Unable to parse ID %v", serializedID) + } + + ids := make([]int, count) + + for i, rawID := range rawIDs { + id, err := strconv.ParseInt(rawID, 10, 32) + if err != nil { + return ids, err + } + + ids[i] = int(id) + } + + return ids, nil +} + +func serializeIDs(ids []int) string { + idStrings := make([]string, len(ids)) + + for i, id := range ids { + idStrings[i] = strconv.Itoa(id) + } + + return strings.Join(idStrings, ":") +} diff --git a/builtin/providers/newrelic/helpers_test.go b/builtin/providers/newrelic/helpers_test.go new file mode 100644 index 0000000000..837434f6e6 --- /dev/null +++ b/builtin/providers/newrelic/helpers_test.go @@ -0,0 +1,26 @@ +package newrelic + +import "testing" + +func TestParseIDs_Basic(t *testing.T) { + ids, err := parseIDs("1:2", 2) + if err != nil { + t.Fatal(err) + } + + if len(ids) != 2 { + t.Fatal(len(ids)) + } + + if ids[0] != 1 || ids[1] != 2 { + t.Fatal(ids) + } +} + +func TestSerializeIDs_Basic(t *testing.T) { + id := serializeIDs([]int{1, 2}) + + if id != "1:2" { + t.Fatal(id) + } +} diff --git a/builtin/providers/newrelic/import_newrelic_alert_channel_test.go b/builtin/providers/newrelic/import_newrelic_alert_channel_test.go new file mode 100644 index 0000000000..ac85062aa2 --- /dev/null +++ b/builtin/providers/newrelic/import_newrelic_alert_channel_test.go @@ -0,0 +1,29 @@ +package newrelic + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func 
TestAccNewRelicAlertChannel_import(t *testing.T) { + resourceName := "newrelic_alert_channel.foo" + rName := acctest.RandString(5) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNewRelicAlertChannelDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckNewRelicAlertChannelConfig(rName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/newrelic/import_newrelic_alert_condition_test.go b/builtin/providers/newrelic/import_newrelic_alert_condition_test.go new file mode 100644 index 0000000000..e030dfdee7 --- /dev/null +++ b/builtin/providers/newrelic/import_newrelic_alert_condition_test.go @@ -0,0 +1,30 @@ +package newrelic + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccNewRelicAlertCondition_import(t *testing.T) { + resourceName := "newrelic_alert_condition.foo" + rName := acctest.RandString(5) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNewRelicAlertConditionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckNewRelicAlertConditionConfig(rName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/newrelic/import_newrelic_alert_policy_test.go b/builtin/providers/newrelic/import_newrelic_alert_policy_test.go new file mode 100644 index 0000000000..a1048a786f --- /dev/null +++ b/builtin/providers/newrelic/import_newrelic_alert_policy_test.go @@ -0,0 +1,30 @@ +package newrelic + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func 
TestAccNewRelicAlertPolicy_import(t *testing.T) { + resourceName := "newrelic_alert_policy.foo" + rName := acctest.RandString(5) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNewRelicAlertPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckNewRelicAlertPolicyConfig(rName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/newrelic/provider.go b/builtin/providers/newrelic/provider.go new file mode 100644 index 0000000000..ac3a2e749a --- /dev/null +++ b/builtin/providers/newrelic/provider.go @@ -0,0 +1,49 @@ +package newrelic + +import ( + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider represents a resource provider in Terraform +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "api_key": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("NEWRELIC_API_KEY", nil), + Sensitive: true, + }, + "api_url": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("NEWRELIC_API_URL", "https://api.newrelic.com/v2"), + }, + }, + + DataSourcesMap: map[string]*schema.Resource{ + "newrelic_application": dataSourceNewRelicApplication(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "newrelic_alert_channel": resourceNewRelicAlertChannel(), + "newrelic_alert_condition": resourceNewRelicAlertCondition(), + "newrelic_alert_policy": resourceNewRelicAlertPolicy(), + "newrelic_alert_policy_channel": resourceNewRelicAlertPolicyChannel(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(data *schema.ResourceData) (interface{}, error) { + config := Config{ + APIKey: data.Get("api_key").(string), + APIURL: 
data.Get("api_url").(string), + } + log.Println("[INFO] Initializing New Relic client") + return config.Client() +} diff --git a/builtin/providers/newrelic/provider_test.go b/builtin/providers/newrelic/provider_test.go new file mode 100644 index 0000000000..7d36419b80 --- /dev/null +++ b/builtin/providers/newrelic/provider_test.go @@ -0,0 +1,69 @@ +package newrelic + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" + newrelic "github.com/newrelic/go-agent" +) + +var ( + testAccExpectedApplicationName string + testAccProviders map[string]terraform.ResourceProvider + testAccProvider *schema.Provider +) + +func init() { + testAccExpectedApplicationName = fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "newrelic": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProviderImpl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("NEWRELIC_API_KEY"); v == "" { + t.Log(v) + t.Fatal("NEWRELIC_API_KEY must be set for acceptance tests") + } + + // setup fake application by logging some metrics + if v := os.Getenv("NEWRELIC_LICENSE_KEY"); len(v) > 0 { + config := newrelic.NewConfig(testAccExpectedApplicationName, v) + app, err := newrelic.NewApplication(config) + if err != nil { + t.Log(err) + t.Fatal("Error setting up New Relic application") + } + + if err := app.WaitForConnection(30 * time.Second); err != nil { + t.Log(err) + t.Fatal("Unable to setup New Relic application connection") + } + + if err := app.RecordCustomEvent("terraform test", nil); err != nil { + t.Log(err) + t.Fatal("Unable to record custom event in New 
Relic") + } + + app.Shutdown(30 * time.Second) + } else { + t.Log(v) + t.Fatal("NEWRELIC_LICENSE_KEY must be set for acceptance tests") + } +} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_channel.go b/builtin/providers/newrelic/resource_newrelic_alert_channel.go new file mode 100644 index 0000000000..e8a642d2d7 --- /dev/null +++ b/builtin/providers/newrelic/resource_newrelic_alert_channel.go @@ -0,0 +1,168 @@ +package newrelic + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + newrelic "github.com/paultyng/go-newrelic/api" +) + +var alertChannelTypes = map[string][]string{ + "campfire": []string{ + "room", + "subdomain", + "token", + }, + "email": []string{ + "include_json_attachment", + "recipients", + }, + "hipchat": []string{ + "auth_token", + "base_url", + "room_id", + }, + "opsgenie": []string{ + "api_key", + "recipients", + "tags", + "teams", + }, + "pagerduty": []string{ + "service_key", + }, + "slack": []string{ + "channel", + "url", + }, + "user": []string{ + "user_id", + }, + "victorops": []string{ + "key", + "route_key", + }, + "webhook": []string{ + "auth_password", + "auth_type", + "auth_username", + "base_url", + "headers", + "payload_type", + "payload", + }, +} + +func resourceNewRelicAlertChannel() *schema.Resource { + validAlertChannelTypes := make([]string, 0, len(alertChannelTypes)) + for k := range alertChannelTypes { + validAlertChannelTypes = append(validAlertChannelTypes, k) + } + + return &schema.Resource{ + Create: resourceNewRelicAlertChannelCreate, + Read: resourceNewRelicAlertChannelRead, + // Update: Not currently supported in API + Delete: resourceNewRelicAlertChannelDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "type": { + Type: schema.TypeString, + Required: 
true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(validAlertChannelTypes, false), + }, + "configuration": { + Type: schema.TypeMap, + Required: true, + ForceNew: true, + //TODO: ValidateFunc: (use list of keys from map above) + Sensitive: true, + }, + }, + } +} + +func buildAlertChannelStruct(d *schema.ResourceData) *newrelic.AlertChannel { + channel := newrelic.AlertChannel{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Configuration: d.Get("configuration").(map[string]interface{}), + } + + return &channel +} + +func resourceNewRelicAlertChannelCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + channel := buildAlertChannelStruct(d) + + log.Printf("[INFO] Creating New Relic alert channel %s", channel.Name) + + channel, err := client.CreateAlertChannel(*channel) + if err != nil { + return err + } + + d.SetId(strconv.Itoa(channel.ID)) + + return nil +} + +func resourceNewRelicAlertChannelRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + id, err := strconv.ParseInt(d.Id(), 10, 32) + if err != nil { + return err + } + + log.Printf("[INFO] Reading New Relic alert channel %v", id) + + channel, err := client.GetAlertChannel(int(id)) + if err != nil { + if err == newrelic.ErrNotFound { + d.SetId("") + return nil + } + + return err + } + + d.Set("name", channel.Name) + d.Set("type", channel.Type) + if err := d.Set("configuration", channel.Configuration); err != nil { + return fmt.Errorf("[DEBUG] Error setting Alert Channel Configuration: %#v", err) + } + + return nil +} + +func resourceNewRelicAlertChannelDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + id, err := strconv.ParseInt(d.Id(), 10, 32) + if err != nil { + return err + } + + log.Printf("[INFO] Deleting New Relic alert channel %v", id) + + if err := client.DeleteAlertChannel(int(id)); err != nil { + return err + } + + d.SetId("") + + return nil 
+} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_channel_test.go b/builtin/providers/newrelic/resource_newrelic_alert_channel_test.go new file mode 100644 index 0000000000..a062e26ca9 --- /dev/null +++ b/builtin/providers/newrelic/resource_newrelic_alert_channel_test.go @@ -0,0 +1,131 @@ +package newrelic + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + newrelic "github.com/paultyng/go-newrelic/api" +) + +func TestAccNewRelicAlertChannel_Basic(t *testing.T) { + rName := acctest.RandString(5) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNewRelicAlertChannelDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckNewRelicAlertChannelConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckNewRelicAlertChannelExists("newrelic_alert_channel.foo"), + resource.TestCheckResourceAttr( + "newrelic_alert_channel.foo", "name", fmt.Sprintf("tf-test-%s", rName)), + resource.TestCheckResourceAttr( + "newrelic_alert_channel.foo", "type", "email"), + resource.TestCheckResourceAttr( + "newrelic_alert_channel.foo", "configuration.recipients", "foo@example.com"), + resource.TestCheckResourceAttr( + "newrelic_alert_channel.foo", "configuration.include_json_attachment", "1"), + ), + }, + resource.TestStep{ + Config: testAccCheckNewRelicAlertChannelConfigUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckNewRelicAlertChannelExists("newrelic_alert_channel.foo"), + resource.TestCheckResourceAttr( + "newrelic_alert_channel.foo", "name", fmt.Sprintf("tf-test-updated-%s", rName)), + resource.TestCheckResourceAttr( + "newrelic_alert_channel.foo", "type", "email"), + resource.TestCheckResourceAttr( + "newrelic_alert_channel.foo", "configuration.recipients", "bar@example.com"), + 
resource.TestCheckResourceAttr( + "newrelic_alert_channel.foo", "configuration.include_json_attachment", "0"), + ), + }, + }, + }) +} + +func testAccCheckNewRelicAlertChannelDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*newrelic.Client) + for _, r := range s.RootModule().Resources { + if r.Type != "newrelic_alert_channel" { + continue + } + + id, err := strconv.ParseInt(r.Primary.ID, 10, 32) + if err != nil { + return err + } + + _, err = client.GetAlertChannel(int(id)) + + if err == nil { + return fmt.Errorf("Alert channel still exists") + } + + } + return nil +} + +func testAccCheckNewRelicAlertChannelExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + if rs.Primary.ID == "" { + return fmt.Errorf("No channel ID is set") + } + + client := testAccProvider.Meta().(*newrelic.Client) + + id, err := strconv.ParseInt(rs.Primary.ID, 10, 32) + if err != nil { + return err + } + + found, err := client.GetAlertChannel(int(id)) + if err != nil { + return err + } + + if strconv.Itoa(found.ID) != rs.Primary.ID { + return fmt.Errorf("Channel not found: %v - %v", rs.Primary.ID, found) + } + + return nil + } +} + +func testAccCheckNewRelicAlertChannelConfig(rName string) string { + return fmt.Sprintf(` +resource "newrelic_alert_channel" "foo" { + name = "tf-test-%s" + type = "email" + + configuration = { + recipients = "foo@example.com" + include_json_attachment = "1" + } +} +`, rName) +} + +func testAccCheckNewRelicAlertChannelConfigUpdated(rName string) string { + return fmt.Sprintf(` +resource "newrelic_alert_channel" "foo" { + name = "tf-test-updated-%s" + type = "email" + + configuration = { + recipients = "bar@example.com" + include_json_attachment = "0" + } +} +`, rName) +} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_condition.go b/builtin/providers/newrelic/resource_newrelic_alert_condition.go new 
file mode 100644 index 0000000000..db8ba3c9cb --- /dev/null +++ b/builtin/providers/newrelic/resource_newrelic_alert_condition.go @@ -0,0 +1,342 @@ +package newrelic + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + newrelic "github.com/paultyng/go-newrelic/api" +) + +var alertConditionTypes = map[string][]string{ + "apm_app_metric": []string{ + "apdex", + "error_percentage", + "response_time_background", + "response_time_web", + "throughput_background", + "throughput_web", + "user_defined", + }, + "apm_kt_metric": []string{ + "apdex", + "error_count", + "error_percentage", + "response_time", + "throughput", + }, + "browser_metric": []string{ + "ajax_response_time", + "ajax_throughput", + "dom_processing", + "end_user_apdex", + "network", + "page_rendering", + "page_view_throughput", + "page_views_with_js_errors", + "request_queuing", + "total_page_load", + "user_defined", + "web_application", + }, + "mobile_metric": []string{ + "database", + "images", + "json", + "mobile_crash_rate", + "network_error_percentage", + "network", + "status_error_percentage", + "user_defined", + "view_loading", + }, + "servers_metric": []string{ + "cpu_percentage", + "disk_io_percentage", + "fullest_disk_percentage", + "load_average_one_minute", + "memory_percentage", + "user_defined", + }, +} + +func resourceNewRelicAlertCondition() *schema.Resource { + validAlertConditionTypes := make([]string, 0, len(alertConditionTypes)) + for k := range alertConditionTypes { + validAlertConditionTypes = append(validAlertConditionTypes, k) + } + + return &schema.Resource{ + Create: resourceNewRelicAlertConditionCreate, + Read: resourceNewRelicAlertConditionRead, + Update: resourceNewRelicAlertConditionUpdate, + Delete: resourceNewRelicAlertConditionDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "policy_id": { + Type: 
schema.TypeInt, + Required: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(validAlertConditionTypes, false), + }, + "entities": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeInt}, + Required: true, + MinItems: 1, + }, + "metric": { + Type: schema.TypeString, + Required: true, + //TODO: ValidateFunc from map + }, + "runbook_url": { + Type: schema.TypeString, + Optional: true, + }, + "term": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "duration": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: intInSlice([]int{5, 10, 15, 30, 60, 120}), + }, + "operator": { + Type: schema.TypeString, + Optional: true, + Default: "equal", + ValidateFunc: validation.StringInSlice([]string{"above", "below", "equal"}, false), + }, + "priority": { + Type: schema.TypeString, + Optional: true, + Default: "critical", + ValidateFunc: validation.StringInSlice([]string{"critical", "warning"}, false), + }, + "threshold": { + Type: schema.TypeFloat, + Required: true, + ValidateFunc: float64Gte(0.0), + }, + "time_function": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"all", "any"}, false), + }, + }, + }, + Required: true, + MinItems: 1, + }, + "user_defined_metric": { + Type: schema.TypeString, + Optional: true, + }, + "user_defined_value_function": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"average", "min", "max", "total", "sample_size"}, false), + }, + }, + } +} + +func buildAlertConditionStruct(d *schema.ResourceData) *newrelic.AlertCondition { + entitySet := d.Get("entities").([]interface{}) + entities := make([]string, len(entitySet)) + + for i, entity := range entitySet { + entities[i] = strconv.Itoa(entity.(int)) + } + + termSet := d.Get("term").([]interface{}) + 
terms := make([]newrelic.AlertConditionTerm, len(termSet)) + + for i, termI := range termSet { + termM := termI.(map[string]interface{}) + + terms[i] = newrelic.AlertConditionTerm{ + Duration: termM["duration"].(int), + Operator: termM["operator"].(string), + Priority: termM["priority"].(string), + Threshold: termM["threshold"].(float64), + TimeFunction: termM["time_function"].(string), + } + } + + condition := newrelic.AlertCondition{ + Type: d.Get("type").(string), + Name: d.Get("name").(string), + Enabled: true, + Entities: entities, + Metric: d.Get("metric").(string), + Terms: terms, + PolicyID: d.Get("policy_id").(int), + } + + if attr, ok := d.GetOk("runbook_url"); ok { + condition.RunbookURL = attr.(string) + } + + if attrM, ok := d.GetOk("user_defined_metric"); ok { + if attrVF, ok := d.GetOk("user_defined_value_function"); ok { + condition.UserDefined = newrelic.AlertConditionUserDefined{ + Metric: attrM.(string), + ValueFunction: attrVF.(string), + } + } + } + + return &condition +} + +func readAlertConditionStruct(condition *newrelic.AlertCondition, d *schema.ResourceData) error { + ids, err := parseIDs(d.Id(), 2) + if err != nil { + return err + } + + policyID := ids[0] + + entities := make([]int, len(condition.Entities)) + for i, entity := range condition.Entities { + v, err := strconv.ParseInt(entity, 10, 32) + if err != nil { + return err + } + entities[i] = int(v) + } + + d.Set("policy_id", policyID) + d.Set("name", condition.Name) + d.Set("type", condition.Type) + d.Set("metric", condition.Metric) + d.Set("runbook_url", condition.RunbookURL) + d.Set("user_defined_metric", condition.UserDefined.Metric) + d.Set("user_defined_value_function", condition.UserDefined.ValueFunction) + if err := d.Set("entities", entities); err != nil { + return fmt.Errorf("[DEBUG] Error setting alert condition entities: %#v", err) + } + + var terms []map[string]interface{} + + for _, src := range condition.Terms { + dst := map[string]interface{}{ + "duration": 
src.Duration, + "operator": src.Operator, + "priority": src.Priority, + "threshold": src.Threshold, + "time_function": src.TimeFunction, + } + terms = append(terms, dst) + } + + if err := d.Set("term", terms); err != nil { + return fmt.Errorf("[DEBUG] Error setting alert condition terms: %#v", err) + } + + return nil +} + +func resourceNewRelicAlertConditionCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + condition := buildAlertConditionStruct(d) + + log.Printf("[INFO] Creating New Relic alert condition %s", condition.Name) + + condition, err := client.CreateAlertCondition(*condition) + if err != nil { + return err + } + + d.SetId(serializeIDs([]int{condition.PolicyID, condition.ID})) + + return nil +} + +func resourceNewRelicAlertConditionRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + log.Printf("[INFO] Reading New Relic alert condition %s", d.Id()) + + ids, err := parseIDs(d.Id(), 2) + if err != nil { + return err + } + + policyID := ids[0] + id := ids[1] + + condition, err := client.GetAlertCondition(policyID, id) + if err != nil { + if err == newrelic.ErrNotFound { + d.SetId("") + return nil + } + + return err + } + + return readAlertConditionStruct(condition, d) +} + +func resourceNewRelicAlertConditionUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + condition := buildAlertConditionStruct(d) + + ids, err := parseIDs(d.Id(), 2) + if err != nil { + return err + } + + policyID := ids[0] + id := ids[1] + + condition.PolicyID = policyID + condition.ID = id + + log.Printf("[INFO] Updating New Relic alert condition %d", id) + + updatedCondition, err := client.UpdateAlertCondition(*condition) + if err != nil { + return err + } + + return readAlertConditionStruct(updatedCondition, d) +} + +func resourceNewRelicAlertConditionDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + ids, err := 
parseIDs(d.Id(), 2) + if err != nil { + return err + } + + policyID := ids[0] + id := ids[1] + + log.Printf("[INFO] Deleting New Relic alert condition %d", id) + + if err := client.DeleteAlertCondition(policyID, id); err != nil { + return err + } + + d.SetId("") + + return nil +} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go b/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go new file mode 100644 index 0000000000..b9c608a83a --- /dev/null +++ b/builtin/providers/newrelic/resource_newrelic_alert_condition_test.go @@ -0,0 +1,189 @@ +package newrelic + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + newrelic "github.com/paultyng/go-newrelic/api" +) + +func TestAccNewRelicAlertCondition_Basic(t *testing.T) { + rName := acctest.RandString(5) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNewRelicAlertConditionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckNewRelicAlertConditionConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckNewRelicAlertConditionExists("newrelic_alert_condition.foo"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "name", fmt.Sprintf("tf-test-%s", rName)), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "type", "apm_app_metric"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "runbook_url", "https://foo.example.com"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "entities.#", "1"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "entities.0", "12345"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.#", "1"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.duration", 
"5"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.operator", "below"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.priority", "critical"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.threshold", "0.75"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.time_function", "all"), + ), + }, + resource.TestStep{ + Config: testAccCheckNewRelicAlertConditionConfigUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckNewRelicAlertConditionExists("newrelic_alert_condition.foo"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "name", fmt.Sprintf("tf-test-updated-%s", rName)), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "runbook_url", "https://bar.example.com"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "entities.#", "1"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "entities.0", "67890"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.#", "1"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.duration", "10"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.operator", "below"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.priority", "critical"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.threshold", "0.65"), + resource.TestCheckResourceAttr( + "newrelic_alert_condition.foo", "term.0.time_function", "all"), + ), + }, + }, + }) +} + +// TODO: func TestAccNewRelicAlertCondition_Multi(t *testing.T) { + +func testAccCheckNewRelicAlertConditionDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*newrelic.Client) + for _, r := range s.RootModule().Resources { + if r.Type != "newrelic_alert_condition" { + continue + } + + ids, err := parseIDs(r.Primary.ID, 2) + if err != nil { + return err + 
} + + policyID := ids[0] + id := ids[1] + + _, err = client.GetAlertCondition(policyID, id) + if err == nil { + return fmt.Errorf("Alert condition still exists") + } + + } + return nil +} + +func testAccCheckNewRelicAlertConditionExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + if rs.Primary.ID == "" { + return fmt.Errorf("No alert condition ID is set") + } + + client := testAccProvider.Meta().(*newrelic.Client) + + ids, err := parseIDs(rs.Primary.ID, 2) + if err != nil { + return err + } + + policyID := ids[0] + id := ids[1] + + found, err := client.GetAlertCondition(policyID, id) + if err != nil { + return err + } + + if found.ID != id { + return fmt.Errorf("Alert condition not found: %v - %v", id, found) + } + + return nil + } +} + +func testAccCheckNewRelicAlertConditionConfig(rName string) string { + return fmt.Sprintf(` +resource "newrelic_alert_policy" "foo" { + name = "tf-test-%[1]s" +} + +resource "newrelic_alert_condition" "foo" { + policy_id = "${newrelic_alert_policy.foo.id}" + + name = "tf-test-%[1]s" + type = "apm_app_metric" + entities = ["12345"] + metric = "apdex" + runbook_url = "https://foo.example.com" + + term { + duration = 5 + operator = "below" + priority = "critical" + threshold = "0.75" + time_function = "all" + } +} +`, rName) +} + +func testAccCheckNewRelicAlertConditionConfigUpdated(rName string) string { + return fmt.Sprintf(` +resource "newrelic_alert_policy" "foo" { + name = "tf-test-updated-%[1]s" +} + +resource "newrelic_alert_condition" "foo" { + policy_id = "${newrelic_alert_policy.foo.id}" + + name = "tf-test-updated-%[1]s" + type = "apm_app_metric" + entities = ["67890"] + metric = "apdex" + runbook_url = "https://bar.example.com" + + term { + duration = 10 + operator = "below" + priority = "critical" + threshold = "0.65" + time_function = "all" + } +} +`, rName) +} + +// TODO: const 
testAccCheckNewRelicAlertConditionConfigMulti = ` diff --git a/builtin/providers/newrelic/resource_newrelic_alert_policy.go b/builtin/providers/newrelic/resource_newrelic_alert_policy.go new file mode 100644 index 0000000000..befc04ceaf --- /dev/null +++ b/builtin/providers/newrelic/resource_newrelic_alert_policy.go @@ -0,0 +1,119 @@ +package newrelic + +import ( + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + newrelic "github.com/paultyng/go-newrelic/api" +) + +func resourceNewRelicAlertPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceNewRelicAlertPolicyCreate, + Read: resourceNewRelicAlertPolicyRead, + // Update: Not currently supported in API + Delete: resourceNewRelicAlertPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "incident_preference": { + Type: schema.TypeString, + Optional: true, + Default: "PER_POLICY", + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"PER_POLICY", "PER_CONDITION", "PER_CONDITION_AND_TARGET"}, false), + }, + "created_at": { + Type: schema.TypeInt, + Computed: true, + }, + "updated_at": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func buildAlertPolicyStruct(d *schema.ResourceData) *newrelic.AlertPolicy { + policy := newrelic.AlertPolicy{ + Name: d.Get("name").(string), + } + + if attr, ok := d.GetOk("incident_preference"); ok { + policy.IncidentPreference = attr.(string) + } + + return &policy +} + +func resourceNewRelicAlertPolicyCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + policy := buildAlertPolicyStruct(d) + + log.Printf("[INFO] Creating New Relic alert policy %s", policy.Name) + + policy, err := client.CreateAlertPolicy(*policy) + if err != nil { + return err + } + + 
d.SetId(strconv.Itoa(policy.ID)) + + return nil +} + +func resourceNewRelicAlertPolicyRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + id, err := strconv.ParseInt(d.Id(), 10, 32) + if err != nil { + return err + } + + log.Printf("[INFO] Reading New Relic alert policy %v", id) + + policy, err := client.GetAlertPolicy(int(id)) + if err != nil { + if err == newrelic.ErrNotFound { + d.SetId("") + return nil + } + + return err + } + + d.Set("name", policy.Name) + d.Set("incident_preference", policy.IncidentPreference) + d.Set("created_at", policy.CreatedAt) + d.Set("updated_at", policy.UpdatedAt) + + return nil +} + +func resourceNewRelicAlertPolicyDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + id, err := strconv.ParseInt(d.Id(), 10, 32) + if err != nil { + return err + } + + log.Printf("[INFO] Deleting New Relic alert policy %v", id) + + if err := client.DeleteAlertPolicy(int(id)); err != nil { + return err + } + + d.SetId("") + + return nil +} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_policy_channel.go b/builtin/providers/newrelic/resource_newrelic_alert_policy_channel.go new file mode 100644 index 0000000000..df3eee640b --- /dev/null +++ b/builtin/providers/newrelic/resource_newrelic_alert_policy_channel.go @@ -0,0 +1,137 @@ +package newrelic + +import ( + "log" + + "github.com/hashicorp/terraform/helper/schema" + newrelic "github.com/paultyng/go-newrelic/api" +) + +func policyChannelExists(client *newrelic.Client, policyID int, channelID int) (bool, error) { + channel, err := client.GetAlertChannel(channelID) + if err != nil { + if err == newrelic.ErrNotFound { + return false, nil + } + + return false, err + } + + for _, id := range channel.Links.PolicyIDs { + if id == policyID { + return true, nil + } + } + + return false, nil +} + +func resourceNewRelicAlertPolicyChannel() *schema.Resource { + return &schema.Resource{ + Create: 
resourceNewRelicAlertPolicyChannelCreate, + Read: resourceNewRelicAlertPolicyChannelRead, + // Update: Not currently supported in API + Delete: resourceNewRelicAlertPolicyChannelDelete, + Schema: map[string]*schema.Schema{ + "policy_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "channel_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceNewRelicAlertPolicyChannelCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + policyID := d.Get("policy_id").(int) + channelID := d.Get("channel_id").(int) + + serializedID := serializeIDs([]int{policyID, channelID}) + + log.Printf("[INFO] Creating New Relic alert policy channel %s", serializedID) + + exists, err := policyChannelExists(client, policyID, channelID) + if err != nil { + return err + } + + if !exists { + err = client.UpdateAlertPolicyChannels(policyID, []int{channelID}) + if err != nil { + return err + } + } + + d.SetId(serializedID) + + return nil +} + +func resourceNewRelicAlertPolicyChannelRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + ids, err := parseIDs(d.Id(), 2) + if err != nil { + return err + } + + policyID := ids[0] + channelID := ids[1] + + log.Printf("[INFO] Reading New Relic alert policy channel %s", d.Id()) + + exists, err := policyChannelExists(client, policyID, channelID) + if err != nil { + return err + } + + if !exists { + d.SetId("") + return nil + } + + d.Set("policy_id", policyID) + d.Set("channel_id", channelID) + + return nil +} + +func resourceNewRelicAlertPolicyChannelDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*newrelic.Client) + + ids, err := parseIDs(d.Id(), 2) + if err != nil { + return err + } + + policyID := ids[0] + channelID := ids[1] + + log.Printf("[INFO] Deleting New Relic alert policy channel %s", d.Id()) + + exists, err := policyChannelExists(client, policyID, channelID) + if err != 
nil { + return err + } + + if exists { + if err := client.DeleteAlertPolicyChannel(policyID, channelID); err != nil { + switch err { + case newrelic.ErrNotFound: + return nil + } + return err + } + } + + d.SetId("") + + return nil +} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_policy_channel_test.go b/builtin/providers/newrelic/resource_newrelic_alert_policy_channel_test.go new file mode 100644 index 0000000000..7caef10df1 --- /dev/null +++ b/builtin/providers/newrelic/resource_newrelic_alert_policy_channel_test.go @@ -0,0 +1,139 @@ +package newrelic + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + newrelic "github.com/paultyng/go-newrelic/api" +) + +func TestAccNewRelicAlertPolicyChannel_Basic(t *testing.T) { + rName := acctest.RandString(5) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNewRelicAlertPolicyChannelDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckNewRelicAlertPolicyChannelConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckNewRelicAlertPolicyChannelExists("newrelic_alert_policy_channel.foo"), + ), + }, + resource.TestStep{ + Config: testAccCheckNewRelicAlertPolicyChannelConfigUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckNewRelicAlertPolicyChannelExists("newrelic_alert_policy_channel.foo"), + ), + }, + }, + }) +} + +func testAccCheckNewRelicAlertPolicyChannelDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*newrelic.Client) + for _, r := range s.RootModule().Resources { + if r.Type != "newrelic_alert_policy_channel" { + continue + } + + ids, err := parseIDs(r.Primary.ID, 2) + if err != nil { + return err + } + + policyID := ids[0] + channelID := ids[1] + + exists, err := policyChannelExists(client, policyID, 
channelID) + if err != nil { + return err + } + + if exists { + return fmt.Errorf("Resource still exists") + } + } + return nil +} + +func testAccCheckNewRelicAlertPolicyChannelExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + if rs.Primary.ID == "" { + return fmt.Errorf("No resource ID is set") + } + + client := testAccProvider.Meta().(*newrelic.Client) + + ids, err := parseIDs(rs.Primary.ID, 2) + if err != nil { + return err + } + + policyID := ids[0] + channelID := ids[1] + + exists, err := policyChannelExists(client, policyID, channelID) + if err != nil { + return err + } + if !exists { + return fmt.Errorf("Resource not found: %v", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckNewRelicAlertPolicyChannelConfig(rName string) string { + return fmt.Sprintf(` +resource "newrelic_alert_policy" "foo" { + name = "tf-test-%[1]s" +} + +resource "newrelic_alert_channel" "foo" { + name = "tf-test-%[1]s" + type = "email" + + configuration = { + recipients = "foo@example.com" + include_json_attachment = "1" + } +} + +resource "newrelic_alert_policy_channel" "foo" { + policy_id = "${newrelic_alert_policy.foo.id}" + channel_id = "${newrelic_alert_channel.foo.id}" +} +`, rName) +} + +func testAccCheckNewRelicAlertPolicyChannelConfigUpdated(rName string) string { + return fmt.Sprintf(` +resource "newrelic_alert_policy" "bar" { + name = "tf-test-updated-%[1]s" +} + +resource "newrelic_alert_channel" "foo" { + name = "tf-test-updated-%[1]s" + type = "email" + + configuration = { + recipients = "bar@example.com" + include_json_attachment = "0" + } +} + +resource "newrelic_alert_policy_channel" "foo" { + policy_id = "${newrelic_alert_policy.bar.id}" + channel_id = "${newrelic_alert_channel.foo.id}" +} +`, rName) +} diff --git a/builtin/providers/newrelic/resource_newrelic_alert_policy_test.go 
b/builtin/providers/newrelic/resource_newrelic_alert_policy_test.go new file mode 100644 index 0000000000..a76b452eb9 --- /dev/null +++ b/builtin/providers/newrelic/resource_newrelic_alert_policy_test.go @@ -0,0 +1,112 @@ +package newrelic + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + newrelic "github.com/paultyng/go-newrelic/api" +) + +func TestAccNewRelicAlertPolicy_Basic(t *testing.T) { + rName := acctest.RandString(5) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNewRelicAlertPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckNewRelicAlertPolicyConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckNewRelicAlertPolicyExists("newrelic_alert_policy.foo"), + resource.TestCheckResourceAttr( + "newrelic_alert_policy.foo", "name", fmt.Sprintf("tf-test-%s", rName)), + resource.TestCheckResourceAttr( + "newrelic_alert_policy.foo", "incident_preference", "PER_POLICY"), + ), + }, + resource.TestStep{ + Config: testAccCheckNewRelicAlertPolicyConfigUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckNewRelicAlertPolicyExists("newrelic_alert_policy.foo"), + resource.TestCheckResourceAttr( + "newrelic_alert_policy.foo", "name", fmt.Sprintf("tf-test-updated-%s", rName)), + resource.TestCheckResourceAttr( + "newrelic_alert_policy.foo", "incident_preference", "PER_CONDITION"), + ), + }, + }, + }) +} + +func testAccCheckNewRelicAlertPolicyDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*newrelic.Client) + for _, r := range s.RootModule().Resources { + if r.Type != "newrelic_alert_policy" { + continue + } + + id, err := strconv.ParseInt(r.Primary.ID, 10, 32) + if err != nil { + return err + } + + _, err = client.GetAlertPolicy(int(id)) + + if err == 
nil { + return fmt.Errorf("Policy still exists") + } + + } + return nil +} + +func testAccCheckNewRelicAlertPolicyExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + if rs.Primary.ID == "" { + return fmt.Errorf("No policy ID is set") + } + + client := testAccProvider.Meta().(*newrelic.Client) + + id, err := strconv.ParseInt(rs.Primary.ID, 10, 32) + if err != nil { + return err + } + + found, err := client.GetAlertPolicy(int(id)) + if err != nil { + return err + } + + if strconv.Itoa(found.ID) != rs.Primary.ID { + return fmt.Errorf("Policy not found: %v - %v", rs.Primary.ID, found) + } + + return nil + } +} + +func testAccCheckNewRelicAlertPolicyConfig(rName string) string { + return fmt.Sprintf(` +resource "newrelic_alert_policy" "foo" { + name = "tf-test-%s" +} +`, rName) +} + +func testAccCheckNewRelicAlertPolicyConfigUpdated(rName string) string { + return fmt.Sprintf(` +resource "newrelic_alert_policy" "foo" { + name = "tf-test-updated-%s" + incident_preference = "PER_CONDITION" +} +`, rName) +} diff --git a/builtin/providers/newrelic/validation.go b/builtin/providers/newrelic/validation.go new file mode 100644 index 0000000000..11815b5b5f --- /dev/null +++ b/builtin/providers/newrelic/validation.go @@ -0,0 +1,43 @@ +package newrelic + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func float64Gte(gte float64) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(float64) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be float64", k)) + return + } + + if v >= gte { + return + } + + es = append(es, fmt.Errorf("expected %s to be greater than or equal to %v, got %v", k, gte, v)) + return + } +} + +func intInSlice(valid []int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(int) + 
if !ok { + es = append(es, fmt.Errorf("expected type of %s to be int", k)) + return + } + + for _, p := range valid { + if v == p { + return + } + } + + es = append(es, fmt.Errorf("expected %s to be one of %v, got %v", k, valid, v)) + return + } +} diff --git a/builtin/providers/newrelic/validation_test.go b/builtin/providers/newrelic/validation_test.go new file mode 100644 index 0000000000..03552823b7 --- /dev/null +++ b/builtin/providers/newrelic/validation_test.go @@ -0,0 +1,81 @@ +package newrelic + +import ( + "regexp" + "testing" + + "github.com/hashicorp/terraform/helper/schema" +) + +type testCase struct { + val interface{} + f schema.SchemaValidateFunc + expectedErr *regexp.Regexp +} + +func TestValidationIntInInSlice(t *testing.T) { + runTestCases(t, []testCase{ + { + val: 2, + f: intInSlice([]int{1, 2, 3}), + }, + { + val: 4, + f: intInSlice([]int{1, 2, 3}), + expectedErr: regexp.MustCompile("expected [\\w]+ to be one of \\[1 2 3\\], got 4"), + }, + { + val: "foo", + f: intInSlice([]int{1, 2, 3}), + expectedErr: regexp.MustCompile("expected type of [\\w]+ to be int"), + }, + }) +} + +func TestValidationFloat64Gte(t *testing.T) { + runTestCases(t, []testCase{ + { + val: 1.1, + f: float64Gte(1.1), + }, + { + val: 1.2, + f: float64Gte(1.1), + }, + { + val: "foo", + f: float64Gte(1.1), + expectedErr: regexp.MustCompile("expected type of [\\w]+ to be float64"), + }, + { + val: 0.1, + f: float64Gte(1.1), + expectedErr: regexp.MustCompile("expected [\\w]+ to be greater than or equal to 1.1, got 0.1"), + }, + }) +} + +func runTestCases(t *testing.T, cases []testCase) { + matchErr := func(errs []error, r *regexp.Regexp) bool { + // err must match one provided + for _, err := range errs { + if r.MatchString(err.Error()) { + return true + } + } + + return false + } + + for i, tc := range cases { + _, errs := tc.f(tc.val, "test_property") + + if len(errs) == 0 && tc.expectedErr == nil { + continue + } + + if !matchErr(errs, tc.expectedErr) { + t.Fatalf("expected 
test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs) + } + } +} diff --git a/command/internal_plugin_list.go b/command/internal_plugin_list.go index 5b0f2cc3db..54c4ae88aa 100644 --- a/command/internal_plugin_list.go +++ b/command/internal_plugin_list.go @@ -36,6 +36,7 @@ import ( logentriesprovider "github.com/hashicorp/terraform/builtin/providers/logentries" mailgunprovider "github.com/hashicorp/terraform/builtin/providers/mailgun" mysqlprovider "github.com/hashicorp/terraform/builtin/providers/mysql" + newrelicprovider "github.com/hashicorp/terraform/builtin/providers/newrelic" nomadprovider "github.com/hashicorp/terraform/builtin/providers/nomad" nullprovider "github.com/hashicorp/terraform/builtin/providers/null" openstackprovider "github.com/hashicorp/terraform/builtin/providers/openstack" @@ -99,6 +100,7 @@ var InternalProviders = map[string]plugin.ProviderFunc{ "logentries": logentriesprovider.Provider, "mailgun": mailgunprovider.Provider, "mysql": mysqlprovider.Provider, + "newrelic": newrelicprovider.Provider, "nomad": nomadprovider.Provider, "null": nullprovider.Provider, "openstack": openstackprovider.Provider, diff --git a/vendor/github.com/newrelic/go-agent/CHANGELOG.md b/vendor/github.com/newrelic/go-agent/CHANGELOG.md new file mode 100644 index 0000000000..465f0ec22c --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/CHANGELOG.md @@ -0,0 +1,155 @@ +## ChangeLog + +## 1.5.0 + +* Added support for Windows. Thanks to @ianomad and @lvxv for the contributions. + +* The number of heap objects allocated is recorded in the + `Memory/Heap/AllocatedObjects` metric. This will soon be displayed on the "Go + runtime" page. + +* If the [DatastoreSegment](https://godoc.org/github.com/newrelic/go-agent#DatastoreSegment) + fields `Host` and `PortPathOrID` are not provided, they will no longer appear + as `"unknown"` in transaction traces and slow query traces. + +* Stack traces will now be nicely aligned in the APM UI. 
+ +## 1.4.0 + +* Added support for slow query traces. Slow datastore segments will now + generate slow query traces viewable on the datastore tab. These traces include + a stack trace and help you to debug slow datastore activity. + [Slow Query Documentation](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/viewing-slow-query-details) + +* Added new +[DatastoreSegment](https://godoc.org/github.com/newrelic/go-agent#DatastoreSegment) +fields `ParameterizedQuery`, `QueryParameters`, `Host`, `PortPathOrID`, and +`DatabaseName`. These fields will be shown in transaction traces and in slow +query traces. + +## 1.3.0 + +* Breaking Change: Added a timeout parameter to the `Application.Shutdown` method. + +## 1.2.0 + +* Added support for instrumenting short-lived processes: + * The new `Application.Shutdown` method allows applications to report + data to New Relic without waiting a full minute. + * The new `Application.WaitForConnection` method allows your process to + defer instrumentation until the application is connected and ready to + gather data. + * Full documentation here: [application.go](application.go) + * Example short-lived process: [examples/short-lived-process/main.go](examples/short-lived-process/main.go) + +* Error metrics are no longer created when `ErrorCollector.Enabled = false`. + +* Added support for [github.com/mgutz/logxi](github.com/mgutz/logxi). See + [_integrations/nrlogxi/v1/nrlogxi.go](_integrations/nrlogxi/v1/nrlogxi.go). + +* Fixed bug where Transaction Trace thresholds based upon Apdex were not being + applied to background transactions. + +## 1.1.0 + +* Added support for Transaction Traces. + +* Stack trace filenames have been shortened: Any thing preceding the first + `/src/` is now removed. + +## 1.0.0 + +* Removed `BetaToken` from the `Config` structure. + +* Breaking Datastore Change: `datastore` package contents moved to top level + `newrelic` package. `datastore.MySQL` has become `newrelic.DatastoreMySQL`. 
+ +* Breaking Attributes Change: `attributes` package contents moved to top + level `newrelic` package. `attributes.ResponseCode` has become + `newrelic.AttributeResponseCode`. Some attribute name constants have been + shortened. + +* Added "runtime.NumCPU" to the environment tab. Thanks sergeylanzman for the + contribution. + +* Prefixed the environment tab values "Compiler", "GOARCH", "GOOS", and + "Version" with "runtime.". + +## 0.8.0 + +* Breaking Segments API Changes: The segments API has been rewritten with the + goal of being easier to use and to avoid nil Transaction checks. See: + + * [segments.go](segments.go) + * [examples/server/main.go](examples/server/main.go) + * [GUIDE.md#segments](GUIDE.md#segments) + +* Updated LICENSE.txt with contribution information. + +## 0.7.1 + +* Fixed a bug causing the `Config` to fail to serialize into JSON when the + `Transport` field was populated. + +## 0.7.0 + +* Eliminated `api`, `version`, and `log` packages. `Version`, `Config`, + `Application`, and `Transaction` now live in the top level `newrelic` package. + If you imported the `attributes` or `datastore` packages then you will need + to remove `api` from the import path. + +* Breaking Logging Changes + +Logging is no longer controlled though a single global. Instead, logging is +configured on a per-application basis with the new `Config.Logger` field. The +logger is an interface described in [log.go](log.go). See +[GUIDE.md#logging](GUIDE.md#logging). + +## 0.6.1 + +* No longer create "GC/System/Pauses" metric if no GC pauses happened. + +## 0.6.0 + +* Introduced beta token to support our beta program. + +* Rename `Config.Development` to `Config.Enabled` (and change boolean + direction). + +* Fixed a bug where exclusive time could be incorrect if segments were not + ended. + +* Fix unit tests broken in 1.6. + +* In `Config.Enabled = false` mode, the license must be the proper length or empty. 
+ +* Added runtime statistics for CPU/memory usage, garbage collection, and number + of goroutines. + +## 0.5.0 + +* Added segment timing methods to `Transaction`. These methods must only be + used in a single goroutine. + +* The license length check will not be performed in `Development` mode. + +* Rename `SetLogFile` to `SetFile` to reduce redundancy. + +* Added `DebugEnabled` logging guard to reduce overhead. + +* `Transaction` now implements an `Ignore` method which will prevent + any of the transaction's data from being recorded. + +* `Transaction` now implements a subset of the interfaces + `http.CloseNotifier`, `http.Flusher`, `http.Hijacker`, and `io.ReaderFrom` + to match the behavior of its wrapped `http.ResponseWriter`. + +* Changed project name from `go-sdk` to `go-agent`. + +## 0.4.0 + +* Queue time support added: if the inbound request contains an +`"X-Request-Start"` or `"X-Queue-Start"` header with a unix timestamp, the +agent will report queue time metrics. Queue time will appear on the +application overview chart. The timestamp may be fractional seconds, +milliseconds, or microseconds: the agent will deduce the correct units. diff --git a/vendor/github.com/newrelic/go-agent/CONTRIBUTING.md b/vendor/github.com/newrelic/go-agent/CONTRIBUTING.md new file mode 100644 index 0000000000..d04bd5e7fa --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/CONTRIBUTING.md @@ -0,0 +1,9 @@ +# Contributing + +You are welcome to send pull requests to us. By doing so you agree that you are +granting New Relic a non-exclusive, non-revokable, no-cost license to use the +code, algorithms, patents, and ideas in that code in our products if we so +choose. You also agree the code is provided as-is and you provide no warranties +as to its fitness or correctness for any purpose. 
+ +* [LICENSE.txt](LICENSE.txt) diff --git a/vendor/github.com/newrelic/go-agent/GUIDE.md b/vendor/github.com/newrelic/go-agent/GUIDE.md new file mode 100644 index 0000000000..7230db6b45 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/GUIDE.md @@ -0,0 +1,325 @@ +# New Relic Go Agent Guide + +* [Installation](#installation) +* [Config and Application](#config-and-application) +* [Logging](#logging) + * [logrus](#logrus) +* [Transactions](#transactions) +* [Segments](#segments) + * [Datastore Segments](#datastore-segments) + * [External Segments](#external-segments) +* [Attributes](#attributes) +* [Request Queuing](#request-queuing) + +## Installation + +Installing the Go Agent is the same as installing any other Go library. The +simplest way is to run: + +``` +go get github.com/newrelic/go-agent +``` + +Then import the `github.com/newrelic/go-agent` package in your application. + +## Config and Application + +* [config.go](config.go) +* [application.go](application.go) + +In your `main` function or in an `init` block: + +```go +config := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__") +app, err := newrelic.NewApplication(config) +``` + +Find your application in the New Relic UI. Click on it to see the Go runtime +tab that shows information about goroutine counts, garbage collection, memory, +and CPU usage. + +If you are working in a development environment or running unit tests, you may +not want the Go Agent to spawn goroutines or report to New Relic. You're in +luck! Set the config's `Enabled` field to false. This makes the license key +optional. + +```go +config := newrelic.NewConfig("Your Application Name", "") +config.Enabled = false +app, err := newrelic.NewApplication(config) +``` + +## Logging + +* [log.go](log.go) + +The agent's logging system is designed to be easily extensible. By default, no +logging will occur. To enable logging, assign the `Config.Logger` field to +something implementing the `Logger` interface. 
A basic logging +implementation is included. + +To log at debug level to standard out, set: + +```go +config.Logger = newrelic.NewDebugLogger(os.Stdout) +``` + +To log at info level to a file, set: + +```go +w, err := os.OpenFile("my_log_file", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) +if nil == err { + config.Logger = newrelic.NewLogger(w) +} +``` + +### logrus + +* [_integrations/nrlogrus/nrlogrus.go](_integrations/nrlogrus/nrlogrus.go) + +If you are using `logrus` and would like to send the agent's log messages to its +standard logger, import the +`github.com/newrelic/go-agent/_integrations/nrlogrus` package, then set: + +```go +config.Logger = nrlogrus.StandardLogger() +``` + +## Transactions + +* [transaction.go](transaction.go) +* [More info on Transactions](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/transactions-page) + +Transactions time requests and background tasks. Each transaction should only +be used in a single goroutine. Start a new transaction when you spawn a new +goroutine. + +The simplest way to create transactions is to use +`Application.StartTransaction` and `Transaction.End`. + +```go +txn := app.StartTransaction("transactionName", responseWriter, request) +defer txn.End() +``` + +The response writer and request parameters are optional. Leave them `nil` to +instrument a background task. + +```go +txn := app.StartTransaction("backgroundTask", nil, nil) +defer txn.End() +``` + +The transaction has helpful methods like `NoticeError` and `SetName`. +See more in [transaction.go](transaction.go). + +If you are using the `http` standard library package, use `WrapHandle` and +`WrapHandleFunc`. These wrappers automatically start and end transactions with +the request and response writer. See [instrumentation.go](instrumentation.go). + +```go +http.HandleFunc(newrelic.WrapHandleFunc(app, "/users", usersHandler)) +``` + +To access the transaction in your handler, use type assertion on the response +writer passed to the handler. 
+ +```go +func myHandler(w http.ResponseWriter, r *http.Request) { + if txn, ok := w.(newrelic.Transaction); ok { + txn.NoticeError(errors.New("my error message")) + } +} +``` + +## Segments + +* [segments.go](segments.go) + +Find out where the time in your transactions is being spent! Each transaction +should only track segments in a single goroutine. + +`Segment` is used to instrument functions, methods, and blocks of code. A +segment begins when its `StartTime` field is populated, and finishes when its +`End` method is called. + +```go +segment := newrelic.Segment{} +segment.Name = "mySegmentName" +segment.StartTime = newrelic.StartSegmentNow(txn) +// ... code you want to time here ... +segment.End() +``` + +`StartSegment` is a convenient helper. It creates a segment and starts it: + +```go +segment := newrelic.StartSegment(txn, "mySegmentName") +// ... code you want to time here ... +segment.End() +``` + +Timing a function is easy using `StartSegment` and `defer`. Just add the +following line to the beginning of that function: + +```go +defer newrelic.StartSegment(txn, "mySegmentName").End() +``` + +Segments may be nested. The segment being ended must be the most recently +started segment. + +```go +s1 := newrelic.StartSegment(txn, "outerSegment") +s2 := newrelic.StartSegment(txn, "innerSegment") +// s2 must be ended before s1 +s2.End() +s1.End() +``` + +A zero value segment may safely be ended. Therefore, the following code +is safe even if the conditional fails: + +```go +var s newrelic.Segment +if txn, ok := w.(newrelic.Transaction); ok { + s.StartTime = newrelic.StartSegmentNow(txn) +} +// ... code you wish to time here ... +s.End() +``` + +### Datastore Segments + +Datastore segments appear in the transaction "Breakdown table" and in the +"Databases" tab. 
+ +* [datastore.go](datastore.go) +* [More info on Databases tab](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/databases-slow-queries-page) + +Datastore segments are instrumented using `DatastoreSegment`. Just like basic +segments, datastore segments begin when the `StartTime` field is populated and +finish when the `End` method is called. Here is an example: + +```go +s := newrelic.DatastoreSegment{ + // Product is the datastore type. See the constants in datastore.go. + Product: newrelic.DatastoreMySQL, + // Collection is the table or group. + Collection: "my_table", + // Operation is the relevant action, e.g. "SELECT" or "GET". + Operation: "SELECT", +} +s.StartTime = newrelic.StartSegmentNow(txn) +// ... make the datastore call +s.End() +``` + +This may be combined into a single line when instrumenting a datastore call +that spans an entire function call: + +```go +defer newrelic.DatastoreSegment{ + StartTime: newrelic.StartSegmentNow(txn), + Product: newrelic.DatastoreMySQL, + Collection: "my_table", + Operation: "SELECT", +}.End() +``` + +### External Segments + +External segments appear in the transaction "Breakdown table" and in the +"External services" tab. + +* [More info on External Services tab](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/external-services-page) + +External segments are instrumented using `ExternalSegment`. Populate either the +`URL` or `Request` field to indicate the endpoint. Here is an example: + +```go +func external(txn newrelic.Transaction, url string) (*http.Response, error) { + defer newrelic.ExternalSegment{ + StartTime: newrelic.StartSegmentNow(txn), + URL: url, + }.End() + + return http.Get(url) +} +``` + +We recommend using the `Request` and `Response` fields since they provide more +information about the external call. The `StartExternalSegment` helper is +useful when the request is available. 
This function may be modified in the +future to add headers that will trace activity between applications that are +instrumented by New Relic. + +```go +func external(txn newrelic.Transaction, req *http.Request) (*http.Response, error) { + s := newrelic.StartExternalSegment(txn, req) + response, err := http.DefaultClient.Do(req) + s.Response = response + s.End() + return response, err +} +``` + +`NewRoundTripper` is another useful helper. As with all segments, the round +tripper returned **must** only be used in the same goroutine as the transaction. + +```go +client := &http.Client{} +client.Transport = newrelic.NewRoundTripper(txn, nil) +resp, err := client.Get("http://example.com/") +``` + +## Attributes + +Attributes add context to errors and allow you to filter performance data +in Insights. + +You may add them using the `Transaction.AddAttribute` method. + +```go +txn.AddAttribute("key", "value") +txn.AddAttribute("product", "widget") +txn.AddAttribute("price", 19.99) +txn.AddAttribute("importantCustomer", true) +``` + +* [More info on Custom Attributes](https://docs.newrelic.com/docs/insights/new-relic-insights/decorating-events/insights-custom-attributes) + +Some attributes are recorded automatically. These are called agent attributes. +They are listed here: + +* [attributes.go](attributes.go) + +To disable one of these agents attributes, `AttributeResponseCode` for +example, modify the config like this: + +```go +config.Attributes.Exclude = append(config.Attributes.Exclude, newrelic.AttributeResponseCode) +``` + +* [More info on Agent Attributes](https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/agent-attributes) + +## Custom Events + +You may track arbitrary events using custom Insights events. 
+ +```go +app.RecordCustomEvent("MyEventType", map[string]interface{}{ + "myString": "hello", + "myFloat": 0.603, + "myInt": 123, + "myBool": true, +}) +``` + +## Request Queuing + +If you are running a load balancer or reverse web proxy then you may configure +it to add a `X-Queue-Start` header with a Unix timestamp. This will create a +band on the application overview chart showing queue time. + +* [More info on Request Queuing](https://docs.newrelic.com/docs/apm/applications-menu/features/request-queuing-tracking-front-end-time) diff --git a/vendor/github.com/newrelic/go-agent/LICENSE.txt b/vendor/github.com/newrelic/go-agent/LICENSE.txt new file mode 100644 index 0000000000..8f55fde11a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/LICENSE.txt @@ -0,0 +1,50 @@ +This product includes source derived from 'go' by The Go Authors, distributed +under the following BSD license: + + https://github.com/golang/go/blob/master/LICENSE + +------------------------------------------------------------------------------- + +All components of this product are Copyright (c) 2016 New Relic, Inc. All +rights reserved. + +Certain inventions disclosed in this file may be claimed within patents owned or +patent applications filed by New Relic, Inc. or third parties. + +Subject to the terms of this notice, New Relic grants you a nonexclusive, +nontransferable license, without the right to sublicense, to (a) install and +execute one copy of these files on any number of workstations owned or +controlled by you and (b) distribute verbatim copies of these files to third +parties. You may install, execute, and distribute these files and their +contents only in conjunction with your direct use of New Relic’s services. +These files and their contents shall not be used in conjunction with any other +product or software, including but not limited to those that may compete with +any New Relic product, feature, or software. 
As a condition to the foregoing +grant, you must provide this notice along with each copy you distribute and you +must not remove, alter, or obscure this notice. In the event you submit or +provide any feedback, code, pull requests, or suggestions to New Relic you +hereby grant New Relic a worldwide, non-exclusive, irrevocable, transferrable, +fully paid-up license to use the code, algorithms, patents, and ideas therein in +our products. + +All other use, reproduction, modification, distribution, or other exploitation +of these files is strictly prohibited, except as may be set forth in a separate +written license agreement between you and New Relic. The terms of any such +license agreement will control over this notice. The license stated above will +be automatically terminated and revoked if you exceed its scope or violate any +of the terms of this notice. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of New Relic, except as required for reasonable +and customary use in describing the origin of this file and reproducing the +content of this notice. You may not mark or brand this file with any trade +name, trademarks, service marks, or product names other than the original brand +(if any) provided by New Relic. + +Unless otherwise expressly agreed by New Relic in a separate written license +agreement, these files are provided AS IS, WITHOUT WARRANTY OF ANY KIND, +including without any implied warranties of MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE, TITLE, or NON-INFRINGEMENT. As a condition to your use of +these files, you are solely responsible for such use. New Relic will have no +liability to you for direct, indirect, consequential, incidental, special, or +punitive damages or for lost profits or data. 
diff --git a/vendor/github.com/newrelic/go-agent/README.md b/vendor/github.com/newrelic/go-agent/README.md new file mode 100644 index 0000000000..97ad223196 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/README.md @@ -0,0 +1,157 @@ +# New Relic Go Agent + +## Description + +The New Relic Go Agent allows you to monitor your Go applications with New +Relic. It helps you track transactions, outbound requests, database calls, and +other parts of your Go application's behavior and provides a running overview of +garbage collection, goroutine activity, and memory use. + +## Requirements + +Go 1.3+ is required, due to the use of http.Client's Timeout field. + +Linux, OS X, and Windows (Vista, Server 2008 and later) are supported. + +## Getting Started + +Here are the basic steps to instrumenting your application. For more +information, see [GUIDE.md](GUIDE.md). + +#### Step 0: Installation + +Installing the Go Agent is the same as installing any other Go library. The +simplest way is to run: + +``` +go get github.com/newrelic/go-agent +``` + +Then import the `github.com/newrelic/go-agent` package in your application. + +#### Step 1: Create a Config and an Application + +In your `main` function or an `init` block: + +```go +config := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__") +app, err := newrelic.NewApplication(config) +``` + +[more info](GUIDE.md#config-and-application), [application.go](application.go), +[config.go](config.go) + +#### Step 2: Add Transactions + +Transactions time requests and background tasks. Use `WrapHandle` and +`WrapHandleFunc` to create transactions for requests handled by the `http` +standard library package. 
+
+```go
+http.HandleFunc(newrelic.WrapHandleFunc(app, "/users", usersHandler))
+```
+
+Alternatively, create transactions directly using the application's
+`StartTransaction` method:
+
+```go
+txn := app.StartTransaction("myTxn", optionalResponseWriter, optionalRequest)
+defer txn.End()
+```
+
+[more info](GUIDE.md#transactions), [transaction.go](transaction.go)
+
+#### Step 3: Instrument Segments
+
+Segments show you where time in your transactions is being spent. At the
+beginning of important functions, add:
+
+```go
+defer newrelic.StartSegment(txn, "mySegmentName").End()
+```
+
+[more info](GUIDE.md#segments), [segments.go](segments.go)
+
+## Runnable Example
+
+[examples/server/main.go](./examples/server/main.go) is an example that will appear as "My Go
+Application" in your New Relic applications list. To run it:
+
+```
+env NEW_RELIC_LICENSE_KEY=__YOUR_NEW_RELIC_LICENSE_KEY__ \
+    go run examples/server/main.go
+```
+
+Some endpoints exposed are [http://localhost:8000/](http://localhost:8000/)
+and [http://localhost:8000/notice_error](http://localhost:8000/notice_error)
+
+
+## Basic Example
+
+Before Instrumentation
+
+```go
+package main
+
+import (
+	"io"
+	"net/http"
+)
+
+func helloHandler(w http.ResponseWriter, r *http.Request) {
+	io.WriteString(w, "hello, world")
+}
+
+func main() {
+	http.HandleFunc("/", helloHandler)
+	http.ListenAndServe(":8000", nil)
+}
+```
+
+After Instrumentation
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+
+	"github.com/newrelic/go-agent"
+)
+
+func helloHandler(w http.ResponseWriter, r *http.Request) {
+	io.WriteString(w, "hello, world")
+}
+
+func main() {
+	// Create a config. You need to provide the desired application name
+	// and your New Relic license key.
+	cfg := newrelic.NewConfig("My Go Application", "__YOUR_NEW_RELIC_LICENSE_KEY__")
+
+	// Create an application. This represents an application in the New
+	// Relic UI.
+ app, err := newrelic.NewApplication(cfg) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + // Wrap helloHandler. The performance of this handler will be recorded. + http.HandleFunc(newrelic.WrapHandleFunc(app, "/", helloHandler)) + http.ListenAndServe(":8000", nil) +} +``` + +## Support + +You can find more detailed documentation [in the guide](GUIDE.md). + +If you can't find what you're looking for there, reach out to us on our [support +site](http://support.newrelic.com/) or our [community +forum](http://forum.newrelic.com) and we'll be happy to help you. + +Find a bug? Contact us via [support.newrelic.com](http://support.newrelic.com/), +or email support@newrelic.com. diff --git a/vendor/github.com/newrelic/go-agent/application.go b/vendor/github.com/newrelic/go-agent/application.go new file mode 100644 index 0000000000..9cd6d4ff74 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/application.go @@ -0,0 +1,58 @@ +package newrelic + +import ( + "net/http" + "time" +) + +// Application represents your application. +type Application interface { + // StartTransaction begins a Transaction. + // * The Transaction should only be used in a single goroutine. + // * This method never returns nil. + // * If an http.Request is provided then the Transaction is considered + // a web transaction. + // * If an http.ResponseWriter is provided then the Transaction can be + // used in its place. This allows instrumentation of the response + // code and response headers. + StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction + + // RecordCustomEvent adds a custom event to the application. This + // feature is incompatible with high security mode. + // + // eventType must consist of alphanumeric characters, underscores, and + // colons, and must contain fewer than 255 bytes. + // + // Each value in the params map must be a number, string, or boolean. + // Keys must be less than 255 bytes. 
The params map may not contain + // more than 64 attributes. For more information, and a set of + // restricted keywords, see: + // + // https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents + RecordCustomEvent(eventType string, params map[string]interface{}) error + + // WaitForConnection blocks until the application is connected, is + // incapable of being connected, or the timeout has been reached. This + // method is useful for short-lived processes since the application will + // not gather data until it is connected. nil is returned if the + // application is connected successfully. + WaitForConnection(timeout time.Duration) error + + // Shutdown flushes data to New Relic's servers and stops all + // agent-related goroutines managing this application. After Shutdown + // is called, the application is disabled and no more data will be + // collected. This method will block until all final data is sent to + // New Relic or the timeout has elapsed. + Shutdown(timeout time.Duration) +} + +// NewApplication creates an Application and spawns goroutines to manage the +// aggregation and harvesting of data. On success, a non-nil Application and a +// nil error are returned. On failure, a nil Application and a non-nil error +// are returned. +// +// Applications do not share global state (other than the shared log.Logger). +// Therefore, it is safe to create multiple applications. +func NewApplication(c Config) (Application, error) { + return newApp(c) +} diff --git a/vendor/github.com/newrelic/go-agent/attributes.go b/vendor/github.com/newrelic/go-agent/attributes.go new file mode 100644 index 0000000000..f5f2761ac5 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/attributes.go @@ -0,0 +1,42 @@ +package newrelic + +// This file contains the names of the automatically captured attributes. +// Attributes are key value pairs attached to transaction events, error events, +// and traced errors. 
You may add your own attributes using the +// Transaction.AddAttribute method (see transaction.go). +// +// These attribute names are exposed here to facilitate configuration. +// +// For more information, see: +// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/agent-attributes + +// Attributes destined for Transaction Events and Errors: +const ( + // AttributeResponseCode is the response status code for a web request. + AttributeResponseCode = "httpResponseCode" + // AttributeRequestMethod is the request's method. + AttributeRequestMethod = "request.method" + // AttributeRequestAccept is the request's "Accept" header. + AttributeRequestAccept = "request.headers.accept" + // AttributeRequestContentType is the request's "Content-Type" header. + AttributeRequestContentType = "request.headers.contentType" + // AttributeRequestContentLength is the request's "Content-Length" header. + AttributeRequestContentLength = "request.headers.contentLength" + // AttributeRequestHost is the request's "Host" header. + AttributeRequestHost = "request.headers.host" + // AttributeResponseContentType is the response "Content-Type" header. + AttributeResponseContentType = "response.headers.contentType" + // AttributeResponseContentLength is the response "Content-Length" header. + AttributeResponseContentLength = "response.headers.contentLength" + // AttributeHostDisplayName contains the value of Config.HostDisplayName. + AttributeHostDisplayName = "host.displayName" +) + +// Attributes destined for Errors: +const ( + // AttributeRequestUserAgent is the request's "User-Agent" header. + AttributeRequestUserAgent = "request.headers.User-Agent" + // AttributeRequestReferer is the request's "Referer" header. Query + // string parameters are removed. 
+ AttributeRequestReferer = "request.headers.referer" +) diff --git a/vendor/github.com/newrelic/go-agent/config.go b/vendor/github.com/newrelic/go-agent/config.go new file mode 100644 index 0000000000..af8d8c37b7 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/config.go @@ -0,0 +1,257 @@ +package newrelic + +import ( + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +// Config contains Application and Transaction behavior settings. +// Use NewConfig to create a Config with proper defaults. +type Config struct { + // AppName is used by New Relic to link data across servers. + // + // https://docs.newrelic.com/docs/apm/new-relic-apm/installation-configuration/naming-your-application + AppName string + + // License is your New Relic license key. + // + // https://docs.newrelic.com/docs/accounts-partnerships/accounts/account-setup/license-key + License string + + // Logger controls go-agent logging. See log.go. + Logger Logger + + // Enabled determines whether the agent will communicate with the New + // Relic servers and spawn goroutines. Setting this to be false can be + // useful in testing and staging situations. + Enabled bool + + // Labels are key value pairs used to roll up applications into specific + // categories. + // + // https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/labels-categories-organizing-your-apps-servers + Labels map[string]string + + // HighSecurity guarantees that certain agent settings can not be made + // more permissive. This setting must match the corresponding account + // setting in the New Relic UI. + // + // https://docs.newrelic.com/docs/accounts-partnerships/accounts/security/high-security + HighSecurity bool + + // CustomInsightsEvents controls the behavior of + // Application.RecordCustomEvent. 
+ // + // https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents + CustomInsightsEvents struct { + // Enabled controls whether RecordCustomEvent will collect + // custom analytics events. High security mode overrides this + // setting. + Enabled bool + } + + // TransactionEvents controls the behavior of transaction analytics + // events. + TransactionEvents struct { + // Enabled controls whether transaction events are captured. + Enabled bool + // Attributes controls the attributes included with transaction + // events. + Attributes AttributeDestinationConfig + } + + // ErrorCollector controls the capture of errors. + ErrorCollector struct { + // Enabled controls whether errors are captured. This setting + // affects both traced errors and error analytics events. + Enabled bool + // CaptureEvents controls whether error analytics events are + // captured. + CaptureEvents bool + // IgnoreStatusCodes controls which http response codes are + // automatically turned into errors. By default, response codes + // greater than or equal to 400, with the exception of 404, are + // turned into errors. + IgnoreStatusCodes []int + // Attributes controls the attributes included with errors. + Attributes AttributeDestinationConfig + } + + // TransactionTracer controls the capture of transaction traces. + TransactionTracer struct { + // Enabled controls whether transaction traces are captured. + Enabled bool + // Threshold controls whether a transaction trace will be + // considered for capture. Of the traces exceeding the + // threshold, the slowest trace every minute is captured. + Threshold struct { + // If IsApdexFailing is true then the trace threshold is + // four times the apdex threshold. + IsApdexFailing bool + // If IsApdexFailing is false then this field is the + // threshold, otherwise it is ignored. 
+ Duration time.Duration + } + // SegmentThreshold is the threshold at which segments will be + // added to the trace. Lowering this setting may increase + // overhead. + SegmentThreshold time.Duration + // StackTraceThreshold is the threshold at which segments will + // be given a stack trace in the transaction trace. Lowering + // this setting will drastically increase overhead. + StackTraceThreshold time.Duration + // Attributes controls the attributes included with transaction + // traces. + Attributes AttributeDestinationConfig + } + + // HostDisplayName gives this server a recognizable name in the New + // Relic UI. This is an optional setting. + HostDisplayName string + + // UseTLS controls whether http or https is used to send data to New + // Relic servers. + UseTLS bool + + // Transport customizes http.Client communication with New Relic + // servers. This may be used to configure a proxy. + Transport http.RoundTripper + + // Utilization controls the detection and gathering of system + // information. + Utilization struct { + // DetectAWS controls whether the Application attempts to detect + // AWS. + DetectAWS bool + // DetectDocker controls whether the Application attempts to + // detect Docker. + DetectDocker bool + + // These settings provide system information when custom values + // are required. + LogicalProcessors int + TotalRAMMIB int + BillingHostname string + } + + // DatastoreTracer controls behavior relating to datastore segments. + DatastoreTracer struct { + InstanceReporting struct { + Enabled bool + } + DatabaseNameReporting struct { + Enabled bool + } + QueryParameters struct { + Enabled bool + } + // SlowQuery controls the capture of slow query traces. Slow + // query traces show you instances of your slowest datastore + // segments. + SlowQuery struct { + Enabled bool + Threshold time.Duration + } + } + + // Attributes controls the attributes included with errors and + // transaction events. 
+ Attributes AttributeDestinationConfig + + // RuntimeSampler controls the collection of runtime statistics like + // CPU/Memory usage, goroutine count, and GC pauses. + RuntimeSampler struct { + // Enabled controls whether runtime statistics are captured. + Enabled bool + } +} + +// AttributeDestinationConfig controls the attributes included with errors and +// transaction events. +type AttributeDestinationConfig struct { + Enabled bool + Include []string + Exclude []string +} + +// NewConfig creates an Config populated with the given appname, license, +// and expected default values. +func NewConfig(appname, license string) Config { + c := Config{} + + c.AppName = appname + c.License = license + c.Enabled = true + c.Labels = make(map[string]string) + c.CustomInsightsEvents.Enabled = true + c.TransactionEvents.Enabled = true + c.TransactionEvents.Attributes.Enabled = true + c.HighSecurity = false + c.UseTLS = true + c.ErrorCollector.Enabled = true + c.ErrorCollector.CaptureEvents = true + c.ErrorCollector.IgnoreStatusCodes = []int{ + http.StatusNotFound, // 404 + } + c.ErrorCollector.Attributes.Enabled = true + c.Utilization.DetectAWS = true + c.Utilization.DetectDocker = true + c.Attributes.Enabled = true + c.RuntimeSampler.Enabled = true + + c.TransactionTracer.Enabled = true + c.TransactionTracer.Threshold.IsApdexFailing = true + c.TransactionTracer.Threshold.Duration = 500 * time.Millisecond + c.TransactionTracer.SegmentThreshold = 2 * time.Millisecond + c.TransactionTracer.StackTraceThreshold = 500 * time.Millisecond + c.TransactionTracer.Attributes.Enabled = true + + c.DatastoreTracer.InstanceReporting.Enabled = true + c.DatastoreTracer.DatabaseNameReporting.Enabled = true + c.DatastoreTracer.QueryParameters.Enabled = true + c.DatastoreTracer.SlowQuery.Enabled = true + c.DatastoreTracer.SlowQuery.Threshold = 10 * time.Millisecond + + return c +} + +const ( + licenseLength = 40 + appNameLimit = 3 +) + +// The following errors will be returned if your Config 
fails to validate. +var ( + errLicenseLen = fmt.Errorf("license length is not %d", licenseLength) + errHighSecurityTLS = errors.New("high security requires TLS") + errAppNameMissing = errors.New("AppName required") + errAppNameLimit = fmt.Errorf("max of %d rollup application names", appNameLimit) +) + +// Validate checks the config for improper fields. If the config is invalid, +// newrelic.NewApplication returns an error. +func (c Config) Validate() error { + if c.Enabled { + if len(c.License) != licenseLength { + return errLicenseLen + } + } else { + // The License may be empty when the agent is not enabled. + if len(c.License) != licenseLength && len(c.License) != 0 { + return errLicenseLen + } + } + if c.HighSecurity && !c.UseTLS { + return errHighSecurityTLS + } + if "" == c.AppName { + return errAppNameMissing + } + if strings.Count(c.AppName, ";") >= appNameLimit { + return errAppNameLimit + } + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/datastore.go b/vendor/github.com/newrelic/go-agent/datastore.go new file mode 100644 index 0000000000..6a2db2403a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/datastore.go @@ -0,0 +1,27 @@ +package newrelic + +// DatastoreProduct encourages consistent metrics across New Relic agents. You +// may create your own if your datastore is not listed below. 
+type DatastoreProduct string + +// Datastore names used across New Relic agents: +const ( + DatastoreCassandra DatastoreProduct = "Cassandra" + DatastoreDerby = "Derby" + DatastoreElasticsearch = "Elasticsearch" + DatastoreFirebird = "Firebird" + DatastoreIBMDB2 = "IBMDB2" + DatastoreInformix = "Informix" + DatastoreMemcached = "Memcached" + DatastoreMongoDB = "MongoDB" + DatastoreMySQL = "MySQL" + DatastoreMSSQL = "MSSQL" + DatastoreOracle = "Oracle" + DatastorePostgres = "Postgres" + DatastoreRedis = "Redis" + DatastoreSolr = "Solr" + DatastoreSQLite = "SQLite" + DatastoreCouchDB = "CouchDB" + DatastoreRiak = "Riak" + DatastoreVoltDB = "VoltDB" +) diff --git a/vendor/github.com/newrelic/go-agent/instrumentation.go b/vendor/github.com/newrelic/go-agent/instrumentation.go new file mode 100644 index 0000000000..12b0bf1930 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/instrumentation.go @@ -0,0 +1,68 @@ +package newrelic + +import "net/http" + +// instrumentation.go contains helpers built on the lower level api. + +// WrapHandle facilitates instrumentation of handlers registered with an +// http.ServeMux. For example, to instrument this code: +// +// http.Handle("/foo", fooHandler) +// +// Perform this replacement: +// +// http.Handle(newrelic.WrapHandle(app, "/foo", fooHandler)) +// +// The Transaction is passed to the handler in place of the original +// http.ResponseWriter, so it can be accessed using type assertion. +// For example, to rename the transaction: +// +// // 'w' is the variable name of the http.ResponseWriter. 
+//	if txn, ok := w.(newrelic.Transaction); ok {
+//		txn.SetName("other-name")
+//	}
+//
+func WrapHandle(app Application, pattern string, handler http.Handler) (string, http.Handler) {
+	return pattern, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		txn := app.StartTransaction(pattern, w, r)
+		defer txn.End()
+
+		handler.ServeHTTP(txn, r)
+	})
+}
+
+// WrapHandleFunc serves the same purpose as WrapHandle for functions registered
+// with ServeMux.HandleFunc.
+func WrapHandleFunc(app Application, pattern string, handler func(http.ResponseWriter, *http.Request)) (string, func(http.ResponseWriter, *http.Request)) {
+	p, h := WrapHandle(app, pattern, http.HandlerFunc(handler))
+	return p, func(w http.ResponseWriter, r *http.Request) { h.ServeHTTP(w, r) }
+}
+
+// NewRoundTripper creates an http.RoundTripper to instrument external requests.
+// This RoundTripper must be used in the same goroutine as the other uses of the
+// Transaction's SegmentTracer methods. http.DefaultTransport is used if an
+// http.RoundTripper is not provided.
+// +// client := &http.Client{} +// client.Transport = newrelic.NewRoundTripper(txn, nil) +// resp, err := client.Get("http://example.com/") +// +func NewRoundTripper(txn Transaction, original http.RoundTripper) http.RoundTripper { + return roundTripperFunc(func(request *http.Request) (*http.Response, error) { + segment := StartExternalSegment(txn, request) + + if nil == original { + original = http.DefaultTransport + } + response, err := original.RoundTrip(request) + + segment.Response = response + segment.End() + + return response, err + }) +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) } diff --git a/vendor/github.com/newrelic/go-agent/internal/analytics_events.go b/vendor/github.com/newrelic/go-agent/internal/analytics_events.go new file mode 100644 index 0000000000..151766a325 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/analytics_events.go @@ -0,0 +1,122 @@ +package internal + +import ( + "bytes" + "container/heap" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +// eventStamp allows for uniform random sampling of events. When an event is +// created it is given an eventStamp. Whenever an event pool is full and events +// need to be dropped, the events with the lowest stamps are dropped. 
+type eventStamp float32 + +func eventStampCmp(a, b eventStamp) bool { + return a < b +} + +type analyticsEvent struct { + stamp eventStamp + jsonWriter +} + +type analyticsEventHeap []analyticsEvent + +type analyticsEvents struct { + numSeen int + events analyticsEventHeap + failedHarvests int +} + +func (events *analyticsEvents) NumSeen() float64 { return float64(events.numSeen) } +func (events *analyticsEvents) NumSaved() float64 { return float64(len(events.events)) } + +func (h analyticsEventHeap) Len() int { return len(h) } +func (h analyticsEventHeap) Less(i, j int) bool { return eventStampCmp(h[i].stamp, h[j].stamp) } +func (h analyticsEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +// Push and Pop are unused: only heap.Init and heap.Fix are used. +func (h analyticsEventHeap) Push(x interface{}) {} +func (h analyticsEventHeap) Pop() interface{} { return nil } + +func newAnalyticsEvents(max int) *analyticsEvents { + return &analyticsEvents{ + numSeen: 0, + events: make(analyticsEventHeap, 0, max), + failedHarvests: 0, + } +} + +func (events *analyticsEvents) addEvent(e analyticsEvent) { + events.numSeen++ + + if len(events.events) < cap(events.events) { + events.events = append(events.events, e) + if len(events.events) == cap(events.events) { + // Delay heap initialization so that we can have + // deterministic ordering for integration tests (the max + // is not being reached). 
+ heap.Init(events.events) + } + return + } + + if eventStampCmp(e.stamp, events.events[0].stamp) { + return + } + + events.events[0] = e + heap.Fix(events.events, 0) +} + +func (events *analyticsEvents) mergeFailed(other *analyticsEvents) { + fails := other.failedHarvests + 1 + if fails >= failedEventsAttemptsLimit { + return + } + events.failedHarvests = fails + events.Merge(other) +} + +func (events *analyticsEvents) Merge(other *analyticsEvents) { + allSeen := events.numSeen + other.numSeen + + for _, e := range other.events { + events.addEvent(e) + } + events.numSeen = allSeen +} + +func (events *analyticsEvents) CollectorJSON(agentRunID string) ([]byte, error) { + if 0 == events.numSeen { + return nil, nil + } + + estimate := 256 * len(events.events) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + buf.WriteByte('[') + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteString(`"reservoir_size":`) + jsonx.AppendUint(buf, uint64(cap(events.events))) + buf.WriteByte(',') + buf.WriteString(`"events_seen":`) + jsonx.AppendUint(buf, uint64(events.numSeen)) + buf.WriteByte('}') + buf.WriteByte(',') + buf.WriteByte('[') + for i, e := range events.events { + if i > 0 { + buf.WriteByte(',') + } + e.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') + + return buf.Bytes(), nil + +} diff --git a/vendor/github.com/newrelic/go-agent/internal/apdex.go b/vendor/github.com/newrelic/go-agent/internal/apdex.go new file mode 100644 index 0000000000..28225f7d01 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/apdex.go @@ -0,0 +1,48 @@ +package internal + +import "time" + +// ApdexZone is a transaction classification. +type ApdexZone int + +// https://en.wikipedia.org/wiki/Apdex +const ( + ApdexNone ApdexZone = iota + ApdexSatisfying + ApdexTolerating + ApdexFailing +) + +// ApdexFailingThreshold calculates the threshold at which the transaction is +// considered a failure. 
+func ApdexFailingThreshold(threshold time.Duration) time.Duration { + return 4 * threshold +} + +// CalculateApdexZone calculates the apdex based on the transaction duration and +// threshold. +// +// Note that this does not take into account whether or not the transaction +// had an error. That is expected to be done by the caller. +func CalculateApdexZone(threshold, duration time.Duration) ApdexZone { + if duration <= threshold { + return ApdexSatisfying + } + if duration <= ApdexFailingThreshold(threshold) { + return ApdexTolerating + } + return ApdexFailing +} + +func (zone ApdexZone) label() string { + switch zone { + case ApdexSatisfying: + return "S" + case ApdexTolerating: + return "T" + case ApdexFailing: + return "F" + default: + return "" + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/attributes.go b/vendor/github.com/newrelic/go-agent/internal/attributes.go new file mode 100644 index 0000000000..40f8631020 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/attributes.go @@ -0,0 +1,572 @@ +package internal + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sort" + "strconv" + "strings" +) + +// New agent attributes must be added in the following places: +// * Constants here. +// * Top level attributes.go file. 
+// * agentAttributes +// * agentAttributeDests +// * calculateAgentAttributeDests +// * writeAgentAttributes +const ( + responseCode = "httpResponseCode" + requestMethod = "request.method" + requestAccept = "request.headers.accept" + requestContentType = "request.headers.contentType" + requestContentLength = "request.headers.contentLength" + requestHost = "request.headers.host" + responseContentType = "response.headers.contentType" + responseContentLength = "response.headers.contentLength" + hostDisplayName = "host.displayName" + requestUserAgent = "request.headers.User-Agent" + requestReferer = "request.headers.referer" +) + +// https://source.datanerd.us/agents/agent-specs/blob/master/Agent-Attributes-PORTED.md + +// AttributeDestinationConfig matches newrelic.AttributeDestinationConfig to +// avoid circular dependency issues. +type AttributeDestinationConfig struct { + Enabled bool + Include []string + Exclude []string +} + +type destinationSet int + +const ( + destTxnEvent destinationSet = 1 << iota + destError + destTxnTrace + destBrowser +) + +const ( + destNone destinationSet = 0 + // DestAll contains all destinations. + DestAll destinationSet = destTxnEvent | destTxnTrace | destError | destBrowser +) + +const ( + attributeWildcardSuffix = '*' +) + +type attributeModifier struct { + match string // This will not contain a trailing '*'. + includeExclude +} + +type byMatch []*attributeModifier + +func (m byMatch) Len() int { return len(m) } +func (m byMatch) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byMatch) Less(i, j int) bool { return m[i].match < m[j].match } + +// AttributeConfig is created at application creation and shared between all +// transactions. +type AttributeConfig struct { + disabledDestinations destinationSet + exactMatchModifiers map[string]*attributeModifier + // Once attributeConfig is constructed, wildcardModifiers is sorted in + // lexicographical order. 
Modifiers appearing later have precedence + // over modifiers appearing earlier. + wildcardModifiers []*attributeModifier + agentDests agentAttributeDests +} + +type includeExclude struct { + include destinationSet + exclude destinationSet +} + +func modifierApply(m *attributeModifier, d destinationSet) destinationSet { + // Include before exclude, since exclude has priority. + d |= m.include + d &^= m.exclude + return d +} + +func applyAttributeConfig(c *AttributeConfig, key string, d destinationSet) destinationSet { + // Important: The wildcard modifiers must be applied before the exact + // match modifiers, and the slice must be iterated in a forward + // direction. + for _, m := range c.wildcardModifiers { + if strings.HasPrefix(key, m.match) { + d = modifierApply(m, d) + } + } + + if m, ok := c.exactMatchModifiers[key]; ok { + d = modifierApply(m, d) + } + + d &^= c.disabledDestinations + + return d +} + +func addModifier(c *AttributeConfig, match string, d includeExclude) { + if "" == match { + return + } + exactMatch := true + if attributeWildcardSuffix == match[len(match)-1] { + exactMatch = false + match = match[0 : len(match)-1] + } + mod := &attributeModifier{ + match: match, + includeExclude: d, + } + + if exactMatch { + if m, ok := c.exactMatchModifiers[mod.match]; ok { + m.include |= mod.include + m.exclude |= mod.exclude + } else { + c.exactMatchModifiers[mod.match] = mod + } + } else { + for _, m := range c.wildcardModifiers { + // Important: Duplicate entries for the same match + // string would not work because exclude needs + // precedence over include. 
+ if m.match == mod.match { + m.include |= mod.include + m.exclude |= mod.exclude + return + } + } + c.wildcardModifiers = append(c.wildcardModifiers, mod) + } +} + +func processDest(c *AttributeConfig, dc *AttributeDestinationConfig, d destinationSet) { + if !dc.Enabled { + c.disabledDestinations |= d + } + for _, match := range dc.Include { + addModifier(c, match, includeExclude{include: d}) + } + for _, match := range dc.Exclude { + addModifier(c, match, includeExclude{exclude: d}) + } +} + +// AttributeConfigInput is used as the input to CreateAttributeConfig: it +// transforms newrelic.Config settings into an AttributeConfig. +type AttributeConfigInput struct { + Attributes AttributeDestinationConfig + ErrorCollector AttributeDestinationConfig + TransactionEvents AttributeDestinationConfig + browserMonitoring AttributeDestinationConfig + TransactionTracer AttributeDestinationConfig +} + +var ( + sampleAttributeConfigInput = AttributeConfigInput{ + Attributes: AttributeDestinationConfig{Enabled: true}, + ErrorCollector: AttributeDestinationConfig{Enabled: true}, + TransactionEvents: AttributeDestinationConfig{Enabled: true}, + TransactionTracer: AttributeDestinationConfig{Enabled: true}, + } +) + +// CreateAttributeConfig creates a new AttributeConfig. 
// CreateAttributeConfig creates a new AttributeConfig from the
// destination-specific settings gathered in input.
func CreateAttributeConfig(input AttributeConfigInput) *AttributeConfig {
	c := &AttributeConfig{
		exactMatchModifiers: make(map[string]*attributeModifier),
		wildcardModifiers:   make([]*attributeModifier, 0, 64),
	}

	// The top-level Attributes settings apply to every destination; each
	// destination-specific section then layers its own rules on top.
	processDest(c, &input.Attributes, DestAll)
	processDest(c, &input.ErrorCollector, destError)
	processDest(c, &input.TransactionEvents, destTxnEvent)
	processDest(c, &input.TransactionTracer, destTxnTrace)
	processDest(c, &input.browserMonitoring, destBrowser)

	// applyAttributeConfig depends on the wildcard modifiers being in
	// lexicographical order: modifiers appearing later have precedence.
	sort.Sort(byMatch(c.wildcardModifiers))

	// The destinations of the fixed set of agent attributes cannot change
	// after construction, so resolve them once up front.
	c.agentDests = calculateAgentAttributeDests(c)

	return c
}
// agentAttributeWriter emits agent attribute fields as JSON, filtering each
// field against the destination set d being written.
type agentAttributeWriter struct {
	jsonFieldsWriter
	d destinationSet
}

// writeString writes a string attribute if it is non-empty and destined for
// the writer's destination.  Overlong values are truncated.
func (w *agentAttributeWriter) writeString(name string, val string, d destinationSet) {
	if "" != val && 0 != w.d&d {
		w.stringField(name, truncateStringValueIfLong(val))
	}
}

// writeInt writes an integer attribute if it is destined for the writer's
// destination.  Negative values are skipped: NewAttributes initializes the
// content-length fields to -1 to mean "unset".
func (w *agentAttributeWriter) writeInt(name string, val int, d destinationSet) {
	if val >= 0 && 0 != w.d&d {
		w.intField(name, int64(val))
	}
}
// ErrInvalidAttribute is returned when an attribute value's dynamic type is
// not one of the supported primitive types.
type ErrInvalidAttribute struct{ typeString string }

// Error implements the error interface.
func (e ErrInvalidAttribute) Error() string {
	return fmt.Sprintf("attribute value type %s is invalid", e.typeString)
}

// valueIsValid reports whether val may be used as an attribute value:
// strings, bools, nil, and every built-in integer and float type are
// accepted; anything else yields an ErrInvalidAttribute.
func valueIsValid(val interface{}) error {
	switch val.(type) {
	case nil, string, bool:
		return nil
	case int, int8, int16, int32, int64,
		uint, uint8, uint16, uint32, uint64, uintptr:
		return nil
	case float32, float64:
		return nil
	}
	return ErrInvalidAttribute{typeString: fmt.Sprintf("%T", val)}
}
+ if len(key) > attributeKeyLengthLimit { + return invalidAttributeKeyErr{key: key} + } + return nil +} + +func truncateStringValueIfLong(val string) string { + if len(val) > attributeValueLengthLimit { + return StringLengthByteLimit(val, attributeValueLengthLimit) + } + return val +} + +func truncateStringValueIfLongInterface(val interface{}) interface{} { + if str, ok := val.(string); ok { + val = interface{}(truncateStringValueIfLong(str)) + } + return val +} + +// AddUserAttribute adds a user attribute. +func AddUserAttribute(a *Attributes, key string, val interface{}, d destinationSet) error { + val = truncateStringValueIfLongInterface(val) + if err := valueIsValid(val); nil != err { + return err + } + if err := validAttributeKey(key); nil != err { + return err + } + dests := applyAttributeConfig(a.config, key, d) + if destNone == dests { + return nil + } + if nil == a.user { + a.user = make(map[string]userAttribute) + } + + if _, exists := a.user[key]; !exists && len(a.user) >= attributeUserLimit { + return userAttributeLimitErr{key} + } + + // Note: Duplicates are overridden: last attribute in wins. 
// writeAttributeValueJSON writes a single attribute as a JSON field, choosing
// the JSON representation from the value's dynamic type: null, string, bool,
// integer, or float.  Types not accepted by valueIsValid fall through to the
// default case, which writes the type name as a string.
// NOTE(review): unsigned values above the int64 maximum wrap negative in the
// int64 conversion — confirm whether such values can reach this point.
func writeAttributeValueJSON(w *jsonFieldsWriter, key string, val interface{}) {
	switch v := val.(type) {
	case nil:
		w.rawField(key, `null`)
	case string:
		w.stringField(key, v)
	case bool:
		if v {
			w.rawField(key, `true`)
		} else {
			w.rawField(key, `false`)
		}
	case uint8:
		w.intField(key, int64(v))
	case uint16:
		w.intField(key, int64(v))
	case uint32:
		w.intField(key, int64(v))
	case uint64:
		w.intField(key, int64(v))
	case uint:
		w.intField(key, int64(v))
	case uintptr:
		w.intField(key, int64(v))
	case int8:
		w.intField(key, int64(v))
	case int16:
		w.intField(key, int64(v))
	case int32:
		w.intField(key, int64(v))
	case int64:
		w.intField(key, v)
	case int:
		w.intField(key, int64(v))
	case float32:
		w.floatField(key, float64(v))
	case float64:
		w.floatField(key, v)
	default:
		w.stringField(key, fmt.Sprintf("%T", v))
	}
}
// RequestAgentAttributes gathers agent attributes out of the request: the
// method is always captured, and if headers are present the standard request
// headers are copied into the agent attribute fields.
// NOTE(review): Go's net/http promotes the Host header into r.Host and
// removes it from r.Header, so h.Get("Host") is typically empty here —
// confirm whether r.Host was intended.
func RequestAgentAttributes(a *Attributes, r *http.Request) {
	a.Agent.RequestMethod = r.Method

	h := r.Header
	if nil == h {
		return
	}
	a.Agent.RequestAcceptHeader = h.Get("Accept")
	a.Agent.RequestContentType = h.Get("Content-Type")
	a.Agent.RequestHeadersHost = h.Get("Host")
	a.Agent.RequestHeadersUserAgent = h.Get("User-Agent")
	// The referer is sanitized before being recorded.
	a.Agent.RequestHeadersReferer = SafeURLFromString(h.Get("Referer"))

	// Content-Length is recorded only when it parses as an integer; the -1
	// "unset" sentinel from NewAttributes is preserved otherwise.
	if cl := h.Get("Content-Length"); "" != cl {
		if x, err := strconv.Atoi(cl); nil == err {
			a.Agent.RequestContentLength = x
		}
	}
}
+func ResponseHeaderAttributes(a *Attributes, h http.Header) { + if nil == h { + return + } + a.Agent.ResponseHeadersContentType = h.Get("Content-Type") + if val := h.Get("Content-Length"); "" != val { + if x, err := strconv.Atoi(val); nil == err { + a.Agent.ResponseHeadersContentLength = x + } + } +} + +var ( + // statusCodeLookup avoids a strconv.Itoa call. + statusCodeLookup = map[int]string{ + 100: "100", 101: "101", + 200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206", + 300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307", + 400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406", + 407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413", + 414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429", + 431: "431", 451: "451", + 500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511", + } +) + +// ResponseCodeAttribute sets the response code agent attribute. +func ResponseCodeAttribute(a *Attributes, code int) { + a.Agent.ResponseCode = statusCodeLookup[code] + if a.Agent.ResponseCode == "" { + a.Agent.ResponseCode = strconv.Itoa(code) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/collector.go b/vendor/github.com/newrelic/go-agent/internal/collector.go new file mode 100644 index 0000000000..59bc19442c --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/collector.go @@ -0,0 +1,267 @@ +package internal + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/newrelic/go-agent/internal/logger" +) + +const ( + procotolVersion = "14" + userAgentPrefix = "NewRelic-Go-Agent/" + + // Methods used in collector communication. 
+ cmdRedirect = "get_redirect_host" + cmdConnect = "connect" + cmdMetrics = "metric_data" + cmdCustomEvents = "custom_event_data" + cmdTxnEvents = "analytic_event_data" + cmdErrorEvents = "error_event_data" + cmdErrorData = "error_data" + cmdTxnTraces = "transaction_sample_data" + cmdSlowSQLs = "sql_trace_data" +) + +var ( + // ErrPayloadTooLarge is created in response to receiving a 413 response + // code. + ErrPayloadTooLarge = errors.New("payload too large") + // ErrUnsupportedMedia is created in response to receiving a 415 + // response code. + ErrUnsupportedMedia = errors.New("unsupported media") +) + +// RpmCmd contains fields specific to an individual call made to RPM. +type RpmCmd struct { + Name string + Collector string + RunID string + Data []byte +} + +// RpmControls contains fields which will be the same for all calls made +// by the same application. +type RpmControls struct { + UseTLS bool + License string + Client *http.Client + Logger logger.Logger + AgentVersion string +} + +func rpmURL(cmd RpmCmd, cs RpmControls) string { + var u url.URL + + u.Host = cmd.Collector + u.Path = "agent_listener/invoke_raw_method" + + if cs.UseTLS { + u.Scheme = "https" + } else { + u.Scheme = "http" + } + + query := url.Values{} + query.Set("marshal_format", "json") + query.Set("protocol_version", procotolVersion) + query.Set("method", cmd.Name) + query.Set("license_key", cs.License) + + if len(cmd.RunID) > 0 { + query.Set("run_id", cmd.RunID) + } + + u.RawQuery = query.Encode() + return u.String() +} + +type unexpectedStatusCodeErr struct { + code int +} + +func (e unexpectedStatusCodeErr) Error() string { + return fmt.Sprintf("unexpected HTTP status code: %d", e.code) +} + +func collectorRequestInternal(url string, data []byte, cs RpmControls) ([]byte, error) { + deflated, err := compress(data) + if nil != err { + return nil, err + } + + req, err := http.NewRequest("POST", url, bytes.NewBuffer(deflated)) + if nil != err { + return nil, err + } + + 
req.Header.Add("Accept-Encoding", "identity, deflate") + req.Header.Add("Content-Type", "application/octet-stream") + req.Header.Add("User-Agent", userAgentPrefix+cs.AgentVersion) + req.Header.Add("Content-Encoding", "deflate") + + resp, err := cs.Client.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if 413 == resp.StatusCode { + return nil, ErrPayloadTooLarge + } + + if 415 == resp.StatusCode { + return nil, ErrUnsupportedMedia + } + + // If the response code is not 200, then the collector may not return + // valid JSON. + if 200 != resp.StatusCode { + return nil, unexpectedStatusCodeErr{code: resp.StatusCode} + } + + b, err := ioutil.ReadAll(resp.Body) + if nil != err { + return nil, err + } + return parseResponse(b) +} + +// CollectorRequest makes a request to New Relic. +func CollectorRequest(cmd RpmCmd, cs RpmControls) ([]byte, error) { + url := rpmURL(cmd, cs) + + if cs.Logger.DebugEnabled() { + cs.Logger.Debug("rpm request", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "payload": JSONString(cmd.Data), + }) + } + + resp, err := collectorRequestInternal(url, cmd.Data, cs) + if err != nil { + cs.Logger.Debug("rpm failure", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "error": err.Error(), + }) + } + + if cs.Logger.DebugEnabled() { + cs.Logger.Debug("rpm response", map[string]interface{}{ + "command": cmd.Name, + "url": url, + "response": JSONString(resp), + }) + } + + return resp, err +} + +type rpmException struct { + Message string `json:"message"` + ErrorType string `json:"error_type"` +} + +func (e *rpmException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorType, e.Message) +} + +func hasType(e error, expected string) bool { + rpmErr, ok := e.(*rpmException) + if !ok { + return false + } + return rpmErr.ErrorType == expected + +} + +const ( + forceRestartType = "NewRelic::Agent::ForceRestartException" + disconnectType = "NewRelic::Agent::ForceDisconnectException" + 
licenseInvalidType = "NewRelic::Agent::LicenseException" + runtimeType = "RuntimeError" +) + +// IsRestartException indicates if the error was a restart exception. +func IsRestartException(e error) bool { return hasType(e, forceRestartType) } + +// IsLicenseException indicates if the error was an invalid exception. +func IsLicenseException(e error) bool { return hasType(e, licenseInvalidType) } + +// IsRuntime indicates if the error was a runtime exception. +func IsRuntime(e error) bool { return hasType(e, runtimeType) } + +// IsDisconnect indicates if the error was a disconnect exception. +func IsDisconnect(e error) bool { return hasType(e, disconnectType) } + +func parseResponse(b []byte) ([]byte, error) { + var r struct { + ReturnValue json.RawMessage `json:"return_value"` + Exception *rpmException `json:"exception"` + } + + err := json.Unmarshal(b, &r) + if nil != err { + return nil, err + } + + if nil != r.Exception { + return nil, r.Exception + } + + return r.ReturnValue, nil +} + +// ConnectAttempt tries to connect an application. +func ConnectAttempt(js []byte, redirectHost string, cs RpmControls) (*AppRun, error) { + call := RpmCmd{ + Name: cmdRedirect, + Collector: redirectHost, + Data: []byte("[]"), + } + + out, err := CollectorRequest(call, cs) + if nil != err { + // err is intentionally unmodified: We do not want to change + // the type of these collector errors. + return nil, err + } + + var host string + err = json.Unmarshal(out, &host) + if nil != err { + return nil, fmt.Errorf("unable to parse redirect reply: %v", err) + } + + call.Collector = host + call.Data = js + call.Name = cmdConnect + + rawReply, err := CollectorRequest(call, cs) + if nil != err { + // err is intentionally unmodified: We do not want to change + // the type of these collector errors. 
// compress returns the zlib-compressed form of b.
func compress(b []byte) ([]byte, error) {
	buf := bytes.Buffer{}
	w := zlib.NewWriter(&buf)

	if _, err := w.Write(b); nil != err {
		w.Close()
		return nil, err
	}
	// Close flushes any remaining compressed data into buf.  Its error must
	// be checked: previously it was discarded, which could silently return
	// truncated or invalid output if the flush failed.
	if err := w.Close(); nil != err {
		return nil, err
	}

	return buf.Bytes(), nil
}

// uncompress inflates zlib-compressed data produced by compress.
func uncompress(b []byte) ([]byte, error) {
	buf := bytes.NewBuffer(b)
	r, err := zlib.NewReader(buf)
	if nil != err {
		return nil, err
	}
	defer r.Close()

	return ioutil.ReadAll(r)
}

// compressEncode compresses b and then base64-encodes the result for
// transport as a string.
func compressEncode(b []byte) (string, error) {
	compressed, err := compress(b)

	if nil != err {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(compressed), nil
}

// uncompressDecode reverses compressEncode: base64-decode, then inflate.
func uncompressDecode(s string) ([]byte, error) {
	decoded, err := base64.StdEncoding.DecodeString(s)
	if nil != err {
		return nil, err
	}

	return uncompress(decoded)
}
// AgentRunID identifies the current connection session with the collector.
// It is assigned by the collector in the connect reply.
type AgentRunID string

// String returns the run id as a plain string.
func (id AgentRunID) String() string {
	return string(id)
}

// AppRun contains information regarding a single connection session with the
// collector.  It is created upon application connect and is afterwards
// immutable.
type AppRun struct {
	*ConnectReply
	// Collector is the host returned by the redirect step, used for all
	// subsequent calls in this session.
	Collector string
}
// CreateFullTxnName uses collector rules and the appropriate metric prefix to
// construct the full transaction metric name from the name given by the
// consumer.  An empty return value means no name could be produced —
// presumably signaling that the transaction should be ignored; confirm
// against the metricRules implementation.
func CreateFullTxnName(input string, reply *ConnectReply, isWeb bool) string {
	// URL rules are applied to the raw input first; an empty result aborts.
	var afterURLRules string
	if "" != input {
		afterURLRules = reply.URLRules.Apply(input)
		if "" == afterURLRules {
			return ""
		}
	}

	// Web and background transactions get different metric prefixes.
	prefix := backgroundMetricPrefix
	if isWeb {
		prefix = webMetricPrefix
	}

	// Join prefix and name without doubling the separator.
	var beforeNameRules string
	if strings.HasPrefix(afterURLRules, "/") {
		beforeNameRules = prefix + afterURLRules
	} else {
		beforeNameRules = prefix + "/" + afterURLRules
	}

	afterNameRules := reply.TxnNameRules.Apply(beforeNameRules)
	if "" == afterNameRules {
		return ""
	}

	// Segment terms are applied last, to the fully prefixed name.
	return reply.SegmentTerms.apply(afterNameRules)
}
// ErrEventTypeRegex will be returned to caller of app.RecordCustomEvent + // if the event type is not valid. + ErrEventTypeRegex = fmt.Errorf("event type must match %s", eventTypeRegexRaw) + errNumAttributes = fmt.Errorf("maximum of %d attributes exceeded", + customEventAttributeLimit) +) + +// CustomEvent is a custom event. +type CustomEvent struct { + eventType string + timestamp time.Time + truncatedParams map[string]interface{} +} + +// WriteJSON prepares JSON in the format expected by the collector. +func (e *CustomEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", e.eventType) + w.floatField("timestamp", timeToFloatSeconds(e.timestamp)) + buf.WriteByte('}') + + buf.WriteByte(',') + buf.WriteByte('{') + w = jsonFieldsWriter{buf: buf} + for key, val := range e.truncatedParams { + writeAttributeValueJSON(&w, key, val) + } + buf.WriteByte('}') + + buf.WriteByte(',') + buf.WriteByte('{') + buf.WriteByte('}') + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. +func (e *CustomEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +func eventTypeValidate(eventType string) error { + if len(eventType) > attributeKeyLengthLimit { + return errEventTypeLength + } + if !eventTypeRegex.MatchString(eventType) { + return ErrEventTypeRegex + } + return nil +} + +// CreateCustomEvent creates a custom event. 
+func CreateCustomEvent(eventType string, params map[string]interface{}, now time.Time) (*CustomEvent, error) { + if err := eventTypeValidate(eventType); nil != err { + return nil, err + } + + if len(params) > customEventAttributeLimit { + return nil, errNumAttributes + } + + truncatedParams := make(map[string]interface{}) + for key, val := range params { + if err := validAttributeKey(key); nil != err { + return nil, err + } + + val = truncateStringValueIfLongInterface(val) + + if err := valueIsValid(val); nil != err { + return nil, err + } + truncatedParams[key] = val + } + + return &CustomEvent{ + eventType: eventType, + timestamp: now, + truncatedParams: truncatedParams, + }, nil +} + +// MergeIntoHarvest implements Harvestable. +func (e *CustomEvent) MergeIntoHarvest(h *Harvest) { + h.CustomEvents.Add(e) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/custom_events.go b/vendor/github.com/newrelic/go-agent/internal/custom_events.go new file mode 100644 index 0000000000..44e6b973eb --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/custom_events.go @@ -0,0 +1,32 @@ +package internal + +import ( + "math/rand" + "time" +) + +type customEvents struct { + events *analyticsEvents +} + +func newCustomEvents(max int) *customEvents { + return &customEvents{ + events: newAnalyticsEvents(max), + } +} + +func (cs *customEvents) Add(e *CustomEvent) { + stamp := eventStamp(rand.Float32()) + cs.events.addEvent(analyticsEvent{stamp, e}) +} + +func (cs *customEvents) MergeIntoHarvest(h *Harvest) { + h.CustomEvents.events.mergeFailed(cs.events) +} + +func (cs *customEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return cs.events.CollectorJSON(agentRunID) +} + +func (cs *customEvents) numSeen() float64 { return cs.events.NumSeen() } +func (cs *customEvents) numSaved() float64 { return cs.events.NumSaved() } diff --git a/vendor/github.com/newrelic/go-agent/internal/environment.go 
b/vendor/github.com/newrelic/go-agent/internal/environment.go new file mode 100644 index 0000000000..f7f2780122 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/environment.go @@ -0,0 +1,61 @@ +package internal + +import ( + "encoding/json" + "reflect" + "runtime" +) + +// Environment describes the application's environment. +type Environment struct { + Compiler string `env:"runtime.Compiler"` + GOARCH string `env:"runtime.GOARCH"` + GOOS string `env:"runtime.GOOS"` + Version string `env:"runtime.Version"` + NumCPU int `env:"runtime.NumCPU"` +} + +var ( + // SampleEnvironment is useful for testing. + SampleEnvironment = Environment{ + Compiler: "comp", + GOARCH: "arch", + GOOS: "goos", + Version: "vers", + NumCPU: 8, + } +) + +// NewEnvironment returns a new Environment. +func NewEnvironment() Environment { + return Environment{ + Compiler: runtime.Compiler, + GOARCH: runtime.GOARCH, + GOOS: runtime.GOOS, + Version: runtime.Version(), + NumCPU: runtime.NumCPU(), + } +} + +// MarshalJSON prepares Environment JSON in the format expected by the collector +// during the connect command. +func (e Environment) MarshalJSON() ([]byte, error) { + var arr [][]interface{} + + val := reflect.ValueOf(e) + numFields := val.NumField() + + arr = make([][]interface{}, numFields) + + for i := 0; i < numFields; i++ { + v := val.Field(i) + t := val.Type().Field(i).Tag.Get("env") + + arr[i] = []interface{}{ + t, + v.Interface(), + } + } + + return json.Marshal(arr) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/error_events.go b/vendor/github.com/newrelic/go-agent/internal/error_events.go new file mode 100644 index 0000000000..2b1f3493cf --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/error_events.go @@ -0,0 +1,87 @@ +package internal + +import ( + "bytes" + "math/rand" + "time" +) + +// ErrorEvent is an error event. 
+type ErrorEvent struct { + Klass string + Msg string + When time.Time + TxnName string + Duration time.Duration + Queuing time.Duration + Attrs *Attributes + DatastoreExternalTotals +} + +// MarshalJSON is used for testing. +func (e *ErrorEvent) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, 0, 256)) + + e.WriteJSON(buf) + + return buf.Bytes(), nil +} + +// WriteJSON prepares JSON in the format expected by the collector. +// https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md +func (e *ErrorEvent) WriteJSON(buf *bytes.Buffer) { + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('[') + buf.WriteByte('{') + w.stringField("type", "TransactionError") + w.stringField("error.class", e.Klass) + w.stringField("error.message", e.Msg) + w.floatField("timestamp", timeToFloatSeconds(e.When)) + w.stringField("transactionName", e.TxnName) + w.floatField("duration", e.Duration.Seconds()) + if e.Queuing > 0 { + w.floatField("queueDuration", e.Queuing.Seconds()) + } + if e.externalCallCount > 0 { + w.intField("externalCallCount", int64(e.externalCallCount)) + w.floatField("externalDuration", e.externalDuration.Seconds()) + } + if e.datastoreCallCount > 0 { + // Note that "database" is used for the keys here instead of + // "datastore" for historical reasons. 
+ w.intField("databaseCallCount", int64(e.datastoreCallCount)) + w.floatField("databaseDuration", e.datastoreDuration.Seconds()) + } + buf.WriteByte('}') + buf.WriteByte(',') + userAttributesJSON(e.Attrs, buf, destError) + buf.WriteByte(',') + agentAttributesJSON(e.Attrs, buf, destError) + buf.WriteByte(']') +} + +type errorEvents struct { + events *analyticsEvents +} + +func newErrorEvents(max int) *errorEvents { + return &errorEvents{ + events: newAnalyticsEvents(max), + } +} + +func (events *errorEvents) Add(e *ErrorEvent) { + stamp := eventStamp(rand.Float32()) + events.events.addEvent(analyticsEvent{stamp, e}) +} + +func (events *errorEvents) MergeIntoHarvest(h *Harvest) { + h.ErrorEvents.events.mergeFailed(events.events) +} + +func (events *errorEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + return events.events.CollectorJSON(agentRunID) +} + +func (events *errorEvents) numSeen() float64 { return events.events.NumSeen() } +func (events *errorEvents) numSaved() float64 { return events.events.NumSaved() } diff --git a/vendor/github.com/newrelic/go-agent/internal/errors.go b/vendor/github.com/newrelic/go-agent/internal/errors.go new file mode 100644 index 0000000000..a461a9614d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/errors.go @@ -0,0 +1,179 @@ +package internal + +import ( + "bytes" + "fmt" + "net/http" + "reflect" + "strconv" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +const ( + // PanicErrorKlass is the error klass used for errors generated by + // recovering panics in txn.End. + PanicErrorKlass = "panic" +) + +func panicValueMsg(v interface{}) string { + switch val := v.(type) { + case error: + return val.Error() + default: + return fmt.Sprintf("%v", v) + } +} + +// TxnErrorFromPanic creates a new TxnError from a panic. 
+func TxnErrorFromPanic(now time.Time, v interface{}) TxnError { + return TxnError{ + When: now, + Msg: panicValueMsg(v), + Klass: PanicErrorKlass, + } +} + +// TxnErrorFromError creates a new TxnError from an error. +func TxnErrorFromError(now time.Time, err error) TxnError { + return TxnError{ + When: now, + Msg: err.Error(), + Klass: reflect.TypeOf(err).String(), + } +} + +// TxnErrorFromResponseCode creates a new TxnError from an http response code. +func TxnErrorFromResponseCode(now time.Time, code int) TxnError { + return TxnError{ + When: now, + Msg: http.StatusText(code), + Klass: strconv.Itoa(code), + } +} + +// TxnError is an error captured in a Transaction. +type TxnError struct { + When time.Time + Stack *StackTrace + Msg string + Klass string +} + +// TxnErrors is a set of errors captured in a Transaction. +type TxnErrors []*TxnError + +// NewTxnErrors returns a new empty TxnErrors. +func NewTxnErrors(max int) TxnErrors { + return make([]*TxnError, 0, max) +} + +// Add adds a TxnError. +func (errors *TxnErrors) Add(e TxnError) { + if len(*errors) < cap(*errors) { + *errors = append(*errors, &e) + } +} + +func (h *harvestError) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + jsonx.AppendFloat(buf, timeToFloatMilliseconds(h.When)) + buf.WriteByte(',') + jsonx.AppendString(buf, h.txnName) + buf.WriteByte(',') + jsonx.AppendString(buf, h.Msg) + buf.WriteByte(',') + jsonx.AppendString(buf, h.Klass) + buf.WriteByte(',') + buf.WriteByte('{') + w := jsonFieldsWriter{buf: buf} + if nil != h.Stack { + w.writerField("stack_trace", h.Stack) + } + w.writerField("agentAttributes", agentAttributesJSONWriter{ + attributes: h.attrs, + dest: destError, + }) + w.writerField("userAttributes", userAttributesJSONWriter{ + attributes: h.attrs, + dest: destError, + }) + w.rawField("intrinsics", JSONString("{}")) + if h.requestURI != "" { + w.stringField("request_uri", h.requestURI) + } + buf.WriteByte('}') + buf.WriteByte(']') +} + +// MarshalJSON is used for testing. 
+func (h *harvestError) MarshalJSON() ([]byte, error) { + buf := &bytes.Buffer{} + h.WriteJSON(buf) + return buf.Bytes(), nil +} + +type harvestError struct { + TxnError + txnName string + requestURI string + attrs *Attributes +} + +type harvestErrors struct { + errors []*harvestError +} + +func newHarvestErrors(max int) *harvestErrors { + return &harvestErrors{ + errors: make([]*harvestError, 0, max), + } +} + +func harvestErrorFromTxnError(e *TxnError, txnName string, requestURI string, attrs *Attributes) *harvestError { + return &harvestError{ + TxnError: *e, + txnName: txnName, + requestURI: requestURI, + attrs: attrs, + } +} + +func addTxnError(errors *harvestErrors, e *TxnError, txnName string, requestURI string, attrs *Attributes) { + he := harvestErrorFromTxnError(e, txnName, requestURI, attrs) + errors.errors = append(errors.errors, he) +} + +// MergeTxnErrors merges a transaction's errors into the harvest's errors. +func MergeTxnErrors(errors *harvestErrors, errs TxnErrors, txnName string, requestURI string, attrs *Attributes) { + for _, e := range errs { + if len(errors.errors) == cap(errors.errors) { + return + } + addTxnError(errors, e, txnName, requestURI, attrs) + } +} + +func (errors *harvestErrors) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if 0 == len(errors.errors) { + return nil, nil + } + estimate := 1024 * len(errors.errors) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + buf.WriteByte('[') + jsonx.AppendString(buf, agentRunID) + buf.WriteByte(',') + buf.WriteByte('[') + for i, e := range errors.errors { + if i > 0 { + buf.WriteByte(',') + } + e.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (errors *harvestErrors) MergeIntoHarvest(h *Harvest) {} diff --git a/vendor/github.com/newrelic/go-agent/internal/expect.go b/vendor/github.com/newrelic/go-agent/internal/expect.go new file mode 100644 index 0000000000..ff78250353 --- /dev/null +++ 
b/vendor/github.com/newrelic/go-agent/internal/expect.go @@ -0,0 +1,402 @@ +package internal + +import ( + "fmt" + "runtime" + "time" +) + +var ( + // Unfortunately, the resolution of time.Now() on Windows is coarse: Two + // sequential calls to time.Now() may return the same value, and tests + // which expect non-zero durations may fail. To avoid adding sleep + // statements or mocking time.Now(), those tests are skipped on Windows. + doDurationTests = runtime.GOOS != `windows` +) + +// Validator is used for testing. +type Validator interface { + Error(...interface{}) +} + +func validateStringField(v Validator, fieldName, v1, v2 string) { + if v1 != v2 { + v.Error(fieldName, v1, v2) + } +} + +type addValidatorField struct { + field interface{} + original Validator +} + +func (a addValidatorField) Error(fields ...interface{}) { + fields = append([]interface{}{a.field}, fields...) + a.original.Error(fields...) +} + +// ExtendValidator is used to add more context to a validator. +func ExtendValidator(v Validator, field interface{}) Validator { + return addValidatorField{ + field: field, + original: v, + } +} + +// WantMetric is a metric expectation. If Data is nil, then any data values are +// acceptable. +type WantMetric struct { + Name string + Scope string + Forced interface{} // true, false, or nil + Data []float64 +} + +// WantCustomEvent is a custom event expectation. +type WantCustomEvent struct { + Type string + Params map[string]interface{} +} + +// WantError is a traced error expectation. +type WantError struct { + TxnName string + Msg string + Klass string + Caller string + URL string + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +// WantErrorEvent is an error event expectation. 
+type WantErrorEvent struct { + TxnName string + Msg string + Klass string + Queuing bool + ExternalCallCount uint64 + DatastoreCallCount uint64 + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +// WantTxnEvent is a transaction event expectation. +type WantTxnEvent struct { + Name string + Zone string + Queuing bool + ExternalCallCount uint64 + DatastoreCallCount uint64 + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +// WantTxnTrace is a transaction trace expectation. +type WantTxnTrace struct { + MetricName string + CleanURL string + NumSegments int + UserAttributes map[string]interface{} + AgentAttributes map[string]interface{} +} + +// WantSlowQuery is a slowQuery expectation. +type WantSlowQuery struct { + Count int32 + MetricName string + Query string + TxnName string + TxnURL string + DatabaseName string + Host string + PortPathOrID string + Params map[string]interface{} +} + +// Expect exposes methods that allow for testing whether the correct data was +// captured. +type Expect interface { + ExpectCustomEvents(t Validator, want []WantCustomEvent) + ExpectErrors(t Validator, want []WantError) + ExpectErrorEvents(t Validator, want []WantErrorEvent) + ExpectTxnEvents(t Validator, want []WantTxnEvent) + ExpectMetrics(t Validator, want []WantMetric) + ExpectTxnTraces(t Validator, want []WantTxnTrace) + ExpectSlowQueries(t Validator, want []WantSlowQuery) +} + +func expectMetricField(t Validator, id metricID, v1, v2 float64, fieldName string) { + if v1 != v2 { + t.Error("metric fields do not match", id, v1, v2, fieldName) + } +} + +// ExpectMetrics allows testing of metrics. 
+func ExpectMetrics(t Validator, mt *metricTable, expect []WantMetric) { + if len(mt.metrics) != len(expect) { + t.Error("metric counts do not match expectations", len(mt.metrics), len(expect)) + } + expectedIds := make(map[metricID]struct{}) + for _, e := range expect { + id := metricID{Name: e.Name, Scope: e.Scope} + expectedIds[id] = struct{}{} + m := mt.metrics[id] + if nil == m { + t.Error("unable to find metric", id) + continue + } + + if b, ok := e.Forced.(bool); ok { + if b != (forced == m.forced) { + t.Error("metric forced incorrect", b, m.forced, id) + } + } + + if nil != e.Data { + expectMetricField(t, id, e.Data[0], m.data.countSatisfied, "countSatisfied") + expectMetricField(t, id, e.Data[1], m.data.totalTolerated, "totalTolerated") + expectMetricField(t, id, e.Data[2], m.data.exclusiveFailed, "exclusiveFailed") + expectMetricField(t, id, e.Data[3], m.data.min, "min") + expectMetricField(t, id, e.Data[4], m.data.max, "max") + expectMetricField(t, id, e.Data[5], m.data.sumSquares, "sumSquares") + } + } + for id := range mt.metrics { + if _, ok := expectedIds[id]; !ok { + t.Error("expected metrics does not contain", id.Name, id.Scope) + } + } +} + +func expectAttributes(v Validator, exists map[string]interface{}, expect map[string]interface{}) { + // TODO: This params comparison can be made smarter: Alert differences + // based on sub/super set behavior. 
+ if len(exists) != len(expect) { + v.Error("attributes length difference", exists, expect) + return + } + for key, val := range expect { + found, ok := exists[key] + if !ok { + v.Error("missing key", key) + continue + } + v1 := fmt.Sprint(found) + v2 := fmt.Sprint(val) + if v1 != v2 { + v.Error("value difference", fmt.Sprintf("key=%s", key), + v1, v2) + } + } +} + +func expectCustomEvent(v Validator, event *CustomEvent, expect WantCustomEvent) { + if event.eventType != expect.Type { + v.Error("type mismatch", event.eventType, expect.Type) + } + now := time.Now() + diff := absTimeDiff(now, event.timestamp) + if diff > time.Hour { + v.Error("large timestamp difference", event.eventType, now, event.timestamp) + } + expectAttributes(v, event.truncatedParams, expect.Params) +} + +// ExpectCustomEvents allows testing of custom events. +func ExpectCustomEvents(v Validator, cs *customEvents, expect []WantCustomEvent) { + if len(cs.events.events) != len(expect) { + v.Error("number of custom events does not match", len(cs.events.events), + len(expect)) + return + } + for i, e := range expect { + event, ok := cs.events.events[i].jsonWriter.(*CustomEvent) + if !ok { + v.Error("wrong custom event") + } else { + expectCustomEvent(v, event, e) + } + } +} + +func expectErrorEvent(v Validator, err *ErrorEvent, expect WantErrorEvent) { + validateStringField(v, "txnName", expect.TxnName, err.TxnName) + validateStringField(v, "klass", expect.Klass, err.Klass) + validateStringField(v, "msg", expect.Msg, err.Msg) + if (0 != err.Queuing) != expect.Queuing { + v.Error("queuing", err.Queuing) + } + if nil != expect.UserAttributes { + expectAttributes(v, getUserAttributes(err.Attrs, destError), expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, getAgentAttributes(err.Attrs, destError), expect.AgentAttributes) + } + if expect.ExternalCallCount != err.externalCallCount { + v.Error("external call count", expect.ExternalCallCount, err.externalCallCount) + } + 
if doDurationTests && (0 == expect.ExternalCallCount) != (err.externalDuration == 0) { + v.Error("external duration", err.externalDuration) + } + if expect.DatastoreCallCount != err.datastoreCallCount { + v.Error("datastore call count", expect.DatastoreCallCount, err.datastoreCallCount) + } + if doDurationTests && (0 == expect.DatastoreCallCount) != (err.datastoreDuration == 0) { + v.Error("datastore duration", err.datastoreDuration) + } +} + +// ExpectErrorEvents allows testing of error events. +func ExpectErrorEvents(v Validator, events *errorEvents, expect []WantErrorEvent) { + if len(events.events.events) != len(expect) { + v.Error("number of custom events does not match", + len(events.events.events), len(expect)) + return + } + for i, e := range expect { + event, ok := events.events.events[i].jsonWriter.(*ErrorEvent) + if !ok { + v.Error("wrong error event") + } else { + expectErrorEvent(v, event, e) + } + } +} + +func expectTxnEvent(v Validator, e *TxnEvent, expect WantTxnEvent) { + validateStringField(v, "apdex zone", expect.Zone, e.Zone.label()) + validateStringField(v, "name", expect.Name, e.Name) + if doDurationTests && 0 == e.Duration { + v.Error("zero duration", e.Duration) + } + if (0 != e.Queuing) != expect.Queuing { + v.Error("queuing", e.Queuing) + } + if nil != expect.UserAttributes { + expectAttributes(v, getUserAttributes(e.Attrs, destTxnEvent), expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, getAgentAttributes(e.Attrs, destTxnEvent), expect.AgentAttributes) + } + if expect.ExternalCallCount != e.externalCallCount { + v.Error("external call count", expect.ExternalCallCount, e.externalCallCount) + } + if doDurationTests && (0 == expect.ExternalCallCount) != (e.externalDuration == 0) { + v.Error("external duration", e.externalDuration) + } + if expect.DatastoreCallCount != e.datastoreCallCount { + v.Error("datastore call count", expect.DatastoreCallCount, e.datastoreCallCount) + } + if doDurationTests && (0 == 
expect.DatastoreCallCount) != (e.datastoreDuration == 0) { + v.Error("datastore duration", e.datastoreDuration) + } +} + +// ExpectTxnEvents allows testing of txn events. +func ExpectTxnEvents(v Validator, events *txnEvents, expect []WantTxnEvent) { + if len(events.events.events) != len(expect) { + v.Error("number of txn events does not match", + len(events.events.events), len(expect)) + return + } + for i, e := range expect { + event, ok := events.events.events[i].jsonWriter.(*TxnEvent) + if !ok { + v.Error("wrong txn event") + } else { + expectTxnEvent(v, event, e) + } + } +} + +func expectError(v Validator, err *harvestError, expect WantError) { + caller := topCallerNameBase(err.TxnError.Stack) + validateStringField(v, "caller", expect.Caller, caller) + validateStringField(v, "txnName", expect.TxnName, err.txnName) + validateStringField(v, "klass", expect.Klass, err.TxnError.Klass) + validateStringField(v, "msg", expect.Msg, err.TxnError.Msg) + validateStringField(v, "URL", expect.URL, err.requestURI) + if nil != expect.UserAttributes { + expectAttributes(v, getUserAttributes(err.attrs, destError), expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, getAgentAttributes(err.attrs, destError), expect.AgentAttributes) + } +} + +// ExpectErrors allows testing of errors. 
+func ExpectErrors(v Validator, errors *harvestErrors, expect []WantError) { + if len(errors.errors) != len(expect) { + v.Error("number of errors mismatch", len(errors.errors), len(expect)) + return + } + for i, e := range expect { + expectError(v, errors.errors[i], e) + } +} + +func expectTxnTrace(v Validator, trace *HarvestTrace, expect WantTxnTrace) { + if doDurationTests && 0 == trace.Duration { + v.Error("zero trace duration") + } + validateStringField(v, "metric name", expect.MetricName, trace.MetricName) + validateStringField(v, "request url", expect.CleanURL, trace.CleanURL) + if nil != expect.UserAttributes { + expectAttributes(v, getUserAttributes(trace.Attrs, destTxnTrace), expect.UserAttributes) + } + if nil != expect.AgentAttributes { + expectAttributes(v, getAgentAttributes(trace.Attrs, destTxnTrace), expect.AgentAttributes) + } + if expect.NumSegments != len(trace.Trace.nodes) { + v.Error("wrong number of segments", expect.NumSegments, len(trace.Trace.nodes)) + } +} + +// ExpectTxnTraces allows testing of transaction traces. 
+func ExpectTxnTraces(v Validator, traces *harvestTraces, want []WantTxnTrace) { + if len(want) == 0 { + if nil != traces.trace { + v.Error("trace exists when not expected") + } + } else if len(want) > 1 { + v.Error("too many traces expected") + } else { + if nil == traces.trace { + v.Error("missing expected trace") + } else { + expectTxnTrace(v, traces.trace, want[0]) + } + } +} + +func expectSlowQuery(t Validator, slowQuery *slowQuery, want WantSlowQuery) { + if slowQuery.Count != want.Count { + t.Error("wrong Count field", slowQuery.Count, want.Count) + } + validateStringField(t, "MetricName", slowQuery.DatastoreMetric, want.MetricName) + validateStringField(t, "Query", slowQuery.ParameterizedQuery, want.Query) + validateStringField(t, "TxnName", slowQuery.TxnName, want.TxnName) + validateStringField(t, "TxnURL", slowQuery.TxnURL, want.TxnURL) + validateStringField(t, "DatabaseName", slowQuery.DatabaseName, want.DatabaseName) + validateStringField(t, "Host", slowQuery.Host, want.Host) + validateStringField(t, "PortPathOrID", slowQuery.PortPathOrID, want.PortPathOrID) + expectAttributes(t, map[string]interface{}(slowQuery.QueryParameters), want.Params) +} + +// ExpectSlowQueries allows testing of slow queries. 
+func ExpectSlowQueries(t Validator, slowQueries *slowQueries, want []WantSlowQuery) { + if len(want) != len(slowQueries.priorityQueue) { + t.Error("wrong number of slow queries", + "expected", len(want), "got", len(slowQueries.priorityQueue)) + return + } + for _, s := range want { + idx, ok := slowQueries.lookup[s.Query] + if !ok { + t.Error("unable to find slow query", s.Query) + continue + } + expectSlowQuery(t, slowQueries.priorityQueue[idx], s) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/harvest.go b/vendor/github.com/newrelic/go-agent/internal/harvest.go new file mode 100644 index 0000000000..6d63db0dc7 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/harvest.go @@ -0,0 +1,153 @@ +package internal + +import ( + "strings" + "sync" + "time" +) + +// Harvestable is something that can be merged into a Harvest. +type Harvestable interface { + MergeIntoHarvest(h *Harvest) +} + +// Harvest contains collected data. +type Harvest struct { + Metrics *metricTable + CustomEvents *customEvents + TxnEvents *txnEvents + ErrorEvents *errorEvents + ErrorTraces *harvestErrors + TxnTraces *harvestTraces + SlowSQLs *slowQueries +} + +// Payloads returns a map from expected collector method name to data type. +func (h *Harvest) Payloads() map[string]PayloadCreator { + return map[string]PayloadCreator{ + cmdMetrics: h.Metrics, + cmdCustomEvents: h.CustomEvents, + cmdTxnEvents: h.TxnEvents, + cmdErrorEvents: h.ErrorEvents, + cmdErrorData: h.ErrorTraces, + cmdTxnTraces: h.TxnTraces, + cmdSlowSQLs: h.SlowSQLs, + } +} + +// NewHarvest returns a new Harvest. 
+func NewHarvest(now time.Time) *Harvest { + return &Harvest{ + Metrics: newMetricTable(maxMetrics, now), + CustomEvents: newCustomEvents(maxCustomEvents), + TxnEvents: newTxnEvents(maxTxnEvents), + ErrorEvents: newErrorEvents(maxErrorEvents), + ErrorTraces: newHarvestErrors(maxHarvestErrors), + TxnTraces: newHarvestTraces(), + SlowSQLs: newSlowQueries(maxHarvestSlowSQLs), + } +} + +var ( + trackMutex sync.Mutex + trackMetrics []string +) + +// TrackUsage helps track which integration packages are used. +func TrackUsage(s ...string) { + trackMutex.Lock() + defer trackMutex.Unlock() + + m := "Supportability/" + strings.Join(s, "/") + trackMetrics = append(trackMetrics, m) +} + +func createTrackUsageMetrics(metrics *metricTable) { + trackMutex.Lock() + defer trackMutex.Unlock() + + for _, m := range trackMetrics { + metrics.addSingleCount(m, forced) + } +} + +// CreateFinalMetrics creates extra metrics at harvest time. +func (h *Harvest) CreateFinalMetrics() { + h.Metrics.addSingleCount(instanceReporting, forced) + + h.Metrics.addCount(customEventsSeen, h.CustomEvents.numSeen(), forced) + h.Metrics.addCount(customEventsSent, h.CustomEvents.numSaved(), forced) + + h.Metrics.addCount(txnEventsSeen, h.TxnEvents.numSeen(), forced) + h.Metrics.addCount(txnEventsSent, h.TxnEvents.numSaved(), forced) + + h.Metrics.addCount(errorEventsSeen, h.ErrorEvents.numSeen(), forced) + h.Metrics.addCount(errorEventsSent, h.ErrorEvents.numSaved(), forced) + + if h.Metrics.numDropped > 0 { + h.Metrics.addCount(supportabilityDropped, float64(h.Metrics.numDropped), forced) + } + + createTrackUsageMetrics(h.Metrics) +} + +// PayloadCreator is a data type in the harvest. +type PayloadCreator interface { + // In the event of a rpm request failure (hopefully simply an + // intermittent collector issue) the payload may be merged into the next + // time period's harvest. + Harvestable + // Data prepares JSON in the format expected by the collector endpoint. 
+ // This method should return (nil, nil) if the payload is empty and no + // rpm request is necessary. + Data(agentRunID string, harvestStart time.Time) ([]byte, error) +} + +// CreateTxnMetricsArgs contains the parameters to CreateTxnMetrics. +type CreateTxnMetricsArgs struct { + IsWeb bool + Duration time.Duration + Exclusive time.Duration + Name string + Zone ApdexZone + ApdexThreshold time.Duration + HasErrors bool + Queueing time.Duration +} + +// CreateTxnMetrics creates metrics for a transaction. +func CreateTxnMetrics(args CreateTxnMetricsArgs, metrics *metricTable) { + // Duration Metrics + rollup := backgroundRollup + if args.IsWeb { + rollup = webRollup + metrics.addDuration(dispatcherMetric, "", args.Duration, 0, forced) + } + + metrics.addDuration(args.Name, "", args.Duration, args.Exclusive, forced) + metrics.addDuration(rollup, "", args.Duration, args.Exclusive, forced) + + // Apdex Metrics + if args.Zone != ApdexNone { + metrics.addApdex(apdexRollup, "", args.ApdexThreshold, args.Zone, forced) + + mname := apdexPrefix + removeFirstSegment(args.Name) + metrics.addApdex(mname, "", args.ApdexThreshold, args.Zone, unforced) + } + + // Error Metrics + if args.HasErrors { + metrics.addSingleCount(errorsAll, forced) + if args.IsWeb { + metrics.addSingleCount(errorsWeb, forced) + } else { + metrics.addSingleCount(errorsBackground, forced) + } + metrics.addSingleCount(errorsPrefix+args.Name, forced) + } + + // Queueing Metrics + if args.Queueing > 0 { + metrics.addDuration(queueMetric, "", args.Queueing, args.Queueing, forced) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/json_object_writer.go b/vendor/github.com/newrelic/go-agent/internal/json_object_writer.go new file mode 100644 index 0000000000..65f0f9487b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/json_object_writer.go @@ -0,0 +1,52 @@ +package internal + +import ( + "bytes" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type jsonWriter interface { + 
WriteJSON(buf *bytes.Buffer) +} + +type jsonFieldsWriter struct { + buf *bytes.Buffer + needsComma bool +} + +func (w *jsonFieldsWriter) addKey(key string) { + if w.needsComma { + w.buf.WriteByte(',') + } else { + w.needsComma = true + } + // defensively assume that the key needs escaping: + jsonx.AppendString(w.buf, key) + w.buf.WriteByte(':') +} + +func (w *jsonFieldsWriter) stringField(key string, val string) { + w.addKey(key) + jsonx.AppendString(w.buf, val) +} + +func (w *jsonFieldsWriter) intField(key string, val int64) { + w.addKey(key) + jsonx.AppendInt(w.buf, val) +} + +func (w *jsonFieldsWriter) floatField(key string, val float64) { + w.addKey(key) + jsonx.AppendFloat(w.buf, val) +} + +func (w *jsonFieldsWriter) rawField(key string, val JSONString) { + w.addKey(key) + w.buf.WriteString(string(val)) +} + +func (w *jsonFieldsWriter) writerField(key string, val jsonWriter) { + w.addKey(key) + val.WriteJSON(w.buf) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go b/vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go new file mode 100644 index 0000000000..6495829f78 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/jsonx/encode.go @@ -0,0 +1,174 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonx extends the encoding/json package to encode JSON +// incrementally and without requiring reflection. +package jsonx + +import ( + "bytes" + "encoding/json" + "math" + "reflect" + "strconv" + "unicode/utf8" +) + +var hex = "0123456789abcdef" + +// AppendString escapes s appends it to buf. 
+func AppendString(buf *bytes.Buffer, s string) { + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + case '\t': + buf.WriteByte('\\') + buf.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \n and \r, + // as well as <, > and &. The latter are escaped because they + // can lead to security holes when user-controlled strings + // are rendered into JSON and served to some browsers. + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} + +// AppendStringArray appends an array of string literals to buf. 
+func AppendStringArray(buf *bytes.Buffer, a ...string) { + buf.WriteByte('[') + for i, s := range a { + if i > 0 { + buf.WriteByte(',') + } + AppendString(buf, s) + } + buf.WriteByte(']') +} + +// AppendFloat appends a numeric literal representing the value to buf. +func AppendFloat(buf *bytes.Buffer, x float64) error { + var scratch [64]byte + + if math.IsInf(x, 0) || math.IsNaN(x) { + return &json.UnsupportedValueError{ + Value: reflect.ValueOf(x), + Str: strconv.FormatFloat(x, 'g', -1, 64), + } + } + + buf.Write(strconv.AppendFloat(scratch[:0], x, 'g', -1, 64)) + return nil +} + +// AppendFloatArray appends an array of numeric literals to buf. +func AppendFloatArray(buf *bytes.Buffer, a ...float64) error { + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteByte(',') + } + if err := AppendFloat(buf, x); err != nil { + return err + } + } + buf.WriteByte(']') + return nil +} + +// AppendInt appends a numeric literal representing the value to buf. +func AppendInt(buf *bytes.Buffer, x int64) { + var scratch [64]byte + buf.Write(strconv.AppendInt(scratch[:0], x, 10)) +} + +// AppendIntArray appends an array of numeric literals to buf. +func AppendIntArray(buf *bytes.Buffer, a ...int64) { + var scratch [64]byte + + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteByte(',') + } + buf.Write(strconv.AppendInt(scratch[:0], x, 10)) + } + buf.WriteByte(']') +} + +// AppendUint appends a numeric literal representing the value to buf. +func AppendUint(buf *bytes.Buffer, x uint64) { + var scratch [64]byte + buf.Write(strconv.AppendUint(scratch[:0], x, 10)) +} + +// AppendUintArray appends an array of numeric literals to buf. 
+func AppendUintArray(buf *bytes.Buffer, a ...uint64) { + var scratch [64]byte + + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteByte(',') + } + buf.Write(strconv.AppendUint(scratch[:0], x, 10)) + } + buf.WriteByte(']') +} diff --git a/vendor/github.com/newrelic/go-agent/internal/labels.go b/vendor/github.com/newrelic/go-agent/internal/labels.go new file mode 100644 index 0000000000..b3671c65c9 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/labels.go @@ -0,0 +1,23 @@ +package internal + +import "encoding/json" + +// Labels is used for connect JSON formatting. +type Labels map[string]string + +// MarshalJSON requires a comment for golint? +func (l Labels) MarshalJSON() ([]byte, error) { + ls := make([]struct { + Key string `json:"label_type"` + Value string `json:"label_value"` + }, len(l)) + + i := 0 + for key, val := range l { + ls[i].Key = key + ls[i].Value = val + i++ + } + + return json.Marshal(ls) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/limits.go b/vendor/github.com/newrelic/go-agent/internal/limits.go new file mode 100644 index 0000000000..f6cee9564b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/limits.go @@ -0,0 +1,53 @@ +package internal + +import "time" + +const ( + // app behavior + + // ConnectBackoff is the wait time between unsuccessful connect + // attempts. + ConnectBackoff = 20 * time.Second + // HarvestPeriod is the period that collected data is sent to New Relic. + HarvestPeriod = 60 * time.Second + // CollectorTimeout is the timeout used in the client for communication + // with New Relic's servers. + CollectorTimeout = 20 * time.Second + // AppDataChanSize is the size of the channel that contains data sent + // the app processor. + AppDataChanSize = 200 + failedMetricAttemptsLimit = 5 + failedEventsAttemptsLimit = 10 + + // transaction behavior + maxStackTraceFrames = 100 + // MaxTxnErrors is the maximum number of errors captured per + // transaction. 
+ MaxTxnErrors = 5 + maxTxnTraceNodes = 256 + maxTxnSlowQueries = 10 + + // harvest data + maxMetrics = 2 * 1000 + maxCustomEvents = 10 * 1000 + maxTxnEvents = 10 * 1000 + maxErrorEvents = 100 + maxHarvestErrors = 20 + maxHarvestSlowSQLs = 10 + + // attributes + attributeKeyLengthLimit = 255 + attributeValueLengthLimit = 255 + attributeUserLimit = 64 + attributeAgentLimit = 255 - attributeUserLimit + customEventAttributeLimit = 64 + + // Limits affecting Config validation are found in the config package. + + // RuntimeSamplerPeriod is the period of the runtime sampler. Runtime + // metrics should not depend on the sampler period, but the period must + // be the same across instances. For that reason, this value should not + // be changed without notifying customers that they must update all + // instance simultaneously for valid runtime metrics. + RuntimeSamplerPeriod = 60 * time.Second +) diff --git a/vendor/github.com/newrelic/go-agent/internal/logger/logger.go b/vendor/github.com/newrelic/go-agent/internal/logger/logger.go new file mode 100644 index 0000000000..a0e39fcb02 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/logger/logger.go @@ -0,0 +1,89 @@ +package logger + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" +) + +// Logger matches newrelic.Logger to allow implementations to be passed to +// internal packages. +type Logger interface { + Error(msg string, context map[string]interface{}) + Warn(msg string, context map[string]interface{}) + Info(msg string, context map[string]interface{}) + Debug(msg string, context map[string]interface{}) + DebugEnabled() bool +} + +// ShimLogger implements Logger and does nothing. +type ShimLogger struct{} + +// Error allows ShimLogger to implement Logger. +func (s ShimLogger) Error(string, map[string]interface{}) {} + +// Warn allows ShimLogger to implement Logger. +func (s ShimLogger) Warn(string, map[string]interface{}) {} + +// Info allows ShimLogger to implement Logger. 
+func (s ShimLogger) Info(string, map[string]interface{}) {} + +// Debug allows ShimLogger to implement Logger. +func (s ShimLogger) Debug(string, map[string]interface{}) {} + +// DebugEnabled allows ShimLogger to implement Logger. +func (s ShimLogger) DebugEnabled() bool { return false } + +type logFile struct { + l *log.Logger + doDebug bool +} + +// New creates a basic Logger. +func New(w io.Writer, doDebug bool) Logger { + return &logFile{ + l: log.New(w, logPid, logFlags), + doDebug: doDebug, + } +} + +const logFlags = log.Ldate | log.Ltime | log.Lmicroseconds + +var ( + logPid = fmt.Sprintf("(%d) ", os.Getpid()) +) + +func (f *logFile) fire(level, msg string, ctx map[string]interface{}) { + js, err := json.Marshal(struct { + Level string `json:"level"` + Event string `json:"msg"` + Context map[string]interface{} `json:"context"` + }{ + level, + msg, + ctx, + }) + if nil == err { + f.l.Printf(string(js)) + } else { + f.l.Printf("unable to marshal log entry: %v", err) + } +} + +func (f *logFile) Error(msg string, ctx map[string]interface{}) { + f.fire("error", msg, ctx) +} +func (f *logFile) Warn(msg string, ctx map[string]interface{}) { + f.fire("warn", msg, ctx) +} +func (f *logFile) Info(msg string, ctx map[string]interface{}) { + f.fire("info", msg, ctx) +} +func (f *logFile) Debug(msg string, ctx map[string]interface{}) { + if f.doDebug { + f.fire("debug", msg, ctx) + } +} +func (f *logFile) DebugEnabled() bool { return f.doDebug } diff --git a/vendor/github.com/newrelic/go-agent/internal/metric_names.go b/vendor/github.com/newrelic/go-agent/internal/metric_names.go new file mode 100644 index 0000000000..23668e0cfd --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metric_names.go @@ -0,0 +1,145 @@ +package internal + +const ( + apdexRollup = "Apdex" + apdexPrefix = "Apdex/" + + webRollup = "WebTransaction" + backgroundRollup = "OtherTransaction/all" + + errorsAll = "Errors/all" + errorsWeb = "Errors/allWeb" + errorsBackground = 
"Errors/allOther" + errorsPrefix = "Errors/" + + // "HttpDispatcher" metric is used for the overview graph, and + // therefore should only be made for web transactions. + dispatcherMetric = "HttpDispatcher" + + queueMetric = "WebFrontend/QueueTime" + + webMetricPrefix = "WebTransaction/Go" + backgroundMetricPrefix = "OtherTransaction/Go" + + instanceReporting = "Instance/Reporting" + + // https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents + customEventsSeen = "Supportability/Events/Customer/Seen" + customEventsSent = "Supportability/Events/Customer/Sent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md + txnEventsSeen = "Supportability/AnalyticsEvents/TotalEventsSeen" + txnEventsSent = "Supportability/AnalyticsEvents/TotalEventsSent" + + // https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md + errorEventsSeen = "Supportability/Events/TransactionError/Seen" + errorEventsSent = "Supportability/Events/TransactionError/Sent" + + supportabilityDropped = "Supportability/MetricsDropped" + + // source.datanerd.us/agents/agent-specs/blob/master/Datastore-Metrics-PORTED.md + datastoreAll = "Datastore/all" + datastoreWeb = "Datastore/allWeb" + datastoreOther = "Datastore/allOther" + + // source.datanerd.us/agents/agent-specs/blob/master/APIs/external_segment.md + // source.datanerd.us/agents/agent-specs/blob/master/APIs/external_cat.md + // source.datanerd.us/agents/agent-specs/blob/master/Cross-Application-Tracing-PORTED.md + externalAll = "External/all" + externalWeb = "External/allWeb" + externalOther = "External/allOther" + + // Runtime/System Metrics + memoryPhysical = "Memory/Physical" + heapObjectsAllocated = "Memory/Heap/AllocatedObjects" + cpuUserUtilization = "CPU/User/Utilization" + cpuSystemUtilization = "CPU/System/Utilization" + cpuUserTime = "CPU/User Time" + cpuSystemTime = "CPU/System Time" + runGoroutine = "Go/Runtime/Goroutines" + gcPauseFraction = 
"GC/System/Pause Fraction" + gcPauses = "GC/System/Pauses" +) + +func customSegmentMetric(s string) string { + return "Custom/" + s +} + +// DatastoreMetricKey contains the fields by which datastore metrics are +// aggregated. +type DatastoreMetricKey struct { + Product string + Collection string + Operation string + Host string + PortPathOrID string +} + +type externalMetricKey struct { + Host string + ExternalCrossProcessID string + ExternalTransactionName string +} + +type datastoreProductMetrics struct { + All string // Datastore/{datastore}/all + Web string // Datastore/{datastore}/allWeb + Other string // Datastore/{datastore}/allOther +} + +func datastoreScopedMetric(key DatastoreMetricKey) string { + if "" != key.Collection { + return datastoreStatementMetric(key) + } + return datastoreOperationMetric(key) +} + +func datastoreProductMetric(key DatastoreMetricKey) datastoreProductMetrics { + d, ok := datastoreProductMetricsCache[key.Product] + if ok { + return d + } + return datastoreProductMetrics{ + All: "Datastore/" + key.Product + "/all", + Web: "Datastore/" + key.Product + "/allWeb", + Other: "Datastore/" + key.Product + "/allOther", + } +} + +// Datastore/operation/{datastore}/{operation} +func datastoreOperationMetric(key DatastoreMetricKey) string { + return "Datastore/operation/" + key.Product + + "/" + key.Operation +} + +// Datastore/statement/{datastore}/{table}/{operation} +func datastoreStatementMetric(key DatastoreMetricKey) string { + return "Datastore/statement/" + key.Product + + "/" + key.Collection + + "/" + key.Operation +} + +// Datastore/instance/{datastore}/{host}/{port_path_or_id} +func datastoreInstanceMetric(key DatastoreMetricKey) string { + return "Datastore/instance/" + key.Product + + "/" + key.Host + + "/" + key.PortPathOrID +} + +// External/{host}/all +func externalHostMetric(key externalMetricKey) string { + return "External/" + key.Host + "/all" +} + +// ExternalApp/{host}/{external_id}/all +func externalAppMetric(key 
externalMetricKey) string { + return "ExternalApp/" + key.Host + + "/" + key.ExternalCrossProcessID + "/all" +} + +// ExternalTransaction/{host}/{external_id}/{external_txnname} +func externalTransactionMetric(key externalMetricKey) string { + return "ExternalTransaction/" + key.Host + + "/" + key.ExternalCrossProcessID + + "/" + key.ExternalTransactionName +} diff --git a/vendor/github.com/newrelic/go-agent/internal/metric_names_datastore.go b/vendor/github.com/newrelic/go-agent/internal/metric_names_datastore.go new file mode 100644 index 0000000000..775dccdb64 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metric_names_datastore.go @@ -0,0 +1,96 @@ +package internal + +var ( + datastoreProductMetricsCache = map[string]datastoreProductMetrics{ + "Cassandra": { + All: "Datastore/Cassandra/all", + Web: "Datastore/Cassandra/allWeb", + Other: "Datastore/Cassandra/allOther", + }, + "Derby": { + All: "Datastore/Derby/all", + Web: "Datastore/Derby/allWeb", + Other: "Datastore/Derby/allOther", + }, + "Elasticsearch": { + All: "Datastore/Elasticsearch/all", + Web: "Datastore/Elasticsearch/allWeb", + Other: "Datastore/Elasticsearch/allOther", + }, + "Firebird": { + All: "Datastore/Firebird/all", + Web: "Datastore/Firebird/allWeb", + Other: "Datastore/Firebird/allOther", + }, + "IBMDB2": { + All: "Datastore/IBMDB2/all", + Web: "Datastore/IBMDB2/allWeb", + Other: "Datastore/IBMDB2/allOther", + }, + "Informix": { + All: "Datastore/Informix/all", + Web: "Datastore/Informix/allWeb", + Other: "Datastore/Informix/allOther", + }, + "Memcached": { + All: "Datastore/Memcached/all", + Web: "Datastore/Memcached/allWeb", + Other: "Datastore/Memcached/allOther", + }, + "MongoDB": { + All: "Datastore/MongoDB/all", + Web: "Datastore/MongoDB/allWeb", + Other: "Datastore/MongoDB/allOther", + }, + "MySQL": { + All: "Datastore/MySQL/all", + Web: "Datastore/MySQL/allWeb", + Other: "Datastore/MySQL/allOther", + }, + "MSSQL": { + All: "Datastore/MSSQL/all", + Web: 
"Datastore/MSSQL/allWeb", + Other: "Datastore/MSSQL/allOther", + }, + "Oracle": { + All: "Datastore/Oracle/all", + Web: "Datastore/Oracle/allWeb", + Other: "Datastore/Oracle/allOther", + }, + "Postgres": { + All: "Datastore/Postgres/all", + Web: "Datastore/Postgres/allWeb", + Other: "Datastore/Postgres/allOther", + }, + "Redis": { + All: "Datastore/Redis/all", + Web: "Datastore/Redis/allWeb", + Other: "Datastore/Redis/allOther", + }, + "Solr": { + All: "Datastore/Solr/all", + Web: "Datastore/Solr/allWeb", + Other: "Datastore/Solr/allOther", + }, + "SQLite": { + All: "Datastore/SQLite/all", + Web: "Datastore/SQLite/allWeb", + Other: "Datastore/SQLite/allOther", + }, + "CouchDB": { + All: "Datastore/CouchDB/all", + Web: "Datastore/CouchDB/allWeb", + Other: "Datastore/CouchDB/allOther", + }, + "Riak": { + All: "Datastore/Riak/all", + Web: "Datastore/Riak/allWeb", + Other: "Datastore/Riak/allOther", + }, + "VoltDB": { + All: "Datastore/VoltDB/all", + Web: "Datastore/VoltDB/allWeb", + Other: "Datastore/VoltDB/allOther", + }, + } +) diff --git a/vendor/github.com/newrelic/go-agent/internal/metric_rules.go b/vendor/github.com/newrelic/go-agent/internal/metric_rules.go new file mode 100644 index 0000000000..de6ac91c07 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metric_rules.go @@ -0,0 +1,163 @@ +package internal + +import ( + "encoding/json" + "regexp" + "sort" + "strings" +) + +type ruleResult int + +const ( + ruleChanged ruleResult = iota + ruleUnchanged + ruleIgnore +) + +type metricRule struct { + // 'Ignore' indicates if the entire transaction should be discarded if + // there is a match. This field is only used by "url_rules" and + // "transaction_name_rules", not "metric_name_rules". 
+ Ignore bool `json:"ignore"` + EachSegment bool `json:"each_segment"` + ReplaceAll bool `json:"replace_all"` + Terminate bool `json:"terminate_chain"` + Order int `json:"eval_order"` + OriginalReplacement string `json:"replacement"` + RawExpr string `json:"match_expression"` + + // Go's regexp backreferences use '${1}' instead of the Perlish '\1', so + // we transform the replacement string into the Go syntax and store it + // here. + TransformedReplacement string + re *regexp.Regexp +} + +type metricRules []*metricRule + +// Go's regexp backreferences use `${1}` instead of the Perlish `\1`, so we must +// transform the replacement string. This is non-trivial: `\1` is a +// backreference but `\\1` is not. Rather than count the number of back slashes +// preceding the digit, we simply skip rules with tricky replacements. +var ( + transformReplacementAmbiguous = regexp.MustCompile(`\\\\([0-9]+)`) + transformReplacementRegex = regexp.MustCompile(`\\([0-9]+)`) + transformReplacementReplacement = "$${${1}}" +) + +func (rules *metricRules) UnmarshalJSON(data []byte) (err error) { + var raw []*metricRule + + if err := json.Unmarshal(data, &raw); nil != err { + return err + } + + valid := make(metricRules, 0, len(raw)) + + for _, r := range raw { + re, err := regexp.Compile("(?i)" + r.RawExpr) + if err != nil { + // TODO + // Warn("unable to compile rule", { + // "match_expression": r.RawExpr, + // "error": err.Error(), + // }) + continue + } + + if transformReplacementAmbiguous.MatchString(r.OriginalReplacement) { + // TODO + // Warn("unable to transform replacement", { + // "match_expression": r.RawExpr, + // "replacement": r.OriginalReplacement, + // }) + continue + } + + r.re = re + r.TransformedReplacement = transformReplacementRegex.ReplaceAllString(r.OriginalReplacement, + transformReplacementReplacement) + valid = append(valid, r) + } + + sort.Sort(valid) + + *rules = valid + return nil +} + +func (rules metricRules) Len() int { + return len(rules) +} + +// Rules 
should be applied in increasing order +func (rules metricRules) Less(i, j int) bool { + return rules[i].Order < rules[j].Order +} +func (rules metricRules) Swap(i, j int) { + rules[i], rules[j] = rules[j], rules[i] +} + +func replaceFirst(re *regexp.Regexp, s string, replacement string) string { + // Note that ReplaceAllStringFunc cannot be used here since it does + // not replace $1 placeholders. + loc := re.FindStringIndex(s) + if nil == loc { + return s + } + firstMatch := s[loc[0]:loc[1]] + firstMatchReplaced := re.ReplaceAllString(firstMatch, replacement) + return s[0:loc[0]] + firstMatchReplaced + s[loc[1]:] +} + +func (r *metricRule) apply(s string) (ruleResult, string) { + // Rules are strange, and there is no spec. + // This code attempts to duplicate the logic of the PHP agent. + // Ambiguity abounds. + + if r.Ignore { + if r.re.MatchString(s) { + return ruleIgnore, "" + } + return ruleUnchanged, s + } + + var out string + + if r.ReplaceAll { + out = r.re.ReplaceAllString(s, r.TransformedReplacement) + } else if r.EachSegment { + segments := strings.Split(string(s), "/") + applied := make([]string, len(segments)) + for i, segment := range segments { + applied[i] = replaceFirst(r.re, segment, r.TransformedReplacement) + } + out = strings.Join(applied, "/") + } else { + out = replaceFirst(r.re, s, r.TransformedReplacement) + } + + if out == s { + return ruleUnchanged, out + } + return ruleChanged, out +} + +func (rules metricRules) Apply(input string) string { + var res ruleResult + s := input + + for _, rule := range rules { + res, s = rule.apply(s) + + if ruleIgnore == res { + return "" + } + if (ruleChanged == res) && rule.Terminate { + break + } + } + + return s +} diff --git a/vendor/github.com/newrelic/go-agent/internal/metrics.go b/vendor/github.com/newrelic/go-agent/internal/metrics.go new file mode 100644 index 0000000000..6cf6e85839 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/metrics.go @@ -0,0 +1,258 @@ +package internal + 
+import ( + "bytes" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type metricForce int + +const ( + forced metricForce = iota + unforced +) + +type metricID struct { + Name string `json:"name"` + Scope string `json:"scope,omitempty"` +} + +type metricData struct { + // These values are in the units expected by the collector. + countSatisfied float64 // Seconds, or count for Apdex + totalTolerated float64 // Seconds, or count for Apdex + exclusiveFailed float64 // Seconds, or count for Apdex + min float64 // Seconds + max float64 // Seconds + sumSquares float64 // Seconds**2, or 0 for Apdex +} + +func metricDataFromDuration(duration, exclusive time.Duration) metricData { + ds := duration.Seconds() + return metricData{ + countSatisfied: 1, + totalTolerated: ds, + exclusiveFailed: exclusive.Seconds(), + min: ds, + max: ds, + sumSquares: ds * ds, + } +} + +type metric struct { + forced metricForce + data metricData +} + +type metricTable struct { + metricPeriodStart time.Time + failedHarvests int + maxTableSize int // After this max is reached, only forced metrics are added + numDropped int // Number of unforced metrics dropped due to full table + metrics map[metricID]*metric +} + +func newMetricTable(maxTableSize int, now time.Time) *metricTable { + return &metricTable{ + metricPeriodStart: now, + metrics: make(map[metricID]*metric), + maxTableSize: maxTableSize, + failedHarvests: 0, + } +} + +func (mt *metricTable) full() bool { + return len(mt.metrics) >= mt.maxTableSize +} + +func (data *metricData) aggregate(src metricData) { + data.countSatisfied += src.countSatisfied + data.totalTolerated += src.totalTolerated + data.exclusiveFailed += src.exclusiveFailed + + if src.min < data.min { + data.min = src.min + } + if src.max > data.max { + data.max = src.max + } + + data.sumSquares += src.sumSquares +} + +func (mt *metricTable) mergeMetric(id metricID, m metric) { + if to := mt.metrics[id]; nil != to { + to.data.aggregate(m.data) + return + } + + 
	if mt.full() && (unforced == m.forced) {
		// Table is at capacity: only forced metrics are still admitted;
		// count the drop for supportability reporting.
		mt.numDropped++
		return
	}
	// NOTE: `new` is used in place of `&m` since the latter will make `m`
	// get heap allocated regardless of whether or not this line gets
	// reached (running go version go1.5 darwin/amd64). See
	// BenchmarkAddingSameMetrics.
	alloc := new(metric)
	*alloc = m
	mt.metrics[id] = alloc
}

// mergeFailed merges the metrics of a table whose harvest failed back into
// mt, carrying over the failure count.  Once failedMetricAttemptsLimit
// consecutive attempts have failed, the data is discarded.
func (mt *metricTable) mergeFailed(from *metricTable) {
	fails := from.failedHarvests + 1
	if fails >= failedMetricAttemptsLimit {
		return
	}
	// Keep the earliest period start so the merged table spans all of the
	// unharvested data.
	if from.metricPeriodStart.Before(mt.metricPeriodStart) {
		mt.metricPeriodStart = from.metricPeriodStart
	}
	mt.failedHarvests = fails
	mt.merge(from, "")
}

// merge aggregates every metric in from into mt.  If newScope is non-empty
// it replaces the scope of each merged metric.
func (mt *metricTable) merge(from *metricTable, newScope string) {
	if "" == newScope {
		for id, m := range from.metrics {
			mt.mergeMetric(id, *m)
		}
	} else {
		for id, m := range from.metrics {
			mt.mergeMetric(metricID{Name: id.Name, Scope: newScope}, *m)
		}
	}
}

// add records a single metric data point under the given name and scope.
func (mt *metricTable) add(name, scope string, data metricData, force metricForce) {
	mt.mergeMetric(metricID{Name: name, Scope: scope}, metric{data: data, forced: force})
}

// addCount records an unscoped count-only metric.
func (mt *metricTable) addCount(name string, count float64, force metricForce) {
	mt.add(name, "", metricData{countSatisfied: count}, force)
}

// addSingleCount records an unscoped metric with a count of one.
func (mt *metricTable) addSingleCount(name string, force metricForce) {
	mt.addCount(name, float64(1), force)
}

// addDuration records a timed metric built from total and exclusive
// durations.
func (mt *metricTable) addDuration(name, scope string, duration, exclusive time.Duration, force metricForce) {
	mt.add(name, scope, metricDataFromDuration(duration, exclusive), force)
}

// addValueExclusive records a value metric with an explicit exclusive
// component.
func (mt *metricTable) addValueExclusive(name, scope string, total, exclusive float64, force metricForce) {
	data := metricData{
		countSatisfied:  1,
		totalTolerated:  total,
		exclusiveFailed: exclusive,
		min:             total,
		max:             total,
		sumSquares:      total * total,
	}
	mt.add(name, scope, data, force)
}

// addValue records a value metric whose exclusive component equals its
// total.
func (mt *metricTable) addValue(name, scope string, total float64, force metricForce) {
	mt.addValueExclusive(name, scope, total, total, force)
}

// addApdex records an apdex metric: exactly one of the three count fields
// is incremented depending on the zone, while min and max carry the apdex
// threshold in seconds.
func (mt *metricTable) addApdex(name, scope string, apdexThreshold time.Duration, zone ApdexZone, force metricForce) {
	apdexSeconds := apdexThreshold.Seconds()
	data := metricData{min: apdexSeconds, max: apdexSeconds}

	switch zone {
	case ApdexSatisfying:
		data.countSatisfied = 1
	case ApdexTolerating:
		data.totalTolerated = 1
	case ApdexFailing:
		data.exclusiveFailed = 1
	}

	mt.add(name, scope, data, force)
}

// CollectorJSON marshals the table into the collector's metric_data wire
// format: [agentRunID, periodStart, periodEnd, [[{"name",...}, [6 stats]],
// ...]].  It returns (nil, nil) when the table is empty so that no rpm
// request is made.
func (mt *metricTable) CollectorJSON(agentRunID string, now time.Time) ([]byte, error) {
	if 0 == len(mt.metrics) {
		return nil, nil
	}
	// Pre-size the buffer with a rough per-metric estimate to limit
	// growth copies.
	estimatedBytesPerMetric := 128
	estimatedLen := len(mt.metrics) * estimatedBytesPerMetric
	buf := bytes.NewBuffer(make([]byte, 0, estimatedLen))
	buf.WriteByte('[')

	jsonx.AppendString(buf, agentRunID)
	buf.WriteByte(',')
	jsonx.AppendInt(buf, mt.metricPeriodStart.Unix())
	buf.WriteByte(',')
	jsonx.AppendInt(buf, now.Unix())
	buf.WriteByte(',')

	buf.WriteByte('[')
	first := true
	for id, metric := range mt.metrics {
		if first {
			first = false
		} else {
			buf.WriteByte(',')
		}
		buf.WriteByte('[')
		buf.WriteByte('{')
		buf.WriteString(`"name":`)
		jsonx.AppendString(buf, id.Name)
		if id.Scope != "" {
			buf.WriteString(`,"scope":`)
			jsonx.AppendString(buf, id.Scope)
		}
		buf.WriteByte('}')
		buf.WriteByte(',')

		// The six summary statistics, in the order the collector
		// expects.
		jsonx.AppendFloatArray(buf,
			metric.data.countSatisfied,
			metric.data.totalTolerated,
			metric.data.exclusiveFailed,
			metric.data.min,
			metric.data.max,
			metric.data.sumSquares)

		buf.WriteByte(']')
	}
	buf.WriteByte(']')

	buf.WriteByte(']')
	return buf.Bytes(), nil
}

// Data returns the collector payload for this harvest cycle.
func (mt *metricTable) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
	return mt.CollectorJSON(agentRunID, harvestStart)
}

// MergeIntoHarvest folds these metrics into the next harvest after a
// failed delivery attempt.
func (mt *metricTable) MergeIntoHarvest(h *Harvest) {
	h.Metrics.mergeFailed(mt)
}

// ApplyRules returns a table with the collector's metric rename rules
// applied to each metric name, caching the per-name result.  Metrics whose
// name maps to the empty string are dropped.
func (mt *metricTable) ApplyRules(rules metricRules) *metricTable {
	if nil == rules {
		return
mt + } + if len(rules) == 0 { + return mt + } + + applied := newMetricTable(mt.maxTableSize, mt.metricPeriodStart) + cache := make(map[string]string) + + for id, m := range mt.metrics { + out, ok := cache[id.Name] + if !ok { + out = rules.Apply(id.Name) + cache[id.Name] = out + } + + if "" != out { + applied.mergeMetric(metricID{Name: out, Scope: id.Scope}, *m) + } + } + + return applied +} diff --git a/vendor/github.com/newrelic/go-agent/internal/queuing.go b/vendor/github.com/newrelic/go-agent/internal/queuing.go new file mode 100644 index 0000000000..cc361f8208 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/queuing.go @@ -0,0 +1,72 @@ +package internal + +import ( + "net/http" + "strconv" + "strings" + "time" +) + +const ( + xRequestStart = "X-Request-Start" + xQueueStart = "X-Queue-Start" +) + +var ( + earliestAcceptableSeconds = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix() + latestAcceptableSeconds = time.Date(2050, time.January, 1, 0, 0, 0, 0, time.UTC).Unix() +) + +func checkQueueTimeSeconds(secondsFloat float64) time.Time { + seconds := int64(secondsFloat) + nanos := int64((secondsFloat - float64(seconds)) * (1000.0 * 1000.0 * 1000.0)) + if seconds > earliestAcceptableSeconds && seconds < latestAcceptableSeconds { + return time.Unix(seconds, nanos) + } + return time.Time{} +} + +func parseQueueTime(s string) time.Time { + f, err := strconv.ParseFloat(s, 64) + if nil != err { + return time.Time{} + } + if f <= 0 { + return time.Time{} + } + + // try microseconds + if t := checkQueueTimeSeconds(f / (1000.0 * 1000.0)); !t.IsZero() { + return t + } + // try milliseconds + if t := checkQueueTimeSeconds(f / (1000.0)); !t.IsZero() { + return t + } + // try seconds + if t := checkQueueTimeSeconds(f); !t.IsZero() { + return t + } + return time.Time{} +} + +// QueueDuration TODO +func QueueDuration(hdr http.Header, txnStart time.Time) time.Duration { + s := hdr.Get(xQueueStart) + if "" == s { + s = hdr.Get(xRequestStart) + } + if "" 
== s { + return 0 + } + + s = strings.TrimPrefix(s, "t=") + qt := parseQueueTime(s) + if qt.IsZero() { + return 0 + } + if qt.After(txnStart) { + return 0 + } + return txnStart.Sub(qt) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sampler.go b/vendor/github.com/newrelic/go-agent/internal/sampler.go new file mode 100644 index 0000000000..d78cdc6405 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sampler.go @@ -0,0 +1,145 @@ +package internal + +import ( + "runtime" + "time" + + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/sysinfo" +) + +// Sample is a system/runtime snapshot. +type Sample struct { + when time.Time + memStats runtime.MemStats + usage sysinfo.Usage + numGoroutine int + numCPU int +} + +func bytesToMebibytesFloat(bts uint64) float64 { + return float64(bts) / (1024 * 1024) +} + +// GetSample gathers a new Sample. +func GetSample(now time.Time, lg logger.Logger) *Sample { + s := Sample{ + when: now, + numGoroutine: runtime.NumGoroutine(), + numCPU: runtime.NumCPU(), + } + + if usage, err := sysinfo.GetUsage(); err == nil { + s.usage = usage + } else { + lg.Warn("unable to usage", map[string]interface{}{ + "error": err.Error(), + }) + } + + runtime.ReadMemStats(&s.memStats) + + return &s +} + +type cpuStats struct { + used time.Duration + fraction float64 // used / (elapsed * numCPU) +} + +// Stats contains system information for a period of time. +type Stats struct { + numGoroutine int + allocBytes uint64 + heapObjects uint64 + user cpuStats + system cpuStats + gcPauseFraction float64 + deltaNumGC uint32 + deltaPauseTotal time.Duration + minPause time.Duration + maxPause time.Duration +} + +// Samples is used as the parameter to GetStats to avoid mixing up the previous +// and current sample. +type Samples struct { + Previous *Sample + Current *Sample +} + +// GetStats combines two Samples into a Stats. 
func GetStats(ss Samples) Stats {
	cur := ss.Current
	prev := ss.Previous
	// Wall-clock time between the two samples: the denominator for the
	// CPU-utilization and GC-pause fractions below.
	elapsed := cur.when.Sub(prev.when)

	// Point-in-time values are taken straight from the current sample.
	s := Stats{
		numGoroutine: cur.numGoroutine,
		allocBytes:   cur.memStats.Alloc,
		heapObjects:  cur.memStats.HeapObjects,
	}

	// CPU Utilization
	// Fraction of the total available CPU time (wall time across all
	// CPUs) spent in user/system mode.  The != 0 and > guards skip the
	// computation when the previous reading is zero or the counter did
	// not advance — NOTE(review): presumably this also shields against a
	// missing/reset reading from sysinfo.GetUsage; confirm.
	totalCPUSeconds := elapsed.Seconds() * float64(cur.numCPU)
	if prev.usage.User != 0 && cur.usage.User > prev.usage.User {
		s.user.used = cur.usage.User - prev.usage.User
		s.user.fraction = s.user.used.Seconds() / totalCPUSeconds
	}
	if prev.usage.System != 0 && cur.usage.System > prev.usage.System {
		s.system.used = cur.usage.System - prev.usage.System
		s.system.fraction = s.system.used.Seconds() / totalCPUSeconds
	}

	// GC Pause Fraction: share of the elapsed wall time spent in GC
	// pauses.
	deltaPauseTotalNs := cur.memStats.PauseTotalNs - prev.memStats.PauseTotalNs
	frac := float64(deltaPauseTotalNs) / float64(elapsed.Nanoseconds())
	s.gcPauseFraction = frac

	// GC Pauses
	if deltaNumGC := cur.memStats.NumGC - prev.memStats.NumGC; deltaNumGC > 0 {
		// In case more than 256 pauses have happened between samples
		// and we are examining a subset of the pauses, we ensure that
		// the min and max are not on the same side of the average by
		// using the average as the starting min and max.
		maxPauseNs := deltaPauseTotalNs / uint64(deltaNumGC)
		minPauseNs := deltaPauseTotalNs / uint64(deltaNumGC)
		// runtime.MemStats.PauseNs is a circular buffer of recent GC
		// pause durations; pause number i lives at index (i+255)%256.
		for i := prev.memStats.NumGC + 1; i <= cur.memStats.NumGC; i++ {
			pause := cur.memStats.PauseNs[(i+255)%256]
			if pause > maxPauseNs {
				maxPauseNs = pause
			}
			if pause < minPauseNs {
				minPauseNs = pause
			}
		}
		s.deltaPauseTotal = time.Duration(deltaPauseTotalNs) * time.Nanosecond
		s.deltaNumGC = deltaNumGC
		s.minPause = time.Duration(minPauseNs) * time.Nanosecond
		s.maxPause = time.Duration(maxPauseNs) * time.Nanosecond
	}

	return s
}

// MergeIntoHarvest implements Harvestable.
+func (s Stats) MergeIntoHarvest(h *Harvest) { + h.Metrics.addValue(heapObjectsAllocated, "", float64(s.heapObjects), forced) + h.Metrics.addValue(runGoroutine, "", float64(s.numGoroutine), forced) + h.Metrics.addValueExclusive(memoryPhysical, "", bytesToMebibytesFloat(s.allocBytes), 0, forced) + h.Metrics.addValueExclusive(cpuUserUtilization, "", s.user.fraction, 0, forced) + h.Metrics.addValueExclusive(cpuSystemUtilization, "", s.system.fraction, 0, forced) + h.Metrics.addValue(cpuUserTime, "", s.user.used.Seconds(), forced) + h.Metrics.addValue(cpuSystemTime, "", s.system.used.Seconds(), forced) + h.Metrics.addValueExclusive(gcPauseFraction, "", s.gcPauseFraction, 0, forced) + if s.deltaNumGC > 0 { + h.Metrics.add(gcPauses, "", metricData{ + countSatisfied: float64(s.deltaNumGC), + totalTolerated: s.deltaPauseTotal.Seconds(), + exclusiveFailed: 0, + min: s.minPause.Seconds(), + max: s.maxPause.Seconds(), + sumSquares: s.deltaPauseTotal.Seconds() * s.deltaPauseTotal.Seconds(), + }, forced) + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/segment_terms.go b/vendor/github.com/newrelic/go-agent/internal/segment_terms.go new file mode 100644 index 0000000000..a0fd1f2e66 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/segment_terms.go @@ -0,0 +1,145 @@ +package internal + +// https://newrelic.atlassian.net/wiki/display/eng/Language+agent+transaction+segment+terms+rules + +import ( + "encoding/json" + "strings" +) + +const ( + placeholder = "*" + separator = "/" +) + +type segmentRule struct { + Prefix string `json:"prefix"` + Terms []string `json:"terms"` + TermsMap map[string]struct{} +} + +// segmentRules is keyed by each segmentRule's Prefix field with any trailing +// slash removed. 
+type segmentRules map[string]*segmentRule + +func buildTermsMap(terms []string) map[string]struct{} { + m := make(map[string]struct{}, len(terms)) + for _, t := range terms { + m[t] = struct{}{} + } + return m +} + +func (rules *segmentRules) UnmarshalJSON(b []byte) error { + var raw []*segmentRule + + if err := json.Unmarshal(b, &raw); nil != err { + return err + } + + rs := make(map[string]*segmentRule) + + for _, rule := range raw { + prefix := strings.TrimSuffix(rule.Prefix, "/") + if len(strings.Split(prefix, "/")) != 2 { + // TODO + // Warn("invalid segment term rule prefix", + // {"prefix": rule.Prefix}) + continue + } + + if nil == rule.Terms { + // TODO + // Warn("segment term rule has missing terms", + // {"prefix": rule.Prefix}) + continue + } + + rule.TermsMap = buildTermsMap(rule.Terms) + + rs[prefix] = rule + } + + *rules = rs + return nil +} + +func (rule *segmentRule) apply(name string) string { + if !strings.HasPrefix(name, rule.Prefix) { + return name + } + + s := strings.TrimPrefix(name, rule.Prefix) + + leadingSlash := "" + if strings.HasPrefix(s, separator) { + leadingSlash = separator + s = strings.TrimPrefix(s, separator) + } + + if "" != s { + segments := strings.Split(s, separator) + + for i, segment := range segments { + _, whitelisted := rule.TermsMap[segment] + if whitelisted { + segments[i] = segment + } else { + segments[i] = placeholder + } + } + + segments = collapsePlaceholders(segments) + s = strings.Join(segments, separator) + } + + return rule.Prefix + leadingSlash + s +} + +func (rules segmentRules) apply(name string) string { + if nil == rules { + return name + } + + rule, ok := rules[firstTwoSegments(name)] + if !ok { + return name + } + + return rule.apply(name) +} + +func firstTwoSegments(name string) string { + firstSlashIdx := strings.Index(name, separator) + if firstSlashIdx == -1 { + return name + } + + secondSlashIdx := strings.Index(name[firstSlashIdx+1:], separator) + if secondSlashIdx == -1 { + return name + } + + 
return name[0 : firstSlashIdx+secondSlashIdx+1] +} + +func collapsePlaceholders(segments []string) []string { + j := 0 + prevStar := false + for i := 0; i < len(segments); i++ { + segment := segments[i] + if placeholder == segment { + if prevStar { + continue + } + segments[j] = placeholder + j++ + prevStar = true + } else { + segments[j] = segment + j++ + prevStar = false + } + } + return segments[0:j] +} diff --git a/vendor/github.com/newrelic/go-agent/internal/slow_queries.go b/vendor/github.com/newrelic/go-agent/internal/slow_queries.go new file mode 100644 index 0000000000..dec2a09166 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/slow_queries.go @@ -0,0 +1,254 @@ +package internal + +import ( + "bytes" + "container/heap" + "hash/fnv" + "time" + + "github.com/newrelic/go-agent/internal/jsonx" +) + +type queryParameters map[string]interface{} + +func vetQueryParameters(params map[string]interface{}) queryParameters { + if nil == params { + return nil + } + // Copying the parameters into a new map is safer than modifying the map + // from the customer. + vetted := make(map[string]interface{}) + for key, val := range params { + if err := validAttributeKey(key); nil != err { + continue + } + val = truncateStringValueIfLongInterface(val) + if err := valueIsValid(val); nil != err { + continue + } + vetted[key] = val + } + return queryParameters(vetted) +} + +func (q queryParameters) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('{') + w := jsonFieldsWriter{buf: buf} + for key, val := range q { + writeAttributeValueJSON(&w, key, val) + } + buf.WriteByte('}') +} + +// https://source.datanerd.us/agents/agent-specs/blob/master/Slow-SQLs-LEGACY.md + +// slowQueryInstance represents a single datastore call. 
type slowQueryInstance struct {
	// Fields populated right after the datastore segment finishes:

	Duration           time.Duration
	DatastoreMetric    string
	ParameterizedQuery string
	QueryParameters    queryParameters
	Host               string
	PortPathOrID       string
	DatabaseName       string
	StackTrace         *StackTrace

	// Fields populated when merging into the harvest:

	TxnName string
	TxnURL  string
}

// Aggregation is performed to avoid reporting multiple slow queries with same
// query string. Since some datastore segments may be below the slow query
// threshold, the aggregation fields Count, Total, and Min should be taken with
// a grain of salt.
type slowQuery struct {
	Count int32         // number of times the query has been observed
	Total time.Duration // cumulative duration
	Min   time.Duration // minimum observed duration

	// When Count > 1, slowQueryInstance contains values from the slowest
	// observation.
	slowQueryInstance
}

// slowQueries is a bounded min-heap keyed on Duration: the fastest retained
// query sits at priorityQueue[0] so it can be evicted cheaply when a slower
// query arrives.
type slowQueries struct {
	priorityQueue []*slowQuery
	// lookup maps query strings to indices in the priorityQueue
	lookup map[string]int
}

// Len implements heap.Interface.
func (slows *slowQueries) Len() int {
	return len(slows.priorityQueue)
}

// Less implements heap.Interface, ordering by Duration so the heap is a
// min-heap on the slowest observation of each query.
func (slows *slowQueries) Less(i, j int) bool {
	pq := slows.priorityQueue
	return pq[i].Duration < pq[j].Duration
}

// Swap implements heap.Interface and keeps the lookup map in sync with the
// elements' new positions.
func (slows *slowQueries) Swap(i, j int) {
	pq := slows.priorityQueue
	si := pq[i]
	sj := pq[j]
	pq[i], pq[j] = pq[j], pq[i]
	slows.lookup[si.ParameterizedQuery] = j
	slows.lookup[sj.ParameterizedQuery] = i
}

// Push and Pop are unused: only heap.Init and heap.Fix are used.
func (slows *slowQueries) Push(x interface{}) {}
func (slows *slowQueries) Pop() interface{}   { return nil }

// newSlowQueries creates an empty collection that retains at most max slow
// queries.
func newSlowQueries(max int) *slowQueries {
	return &slowQueries{
		lookup:        make(map[string]int, max),
		priorityQueue: make([]*slowQuery, 0, max),
	}
}

// Merge is used to merge slow queries from the transaction into the harvest.
func (slows *slowQueries) Merge(other *slowQueries, txnName, txnURL string) {
	for _, s := range other.priorityQueue {
		// Copy before annotating so the transaction's collection is not
		// mutated.
		cp := *s
		cp.TxnName = txnName
		cp.TxnURL = txnURL
		slows.observe(cp)
	}
}

// merge aggregates the observations from two slow queries with the same Query.
func (slow *slowQuery) merge(other slowQuery) {
	slow.Count += other.Count
	slow.Total += other.Total

	if other.Min < slow.Min {
		slow.Min = other.Min
	}
	// Keep the instance details (params, stack trace, host, ...) from the
	// slowest observation.
	if other.Duration > slow.Duration {
		slow.slowQueryInstance = other.slowQueryInstance
	}
}

// observeInstance records a single datastore call as a slowQuery with a count
// of one.
func (slows *slowQueries) observeInstance(slow slowQueryInstance) {
	slows.observe(slowQuery{
		Count:             1,
		Total:             slow.Duration,
		Min:               slow.Duration,
		slowQueryInstance: slow,
	})
}

// insertAtIndex stores a copy of slow at position idx of the priority queue,
// records it in the lookup map, and restores the heap invariant.
func (slows *slowQueries) insertAtIndex(slow slowQuery, idx int) {
	cpy := new(slowQuery)
	*cpy = slow
	slows.priorityQueue[idx] = cpy
	slows.lookup[slow.ParameterizedQuery] = idx
	heap.Fix(slows, idx)
}

// observe merges slow into the collection, evicting the fastest retained
// query if the collection is at capacity and slow is slower than it.
func (slows *slowQueries) observe(slow slowQuery) {
	// Has the query been previously observed?
	if idx, ok := slows.lookup[slow.ParameterizedQuery]; ok {
		slows.priorityQueue[idx].merge(slow)
		heap.Fix(slows, idx)
		return
	}
	// Has the collection reached max capacity?
	if len(slows.priorityQueue) < cap(slows.priorityQueue) {
		idx := len(slows.priorityQueue)
		slows.priorityQueue = slows.priorityQueue[0 : idx+1]
		slows.insertAtIndex(slow, idx)
		return
	}
	// Is this query slower than the existing fastest?
	fastest := slows.priorityQueue[0]
	if slow.Duration > fastest.Duration {
		delete(slows.lookup, fastest.ParameterizedQuery)
		slows.insertAtIndex(slow, 0)
		return
	}
}

// The third element of the slow query JSON should be a hash of the query
// string. This hash may be used by backend services to aggregate queries which
// have the same query string. It is unknown if this is actually used.
+func makeSlowQueryID(query string) uint32 { + h := fnv.New32a() + h.Write([]byte(query)) + return h.Sum32() +} + +func (slow *slowQuery) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + jsonx.AppendString(buf, slow.TxnName) + buf.WriteByte(',') + jsonx.AppendString(buf, slow.TxnURL) + buf.WriteByte(',') + jsonx.AppendInt(buf, int64(makeSlowQueryID(slow.ParameterizedQuery))) + buf.WriteByte(',') + jsonx.AppendString(buf, slow.ParameterizedQuery) + buf.WriteByte(',') + jsonx.AppendString(buf, slow.DatastoreMetric) + buf.WriteByte(',') + jsonx.AppendInt(buf, int64(slow.Count)) + buf.WriteByte(',') + jsonx.AppendFloat(buf, slow.Total.Seconds()*1000.0) + buf.WriteByte(',') + jsonx.AppendFloat(buf, slow.Min.Seconds()*1000.0) + buf.WriteByte(',') + jsonx.AppendFloat(buf, slow.Duration.Seconds()*1000.0) + buf.WriteByte(',') + w := jsonFieldsWriter{buf: buf} + buf.WriteByte('{') + if "" != slow.Host { + w.stringField("host", slow.Host) + } + if "" != slow.PortPathOrID { + w.stringField("port_path_or_id", slow.PortPathOrID) + } + if "" != slow.DatabaseName { + w.stringField("database_name", slow.DatabaseName) + } + if nil != slow.StackTrace { + w.writerField("backtrace", slow.StackTrace) + } + if nil != slow.QueryParameters { + w.writerField("query_parameters", slow.QueryParameters) + } + buf.WriteByte('}') + buf.WriteByte(']') +} + +// WriteJSON marshals the collection of slow queries into JSON according to the +// schema expected by the collector. +// +// Note: This JSON does not contain the agentRunID. This is for unknown +// historical reasons. Since the agentRunID is included in the url, +// its use in the other commands' JSON is redundant (although required). 
+func (slows *slowQueries) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + buf.WriteByte('[') + for idx, s := range slows.priorityQueue { + if idx > 0 { + buf.WriteByte(',') + } + s.WriteJSON(buf) + } + buf.WriteByte(']') + buf.WriteByte(']') +} + +func (slows *slowQueries) Data(agentRunID string, harvestStart time.Time) ([]byte, error) { + if 0 == len(slows.priorityQueue) { + return nil, nil + } + estimate := 1024 * len(slows.priorityQueue) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + slows.WriteJSON(buf) + return buf.Bytes(), nil +} + +func (slows *slowQueries) MergeIntoHarvest(newHarvest *Harvest) { +} diff --git a/vendor/github.com/newrelic/go-agent/internal/stacktrace.go b/vendor/github.com/newrelic/go-agent/internal/stacktrace.go new file mode 100644 index 0000000000..8e61f88605 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/stacktrace.go @@ -0,0 +1,82 @@ +package internal + +import ( + "bytes" + "path" + "runtime" +) + +// StackTrace is a stack trace. +type StackTrace struct { + callers []uintptr + written int +} + +// GetStackTrace returns a new StackTrace. +func GetStackTrace(skipFrames int) *StackTrace { + st := &StackTrace{} + + skip := 2 // skips runtime.Callers and this function + skip += skipFrames + + st.callers = make([]uintptr, maxStackTraceFrames) + st.written = runtime.Callers(skip, st.callers) + st.callers = st.callers[0:st.written] + + return st +} + +func pcToFunc(pc uintptr) (*runtime.Func, uintptr) { + // The Golang runtime package documentation says "To look up the file + // and line number of the call itself, use pc[i]-1. As an exception to + // this rule, if pc[i-1] corresponds to the function runtime.sigpanic, + // then pc[i] is the program counter of a faulting instruction and + // should be used without any subtraction." + // + // TODO: Fully understand when this subtraction is necessary. 
+ place := pc - 1 + return runtime.FuncForPC(place), place +} + +func topCallerNameBase(st *StackTrace) string { + f, _ := pcToFunc(st.callers[0]) + if nil == f { + return "" + } + return path.Base(f.Name()) +} + +// WriteJSON adds the stack trace to the buffer in the JSON form expected by the +// collector. +func (st *StackTrace) WriteJSON(buf *bytes.Buffer) { + buf.WriteByte('[') + for i, pc := range st.callers { + if i > 0 { + buf.WriteByte(',') + } + // Implements the format documented here: + // https://source.datanerd.us/agents/agent-specs/blob/master/Stack-Traces.md + buf.WriteByte('{') + if f, place := pcToFunc(pc); nil != f { + name := path.Base(f.Name()) + file, line := f.FileLine(place) + + w := jsonFieldsWriter{buf: buf} + w.stringField("filepath", file) + w.stringField("name", name) + w.intField("line", int64(line)) + } + buf.WriteByte('}') + } + buf.WriteByte(']') +} + +// MarshalJSON prepares JSON in the format expected by the collector. +func (st *StackTrace) MarshalJSON() ([]byte, error) { + estimate := 256 * len(st.callers) + buf := bytes.NewBuffer(make([]byte, 0, estimate)) + + st.WriteJSON(buf) + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go new file mode 100644 index 0000000000..f031c76dd6 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/docker.go @@ -0,0 +1,124 @@ +package sysinfo + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" +) + +var ( + // ErrDockerUnsupported is returned if Docker is not supported on the + // platform. + ErrDockerUnsupported = errors.New("Docker unsupported on this platform") + // ErrDockerNotFound is returned if a Docker ID is not found in + // /proc/self/cgroup + ErrDockerNotFound = errors.New("Docker ID not found") +) + +// DockerID attempts to detect Docker. 
+func DockerID() (string, error) { + if "linux" != runtime.GOOS { + return "", ErrDockerUnsupported + } + + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return "", err + } + defer f.Close() + + return parseDockerID(f) +} + +var ( + dockerIDLength = 64 + dockerIDRegexRaw = fmt.Sprintf("^[0-9a-f]{%d}$", dockerIDLength) + dockerIDRegex = regexp.MustCompile(dockerIDRegexRaw) +) + +func parseDockerID(r io.Reader) (string, error) { + // Each line in the cgroup file consists of three colon delimited fields. + // 1. hierarchy ID - we don't care about this + // 2. subsystems - comma separated list of cgroup subsystem names + // 3. control group - control group to which the process belongs + // + // Example + // 5:cpuacct,cpu,cpuset:/daemons + + for scanner := bufio.NewScanner(r); scanner.Scan(); { + line := scanner.Bytes() + cols := bytes.SplitN(line, []byte(":"), 3) + + if len(cols) < 3 { + continue + } + + // We're only interested in the cpu subsystem. + if !isCPUCol(cols[1]) { + continue + } + + // We're only interested in Docker generated cgroups. 
+ // Reference Implementation: + // case cpu_cgroup + // # docker native driver w/out systemd (fs) + // when %r{^/docker/([0-9a-f]+)$} then $1 + // # docker native driver with systemd + // when %r{^/system\.slice/docker-([0-9a-f]+)\.scope$} then $1 + // # docker lxc driver + // when %r{^/lxc/([0-9a-f]+)$} then $1 + // + var id string + if bytes.HasPrefix(cols[2], []byte("/docker/")) { + id = string(cols[2][len("/docker/"):]) + } else if bytes.HasPrefix(cols[2], []byte("/lxc/")) { + id = string(cols[2][len("/lxc/"):]) + } else if bytes.HasPrefix(cols[2], []byte("/system.slice/docker-")) && + bytes.HasSuffix(cols[2], []byte(".scope")) { + id = string(cols[2][len("/system.slice/docker-") : len(cols[2])-len(".scope")]) + } else { + continue + } + + if err := validateDockerID(id); err != nil { + // We can stop searching at this point, the CPU + // subsystem should only occur once, and its cgroup is + // not docker or not a format we accept. + return "", err + } + return id, nil + } + + return "", ErrDockerNotFound +} + +func isCPUCol(col []byte) bool { + // Sometimes we have multiple subsystems in one line, as in this example + // from: + // https://source.datanerd.us/newrelic/cross_agent_tests/blob/master/docker_container_id/docker-1.1.2-native-driver-systemd.txt + // + // 3:cpuacct,cpu:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope + splitCSV := func(r rune) bool { return r == ',' } + subsysCPU := []byte("cpu") + + for _, subsys := range bytes.FieldsFunc(col, splitCSV) { + if bytes.Equal(subsysCPU, subsys) { + return true + } + } + return false +} + +func validateDockerID(id string) error { + if !dockerIDRegex.MatchString(id) { + return fmt.Errorf("%s does not match %s", + id, dockerIDRegexRaw) + } + + return nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go new file mode 100644 index 0000000000..ccef4fcab5 
--- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_generic.go @@ -0,0 +1,10 @@ +// +build !linux + +package sysinfo + +import "os" + +// Hostname returns the host name. +func Hostname() (string, error) { + return os.Hostname() +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go new file mode 100644 index 0000000000..e2300854d0 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/hostname_linux.go @@ -0,0 +1,50 @@ +package sysinfo + +import ( + "os" + "syscall" +) + +// Hostname returns the host name. +func Hostname() (string, error) { + // Try the builtin API first, which is designed to match the output of + // /bin/hostname, and fallback to uname(2) if that fails to match the + // behavior of gethostname(2) as implemented by glibc. On Linux, all + // these method should result in the same value because sethostname(2) + // limits the hostname to 64 bytes, the same size of the nodename field + // returned by uname(2). Note that is correspondence is not true on + // other platforms. + // + // os.Hostname failures should be exceedingly rare, however some systems + // configure SELinux to deny read access to /proc/sys/kernel/hostname. + // Redhat's OpenShift platform for example. os.Hostname can also fail if + // some or all of /proc has been hidden via chroot(2) or manipulation of + // the current processes' filesystem namespace via the cgroups APIs. + // Docker is an example of a tool that can configure such an + // environment. + name, err := os.Hostname() + if err == nil { + return name, nil + } + + var uts syscall.Utsname + if err2 := syscall.Uname(&uts); err2 != nil { + // The man page documents only one possible error for uname(2), + // suggesting that as long as the buffer given is valid, the + // call will never fail. 
Return the original error in the hope + // it provides more relevant information about why the hostname + // can't be retrieved. + return "", err + } + + // Convert Nodename to a Go string. + buf := make([]byte, 0, len(uts.Nodename)) + for _, c := range uts.Nodename { + if c == 0 { + break + } + buf = append(buf, byte(c)) + } + + return string(buf), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go new file mode 100644 index 0000000000..0763ee301a --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal.go @@ -0,0 +1,40 @@ +package sysinfo + +import ( + "bufio" + "errors" + "io" + "regexp" + "strconv" +) + +// BytesToMebibytes converts bytes into mebibytes. +func BytesToMebibytes(bts uint64) uint64 { + return bts / ((uint64)(1024 * 1024)) +} + +var ( + meminfoRe = regexp.MustCompile(`^MemTotal:\s+([0-9]+)\s+[kK]B$`) + errMemTotalNotFound = errors.New("supported MemTotal not found in /proc/meminfo") +) + +// parseProcMeminfo is used to parse Linux's "/proc/meminfo". It is located +// here so that the relevant cross agent tests will be run on all platforms. 
+func parseProcMeminfo(f io.Reader) (uint64, error) { + scanner := bufio.NewScanner(f) + for scanner.Scan() { + if m := meminfoRe.FindSubmatch(scanner.Bytes()); m != nil { + kb, err := strconv.ParseUint(string(m[1]), 10, 64) + if err != nil { + return 0, err + } + return kb * 1024, nil + } + } + + err := scanner.Err() + if err == nil { + err = errMemTotalNotFound + } + return 0, err +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go new file mode 100644 index 0000000000..3c40f42d5d --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_darwin.go @@ -0,0 +1,29 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + mib := []int32{6 /* CTL_HW */, 24 /* HW_MEMSIZE */} + + buf := make([]byte, 8) + bufLen := uintptr(8) + + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), + uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)), + uintptr(0), uintptr(0)) + + if e1 != 0 { + return 0, e1 + } + + if bufLen != 8 { + return 0, syscall.EIO + } + + return *(*uint64)(unsafe.Pointer(&buf[0])), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go new file mode 100644 index 0000000000..2e82320ac7 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_freebsd.go @@ -0,0 +1,32 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. 
+func PhysicalMemoryBytes() (uint64, error) { + mib := []int32{6 /* CTL_HW */, 5 /* HW_PHYSMEM */} + + buf := make([]byte, 8) + bufLen := uintptr(8) + + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), + uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)), + uintptr(0), uintptr(0)) + + if e1 != 0 { + return 0, e1 + } + + switch bufLen { + case 4: + return uint64(*(*uint32)(unsafe.Pointer(&buf[0]))), nil + case 8: + return *(*uint64)(unsafe.Pointer(&buf[0])), nil + default: + return 0, syscall.EIO + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go new file mode 100644 index 0000000000..958e569937 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_linux.go @@ -0,0 +1,14 @@ +package sysinfo + +import "os" + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + f, err := os.Open("/proc/meminfo") + if err != nil { + return 0, err + } + defer f.Close() + + return parseProcMeminfo(f) +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go new file mode 100644 index 0000000000..4f1c818e55 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_solaris.go @@ -0,0 +1,26 @@ +package sysinfo + +/* +#include +*/ +import "C" + +// PhysicalMemoryBytes returns the total amount of host memory. 
+func PhysicalMemoryBytes() (uint64, error) { + // The function we're calling on Solaris is + // long sysconf(int name); + var pages C.long + var pagesizeBytes C.long + var err error + + pagesizeBytes, err = C.sysconf(C._SC_PAGE_SIZE) + if pagesizeBytes < 1 { + return 0, err + } + pages, err = C.sysconf(C._SC_PHYS_PAGES) + if pages < 1 { + return 0, err + } + + return uint64(pages) * uint64(pagesizeBytes), nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go new file mode 100644 index 0000000000..b211317e1f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/memtotal_windows.go @@ -0,0 +1,23 @@ +package sysinfo + +import ( + "syscall" + "unsafe" +) + +// PhysicalMemoryBytes returns the total amount of host memory. +func PhysicalMemoryBytes() (uint64, error) { + // https://msdn.microsoft.com/en-us/library/windows/desktop/cc300158(v=vs.85).aspx + // http://stackoverflow.com/questions/30743070/query-total-physical-memory-in-windows-with-golang + mod := syscall.NewLazyDLL("kernel32.dll") + proc := mod.NewProc("GetPhysicallyInstalledSystemMemory") + var memkb uint64 + + ret, _, err := proc.Call(uintptr(unsafe.Pointer(&memkb))) + // return value TRUE(1) succeeds, FAILED(0) fails + if ret != 1 { + return 0, err + } + + return memkb * 1024, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go new file mode 100644 index 0000000000..071049edab --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage.go @@ -0,0 +1,11 @@ +package sysinfo + +import ( + "time" +) + +// Usage contains process process times. 
+type Usage struct { + System time.Duration + User time.Duration +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go new file mode 100644 index 0000000000..3f7ab31f73 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_posix.go @@ -0,0 +1,26 @@ +// +build !windows + +package sysinfo + +import ( + "syscall" + "time" +) + +func timevalToDuration(tv syscall.Timeval) time.Duration { + return time.Duration(tv.Nano()) * time.Nanosecond +} + +// GetUsage gathers process times. +func GetUsage() (Usage, error) { + ru := syscall.Rusage{} + err := syscall.Getrusage(syscall.RUSAGE_SELF, &ru) + if err != nil { + return Usage{}, err + } + + return Usage{ + System: timevalToDuration(ru.Stime), + User: timevalToDuration(ru.Utime), + }, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go new file mode 100644 index 0000000000..8a8677a35b --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/sysinfo/usage_windows.go @@ -0,0 +1,34 @@ +package sysinfo + +import ( + "syscall" + "time" +) + +func filetimeToDuration(ft *syscall.Filetime) time.Duration { + ns := ft.Nanoseconds() + return time.Duration(ns) +} + +// GetUsage gathers process times. 
+func GetUsage() (Usage, error) { + var creationTime syscall.Filetime + var exitTime syscall.Filetime + var kernelTime syscall.Filetime + var userTime syscall.Filetime + + handle, err := syscall.GetCurrentProcess() + if err != nil { + return Usage{}, err + } + + err = syscall.GetProcessTimes(handle, &creationTime, &exitTime, &kernelTime, &userTime) + if err != nil { + return Usage{}, err + } + + return Usage{ + System: filetimeToDuration(&kernelTime), + User: filetimeToDuration(&userTime), + }, nil +} diff --git a/vendor/github.com/newrelic/go-agent/internal/tracing.go b/vendor/github.com/newrelic/go-agent/internal/tracing.go new file mode 100644 index 0000000000..3a54f99a6f --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/tracing.go @@ -0,0 +1,413 @@ +package internal + +import ( + "fmt" + "net/url" + "time" + + "github.com/newrelic/go-agent/internal/sysinfo" +) + +type segmentStamp uint64 + +type segmentTime struct { + Stamp segmentStamp + Time time.Time +} + +// SegmentStartTime is embedded into the top level segments (rather than +// segmentTime) to minimize the structure sizes to minimize allocations. +type SegmentStartTime struct { + Stamp segmentStamp + Depth int +} + +type segmentFrame struct { + segmentTime + children time.Duration +} + +type segmentEnd struct { + valid bool + start segmentTime + stop segmentTime + duration time.Duration + exclusive time.Duration +} + +// Tracer tracks segments. 
type Tracer struct {
	// finishedChildren accumulates the durations of completed root-level
	// segments; see TracerRootChildren.
	finishedChildren time.Duration
	// stamp is a monotonically increasing counter used to detect stale
	// SegmentStartTime values (zero is reserved as "never started").
	stamp        segmentStamp
	currentDepth int
	stack        []segmentFrame

	// Per-type aggregated segment metrics, keyed for later merging into
	// the harvest.  Maps are allocated lazily on first use.
	customSegments    map[string]*metricData
	datastoreSegments map[DatastoreMetricKey]*metricData
	externalSegments  map[externalMetricKey]*metricData

	DatastoreExternalTotals

	TxnTrace

	SlowQueriesEnabled bool
	SlowQueryThreshold time.Duration
	SlowQueries        *slowQueries
}

const (
	startingStackDepthAlloc   = 128
	datastoreProductUnknown   = "Unknown"
	datastoreOperationUnknown = "other"
)

// time returns a segmentTime for now tagged with a freshly incremented stamp.
func (t *Tracer) time(now time.Time) segmentTime {
	// Update the stamp before using it so that a 0 stamp can be special.
	t.stamp++
	return segmentTime{
		Time:  now,
		Stamp: t.stamp,
	}
}

// TracerRootChildren is used to calculate a transaction's exclusive duration.
func TracerRootChildren(t *Tracer) time.Duration {
	var lostChildren time.Duration
	// Segments still on the stack have not yet folded their children into
	// finishedChildren, so add their child time explicitly.
	for i := 0; i < t.currentDepth; i++ {
		lostChildren += t.stack[i].children
	}
	return t.finishedChildren + lostChildren
}

// StartSegment begins a segment.
func StartSegment(t *Tracer, now time.Time) SegmentStartTime {
	// Lazily allocate the stack, and double it when full.
	if nil == t.stack {
		t.stack = make([]segmentFrame, startingStackDepthAlloc)
	}
	if cap(t.stack) == t.currentDepth {
		newLimit := 2 * t.currentDepth
		newStack := make([]segmentFrame, newLimit)
		copy(newStack, t.stack)
		t.stack = newStack
	}

	tm := t.time(now)

	depth := t.currentDepth
	t.currentDepth++
	t.stack[depth].children = 0
	t.stack[depth].segmentTime = tm

	// The returned Stamp/Depth pair lets endSegment verify that this exact
	// frame (and not a reused slot) is being ended.
	return SegmentStartTime{
		Stamp: tm.Stamp,
		Depth: depth,
	}
}

// endSegment pops the segment identified by start off the stack and computes
// its total and exclusive durations.  The returned segmentEnd has valid set
// to false if start is zero, stale, or otherwise does not match the frame
// currently on the stack.
func endSegment(t *Tracer, start SegmentStartTime, now time.Time) segmentEnd {
	var s segmentEnd
	// A zero stamp means the segment was never started.
	if 0 == start.Stamp {
		return s
	}
	// A depth at or beyond currentDepth was already popped.
	if start.Depth >= t.currentDepth {
		return s
	}
	if start.Depth < 0 {
		return s
	}
	// A stamp mismatch means this stack slot was reused by a later segment.
	if start.Stamp != t.stack[start.Depth].Stamp {
		return s
	}

	// Sum child time from this frame and any deeper frames being implicitly
	// popped along with it.
	var children time.Duration
	for i := start.Depth; i < t.currentDepth; i++ {
		children += t.stack[i].children
	}
	s.valid = true
	s.stop = t.time(now)
	s.start = t.stack[start.Depth].segmentTime
	if s.stop.Time.After(s.start.Time) {
		s.duration = s.stop.Time.Sub(s.start.Time)
	}
	if s.duration > children {
		s.exclusive = s.duration - children
	}

	// Note that we expect (depth == (t.currentDepth - 1)).  However, if
	// (depth < (t.currentDepth - 1)), that's ok: could be a panic popped
	// some stack frames (and the consumer was not using defer).
	t.currentDepth = start.Depth

	// Credit this segment's duration to its parent (or to the transaction
	// root when it has no parent).
	if 0 == t.currentDepth {
		t.finishedChildren += s.duration
	} else {
		t.stack[t.currentDepth-1].children += s.duration
	}
	return s
}

// EndBasicSegment ends a basic segment.
func EndBasicSegment(t *Tracer, start SegmentStartTime, now time.Time, name string) {
	end := endSegment(t, start, now)
	if !end.valid {
		return
	}
	if nil == t.customSegments {
		t.customSegments = make(map[string]*metricData)
	}
	m := metricDataFromDuration(end.duration, end.exclusive)
	if data, ok := t.customSegments[name]; ok {
		data.aggregate(m)
	} else {
		// Use `new` in place of &m so that m is not
		// automatically moved to the heap.
		cpy := new(metricData)
		*cpy = m
		t.customSegments[name] = cpy
	}

	if t.TxnTrace.considerNode(end) {
		t.TxnTrace.witnessNode(end, customSegmentMetric(name), nil)
	}
}

// EndExternalSegment ends an external segment.  Metrics are keyed by the
// URL's host, falling back to "unknown" when the URL has none.
func EndExternalSegment(t *Tracer, start SegmentStartTime, now time.Time, u *url.URL) {
	end := endSegment(t, start, now)
	if !end.valid {
		return
	}
	host := HostFromURL(u)
	if "" == host {
		host = "unknown"
	}
	// Cross-process fields are empty here; they apply only to instrumented
	// external calls that carry CAT headers.
	key := externalMetricKey{
		Host:                    host,
		ExternalCrossProcessID:  "",
		ExternalTransactionName: "",
	}
	if nil == t.externalSegments {
		t.externalSegments = make(map[externalMetricKey]*metricData)
	}
	t.externalCallCount++
	t.externalDuration += end.duration
	m := metricDataFromDuration(end.duration, end.exclusive)
	if data, ok := t.externalSegments[key]; ok {
		data.aggregate(m)
	} else {
		// Use `new` in place of &m so that m is not
		// automatically moved to the heap.
		cpy := new(metricData)
		*cpy = m
		t.externalSegments[key] = cpy
	}

	if t.TxnTrace.considerNode(end) {
		t.TxnTrace.witnessNode(end, externalHostMetric(key), &traceNodeParams{
			CleanURL: SafeURL(u),
		})
	}
}

// EndDatastoreParams contains the parameters for EndDatastoreSegment.
type EndDatastoreParams struct {
	Tracer             *Tracer
	Start              SegmentStartTime
	Now                time.Time
	Product            string
	Collection         string
	Operation          string
	ParameterizedQuery string
	QueryParameters    map[string]interface{}
	Host               string
	PortPathOrID       string
	Database           string
}

const (
	unknownDatastoreHost         = "unknown"
	unknownDatastorePortPathOrID = "unknown"
)

var (
	// ThisHost is the system hostname.
	ThisHost = func() string {
		if h, err := sysinfo.Hostname(); nil == err {
			return h
		}
		return unknownDatastoreHost
	}()
	// hostsToReplace lists loopback/wildcard addresses that are replaced
	// with the real system hostname before reporting.
	hostsToReplace = map[string]struct{}{
		"localhost":       struct{}{},
		"127.0.0.1":       struct{}{},
		"0.0.0.0":         struct{}{},
		"0:0:0:0:0:0:0:1": struct{}{},
		"::1":             struct{}{},
		"0:0:0:0:0:0:0:0": struct{}{},
		"::":              struct{}{},
	}
)

// slowQueryWorthy reports whether a query of duration d should be recorded
// as a slow query.  Value receiver: only two scalar fields are read.
func (t Tracer) slowQueryWorthy(d time.Duration) bool {
	return t.SlowQueriesEnabled && (d >= t.SlowQueryThreshold)
}

// EndDatastoreSegment ends a datastore segment.
func EndDatastoreSegment(p EndDatastoreParams) {
	end := endSegment(p.Tracer, p.Start, p.Now)
	if !end.valid {
		return
	}
	// Normalize missing fields before they are used as metric-key parts.
	if p.Operation == "" {
		p.Operation = datastoreOperationUnknown
	}
	if p.Product == "" {
		p.Product = datastoreProductUnknown
	}
	if p.Host == "" && p.PortPathOrID != "" {
		p.Host = unknownDatastoreHost
	}
	if p.PortPathOrID == "" && p.Host != "" {
		p.PortPathOrID = unknownDatastorePortPathOrID
	}
	if _, ok := hostsToReplace[p.Host]; ok {
		p.Host = ThisHost
	}

	// We still want to create a slowQuery if the consumer has not provided
	// a Query string since the stack trace has value.
	if p.ParameterizedQuery == "" {
		collection := p.Collection
		if "" == collection {
			collection = "unknown"
		}
		p.ParameterizedQuery = fmt.Sprintf(`'%s' on '%s' using '%s'`,
			p.Operation, collection, p.Product)
	}

	key := DatastoreMetricKey{
		Product:      p.Product,
		Collection:   p.Collection,
		Operation:    p.Operation,
		Host:         p.Host,
		PortPathOrID: p.PortPathOrID,
	}
	if nil == p.Tracer.datastoreSegments {
		p.Tracer.datastoreSegments = make(map[DatastoreMetricKey]*metricData)
	}
	p.Tracer.datastoreCallCount++
	p.Tracer.datastoreDuration += end.duration
	m := metricDataFromDuration(end.duration, end.exclusive)
	if data, ok := p.Tracer.datastoreSegments[key]; ok {
		data.aggregate(m)
	} else {
		// Use `new` in place of &m so that m is not
		// automatically moved to the heap.
		cpy := new(metricData)
		*cpy = m
		p.Tracer.datastoreSegments[key] = cpy
	}

	scopedMetric := datastoreScopedMetric(key)
	queryParams := vetQueryParameters(p.QueryParameters)

	if p.Tracer.TxnTrace.considerNode(end) {
		p.Tracer.TxnTrace.witnessNode(end, scopedMetric, &traceNodeParams{
			Host:            p.Host,
			PortPathOrID:    p.PortPathOrID,
			Database:        p.Database,
			Query:           p.ParameterizedQuery,
			queryParameters: queryParams,
		})
	}

	if p.Tracer.slowQueryWorthy(end.duration) {
		if nil == p.Tracer.SlowQueries {
			p.Tracer.SlowQueries = newSlowQueries(maxTxnSlowQueries)
		}
		// Frames to skip:
		//   this function
		//   endDatastore
		//   DatastoreSegment.End
		skipFrames := 3
		p.Tracer.SlowQueries.observeInstance(slowQueryInstance{
			Duration:           end.duration,
			DatastoreMetric:    scopedMetric,
			ParameterizedQuery: p.ParameterizedQuery,
			QueryParameters:    queryParams,
			Host:               p.Host,
			PortPathOrID:       p.PortPathOrID,
			DatabaseName:       p.Database,
			StackTrace:         GetStackTrace(skipFrames),
		})
	}
}

// MergeBreakdownMetrics creates segment metrics.
func MergeBreakdownMetrics(t *Tracer, metrics *metricTable, scope string, isWeb bool) {
	// Custom Segment Metrics
	for key, data := range t.customSegments {
		name := customSegmentMetric(key)
		// Unscoped
		metrics.add(name, "", *data, unforced)
		// Scoped
		metrics.add(name, scope, *data, unforced)
	}

	// External Segment Metrics
	for key, data := range t.externalSegments {
		// Rollups are forced; per-host metrics are subject to limits.
		metrics.add(externalAll, "", *data, forced)
		if isWeb {
			metrics.add(externalWeb, "", *data, forced)
		} else {
			metrics.add(externalOther, "", *data, forced)
		}
		hostMetric := externalHostMetric(key)
		metrics.add(hostMetric, "", *data, unforced)
		if "" != key.ExternalCrossProcessID && "" != key.ExternalTransactionName {
			txnMetric := externalTransactionMetric(key)

			// Unscoped CAT metrics
			metrics.add(externalAppMetric(key), "", *data, unforced)
			metrics.add(txnMetric, "", *data, unforced)

			// Scoped External Metric
			metrics.add(txnMetric, scope, *data, unforced)
		} else {
			// Scoped External Metric
			metrics.add(hostMetric, scope, *data, unforced)
		}
	}

	// Datastore Segment Metrics
	for key, data := range t.datastoreSegments {
		metrics.add(datastoreAll, "", *data, forced)

		product := datastoreProductMetric(key)
		metrics.add(product.All, "", *data, forced)
		if isWeb {
			metrics.add(datastoreWeb, "", *data, forced)
			metrics.add(product.Web, "", *data, forced)
		} else {
			metrics.add(datastoreOther, "", *data, forced)
			metrics.add(product.Other, "", *data, forced)
		}

		// Instance metric only when both host and port/path/id are known.
		if key.Host != "" && key.PortPathOrID != "" {
			instance := datastoreInstanceMetric(key)
			metrics.add(instance, "", *data, unforced)
		}

		operation := datastoreOperationMetric(key)
		metrics.add(operation, "", *data, unforced)

		// The scoped metric is the statement metric when a collection is
		// known, and falls back to the operation metric otherwise.
		if "" != key.Collection {
			statement := datastoreStatementMetric(key)

			metrics.add(statement, "", *data, unforced)
			metrics.add(statement, scope, *data, unforced)
		} else {
			metrics.add(operation, scope, *data, unforced)
		}
	}
}
package internal

import (
	"bytes"
	"math/rand"
	"time"
)

// DatastoreExternalTotals contains overview of external and datastore calls
// made during a transaction.
type DatastoreExternalTotals struct {
	externalCallCount  uint64
	externalDuration   time.Duration
	datastoreCallCount uint64
	datastoreDuration  time.Duration
}

// TxnEvent represents a transaction.
// https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md
// https://newrelic.atlassian.net/wiki/display/eng/Agent+Support+for+Synthetics%3A+Forced+Transaction+Traces+and+Analytic+Events
type TxnEvent struct {
	Name      string
	Timestamp time.Time
	Duration  time.Duration
	Queuing   time.Duration
	Zone      ApdexZone
	Attrs     *Attributes
	DatastoreExternalTotals
}

// WriteJSON prepares JSON in the format expected by the collector: a
// three-element array of intrinsics, user attributes, and agent attributes.
// Field order is part of the hand-built wire format; do not reorder.
func (e *TxnEvent) WriteJSON(buf *bytes.Buffer) {
	w := jsonFieldsWriter{buf: buf}
	buf.WriteByte('[')
	buf.WriteByte('{')
	w.stringField("type", "Transaction")
	w.stringField("name", e.Name)
	w.floatField("timestamp", timeToFloatSeconds(e.Timestamp))
	w.floatField("duration", e.Duration.Seconds())
	if ApdexNone != e.Zone {
		w.stringField("nr.apdexPerfZone", e.Zone.label())
	}
	if e.Queuing > 0 {
		w.floatField("queueDuration", e.Queuing.Seconds())
	}
	if e.externalCallCount > 0 {
		w.intField("externalCallCount", int64(e.externalCallCount))
		w.floatField("externalDuration", e.externalDuration.Seconds())
	}
	if e.datastoreCallCount > 0 {
		// Note that "database" is used for the keys here instead of
		// "datastore" for historical reasons.
		w.intField("databaseCallCount", int64(e.datastoreCallCount))
		w.floatField("databaseDuration", e.datastoreDuration.Seconds())
	}
	buf.WriteByte('}')
	buf.WriteByte(',')
	userAttributesJSON(e.Attrs, buf, destTxnEvent)
	buf.WriteByte(',')
	agentAttributesJSON(e.Attrs, buf, destTxnEvent)
	buf.WriteByte(']')
}

// MarshalJSON is used for testing.
func (e *TxnEvent) MarshalJSON() ([]byte, error) {
	buf := bytes.NewBuffer(make([]byte, 0, 256))

	e.WriteJSON(buf)

	return buf.Bytes(), nil
}

type txnEvents struct {
	events *analyticsEvents
}

func newTxnEvents(max int) *txnEvents {
	return &txnEvents{
		events: newAnalyticsEvents(max),
	}
}

// AddTxnEvent records e with a random sampling stamp.
func (events *txnEvents) AddTxnEvent(e *TxnEvent) {
	stamp := eventStamp(rand.Float32())
	events.events.addEvent(analyticsEvent{stamp, e})
}

// MergeIntoHarvest folds leftover events into the next harvest.
// NOTE(review): mergeFailed presumably applies failed-harvest resampling
// semantics here — confirm against analyticsEvents.
func (events *txnEvents) MergeIntoHarvest(h *Harvest) {
	h.TxnEvents.events.mergeFailed(events.events)
}

// Data serializes the events for the collector.  harvestStart is unused:
// the event payload carries its own timestamps.
func (events *txnEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
	return events.events.CollectorJSON(agentRunID)
}

func (events *txnEvents) numSeen() float64  { return events.events.NumSeen() }
func (events *txnEvents) numSaved() float64 { return events.events.NumSaved() }

package internal

import (
	"bytes"
	"container/heap"
	"encoding/json"
	"sort"
	"time"

	"github.com/newrelic/go-agent/internal/jsonx"
)

// See https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Trace-LEGACY.md

// traceNodeHeap is a min-heap ordered by node duration (see Less), so the
// root is always the cheapest node and can be evicted first.
type traceNodeHeap []traceNode

// traceNodeParams is used for trace node parameters. A struct is used in place
// of a map[string]interface{} to facilitate testing and reduce JSON Marshal
// overhead.  If too many fields get added here, it probably makes sense to
// start using a map. This struct is not embedded into traceNode to minimize
// the size of traceNode: Not all nodes will have parameters.
type traceNodeParams struct {
	StackTrace      *StackTrace
	CleanURL        string
	Database        string
	Host            string
	PortPathOrID    string
	Query           string
	queryParameters queryParameters
}

// WriteJSON emits only the fields that are set, as a JSON object.
func (p *traceNodeParams) WriteJSON(buf *bytes.Buffer) {
	w := jsonFieldsWriter{buf: buf}
	buf.WriteByte('{')
	if nil != p.StackTrace {
		w.writerField("backtrace", p.StackTrace)
	}
	if "" != p.CleanURL {
		w.stringField("uri", p.CleanURL)
	}
	if "" != p.Database {
		w.stringField("database_name", p.Database)
	}
	if "" != p.Host {
		w.stringField("host", p.Host)
	}
	if "" != p.PortPathOrID {
		w.stringField("port_path_or_id", p.PortPathOrID)
	}
	if "" != p.Query {
		w.stringField("query", p.Query)
	}
	if nil != p.queryParameters {
		w.writerField("query_parameters", p.queryParameters)
	}
	buf.WriteByte('}')
}

// MarshalJSON is used for testing.
func (p *traceNodeParams) MarshalJSON() ([]byte, error) {
	buf := &bytes.Buffer{}
	p.WriteJSON(buf)
	return buf.Bytes(), nil
}

type traceNode struct {
	start    segmentTime
	stop     segmentTime
	duration time.Duration
	params   *traceNodeParams
	name     string
}

func (h traceNodeHeap) Len() int           { return len(h) }
func (h traceNodeHeap) Less(i, j int) bool { return h[i].duration < h[j].duration }
func (h traceNodeHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push and Pop are unused: only heap.Init and heap.Fix are used.
func (h traceNodeHeap) Push(x interface{}) {}
func (h traceNodeHeap) Pop() interface{}   { return nil }

// TxnTrace contains the work in progress transaction trace.
type TxnTrace struct {
	Enabled             bool
	SegmentThreshold    time.Duration
	StackTraceThreshold time.Duration
	nodes               traceNodeHeap
	maxNodes            int
}

// considerNode exists to prevent unnecessary calls to witnessNode: constructing
// the metric name and params map requires allocations.
func (trace *TxnTrace) considerNode(end segmentEnd) bool {
	return trace.Enabled && (end.duration >= trace.SegmentThreshold)
}

// witnessNode records a finished segment as a trace node.  Once the node
// slice reaches capacity it becomes a min-heap keyed by duration, and each
// new node evicts the current shortest node if longer.
func (trace *TxnTrace) witnessNode(end segmentEnd, name string, params *traceNodeParams) {
	node := traceNode{
		start:    end.start,
		stop:     end.stop,
		duration: end.duration,
		name:     name,
		params:   params,
	}
	if !trace.considerNode(end) {
		return
	}
	if trace.nodes == nil {
		max := trace.maxNodes
		if 0 == max {
			max = maxTxnTraceNodes
		}
		trace.nodes = make(traceNodeHeap, 0, max)
	}
	if end.exclusive >= trace.StackTraceThreshold {
		if node.params == nil {
			p := new(traceNodeParams)
			node.params = p
		}
		// skip the following stack frames:
		//   this method
		//   function in tracing.go (EndBasicSegment, EndExternalSegment, EndDatastoreSegment)
		//   function in internal_txn.go (endSegment, endExternal, endDatastore)
		//   segment end method
		skip := 4
		node.params.StackTrace = GetStackTrace(skip)
	}
	if len(trace.nodes) < cap(trace.nodes) {
		trace.nodes = append(trace.nodes, node)
		if len(trace.nodes) == cap(trace.nodes) {
			// Capacity reached: establish the heap invariant once.
			heap.Init(trace.nodes)
		}
		return
	}
	if node.duration <= trace.nodes[0].duration {
		return
	}
	trace.nodes[0] = node
	heap.Fix(trace.nodes, 0)
}

// HarvestTrace contains a finished transaction trace ready for serialization to
// the collector.
type HarvestTrace struct {
	Start                time.Time
	Duration             time.Duration
	MetricName           string
	CleanURL             string
	Trace                TxnTrace
	ForcePersist         bool
	GUID                 string
	SyntheticsResourceID string
	Attrs                *Attributes
}

type nodeDetails struct {
	name          string
	relativeStart time.Duration
	relativeStop  time.Duration
	params        *traceNodeParams
}

// printNodeStart writes the opening of one trace node; the caller is
// responsible for writing the matching "]]" after any children.
func printNodeStart(buf *bytes.Buffer, n nodeDetails) {
	// time.Seconds() is intentionally not used here. Millisecond
	// precision is enough.
	relativeStartMillis := n.relativeStart.Nanoseconds() / (1000 * 1000)
	relativeStopMillis := n.relativeStop.Nanoseconds() / (1000 * 1000)

	buf.WriteByte('[')
	jsonx.AppendInt(buf, relativeStartMillis)
	buf.WriteByte(',')
	jsonx.AppendInt(buf, relativeStopMillis)
	buf.WriteByte(',')
	jsonx.AppendString(buf, n.name)
	buf.WriteByte(',')
	if nil == n.params {
		buf.WriteString("{}")
	} else {
		n.params.WriteJSON(buf)
	}
	buf.WriteByte(',')
	buf.WriteByte('[')
}

// printChildren recursively nests nodes: a node whose start stamp precedes
// the current node's stop stamp is a child of it.  Returns the index of the
// first node not consumed.
func printChildren(buf *bytes.Buffer, traceStart time.Time, nodes sortedTraceNodes, next int, stop segmentStamp) int {
	firstChild := true
	for next < len(nodes) && nodes[next].start.Stamp < stop {
		if firstChild {
			firstChild = false
		} else {
			buf.WriteByte(',')
		}
		printNodeStart(buf, nodeDetails{
			name:          nodes[next].name,
			relativeStart: nodes[next].start.Time.Sub(traceStart),
			relativeStop:  nodes[next].stop.Time.Sub(traceStart),
			params:        nodes[next].params,
		})
		next = printChildren(buf, traceStart, nodes, next+1, nodes[next].stop.Stamp)
		buf.WriteString("]]")

	}
	return next
}

type sortedTraceNodes []*traceNode

func (s sortedTraceNodes) Len() int           { return len(s) }
func (s sortedTraceNodes) Less(i, j int) bool { return s[i].start.Stamp < s[j].start.Stamp }
func (s sortedTraceNodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// traceDataJSON builds the legacy trace-data array by hand; see the
// Transaction-Trace-LEGACY spec referenced at the top of this file.
func traceDataJSON(trace *HarvestTrace) []byte {
	estimate := 100 * len(trace.Trace.nodes)
	buf := bytes.NewBuffer(make([]byte, 0, estimate))

	nodes := make(sortedTraceNodes, len(trace.Trace.nodes))
	for i := 0; i < len(nodes); i++ {
		nodes[i] = &trace.Trace.nodes[i]
	}
	sort.Sort(nodes)

	buf.WriteByte('[') // begin trace data

	// If the trace string pool is used, insert another array here.

	jsonx.AppendFloat(buf, 0.0) // unused timestamp
	buf.WriteByte(',')          //
	buf.WriteString("{}")       // unused: formerly request parameters
	buf.WriteByte(',')          //
	buf.WriteString("{}")       // unused: formerly custom parameters
	buf.WriteByte(',')          //

	printNodeStart(buf, nodeDetails{ // begin outer root
		name:          "ROOT",
		relativeStart: 0,
		relativeStop:  trace.Duration,
	})

	printNodeStart(buf, nodeDetails{ // begin inner root
		name:          trace.MetricName,
		relativeStart: 0,
		relativeStop:  trace.Duration,
	})

	if len(nodes) > 0 {
		lastStopStamp := nodes[len(nodes)-1].stop.Stamp + 1
		printChildren(buf, trace.Start, nodes, 0, lastStopStamp)
	}

	buf.WriteString("]]") // end inner root
	buf.WriteString("]]") // end outer root

	buf.WriteByte(',')
	buf.WriteByte('{')
	buf.WriteString(`"agentAttributes":`)
	agentAttributesJSON(trace.Attrs, buf, destTxnTrace)
	buf.WriteByte(',')
	buf.WriteString(`"userAttributes":`)
	userAttributesJSON(trace.Attrs, buf, destTxnTrace)
	buf.WriteByte(',')
	buf.WriteString(`"intrinsics":{}`) // TODO intrinsics
	buf.WriteByte('}')

	// If the trace string pool is used, end another array here.

	buf.WriteByte(']') // end trace data

	return buf.Bytes()
}

// MarshalJSON prepares the trace in the JSON expected by the collector.
func (trace *HarvestTrace) MarshalJSON() ([]byte, error) {
	return json.Marshal([]interface{}{
		// NOTE(review): UnixNano()/1000 yields microseconds — confirm the
		// collector expects this unit rather than milliseconds.
		trace.Start.UnixNano() / 1000,
		trace.Duration.Seconds() * 1000.0,
		trace.MetricName,
		trace.CleanURL,
		JSONString(traceDataJSON(trace)),
		trace.GUID,
		nil, // reserved for future use
		trace.ForcePersist,
		nil, // X-Ray sessions not supported
		trace.SyntheticsResourceID,
	})
}

// harvestTraces retains at most one trace per harvest: the longest seen.
type harvestTraces struct {
	trace *HarvestTrace
}

func newHarvestTraces() *harvestTraces {
	return &harvestTraces{}
}

// Witness keeps trace if it is the first seen or longer than the current one.
func (traces *harvestTraces) Witness(trace HarvestTrace) {
	if nil == traces.trace || traces.trace.Duration < trace.Duration {
		cpy := new(HarvestTrace)
		*cpy = trace
		traces.trace = cpy
	}
}

func (traces *harvestTraces) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
	if nil == traces.trace {
		return nil, nil
	}
	return json.Marshal([]interface{}{
		agentRunID,
		[]interface{}{
			traces.trace,
		},
	})
}

// MergeIntoHarvest is a no-op: an unsent trace is not worth retrying.
func (traces *harvestTraces) MergeIntoHarvest(h *Harvest) {}

package internal

import "net/url"

// SafeURL removes sensitive information from a URL.
func SafeURL(u *url.URL) string {
	if nil == u {
		return ""
	}
	if "" != u.Opaque {
		// If the URL is opaque, we cannot be sure if it contains
		// sensitive information.
		return ""
	}

	// Omit user, query, and fragment information for security.
	ur := url.URL{
		Scheme: u.Scheme,
		Host:   u.Host,
		Path:   u.Path,
	}
	return ur.String()
}

// SafeURLFromString removes sensitive information from a URL.
func SafeURLFromString(rawurl string) string {
	u, err := url.Parse(rawurl)
	if nil != err {
		return ""
	}
	return SafeURL(u)
}

// HostFromURL returns the URL's host.
func HostFromURL(u *url.URL) string {
	// No URL at all: nothing to report.
	if u == nil {
		return ""
	}
	// Opaque URLs have no parsed host component; report a placeholder
	// instead of exposing the opaque data.
	if u.Opaque != "" {
		return "opaque"
	}
	return u.Host
}

// JSONString assists in logging JSON: Based on the formatter used to log
// Context contents, the contents could be marshalled as JSON or just printed
// directly.
type JSONString string

// MarshalJSON returns the JSONString unmodified without any escaping.
func (js JSONString) MarshalJSON() ([]byte, error) {
	if len(js) == 0 {
		return []byte("null"), nil
	}
	return []byte(js), nil
}

// removeFirstSegment drops everything up to and including the first '/'.
// Names containing no '/' are returned unchanged.
func removeFirstSegment(name string) string {
	slash := strings.Index(name, "/")
	if slash < 0 {
		return name
	}
	return name[slash+1:]
}

// timeToFloatSeconds converts t into fractional seconds since the epoch.
func timeToFloatSeconds(t time.Time) float64 {
	return float64(t.UnixNano()) / float64(time.Second)
}

// timeToFloatMilliseconds converts t into fractional milliseconds since the
// epoch.
func timeToFloatMilliseconds(t time.Time) float64 {
	return float64(t.UnixNano()) / float64(time.Millisecond)
}

// floatSecondsToDuration converts fractional seconds into a Duration.
func floatSecondsToDuration(seconds float64) time.Duration {
	return time.Duration(seconds * float64(time.Second))
}

// absTimeDiff returns the non-negative distance between two instants.
func absTimeDiff(t1, t2 time.Time) time.Duration {
	if d := t1.Sub(t2); d >= 0 {
		return d
	}
	return t2.Sub(t1)
}

// compactJSON strips insignificant whitespace from js, returning nil when js
// is not valid JSON.
func compactJSON(js []byte) []byte {
	var out bytes.Buffer
	if err := json.Compact(&out, js); err != nil {
		return nil
	}
	return out.Bytes()
}

// CompactJSONString removes the whitespace from a JSON string.
func CompactJSONString(js string) string {
	return string(compactJSON([]byte(js)))
}

// StringLengthByteLimit truncates strings using a byte-limit boundary and
// avoids terminating in the middle of a multibyte character.
func StringLengthByteLimit(str string, byteLimit int) string {
	// Fast path: the whole string already fits.
	if len(str) <= byteLimit {
		return str
	}

	// Walk the rune start positions and remember the last one that does
	// not exceed the limit; cutting there never splits a multibyte rune.
	end := 0
	for idx := range str {
		if idx > byteLimit {
			break
		}
		end = idx
	}
	return str[:end]
}

const (
	// maxResponseLengthBytes caps the size of a single metadata value.
	maxResponseLengthBytes = 255

	// AWS data gathering requires making three web requests, therefore this
	// timeout is in keeping with the spec's total timeout of 1 second.
	individualConnectionTimeout = 300 * time.Millisecond
)

const (
	awsHost = "169.254.169.254"

	typeEndpointPath = "/2008-02-01/meta-data/instance-type"
	idEndpointPath   = "/2008-02-01/meta-data/instance-id"
	zoneEndpointPath = "/2008-02-01/meta-data/placement/availability-zone"

	typeEndpoint = "http://" + awsHost + typeEndpointPath
	idEndpoint   = "http://" + awsHost + idEndpointPath
	zoneEndpoint = "http://" + awsHost + zoneEndpointPath
)

// awsValidationError represents a response from an AWS endpoint that doesn't
// match the format expectations.
+type awsValidationError struct { + e error +} + +func (a awsValidationError) Error() string { + return a.e.Error() +} + +func isAWSValidationError(e error) bool { + _, is := e.(awsValidationError) + return is +} + +func getAWS() (*vendor, error) { + return getEndpoints(&http.Client{ + Timeout: individualConnectionTimeout, + }) +} + +func getEndpoints(client *http.Client) (*vendor, error) { + v := &vendor{} + var err error + + v.ID, err = getAndValidate(client, idEndpoint) + if err != nil { + return nil, err + } + v.Type, err = getAndValidate(client, typeEndpoint) + if err != nil { + return nil, err + } + v.Zone, err = getAndValidate(client, zoneEndpoint) + if err != nil { + return nil, err + } + + return v, nil +} + +func getAndValidate(client *http.Client, endpoint string) (string, error) { + response, err := client.Get(endpoint) + if err != nil { + return "", err + } + defer response.Body.Close() + + if response.StatusCode != 200 { + return "", fmt.Errorf("unexpected response code %d", response.StatusCode) + } + + b := make([]byte, maxResponseLengthBytes+1) + num, err := response.Body.Read(b) + if err != nil && err != io.EOF { + return "", err + } + + if num > maxResponseLengthBytes { + return "", awsValidationError{ + fmt.Errorf("maximum length %d exceeded", maxResponseLengthBytes), + } + } + + responseText := string(b[:num]) + + for _, r := range responseText { + if !isAcceptableRune(r) { + return "", awsValidationError{ + fmt.Errorf("invalid character %x", r), + } + } + } + + return responseText, nil +} + +// See: +// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md#normalizing-aws-data +func isAcceptableRune(r rune) bool { + switch r { + case 0xFFFD: + return false + case '_', ' ', '/', '.', '-': + return true + default: + return r > 0x7f || + ('0' <= r && r <= '9') || + ('a' <= r && r <= 'z') || + ('A' <= r && r <= 'Z') + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go 
b/vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go new file mode 100644 index 0000000000..83d12f8773 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal/utilization/utilization.go @@ -0,0 +1,140 @@ +// Package utilization implements the Utilization spec, available at +// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md +package utilization + +import ( + "runtime" + + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/sysinfo" +) + +const metadataVersion = 2 + +// Config controls the behavior of utilization information capture. +type Config struct { + DetectAWS bool + DetectDocker bool + LogicalProcessors int + TotalRAMMIB int + BillingHostname string +} + +type override struct { + LogicalProcessors *int `json:"logical_processors,omitempty"` + TotalRAMMIB *int `json:"total_ram_mib,omitempty"` + BillingHostname string `json:"hostname,omitempty"` +} + +// Data contains utilization system information. +type Data struct { + MetadataVersion int `json:"metadata_version"` + LogicalProcessors int `json:"logical_processors"` + RAMMib *uint64 `json:"total_ram_mib"` + Hostname string `json:"hostname"` + Vendors *vendors `json:"vendors,omitempty"` + Config *override `json:"config,omitempty"` +} + +var ( + sampleRAMMib = uint64(1024) + // SampleData contains sample utilization data useful for testing. 
+ SampleData = Data{ + MetadataVersion: metadataVersion, + LogicalProcessors: 16, + RAMMib: &sampleRAMMib, + Hostname: "my-hostname", + } +) + +type vendor struct { + ID string `json:"id,omitempty"` + Type string `json:"type,omitempty"` + Zone string `json:"zone,omitempty"` +} + +type vendors struct { + AWS *vendor `json:"aws,omitempty"` + Docker *vendor `json:"docker,omitempty"` +} + +func overrideFromConfig(config Config) *override { + ov := &override{} + + if 0 != config.LogicalProcessors { + x := config.LogicalProcessors + ov.LogicalProcessors = &x + } + if 0 != config.TotalRAMMIB { + x := config.TotalRAMMIB + ov.TotalRAMMIB = &x + } + ov.BillingHostname = config.BillingHostname + + if "" == ov.BillingHostname && + nil == ov.LogicalProcessors && + nil == ov.TotalRAMMIB { + ov = nil + } + return ov +} + +// Gather gathers system utilization data. +func Gather(config Config, lg logger.Logger) *Data { + uDat := Data{ + MetadataVersion: metadataVersion, + Vendors: &vendors{}, + LogicalProcessors: runtime.NumCPU(), + } + + if config.DetectDocker { + id, err := sysinfo.DockerID() + if err != nil && + err != sysinfo.ErrDockerUnsupported && + err != sysinfo.ErrDockerNotFound { + lg.Warn("error gathering Docker information", map[string]interface{}{ + "error": err.Error(), + }) + } else if id != "" { + uDat.Vendors.Docker = &vendor{ID: id} + } + } + + if config.DetectAWS { + aws, err := getAWS() + if nil == err { + uDat.Vendors.AWS = aws + } else if isAWSValidationError(err) { + lg.Warn("AWS validation error", map[string]interface{}{ + "error": err.Error(), + }) + } + } + + if uDat.Vendors.AWS == nil && uDat.Vendors.Docker == nil { + uDat.Vendors = nil + } + + host, err := sysinfo.Hostname() + if nil == err { + uDat.Hostname = host + } else { + lg.Warn("error getting hostname", map[string]interface{}{ + "error": err.Error(), + }) + } + + bts, err := sysinfo.PhysicalMemoryBytes() + if nil == err { + mib := sysinfo.BytesToMebibytes(bts) + uDat.RAMMib = &mib + } else { + 
lg.Warn("error getting memory", map[string]interface{}{ + "error": err.Error(), + }) + } + + uDat.Config = overrideFromConfig(config) + + return &uDat +} diff --git a/vendor/github.com/newrelic/go-agent/internal_app.go b/vendor/github.com/newrelic/go-agent/internal_app.go new file mode 100644 index 0000000000..9eb00c48ab --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_app.go @@ -0,0 +1,566 @@ +package newrelic + +import ( + "errors" + "fmt" + "net/http" + "os" + "strings" + "sync" + "time" + + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/logger" +) + +var ( + debugLogging = os.Getenv("NEW_RELIC_DEBUG_LOGGING") + redirectHost = func() string { + if s := os.Getenv("NEW_RELIC_HOST"); "" != s { + return s + } + return "collector.newrelic.com" + }() +) + +type dataConsumer interface { + Consume(internal.AgentRunID, internal.Harvestable) +} + +type appData struct { + id internal.AgentRunID + data internal.Harvestable +} + +type app struct { + config Config + attrConfig *internal.AttributeConfig + rpmControls internal.RpmControls + testHarvest *internal.Harvest + + // initiateShutdown is used to tell the processor to shutdown. + initiateShutdown chan struct{} + + // shutdownStarted and shutdownComplete are closed by the processor + // goroutine to indicate the shutdown status. Two channels are used so + // that the call of app.Shutdown() can block until shutdown has + // completed but other goroutines can exit when shutdown has started. + // This is not just an optimization: This prevents a deadlock if + // harvesting data during the shutdown fails and an attempt is made to + // merge the data into the next harvest. + shutdownStarted chan struct{} + shutdownComplete chan struct{} + + // Sends to these channels should not occur without a <-shutdownStarted + // select option to prevent deadlock. 
+ dataChan chan appData + collectorErrorChan chan error + connectChan chan *internal.AppRun + + harvestTicker *time.Ticker + + // This mutex protects both `run` and `err`, both of which should only + // be accessed using getState and setState. + sync.RWMutex + // run is non-nil when the app is successfully connected. It is + // immutable. + run *internal.AppRun + // err is non-nil if the application will never be connected again + // (disconnect, license exception, shutdown). + err error +} + +var ( + placeholderRun = &internal.AppRun{ + ConnectReply: internal.ConnectReplyDefaults(), + } +) + +func isFatalHarvestError(e error) bool { + return internal.IsDisconnect(e) || + internal.IsLicenseException(e) || + internal.IsRestartException(e) +} + +func shouldSaveFailedHarvest(e error) bool { + if e == internal.ErrPayloadTooLarge || e == internal.ErrUnsupportedMedia { + return false + } + return true +} + +func (app *app) doHarvest(h *internal.Harvest, harvestStart time.Time, run *internal.AppRun) { + h.CreateFinalMetrics() + h.Metrics = h.Metrics.ApplyRules(run.MetricRules) + + payloads := h.Payloads() + for cmd, p := range payloads { + + data, err := p.Data(run.RunID.String(), harvestStart) + + if nil == data && nil == err { + continue + } + + if nil == err { + call := internal.RpmCmd{ + Collector: run.Collector, + RunID: run.RunID.String(), + Name: cmd, + Data: data, + } + + // The reply from harvest calls is always unused. 
+ _, err = internal.CollectorRequest(call, app.rpmControls) + } + + if nil == err { + continue + } + + if isFatalHarvestError(err) { + select { + case app.collectorErrorChan <- err: + case <-app.shutdownStarted: + } + return + } + + app.config.Logger.Warn("harvest failure", map[string]interface{}{ + "cmd": cmd, + "error": err.Error(), + }) + + if shouldSaveFailedHarvest(err) { + app.Consume(run.RunID, p) + } + } +} + +func connectAttempt(app *app) (*internal.AppRun, error) { + js, e := configConnectJSON(app.config) + if nil != e { + return nil, e + } + return internal.ConnectAttempt(js, redirectHost, app.rpmControls) +} + +func (app *app) connectRoutine() { + for { + run, err := connectAttempt(app) + if nil == err { + select { + case app.connectChan <- run: + case <-app.shutdownStarted: + } + return + } + + if internal.IsDisconnect(err) || internal.IsLicenseException(err) { + select { + case app.collectorErrorChan <- err: + case <-app.shutdownStarted: + } + return + } + + app.config.Logger.Warn("application connect failure", map[string]interface{}{ + "error": err.Error(), + }) + + time.Sleep(internal.ConnectBackoff) + } +} + +func debug(data internal.Harvestable, lg Logger) { + now := time.Now() + h := internal.NewHarvest(now) + data.MergeIntoHarvest(h) + ps := h.Payloads() + for cmd, p := range ps { + d, err := p.Data("agent run id", now) + if nil == d && nil == err { + continue + } + if nil != err { + lg.Debug("integration", map[string]interface{}{ + "cmd": cmd, + "error": err.Error(), + }) + continue + } + lg.Debug("integration", map[string]interface{}{ + "cmd": cmd, + "data": internal.JSONString(d), + }) + } +} + +func processConnectMessages(run *internal.AppRun, lg Logger) { + for _, msg := range run.Messages { + event := "collector message" + cn := map[string]interface{}{"msg": msg.Message} + + switch strings.ToLower(msg.Level) { + case "error": + lg.Error(event, cn) + case "warn": + lg.Warn(event, cn) + case "info": + lg.Info(event, cn) + case "debug", 
			"verbose":
			lg.Debug(event, cn)
		}
	}
}

// process is the application's central event loop, run in its own
// goroutine.  It owns the harvest and the agent run, reacting to harvest
// ticks, incoming data, connect results, collector errors, and shutdown.
func (app *app) process() {
	// Both the harvest and the run are non-nil when the app is connected,
	// and nil otherwise.
	var h *internal.Harvest
	var run *internal.AppRun

	for {
		select {
		case <-app.harvestTicker.C:
			if nil != run {
				now := time.Now()
				go app.doHarvest(h, now, run)
				h = internal.NewHarvest(now)
			}
		case d := <-app.dataChan:
			// Data from a stale run id is silently dropped.
			if nil != run && run.RunID == d.id {
				d.data.MergeIntoHarvest(h)
			}
		case <-app.initiateShutdown:
			close(app.shutdownStarted)

			// Remove the run before merging any final data to
			// ensure a bounded number of receives from dataChan.
			app.setState(nil, errors.New("application shut down"))
			app.harvestTicker.Stop()

			if nil != run {
				// Drain whatever is already buffered, then do a
				// final synchronous harvest.
				for done := false; !done; {
					select {
					case d := <-app.dataChan:
						if run.RunID == d.id {
							d.data.MergeIntoHarvest(h)
						}
					default:
						done = true
					}
				}
				app.doHarvest(h, time.Now(), run)
			}

			close(app.shutdownComplete)
			return
		case err := <-app.collectorErrorChan:
			run = nil
			h = nil
			app.setState(nil, nil)

			switch {
			case internal.IsDisconnect(err):
				app.setState(nil, err)
				app.config.Logger.Error("application disconnected by New Relic", map[string]interface{}{
					"app": app.config.AppName,
				})
			case internal.IsLicenseException(err):
				app.setState(nil, err)
				app.config.Logger.Error("invalid license", map[string]interface{}{
					"app":     app.config.AppName,
					"license": app.config.License,
				})
			case internal.IsRestartException(err):
				// Restart is recoverable: begin a fresh connect.
				app.config.Logger.Info("application restarted", map[string]interface{}{
					"app": app.config.AppName,
				})
				go app.connectRoutine()
			}
		case run = <-app.connectChan:
			h = internal.NewHarvest(time.Now())
			app.setState(run, nil)

			app.config.Logger.Info("application connected", map[string]interface{}{
				"app": app.config.AppName,
				"run": run.RunID.String(),
			})
			processConnectMessages(run, app.config.Logger)
		}
	}
}

func (app *app) Shutdown(timeout
time.Duration) { + if !app.config.Enabled { + return + } + + select { + case app.initiateShutdown <- struct{}{}: + default: + } + + // Block until shutdown is done or timeout occurs. + t := time.NewTimer(timeout) + select { + case <-app.shutdownComplete: + case <-t.C: + } + t.Stop() + + app.config.Logger.Info("application shutdown", map[string]interface{}{ + "app": app.config.AppName, + }) +} + +func convertAttributeDestinationConfig(c AttributeDestinationConfig) internal.AttributeDestinationConfig { + return internal.AttributeDestinationConfig{ + Enabled: c.Enabled, + Include: c.Include, + Exclude: c.Exclude, + } +} + +func runSampler(app *app, period time.Duration) { + previous := internal.GetSample(time.Now(), app.config.Logger) + t := time.NewTicker(period) + for { + select { + case now := <-t.C: + current := internal.GetSample(now, app.config.Logger) + run, _ := app.getState() + app.Consume(run.RunID, internal.GetStats(internal.Samples{ + Previous: previous, + Current: current, + })) + previous = current + case <-app.shutdownStarted: + t.Stop() + return + } + } +} + +func (app *app) WaitForConnection(timeout time.Duration) error { + if !app.config.Enabled { + return nil + } + deadline := time.Now().Add(timeout) + pollPeriod := 50 * time.Millisecond + + for { + run, err := app.getState() + if nil != err { + return err + } + if run.RunID != "" { + return nil + } + if time.Now().After(deadline) { + return fmt.Errorf("timeout out after %s", timeout.String()) + } + time.Sleep(pollPeriod) + } +} + +func newApp(c Config) (Application, error) { + c = copyConfigReferenceFields(c) + if err := c.Validate(); nil != err { + return nil, err + } + if nil == c.Logger { + c.Logger = logger.ShimLogger{} + } + app := &app{ + config: c, + attrConfig: internal.CreateAttributeConfig(internal.AttributeConfigInput{ + Attributes: convertAttributeDestinationConfig(c.Attributes), + ErrorCollector: convertAttributeDestinationConfig(c.ErrorCollector.Attributes), + TransactionEvents: 
convertAttributeDestinationConfig(c.TransactionEvents.Attributes), + TransactionTracer: convertAttributeDestinationConfig(c.TransactionTracer.Attributes), + }), + + // This channel must be buffered since Shutdown makes a + // non-blocking send attempt. + initiateShutdown: make(chan struct{}, 1), + + shutdownStarted: make(chan struct{}), + shutdownComplete: make(chan struct{}), + connectChan: make(chan *internal.AppRun, 1), + collectorErrorChan: make(chan error, 1), + dataChan: make(chan appData, internal.AppDataChanSize), + rpmControls: internal.RpmControls{ + UseTLS: c.UseTLS, + License: c.License, + Client: &http.Client{ + Transport: c.Transport, + Timeout: internal.CollectorTimeout, + }, + Logger: c.Logger, + AgentVersion: Version, + }, + } + + app.config.Logger.Info("application created", map[string]interface{}{ + "app": app.config.AppName, + "version": Version, + "enabled": app.config.Enabled, + }) + + if !app.config.Enabled { + return app, nil + } + + app.harvestTicker = time.NewTicker(internal.HarvestPeriod) + + go app.process() + go app.connectRoutine() + + if app.config.RuntimeSampler.Enabled { + go runSampler(app, internal.RuntimeSamplerPeriod) + } + + return app, nil +} + +type expectApp interface { + internal.Expect + Application +} + +func newTestApp(replyfn func(*internal.ConnectReply), cfg Config) (expectApp, error) { + cfg.Enabled = false + application, err := newApp(cfg) + if nil != err { + return nil, err + } + app := application.(*app) + if nil != replyfn { + reply := internal.ConnectReplyDefaults() + replyfn(reply) + app.setState(&internal.AppRun{ConnectReply: reply}, nil) + } + + app.testHarvest = internal.NewHarvest(time.Now()) + + return app, nil +} + +func (app *app) getState() (*internal.AppRun, error) { + app.RLock() + defer app.RUnlock() + + run := app.run + if nil == run { + run = placeholderRun + } + return run, app.err +} + +func (app *app) setState(run *internal.AppRun, err error) { + app.Lock() + defer app.Unlock() + + app.run = run 
	app.err = err
}

// StartTransaction implements newrelic.Application's StartTransaction.
func (app *app) StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction {
	run, _ := app.getState()
	return upgradeTxn(newTxn(txnInput{
		Config:     app.config,
		Reply:      run.ConnectReply,
		Request:    r,
		W:          w,
		Consumer:   app,
		attrConfig: app.attrConfig,
	}, name))
}

// Sentinel errors returned by RecordCustomEvent.
var (
	errHighSecurityEnabled        = errors.New("high security enabled")
	errCustomEventsDisabled       = errors.New("custom events disabled")
	errCustomEventsRemoteDisabled = errors.New("custom events disabled by server")
)

// RecordCustomEvent implements newrelic.Application's RecordCustomEvent.
func (app *app) RecordCustomEvent(eventType string, params map[string]interface{}) error {
	// High security mode and local/remote config can each veto the event.
	if app.config.HighSecurity {
		return errHighSecurityEnabled
	}

	if !app.config.CustomInsightsEvents.Enabled {
		return errCustomEventsDisabled
	}

	event, e := internal.CreateCustomEvent(eventType, params, time.Now())
	if nil != e {
		return e
	}

	run, _ := app.getState()
	if !run.CollectCustomEvents {
		return errCustomEventsRemoteDisabled
	}

	app.Consume(run.RunID, event)

	return nil
}

// Consume routes harvest data: into the test harvest when present,
// otherwise to the process() goroutine via dataChan.  Data is dropped
// when there is no agent run id or shutdown has begun.
func (app *app) Consume(id internal.AgentRunID, data internal.Harvestable) {
	if "" != debugLogging {
		debug(data, app.config.Logger)
	}

	if nil != app.testHarvest {
		data.MergeIntoHarvest(app.testHarvest)
		return
	}

	if "" == id {
		return
	}

	select {
	case app.dataChan <- appData{id, data}:
	case <-app.shutdownStarted:
	}
}

// The Expect* methods below validate the contents of the test harvest
// and exist to support the expectApp testing interface.
func (app *app) ExpectCustomEvents(t internal.Validator, want []internal.WantCustomEvent) {
	internal.ExpectCustomEvents(internal.ExtendValidator(t, "custom events"), app.testHarvest.CustomEvents, want)
}

func (app *app) ExpectErrors(t internal.Validator, want []internal.WantError) {
	t = internal.ExtendValidator(t, "traced errors")
	internal.ExpectErrors(t, app.testHarvest.ErrorTraces, want)
}

func (app *app) ExpectErrorEvents(t
internal.Validator, want []internal.WantErrorEvent) { + t = internal.ExtendValidator(t, "error events") + internal.ExpectErrorEvents(t, app.testHarvest.ErrorEvents, want) +} + +func (app *app) ExpectTxnEvents(t internal.Validator, want []internal.WantTxnEvent) { + t = internal.ExtendValidator(t, "txn events") + internal.ExpectTxnEvents(t, app.testHarvest.TxnEvents, want) +} + +func (app *app) ExpectMetrics(t internal.Validator, want []internal.WantMetric) { + t = internal.ExtendValidator(t, "metrics") + internal.ExpectMetrics(t, app.testHarvest.Metrics, want) +} + +func (app *app) ExpectTxnTraces(t internal.Validator, want []internal.WantTxnTrace) { + t = internal.ExtendValidator(t, "txn traces") + internal.ExpectTxnTraces(t, app.testHarvest.TxnTraces, want) +} + +func (app *app) ExpectSlowQueries(t internal.Validator, want []internal.WantSlowQuery) { + t = internal.ExtendValidator(t, "slow queries") + internal.ExpectSlowQueries(t, app.testHarvest.SlowSQLs, want) +} diff --git a/vendor/github.com/newrelic/go-agent/internal_config.go b/vendor/github.com/newrelic/go-agent/internal_config.go new file mode 100644 index 0000000000..f013781f43 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_config.go @@ -0,0 +1,153 @@ +package newrelic + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + + "github.com/newrelic/go-agent/internal" + "github.com/newrelic/go-agent/internal/logger" + "github.com/newrelic/go-agent/internal/utilization" +) + +func copyDestConfig(c AttributeDestinationConfig) AttributeDestinationConfig { + cp := c + if nil != c.Include { + cp.Include = make([]string, len(c.Include)) + copy(cp.Include, c.Include) + } + if nil != c.Exclude { + cp.Exclude = make([]string, len(c.Exclude)) + copy(cp.Exclude, c.Exclude) + } + return cp +} + +func copyConfigReferenceFields(cfg Config) Config { + cp := cfg + if nil != cfg.Labels { + cp.Labels = make(map[string]string, len(cfg.Labels)) + for key, val := range cfg.Labels { + 
cp.Labels[key] = val + } + } + if nil != cfg.ErrorCollector.IgnoreStatusCodes { + ignored := make([]int, len(cfg.ErrorCollector.IgnoreStatusCodes)) + copy(ignored, cfg.ErrorCollector.IgnoreStatusCodes) + cp.ErrorCollector.IgnoreStatusCodes = ignored + } + + cp.Attributes = copyDestConfig(cfg.Attributes) + cp.ErrorCollector.Attributes = copyDestConfig(cfg.ErrorCollector.Attributes) + cp.TransactionEvents.Attributes = copyDestConfig(cfg.TransactionEvents.Attributes) + cp.TransactionTracer.Attributes = copyDestConfig(cfg.TransactionTracer.Attributes) + + return cp +} + +const ( + agentLanguage = "go" +) + +func transportSetting(t http.RoundTripper) interface{} { + if nil == t { + return nil + } + return fmt.Sprintf("%T", t) +} + +func loggerSetting(lg Logger) interface{} { + if nil == lg { + return nil + } + if _, ok := lg.(logger.ShimLogger); ok { + return nil + } + return fmt.Sprintf("%T", lg) +} + +const ( + // https://source.datanerd.us/agents/agent-specs/blob/master/Custom-Host-Names.md + hostByteLimit = 255 +) + +type settings Config + +func (s settings) MarshalJSON() ([]byte, error) { + c := Config(s) + transport := c.Transport + c.Transport = nil + logger := c.Logger + c.Logger = nil + + js, err := json.Marshal(c) + if nil != err { + return nil, err + } + fields := make(map[string]interface{}) + err = json.Unmarshal(js, &fields) + if nil != err { + return nil, err + } + // The License field is not simply ignored by adding the `json:"-"` tag + // to it since we want to allow consumers to populate Config from JSON. 
+ delete(fields, `License`) + fields[`Transport`] = transportSetting(transport) + fields[`Logger`] = loggerSetting(logger) + return json.Marshal(fields) +} + +func configConnectJSONInternal(c Config, pid int, util *utilization.Data, e internal.Environment, version string) ([]byte, error) { + return json.Marshal([]interface{}{struct { + Pid int `json:"pid"` + Language string `json:"language"` + Version string `json:"agent_version"` + Host string `json:"host"` + HostDisplayName string `json:"display_host,omitempty"` + Settings interface{} `json:"settings"` + AppName []string `json:"app_name"` + HighSecurity bool `json:"high_security"` + Labels internal.Labels `json:"labels,omitempty"` + Environment internal.Environment `json:"environment"` + Identifier string `json:"identifier"` + Util *utilization.Data `json:"utilization"` + }{ + Pid: pid, + Language: agentLanguage, + Version: version, + Host: internal.StringLengthByteLimit(util.Hostname, hostByteLimit), + HostDisplayName: internal.StringLengthByteLimit(c.HostDisplayName, hostByteLimit), + Settings: (settings)(c), + AppName: strings.Split(c.AppName, ";"), + HighSecurity: c.HighSecurity, + Labels: internal.Labels(c.Labels), + Environment: e, + // This identifier field is provided to avoid: + // https://newrelic.atlassian.net/browse/DSCORE-778 + // + // This identifier is used by the collector to look up the real + // agent. If an identifier isn't provided, the collector will + // create its own based on the first appname, which prevents a + // single daemon from connecting "a;b" and "a;c" at the same + // time. + // + // Providing the identifier below works around this issue and + // allows users more flexibility in using application rollups. 
+ Identifier: c.AppName, + Util: util, + }}) +} + +func configConnectJSON(c Config) ([]byte, error) { + env := internal.NewEnvironment() + util := utilization.Gather(utilization.Config{ + DetectAWS: c.Utilization.DetectAWS, + DetectDocker: c.Utilization.DetectDocker, + LogicalProcessors: c.Utilization.LogicalProcessors, + TotalRAMMIB: c.Utilization.TotalRAMMIB, + BillingHostname: c.Utilization.BillingHostname, + }, c.Logger) + return configConnectJSONInternal(c, os.Getpid(), util, env, Version) +} diff --git a/vendor/github.com/newrelic/go-agent/internal_response_writer.go b/vendor/github.com/newrelic/go-agent/internal_response_writer.go new file mode 100644 index 0000000000..fd202af261 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_response_writer.go @@ -0,0 +1,121 @@ +package newrelic + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + hasC = 1 << iota // CloseNotifier + hasF // Flusher + hasH // Hijacker + hasR // ReaderFrom +) + +type wrap struct{ *txn } +type wrapR struct{ *txn } +type wrapH struct{ *txn } +type wrapHR struct{ *txn } +type wrapF struct{ *txn } +type wrapFR struct{ *txn } +type wrapFH struct{ *txn } +type wrapFHR struct{ *txn } +type wrapC struct{ *txn } +type wrapCR struct{ *txn } +type wrapCH struct{ *txn } +type wrapCHR struct{ *txn } +type wrapCF struct{ *txn } +type wrapCFR struct{ *txn } +type wrapCFH struct{ *txn } +type wrapCFHR struct{ *txn } + +func (x wrapC) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCH) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCHR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCF) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCFR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x 
wrapCFH) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } +func (x wrapCFHR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() } + +func (x wrapF) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapFR) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapFH) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapFHR) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapCF) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapCFR) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapCFH) Flush() { x.W.(http.Flusher).Flush() } +func (x wrapCFHR) Flush() { x.W.(http.Flusher).Flush() } + +func (x wrapH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapFH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapFHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapCH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapCHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapCFH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } +func (x wrapCFHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() } + +func (x wrapR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapFR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapFHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapCR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapCHR) ReadFrom(r io.Reader) (int64, 
error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapCFR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } +func (x wrapCFHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) } + +func upgradeTxn(txn *txn) Transaction { + x := 0 + if _, ok := txn.W.(http.CloseNotifier); ok { + x |= hasC + } + if _, ok := txn.W.(http.Flusher); ok { + x |= hasF + } + if _, ok := txn.W.(http.Hijacker); ok { + x |= hasH + } + if _, ok := txn.W.(io.ReaderFrom); ok { + x |= hasR + } + + switch x { + default: + // Wrap the transaction even when there are no methods needed to + // ensure consistent error stack trace depth. + return wrap{txn} + case hasR: + return wrapR{txn} + case hasH: + return wrapH{txn} + case hasH | hasR: + return wrapHR{txn} + case hasF: + return wrapF{txn} + case hasF | hasR: + return wrapFR{txn} + case hasF | hasH: + return wrapFH{txn} + case hasF | hasH | hasR: + return wrapFHR{txn} + case hasC: + return wrapC{txn} + case hasC | hasR: + return wrapCR{txn} + case hasC | hasH: + return wrapCH{txn} + case hasC | hasH | hasR: + return wrapCHR{txn} + case hasC | hasF: + return wrapCF{txn} + case hasC | hasF | hasR: + return wrapCFR{txn} + case hasC | hasF | hasH: + return wrapCFH{txn} + case hasC | hasF | hasH | hasR: + return wrapCFHR{txn} + } +} diff --git a/vendor/github.com/newrelic/go-agent/internal_txn.go b/vendor/github.com/newrelic/go-agent/internal_txn.go new file mode 100644 index 0000000000..17549a7f46 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/internal_txn.go @@ -0,0 +1,492 @@ +package newrelic + +import ( + "errors" + "net/http" + "net/url" + "sync" + "time" + + "github.com/newrelic/go-agent/internal" +) + +type txnInput struct { + W http.ResponseWriter + Request *http.Request + Config Config + Reply *internal.ConnectReply + Consumer dataConsumer + attrConfig *internal.AttributeConfig +} + +type txn struct { + txnInput + // This mutex is required since the consumer may call 
the public API + // interface functions from different routines. + sync.Mutex + // finished indicates whether or not End() has been called. After + // finished has been set to true, no recording should occur. + finished bool + queuing time.Duration + start time.Time + name string // Work in progress name + isWeb bool + ignore bool + errors internal.TxnErrors // Lazily initialized. + attrs *internal.Attributes + + // Fields relating to tracing and breakdown metrics/segments. + tracer internal.Tracer + + // wroteHeader prevents capturing multiple response code errors if the + // user erroneously calls WriteHeader multiple times. + wroteHeader bool + + // Fields assigned at completion + stop time.Time + duration time.Duration + finalName string // Full finalized metric name + zone internal.ApdexZone + apdexThreshold time.Duration +} + +func newTxn(input txnInput, name string) *txn { + txn := &txn{ + txnInput: input, + start: time.Now(), + name: name, + isWeb: nil != input.Request, + attrs: internal.NewAttributes(input.attrConfig), + } + if nil != txn.Request { + txn.queuing = internal.QueueDuration(input.Request.Header, txn.start) + internal.RequestAgentAttributes(txn.attrs, input.Request) + } + txn.attrs.Agent.HostDisplayName = txn.Config.HostDisplayName + txn.tracer.Enabled = txn.txnTracesEnabled() + txn.tracer.SegmentThreshold = txn.Config.TransactionTracer.SegmentThreshold + txn.tracer.StackTraceThreshold = txn.Config.TransactionTracer.StackTraceThreshold + txn.tracer.SlowQueriesEnabled = txn.slowQueriesEnabled() + txn.tracer.SlowQueryThreshold = txn.Config.DatastoreTracer.SlowQuery.Threshold + + return txn +} + +func (txn *txn) slowQueriesEnabled() bool { + return txn.Config.DatastoreTracer.SlowQuery.Enabled && + txn.Reply.CollectTraces +} + +func (txn *txn) txnTracesEnabled() bool { + return txn.Config.TransactionTracer.Enabled && + txn.Reply.CollectTraces +} + +func (txn *txn) txnEventsEnabled() bool { + return txn.Config.TransactionEvents.Enabled && + 
txn.Reply.CollectAnalyticsEvents +} + +func (txn *txn) errorEventsEnabled() bool { + return txn.Config.ErrorCollector.CaptureEvents && + txn.Reply.CollectErrorEvents +} + +func (txn *txn) freezeName() { + if txn.ignore || ("" != txn.finalName) { + return + } + + txn.finalName = internal.CreateFullTxnName(txn.name, txn.Reply, txn.isWeb) + if "" == txn.finalName { + txn.ignore = true + } +} + +func (txn *txn) getsApdex() bool { + return txn.isWeb +} + +func (txn *txn) txnTraceThreshold() time.Duration { + if txn.Config.TransactionTracer.Threshold.IsApdexFailing { + return internal.ApdexFailingThreshold(txn.apdexThreshold) + } + return txn.Config.TransactionTracer.Threshold.Duration +} + +func (txn *txn) shouldSaveTrace() bool { + return txn.txnTracesEnabled() && + (txn.duration >= txn.txnTraceThreshold()) +} + +func (txn *txn) hasErrors() bool { + return len(txn.errors) > 0 +} + +func (txn *txn) MergeIntoHarvest(h *internal.Harvest) { + exclusive := time.Duration(0) + children := internal.TracerRootChildren(&txn.tracer) + if txn.duration > children { + exclusive = txn.duration - children + } + + internal.CreateTxnMetrics(internal.CreateTxnMetricsArgs{ + IsWeb: txn.isWeb, + Duration: txn.duration, + Exclusive: exclusive, + Name: txn.finalName, + Zone: txn.zone, + ApdexThreshold: txn.apdexThreshold, + HasErrors: txn.hasErrors(), + Queueing: txn.queuing, + }, h.Metrics) + + internal.MergeBreakdownMetrics(&txn.tracer, h.Metrics, txn.finalName, txn.isWeb) + + if txn.txnEventsEnabled() { + h.TxnEvents.AddTxnEvent(&internal.TxnEvent{ + Name: txn.finalName, + Timestamp: txn.start, + Duration: txn.duration, + Queuing: txn.queuing, + Zone: txn.zone, + Attrs: txn.attrs, + DatastoreExternalTotals: txn.tracer.DatastoreExternalTotals, + }) + } + + requestURI := "" + if nil != txn.Request && nil != txn.Request.URL { + requestURI = internal.SafeURL(txn.Request.URL) + } + + internal.MergeTxnErrors(h.ErrorTraces, txn.errors, txn.finalName, requestURI, txn.attrs) + + if 
 txn.errorEventsEnabled() {
		for _, e := range txn.errors {
			h.ErrorEvents.Add(&internal.ErrorEvent{
				Klass:    e.Klass,
				Msg:      e.Msg,
				When:     e.When,
				TxnName:  txn.finalName,
				Duration: txn.duration,
				Queuing:  txn.queuing,
				Attrs:    txn.attrs,
				DatastoreExternalTotals: txn.tracer.DatastoreExternalTotals,
			})
		}
	}

	if txn.shouldSaveTrace() {
		h.TxnTraces.Witness(internal.HarvestTrace{
			Start:                txn.start,
			Duration:             txn.duration,
			MetricName:           txn.finalName,
			CleanURL:             requestURI,
			Trace:                txn.tracer.TxnTrace,
			ForcePersist:         false,
			GUID:                 "",
			SyntheticsResourceID: "",
			Attrs:                txn.attrs,
		})
	}

	if nil != txn.tracer.SlowQueries {
		h.SlowSQLs.Merge(txn.tracer.SlowQueries, txn.finalName, requestURI)
	}
}

// responseCodeIsError reports whether an HTTP response code should be
// recorded as an error: 4xx/5xx codes, minus the configured ignore list.
func responseCodeIsError(cfg *Config, code int) bool {
	if code < http.StatusBadRequest { // 400
		return false
	}
	for _, ignoreCode := range cfg.ErrorCollector.IgnoreStatusCodes {
		if code == ignoreCode {
			return false
		}
	}
	return true
}

// headersJustWritten captures response header and code attributes (and a
// response-code error, when applicable) the first time headers are
// written.  The wroteHeader flag makes repeated calls no-ops.  Callers
// hold txn's lock.
func headersJustWritten(txn *txn, code int) {
	if txn.finished {
		return
	}
	if txn.wroteHeader {
		return
	}
	txn.wroteHeader = true

	internal.ResponseHeaderAttributes(txn.attrs, txn.W.Header())
	internal.ResponseCodeAttribute(txn.attrs, code)

	if responseCodeIsError(&txn.Config, code) {
		e := internal.TxnErrorFromResponseCode(time.Now(), code)
		e.Stack = internal.GetStackTrace(1)
		txn.noticeErrorInternal(e)
	}
}

func (txn *txn) Header() http.Header { return txn.W.Header() }

// Write passes through to the wrapped ResponseWriter and records a 200
// response when the handler writes without calling WriteHeader first.
func (txn *txn) Write(b []byte) (int, error) {
	n, err := txn.W.Write(b)

	txn.Lock()
	defer txn.Unlock()

	headersJustWritten(txn, http.StatusOK)

	return n, err
}

func (txn *txn) WriteHeader(code int) {
	txn.W.WriteHeader(code)

	txn.Lock()
	defer txn.Unlock()

	headersJustWritten(txn, code)
}

// End finishes the transaction, freezing its name, computing apdex, and
// handing it to the consumer.  It also notices a panic in flight (via
// recover) before re-raising it.
func (txn *txn) End() error {
	txn.Lock()
	defer txn.Unlock()

	if txn.finished {
		return errAlreadyEnded
	}

	txn.finished = true

	r := recover()
	if nil != r {
		e :=
internal.TxnErrorFromPanic(time.Now(), r) + e.Stack = internal.GetStackTrace(0) + txn.noticeErrorInternal(e) + } + + txn.stop = time.Now() + txn.duration = txn.stop.Sub(txn.start) + + txn.freezeName() + + // Assign apdexThreshold regardless of whether or not the transaction + // gets apdex since it may be used to calculate the trace threshold. + txn.apdexThreshold = internal.CalculateApdexThreshold(txn.Reply, txn.finalName) + + if txn.getsApdex() { + if txn.hasErrors() { + txn.zone = internal.ApdexFailing + } else { + txn.zone = internal.CalculateApdexZone(txn.apdexThreshold, txn.duration) + } + } else { + txn.zone = internal.ApdexNone + } + + if txn.Config.Logger.DebugEnabled() { + txn.Config.Logger.Debug("transaction ended", map[string]interface{}{ + "name": txn.finalName, + "duration_ms": txn.duration.Seconds() * 1000.0, + "ignored": txn.ignore, + "run": txn.Reply.RunID, + }) + } + + if !txn.ignore { + txn.Consumer.Consume(txn.Reply.RunID, txn) + } + + // Note that if a consumer uses `panic(nil)`, the panic will not + // propagate. 
	if nil != r {
		panic(r)
	}

	return nil
}

// AddAttribute records a user attribute on the transaction, sent to all
// attribute destinations.
func (txn *txn) AddAttribute(name string, value interface{}) error {
	txn.Lock()
	defer txn.Unlock()

	if txn.finished {
		return errAlreadyEnded
	}

	return internal.AddUserAttribute(txn.attrs, name, value, internal.DestAll)
}

// Errors returned by the public transaction methods.
var (
	errorsLocallyDisabled  = errors.New("errors locally disabled")
	errorsRemotelyDisabled = errors.New("errors remotely disabled")
	errNilError            = errors.New("nil error")
	errAlreadyEnded        = errors.New("transaction has already ended")
)

const (
	// highSecurityErrorMsg replaces error messages when high security
	// mode is enabled.
	highSecurityErrorMsg = "message removed by high security setting"
)

// noticeErrorInternal records err subject to the local and remote error
// collector settings.  The caller must hold txn's lock.
func (txn *txn) noticeErrorInternal(err internal.TxnError) error {
	if !txn.Config.ErrorCollector.Enabled {
		return errorsLocallyDisabled
	}

	if !txn.Reply.CollectErrors {
		return errorsRemotelyDisabled
	}

	if nil == txn.errors {
		txn.errors = internal.NewTxnErrors(internal.MaxTxnErrors)
	}

	if txn.Config.HighSecurity {
		// Scrub the message; high security mode forbids sending it.
		err.Msg = highSecurityErrorMsg
	}

	txn.errors.Add(err)

	return nil
}

// NoticeError records an error against the transaction with a captured
// stack trace.
func (txn *txn) NoticeError(err error) error {
	txn.Lock()
	defer txn.Unlock()

	if txn.finished {
		return errAlreadyEnded
	}

	if nil == err {
		return errNilError
	}

	e := internal.TxnErrorFromError(time.Now(), err)
	e.Stack = internal.GetStackTrace(2)
	return txn.noticeErrorInternal(e)
}

// SetName renames the transaction (effective until freezeName runs at
// End).
func (txn *txn) SetName(name string) error {
	txn.Lock()
	defer txn.Unlock()

	if txn.finished {
		return errAlreadyEnded
	}

	txn.name = name
	return nil
}

// Ignore marks the transaction so none of its data is recorded.
func (txn *txn) Ignore() error {
	txn.Lock()
	defer txn.Unlock()

	if txn.finished {
		return errAlreadyEnded
	}
	txn.ignore = true
	return nil
}

// StartSegmentNow marks a segment start; after End a zero-valued start
// is returned, which is safe to end.
func (txn *txn) StartSegmentNow() SegmentStartTime {
	var s internal.SegmentStartTime
	txn.Lock()
	if !txn.finished {
		s = internal.StartSegment(&txn.tracer, time.Now())
	}
	txn.Unlock()
	return SegmentStartTime{
		segment: segment{
			start: s,
			txn:   txn,
		},
	}
}

type segment struct {
	start
internal.SegmentStartTime + txn *txn +} + +func endSegment(s Segment) { + txn := s.StartTime.txn + if nil == txn { + return + } + txn.Lock() + if !txn.finished { + internal.EndBasicSegment(&txn.tracer, s.StartTime.start, time.Now(), s.Name) + } + txn.Unlock() +} + +func endDatastore(s DatastoreSegment) { + txn := s.StartTime.txn + if nil == txn { + return + } + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return + } + if txn.Config.HighSecurity { + s.QueryParameters = nil + } + if !txn.Config.DatastoreTracer.QueryParameters.Enabled { + s.QueryParameters = nil + } + if !txn.Config.DatastoreTracer.DatabaseNameReporting.Enabled { + s.DatabaseName = "" + } + if !txn.Config.DatastoreTracer.InstanceReporting.Enabled { + s.Host = "" + s.PortPathOrID = "" + } + internal.EndDatastoreSegment(internal.EndDatastoreParams{ + Tracer: &txn.tracer, + Start: s.StartTime.start, + Now: time.Now(), + Product: string(s.Product), + Collection: s.Collection, + Operation: s.Operation, + ParameterizedQuery: s.ParameterizedQuery, + QueryParameters: s.QueryParameters, + Host: s.Host, + PortPathOrID: s.PortPathOrID, + Database: s.DatabaseName, + }) +} + +func externalSegmentURL(s ExternalSegment) *url.URL { + if "" != s.URL { + u, _ := url.Parse(s.URL) + return u + } + r := s.Request + if nil != s.Response && nil != s.Response.Request { + r = s.Response.Request + } + if r != nil { + return r.URL + } + return nil +} + +func endExternal(s ExternalSegment) { + txn := s.StartTime.txn + if nil == txn { + return + } + txn.Lock() + defer txn.Unlock() + + if txn.finished { + return + } + internal.EndExternalSegment(&txn.tracer, s.StartTime.start, time.Now(), externalSegmentURL(s)) +} diff --git a/vendor/github.com/newrelic/go-agent/log.go b/vendor/github.com/newrelic/go-agent/log.go new file mode 100644 index 0000000000..56b0936169 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/log.go @@ -0,0 +1,30 @@ +package newrelic + +import ( + "io" + + 
"github.com/newrelic/go-agent/internal/logger" +) + +// Logger is the interface that is used for logging in the go-agent. Assign the +// Config.Logger field to the Logger you wish to use. Loggers must be safe for +// use in multiple goroutines. +// +// For an example implementation, see: _integrations/nrlogrus/nrlogrus.go +type Logger interface { + Error(msg string, context map[string]interface{}) + Warn(msg string, context map[string]interface{}) + Info(msg string, context map[string]interface{}) + Debug(msg string, context map[string]interface{}) + DebugEnabled() bool +} + +// NewLogger creates a basic Logger at info level. +func NewLogger(w io.Writer) Logger { + return logger.New(w, false) +} + +// NewDebugLogger creates a basic Logger at debug level. +func NewDebugLogger(w io.Writer) Logger { + return logger.New(w, true) +} diff --git a/vendor/github.com/newrelic/go-agent/segments.go b/vendor/github.com/newrelic/go-agent/segments.go new file mode 100644 index 0000000000..3dd0158265 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/segments.go @@ -0,0 +1,113 @@ +package newrelic + +import "net/http" + +// SegmentStartTime is created by Transaction.StartSegmentNow and marks the +// beginning of a segment. A segment with a zero-valued SegmentStartTime may +// safely be ended. +type SegmentStartTime struct{ segment } + +// Segment is used to instrument functions, methods, and blocks of code. The +// easiest way use Segment is the StartSegment function. +type Segment struct { + StartTime SegmentStartTime + Name string +} + +// DatastoreSegment is used to instrument calls to databases and object stores. +// Here is an example: +// +// defer newrelic.DatastoreSegment{ +// StartTime: newrelic.StartSegmentNow(txn), +// Product: newrelic.DatastoreMySQL, +// Collection: "my_table", +// Operation: "SELECT", +// }.End() +// +type DatastoreSegment struct { + StartTime SegmentStartTime + // Product is the datastore type. See the constants in datastore.go. 
+ Product DatastoreProduct + // Collection is the table or group. + Collection string + // Operation is the relevant action, e.g. "SELECT" or "GET". + Operation string + // ParameterizedQuery may be set to the query being performed. It must + // not contain any raw parameters, only placeholders. + ParameterizedQuery string + // QueryParameters may be used to provide query parameters. Care should + // be taken to only provide parameters which are not sensitive. + // QueryParameters are ignored in high security mode. + QueryParameters map[string]interface{} + // Host is the name of the server hosting the datastore. + Host string + // PortPathOrID can represent either the port, path, or id of the + // datastore being connected to. + PortPathOrID string + // DatabaseName is name of database where the current query is being + // executed. + DatabaseName string +} + +// ExternalSegment is used to instrument external calls. StartExternalSegment +// is recommended when you have access to an http.Request. +type ExternalSegment struct { + StartTime SegmentStartTime + Request *http.Request + Response *http.Response + // If you do not have access to the request, this URL field should be + // used to indicate the endpoint. + URL string +} + +// End finishes the segment. +func (s Segment) End() { endSegment(s) } + +// End finishes the datastore segment. +func (s DatastoreSegment) End() { endDatastore(s) } + +// End finishes the external segment. +func (s ExternalSegment) End() { endExternal(s) } + +// StartSegmentNow helps avoid Transaction nil checks. +func StartSegmentNow(txn Transaction) SegmentStartTime { + if nil != txn { + return txn.StartSegmentNow() + } + return SegmentStartTime{} +} + +// StartSegment makes it easy to instrument segments. To time a function, do +// the following: +// +// func timeMe(txn newrelic.Transaction) { +// defer newrelic.StartSegment(txn, "timeMe").End() +// // ... function code here ... 
+// } +// +// To time a block of code, do the following: +// +// segment := StartSegment(txn, "myBlock") +// // ... code you want to time here ... +// segment.End() +// +func StartSegment(txn Transaction, name string) Segment { + return Segment{ + StartTime: StartSegmentNow(txn), + Name: name, + } +} + +// StartExternalSegment makes it easier to instrument external calls. +// +// segment := newrelic.StartExternalSegment(txn, request) +// resp, err := client.Do(request) +// segment.Response = resp +// segment.End() +// +func StartExternalSegment(txn Transaction, request *http.Request) ExternalSegment { + return ExternalSegment{ + StartTime: StartSegmentNow(txn), + Request: request, + } +} diff --git a/vendor/github.com/newrelic/go-agent/transaction.go b/vendor/github.com/newrelic/go-agent/transaction.go new file mode 100644 index 0000000000..aef66d8496 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/transaction.go @@ -0,0 +1,45 @@ +package newrelic + +import "net/http" + +// Transaction represents a request or a background task. +// Each Transaction should only be used in a single goroutine. +type Transaction interface { + // If StartTransaction is called with a non-nil http.ResponseWriter then + // the Transaction may be used in its place. This allows + // instrumentation of the response code and response headers. + http.ResponseWriter + + // End finishes the current transaction, stopping all further + // instrumentation. Subsequent calls to End will have no effect. + End() error + + // Ignore ensures that this transaction's data will not be recorded. + Ignore() error + + // SetName names the transaction. Transactions will not be grouped + // usefully if too many unique names are used. + SetName(name string) error + + // NoticeError records an error. The first five errors per transaction + // are recorded (this behavior is subject to potential change in the + // future). 
+ NoticeError(err error) error + + // AddAttribute adds a key value pair to the current transaction. This + // information is attached to errors, transaction events, and error + // events. The key must contain fewer than than 255 bytes. The value + // must be a number, string, or boolean. Attribute configuration is + // applied (see config.go). + // + // For more information, see: + // https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/collect-custom-attributes + AddAttribute(key string, value interface{}) error + + // StartSegmentNow allows the timing of functions, external calls, and + // datastore calls. The segments of each transaction MUST be used in a + // single goroutine. Consumers are encouraged to use the + // `StartSegmentNow` functions which checks if the Transaction is nil. + // See segments.go + StartSegmentNow() SegmentStartTime +} diff --git a/vendor/github.com/newrelic/go-agent/version.go b/vendor/github.com/newrelic/go-agent/version.go new file mode 100644 index 0000000000..aabe4480a8 --- /dev/null +++ b/vendor/github.com/newrelic/go-agent/version.go @@ -0,0 +1,10 @@ +package newrelic + +const ( + major = "1" + minor = "5" + patch = "0" + + // Version is the full string version of this Go Agent. + Version = major + "." + minor + "." + patch +) diff --git a/vendor/github.com/paultyng/go-newrelic/LICENSE b/vendor/github.com/paultyng/go-newrelic/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/paultyng/go-newrelic/api/alert_channels.go b/vendor/github.com/paultyng/go-newrelic/api/alert_channels.go new file mode 100644 index 0000000000..edb078c281 --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/api/alert_channels.go @@ -0,0 +1,87 @@ +package api + +import ( + "fmt" + "net/url" +) + +func (c *Client) queryAlertChannels() ([]AlertChannel, error) { + channels := []AlertChannel{} + + reqURL, err := url.Parse("/alerts_channels.json") + if err != nil { + return nil, err + } + + nextPath := reqURL.String() + + for nextPath != "" { + resp := struct { + Channels []AlertChannel `json:"channels,omitempty"` + }{} + + nextPath, err = c.Do("GET", nextPath, nil, &resp) + if err != nil { + return nil, err + } + + channels = append(channels, resp.Channels...) 
+	}
+
+	return channels, nil
+}
+
+// GetAlertChannel returns a specific alert channel by ID
+func (c *Client) GetAlertChannel(id int) (*AlertChannel, error) {
+	channels, err := c.queryAlertChannels()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, channel := range channels {
+		if channel.ID == id {
+			return &channel, nil
+		}
+	}
+
+	return nil, ErrNotFound
+}
+
+// ListAlertChannels returns all alert channels for the account.
+func (c *Client) ListAlertChannels() ([]AlertChannel, error) {
+	return c.queryAlertChannels()
+}
+
+func (c *Client) CreateAlertChannel(channel AlertChannel) (*AlertChannel, error) {
+	// TODO: support attaching policy ID's here?
+	// qs := map[string]string{
+	// 	"policy_ids[]": channel.Links.PolicyIDs,
+	// }
+
+	if len(channel.Links.PolicyIDs) > 0 {
+		return nil, fmt.Errorf("You cannot create an alert channel with policy IDs, you must attach policy IDs after creation.")
+	}
+
+	req := struct {
+		Channel AlertChannel `json:"channel"`
+	}{
+		Channel: channel,
+	}
+
+	resp := struct {
+		Channels []AlertChannel `json:"channels,omitempty"`
+	}{}
+
+	_, err := c.Do("POST", "/alerts_channels.json", req, &resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &resp.Channels[0], nil
+}
+
+func (c *Client) DeleteAlertChannel(id int) error {
+	u := &url.URL{Path: fmt.Sprintf("/alerts_channels/%v.json", id)}
+	_, err := c.Do("DELETE", u.String(), nil, nil)
+	return err
+}
diff --git a/vendor/github.com/paultyng/go-newrelic/api/alert_conditions.go b/vendor/github.com/paultyng/go-newrelic/api/alert_conditions.go
new file mode 100644
index 0000000000..45c5a9f891
--- /dev/null
+++ b/vendor/github.com/paultyng/go-newrelic/api/alert_conditions.go
@@ -0,0 +1,117 @@
+package api
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+)
+
+func (c *Client) queryAlertConditions(policyID int) ([]AlertCondition, error) {
+	conditions := []AlertCondition{}
+
+	reqURL, err := url.Parse("/alerts_conditions.json")
+	if err != nil {
+		return nil, err
+	}
+
+	qs :=
reqURL.Query()
+	qs.Set("policy_id", strconv.Itoa(policyID))
+
+	reqURL.RawQuery = qs.Encode()
+
+	nextPath := reqURL.String()
+
+	for nextPath != "" {
+		resp := struct {
+			Conditions []AlertCondition `json:"conditions,omitempty"`
+		}{}
+
+		nextPath, err = c.Do("GET", nextPath, nil, &resp)
+		if err != nil {
+			return nil, err
+		}
+
+		// NOTE(review): the original ranged by value (`for _, c := range ...`)
+		// and assigned to the loop-variable copy, so PolicyID was never set
+		// on the returned conditions (and the receiver `c` was shadowed).
+		// Assign through the slice index instead.
+		for i := range resp.Conditions {
+			resp.Conditions[i].PolicyID = policyID
+		}
+
+		conditions = append(conditions, resp.Conditions...)
+	}
+
+	return conditions, nil
+}
+
+func (c *Client) GetAlertCondition(policyID int, id int) (*AlertCondition, error) {
+	conditions, err := c.queryAlertConditions(policyID)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, condition := range conditions {
+		if condition.ID == id {
+			return &condition, nil
+		}
+	}
+
+	return nil, ErrNotFound
+}
+
+// ListAlertConditions returns alert conditions for the specified policy.
+func (c *Client) ListAlertConditions(policyID int) ([]AlertCondition, error) {
+	return c.queryAlertConditions(policyID)
+}
+
+func (c *Client) CreateAlertCondition(condition AlertCondition) (*AlertCondition, error) {
+	policyID := condition.PolicyID
+
+	req := struct {
+		Condition AlertCondition `json:"condition"`
+	}{
+		Condition: condition,
+	}
+
+	resp := struct {
+		Condition AlertCondition `json:"condition,omitempty"`
+	}{}
+
+	u := &url.URL{Path: fmt.Sprintf("/alerts_conditions/policies/%v.json", policyID)}
+	_, err := c.Do("POST", u.String(), req, &resp)
+	if err != nil {
+		return nil, err
+	}
+
+	resp.Condition.PolicyID = policyID
+
+	return &resp.Condition, nil
+}
+
+func (c *Client) UpdateAlertCondition(condition AlertCondition) (*AlertCondition, error) {
+	policyID := condition.PolicyID
+	id := condition.ID
+
+	req := struct {
+		Condition AlertCondition `json:"condition"`
+	}{
+		Condition: condition,
+	}
+
+	resp := struct {
+		Condition AlertCondition `json:"condition,omitempty"`
+	}{}
+
+	u := &url.URL{Path: fmt.Sprintf("/alerts_conditions/%v.json", id)}
+	_, err := c.Do("PUT", u.String(),
req, &resp) + if err != nil { + return nil, err + } + + resp.Condition.PolicyID = policyID + + return &resp.Condition, nil +} + +func (c *Client) DeleteAlertCondition(policyID int, id int) error { + u := &url.URL{Path: fmt.Sprintf("/alerts_conditions/%v.json", id)} + _, err := c.Do("DELETE", u.String(), nil, nil) + return err +} diff --git a/vendor/github.com/paultyng/go-newrelic/api/alert_policies.go b/vendor/github.com/paultyng/go-newrelic/api/alert_policies.go new file mode 100644 index 0000000000..3ef92f954b --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/api/alert_policies.go @@ -0,0 +1,86 @@ +package api + +import ( + "fmt" + "net/url" +) + +func (c *Client) queryAlertPolicies(name *string) ([]AlertPolicy, error) { + policies := []AlertPolicy{} + + reqURL, err := url.Parse("/alerts_policies.json") + if err != nil { + return nil, err + } + + qs := reqURL.Query() + if name != nil { + qs.Set("filter[name]", *name) + } + reqURL.RawQuery = qs.Encode() + + nextPath := reqURL.String() + + for nextPath != "" { + resp := struct { + Policies []AlertPolicy `json:"policies,omitempty"` + }{} + + nextPath, err = c.Do("GET", nextPath, nil, &resp) + if err != nil { + return nil, err + } + + policies = append(policies, resp.Policies...) + } + + return policies, nil +} + +// GetAlertPolicy returns a specific alert policy by ID +func (c *Client) GetAlertPolicy(id int) (*AlertPolicy, error) { + policies, err := c.queryAlertPolicies(nil) + if err != nil { + return nil, err + } + + for _, policy := range policies { + if policy.ID == id { + return &policy, nil + } + } + + return nil, ErrNotFound +} + +// ListAlertPolicies returns all alert policies for the account. +func (c *Client) ListAlertPolicies() ([]AlertPolicy, error) { + return c.queryAlertPolicies(nil) +} + +// CreateAlertPolicy creates a new alert policy for the account. 
+func (c *Client) CreateAlertPolicy(policy AlertPolicy) (*AlertPolicy, error) { + req := struct { + Policy AlertPolicy `json:"policy"` + }{ + Policy: policy, + } + + resp := struct { + Policy AlertPolicy `json:"policy,omitempty"` + }{} + + _, err := c.Do("POST", "/alerts_policies.json", req, &resp) + if err != nil { + return nil, err + } + + return &resp.Policy, nil +} + +// DeleteAlertPolicy deletes an existing alert policy from the account. +func (c *Client) DeleteAlertPolicy(id int) error { + u := &url.URL{Path: fmt.Sprintf("/alerts_policies/%v.json", id)} + _, err := c.Do("DELETE", u.String(), nil, nil) + return err +} diff --git a/vendor/github.com/paultyng/go-newrelic/api/alert_policy_channels.go b/vendor/github.com/paultyng/go-newrelic/api/alert_policy_channels.go new file mode 100644 index 0000000000..3c822131f8 --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/api/alert_policy_channels.go @@ -0,0 +1,64 @@ +package api + +import ( + "net/url" + "regexp" + "strconv" +) + +func (c *Client) UpdateAlertPolicyChannels(policyID int, channelIDs []int) error { + channelIDStrings := make([]string, len(channelIDs)) + + for i, channelID := range channelIDs { + channelIDStrings[i] = strconv.Itoa(channelID) + } + + reqURL, err := url.Parse("/alerts_policy_channels.json") + if err != nil { + return err + } + + qs := url.Values{ + "policy_id": []string{strconv.Itoa(policyID)}, + "channel_ids": channelIDStrings, + } + reqURL.RawQuery = qs.Encode() + + nextPath := reqURL.String() + + _, err = c.Do("PUT", nextPath, nil, nil) + return err +} + +func (c *Client) DeleteAlertPolicyChannel(policyID int, channelID int) error { + reqURL, err := url.Parse("/alerts_policy_channels.json") + if err != nil { + return err + } + + qs := url.Values{ + "policy_id": []string{strconv.Itoa(policyID)}, + "channel_id": []string{strconv.Itoa(channelID)}, + } + reqURL.RawQuery = qs.Encode() + + nextPath := reqURL.String() + + _, err = c.Do("DELETE", nextPath, nil, nil) + if err != nil { + 
if apiErr, ok := err.(*ErrorResponse); ok { + matched, err := regexp.MatchString("Alerts policy with ID: \\d+ is not valid.", apiErr.Detail.Title) + if err != nil { + return err + } + + if matched { + return ErrNotFound + } + } + + return err + } + + return nil +} diff --git a/vendor/github.com/paultyng/go-newrelic/api/applications.go b/vendor/github.com/paultyng/go-newrelic/api/applications.go new file mode 100644 index 0000000000..54af0bda28 --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/api/applications.go @@ -0,0 +1,58 @@ +package api + +import ( + "net/url" + "strconv" +) + +type applicationsFilters struct { + Name *string + Host *string + IDs []int + Language *string +} + +func (c *Client) queryApplications(filters applicationsFilters) ([]Application, error) { + applications := []Application{} + + reqURL, err := url.Parse("/applications.json") + if err != nil { + return nil, err + } + + qs := reqURL.Query() + if filters.Name != nil { + qs.Set("filter[name]", *filters.Name) + } + if filters.Host != nil { + qs.Set("filter[host]", *filters.Host) + } + for _, id := range filters.IDs { + qs.Add("filter[ids]", strconv.Itoa(id)) + } + if filters.Language != nil { + qs.Set("filter[language]", *filters.Language) + } + reqURL.RawQuery = qs.Encode() + + nextPath := reqURL.String() + + for nextPath != "" { + resp := struct { + Applications []Application `json:"applications,omitempty"` + }{} + + nextPath, err = c.Do("GET", nextPath, nil, &resp) + if err != nil { + return nil, err + } + + applications = append(applications, resp.Applications...) 
+ } + + return applications, nil +} + +func (c *Client) ListApplications() ([]Application, error) { + return c.queryApplications(applicationsFilters{}) +} diff --git a/vendor/github.com/paultyng/go-newrelic/api/client.go b/vendor/github.com/paultyng/go-newrelic/api/client.go new file mode 100644 index 0000000000..e46d6823c6 --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/api/client.go @@ -0,0 +1,108 @@ +package api + +import ( + "fmt" + + "github.com/tomnomnom/linkheader" + + resty "gopkg.in/resty.v0" +) + +// Client represents the client state for the API. +type Client struct { + RestyClient *resty.Client +} + +type ErrorResponse struct { + Detail *ErrorDetail `json:"error,omitempty"` +} + +func (e *ErrorResponse) Error() string { + if e != nil && e.Detail != nil { + return e.Detail.Title + } + return "Unknown error" +} + +type ErrorDetail struct { + Title string `json:"title,omitempty"` +} + +// Config contains all the configuration data for the API Client +type Config struct { + APIKey string + BaseURL string + Debug bool +} + +// New returns a new Client for the specified apiKey. +func New(config Config) Client { + r := resty.New() + + baseURL := config.BaseURL + if baseURL == "" { + baseURL = "https://api.newrelic.com/v2" + } + + r.SetHeader("X-Api-Key", config.APIKey) + r.SetHostURL(baseURL) + + if config.Debug { + r.SetDebug(true) + } + + c := Client{ + RestyClient: r, + } + + return c +} + +// Do exectes an API request with the specified parameters. +func (c *Client) Do(method string, path string, body interface{}, response interface{}) (string, error) { + r := c.RestyClient.R(). 
+ SetError(&ErrorResponse{}) + + if body != nil { + r = r.SetBody(body) + } + + if response != nil { + r = r.SetResult(response) + } + + apiResponse, err := r.Execute(method, path) + + if err != nil { + return "", err + } + + nextPath := "" + header := apiResponse.Header().Get("Link") + if header != "" { + links := linkheader.Parse(header) + + for _, link := range links.FilterByRel("next") { + nextPath = link.URL + break + } + } + + statusClass := apiResponse.StatusCode() / 100 % 10 + + if statusClass == 2 { + return nextPath, nil + } + + rawError := apiResponse.Error() + + if rawError != nil { + apiError := rawError.(*ErrorResponse) + + if apiError.Detail != nil { + return "", apiError + } + } + + return "", fmt.Errorf("Unexpected status %v returned from API", apiResponse.StatusCode()) +} diff --git a/vendor/github.com/paultyng/go-newrelic/api/labels.go b/vendor/github.com/paultyng/go-newrelic/api/labels.go new file mode 100644 index 0000000000..933fd9607b --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/api/labels.go @@ -0,0 +1,79 @@ +package api + +import ( + "fmt" + "net/url" +) + +func (c *Client) queryLabels() ([]Label, error) { + labels := []Label{} + + reqURL, err := url.Parse("/labels.json") + if err != nil { + return nil, err + } + + nextPath := reqURL.String() + + for nextPath != "" { + resp := struct { + Labels []Label `json:"labels,omitempty"` + }{} + + nextPath, err = c.Do("GET", nextPath, nil, &resp) + if err != nil { + return nil, err + } + + labels = append(labels, resp.Labels...) + } + + return labels, nil +} + +func (c *Client) GetLabel(key string) (*Label, error) { + labels, err := c.queryLabels() + if err != nil { + return nil, err + } + + for _, label := range labels { + if label.Key == key { + return &label, nil + } + } + + return nil, ErrNotFound +} + +// ListLabels returns the labels for the account. +func (c *Client) ListLabels() ([]Label, error) { + return c.queryLabels() +} + +// CreateLabel creates a new label for the account. 
+func (c *Client) CreateLabel(label Label) error { + if label.Links.Applications == nil { + label.Links.Applications = make([]int, 0) + } + + if label.Links.Servers == nil { + label.Links.Servers = make([]int, 0) + } + + req := struct { + Label Label `json:"label,omitempty"` + }{ + Label: label, + } + + _, err := c.Do("PUT", "/labels.json", req, nil) + return err +} + +// DeleteLabel deletes a label on the account specified by key. +func (c *Client) DeleteLabel(key string) error { + u := &url.URL{Path: fmt.Sprintf("/labels/%v.json", key)} + _, err := c.Do("DELETE", u.String(), nil, nil) + return err +} diff --git a/vendor/github.com/paultyng/go-newrelic/api/types.go b/vendor/github.com/paultyng/go-newrelic/api/types.go new file mode 100644 index 0000000000..df5f887736 --- /dev/null +++ b/vendor/github.com/paultyng/go-newrelic/api/types.go @@ -0,0 +1,120 @@ +package api + +import "errors" + +var ( + ErrNotFound = errors.New("newrelic: Resource not found") +) + +// LabelLinks represents external references on the Label. +type LabelLinks struct { + Applications []int `json:"applications"` + Servers []int `json:"servers"` +} + +// Label represents a New Relic label. +type Label struct { + Key string `json:"key,omitempty"` + Category string `json:"category,omitempty"` + Name string `json:"name,omitempty"` + Links LabelLinks `json:"links,omitempty"` +} + +// AlertPolicy represents a New Relic alert policy. +type AlertPolicy struct { + ID int `json:"id,omitempty"` + IncidentPreference string `json:"incident_preference,omitempty"` + Name string `json:"name,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` +} + +// AlertConditionUserDefined represents user defined metrics for the New Relic alert condition. 
+type AlertConditionUserDefined struct {
+	Metric        string `json:"metric,omitempty"`
+	ValueFunction string `json:"value_function,omitempty"`
+}
+
+// AlertConditionTerm represents the terms of a New Relic alert condition.
+type AlertConditionTerm struct {
+	Duration     int     `json:"duration,string,omitempty"`
+	Operator     string  `json:"operator,omitempty"`
+	Priority     string  `json:"priority,omitempty"`
+	Threshold    float64 `json:"threshold,string,omitempty"`
+	TimeFunction string  `json:"time_function,omitempty"`
+}
+
+// AlertCondition represents a New Relic alert condition.
+// TODO: custom unmarshal entities to ints?
+// TODO: handle unmarshaling .75 for float (not just 0.75)
+type AlertCondition struct {
+	PolicyID    int                       `json:"-"`
+	ID          int                       `json:"id,omitempty"`
+	Type        string                    `json:"type,omitempty"`
+	Name        string                    `json:"name,omitempty"`
+	Enabled     bool                      `json:"enabled,omitempty"`
+	Entities    []string                  `json:"entities,omitempty"`
+	Metric      string                    `json:"metric,omitempty"`
+	RunbookURL  string                    `json:"runbook_url,omitempty"`
+	Terms       []AlertConditionTerm      `json:"terms,omitempty"`
+	UserDefined AlertConditionUserDefined `json:"user_defined,omitempty"`
+}
+
+// AlertChannelLinks represent the links between policies and alert channels
+type AlertChannelLinks struct {
+	PolicyIDs []int `json:"policy_ids,omitempty"`
+}
+
+// AlertChannel represents a New Relic alert notification channel
+type AlertChannel struct {
+	ID            int                    `json:"id,omitempty"`
+	Name          string                 `json:"name,omitempty"`
+	Type          string                 `json:"type,omitempty"`
+	Configuration map[string]interface{} `json:"configuration,omitempty"`
+	Links         AlertChannelLinks      `json:"links,omitempty"`
+}
+
+type ApplicationSummary struct {
+	ResponseTime            float64 `json:"response_time"`
+	Throughput              float64 `json:"throughput"`
+	ErrorRate               float64 `json:"error_rate"`
+	ApdexTarget             float64 `json:"apdex_target"`
+	ApdexScore              float64 `json:"apdex_score"`
+	HostCount               int     `json:"host_count"`
+	InstanceCount           int     `json:"instance_count"`
+	
ConcurrentInstanceCount int `json:"concurrent_instance_count"` +} + +type ApplicationEndUserSummary struct { + ResponseTime float64 `json:"response_time"` + Throughput float64 `json:"throughput"` + ApdexTarget float64 `json:"apdex_target"` + ApdexScore float64 `json:"apdex_score"` +} + +type ApplicationSettings struct { + AppApdexThreshold float64 `json:"app_apdex_threshold,omitempty"` + EndUserApdexThreshold float64 `json:"end_user_apdex_threshold,omitempty"` + EnableRealUserMonitoring bool `json:"enable_real_user_monitoring,omitempty"` + UseServerSideConfig bool `json:"use_server_side_config,omitempty"` +} + +type ApplicationLinks struct { + ServerIDs []int `json:"servers,omitempty"` + HostIDs []int `json:"application_hosts,omitempty"` + InstanceIDs []int `json:"application_instances,omitempty"` + AlertPolicyID int `json:"alert_policy"` +} + +type Application struct { + ID int `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Language string `json:"language,omitempty"` + HealthStatus string `json:"health_status,omitempty"` + Reporting bool `json:"reporting,omitempty"` + LastReportedAt string `json:"last_reported_at,omitempty"` + Summary ApplicationSummary `json:"application_summary,omitempty"` + EndUserSummary ApplicationEndUserSummary `json:"end_user_summary,omitempty"` + Settings ApplicationSettings `json:"settings,omitempty"` + Links ApplicationLinks `json:"links,omitempty"` +} diff --git a/vendor/github.com/tomnomnom/linkheader/CONTRIBUTING.mkd b/vendor/github.com/tomnomnom/linkheader/CONTRIBUTING.mkd new file mode 100644 index 0000000000..0339bec553 --- /dev/null +++ b/vendor/github.com/tomnomnom/linkheader/CONTRIBUTING.mkd @@ -0,0 +1,10 @@ +# Contributing + +* Raise an issue if appropriate +* Fork the repo +* Bootstrap the dev dependencies (run `./script/bootstrap`) +* Make your changes +* Use [gofmt](https://golang.org/cmd/gofmt/) +* Make sure the tests pass (run `./script/test`) +* Make sure the linters pass (run `./script/lint`) +* Issue a 
pull request diff --git a/vendor/github.com/tomnomnom/linkheader/LICENSE b/vendor/github.com/tomnomnom/linkheader/LICENSE new file mode 100644 index 0000000000..55192df564 --- /dev/null +++ b/vendor/github.com/tomnomnom/linkheader/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Tom Hudson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/tomnomnom/linkheader/README.mkd b/vendor/github.com/tomnomnom/linkheader/README.mkd new file mode 100644 index 0000000000..8331934cf9 --- /dev/null +++ b/vendor/github.com/tomnomnom/linkheader/README.mkd @@ -0,0 +1,35 @@ +# Golang Link Header Parser + +Library for parsing HTTP Link headers. Requires Go 1.2 or higher. + +Docs can be found on [the GoDoc page](https://godoc.org/github.com/tomnomnom/linkheader). 
+ +[![Build Status](https://travis-ci.org/tomnomnom/linkheader.svg)](https://travis-ci.org/tomnomnom/linkheader) + +## Basic Example + +```go +package main + +import ( + "fmt" + + "github.com/tomnomnom/linkheader" +) + +func main() { + header := "<https://api.github.com/user/58276/repos?page=2>; rel=\"next\"," + + "<https://api.github.com/user/58276/repos?page=2>; rel=\"last\"" + links := linkheader.Parse(header) + + for _, link := range links { + fmt.Printf("URL: %s; Rel: %s\n", link.URL, link.Rel) + } +} + +// Output: +// URL: https://api.github.com/user/58276/repos?page=2; Rel: next +// URL: https://api.github.com/user/58276/repos?page=2; Rel: last +``` + + diff --git a/vendor/github.com/tomnomnom/linkheader/main.go b/vendor/github.com/tomnomnom/linkheader/main.go new file mode 100644 index 0000000000..ce3b24a5a0 --- /dev/null +++ b/vendor/github.com/tomnomnom/linkheader/main.go @@ -0,0 +1,143 @@ +// Package linkheader provides functions for parsing HTTP Link headers +package linkheader + +import ( + "fmt" + "strings" +) + +// A Link is a single URL and related parameters +type Link struct { + URL string + Rel string + Params map[string]string +} + +// HasParam returns if a Link has a particular parameter or not +func (l Link) HasParam(key string) bool { + for p := range l.Params { + if p == key { + return true + } + } + return false +} + +// Param returns the value of a parameter if it exists +func (l Link) Param(key string) string { + for k, v := range l.Params { + if key == k { + return v + } + } + return "" +} + +// String returns the string representation of a link +func (l Link) String() string { + + p := make([]string, 0, len(l.Params)) + for k, v := range l.Params { + p = append(p, fmt.Sprintf("%s=\"%s\"", k, v)) + } + if l.Rel != "" { + p = append(p, fmt.Sprintf("%s=\"%s\"", "rel", l.Rel)) + } + return fmt.Sprintf("<%s>; %s", l.URL, strings.Join(p, "; ")) +} + +// Links is a slice of Link structs +type Links []Link + +// FilterByRel filters a group of Links by the provided Rel attribute +func (l Links) FilterByRel(r string) Links { + links :=
make(Links, 0) + for _, link := range l { + if link.Rel == r { + links = append(links, link) + } + } + return links +} + +// String returns the string representation of multiple Links +// for use in HTTP responses etc +func (l Links) String() string { + var strs []string + for _, link := range l { + strs = append(strs, link.String()) + } + return strings.Join(strs, ", ") +} + +// Parse parses a raw Link header in the form: +// ; rel="foo", ; rel="bar"; wat="dis" +// returning a slice of Link structs +func Parse(raw string) Links { + links := make(Links, 0) + + // One chunk: ; rel="foo" + for _, chunk := range strings.Split(raw, ",") { + + link := Link{URL: "", Rel: "", Params: make(map[string]string)} + + // Figure out what each piece of the chunk is + for _, piece := range strings.Split(chunk, ";") { + + piece = strings.Trim(piece, " ") + if piece == "" { + continue + } + + // URL + if piece[0] == '<' && piece[len(piece)-1] == '>' { + link.URL = strings.Trim(piece, "<>") + continue + } + + // Params + key, val := parseParam(piece) + if key == "" { + continue + } + + // Special case for rel + if strings.ToLower(key) == "rel" { + link.Rel = val + } + + link.Params[key] = val + + } + + links = append(links, link) + } + + return links +} + +// ParseMultiple is like Parse, but accepts a slice of headers +// rather than just one header string +func ParseMultiple(headers []string) Links { + links := make(Links, 0) + for _, header := range headers { + links = append(links, Parse(header)...) 
+ } + return links +} + +// parseParam takes a raw param in the form key="val" and +// returns the key and value as seperate strings +func parseParam(raw string) (key, val string) { + + parts := strings.SplitN(raw, "=", 2) + if len(parts) != 2 { + return "", "" + } + + key = parts[0] + val = strings.Trim(parts[1], "\"") + + return key, val + +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 0000000000..3daa8979e1 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 (Internationalized Domain Names for +// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and +// RFC 5894. +package idna // import "golang.org/x/net/idna" + +import ( + "strings" + "unicode/utf8" +) + +// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or +// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". +func ToASCII(s string) (string, error) { + if ascii(s) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if !ascii(label) { + a, err := encode(acePrefix, label) + if err != nil { + return "", err + } + labels[i] = a + } + } + return strings.Join(labels, "."), nil +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". 
+func ToUnicode(s string) (string, error) { + if !strings.Contains(s, acePrefix) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if strings.HasPrefix(label, acePrefix) { + u, err := decode(label[len(acePrefix):]) + if err != nil { + return "", err + } + labels[i] = u + } + } + return strings.Join(labels, "."), nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 0000000000..92e733f6a7 --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,200 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +// decode decodes a string as specified in section 6.2. 
+func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. 
+func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 0000000000..a2d4995292 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,713 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat +// +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 9 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. 
+ childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +const ( + defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" + gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" +) + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + + // validSuffixRE is used to check that the entries in the public suffix + // list are in canonical form (after Punycode encoding). Specifically, + // capital letters are not allowed. + validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) + dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) + + comments = flag.Bool("comments", false, "generate table.go comments, for debugging") + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. 
If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + if *url != defaultURL { + return fmt.Errorf("-version was not specified, and the -url is not the default one") + } + sha, date, err := gitCommit() + if err != nil { + return err + } + *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann = false + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffixRE.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): + case s == "ar" || 
strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". + default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + if err := generate(printReal, &root, "table.go"); err != nil { + return err + } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} + +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := 
http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. 
+const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<= 1< 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. 
+// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + if *comments { + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } else { + fmt.Fprintf(w, "0x%x,\n", c) + } + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. 
The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 0000000000..8bbf3bcd7e --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,135 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// http://publicsuffix.org/. A public suffix is one under which Internet users +// can directly register names. +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. 
+// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is privately +// managed. For example, foo.org and foo.co.uk are ICANN domains, +// foo.dyndns.org and foo.blogspot.co.uk are private domains. +// +// Use cases for distinguishing ICANN domains like foo.com from private +// domains like foo.appspot.com can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, wildcard := domain, len(domain), false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + icann = u&(1<>= nodesBitsICANN + u = children[u&(1<>= childrenBitsLo + hi = u & (1<>= childrenBitsHi + switch u & (1<>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1< +Response Status Code: 200 +Response Status: 200 OK +Response Time: 644.290186ms +Response Recevied At: 2015-09-15 12:05:28.922780103 -0700 PDT +Response Body: { + "args": {}, + "headers": { + "Accept-Encoding": "gzip", + "Host": "httpbin.org", + "User-Agent": "go-resty v0.1 - https://github.com/go-resty/resty" + }, + "origin": "0.0.0.0", + "url": "http://httpbin.org/get" +} +*/ +``` +#### Enhanced GET +```go +resp, err := resty.R(). + SetQueryParams(map[string]string{ + "page_no": "1", + "limit": "20", + "sort":"name", + "order": "asc", + "random":strconv.FormatInt(time.Now().Unix(), 10), + }). + SetHeader("Accept", "application/json"). + SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F"). + Get("/search_result") + + +// Sample of using Request.SetQueryString method +resp, err := resty.R(). 
+ SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more"). + SetHeader("Accept", "application/json"). + SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F"). + Get("/show_product") +``` + +#### Various POST method combinations +```go +// POST JSON string +// No need to set content type, if you have client level setting +resp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + SetBody(`{"username":"testuser", "password":"testpass"}`). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + Post("https://myapp.com/login") + +// POST []byte array +// No need to set content type, if you have client level setting +resp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + Post("https://myapp.com/login") + +// POST Struct, default is JSON content type. No need to set one +resp, err := resty.R(). + SetBody(User{Username: "testuser", Password: "testpass"}). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + SetError(&AuthError{}). // or SetError(AuthError{}). + Post("https://myapp.com/login") + +// POST Map, default is JSON content type. No need to set one +resp, err := resty.R(). + SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + SetError(&AuthError{}). // or SetError(AuthError{}). + Post("https://myapp.com/login") + +// POST of raw bytes for file upload. For example: upload file to Dropbox +fileBytes, _ := ioutil.ReadFile("/Users/jeeva/mydocument.pdf") + +// See we are not setting content-type header, since go-resty automatically detects Content-Type for you +resp, err := resty.R(). + SetBody(fileBytes). + SetContentLength(true). // Dropbox expects this value + SetAuthToken(""). + SetError(&DropboxError{}). 
// or SetError(DropboxError{}).
+ Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // for upload Dropbox supports PUT too
+
+// Note: resty detects Content-Type for request body/payload if content type header is not set.
+// * For struct and map data type defaults to 'application/json'
+// * Fallback is plain text content type
+```
+
+#### Sample PUT
+You can use various combinations of `PUT` method call like demonstrated for `POST`.
+```go
+// Note: This is one sample of PUT method usage, refer POST for more combination
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := resty.R().
+ SetBody(Article{
+ Title: "go-resty",
+ Content: "This is my article content, oh ya!",
+ Author: "Jeevanandam M",
+ Tags: []string{"article", "sample", "resty"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Put("https://myapp.com/article/1234")
+```
+
+#### Sample PATCH
+You can use various combinations of `PATCH` method call like demonstrated for `POST`.
+```go
+// Note: This is one sample of PATCH method usage, refer POST for more combination
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := resty.R().
+ SetBody(Article{
+ Tags: []string{"new tag1", "new tag2"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Patch("https://myapp.com/articles/1234")
+```
+
+#### Sample DELETE, HEAD, OPTIONS
+```go
+// DELETE an article
+// No need to set auth token, error, if you have client level settings
+resp, err := resty.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Delete("https://myapp.com/articles/1234")
+
+// DELETE articles with payload/body as a JSON string
+// No need to set auth token, error, if you have client level settings
+resp, err := resty.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ SetHeader("Content-Type", "application/json").
+ SetBody(`{article_ids: [1002, 1006, 1007, 87683, 45432] }`).
+ Delete("https://myapp.com/articles")
+
+// HEAD of resource
+// No need to set auth token, if you have client level settings
+resp, err := resty.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Head("https://myapp.com/videos/hi-res-video")
+
+// OPTIONS of resource
+// No need to set auth token, if you have client level settings
+resp, err := resty.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Options("https://myapp.com/servers/nyc-dc-01")
+```
+
+### Multipart File(s) upload
+#### Using io.Reader
+```go
+profileImgBytes, _ := ioutil.ReadFile("/Users/jeeva/test-img.png")
+notesBytes, _ := ioutil.ReadFile("/Users/jeeva/text-file.txt")
+
+resp, err := dclr().
+ SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)).
+ SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ }).
+ Post("http://myapp.com/upload")
+```
+
+#### Using File directly from Path
+```go
+// Single file scenario
+resp, err := resty.R().
+ SetFile("profile_img", "/Users/jeeva/test-img.png").
+ Post("http://myapp.com/upload")
+
+// Multiple files scenario
+resp, err := resty.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ Post("http://myapp.com/upload")
+
+// Multipart of form fields and files
+resp, err := resty.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "my city",
+ "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD",
+ }).
+ Post("http://myapp.com/profile")
+```
+
+#### Sample Form submission
+```go
+// just mentioning about POST as an example with simple flow
+// User Login
+resp, err := resty.R().
+ SetFormData(map[string]string{
+ "username": "jeeva",
+ "password": "mypass",
+ }).
+ Post("http://myapp.com/login")
+
+// Followed by profile update
+resp, err := resty.R().
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "new city update",
+ }).
+ Post("http://myapp.com/profile")
+
+// Multi value form data
+criteria := url.Values{
+ "search_criteria": []string{"book", "glass", "pencil"},
+}
+resp, err := resty.R().
+ SetMultiValueFormData(criteria).
+ Post("http://myapp.com/search")
+```
+
+#### Save HTTP Response into File
+```go
+// Setting output directory path. If the directory does not exist then resty creates one!
+// This is an optional setting; if you're planning on using an absolute path in
+// `Request.SetOutput`, both can be used together.
+resty.SetOutputDirectory("/Users/jeeva/Downloads")
+
+// HTTP response gets saved into file, similar to curl -o flag
+_, err := resty.R().
+ SetOutput("plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+
+// OR using absolute path
+// Note: output directory path is not used for an absolute path
+_, err := resty.R().
+ SetOutput("/MyDownloads/plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+```
+
+#### Request and Response Middleware
+Resty provides a middleware ability to manipulate the Request and the Response. It is more flexible than the callback approach.
+```go
+// Registering Request Middleware
+resty.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error {
+ // Now you have access to Client and current Request object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+
+// Registering Response Middleware
+resty.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error {
+ // Now you have access to Client and current Response object
+ // manipulate it as per your need
+
+ return nil // if its success otherwise return error
+ })
+```
+
+#### Redirect Policy
+Resty provides a few ready-to-use redirect policies; it also supports multiple policies together.
+```go
+// Assign Client Redirect Policy. Create one as per your need
+resty.SetRedirectPolicy(resty.FlexibleRedirectPolicy(15))
+
+// Wanna multiple policies such as redirect count, domain name check, etc
+resty.SetRedirectPolicy(resty.FlexibleRedirectPolicy(20),
+ resty.DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+```
+
+##### Custom Redirect Policy
+Implement the [RedirectPolicy](redirect.go#L20) interface and register it with the resty client. Have a look at [redirect.go](redirect.go) for more information.
+```go
+// Using raw func into resty.SetRedirectPolicy
+resty.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+ // return nil for continue redirect otherwise return error to stop/prevent redirect
+ return nil
+}))
+
+//---------------------------------------------------
+
+// Using a struct to create a more flexible redirect policy
+type CustomRedirectPolicy struct {
+ // variables go here
+}
+
+func (c *CustomRedirectPolicy) Apply(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+ // return nil for continue redirect otherwise return error to stop/prevent redirect
+ return nil
+}
+
+// Registering in resty
+resty.SetRedirectPolicy(CustomRedirectPolicy{/* initialize variables */})
+```
+
+#### Custom Root Certificates and Client Certificates
+```go
+// Custom Root certificates, just supply .pem file.
+// you can add one or more root certificates, they get appended
+resty.SetRootCertificate("/path/to/root/pemFile1.pem")
+resty.SetRootCertificate("/path/to/root/pemFile2.pem")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key")
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+resty.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Proxy Settings - Client as well as at Request Level
+Default `Go` supports Proxy via environment variable `HTTP_PROXY`. Resty provides support via `SetProxy` & `RemoveProxy`.
+Choose as per your need.
+
+**Client Level Proxy** settings applied to all the requests
+```go
+// Setting a Proxy URL and Port
+resty.SetProxy("http://proxyserver:8888")
+
+// Want to remove proxy setting
+resty.RemoveProxy()
+```
+**Request Level Proxy** settings, gives control to override at individual request level
+```go
+// Set proxy for current request
+resp, err := c.R().
+ SetProxy("http://sampleproxy:8888").
+ Get("http://httpbin.org/get")
+```
+
+#### Choose REST or HTTP mode
+```go
+// REST mode. This is Default.
+resty.SetRESTMode()
+
+// HTTP mode
+resty.SetHTTPMode()
+```
+
+#### Wanna Multiple Clients
+```go
+// Here you go!
+// Client 1
+client1 := resty.New()
+client1.R().Get("http://httpbin.org")
+// ...
+
+// Client 2
+client2 := resty.New()
+client2.R().Head("http://httpbin.org")
+// ...
+
+// Bend it as per your need!!!
+```
+
+#### Remaining Client Settings & its Options
+```go
+// Unique settings at Client level
+//--------------------------------
+// Enable debug mode
+resty.SetDebug(true)
+
+// Using your custom log writer
+logFile, _ := os.OpenFile("/Users/jeeva/go-resty.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+resty.SetLogger(logFile)
+
+// Assign Client TLSClientConfig
+// One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+resty.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+
+// or One can disable security check (https)
+resty.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+
+// Set client timeout as per your need
+resty.SetTimeout(time.Duration(1 * time.Minute))
+
+
+// You can override all below settings and options at request level if you want to
+//--------------------------------------------------------------------------------
+// Host URL for all request.
So you can use relative URL in the request +resty.SetHostURL("http://httpbin.org") + +// Headers for all request +resty.SetHeader("Accept", "application/json") +resty.SetHeaders(map[string]string{ + "Content-Type": "application/json", + "User-Agent": "My custom User Agent String", + }) + +// Cookies for all request +resty.SetCookie(&http.Cookie{ + Name:"go-resty", + Value:"This is cookie value", + Path: "/", + Domain: "sample.com", + MaxAge: 36000, + HttpOnly: true, + Secure: false, + }) +resty.SetCookies(cookies) + +// URL query parameters for all request +resty.SetQueryParam("user_id", "00001") +resty.SetQueryParams(map[string]string{ // sample of those who use this manner + "api_key": "api-key-here", + "api_secert": "api-secert", + }) +resty.R().SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more") + +// Form data for all request. Typically used with POST and PUT +resty.SetFormData(map[string]string{ + "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F", + }) + +// Basic Auth for all request +resty.SetBasicAuth("myuser", "mypass") + +// Bearer Auth Token for all request +resty.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F") + +// Enabling Content length value for all request +resty.SetContentLength(true) + +// Registering global Error object structure for JSON/XML request +resty.SetError(&Error{}) // or resty.SetError(Error{}) +``` + +#### Unix Socket + +```go +unixSocket := "unix:///var/run/my_socket.sock" + +// Create a Go's http.Transport so we can set it in resty. +transport := http.Transport{ + Dial: func(_, _ string) (net.Conn, error) { + return net.Dial("unix", unixSocket) + }, +} + +// Set the previous transport that we created, set the scheme of the communication to the +// socket and set the unixSocket as the HostURL. +r := resty.New().SetTransport(transport).SetScheme("http").SetHostURL(unixSocket) + +// No need to write the host's URL on the request, just the path. 
+r.R().Get("/index.html") + +``` + +## Versioning +resty releases versions according to [Semantic Versioning](http://semver.org) + +`gopkg.in/resty.vX` points to appropriate tag versions; `X` denotes version number and it's a stable release. It's recommended to use version, for eg. `gopkg.in/resty.v0`. Development takes place at the master branch. Although the code in master should always compile and test successfully, it might break API's. We aim to maintain backwards compatibility, but API's and behaviour might be changed to fix a bug. + + +## Contributing +Welcome! If you find any improvement or issue you want to fix, feel free to send a pull request, I like pull requests that include test cases for fix/enhancement. I have done my best to bring pretty good code coverage. Feel free to write tests. + +BTW, I'd like to know what you think about go-resty. Kindly open an issue or send me an email; it'd mean a lot to me. + +## Author +Jeevanandam M. - jeeva@myjeeva.com + +## Contributors +Have a look on [Contributors](https://github.com/go-resty/resty/graphs/contributors) page. + +## License +resty released under MIT license, refer [LICENSE](LICENSE) file. diff --git a/vendor/gopkg.in/resty.v0/client.go b/vendor/gopkg.in/resty.v0/client.go new file mode 100644 index 0000000000..ec0e54a229 --- /dev/null +++ b/vendor/gopkg.in/resty.v0/client.go @@ -0,0 +1,931 @@ +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package resty + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "log" + "mime/multipart" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" +) + +const ( + // GET HTTP method + GET = "GET" + + // POST HTTP method + POST = "POST" + + // PUT HTTP method + PUT = "PUT" + + // DELETE HTTP method + DELETE = "DELETE" + + // PATCH HTTP method + PATCH = "PATCH" + + // HEAD HTTP method + HEAD = "HEAD" + + // OPTIONS HTTP method + OPTIONS = "OPTIONS" +) + +var ( + hdrUserAgentKey = http.CanonicalHeaderKey("User-Agent") + hdrAcceptKey = http.CanonicalHeaderKey("Accept") + hdrContentTypeKey = http.CanonicalHeaderKey("Content-Type") + hdrContentLengthKey = http.CanonicalHeaderKey("Content-Length") + hdrAuthorizationKey = http.CanonicalHeaderKey("Authorization") + + plainTextType = "text/plain; charset=utf-8" + jsonContentType = "application/json; charset=utf-8" + formContentType = "application/x-www-form-urlencoded" + + jsonCheck = regexp.MustCompile("(?i:[application|text]/json)") + xmlCheck = regexp.MustCompile("(?i:[application|text]/xml)") + + hdrUserAgentValue = "go-resty v%s - https://github.com/go-resty/resty" +) + +// Client type is used for HTTP/RESTful global values +// for all request raised from the client +type Client struct { + HostURL string + QueryParam url.Values + FormData url.Values + Header http.Header + UserInfo *User + Token string + Cookies []*http.Cookie + Error reflect.Type + Debug bool + DisableWarn bool + Log *log.Logger + RetryCount int + RetryConditions []RetryConditionFunc + + httpClient *http.Client + transport *http.Transport + setContentLength bool + isHTTPMode bool + outputDirectory string + scheme string + proxyURL *url.URL + mutex *sync.Mutex + closeConnection bool + beforeRequest []func(*Client, *Request) error + afterResponse []func(*Client, *Response) error +} + +// User type is to hold an username 
and password information +type User struct { + Username, Password string +} + +// SetHostURL method is to set Host URL in the client instance. It will be used with request +// raised from this client with relative URL +// // Setting HTTP address +// resty.SetHostURL("http://myjeeva.com") +// +// // Setting HTTPS address +// resty.SetHostURL("https://myjeeva.com") +// +func (c *Client) SetHostURL(url string) *Client { + c.HostURL = strings.TrimRight(url, "/") + return c +} + +// SetHeader method sets a single header field and its value in the client instance. +// These headers will be applied to all requests raised from this client instance. +// Also it can be overridden at request level header options, see `resty.R().SetHeader` +// or `resty.R().SetHeaders`. +// +// Example: To set `Content-Type` and `Accept` as `application/json` +// +// resty. +// SetHeader("Content-Type", "application/json"). +// SetHeader("Accept", "application/json") +// +func (c *Client) SetHeader(header, value string) *Client { + c.Header.Set(header, value) + return c +} + +// SetHeaders method sets multiple headers field and its values at one go in the client instance. +// These headers will be applied to all requests raised from this client instance. Also it can be +// overridden at request level headers options, see `resty.R().SetHeaders` or `resty.R().SetHeader`. +// +// Example: To set `Content-Type` and `Accept` as `application/json` +// +// resty.SetHeaders(map[string]string{ +// "Content-Type": "application/json", +// "Accept": "application/json", +// }) +// +func (c *Client) SetHeaders(headers map[string]string) *Client { + for h, v := range headers { + c.Header.Set(h, v) + } + + return c +} + +// SetCookie method sets a single cookie in the client instance. +// These cookies will be added to all the request raised from this client instance. 
+// resty.SetCookie(&http.Cookie{ +// Name:"go-resty", +// Value:"This is cookie value", +// Path: "/", +// Domain: "sample.com", +// MaxAge: 36000, +// HttpOnly: true, +// Secure: false, +// }) +// +func (c *Client) SetCookie(hc *http.Cookie) *Client { + c.Cookies = append(c.Cookies, hc) + return c +} + +// SetCookies method sets an array of cookies in the client instance. +// These cookies will be added to all the request raised from this client instance. +// cookies := make([]*http.Cookie, 0) +// +// cookies = append(cookies, &http.Cookie{ +// Name:"go-resty-1", +// Value:"This is cookie 1 value", +// Path: "/", +// Domain: "sample.com", +// MaxAge: 36000, +// HttpOnly: true, +// Secure: false, +// }) +// +// cookies = append(cookies, &http.Cookie{ +// Name:"go-resty-2", +// Value:"This is cookie 2 value", +// Path: "/", +// Domain: "sample.com", +// MaxAge: 36000, +// HttpOnly: true, +// Secure: false, +// }) +// +// // Setting a cookies into resty +// resty.SetCookies(cookies) +// +func (c *Client) SetCookies(cs []*http.Cookie) *Client { + c.Cookies = append(c.Cookies, cs...) + return c +} + +// SetQueryParam method sets single paramater and its value in the client instance. +// It will be formed as query string for the request. For example: `search=kitchen%20papers&size=large` +// in the URL after `?` mark. These query params will be added to all the request raised from +// this client instance. Also it can be overridden at request level Query Param options, +// see `resty.R().SetQueryParam` or `resty.R().SetQueryParams`. +// resty. +// SetQueryParam("search", "kitchen papers"). +// SetQueryParam("size", "large") +// +func (c *Client) SetQueryParam(param, value string) *Client { + c.QueryParam.Add(param, value) + return c +} + +// SetQueryParams method sets multiple paramaters and its values at one go in the client instance. +// It will be formed as query string for the request. For example: `search=kitchen%20papers&size=large` +// in the URL after `?` mark. 
These query params will be added to all the request raised from this +// client instance. Also it can be overridden at request level Query Param options, +// see `resty.R().SetQueryParams` or `resty.R().SetQueryParam`. +// resty.SetQueryParams(map[string]string{ +// "search": "kitchen papers", +// "size": "large", +// }) +// +func (c *Client) SetQueryParams(params map[string]string) *Client { + for p, v := range params { + c.QueryParam.Add(p, v) + } + + return c +} + +// SetFormData method sets Form parameters and its values in the client instance. +// It's applicable only HTTP method `POST` and `PUT` and requets content type would be set as +// `application/x-www-form-urlencoded`. These form data will be added to all the request raised from +// this client instance. Also it can be overridden at request level form data, see `resty.R().SetFormData`. +// resty.SetFormData(map[string]string{ +// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F", +// "user_id": "3455454545", +// }) +// +func (c *Client) SetFormData(data map[string]string) *Client { + for k, v := range data { + c.FormData.Add(k, v) + } + + return c +} + +// SetBasicAuth method sets the basic authentication header in the HTTP request. Example: +// Authorization: Basic +// +// Example: To set the header for username "go-resty" and password "welcome" +// resty.SetBasicAuth("go-resty", "welcome") +// +// This basic auth information gets added to all the request rasied from this client instance. +// Also it can be overridden or set one at the request level is supported, see `resty.R().SetBasicAuth`. +// +func (c *Client) SetBasicAuth(username, password string) *Client { + c.UserInfo = &User{Username: username, Password: password} + return c +} + +// SetAuthToken method sets bearer auth token header in the HTTP request. 
Example: +// Authorization: Bearer +// +// Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F +// +// resty.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F") +// +// This bearer auth token gets added to all the request rasied from this client instance. +// Also it can be overridden or set one at the request level is supported, see `resty.R().SetAuthToken`. +// +func (c *Client) SetAuthToken(token string) *Client { + c.Token = token + return c +} + +// R method creates a request instance, its used for Get, Post, Put, Delete, Patch, Head and Options. +func (c *Client) R() *Request { + r := &Request{ + URL: "", + Method: "", + QueryParam: url.Values{}, + FormData: url.Values{}, + Header: http.Header{}, + Body: nil, + Result: nil, + Error: nil, + RawRequest: nil, + client: c, + bodyBuf: nil, + proxyURL: nil, + multipartFiles: []*File{}, + } + + return r +} + +// OnBeforeRequest method sets request middleware into the before request chain. +// Its gets applied after default `go-resty` request middlewares and before request +// been sent from `go-resty` to host server. +// resty.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error { +// // Now you have access to Client and Request instance +// // manipulate it as per your need +// +// return nil // if its success otherwise return error +// }) +// +func (c *Client) OnBeforeRequest(m func(*Client, *Request) error) *Client { + c.beforeRequest[len(c.beforeRequest)-1] = m + c.beforeRequest = append(c.beforeRequest, requestLogger) + + return c +} + +// OnAfterResponse method sets response middleware into the after response chain. +// Once we receive response from host server, default `go-resty` response middleware +// gets applied and then user assigened response middlewares applied. 
+// resty.OnAfterResponse(func(c *resty.Client, r *resty.Response) error { +// // Now you have access to Client and Response instance +// // manipulate it as per your need +// +// return nil // if its success otherwise return error +// }) +// +func (c *Client) OnAfterResponse(m func(*Client, *Response) error) *Client { + c.afterResponse = append(c.afterResponse, m) + return c +} + +// SetDebug method enables the debug mode on `go-resty` client. Client logs details of every request and response. +// For `Request` it logs information such as HTTP verb, Relative URL path, Host, Headers, Body if it has one. +// For `Response` it logs information such as Status, Response Time, Headers, Body if it has one. +// resty.SetDebug(true) +// +func (c *Client) SetDebug(d bool) *Client { + c.Debug = d + return c +} + +// SetDisableWarn method disables the warning message on `go-resty` client. +// For example: go-resty warns the user when BasicAuth used on HTTP mode. +// resty.SetDisableWarn(true) +// +func (c *Client) SetDisableWarn(d bool) *Client { + c.DisableWarn = d + return c +} + +// SetLogger method sets given writer for logging go-resty request and response details. +// Default is os.Stderr +// file, _ := os.OpenFile("/Users/jeeva/go-resty.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) +// +// resty.SetLogger(file) +// +func (c *Client) SetLogger(w io.Writer) *Client { + c.Log = getLogger(w) + return c +} + +// SetContentLength method enables the HTTP header `Content-Length` value for every request. +// By default go-resty won't set `Content-Length`. +// resty.SetContentLength(true) +// +// Also you have an option to enable for particular request. See `resty.R().SetContentLength` +// +func (c *Client) SetContentLength(l bool) *Client { + c.setContentLength = l + return c +} + +// SetError method is to register the global or client common `Error` object into go-resty. 
+// It is used for automatic unmarshalling if response status code is greater than 399 and +// content type either JSON or XML. Can be pointer or non-pointer. +// resty.SetError(&Error{}) +// // OR +// resty.SetError(Error{}) +// +func (c *Client) SetError(err interface{}) *Client { + c.Error = typeOf(err) + return c +} + +// SetRedirectPolicy method sets the client redirect poilicy. go-resty provides ready to use +// redirect policies. Wanna create one for yourself refer `redirect.go`. +// +// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20)) +// +// // Need multiple redirect policies together +// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20), DomainCheckRedirectPolicy("host1.com", "host2.net")) +// +func (c *Client) SetRedirectPolicy(policies ...interface{}) *Client { + for _, p := range policies { + if _, ok := p.(RedirectPolicy); !ok { + c.Log.Printf("ERORR: %v does not implement resty.RedirectPolicy (missing Apply method)", + runtime.FuncForPC(reflect.ValueOf(p).Pointer()).Name()) + } + } + + c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + for _, p := range policies { + err := p.(RedirectPolicy).Apply(req, via) + if err != nil { + return err + } + } + return nil // looks good, go ahead + } + + return c +} + +// SetRetryCount method enables retry on `go-resty` client and allows you +// to set no. of retry count. Resty uses a Backoff mechanism. +func (c *Client) SetRetryCount(count int) *Client { + c.RetryCount = count + return c +} + +// AddRetryCondition method adds a retry condition function to array of functions +// that are checked to determine if the request is retried. The request will +// retry if any of the functions return true and error is nil. 
+func (c *Client) AddRetryCondition(condition RetryConditionFunc) *Client { + c.RetryConditions = append(c.RetryConditions, condition) + return c +} + +// SetHTTPMode method sets go-resty mode into HTTP +func (c *Client) SetHTTPMode() *Client { + return c.SetMode("http") +} + +// SetRESTMode method sets go-resty mode into RESTful +func (c *Client) SetRESTMode() *Client { + return c.SetMode("rest") +} + +// SetMode method sets go-resty client mode to given value such as 'http' & 'rest'. +// RESTful: +// - No Redirect +// - Automatic response unmarshal if it is JSON or XML +// HTML: +// - Up to 10 Redirects +// - No automatic unmarshall. Response will be treated as `response.String()` +// +// If you want more redirects, use FlexibleRedirectPolicy +// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20)) +// +func (c *Client) SetMode(mode string) *Client { + if mode == "http" { + c.isHTTPMode = true + c.SetRedirectPolicy(FlexibleRedirectPolicy(10)) + c.afterResponse = []func(*Client, *Response) error{ + responseLogger, + saveResponseIntoFile, + } + } else { // RESTful + c.isHTTPMode = false + c.SetRedirectPolicy(NoRedirectPolicy()) + c.afterResponse = []func(*Client, *Response) error{ + responseLogger, + parseResponseBody, + saveResponseIntoFile, + } + } + + return c +} + +// Mode method returns the current client mode. Typically its a "http" or "rest". +// Default is "rest" +func (c *Client) Mode() string { + if c.isHTTPMode { + return "http" + } + + return "rest" +} + +// SetTLSClientConfig method sets TLSClientConfig for underling client Transport. +// +// Example: +// // One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial +// resty.SetTLSClientConfig(&tls.Config{ RootCAs: roots }) +// +// // or One can disable security check (https) +// resty.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true }) +// Note: This method overwrites existing `TLSClientConfig`. 
+// +func (c *Client) SetTLSClientConfig(config *tls.Config) *Client { + c.transport.TLSClientConfig = config + return c +} + +// SetTimeout method sets timeout for request raised from client +// resty.SetTimeout(time.Duration(1 * time.Minute)) +// +func (c *Client) SetTimeout(timeout time.Duration) *Client { + c.transport.Dial = func(network, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, timeout) + if err != nil { + c.Log.Printf("ERROR [%v]", err) + return nil, err + } + conn.SetDeadline(time.Now().Add(timeout)) + return conn, nil + } + + return c +} + +// SetProxy method sets the Proxy URL and Port for resty client. +// resty.SetProxy("http://proxyserver:8888") +// +// Alternatives: At request level proxy, see `Request.SetProxy`. OR Without this `SetProxy` method, +// you can also set Proxy via environment variable. By default `Go` uses setting from `HTTP_PROXY`. +// +func (c *Client) SetProxy(proxyURL string) *Client { + if pURL, err := url.Parse(proxyURL); err == nil { + c.proxyURL = pURL + } else { + c.Log.Printf("ERROR [%v]", err) + c.proxyURL = nil + } + + return c +} + +// RemoveProxy method removes the proxy configuration from resty client +// resty.RemoveProxy() +// +func (c *Client) RemoveProxy() *Client { + c.proxyURL = nil + return c +} + +// SetCertificates method helps to set client certificates into resty conveniently. +// +func (c *Client) SetCertificates(certs ...tls.Certificate) *Client { + config := c.getTLSConfig() + config.Certificates = append(config.Certificates, certs...) 
+ + return c +} + +// SetRootCertificate method helps to add one or more root certificates into resty client +// resty.SetRootCertificate("/path/to/root/pemFile.pem") +// +func (c *Client) SetRootCertificate(pemFilePath string) *Client { + rootPemData, err := ioutil.ReadFile(pemFilePath) + if err != nil { + c.Log.Printf("ERROR [%v]", err) + return c + } + + config := c.getTLSConfig() + if config.RootCAs == nil { + config.RootCAs = x509.NewCertPool() + } + + config.RootCAs.AppendCertsFromPEM(rootPemData) + + return c +} + +// SetOutputDirectory method sets output directory for saving HTTP response into file. +// If the output directory not exists then resty creates one. This setting is optional one, +// if you're planning using absoule path in `Request.SetOutput` and can used together. +// resty.SetOutputDirectory("/save/http/response/here") +// +func (c *Client) SetOutputDirectory(dirPath string) *Client { + err := createDirectory(dirPath) + if err != nil { + c.Log.Printf("ERROR [%v]", err) + } + + c.outputDirectory = dirPath + + return c +} + +// SetTransport method sets custom *http.Transport in the resty client. Its way to override default. +// +// **Note:** It overwrites the default resty transport instance and its configurations. +// transport := &http.Transport{ +// // somthing like Proxying to httptest.Server, etc... +// Proxy: func(req *http.Request) (*url.URL, error) { +// return url.Parse(server.URL) +// }, +// } +// +// resty.SetTransport(&transport) +// +func (c *Client) SetTransport(transport *http.Transport) *Client { + if transport != nil { + c.transport = transport + } + + return c +} + +// SetScheme method sets custom scheme in the resty client. Its way to override default. +// resty.SetScheme("http") +// +func (c *Client) SetScheme(scheme string) *Client { + if c.scheme == "" { + c.scheme = scheme + } + + return c +} + +// SetCloseConnection method sets variable Close in http request struct with the given +// value. 
More info: https://golang.org/src/net/http/request.go +func (c *Client) SetCloseConnection(close bool) *Client { + c.closeConnection = close + return c +} + +// executes the given `Request` object and returns response +func (c *Client) execute(req *Request) (*Response, error) { + // Apply Request middleware + var err error + for _, f := range c.beforeRequest { + err = f(c, req) + if err != nil { + return nil, err + } + } + + c.mutex.Lock() + + if req.proxyURL != nil { + c.transport.Proxy = http.ProxyURL(req.proxyURL) + } else if c.proxyURL != nil { + c.transport.Proxy = http.ProxyURL(c.proxyURL) + } + + req.Time = time.Now() + c.httpClient.Transport = c.transport + + resp, err := c.httpClient.Do(req.RawRequest) + + c.mutex.Unlock() + + response := &Response{ + Request: req, + RawResponse: resp, + receivedAt: time.Now(), + } + + if err != nil { + return response, err + } + + if !req.isSaveResponse { + defer resp.Body.Close() + response.body, err = ioutil.ReadAll(resp.Body) + if err != nil { + return response, err + } + + response.size = int64(len(response.body)) + } + + // Apply Response middleware + for _, f := range c.afterResponse { + err = f(c, response) + if err != nil { + break + } + } + + return response, err +} + +// enables a log prefix +func (c *Client) enableLogPrefix() { + c.Log.SetFlags(log.LstdFlags) + c.Log.SetPrefix("RESTY ") +} + +// disables a log prefix +func (c *Client) disableLogPrefix() { + c.Log.SetFlags(0) + c.Log.SetPrefix("") +} + +// getting TLS client config if not exists then create one +func (c *Client) getTLSConfig() *tls.Config { + if c.transport.TLSClientConfig == nil { + c.transport.TLSClientConfig = &tls.Config{} + } + + return c.transport.TLSClientConfig +} + +// +// Response +// + +// Response is an object represents executed request and its values. 
+type Response struct { + Request *Request + RawResponse *http.Response + + body []byte + size int64 + receivedAt time.Time +} + +// Body method returns HTTP response as []byte array for the executed request. +// Note: `Response.Body` might be nil, if `Request.SetOutput` is used. +func (r *Response) Body() []byte { + return r.body +} + +// Status method returns the HTTP status string for the executed request. +// Example: 200 OK +func (r *Response) Status() string { + return r.RawResponse.Status +} + +// StatusCode method returns the HTTP status code for the executed request. +// Example: 200 +func (r *Response) StatusCode() int { + return r.RawResponse.StatusCode +} + +// Result method returns the response value as an object if it has one +func (r *Response) Result() interface{} { + return r.Request.Result +} + +// Error method returns the error object if it has one +func (r *Response) Error() interface{} { + return r.Request.Error +} + +// Header method returns the response headers +func (r *Response) Header() http.Header { + return r.RawResponse.Header +} + +// Cookies method to access all the response cookies +func (r *Response) Cookies() []*http.Cookie { + return r.RawResponse.Cookies() +} + +// String method returns the body of the server response as String. +func (r *Response) String() string { + if r.body == nil { + return "" + } + + return strings.TrimSpace(string(r.body)) +} + +// Time method returns the time of HTTP response time that from request we sent and received a request. +// See `response.ReceivedAt` to know when client recevied response and see `response.Request.Time` to know +// when client sent a request. +func (r *Response) Time() time.Duration { + return r.receivedAt.Sub(r.Request.Time) +} + +// ReceivedAt method returns when response got recevied from server for the request. +func (r *Response) ReceivedAt() time.Time { + return r.receivedAt +} + +// Size method returns the HTTP response size in bytes. 
Ya, you can relay on HTTP `Content-Length` header, +// however it won't be good for chucked transfer/compressed response. Since Resty calculates response size +// at the client end. You will get actual size of the http response. +func (r *Response) Size() int64 { + return r.size +} + +func (r *Response) fmtBodyString() string { + bodyStr := "***** NO CONTENT *****" + if r.body != nil { + ct := r.Header().Get(hdrContentTypeKey) + if IsJSONType(ct) { + var out bytes.Buffer + if err := json.Indent(&out, r.body, "", " "); err == nil { + bodyStr = string(out.Bytes()) + } + } else { + bodyStr = r.String() + } + } + + return bodyStr +} + +// +// File +// + +// File represent file information for multipart request +type File struct { + Name string + ParamName string + io.Reader +} + +// String returns string value of current file details +func (f *File) String() string { + return fmt.Sprintf("ParamName: %v; FileName: %v", f.ParamName, f.Name) +} + +// +// Helper methods +// + +// IsStringEmpty method tells whether given string is empty or not +func IsStringEmpty(str string) bool { + return (len(strings.TrimSpace(str)) == 0) +} + +// DetectContentType method is used to figure out `Request.Body` content type for request header +func DetectContentType(body interface{}) string { + contentType := plainTextType + kind := kindOf(body) + switch kind { + case reflect.Struct, reflect.Map: + contentType = jsonContentType + case reflect.String: + contentType = plainTextType + default: + if b, ok := body.([]byte); ok { + contentType = http.DetectContentType(b) + } else if kind == reflect.Slice { + contentType = jsonContentType + } + } + + return contentType +} + +// IsJSONType method is to check JSON content type or not +func IsJSONType(ct string) bool { + return jsonCheck.MatchString(ct) +} + +// IsXMLType method is to check XML content type or not +func IsXMLType(ct string) bool { + return xmlCheck.MatchString(ct) +} + +// Unmarshal content into object from JSON or XML +func 
Unmarshal(ct string, b []byte, d interface{}) (err error) { + if IsJSONType(ct) { + err = json.Unmarshal(b, d) + } else if IsXMLType(ct) { + err = xml.Unmarshal(b, d) + } + + return +} + +func getLogger(w io.Writer) *log.Logger { + return log.New(w, "RESTY ", log.LstdFlags) +} + +func addFile(w *multipart.Writer, fieldName, path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + part, err := w.CreateFormFile(fieldName, filepath.Base(path)) + if err != nil { + return err + } + _, err = io.Copy(part, file) + + return err +} + +func addFileReader(w *multipart.Writer, f *File) error { + part, err := w.CreateFormFile(f.ParamName, f.Name) + if err != nil { + return err + } + _, err = io.Copy(part, f.Reader) + + return err +} + +func getPointer(v interface{}) interface{} { + vv := valueOf(v) + if vv.Kind() == reflect.Ptr { + return v + } + return reflect.New(vv.Type()).Interface() +} + +func isPayloadSupported(m string) bool { + return (m == POST || m == PUT || m == DELETE || m == PATCH) +} + +func typeOf(i interface{}) reflect.Type { + return indirect(valueOf(i)).Type() +} + +func valueOf(i interface{}) reflect.Value { + return reflect.ValueOf(i) +} + +func indirect(v reflect.Value) reflect.Value { + return reflect.Indirect(v) +} + +func kindOf(v interface{}) reflect.Kind { + return typeOf(v).Kind() +} + +func createDirectory(dir string) (err error) { + if _, err = os.Stat(dir); err != nil { + if os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return + } + } + } + return +} diff --git a/vendor/gopkg.in/resty.v0/default.go b/vendor/gopkg.in/resty.v0/default.go new file mode 100644 index 0000000000..d96dd69751 --- /dev/null +++ b/vendor/gopkg.in/resty.v0/default.go @@ -0,0 +1,244 @@ +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package resty + +import ( + "crypto/tls" + "io" + "net/http" + "net/http/cookiejar" + "net/url" + "os" + "sync" + "time" + + "golang.org/x/net/publicsuffix" +) + +// DefaultClient of resty +var DefaultClient *Client + +// New method creates a new go-resty client +func New() *Client { + cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + + c := &Client{ + HostURL: "", + QueryParam: url.Values{}, + FormData: url.Values{}, + Header: http.Header{}, + UserInfo: nil, + Token: "", + Cookies: make([]*http.Cookie, 0), + Debug: false, + Log: getLogger(os.Stderr), + httpClient: &http.Client{Jar: cookieJar}, + transport: &http.Transport{}, + mutex: &sync.Mutex{}, + RetryCount: 0, + } + + // Default redirect policy + c.SetRedirectPolicy(NoRedirectPolicy()) + + // default before request middlewares + c.beforeRequest = []func(*Client, *Request) error{ + parseRequestURL, + parseRequestHeader, + parseRequestBody, + createHTTPRequest, + addCredentials, + requestLogger, + } + + // default after response middlewares + c.afterResponse = []func(*Client, *Response) error{ + responseLogger, + parseResponseBody, + saveResponseIntoFile, + } + + return c +} + +// R creates a new resty request object, it is used form a HTTP/RESTful request +// such as GET, POST, PUT, DELETE, HEAD, PATCH and OPTIONS. +func R() *Request { + return DefaultClient.R() +} + +// SetHostURL sets Host URL. See `Client.SetHostURL for more information. +func SetHostURL(url string) *Client { + return DefaultClient.SetHostURL(url) +} + +// SetHeader sets single header. See `Client.SetHeader` for more information. +func SetHeader(header, value string) *Client { + return DefaultClient.SetHeader(header, value) +} + +// SetHeaders sets multiple headers. See `Client.SetHeaders` for more information. +func SetHeaders(headers map[string]string) *Client { + return DefaultClient.SetHeaders(headers) +} + +// SetCookie sets single cookie object. See `Client.SetCookie` for more information. 
+func SetCookie(hc *http.Cookie) *Client { + return DefaultClient.SetCookie(hc) +} + +// SetCookies sets multiple cookie object. See `Client.SetCookies` for more information. +func SetCookies(cs []*http.Cookie) *Client { + return DefaultClient.SetCookies(cs) +} + +// SetQueryParam method sets single paramater and its value. See `Client.SetQueryParam` for more information. +func SetQueryParam(param, value string) *Client { + return DefaultClient.SetQueryParam(param, value) +} + +// SetQueryParams method sets multiple paramaters and its value. See `Client.SetQueryParams` for more information. +func SetQueryParams(params map[string]string) *Client { + return DefaultClient.SetQueryParams(params) +} + +// SetFormData method sets Form parameters and its values. See `Client.SetFormData` for more information. +func SetFormData(data map[string]string) *Client { + return DefaultClient.SetFormData(data) +} + +// SetBasicAuth method sets the basic authentication header. See `Client.SetBasicAuth` for more information. +func SetBasicAuth(username, password string) *Client { + return DefaultClient.SetBasicAuth(username, password) +} + +// SetAuthToken method sets bearer auth token header. See `Client.SetAuthToken` for more information. +func SetAuthToken(token string) *Client { + return DefaultClient.SetAuthToken(token) +} + +// OnBeforeRequest method sets request middleware. See `Client.OnBeforeRequest` for more information. +func OnBeforeRequest(m func(*Client, *Request) error) *Client { + return DefaultClient.OnBeforeRequest(m) +} + +// OnAfterResponse method sets response middleware. See `Client.OnAfterResponse` for more information. +func OnAfterResponse(m func(*Client, *Response) error) *Client { + return DefaultClient.OnAfterResponse(m) +} + +// SetDebug method enables the debug mode. See `Client.SetDebug` for more information. +func SetDebug(d bool) *Client { + return DefaultClient.SetDebug(d) +} + +// SetRetryCount method set the retry count. 
See `Client.SetRetryCount` for more information. +func SetRetryCount(count int) *Client { + return DefaultClient.SetRetryCount(count) +} + +// AddRetryCondition method appends check function for retry. See `Client.AddRetryCondition` for more information. +func AddRetryCondition(condition RetryConditionFunc) *Client { + return DefaultClient.AddRetryCondition(condition) +} + +// SetDisableWarn method disables warning comes from `go-resty` client. See `Client.SetDisableWarn` for more information. +func SetDisableWarn(d bool) *Client { + return DefaultClient.SetDisableWarn(d) +} + +// SetLogger method sets given writer for logging. See `Client.SetLogger` for more information. +func SetLogger(w io.Writer) *Client { + return DefaultClient.SetLogger(w) +} + +// SetContentLength method enables `Content-Length` value. See `Client.SetContentLength` for more information. +func SetContentLength(l bool) *Client { + return DefaultClient.SetContentLength(l) +} + +// SetError method is to register the global or client common `Error` object. See `Client.SetError` for more information. +func SetError(err interface{}) *Client { + return DefaultClient.SetError(err) +} + +// SetRedirectPolicy method sets the client redirect poilicy. See `Client.SetRedirectPolicy` for more information. +func SetRedirectPolicy(policies ...interface{}) *Client { + return DefaultClient.SetRedirectPolicy(policies...) +} + +// SetHTTPMode method sets go-resty mode into HTTP. See `Client.SetMode` for more information. +func SetHTTPMode() *Client { + return DefaultClient.SetHTTPMode() +} + +// SetRESTMode method sets go-resty mode into RESTful. See `Client.SetMode` for more information. +func SetRESTMode() *Client { + return DefaultClient.SetRESTMode() +} + +// Mode method returns the current client mode. See `Client.Mode` for more information. +func Mode() string { + return DefaultClient.Mode() +} + +// SetTLSClientConfig method sets TLSClientConfig for underling client Transport. 
See `Client.SetTLSClientConfig` for more information. +func SetTLSClientConfig(config *tls.Config) *Client { + return DefaultClient.SetTLSClientConfig(config) +} + +// SetTimeout method sets timeout for request. See `Client.SetTimeout` for more information. +func SetTimeout(timeout time.Duration) *Client { + return DefaultClient.SetTimeout(timeout) +} + +// SetProxy method sets Proxy for request. See `Client.SetProxy` for more information. +func SetProxy(proxyURL string) *Client { + return DefaultClient.SetProxy(proxyURL) +} + +// RemoveProxy method removes the proxy configuration. See `Client.RemoveProxy` for more information. +func RemoveProxy() *Client { + return DefaultClient.RemoveProxy() +} + +// SetCertificates method helps to set client certificates into resty conveniently. +// See `Client.SetCertificates` for more information and example. +func SetCertificates(certs ...tls.Certificate) *Client { + return DefaultClient.SetCertificates(certs...) +} + +// SetRootCertificate method helps to add one or more root certificates into resty client. +// See `Client.SetRootCertificate` for more information. +func SetRootCertificate(pemFilePath string) *Client { + return DefaultClient.SetRootCertificate(pemFilePath) +} + +// SetOutputDirectory method sets output directory. See `Client.SetOutputDirectory` for more information. +func SetOutputDirectory(dirPath string) *Client { + return DefaultClient.SetOutputDirectory(dirPath) +} + +// SetTransport method sets custom *http.Transport in the resty client. +// See `Client.SetTransport` for more information. +func SetTransport(transport *http.Transport) *Client { + return DefaultClient.SetTransport(transport) +} + +// SetScheme method sets custom scheme in the resty client. +// See `Client.SetScheme` for more information. +func SetScheme(scheme string) *Client { + return DefaultClient.SetScheme(scheme) +} + +// SetCloseConnection method sets close connection value in the resty client. 
+// See `Client.SetCloseConnection` for more information. +func SetCloseConnection(close bool) *Client { + return DefaultClient.SetCloseConnection(close) +} + +func init() { + DefaultClient = New() +} diff --git a/vendor/gopkg.in/resty.v0/middleware.go b/vendor/gopkg.in/resty.v0/middleware.go new file mode 100644 index 0000000000..7ebe8450a3 --- /dev/null +++ b/vendor/gopkg.in/resty.v0/middleware.go @@ -0,0 +1,406 @@ +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" +) + +// +// Request Middleware(s) +// + +func parseRequestURL(c *Client, r *Request) error { + // Parsing request URL + reqURL, err := url.Parse(r.URL) + if err != nil { + return err + } + + // If Request.Url is relative path then added c.HostUrl into + // the request URL otherwise Request.Url will be used as-is + if !reqURL.IsAbs() { + if !strings.HasPrefix(r.URL, "/") { + r.URL = "/" + r.URL + } + + reqURL, err = url.Parse(c.HostURL + r.URL) + if err != nil { + return err + } + } + + // Adding Query Param + query := reqURL.Query() + for k, v := range c.QueryParam { + for _, iv := range v { + query.Add(k, iv) + } + } + + for k, v := range r.QueryParam { + // remove query param from client level by key + // since overrides happens for that key in the request + query.Del(k) + + for _, iv := range v { + query.Add(k, iv) + } + } + + reqURL.RawQuery = query.Encode() + r.URL = reqURL.String() + + return nil +} + +func parseRequestHeader(c *Client, r *Request) error { + hdr := http.Header{} + for k := range c.Header { + hdr.Set(k, c.Header.Get(k)) + } + for k := range r.Header { + hdr.Set(k, r.Header.Get(k)) + } + + if IsStringEmpty(hdr.Get(hdrUserAgentKey)) { + 
hdr.Set(hdrUserAgentKey, fmt.Sprintf(hdrUserAgentValue, Version)) + } else { + hdr.Set("X-"+hdrUserAgentKey, fmt.Sprintf(hdrUserAgentValue, Version)) + } + + if IsStringEmpty(hdr.Get(hdrAcceptKey)) && !IsStringEmpty(hdr.Get(hdrContentTypeKey)) { + hdr.Set(hdrAcceptKey, hdr.Get(hdrContentTypeKey)) + } + + r.Header = hdr + + return nil +} + +func parseRequestBody(c *Client, r *Request) (err error) { + if isPayloadSupported(r.Method) { + + // Handling Multipart + if r.isMultiPart && !(r.Method == PATCH) { + if err = handleMultipart(c, r); err != nil { + return + } + + goto CL + } + + // Handling Form Data + if len(c.FormData) > 0 || len(r.FormData) > 0 { + handleFormData(c, r) + + goto CL + } + + // Handling Request body + if r.Body != nil { + handleContentType(c, r) + + if err = handleRequestBody(c, r); err != nil { + return + } + } + } else { + r.Header.Del(hdrContentTypeKey) + } + +CL: + // by default resty won't set content length, you can if you want to :) + if c.setContentLength || r.setContentLength { + r.Header.Set(hdrContentLengthKey, fmt.Sprintf("%d", r.bodyBuf.Len())) + } + + return +} + +func createHTTPRequest(c *Client, r *Request) (err error) { + if r.bodyBuf == nil { + r.RawRequest, err = http.NewRequest(r.Method, r.URL, nil) + } else { + r.RawRequest, err = http.NewRequest(r.Method, r.URL, r.bodyBuf) + } + + if err != nil { + return + } + + // Assign close connection option + r.RawRequest.Close = c.closeConnection + + // Add headers into http request + r.RawRequest.Header = r.Header + + // Add cookies into http request + for _, cookie := range c.Cookies { + r.RawRequest.AddCookie(cookie) + } + + // it's for non-http scheme option + if r.RawRequest.URL != nil && r.RawRequest.URL.Scheme == "" { + r.RawRequest.URL.Scheme = c.scheme + r.RawRequest.URL.Host = r.URL + } + + return +} + +func addCredentials(c *Client, r *Request) error { + var isBasicAuth bool + // Basic Auth + if r.UserInfo != nil { // takes precedence + 
r.RawRequest.SetBasicAuth(r.UserInfo.Username, r.UserInfo.Password) + isBasicAuth = true + } else if c.UserInfo != nil { + r.RawRequest.SetBasicAuth(c.UserInfo.Username, c.UserInfo.Password) + isBasicAuth = true + } + + if !c.DisableWarn { + if isBasicAuth && !strings.HasPrefix(r.URL, "https") { + c.Log.Println("WARNING - Using Basic Auth in HTTP mode is not secure.") + } + } + + // Token Auth + if !IsStringEmpty(r.Token) { // takes precedence + r.RawRequest.Header.Set(hdrAuthorizationKey, "Bearer "+r.Token) + } else if !IsStringEmpty(c.Token) { + r.RawRequest.Header.Set(hdrAuthorizationKey, "Bearer "+c.Token) + } + + return nil +} + +func requestLogger(c *Client, r *Request) error { + if c.Debug { + rr := r.RawRequest + c.Log.Println() + c.disableLogPrefix() + c.Log.Println("---------------------- REQUEST LOG -----------------------") + c.Log.Printf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) + c.Log.Printf("HOST : %s", rr.URL.Host) + c.Log.Println("HEADERS:") + for h, v := range rr.Header { + c.Log.Printf("%25s: %v", h, strings.Join(v, ", ")) + } + c.Log.Printf("BODY :\n%v", r.fmtBodyString()) + c.Log.Println("----------------------------------------------------------") + c.enableLogPrefix() + } + + return nil +} + +// +// Response Middleware(s) +// + +func responseLogger(c *Client, res *Response) error { + if c.Debug { + c.Log.Println() + c.disableLogPrefix() + c.Log.Println("---------------------- RESPONSE LOG -----------------------") + c.Log.Printf("STATUS : %s", res.Status()) + c.Log.Printf("RECEIVED AT : %v", res.ReceivedAt()) + c.Log.Printf("RESPONSE TIME : %v", res.Time()) + c.Log.Println("HEADERS:") + for h, v := range res.Header() { + c.Log.Printf("%30s: %v", h, strings.Join(v, ", ")) + } + if res.Request.isSaveResponse { + c.Log.Printf("BODY :\n***** RESPONSE WRITTEN INTO FILE *****") + } else { + c.Log.Printf("BODY :\n%v", res.fmtBodyString()) + } + c.Log.Println("----------------------------------------------------------") + 
c.enableLogPrefix() + } + + return nil +} + +func parseResponseBody(c *Client, res *Response) (err error) { + // Handles only JSON or XML content type + ct := res.Header().Get(hdrContentTypeKey) + if IsJSONType(ct) || IsXMLType(ct) { + // Considered as Result + if res.StatusCode() > 199 && res.StatusCode() < 300 { + if res.Request.Result != nil { + err = Unmarshal(ct, res.body, res.Request.Result) + } + } + + // Considered as Error + if res.StatusCode() > 399 { + // global error interface + if res.Request.Error == nil && c.Error != nil { + res.Request.Error = reflect.New(c.Error).Interface() + } + + if res.Request.Error != nil { + err = Unmarshal(ct, res.body, res.Request.Error) + } + } + } + + return +} + +func handleMultipart(c *Client, r *Request) (err error) { + r.bodyBuf = &bytes.Buffer{} + w := multipart.NewWriter(r.bodyBuf) + + for k, v := range c.FormData { + for _, iv := range v { + w.WriteField(k, iv) + } + } + + for k, v := range r.FormData { + for _, iv := range v { + if strings.HasPrefix(k, "@") { // file + err = addFile(w, k[1:], iv) + if err != nil { + return + } + } else { // form value + w.WriteField(k, iv) + } + } + } + + // #21 - adding io.Reader support + if len(r.multipartFiles) > 0 { + for _, f := range r.multipartFiles { + err = addFileReader(w, f) + if err != nil { + return + } + } + } + + r.Header.Set(hdrContentTypeKey, w.FormDataContentType()) + err = w.Close() + + return +} + +func handleFormData(c *Client, r *Request) { + formData := url.Values{} + + for k, v := range c.FormData { + for _, iv := range v { + formData.Add(k, iv) + } + } + + for k, v := range r.FormData { + // remove form data field from client level by key + // since overrides happens for that key in the request + formData.Del(k) + + for _, iv := range v { + formData.Add(k, iv) + } + } + + r.bodyBuf = bytes.NewBuffer([]byte(formData.Encode())) + r.Header.Set(hdrContentTypeKey, formContentType) + r.isFormData = true +} + +func handleContentType(c *Client, r *Request) { + 
contentType := r.Header.Get(hdrContentTypeKey) + if IsStringEmpty(contentType) { + contentType = DetectContentType(r.Body) + r.Header.Set(hdrContentTypeKey, contentType) + } +} + +func handleRequestBody(c *Client, r *Request) (err error) { + var bodyBytes []byte + contentType := r.Header.Get(hdrContentTypeKey) + kind := kindOf(r.Body) + + if reader, ok := r.Body.(io.Reader); ok { + r.bodyBuf = &bytes.Buffer{} + r.bodyBuf.ReadFrom(reader) + } else if b, ok := r.Body.([]byte); ok { + bodyBytes = b + } else if s, ok := r.Body.(string); ok { + bodyBytes = []byte(s) + } else if IsJSONType(contentType) && + (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) { + bodyBytes, err = json.Marshal(r.Body) + } else if IsXMLType(contentType) && (kind == reflect.Struct) { + bodyBytes, err = xml.Marshal(r.Body) + } + + if bodyBytes == nil && r.bodyBuf == nil { + err = errors.New("Unsupported 'Body' type/value") + } + + // if any errors during body bytes handling, return it + if err != nil { + return + } + + // []byte into Buffer + if bodyBytes != nil && r.bodyBuf == nil { + r.bodyBuf = bytes.NewBuffer(bodyBytes) + } + + return +} + +func saveResponseIntoFile(c *Client, res *Response) error { + if res.Request.isSaveResponse { + file := "" + + if len(c.outputDirectory) > 0 && !filepath.IsAbs(res.Request.outputFile) { + file += c.outputDirectory + string(filepath.Separator) + } + + file = filepath.Clean(file + res.Request.outputFile) + err := createDirectory(filepath.Dir(file)) + if err != nil { + return err + } + + outFile, err := os.Create(file) + if err != nil { + return err + } + defer outFile.Close() + + // io.Copy reads maximum 32kb size, it is perfect for large file download too + defer res.RawResponse.Body.Close() + written, err := io.Copy(outFile, res.RawResponse.Body) + if err != nil { + return err + } + + res.size = written + } + + return nil +} diff --git a/vendor/gopkg.in/resty.v0/redirect.go b/vendor/gopkg.in/resty.v0/redirect.go new file mode 
100644 index 0000000000..bd69d591b8 --- /dev/null +++ b/vendor/gopkg.in/resty.v0/redirect.go @@ -0,0 +1,99 @@ +// Copyright (c) 2015 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "errors" + "fmt" + "net" + "net/http" + "strings" +) + +// RedirectPolicy to regulate the redirects in the resty client. +// Objects implementing the RedirectPolicy interface can be registered as +// +// Apply function should return nil to continue the redirect jounery, otherwise +// return error to stop the redirect. +type RedirectPolicy interface { + Apply(req *http.Request, via []*http.Request) error +} + +// The RedirectPolicyFunc type is an adapter to allow the use of ordinary functions as RedirectPolicy. +// If f is a function with the appropriate signature, RedirectPolicyFunc(f) is a RedirectPolicy object that calls f. +type RedirectPolicyFunc func(*http.Request, []*http.Request) error + +// Apply calls f(req, via). +func (f RedirectPolicyFunc) Apply(req *http.Request, via []*http.Request) error { + return f(req, via) +} + +// NoRedirectPolicy is used to disable redirects in the HTTP client +// resty.SetRedirectPolicy(NoRedirectPolicy()) +func NoRedirectPolicy() RedirectPolicy { + return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error { + return errors.New("Auto redirect is disabled") + }) +} + +// FlexibleRedirectPolicy is convenient method to create No of redirect policy for HTTP client. 
+// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20)) +func FlexibleRedirectPolicy(noOfRedirect int) RedirectPolicy { + return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error { + if len(via) >= noOfRedirect { + return fmt.Errorf("Stopped after %d redirects", noOfRedirect) + } + + checkHostAndAddHeaders(req, via[0]) + + return nil + }) +} + +// DomainCheckRedirectPolicy is convenient method to define domain name redirect rule in resty client. +// Redirect is allowed for only mentioned host in the policy. +// resty.SetRedirectPolicy(DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net")) +func DomainCheckRedirectPolicy(hostnames ...string) RedirectPolicy { + hosts := make(map[string]bool) + for _, h := range hostnames { + hosts[strings.ToLower(h)] = true + } + + fn := RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error { + if ok := hosts[getHostname(req.URL.Host)]; !ok { + return errors.New("Redirect is not allowed as per DomainCheckRedirectPolicy") + } + + return nil + }) + + return fn +} + +func getHostname(host string) (hostname string) { + if strings.Index(host, ":") > 0 { + host, _, _ = net.SplitHostPort(host) + hostname = strings.ToLower(host) + } else { + hostname = strings.ToLower(host) + } + return +} + +// By default Golang will not redirect request headers +// after go throughing various discussion commments from thread +// https://github.com/golang/go/issues/4800 +// go-resty will add all the headers during a redirect for the same host +func checkHostAndAddHeaders(cur *http.Request, pre *http.Request) { + curHostname := getHostname(cur.URL.Host) + preHostname := getHostname(pre.URL.Host) + if strings.EqualFold(curHostname, preHostname) { + for key, val := range pre.Header { + cur.Header[key] = val + } + } else { // only library User-Agent header is added + cur.Header.Set(hdrUserAgentKey, fmt.Sprintf(hdrUserAgentValue, Version)) + } +} diff --git a/vendor/gopkg.in/resty.v0/request.go 
b/vendor/gopkg.in/resty.v0/request.go new file mode 100644 index 0000000000..aba7518049 --- /dev/null +++ b/vendor/gopkg.in/resty.v0/request.go @@ -0,0 +1,494 @@ +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strings" + "time" +) + +// Request type is used to compose and send individual request from client +// go-resty is provide option override client level settings such as +// Auth Token, Basic Auth credentials, Header, Query Param, Form Data, Error object +// and also you can add more options for that particular request +// +type Request struct { + URL string + Method string + QueryParam url.Values + FormData url.Values + Header http.Header + UserInfo *User + Token string + Body interface{} + Result interface{} + Error interface{} + Time time.Time + RawRequest *http.Request + + client *Client + bodyBuf *bytes.Buffer + isMultiPart bool + isFormData bool + setContentLength bool + isSaveResponse bool + outputFile string + proxyURL *url.URL + multipartFiles []*File +} + +// SetHeader method is to set a single header field and its value in the current request. +// Example: To set `Content-Type` and `Accept` as `application/json`. +// resty.R(). +// SetHeader("Content-Type", "application/json"). +// SetHeader("Accept", "application/json") +// +// Also you can override header value, which was set at client instance level. +// +func (r *Request) SetHeader(header, value string) *Request { + r.Header.Set(header, value) + return r +} + +// SetHeaders method sets multiple headers field and its values at one go in the current request. +// Example: To set `Content-Type` and `Accept` as `application/json` +// +// resty.R(). 
+// SetHeaders(map[string]string{ +// "Content-Type": "application/json", +// "Accept": "application/json", +// }) +// Also you can override header value, which was set at client instance level. +// +func (r *Request) SetHeaders(headers map[string]string) *Request { + for h, v := range headers { + r.Header.Set(h, v) + } + + return r +} + +// SetQueryParam method sets single paramater and its value in the current request. +// It will be formed as query string for the request. +// Example: `search=kitchen%20papers&size=large` in the URL after `?` mark. +// resty.R(). +// SetQueryParam("search", "kitchen papers"). +// SetQueryParam("size", "large") +// Also you can override query params value, which was set at client instance level +// +func (r *Request) SetQueryParam(param, value string) *Request { + r.QueryParam.Add(param, value) + return r +} + +// SetQueryParams method sets multiple paramaters and its values at one go in the current request. +// It will be formed as query string for the request. +// Example: `search=kitchen%20papers&size=large` in the URL after `?` mark. +// resty.R(). +// SetQueryParams(map[string]string{ +// "search": "kitchen papers", +// "size": "large", +// }) +// Also you can override query params value, which was set at client instance level +// +func (r *Request) SetQueryParams(params map[string]string) *Request { + for p, v := range params { + r.QueryParam.Add(p, v) + } + + return r +} + +// SetMultiValueQueryParams method sets multiple paramaters with multi-value +// at one go in the current request. It will be formed as query string for the request. +// Example: `status=pending&status=approved&status=open` in the URL after `?` mark. +// resty.R(). 
+// SetMultiValueQueryParams(url.Values{ +// "status": []string{"pending", "approved", "open"}, +// }) +// Also you can override query params value, which was set at client instance level +// +func (r *Request) SetMultiValueQueryParams(params url.Values) *Request { + for p, v := range params { + for _, pv := range v { + r.QueryParam.Add(p, pv) + } + } + + return r +} + +// SetQueryString method provides ability to use string as an input to set URL query string for the request. +// +// Using String as an input +// resty.R(). +// SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more") +// +func (r *Request) SetQueryString(query string) *Request { + values, err := url.ParseQuery(strings.TrimSpace(query)) + if err == nil { + for k := range values { + r.QueryParam.Add(k, values.Get(k)) + } + } else { + r.client.Log.Printf("ERROR [%v]", err) + } + return r +} + +// SetFormData method sets Form parameters and its values in the current request. +// It's applicable only HTTP method `POST` and `PUT` and requets content type would be set as +// `application/x-www-form-urlencoded`. +// resty.R(). +// SetFormData(map[string]string{ +// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F", +// "user_id": "3455454545", +// }) +// Also you can override form data value, which was set at client instance level +// +func (r *Request) SetFormData(data map[string]string) *Request { + for k, v := range data { + r.FormData.Add(k, v) + } + + return r +} + +// SetMultiValueFormData method sets multiple form paramaters with multi-value +// at one go in the current request. +// resty.R(). 
+// SetMultiValueFormData(url.Values{ +// "search_criteria": []string{"book", "glass", "pencil"}, +// }) +// Also you can override form data value, which was set at client instance level +// +func (r *Request) SetMultiValueFormData(params url.Values) *Request { + for k, v := range params { + for _, kv := range v { + r.FormData.Add(k, kv) + } + } + + return r +} + +// SetBody method sets the request body for the request. It supports various realtime need easy. +// We can say its quite handy or powerful. Supported request body data types is `string`, `[]byte`, +// `struct` and `map`. Body value can be pointer or non-pointer. Automatic marshalling +// for JSON and XML content type, if it is `struct` or `map`. +// +// Example: +// +// Struct as a body input, based on content type, it will be marshalled. +// resty.R(). +// SetBody(User{ +// Username: "jeeva@myjeeva.com", +// Password: "welcome2resty", +// }) +// +// Map as a body input, based on content type, it will be marshalled. +// resty.R(). +// SetBody(map[string]interface{}{ +// "username": "jeeva@myjeeva.com", +// "password": "welcome2resty", +// "address": &Address{ +// Address1: "1111 This is my street", +// Address2: "Apt 201", +// City: "My City", +// State: "My State", +// ZipCode: 00000, +// }, +// }) +// +// String as a body input. Suitable for any need as a string input. +// resty.R(). +// SetBody(`{ +// "username": "jeeva@getrightcare.com", +// "password": "admin" +// }`) +// +// []byte as a body input. Suitable for raw request such as file upload, serialize & deserialize, etc. +// resty.R(). +// SetBody([]byte("This is my raw request, sent as-is")) +// +func (r *Request) SetBody(body interface{}) *Request { + r.Body = body + return r +} + +// SetResult method is to register the response `Result` object for automatic unmarshalling in the RESTful mode +// if response status code is between 200 and 299 and content type either JSON or XML. +// +// Note: Result object can be pointer or non-pointer. 
+// resty.R().SetResult(&AuthToken{}) +// // OR +// resty.R().SetResult(AuthToken{}) +// +// Accessing a result value +// response.Result().(*AuthToken) +// +func (r *Request) SetResult(res interface{}) *Request { + r.Result = getPointer(res) + return r +} + +// SetError method is to register the request `Error` object for automatic unmarshalling in the RESTful mode +// if response status code is greater than 399 and content type either JSON or XML. +// +// Note: Error object can be pointer or non-pointer. +// resty.R().SetError(&AuthError{}) +// // OR +// resty.R().SetError(AuthError{}) +// +// Accessing a error value +// response.Error().(*AuthError) +// +func (r *Request) SetError(err interface{}) *Request { + r.Error = getPointer(err) + return r +} + +// SetFile method is to set single file field name and its path for multipart upload. +// resty.R(). +// SetFile("my_file", "/Users/jeeva/Gas Bill - Sep.pdf") +// +func (r *Request) SetFile(param, filePath string) *Request { + r.isMultiPart = true + r.FormData.Set("@"+param, filePath) + + return r +} + +// SetFiles method is to set multiple file field name and its path for multipart upload. +// resty.R(). +// SetFiles(map[string]string{ +// "my_file1": "/Users/jeeva/Gas Bill - Sep.pdf", +// "my_file2": "/Users/jeeva/Electricity Bill - Sep.pdf", +// "my_file3": "/Users/jeeva/Water Bill - Sep.pdf", +// }) +// +func (r *Request) SetFiles(files map[string]string) *Request { + r.isMultiPart = true + + for f, fp := range files { + r.FormData.Set("@"+f, fp) + } + + return r +} + +// SetFileReader method is to set single file using io.Reader for multipart upload. +// resty.R(). +// SetFileReader("profile_img", "my-profile-img.png", bytes.NewReader(profileImgBytes)). 
+// SetFileReader("notes", "user-notes.txt", bytes.NewReader(notesBytes)) +// +func (r *Request) SetFileReader(param, fileName string, reader io.Reader) *Request { + r.isMultiPart = true + + r.multipartFiles = append(r.multipartFiles, &File{ + Name: fileName, + ParamName: param, + Reader: reader, + }) + + return r +} + +// SetContentLength method sets the HTTP header `Content-Length` value for current request. +// By default go-resty won't set `Content-Length`. Also you have an option to enable for every +// request. See `resty.SetContentLength` +// resty.R().SetContentLength(true) +// +func (r *Request) SetContentLength(l bool) *Request { + r.setContentLength = true + + return r +} + +// SetBasicAuth method sets the basic authentication header in the current HTTP request. +// For Header example: +// Authorization: Basic +// +// To set the header for username "go-resty" and password "welcome" +// resty.R().SetBasicAuth("go-resty", "welcome") +// +// This method overrides the credentials set by method `resty.SetBasicAuth`. +// +func (r *Request) SetBasicAuth(username, password string) *Request { + r.UserInfo = &User{Username: username, Password: password} + return r +} + +// SetAuthToken method sets bearer auth token header in the current HTTP request. Header example: +// Authorization: Bearer +// +// Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F +// +// resty.R().SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F") +// +// This method overrides the Auth token set by method `resty.SetAuthToken`. +// +func (r *Request) SetAuthToken(token string) *Request { + r.Token = token + return r +} + +// SetOutput method sets the output file for current HTTP request. Current HTTP response will be +// saved into given file. It is similar to `curl -o` flag. Absoulte path or relative path can be used. 
+// If is it relative path then output file goes under the output directory, as mentioned +// in the `Client.SetOutputDirectory`. +// resty.R(). +// SetOutput("/Users/jeeva/Downloads/ReplyWithHeader-v5.1-beta.zip"). +// Get("http://bit.ly/1LouEKr") +// +// Note: In this scenario `Response.Body` might be nil. +func (r *Request) SetOutput(file string) *Request { + r.outputFile = file + r.isSaveResponse = true + return r +} + +// SetProxy method sets the Proxy URL for current Request. It does not affect client level +// proxy settings. Request level proxy settings takes higher priority, even though client +// level proxy settings exists. +// resty.R(). +// SetProxy("http://proxyserver:8888"). +// Get("http://httpbin.org/get") +// +func (r *Request) SetProxy(proxyURL string) *Request { + if pURL, err := url.Parse(proxyURL); err == nil { + r.proxyURL = pURL + } else { + r.client.Log.Printf("ERROR [%v]", err) + r.proxyURL = nil + } + + return r +} + +// +// HTTP verb method starts here +// + +// Get method does GET HTTP request. It's defined in section 4.3.1 of RFC7231. +func (r *Request) Get(url string) (*Response, error) { + return r.Execute(GET, url) +} + +// Head method does HEAD HTTP request. It's defined in section 4.3.2 of RFC7231. +func (r *Request) Head(url string) (*Response, error) { + return r.Execute(HEAD, url) +} + +// Post method does POST HTTP request. It's defined in section 4.3.3 of RFC7231. +func (r *Request) Post(url string) (*Response, error) { + return r.Execute(POST, url) +} + +// Put method does PUT HTTP request. It's defined in section 4.3.4 of RFC7231. +func (r *Request) Put(url string) (*Response, error) { + return r.Execute(PUT, url) +} + +// Delete method does DELETE HTTP request. It's defined in section 4.3.5 of RFC7231. +func (r *Request) Delete(url string) (*Response, error) { + return r.Execute(DELETE, url) +} + +// Options method does OPTIONS HTTP request. It's defined in section 4.3.7 of RFC7231. 
+func (r *Request) Options(url string) (*Response, error) { + return r.Execute(OPTIONS, url) +} + +// Patch method does PATCH HTTP request. It's defined in section 2 of RFC5789. +func (r *Request) Patch(url string) (*Response, error) { + return r.Execute(PATCH, url) +} + +// Execute method performs the HTTP request with given HTTP method and URL +// for current `Request`. +// resp, err := resty.R().Execute(resty.GET, "http://httpbin.org/get") +// +func (r *Request) Execute(method, url string) (*Response, error) { + if r.isMultiPart && !(method == POST || method == PUT) { + return nil, fmt.Errorf("Multipart content is not allowed in HTTP verb [%v]", method) + } + + r.Method = method + r.URL = url + + if r.client.RetryCount == 0 { + return r.client.execute(r) + } + + var resp *Response + var err error + attempt := 0 + _ = Backoff(func() (*Response, error) { + attempt++ + resp, err = r.client.execute(r) + if err != nil { + r.client.Log.Printf("ERROR [%v] Attempt [%v]", err, attempt) + } + + return resp, err + }, Retries(r.client.RetryCount), RetryConditions(r.client.RetryConditions)) + + return resp, err +} + +func (r *Request) fmtBodyString() (body string) { + body = "***** NO CONTENT *****" + if isPayloadSupported(r.Method) { + // multipart or form-data + if r.isMultiPart || r.isFormData { + body = string(r.bodyBuf.Bytes()) + return + } + + // request body data + if r.Body != nil { + var prtBodyBytes []byte + var err error + + contentType := r.Header.Get(hdrContentTypeKey) + kind := kindOf(r.Body) + if IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map) { + prtBodyBytes, err = json.MarshalIndent(&r.Body, "", " ") + } else if IsXMLType(contentType) && (kind == reflect.Struct) { + prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ") + } else if b, ok := r.Body.(string); ok { + if IsJSONType(contentType) { + bodyBytes := []byte(b) + var out bytes.Buffer + if err = json.Indent(&out, bodyBytes, "", " "); err == nil { + prtBodyBytes = out.Bytes() 
+ } + } else { + body = b + return + } + } else if b, ok := r.Body.([]byte); ok { + body = base64.StdEncoding.EncodeToString(b) + } + + if prtBodyBytes != nil { + body = string(prtBodyBytes) + } + } + + } + + return +} diff --git a/vendor/gopkg.in/resty.v0/resty.go b/vendor/gopkg.in/resty.v0/resty.go new file mode 100644 index 0000000000..b5255a7ee4 --- /dev/null +++ b/vendor/gopkg.in/resty.v0/resty.go @@ -0,0 +1,9 @@ +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +// Package resty provides simple HTTP and REST client for Go inspired by Ruby rest-client. +package resty + +// Version # of go-resty +var Version = "0.9" diff --git a/vendor/gopkg.in/resty.v0/retry.go b/vendor/gopkg.in/resty.v0/retry.go new file mode 100644 index 0000000000..98390a77ae --- /dev/null +++ b/vendor/gopkg.in/resty.v0/retry.go @@ -0,0 +1,110 @@ +// Copyright (c) 2015-2016 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "math" + "math/rand" + "time" +) + +const ( + defaultMaxRetries = 3 + defaultWaitTime = 100 // base Milliseconds + defaultMaxWaitTime = 2000 // cap level Milliseconds +) + +// Option is to create convenient retry options like wait time, max retries, etc. 
+type Option func(*Options) + +// RetryConditionFunc type is for retry condition function +type RetryConditionFunc func(*Response) (bool, error) + +// Options to hold go-resty retry values +type Options struct { + maxRetries int + waitTime int + maxWaitTime int + retryConditions []RetryConditionFunc +} + +// Retries sets the max number of retries +func Retries(value int) Option { + return func(o *Options) { + o.maxRetries = value + } +} + +// WaitTime sets the default wait time to sleep between requests +func WaitTime(value int) Option { + return func(o *Options) { + o.waitTime = value + } +} + +// MaxWaitTime sets the max wait time to sleep between requests +func MaxWaitTime(value int) Option { + return func(o *Options) { + o.maxWaitTime = value + } +} + +// RetryConditions sets the conditions that will be checked for retry. +func RetryConditions(conditions []RetryConditionFunc) Option { + return func(o *Options) { + o.retryConditions = conditions + } +} + +// Backoff retries with increasing timeout duration up until X amount of retries +// (Default is 3 attempts, Override with option Retries(n)) +func Backoff(operation func() (*Response, error), options ...Option) error { + // Defaults + opts := Options{ + maxRetries: defaultMaxRetries, + waitTime: defaultWaitTime, + maxWaitTime: defaultMaxWaitTime, + retryConditions: []RetryConditionFunc{}, + } + + for _, o := range options { + o(&opts) + } + + var ( + resp *Response + err error + ) + base := float64(opts.waitTime) // Time to wait between each attempt + capLevel := float64(opts.maxWaitTime) // Maximum amount of wait time for the retry + for attempt := 0; attempt < opts.maxRetries; attempt++ { + resp, err = operation() + + var needsRetry bool + var conditionErr error + for _, condition := range opts.retryConditions { + needsRetry, conditionErr = condition(resp) + if needsRetry || conditionErr != nil { + break + } + } + + // If the operation returned no error, there was no condition satisfied and + // there was no 
error caused by the conditional functions. + if err == nil && !needsRetry && conditionErr == nil { + return nil + } + // Adding capped exponential backup with jitter + // See the following article... + // http://www.awsarchitectureblog.com/2015/03/backoff.html + temp := math.Min(capLevel, base*math.Exp2(float64(attempt))) + sleepTime := int(temp/2) + rand.Intn(int(temp/2)) + + sleepDuration := time.Duration(sleepTime) * time.Millisecond + time.Sleep(sleepDuration) + } + + return err +} diff --git a/vendor/vendor.json b/vendor/vendor.json index fea5606452..762515a3c1 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -2125,6 +2125,42 @@ "revision": "2dc6a86cf75ce4b33516d2a13c9c0c378310cf3b", "revisionTime": "2016-04-25T23:31:34Z" }, + { + "checksumSHA1": "WQJBP9v20jr44RiZ1YbfrpGaEqk=", + "path": "github.com/newrelic/go-agent", + "revision": "7d12ae2201fc160e486197614a6f65afcf3f8170", + "revisionTime": "2016-11-16T22:44:47Z" + }, + { + "checksumSHA1": "lLXXIL0C/ZzMDqN2BlQRZInhot0=", + "path": "github.com/newrelic/go-agent/internal", + "revision": "7d12ae2201fc160e486197614a6f65afcf3f8170", + "revisionTime": "2016-11-16T22:44:47Z" + }, + { + "checksumSHA1": "mkbupMdy+cF7xyo8xW0A6Bq15k4=", + "path": "github.com/newrelic/go-agent/internal/jsonx", + "revision": "7d12ae2201fc160e486197614a6f65afcf3f8170", + "revisionTime": "2016-11-16T22:44:47Z" + }, + { + "checksumSHA1": "ywxlVKtGArJ2vDfH1rAqEFwSGds=", + "path": "github.com/newrelic/go-agent/internal/logger", + "revision": "7d12ae2201fc160e486197614a6f65afcf3f8170", + "revisionTime": "2016-11-16T22:44:47Z" + }, + { + "checksumSHA1": "S7CiHO7EblgZt9q7wgiXMv/j/ao=", + "path": "github.com/newrelic/go-agent/internal/sysinfo", + "revision": "7d12ae2201fc160e486197614a6f65afcf3f8170", + "revisionTime": "2016-11-16T22:44:47Z" + }, + { + "checksumSHA1": "c2JSKesj3tHYgzIF3QL37WfHWG8=", + "path": "github.com/newrelic/go-agent/internal/utilization", + "revision": "7d12ae2201fc160e486197614a6f65afcf3f8170", + 
"revisionTime": "2016-11-16T22:44:47Z" + }, { "path": "github.com/nu7hatch/gouuid", "revision": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3" @@ -2139,6 +2175,12 @@ "revision": "7cd5fed006859e86dd5641a6cf9812e855b7574a", "revisionTime": "2016-08-11T16:27:25Z" }, + { + "checksumSHA1": "lOEkLP94OsQSLFp+38rY1GjnMtk=", + "path": "github.com/paultyng/go-newrelic/api", + "revision": "81a8e05b0e494285f1322f99f3c6f93c8f1192b1", + "revisionTime": "2016-11-29T00:49:55Z" + }, { "path": "github.com/pborman/uuid", "revision": "dee7705ef7b324f27ceb85a121c61f2c2e8ce988" @@ -2232,6 +2274,12 @@ "path": "github.com/tent/http-link-go", "revision": "ac974c61c2f990f4115b119354b5e0b47550e888" }, + { + "checksumSHA1": "y1hkty5dgBN9elK4gP1TtVjT4e8=", + "path": "github.com/tomnomnom/linkheader", + "revision": "236df730ed7334edb33cb10ba79407491fb4e147", + "revisionTime": "2016-06-19T23:20:27Z" + }, { "checksumSHA1": "jfIUoeCY4uLz1zCgnCxndi5/UNE=", "path": "github.com/ugorji/go/codec", @@ -2365,6 +2413,18 @@ "path": "golang.org/x/net/context/ctxhttp", "revision": "04b9de9b512f58addf28c9853d50ebef61c3953e" }, + { + "checksumSHA1": "GIGmSrYACByf5JDIP9ByBZksY80=", + "path": "golang.org/x/net/idna", + "revision": "4971afdc2f162e82d185353533d3cf16188a9f4e", + "revisionTime": "2016-11-15T21:05:04Z" + }, + { + "checksumSHA1": "AmZIW67T/HUlTTflTmOIy6jdq74=", + "path": "golang.org/x/net/publicsuffix", + "revision": "4971afdc2f162e82d185353533d3cf16188a9f4e", + "revisionTime": "2016-11-15T21:05:04Z" + }, { "path": "golang.org/x/oauth2", "revision": "2897dcade18a126645f1368de827f1e613a60049" @@ -2505,6 +2565,12 @@ "comment": "v1.8.5", "path": "gopkg.in/ini.v1", "revision": "77178f22699a4ecafce485fb8d86b7afeb7e3e28" + }, + { + "checksumSHA1": "mkLQOQwQwoUc9Kr9+PaVGrKUzI4=", + "path": "gopkg.in/resty.v0", + "revision": "24dc7ba4bc1ef9215048b28e7248f99c42901db5", + "revisionTime": "2016-11-01T17:03:53Z" } ], "rootPath": "github.com/hashicorp/terraform" diff --git 
a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index ce73017e13..cd8ab8faa6 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -37,6 +37,7 @@ body.layout-librato, body.layout-logentries, body.layout-mailgun, body.layout-mysql, +body.layout-newrelic, body.layout-nomad, body.layout-openstack, body.layout-packet, diff --git a/website/source/docs/providers/newrelic/d/application.html.markdown b/website/source/docs/providers/newrelic/d/application.html.markdown new file mode 100644 index 0000000000..ae2f2c1557 --- /dev/null +++ b/website/source/docs/providers/newrelic/d/application.html.markdown @@ -0,0 +1,52 @@ +--- +layout: "newrelic" +page_title: "New Relic: newrelic_application" +sidebar_current: "docs-newrelic-datasource-application" +description: |- + Looks up the information about an application in New Relic. +--- + +# newrelic\_application + +Use this data source to get information about a specific application in New Relic. + +## Example Usage + +``` +data "newrelic_application" "app" { + name = "my-app" +} + +resource "newrelic_alert_policy" "foo" { + name = "foo" +} + +resource "newrelic_alert_condition" "foo" { + policy_id = "${newrelic_alert_policy.foo.id}" + + name = "foo" + type = "apm_app_metric" + entities = ["${data.newrelic_application.app.id}"] + metric = "apdex" + runbook_url = "https://www.example.com" + + term { + duration = 5 + operator = "below" + priority = "critical" + threshold = "0.75" + time_function = "all" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the application in New Relic. + +## Attributes Reference +* `id` - The ID of the application. +* `instance_ids` - A list of instance IDs associated with the application. +* `host_ids` - A list of host IDs associated with the application. 
diff --git a/website/source/docs/providers/newrelic/index.html.markdown b/website/source/docs/providers/newrelic/index.html.markdown new file mode 100644 index 0000000000..7770c2bf10 --- /dev/null +++ b/website/source/docs/providers/newrelic/index.html.markdown @@ -0,0 +1,71 @@ +--- +layout: "newrelic" +page_title: "Provider: New Relic" +sidebar_current: "docs-newrelic-index" +description: |- + New Relic offers a performance management solution enabling developers to + diagnose and fix application performance problems in real time. +--- + +# New Relic Provider + +[New Relic](https://newrelic.com/) offers a performance management solution +enabling developers to diagnose and fix application performance problems in real time. + +Use the navigation to the left to read about the available resources. + +## Example Usage + +``` +# Configure the New Relic provider +provider "newrelic" { + api_key = "${var.newrelic_api_key}" +} + +# Create an alert policy +resource "newrelic_alert_policy" "alert" { + name = "Alert" +} + +# Add a condition +resource "newrelic_alert_condition" "foo" { + policy_id = "${newrelic_alert_policy.alert.id}" + + name = "foo" + type = "apm_app_metric" + entities = ["12345"] # You can look this up in New Relic + metric = "apdex" + runbook_url = "https://docs.example.com/my-runbook" + + term { + duration = 5 + operator = "below" + priority = "critical" + threshold = "0.75" + time_function = "all" + } +} + +# Add a notification channel +resource "newrelic_alert_channel" "email" { + name = "email" + type = "email" + + configuration = { + recipients = "paul@example.com" + include_json_attachment = "1" + } +} + +# Link the channel to the policy +resource "newrelic_alert_policy_channel" "alert_email" { + policy_id = "${newrelic_alert_policy.alert.id}" + channel_id = "${newrelic_alert_channel.email.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `api_key` - (Required) Your New Relic API key. 
diff --git a/website/source/docs/providers/newrelic/r/alert_channel.html.markdown b/website/source/docs/providers/newrelic/r/alert_channel.html.markdown new file mode 100644 index 0000000000..ae51ec4ab6 --- /dev/null +++ b/website/source/docs/providers/newrelic/r/alert_channel.html.markdown @@ -0,0 +1,45 @@ +--- +layout: "newrelic" +page_title: "New Relic: newrelic_alert_channel" +sidebar_current: "docs-newrelic-resource-alert-channel" +description: |- + Create and manage a notification channel for alerts in New Relic. +--- + +# newrelic\_alert\_channel + +## Example Usage + +``` +resource "newrelic_alert_channel" "foo" { + name = "foo" + type = "email" + + configuration = { + recipients = "foo@example.com" + include_json_attachment = "1" + } +} +``` + +## Argument Reference + +The following arguments are supported: + + * `name` - (Required) The name of the channel. + * `type` - (Required) The type of channel. One of: `campfire`, `email`, `hipchat`, `opsgenie`, `pagerduty`, `slack`, `user`, `victorops`, or `webhook`. + * `configuration` - (Required) A map of key / value pairs with channel type specific values. + +## Attributes Reference + +The following attributes are exported: + + * `id` - The ID of the channel. + +## Import + +Alert channels can be imported using the `id`, e.g. + +``` +$ terraform import newrelic_alert_channel.main 12345 +``` diff --git a/website/source/docs/providers/newrelic/r/alert_condition.html.markdown b/website/source/docs/providers/newrelic/r/alert_condition.html.markdown new file mode 100644 index 0000000000..25f3c9ef08 --- /dev/null +++ b/website/source/docs/providers/newrelic/r/alert_condition.html.markdown @@ -0,0 +1,77 @@ +--- +layout: "newrelic" +page_title: "New Relic: newrelic_alert_condition" +sidebar_current: "docs-newrelic-resource-alert-condition" +description: |- + Create and manage an alert condition for a policy in New Relic. 
+--- + +# newrelic\_alert\_condition + +## Example Usage + +``` +data "newrelic_application" "app" { + name = "my-app" +} + +resource "newrelic_alert_policy" "foo" { + name = "foo" +} + +resource "newrelic_alert_condition" "foo" { + policy_id = "${newrelic_alert_policy.foo.id}" + + name = "foo" + type = "apm_app_metric" + entities = ["${data.newrelic_application.app.id}"] + metric = "apdex" + runbook_url = "https://www.example.com" + + term { + duration = 5 + operator = "below" + priority = "critical" + threshold = "0.75" + time_function = "all" + } +} +``` + +## Argument Reference + +The following arguments are supported: + + * `policy_id` - (Required) The ID of the policy where this condition should be used. + * `name` - (Required) The title of the condition + * `type` - (Required) The type of condition. One of: `apm_app_metric`, `apm_kt_metric`, `servers_metric`, `browser_metric`, `mobile_metric` + * `entities` - (Required) The instance IDS associated with this condition. + * `metric` - (Required) The metric field accepts parameters based on the `type` set. + * `runbook_url` - (Optional) Runbook URL to display in notifications. + * `term` - (Required) A list of terms for this condition. See [Terms](#terms) below for details. + * `user_defined_metric` - (Optional) A custom metric to be evaluated. + * `user_defined_value_function` - (Optional) One of: `average`, `min`, `max`, `total`, or `sample_size`. + +## Terms + +The `term` mapping supports the following arguments: + + * `duration` - (Required) In minutes, must be: `5`, `10`, `15`, `30`, `60`, or `120`. + * `operator` - (Optional) `above`, `below`, or `equal`. Defaults to `equal`. + * `priority` - (Optional) `critical` or `warning`. Defaults to `critical`. + * `threshold` - (Required) Must be 0 or greater. + * `time_function` - (Required) `all` or `any`. + +## Attributes Reference + +The following attributes are exported: + + * `id` - The ID of the alert condition. 
+ +## Import + +Alert conditions can be imported using the `id`, e.g. + +``` +$ terraform import newrelic_alert_condition.main 12345 +``` diff --git a/website/source/docs/providers/newrelic/r/alert_policy.html.markdown b/website/source/docs/providers/newrelic/r/alert_policy.html.markdown new file mode 100644 index 0000000000..abb77c6f99 --- /dev/null +++ b/website/source/docs/providers/newrelic/r/alert_policy.html.markdown @@ -0,0 +1,40 @@ +--- +layout: "newrelic" +page_title: "New Relic: newrelic_alert_policy" +sidebar_current: "docs-newrelic-resource-alert-policy" +description: |- + Create and manage alert policies in New Relic. +--- + +# newrelic\_alert\_policy + +## Example Usage + +``` +resource "newrelic_alert_policy" "foo" { + name = "foo" +} +``` + +## Argument Reference + +The following arguments are supported: + + * `name` - (Required) The name of the policy. + * `incident_preference` - (Optional) The rollup strategy for the policy. Options include: `PER_POLICY`, `PER_CONDITION`, or `PER_CONDITION_AND_TARGET`. The default is `PER_POLICY`. + +## Attributes Reference + +The following attributes are exported: + + * `id` - The ID of the policy. + * `created_at` - The time the policy was created. + * `updated_at` - The time the policy was last updated. + +## Import + +Alert policies can be imported using the `id`, e.g. + +``` +$ terraform import newrelic_alert_policy.main 12345 +``` diff --git a/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown b/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown new file mode 100644 index 0000000000..e222bade55 --- /dev/null +++ b/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown @@ -0,0 +1,39 @@ +--- +layout: "newrelic" +page_title: "New Relic: newrelic_alert_policy_channel" +sidebar_current: "docs-newrelic-resource-alert-policy-channel" +description: |- + Map alert policies to alert channels in New Relic. 
+--- + +# newrelic\_alert\_policy\_channel + +## Example Usage + +``` +resource "newrelic_alert_policy" "foo" { + name = "foo" +} + +resource "newrelic_alert_channel" "foo" { + name = "foo" + type = "email" + + configuration = { + recipients = "foo@example.com" + include_json_attachment = "1" + } +} + +resource "newrelic_alert_policy_channel" "foo" { + policy_id = "${newrelic_alert_policy.foo.id}" + channel_id = "${newrelic_alert_channel.foo.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + + * `policy_id` - (Required) The ID of the policy. + * `channel_id` - (Required) The ID of the channel. diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index d7ce8c629b..314e41d3b7 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -282,6 +282,10 @@ Mailgun + > + New Relic + + > Nomad diff --git a/website/source/layouts/newrelic.erb b/website/source/layouts/newrelic.erb new file mode 100644 index 0000000000..099ffb736c --- /dev/null +++ b/website/source/layouts/newrelic.erb @@ -0,0 +1,44 @@ +<% wrap_layout :inner do %> +<% content_for :sidebar do %> + +<% end %> + +<%= yield %> +<% end %>