Enable SPEC SAT for Java sources (#18779)

* enable py spec compatibility tests

* add missing properties

* use expected spec file instead of the source spec.json + use dummy config file

* add missing files to clickhouse

* move test files from the unit-test folder to the integration-test folder

* add missing files to cockroachdb

* add missing files to db2 + fix spec format

* add missing files to elasticsearch + fix spec format

* add missing files to jdbc

* add missing files to mongodb_v2 + fix spec format

* add missing files to mssql + fix spec format

* add missing files to mysql + fix spec format

* add missing files to postgres + fix spec format

* add missing files to oracle + fix spec format

* add missing files to redshift

* add missing files to sftp

* add missing files to snowflake + fix spec format

* add missing files to tidb

* add missing files to kafka + fix spec format

* airbyte-source-acceptance-test added

Signed-off-by: Sergey Chvalyuk <grubberr@gmail.com>

* add missing import

* Delete acceptance-test-config.yml

* Delete acceptance-test-docker.sh

* Update build.gradle

* Update build.gradle

* format

* revert changes

* manual .sh files format

* upd expected spec

* format

* fix SAT after master merge

Signed-off-by: Sergey Chvalyuk <grubberr@gmail.com>
Co-authored-by: Sergey Chvalyuk <grubberr@gmail.com>
Authored by Andrii Leonets on 2022-11-09 21:45:46 +02:00, committed by GitHub
parent a4ea907094
commit 58e4ef6912
106 changed files with 3557 additions and 140 deletions

View File: acceptance-test-config.yml (new file)

@@ -0,0 +1,7 @@
# See [Source Acceptance Tests](https://docs.airbyte.com/connector-development/testing-connectors/source-acceptance-tests-reference)
# for more information about how to configure these tests
connector_image: airbyte/source-kafka:dev
tests:
spec:
- spec_path: "src/test-integration/resources/expected_spec.json"
config_path: "src/test-integration/resources/dummy_config.json"

View File: acceptance-test-docker.sh (new file)

@@ -0,0 +1,15 @@
#!/usr/bin/env sh
# Build latest connector image
docker build . -t $(cat acceptance-test-config.yml | grep "connector_image" | head -n 1 | cut -d: -f2):dev
# Pull latest acctest image
docker pull airbyte/source-acceptance-test:latest
# Run
docker run --rm -it \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /tmp:/tmp \
-v $(pwd):/test_input \
airbyte/source-acceptance-test \
--acceptance-test-config /test_input

View File: build.gradle

@@ -2,6 +2,7 @@ plugins {
id 'application'
id 'airbyte-docker'
id 'airbyte-integration-test-java'
id 'airbyte-source-acceptance-test'
}
application {

View File: integration_tests/acceptance.py (new file)

@@ -0,0 +1,16 @@
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import pytest
pytest_plugins = ("source_acceptance_test.plugin",)
@pytest.fixture(scope="session", autouse=True)
def connector_setup():
"""This fixture is a placeholder for external resources that acceptance test might require."""
# TODO: setup test dependencies if needed. otherwise remove the TODO comments
yield
# TODO: clean up test dependencies

View File: src/main/resources/spec.json

@@ -9,7 +9,7 @@
"title": "Kafka Source Spec",
"type": "object",
"required": ["bootstrap_servers", "subscription", "protocol"],
"additionalProperties": false,
"additionalProperties": true,
"properties": {
"MessageFormat": {
"title": "MessageFormat",
@@ -21,8 +21,7 @@
"properties": {
"deserialization_type": {
"type": "string",
"enum": ["JSON"],
"default": "JSON"
"const": "JSON"
}
}
},
@@ -30,9 +29,7 @@
"title": "AVRO",
"properties": {
"deserialization_type": {
"type": "string",
"enum": ["AVRO"],
"default": "AVRO"
"const": "AVRO"
},
"deserialization_strategy": {
"type": "string",
@@ -77,9 +74,7 @@
"subscription_type": {
"description": "Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).\nIf the given list of topic partitions is empty, it is treated the same as unsubscribe().",
"type": "string",
"const": "assign",
"enum": ["assign"],
"default": "assign"
"const": "assign"
},
"topic_partitions": {
"title": "List of topic:partition Pairs",
@@ -95,9 +90,7 @@
"subscription_type": {
"description": "The Topic pattern from which the records will be read.",
"type": "string",
"const": "subscribe",
"enum": ["subscribe"],
"default": "subscribe"
"const": "subscribe"
},
"topic_pattern": {
"title": "Topic Pattern",
@@ -143,8 +136,7 @@
"properties": {
"security_protocol": {
"type": "string",
"enum": ["PLAINTEXT"],
"default": "PLAINTEXT"
"const": "PLAINTEXT"
}
}
},
@@ -158,15 +150,13 @@
"properties": {
"security_protocol": {
"type": "string",
"enum": ["SASL_PLAINTEXT"],
"default": "SASL_PLAINTEXT"
"const": "SASL_PLAINTEXT"
},
"sasl_mechanism": {
"title": "SASL Mechanism",
"description": "The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.",
"type": "string",
"default": "PLAIN",
"enum": ["PLAIN"]
"const": "PLAIN"
},
"sasl_jaas_config": {
"title": "SASL JAAS Config",
@@ -187,8 +177,7 @@
"properties": {
"security_protocol": {
"type": "string",
"enum": ["SASL_SSL"],
"default": "SASL_SSL"
"const": "SASL_SSL"
},
"sasl_mechanism": {
"title": "SASL Mechanism",

View File: src/test-integration/resources/dummy_config.json (new file)

@@ -0,0 +1,10 @@
{
"bootstrap_servers": "default",
"subscription": {
"subscription_type": "assign",
"topic_partitions": "default"
},
"protocol": {
"security_protocol": "PLAINTEXT"
}
}

View File: src/test-integration/resources/expected_spec.json (new file)

@@ -0,0 +1,275 @@
{
"documentationUrl": "https://docs.airbyte.com/integrations/sources/kafka",
"connectionSpecification": {
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Kafka Source Spec",
"type": "object",
"required": ["bootstrap_servers", "subscription", "protocol"],
"additionalProperties": true,
"properties": {
"MessageFormat": {
"title": "MessageFormat",
"type": "object",
"description": "The serialization used based on this ",
"oneOf": [
{
"title": "JSON",
"properties": {
"deserialization_type": {
"type": "string",
"const": "JSON"
}
}
},
{
"title": "AVRO",
"properties": {
"deserialization_type": {
"const": "AVRO"
},
"deserialization_strategy": {
"type": "string",
"enum": [
"TopicNameStrategy",
"RecordNameStrategy",
"TopicRecordNameStrategy"
],
"default": "TopicNameStrategy"
},
"schema_registry_url": {
"type": "string",
"examples": ["http://localhost:8081"]
},
"schema_registry_username": {
"type": "string",
"default": ""
},
"schema_registry_password": {
"type": "string",
"default": ""
}
}
}
]
},
"bootstrap_servers": {
"title": "Bootstrap Servers",
"description": "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping&mdash;this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form <code>host1:port1,host2:port2,...</code>. Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).",
"type": "string",
"examples": ["kafka-broker1:9092,kafka-broker2:9092"]
},
"subscription": {
"title": "Subscription Method",
"type": "object",
"description": "You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.",
"oneOf": [
{
"title": "Manually assign a list of partitions",
"required": ["subscription_type", "topic_partitions"],
"properties": {
"subscription_type": {
"description": "Manually assign a list of partitions to this consumer. This interface does not allow for incremental assignment and will replace the previous assignment (if there is one).\nIf the given list of topic partitions is empty, it is treated the same as unsubscribe().",
"type": "string",
"const": "assign"
},
"topic_partitions": {
"title": "List of topic:partition Pairs",
"type": "string",
"examples": ["sample.topic:0, sample.topic:1"]
}
}
},
{
"title": "Subscribe to all topics matching specified pattern",
"required": ["subscription_type", "topic_pattern"],
"properties": {
"subscription_type": {
"description": "The Topic pattern from which the records will be read.",
"type": "string",
"const": "subscribe"
},
"topic_pattern": {
"title": "Topic Pattern",
"type": "string",
"examples": ["sample.topic"]
}
}
}
]
},
"test_topic": {
"title": "Test Topic",
"description": "The Topic to test in case the Airbyte can consume messages.",
"type": "string",
"examples": ["test.topic"]
},
"group_id": {
"title": "Group ID",
"description": "The Group ID is how you distinguish different consumer groups.",
"type": "string",
"examples": ["group.id"]
},
"max_poll_records": {
"title": "Max Poll Records",
"description": "The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll.",
"type": "integer",
"default": 500
},
"polling_time": {
"title": "Polling Time",
"description": "Amount of time Kafka connector should try to poll for messages.",
"type": "integer",
"default": 100
},
"protocol": {
"title": "Protocol",
"type": "object",
"description": "The Protocol used to communicate with brokers.",
"oneOf": [
{
"title": "PLAINTEXT",
"required": ["security_protocol"],
"properties": {
"security_protocol": {
"type": "string",
"const": "PLAINTEXT"
}
}
},
{
"title": "SASL PLAINTEXT",
"required": [
"security_protocol",
"sasl_mechanism",
"sasl_jaas_config"
],
"properties": {
"security_protocol": {
"type": "string",
"const": "SASL_PLAINTEXT"
},
"sasl_mechanism": {
"title": "SASL Mechanism",
"description": "The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.",
"type": "string",
"const": "PLAIN"
},
"sasl_jaas_config": {
"title": "SASL JAAS Config",
"description": "The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.",
"type": "string",
"default": "",
"airbyte_secret": true
}
}
},
{
"title": "SASL SSL",
"required": [
"security_protocol",
"sasl_mechanism",
"sasl_jaas_config"
],
"properties": {
"security_protocol": {
"type": "string",
"const": "SASL_SSL"
},
"sasl_mechanism": {
"title": "SASL Mechanism",
"description": "The SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.",
"type": "string",
"default": "GSSAPI",
"enum": [
"GSSAPI",
"OAUTHBEARER",
"SCRAM-SHA-256",
"SCRAM-SHA-512",
"PLAIN"
]
},
"sasl_jaas_config": {
"title": "SASL JAAS Config",
"description": "The JAAS login context parameters for SASL connections in the format used by JAAS configuration files.",
"type": "string",
"default": "",
"airbyte_secret": true
}
}
}
]
},
"client_id": {
"title": "Client ID",
"description": "An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.",
"type": "string",
"examples": ["airbyte-consumer"]
},
"enable_auto_commit": {
"title": "Enable Auto Commit",
"description": "If true, the consumer's offset will be periodically committed in the background.",
"type": "boolean",
"default": true
},
"auto_commit_interval_ms": {
"title": "Auto Commit Interval, ms",
"description": "The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true.",
"type": "integer",
"default": 5000
},
"client_dns_lookup": {
"title": "Client DNS Lookup",
"description": "Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.",
"type": "string",
"default": "use_all_dns_ips",
"enum": [
"default",
"use_all_dns_ips",
"resolve_canonical_bootstrap_servers_only"
]
},
"retry_backoff_ms": {
"title": "Retry Backoff, ms",
"description": "The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.",
"type": "integer",
"default": 100
},
"request_timeout_ms": {
"title": "Request Timeout, ms",
"description": "The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.",
"type": "integer",
"default": 30000
},
"receive_buffer_bytes": {
"title": "Receive Buffer, bytes",
"description": "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.",
"type": "integer",
"default": 32768
},
"auto_offset_reset": {
"title": "Auto Offset Reset",
"description": "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer.",
"type": "string",
"default": "latest",
"enum": ["latest", "earliest", "none"]
},
"repeated_calls": {
"title": "Repeated Calls",
"description": "The number of repeated calls to poll() if no messages were received.",
"type": "integer",
"default": 3
},
"max_records_process": {
"title": "Maximum Records",
"description": "The Maximum to be processed per execution",
"type": "integer",
"default": 100000
}
}
},
"supportsIncremental": true,
"supportsNormalization": false,
"supportsDBT": false,
"supported_destination_sync_modes": [],
"supported_source_sync_modes": ["append"]
}