fs.s3a.endpoint= <ECS IP address (only one node address) or LoadBalancer IP>:9020
|
ECS IP address to connect to.
NOTE: s3a does not support multiple IP addresses, so it is better to use a load balancer
|
fs.s3a.access.key= <S3 Object User as created on ECS>
|
Your AWS access key ID
|
fs.s3a.secret.key=<S3 Object User Secret Key as on ECS>
|
Your AWS secret key
|
fs.s3a.connection.maximum=15
|
Controls how many parallel connections HttpClient spawns (default: 15)
|
fs.s3a.connection.ssl.enabled=false
|
Enables or disables SSL connections to S3 (default: true)
|
fs.s3a.path.style.access=false
|
Enables S3 path-style access, i.e. disables the default virtual-hosting behavior (default: false)
|
fs.s3a.connection.establish.timeout=5000
|
Socket connection setup timeout in milliseconds (default: 5000).
|
fs.s3a.connection.timeout=200000
|
Socket connection timeout in milliseconds (default: 200000)
|
fs.s3a.paging.maximum=1000
|
How many keys to request from S3 when doing directory listings at a time (default: 5000)
|
fs.s3a.threads.max=10
|
The total number of threads available in the filesystem for data uploads *or any other queued filesystem operation*.
|
fs.s3a.socket.send.buffer=8192
|
Socket send buffer hint to the Amazon connector. Represented in bytes.
|
fs.s3a.socket.recv.buffer=8192
|
Socket receive buffer hint to the Amazon connector. Represented in bytes.
|
fs.s3a.threads.keepalivetime=60
|
Number of seconds a thread can be idle before being terminated.
|
fs.s3a.max.total.tasks=5
|
The number of operations which can be queued for execution.
|
fs.s3a.multipart.size=100M
|
How big (in bytes) to split an upload or copy operation up into (default: 100 MB)
|
fs.s3a.multipart.threshold=2147483647
|
Until a file is this large (in bytes), use non-parallel upload (default: 2 GB)
|
fs.s3a.multiobjectdelete.enable=true
|
When enabled, multiple single-object delete requests are replaced by a single 'delete multiple objects'-request, reducing the number of requests. Beware: legacy S3-compatible object stores might not support this request.
|
fs.s3a.acl.default=PublicReadWrite
|
Set a canned ACL on newly created/copied objects (Private | PublicRead | PublicReadWrite | AuthenticatedRead | LogDeliveryWrite | BucketOwnerRead | BucketOwnerFullControl)
|
fs.s3a.multipart.purge=false
|
True if you want to purge existing multipart uploads that may not have been completed/aborted correctly (default: false)
|
fs.s3a.multipart.purge.age=86400
|
Minimum age in seconds of multipart uploads to purge (default: 86400)
|
fs.s3a.block.size=32M
|
Block size to use when reading files using s3a: file system. A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
|
fs.s3a.readahead.range=64K
|
Bytes to read ahead during a seek() before closing and re-opening the S3 HTTP connection. This option will be overridden if any call to setReadahead() is made to an open stream. A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
|
fs.s3a.buffer.dir=${hadoop.tmp.dir}/s3a
|
Comma separated list of directories that will be used to buffer file writes out of (default: uses fs.s3.buffer.dir)
|