Compare commits: patch/6.5. ... patch/TPS- (1 commit: 2b2dc5ce4e)

PATCH_RELEASE_NOTE.md (new file, 57 lines)
@@ -0,0 +1,57 @@
---
version: 6.4.1
module: https://talend.poolparty.biz/coretaxonomy/42
product:
- https://talend.poolparty.biz/coretaxonomy/23
---

# TPS-3811

| Info | Value |
| ---------------- | ---------------- |
| Patch Name | Patch\_20200228\_TPS-3811\_v1-6.4.1 |
| Release Date | 2020-02-28 |
| Target Version | 20170623_1246-V6.4.1 |
| Product affected | Talend Studio |

## Introduction

This is a self-contained patch.

**NOTE**: For information on how to obtain this patch, reach out to your Support contact at Talend.

## Fixed issues

This patch contains the following fixes:

- TPS-3811 [6.4.1] tS3Copy Component does not allow large file (like 8GB) copy from one bucket to another (TDI-40806)

## Prerequisites

Consider the following requirements for your system:

- Talend Studio 6.4.1 must be installed.

## Installation

### Installing the patch using Software update

1) Log on to TAC and go to Configuration -> Software Update, then enter the correct values and save, referring to the documentation: https://help.talend.com/reader/f7Em9WV_cPm2RRywucSN0Q/j9x5iXV~vyxMlUafnDejaQ

2) Switch to the Software update page, where the new patch is listed. The patch can be downloaded from here into the Nexus repository.

3) On the Studio side: log on to Studio in remote mode; the Update button is displayed on the login page. Click this button to install the patch.

### Installing the patch using Talend Studio

1) Create a folder named "patches" under your Studio installation directory and copy the patch .zip file into this folder.

2) Restart your Studio: a window pops up; click OK to install the patch. Alternatively, restart the commandline and the patch will be installed automatically.

### Installing the patch using Commandline

Execute the following commands:

1. Talend-Studio-win-x86_64.exe -nosplash -application org.talend.commandline.CommandLine -consoleLog -data commandline-workspace startServer -p 8002 --talendDebug
2. initRemote {tac_url} -ul {TAC login username} -up {TAC login password}
3. checkAndUpdate -tu {TAC login username} -tup {TAC login password}

@@ -19,6 +19,12 @@ imports="

boolean enableServerSideEncryption = "true".equals(ElementParameterParser.getValue(node, "__ENABLE-SERVER-SIDE-ENCRYPTION__"));

String partSizeMb = ElementParameterParser.getValue(node, "__PART_SIZE__");

if((partSizeMb == null) || "".equals(partSizeMb) || "\"\"".equals(partSizeMb)) {
    partSizeMb = "100";
}

%>
<%@ include file="../tS3Connection/S3Client.javajet" %>
try{
@@ -29,19 +35,93 @@ imports="
<%
}
%>
com.amazonaws.services.s3.model.CopyObjectRequest copyRequest_<%=cid%> = new com.amazonaws.services.s3.model.CopyObjectRequest(<%=from_bucket%>, <%=from_key%>, <%=to_bucket%>, <%=to_key%>);

long partSizeInBytes_<%=cid%> = <%=partSizeMb%> * 1024 * 1024;
long maxBytes4SingleCopyCall_<%=cid%> = 5 * 1024 * 1024 * 1024;//5GB
com.amazonaws.services.s3.model.ObjectMetadata objectMetadata_<%=cid%> = null;
<%
if(enableServerSideEncryption){
%>
com.amazonaws.services.s3.model.ObjectMetadata objectMetadata_<%=cid%> = new com.amazonaws.services.s3.model.ObjectMetadata();
objectMetadata_<%=cid%> = new com.amazonaws.services.s3.model.ObjectMetadata();
objectMetadata_<%=cid%>.setSSEAlgorithm(com.amazonaws.services.s3.model.ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
copyRequest_<%=cid%>.setNewObjectMetadata(objectMetadata_<%=cid%>);
<%
}
%>

conn_<%=cid%>.copyObject(copyRequest_<%=cid%>);
final String from_bucket_<%=cid%> = <%=from_bucket%>;
final String from_key_<%=cid%> = <%=from_key%>;
final String to_bucket_<%=cid%> = <%=to_bucket%>;
final String to_key_<%=cid%> = <%=to_key%>;

//get the source metadata information
com.amazonaws.services.s3.model.GetObjectMetadataRequest metadataRequest_<%=cid%> = new com.amazonaws.services.s3.model.GetObjectMetadataRequest(from_bucket_<%=cid%>, from_key_<%=cid%>);
com.amazonaws.services.s3.model.ObjectMetadata metadataResult_<%=cid%> = conn_<%=cid%>.getObjectMetadata(metadataRequest_<%=cid%>);
long objectSize_<%=cid%> = metadataResult_<%=cid%>.getContentLength();

boolean multiUpload_<%=cid%> = objectSize_<%=cid%> > maxBytes4SingleCopyCall_<%=cid%>;

if(!multiUpload_<%=cid%>) {
    com.amazonaws.services.s3.model.CopyObjectRequest putRequest_<%=cid%> = new com.amazonaws.services.s3.model.CopyObjectRequest(from_bucket_<%=cid%>, from_key_<%=cid%>, to_bucket_<%=cid%>, to_key_<%=cid%>);
    if(objectMetadata_<%=cid%> != null) {
        putRequest_<%=cid%>.setNewObjectMetadata(objectMetadata_<%=cid%>);
    }

    conn_<%=cid%>.copyObject(putRequest_<%=cid%>);
} else {
<%
    //Because of an S3 service limitation, a multipart copy may lose or change some object metadata. Some of that metadata is important, for example the "x-amz-iv" key used for client-side encryption with KMS,
    //without which decryption would fail because a different key is derived than expected. So fail fast here; the combination of a file larger than 5GB and client-side encryption is very rare, so it is not handled further.
%>
    java.util.Map<String,String> userMetadata_<%=cid%> = metadataResult_<%=cid%>.getUserMetadata();
    if((userMetadata_<%=cid%>!=null) && userMetadata_<%=cid%>.get("x-amz-iv")!=null) {
        throw new RuntimeException("the metadata key : \"x-amz-iv\" exists in the current object metadata, its value is important for client encrypt with KMS, which can't be copied as s3 service limit it");
    }

    com.amazonaws.services.s3.model.InitiateMultipartUploadRequest putRequest_<%=cid%> = null;
    if(objectMetadata_<%=cid%> != null) {
        putRequest_<%=cid%> = new com.amazonaws.services.s3.model.InitiateMultipartUploadRequest(to_bucket_<%=cid%>, to_key_<%=cid%>, objectMetadata_<%=cid%>);
    } else {
        //even when passing the source object metadata, some metadata such as "x-amz-iv" will still change
        putRequest_<%=cid%> = new com.amazonaws.services.s3.model.InitiateMultipartUploadRequest(to_bucket_<%=cid%>, to_key_<%=cid%>, metadataResult_<%=cid%>);
    }

    com.amazonaws.services.s3.model.InitiateMultipartUploadResult initResult_<%=cid%> = conn_<%=cid%>.initiateMultipartUpload(putRequest_<%=cid%>);

    long bytePosition_<%=cid%> = 0;
    int partNum_<%=cid%> = 1;

    java.util.List<com.amazonaws.services.s3.model.PartETag> partTags_<%=cid%> = new java.util.ArrayList<com.amazonaws.services.s3.model.PartETag>();

    try {
        while (bytePosition_<%=cid%> < objectSize_<%=cid%>) {
            long lastByte_<%=cid%> = java.lang.Math.min(bytePosition_<%=cid%> + partSizeInBytes_<%=cid%> - 1, objectSize_<%=cid%> - 1);

            com.amazonaws.services.s3.model.CopyPartRequest copyPartRequest_<%=cid%> = new com.amazonaws.services.s3.model.CopyPartRequest()
                .withSourceBucketName(from_bucket_<%=cid%>)
                .withSourceKey(from_key_<%=cid%>)
                .withDestinationBucketName(to_bucket_<%=cid%>)
                .withDestinationKey(to_key_<%=cid%>)
                .withUploadId(initResult_<%=cid%>.getUploadId())
                .withFirstByte(bytePosition_<%=cid%>)
                .withLastByte(lastByte_<%=cid%>)
                .withPartNumber(partNum_<%=cid%>++);
            partTags_<%=cid%>.add(conn_<%=cid%>.copyPart(copyPartRequest_<%=cid%>).getPartETag());
            bytePosition_<%=cid%> += partSizeInBytes_<%=cid%>;
        }

        com.amazonaws.services.s3.model.CompleteMultipartUploadRequest completeRequest_<%=cid%> = new com.amazonaws.services.s3.model.CompleteMultipartUploadRequest(
            to_bucket_<%=cid%>,
            to_key_<%=cid%>,
            initResult_<%=cid%>.getUploadId(),
            partTags_<%=cid%>);
        conn_<%=cid%>.completeMultipartUpload(completeRequest_<%=cid%>);
    } catch (java.lang.Exception uploadException_<%=cid%>) {
        conn_<%=cid%>.abortMultipartUpload(new com.amazonaws.services.s3.model.AbortMultipartUploadRequest(to_bucket_<%=cid%>, to_key_<%=cid%>, initResult_<%=cid%>.getUploadId()));
        throw uploadException_<%=cid%>;
    }
}

<%
if(isLog4jEnabled){
%>

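For readers who prefer to see the copy flow without the JET `<%= %>` placeholders, here is a minimal plain-Java sketch of the same logic using the AWS SDK for Java v1 classes referenced in the hunk above. It is illustrative only: the class and parameter names (`S3CopySketch`, `srcBucket`, `partSizeBytes`, and so on) are not part of the patch, server-side-encryption metadata is omitted, and the 5 GB threshold is written with a `long` literal (`5L * ...`) so the multiplication cannot overflow `int`.

```java
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.*;

public class S3CopySketch {

    /** Copies srcBucket/srcKey to dstBucket/dstKey, switching to multipart copy above 5 GB. */
    public static void copy(AmazonS3 s3, String srcBucket, String srcKey,
                            String dstBucket, String dstKey, long partSizeBytes) {
        // Largest object S3 accepts in a single CopyObject call (long arithmetic to avoid overflow).
        final long maxSingleCopyBytes = 5L * 1024 * 1024 * 1024; // 5 GB

        long objectSize = s3.getObjectMetadata(
                new GetObjectMetadataRequest(srcBucket, srcKey)).getContentLength();

        if (objectSize <= maxSingleCopyBytes) {
            // Small object: a single server-side copy call is enough.
            s3.copyObject(new CopyObjectRequest(srcBucket, srcKey, dstBucket, dstKey));
            return;
        }

        // Large object: initiate a multipart upload and copy it part by part.
        InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(dstBucket, dstKey));
        List<PartETag> partETags = new ArrayList<PartETag>();
        long bytePosition = 0;
        int partNumber = 1;
        try {
            while (bytePosition < objectSize) {
                long lastByte = Math.min(bytePosition + partSizeBytes - 1, objectSize - 1);
                CopyPartRequest part = new CopyPartRequest()
                        .withSourceBucketName(srcBucket).withSourceKey(srcKey)
                        .withDestinationBucketName(dstBucket).withDestinationKey(dstKey)
                        .withUploadId(init.getUploadId())
                        .withFirstByte(bytePosition).withLastByte(lastByte)
                        .withPartNumber(partNumber++);
                partETags.add(s3.copyPart(part).getPartETag());
                bytePosition += partSizeBytes;
            }
            s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
                    dstBucket, dstKey, init.getUploadId(), partETags));
        } catch (Exception e) {
            // Abort the pending multipart upload so orphaned parts do not keep accruing storage.
            s3.abortMultipartUpload(
                    new AbortMultipartUploadRequest(dstBucket, dstKey, init.getUploadId()));
            throw new RuntimeException(e);
        }
    }
}
```

Both CopyObject and CopyPart are server-side operations, so no object data passes through the machine running the job regardless of which branch is taken.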
@@ -261,6 +261,15 @@
            >
                <DEFAULT>""</DEFAULT>
            </PARAMETER>

            <PARAMETER
                NAME="PART_SIZE"
                FIELD="TEXT"
                NUM_ROW="40"
                REQUIRED="true"
            >
                <DEFAULT>100</DEFAULT>
            </PARAMETER>

        </ADVANCED_PARAMETERS>

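The new PART_SIZE advanced parameter is expressed in megabytes and defaults to 100. As a rough worked example (the class name below is hypothetical and the figures assume binary MB/GB), the 8 GB file from TPS-3811 would be copied in 82 parts of at most 100 MB each; since S3 caps a multipart upload at 10,000 parts, the default leaves ample headroom for much larger objects.

```java
public class PartCountSketch {
    public static void main(String[] args) {
        long partSizeBytes = 100L * 1024 * 1024;          // PART_SIZE default: 100 MB
        long objectSizeBytes = 8L * 1024 * 1024 * 1024;   // the 8 GB object from TPS-3811
        // Number of CopyPart calls the multipart loop would issue (ceiling division).
        long parts = (objectSizeBytes + partSizeBytes - 1) / partSizeBytes;
        System.out.println(parts + " parts");             // prints "82 parts"
    }
}
```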
@@ -55,4 +55,6 @@ ARN.NAME=Role ARN
ROLE_SESSION_NAME.NAME=Role session name
SESSION_DURATION.NAME=Session duration(minutes)
SET_STS_ENDPOINT.NAME=STS Endpoint
STS_ENDPOINT.NAME=

STS_ENDPOINT.NAME=
PART_SIZE.NAME=Part size(Mb) for file larger than 5GB