Use of com.zhouzifei.tool.common.fastdfs.common.NameValuePair in project simpleFS by shengdingbox.
The class FastDfsOssApiClient, method uploadInputStream.
@Override
public String uploadInputStream(InputStream is, String fileName) {
    try {
        // Tracker client
        TrackerClient trackerClient = new TrackerClient();
        // Obtain a TrackerServer connection
        TrackerServer trackerServer = trackerClient.getTrackerServer();
        // Create the StorageClient
        StorageClient storageClient = new StorageClient(trackerServer);
        // File metadata entries
        NameValuePair[] nameValuePairs = { new NameValuePair("author", "huhy") };
        byte[] bytes = IOUtils.toByteArray(is);
        final String suffix = FileUtil.getSuffix(fileName);
        // upload_file returns { groupName, remoteFileName }
        String[] txts = storageClient.upload_file(bytes, suffix, nameValuePairs);
        return String.join("/", txts);
    } catch (IOException e) {
        log.info("Upload failed, cause: {}", e.getMessage());
        throw new ServiceException("File upload exception!");
    }
}
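For context, here is a minimal caller sketch, not taken from the project: the no-arg FastDfsOssApiClient constructor and the local file name are assumptions; only the uploadInputStream signature comes from the snippet above.

import java.io.FileInputStream;
import java.io.InputStream;

public class UploadDemo {
    public static void main(String[] args) throws Exception {
        // Assumed: a default-constructed, pre-configured client
        FastDfsOssApiClient client = new FastDfsOssApiClient();
        try (InputStream is = new FileInputStream("avatar.png")) {
            // Returns "group/remoteFileName", e.g. "group1/M00/00/00/xxx.png"
            String storedPath = client.uploadInputStream(is, "avatar.png");
            System.out.println("Stored at: " + storedPath);
        }
    }
}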
Use of com.zhouzifei.tool.common.fastdfs.common.NameValuePair in project simpleFS by shengdingbox.
The class ProtoCommon, method split_metadata.
/**
 * Split metadata into a name-value pair array.
 *
 * @param meta_buff       metadata
 * @param recordSeperator record/row separator
 * @param filedSeperator  field/column separator
 * @return name-value pair array
 */
public static NameValuePair[] split_metadata(String meta_buff, String recordSeperator, String filedSeperator) {
    String[] rows;
    String[] cols;
    NameValuePair[] meta_list;
    rows = meta_buff.split(recordSeperator);
    meta_list = new NameValuePair[rows.length];
    for (int i = 0; i < rows.length; i++) {
        // Split each record into at most two columns: name and (optional) value
        cols = rows[i].split(filedSeperator, 2);
        meta_list[i] = new NameValuePair(cols[0]);
        if (cols.length == 2) {
            meta_list[i].setValue(cols[1]);
        }
    }
    return meta_list;
}
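To see the splitting behavior, here is a small sketch, not from the project, assuming the usual getName()/getValue() accessors on NameValuePair. The separator literals follow the upstream FastDFS convention ("\u0001" between records, "\u0002" between name and value); adjust them if this fork uses different constants.

public class SplitMetadataDemo {
    public static void main(String[] args) {
        // Two complete pairs plus one record with no field separator
        String metaBuff = "author\u0002huhy\u0001width\u00021024\u0001flag";
        NameValuePair[] pairs = ProtoCommon.split_metadata(metaBuff, "\u0001", "\u0002");
        for (NameValuePair pair : pairs) {
            // "flag" keeps a null value because its record has no separator
            System.out.println(pair.getName() + " = " + pair.getValue());
        }
    }
}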
Use of com.zhouzifei.tool.common.fastdfs.common.NameValuePair in project simpleFS by shengdingbox.
The class FastDfsOssApiClient, method multipartUpload.
@Override
public VirtualFile multipartUpload(InputStream inputStream, MetaDataRequest metaDataRequest) {
    Date startTime = new Date();
    final String name = metaDataRequest.getName();
    final Integer chunkSize = metaDataRequest.getChunkSize();
    final Integer chunk = metaDataRequest.getChunk();
    final Integer chunks = metaDataRequest.getChunks();
    final String fileMd5 = metaDataRequest.getFileMd5();
    final Long size = metaDataRequest.getSize();
    String fileExtName = FileUtil.getSuffix(String.valueOf(name));
    try {
        // Tracker client
        TrackerClient trackerClient = new TrackerClient();
        // Obtain a TrackerServer connection
        TrackerServer trackerServer = trackerClient.getTrackerServer();
        // Create the StorageClient
        StorageClient storageClient = new StorageClient(trackerServer);
        // File metadata entries
        NameValuePair[] nameValuePairs = { new NameValuePair("author", "huhy") };
        final byte[] bytes = IOUtils.toByteArray(inputStream);
        if (chunk.equals(ZERO_INT)) {
            // First chunk: create an appender file and cache its storage path under the file's MD5
            String[] strings = storageClient.upload_appender_file(UpLoadConstant.DEFAULT_GROUP, bytes, 0, chunkSize, fileExtName, nameValuePairs);
            String path = strings[1];
            super.cacheEngine.add(storageType, fileMd5, path);
        } else {
            // Later chunks: write into the appender file at the chunk's byte offset
            Long offset;
            if (chunk == chunks - 1) {
                // The last chunk may be shorter, so anchor it to the end of the file
                offset = size - chunkSize;
            } else {
                offset = (long) chunk * chunkSize;
            }
            final Object o = cacheEngine.get(storageType, fileMd5);
            final String path = String.valueOf(o);
            storageClient.modify_file(UpLoadConstant.DEFAULT_GROUP, path, offset, bytes);
        }
        if (FileUtil.addChunkAndCheckAllDone(fileMd5, chunks)) {
            // All chunks received: evict the cache entry and assemble the final path
            final Object o = cacheEngine.get(storageType, fileMd5);
            cacheEngine.remove(storageType, fileMd5);
            final String filePath = UpLoadConstant.DEFAULT_GROUP + SLASH + o;
            return VirtualFile.builder()
                    .originalFileName(FileUtil.getName(String.valueOf(name)))
                    .suffix(fileExtName)
                    .uploadStartTime(startTime)
                    .uploadEndTime(new Date())
                    .filePath(filePath)
                    .fileHash(fileMd5)
                    .fullFilePath(this.domainUrl + filePath)
                    .build();
        }
    } catch (IOException e) {
        log.error("Multipart upload failed, cause: {}", e.getMessage());
    }
    return null;
}
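A hypothetical driver for this method is sketched below. The MetaDataRequest setters, its no-arg constructor, the client constructor, and the getFullFilePath() accessor are assumptions inferred from the getters and builder fields used above; the MD5 value is a placeholder.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;

public class MultipartUploadDemo {
    private static final int CHUNK = 1024 * 1024; // 1 MiB per part

    public static void main(String[] args) throws IOException {
        byte[] data = Files.readAllBytes(Paths.get("video.mp4"));
        int chunks = (data.length + CHUNK - 1) / CHUNK;
        FastDfsOssApiClient client = new FastDfsOssApiClient(); // assumed constructor
        VirtualFile result = null;
        for (int i = 0; i < chunks; i++) {
            byte[] part = Arrays.copyOfRange(data, i * CHUNK,
                    Math.min(data.length, (i + 1) * CHUNK));
            MetaDataRequest req = new MetaDataRequest();  // assumed no-arg constructor
            req.setName("video.mp4");                     // assumed setters mirroring the getters
            req.setChunk(i);
            req.setChunks(chunks);
            req.setChunkSize(part.length);
            req.setSize((long) data.length);
            req.setFileMd5("<md5-of-whole-file>");        // placeholder; compute the real MD5 up front
            result = client.multipartUpload(new ByteArrayInputStream(part), req);
        }
        // Only the call completing the final chunk returns a non-null VirtualFile
        System.out.println(result == null ? "incomplete" : result.getFullFilePath());
    }
}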