mirror of https://github.com/haiwen/seafile-server.git synced 2025-06-24 14:11:34 +00:00
seafile-server/tests/test_file_operation/test_merge_virtual_repo.py
Jiaqiang Xu 7420b8d738
Go fileserver (#437)
* Initial commit for fileserver written in golang.

[gofileserver] Fix some syntax errors.

Add fs backend and objstore test (#352)

* Add fs backend and objstore test

* modify test case and optimize fs backend

* Modify function name and first write temporary files

* Don't need to reopen the temp files

Add comment for objstore (#354)

* Add comment for objstore

* Modify comment

Add commitmgr and test case (#356)

* Add commitmgr and test case

* Redefine the interface

* Modify comment and interface

* Modify parameter and del unused method

* Add comment for FromData and ToData

Add blockmgr and test case (#357)

* Add blockmgr and test case

* Modify comment and interface

Add fsmgr and test case (#358)

* Add fsmgr and test case

* Add save interface and error details

* Modify errors and comments

Add searpc package and test case (#360)

* Add searpc package

* Add searpc test case

* Add return error and add Request struct

* Modify returned error

* Modify comments

add checkPerm (#369)

Add file and block download (#363)

* Add file and block download

* Modify init and use aes algorithm

* Get block by offset and add stat method

* Modify objID's type

* Fix reset pos after add start

* Add http error handling and record a log when failing to read a block or write a block to the response

* Modify http return code and value names

* Modify http return code and add log info

* Block read add comment and only repeat once

load ccnetdb and support sqlite (#371)

Add zip download (#372)

* Add zip download

* Modify pack dir and log info

* Modify http return code and use Deflate zip compression methods

add /repo/<repo-id>/permission-check (#375)

add /<repo-id>/commit/HEAD (#377)

add  /repo/<repo-id>/commit/<id> (#379)

add /repo/<repo-id>/block/<id> (#380)

add /repo/<repo-id>/fs-id-list (#383)

add /repo/head-commits-multi (#388)

Add file upload api (#378)

* Add file upload api

* Upload api supports posting multiple files and creating relative paths

* Modify handle error and save files directly

* Fix rebase conflict

* index block use channel and optimize mkdir with parents

* Handle jobs and results in a loop

* Mkdir with parents use postMultiFiles and use pointer of SeafDirent

* Del diff_simple size_sched virtual_repo

* Need to check the path with and without slash

* Modify merge trees and add merge test case

* Del postFile and don't close results channel

* Close the file and remove multipart temp file

* Modify merge test case and compare the first name of path

* Use pointer of Entries for SeafDir

* Add test cases for different situations

add /repo/<repo-id>/pack-fs (#389)

add POST /<repo-id>/check-fs and /<repo-id>/check-blocks (#396)

Merge compute repo (#397)

* Add update repo size and merge virtual repo

* Eliminate lint warnings

* Uncomment merge virtual repo and compute repo size

* Need to init the dents

* Use interface{} param and modify removeElems

* Move update dir to file.go and modify logs

* Del sync pkg

add PUT /<repo-id>/commit/<commit-id> (#400)

add PUT /<repo-id>/block/<id> (#401)

add POST /<repo-id>/recv-fs (#398)

add PUT /<repo-id>/commit/HEAD (#402)

Add http return code (#403)

Add file update API (#399)

* Add file update API

* Add GetObjIDByPath and fix change size error

* Add traffic statistics for update api

add diffTrees unit test (#391)

add GET /accessible-repos (#406)

add GET /<repo-id>/block-map/<file-id> (#405)

Add test update repo size and merge virtual repo (#409)

* Update dir need update repo size

* Add test update repo size and merge virtual repo

* Add delay for test ajax

* Add delay before get repo size and modify comment

Use go fileserver for unit test (#410)

* Use go fileserver for unit test

* Blocking scheduling update repo size

* Add delay because sqlite doesn't support concurrency

* Post use multipart form encode

* Del mysql database when test finished

* Fix merge virtual repo failure when using sqlite3

Add upload block API (#412)

fixed error

Add quota-check API (#426)

use diff package

* Use central conf for go fileserver (#428)

* Use central conf for go fileserver

* Fix log error

* use store id and remove share get repo owner (#430)

* Fix permission error (#432)

Co-authored-by: feiniks <36756310+feiniks@users.noreply.github.com>
Co-authored-by: Xiangyue Cai <caixiangyue007@gmail.com>
2021-01-04 11:41:53 +08:00

Python

import pytest
import requests
import os
import time
from tests.config import USER, USER2
from seaserv import seafile_api as api
from requests_toolbelt import MultipartEncoder
file_name = 'file.txt'
file_name_not_replaced = 'file (1).txt'
file_path = os.getcwd() + '/' + file_name
file_content = 'File content.\r\n'
file_size = len(file_content)
resumable_file_name = 'resumable.txt'
resumable_test_file_name = 'test/resumable.txt'
chunked_part1_name = 'part1.txt'
chunked_part2_name = 'part2.txt'
chunked_part1_path = os.getcwd() + '/' + chunked_part1_name
chunked_part2_path = os.getcwd() + '/' + chunked_part2_name
chunked_part1_content = 'First line.\r\n'
chunked_part2_content = 'Second line.\r\n'
total_size = len(chunked_part1_content) + len(chunked_part2_content)
# file_id is not used when uploading files, but
# the argument obj_id of get_fileserver_access_token shouldn't be NULL.
file_id = '0000000000000000000000000000000000000000'
def create_test_file():
    fp = open(file_path, 'w')
    fp.close()
    fp = open(chunked_part1_path, 'w')
    fp.close()
    fp = open(chunked_part2_path, 'w')
    fp.close()

def create_test_dir(repo, dir_name):
    parent_dir = '/'
    api.post_dir(repo.id, parent_dir, dir_name, USER)

def assert_upload_response(response, replace, file_exist):
    assert response.status_code == 200
    response_json = response.json()
    assert response_json[0]['size'] == 0
    assert response_json[0]['id'] == file_id
    if file_exist and not replace:
        assert response_json[0]['name'] == file_name_not_replaced
    else:
        assert response_json[0]['name'] == file_name

def assert_resumable_upload_response(response, repo_id, file_name, upload_complete):
    assert response.status_code == 200
    if not upload_complete:
        assert response.text == '{"success": true}'
        offset = api.get_upload_tmp_file_offset(repo_id, '/' + file_name)
        assert offset == len(chunked_part1_content)
    else:
        response_json = response.json()
        assert response_json[0]['size'] == total_size
        new_file_id = response_json[0]['id']
        assert len(new_file_id) == 40 and new_file_id != file_id
        assert response_json[0]['name'] == resumable_file_name

def assert_update_response(response, is_json):
    assert response.status_code == 200
    if is_json:
        response_json = response.json()
        assert response_json[0]['size'] == file_size
        new_file_id = response_json[0]['id']
        assert len(new_file_id) == 40 and new_file_id != file_id
        assert response_json[0]['name'] == file_name
    else:
        new_file_id = response.text
        assert len(new_file_id) == 40 and new_file_id != file_id

def request_resumable_upload(filepath, headers, upload_url_base, parent_dir, is_ajax):
    write_file(chunked_part1_path, chunked_part1_content)
    write_file(chunked_part2_path, chunked_part2_content)
    m = MultipartEncoder(
        fields={
            'parent_dir': parent_dir,
            'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream')
        })
    params = {'ret-json':'1'}
    headers["Content-type"] = m.content_type
    if is_ajax:
        response = requests.post(upload_url_base, headers = headers,
                                 data = m)
    else:
        response = requests.post(upload_url_base, headers = headers,
                                 data = m, params = params)
    return response

def write_file(file_path, file_content):
    fp = open(file_path, 'w')
    fp.write(file_content)
    fp.close()

def del_local_files():
    os.remove(file_path)
    os.remove(chunked_part1_path)
    os.remove(chunked_part2_path)
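
# The test below exercises the virtual repo merge path: /dir1 of the origin
# repo is shared to USER2 as a virtual repo, files are uploaded and updated
# through the fileserver HTTP API against the virtual repo, and the sizes of
# both the virtual repo and the origin repo are checked after each change to
# verify that the changes are merged back into the origin repo.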
def test_merge_virtual_repo(repo):
    api.post_dir(repo.id, '/dir1', 'subdir1', USER)
    api.post_dir(repo.id, '/dir2', 'subdir2', USER)
    v_repo_id = api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, 'rw')
    create_test_file()
    params = {'ret-json':'1'}
    obj_id = '{"parent_dir":"/"}'
    create_test_dir(repo, 'test')

    #test upload file to virtual repo root dir.
    token = api.get_fileserver_access_token(v_repo_id, obj_id, 'upload', USER2, False)
    upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token
    m = MultipartEncoder(
        fields={
            'parent_dir': '/',
            'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
        })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert_upload_response(response, False, False)
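
    # file.txt was created empty by create_test_file(), so this upload should
    # not change either repo's size. Repo sizes appear to be recomputed
    # asynchronously after each commit, hence the short sleeps before every
    # size check.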
    time.sleep(1.5)
    repo_size = api.get_repo_size(v_repo_id)
    assert repo_size == 0
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0
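
    # Resumable upload sends the file in two chunks, each request carrying a
    # Content-Range header of the form "bytes <start>-<end>/<total>". With the
    # contents above, part1 is 13 bytes and part2 is 14 bytes (total 27), so
    # the two requests cover "bytes 0-12/27" and "bytes 13-26/27". After the
    # first chunk the server replies {"success": true} and records the upload
    # offset; the final chunk completes the file.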
    #test resumable upload file to virtual repo root dir
    parent_dir = '/'
    headers = {'Content-Range': 'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),
                                                       str(total_size)),
               'Content-Disposition': 'attachment; filename=\"{}\"'.format(resumable_file_name)}
    response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, False)
    assert_resumable_upload_response(response, v_repo_id,
                                     resumable_file_name, False)

    time.sleep(1.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == 0
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    headers = {'Content-Range': 'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
                                                        str(total_size - 1),
                                                        str(total_size)),
               'Content-Disposition': 'attachment; filename=\"{}\"'.format(resumable_file_name)}
    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False)
    assert_resumable_upload_response(response, v_repo_id,
                                     resumable_file_name, True)

    time.sleep(2.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == total_size
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == total_size
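
    # The update-api overwrites the existing target_file with the uploaded
    # content. Since no 'ret-json' parameter is passed here, the response body
    # is just the new file id as plain text (checked by assert_update_response
    # with is_json=False). Both repo sizes should grow by file_size once the
    # change is merged back into the origin repo.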
    #test update file to virtual repo.
    write_file(file_path, file_content)
    token = api.get_fileserver_access_token(v_repo_id, obj_id, 'update', USER2, False)
    update_url_base = 'http://127.0.0.1:8082/update-api/' + token
    m = MultipartEncoder(
        fields={
            'target_file': '/' + file_name,
            'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
        })
    response = requests.post(update_url_base,
                             data = m, headers = {'Content-Type': m.content_type})
    assert_update_response(response, False)

    time.sleep(1.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == total_size + file_size
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == total_size + file_size
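
    # Deletions made in the virtual repo should also be merged back into the
    # origin repo, so both sizes are expected to drop to total_size and then
    # to 0 before the share is removed and the local temp files are deleted.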
    api.del_file(v_repo_id, '/', file_name, USER2)
    time.sleep(1.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == total_size
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == total_size

    api.del_file(v_repo_id, '/', resumable_file_name, USER2)
    time.sleep(1.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == 0
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    api.del_file(repo.id, '/dir1', 'subdir1', USER)
    api.del_file(repo.id, '/dir2', 'subdir2', USER)
    assert api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0
    del_local_files()