@lap v0.3
# Machine-readable API spec. Each @endpoint block is one API call.
@api DataLakeStoreFileSystemManagementClient
@version 2016-11-01
@endpoints 3
@toc WebHdfsExt(2), webhdfs(1)

@group WebHdfsExt
@endpoint PUT /WebHdfsExt/{path}
@desc Sets or removes the expiration time on the specified file. This operation can only be executed against files. Folders are not supported.
@required {path: any # The Data Lake Store path (starting with '/') of the file on which to set or remove the expiration time., expiryOption: any # Indicates the type of expiration to use for the file: 1. NeverExpire: ExpireTime is ignored. 2. RelativeToNow: ExpireTime is an integer in milliseconds representing the expiration date relative to when file expiration is updated. 3. RelativeToCreationDate: ExpireTime is an integer in milliseconds representing the expiration date relative to file creation. 4. Absolute: ExpireTime is an integer in milliseconds, as a Unix timestamp relative to 1/1/1970 00:00:00., op: any # The constant value for the operation., api-version: any # Client Api Version.}
@optional {expireTime: any # The time that the file will expire, corresponding to the ExpiryOption that was set.}
@returns(200) Successfully set the expiration time on the specified file.

@endpoint POST /WebHdfsExt/{path}
@desc Appends to the specified file, optionally first creating the file if it does not yet exist. This method supports multiple concurrent appends to the file. NOTE: The target must not contain data added by Create or normal (serial) Append. ConcurrentAppend and Append cannot be used interchangeably; once a target file has been modified using either of these append options, the other append option cannot be used on the target file. ConcurrentAppend does not guarantee order and can result in duplicated data landing in the target file.
@required {path: any # The Data Lake Store path (starting with '/') of the file to which to append using concurrent append., streamContents: map # The file contents to include when appending to the file.  The maximum content size is 4MB.  For content larger than 4MB you must append the content in 4MB chunks., op: any # The constant value for the operation., Transfer-Encoding: any # Indicates the data being sent to the server is being streamed in chunks., api-version: any # Client Api Version.}
@optional {appendMode: any # Indicates the concurrent append call should create the file if it doesn't exist or just open the existing file for append., syncFlag: any # Optionally indicates what to do after completion of the concurrent append. DATA indicates that more data will be sent immediately by the client, the file handle should remain open/locked, and file metadata (including file length, last modified time) should NOT get updated. METADATA indicates that more data will be sent immediately by the client, the file handle should remain open/locked, and file metadata should get updated. CLOSE indicates that the client is done sending data, the file handle should be closed/unlocked, and file metadata should get updated.}
@returns(200) OK

@endgroup

@group webhdfs
@endpoint GET /webhdfs/v1/{path}
@desc Checks if the specified access is available at the given path.
@required {path: any # The Data Lake Store path (starting with '/') of the file or directory for which to check access., fsaction: any # File system operation read/write/execute in string form, matching regex pattern '[rwx-]{3}'., op: any # The constant value for the operation., api-version: any # Client Api Version.}
@returns(200) OK

@endgroup

@end
