import logging
import os
import socket
import time

from fsspec import AbstractFileSystem
from fsspec.spec import AbstractBufferedFile

import boto3
from botocore.client import Config
from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError

from s3fs.errors import translate_boto_error
from s3fs.utils import ParamKwargsHelper

logger = logging.getLogger("s3fs")
handle = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handle.setFormatter(formatter)
logger.addHandler(handle)
if "S3FS_LOGGING_LEVEL" in os.environ:
    logger.setLevel(os.environ["S3FS_LOGGING_LEVEL"])

try:
    from boto3.s3.transfer import S3_RETRYABLE_ERRORS
except ImportError:
    S3_RETRYABLE_ERRORS = (socket.timeout,)

_VALID_FILE_MODES = {"r", "w", "a", "rb", "wb", "ab"}


def split_path(path):
    """
    Normalise S3 path string into bucket and key.

    Parameters
    ----------
    path : string
        Input path, like `s3://mybucket/path/to/file`

    Examples
    --------
    >>> split_path("s3://mybucket/path/to/file")
    ['mybucket', 'path/to/file']
    """
    if path.startswith("s3://"):
        path = path[5:]
    path = path.rstrip("/").lstrip("/")
    if "/" not in path:
        return path, ""
    else:
        return path.split("/", 1)


key_acls = {
    "private",
    "public-read",
    "public-read-write",
    "authenticated-read",
    "aws-exec-read",
    "bucket-owner-read",
    "bucket-owner-full-control",
}
buck_acls = {"private", "public-read", "public-read-write", "authenticated-read"}

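
# Illustrative check of the path normalisation above; the bucket and key names
# are made up and this helper is not part of the module's public API. split_path
# accepts paths with or without the "s3://" prefix, and a bare bucket name
# yields an empty key.
def _split_path_examples():
    assert split_path("s3://mybucket/path/to/file") == ["mybucket", "path/to/file"]
    assert split_path("mybucket/path/to/file") == ["mybucket", "path/to/file"]
    # a bucket on its own comes back with an empty key
    assert split_path("mybucket") == ("mybucket", "")
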

class S3FileSystem(AbstractFileSystem):
    """
    Access S3 as if it were a file system.

    This exposes a filesystem-like API (ls, cp, open, etc.) on top of S3
    storage.

    Provide credentials either explicitly (``key=``, ``secret=``) or depend
    on boto's credential methods. See boto3 documentation for more
    information. If no credentials are available, use ``anon=True``.

    Parameters
    ----------
    anon : bool (False)
        Whether to use anonymous connection (public buckets only). If False,
        uses the key/secret given, or boto's credential resolver (environment
        variables, config files, EC2 IAM server, in that order)
    key : string (None)
        If not anonymous, use this access key ID, if specified
    secret : string (None)
        If not anonymous, use this secret access key, if specified
    token : string (None)
        If not anonymous, use this security token, if specified
    use_ssl : bool (True)
        Whether to use SSL in connections to S3; may be faster without, but
        insecure
    s3_additional_kwargs : dict of parameters that are used when calling s3
        api methods. Typically used for things like "ServerSideEncryption".
    client_kwargs : dict of parameters for the boto3 client
    requester_pays : bool (False)
        If RequesterPays buckets are supported.
    default_block_size : int (None)
        If given, the default block size value used for ``open()``, if no
        specific value is given at call time. The built-in default is 5MB.
    default_fill_cache : bool (True)
        Whether to use cache filling with open by default. Refer to
        ``S3File.open``.
    default_cache_type : string ('bytes')
        If given, the default cache_type value used for ``open()``. Set to
        "none" if no caching is desired. See fsspec's documentation for other
        available cache_type values. Default cache_type is 'bytes'.
    version_aware : bool (False)
        Whether to support bucket versioning. If enabled, this will require
        the user to have the necessary IAM permissions for dealing with
        versioned objects.
    config_kwargs : dict of parameters passed to ``botocore.client.Config``
    kwargs : other parameters for boto3 session
    session : botocore Session object to be used for all connections. This
        session will be used in place of creating a new session inside
        S3FileSystem.

    Examples
    --------
    >>> s3 = S3FileSystem(anon=False)  # doctest: +SKIP
    >>> s3.ls('my-bucket/')  # doctest: +SKIP
    ['my-file.txt']
    >>> with s3.open('my-bucket/my-file.txt', mode='rb') as f:  # doctest: +SKIP
    ...     print(f.read())  # doctest: +SKIP
    b'Hello, world!'
    """

    root_marker = ""
    protocol = "s3"
    default_block_size = 5 * 2 ** 20
    _extra_tokenize_attributes = ("default_block_size",)

    def __init__(self, anon=False, key=None, secret=None, token=None,
                 use_ssl=True, client_kwargs=None, requester_pays=False,
                 default_block_size=None, default_fill_cache=True,
                 default_cache_type="bytes", version_aware=False,
                 config_kwargs=None, s3_additional_kwargs=None, session=None,
                 username=None, password=None, **kwargs):
        # ``key``/``username`` and ``secret``/``password`` are aliases;
        # supplying both members of a pair is an error ("Supply either key or
        # username, not both" / "Supply secret or password, not both")
        ...

    def _filter_kwargs(self, s3_method, kwargs):
        # keep only the keyword arguments accepted by the given S3 API method
        ...

    def _call_s3(self, method, *akwarglist, **kwargs):
        # log and issue an S3 API call, merging ``s3_additional_kwargs``
        ...

    def _get_s3_method_kwargs(self, method, *akwarglist, **kwargs):
        ...

    def connect(self):
        # create the S3 client used by all other methods (unsigned requests
        # when ``anon=True``)
        ...

    def _open(self, path, mode="rb", block_size=None, acl="", version_id=None,
              fill_cache=None, cache_type=None, autocommit=True,
              requester_pays=None, **kwargs):
        """
        Open a file for reading or writing

        Parameters
        ----------
        path : string
            Path of file on S3
        mode : string
            One of 'r', 'w', 'a', 'rb', 'wb', or 'ab'. These have the same
            meaning as they do for the built-in `open` function.
        block_size : int
            Size of data-node blocks if reading
        fill_cache : bool
            If seeking to a new part of the file beyond the current buffer,
            with this True, the buffer will be filled between the sections to
            best support random access. When reading only a few specific
            chunks out of a file, performance may be better if False.
        acl : str
            Canned ACL to set when writing
        version_id : str
            Explicit version of the object to open. This requires that the s3
            filesystem is version aware and bucket versioning is enabled on
            the relevant bucket.
        encoding : str
            The encoding to use if opening the file in text mode. The
            platform's default text encoding is used if not given.
        cache_type : str
            See fsspec's documentation for available cache_type values. Set to
            "none" if no caching is desired. If None, defaults to
            ``self.default_cache_type``.
        requester_pays : bool (optional)
            If RequesterPays buckets are supported. If None, defaults to the
            value used when creating the S3FileSystem (which defaults to
            False.)
        kwargs : dict-like
            Additional parameters used for s3 methods. Typically used for
            ServerSideEncryption.
        """
        # raises an error if ``version_id`` is given on a filesystem that is
        # not version aware; returns an ``S3File``
        ...

    def _lsdir(self, path, refresh=False, max_items=None):
        # paginated ``list_objects_v2`` listing of one directory level,
        # cached in ``self.dircache``
        ...

    def mkdir(self, path, acl="", **kwargs):
        # creates a bucket when ``path`` is a bare bucket name; ``acl`` must
        # be one of ``buck_acls``
        ...

    def rmdir(self, path):
        ...

    def _lsbuckets(self, refresh=False):
        ...

    def _ls(self, path, refresh=False):
        """
        List files in given bucket, or list of buckets.

        Listing is cached unless `refresh=True`.

        Note: only your buckets associated with the login will be listed by
        `ls('')`, not any public buckets (even if already accessed).

        Parameters
        ----------
        path : string/bytes
            location at which to list files
        refresh : bool (=False)
            if False, look in local cache for file details first
        """
        ...

    def exists(self, path):
        ...

    def touch(self, path, truncate=True, data=None, **kwargs):
        """Create empty file or truncate"""
        # refuses to touch an existing key unless ``truncate`` is True
        ...

    def info(self, path, version_id=None):
        ...

    def ls(self, path, detail=False, refresh=False, **kwargs):
        ...

    def object_version_info(self, path, **kwargs):
        # requires a version aware filesystem
        ...

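    # Illustrative usage of the parameters documented above (bucket and key
    # names are hypothetical). Per-call S3 arguments such as
    # ``ServerSideEncryption`` can be supplied once at construction time via
    # ``s3_additional_kwargs`` and are then merged into the underlying API
    # calls, or passed directly to ``open``:
    #
    #     fs = S3FileSystem(s3_additional_kwargs={"ServerSideEncryption": "AES256"})
    #     with fs.open("mybucket/encrypted.bin", "wb") as f:
    #         f.write(b"data")
    #
    #     fs.open("requester-pays-bucket/key", "rb", requester_pays=True)
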
    def metadata(self, path, refresh=False, **kwargs):
        """
        Return metadata of path.

        Metadata is cached unless `refresh=True`.

        Parameters
        ----------
        path : string/bytes
            filename to get metadata for
        refresh : bool (=False)
            if False, look in local cache for file metadata first
        """
        ...

    def get_tags(self, path):
        """Retrieve tag key/values for the given path

        Returns
        -------
        {str: str}
        """
        ...

    def put_tags(self, path, tags, mode="o"):
        """Set tags for given existing key

        Tags are a str:str mapping that can be attached to any key, see
        https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html

        This is similar to, but distinct from, key metadata, which is usually
        set at key creation time.

        Parameters
        ----------
        path : str
            Existing key to attach tags to
        tags : dict str, str
            Tags to apply.
        mode :
            One of 'o' or 'm'
            'o': Will over-write any existing tags.
            'm': Will merge in new tags with existing tags. Incurs two remote
            calls.
        """
        ...

    def getxattr(self, path, attr_name, **kwargs):
        """
        Get an attribute from the metadata.

        Examples
        --------
        >>> mys3fs.getxattr('mykey', 'attribute_1')  # doctest: +SKIP
        'value_1'
        """
        ...

    def setxattr(self, path, copy_kwargs=None, **kw_args):
        """
        Set metadata.

        Attributes have to be of the form documented in the
        `Metadata Reference`_.

        Parameters
        ----------
        kw_args : key-value pairs like field="value", where the values must be
            strings. Does not alter existing fields, unless the field appears
            here - if the value is None, delete the field.
        copy_kwargs : dict, optional
            dictionary of additional params to use for the underlying
            s3.copy_object.

        Examples
        --------
        >>> mys3file.setxattr(attribute_1='value1', attribute_2='value2')  # doctest: +SKIP

        # Example for use with copy_kwargs
        >>> mys3file.setxattr(copy_kwargs={'ContentType': 'application/pdf'},
        ...                   attribute_1='value1')  # doctest: +SKIP

        .. Metadata Reference: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#object-metadata
        """
        ...

    def chmod(self, path, acl, **kwargs):
        """
        Set Access Control on a bucket/key

        See http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

        Parameters
        ----------
        path : string
            the object to set
        acl : string
            the value of ACL to apply
        """
        ...

    def url(self, path, expires=3600, **kwargs):
        """
        Generate presigned URL to access path by HTTP

        Parameters
        ----------
        path : string
            the key path we are interested in
        expires : int
            the number of seconds this signature will be good for.
        """
        ...

    def merge(self, path, filelist, **kwargs):
        """
        Create single S3 file from list of S3 files

        Uses multi-part, no data is downloaded. The original files are not
        deleted.

        Parameters
        ----------
        path : str
            The final file to produce
        filelist : list of str
            The paths, in order, to assemble into the final file.
        """
        ...

    def copy_basic(self, path1, path2, **kwargs):
        """Copy file between locations on S3"""
        ...

    def copy_managed(self, path1, path2, **kwargs):
        ...

    def copy(self, path1, path2, **kwargs):
        ...

    def bulk_delete(self, pathlist, **kwargs):
        """
        Remove multiple keys with one call

        Parameters
        ----------
        pathlist : list of strings
            The keys to remove, must all be in the same bucket.
        """
        ...

    def rm(self, path, recursive=False, **kwargs):
        """
        Remove keys and/or bucket.

        Parameters
        ----------
        path : string
            The location to remove.
        recursive : bool (True)
            Whether to remove also all entries below, i.e., which are returned
            by `walk()`.
        """
        ...

    def invalidate_cache(self, path=None):
        # clear the whole directory cache, or just ``path`` and its parent
        ...

    def walk(self, path, maxdepth=None, **kwargs):
        # refuses to crawl all of S3 (empty or wildcard root paths)
        ...

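
# A small end-to-end sketch of the public API defined above. Bucket and key
# names are made up, and real credentials (or ``anon=True`` for public data)
# are needed at run time; nothing below executes on import.
def _example_filesystem_usage():
    fs = S3FileSystem(anon=False)

    # write a small object, then read it back
    with fs.open("example-bucket/hello.txt", "wb") as f:
        f.write(b"Hello, world!")
    with fs.open("example-bucket/hello.txt", "rb") as f:
        assert f.read() == b"Hello, world!"

    # attach tags, merging with whatever tags are already present
    fs.put_tags("example-bucket/hello.txt", {"team": "data"}, mode="m")

    # a presigned, time-limited HTTP URL for the same key
    link = fs.url("example-bucket/hello.txt", expires=600)

    fs.rm("example-bucket/hello.txt")
    return link
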

class S3File(AbstractBufferedFile):
    """
    Open S3 key as a file. Data is only loaded and cached on demand.

    Parameters
    ----------
    s3 : S3FileSystem
        boto3 connection
    path : string
        S3 bucket/key to access
    mode : str
        One of 'rb', 'wb', 'ab'. These have the same meaning as they do for
        the built-in `open` function.
    block_size : int
        read-ahead size for finding delimiters
    fill_cache : bool
        If seeking to a new part of the file beyond the current buffer, with
        this True, the buffer will be filled between the sections to best
        support random access. When reading only a few specific chunks out of
        a file, performance may be better if False.
    acl : str
        Canned ACL to apply
    version_id : str
        Optional version to read the file at. If not specified this will
        default to the current version of the object. This is only used for
        reading.
    requester_pays : bool (False)
        If RequesterPays buckets are supported.

    Examples
    --------
    >>> s3 = S3FileSystem()  # doctest: +SKIP
    >>> with s3.open('my-bucket/my-file.txt', mode='rb') as f:  # doctest: +SKIP
    ...     ...  # doctest: +SKIP

    See Also
    --------
    S3FileSystem.open: used to create ``S3File`` objects
    """

    part_min = 5 * 2 ** 20
    part_max = 5 * 2 ** 30

    def __init__(self, s3, path, mode="rb", block_size=5 * 2 ** 20, acl="",
                 version_id=None, fill_cache=True, s3_additional_kwargs=None,
                 autocommit=True, cache_type="bytes", requester_pays=False):
        # rejects non key-like paths and write block sizes below 5MB
        # ("Block size must be >=5MB"); append mode re-uses the existing key
        # as the first part of a multipart upload
        ...

    def _call_s3(self, method, *kwarglist, **kwargs):
        ...

    def _initiate_upload(self):
        # start the multipart upload once buffered data exceeds the block size
        ...

    def metadata(self, refresh=False, **kwargs):
        """
        Return metadata of file.
        See :func:`~s3fs.S3FileSystem.metadata`.

        Metadata is cached unless `refresh=True`.
        """
        ...

    def getxattr(self, xattr_name, **kwargs):
        """
        Get an attribute from the metadata.
        See :func:`~s3fs.S3FileSystem.getxattr`.

        Examples
        --------
        >>> mys3file.getxattr('attribute_1')  # doctest: +SKIP
        'value_1'
        """
        ...

    def setxattr(self, copy_kwargs=None, **kwargs):
        """
        Set metadata.
        See :func:`~s3fs.S3FileSystem.setxattr`.

        Examples
        --------
        >>> mys3file.setxattr(attribute_1='value1', attribute_2='value2')  # doctest: +SKIP
        """
        # cannot update metadata while file is open for writing
        ...

    def url(self, **kwargs):
        """HTTP URL to read this file (if it already exists)"""
        ...

    def _fetch_range(self, start, end):
        ...

    def _upload_chunk(self, final=False):
        # upload buffered data as numbered parts, splitting anything larger
        # than ``part_max`` and retrying on S3_RETRYABLE_ERRORS
        ...

    def commit(self):
        # complete the multipart upload (or issue a single put for small
        # files) and invalidate the directory cache
        ...

    def discard(self):
        ...

    def _abort_mpu(self):
        ...


def _fetch_range(client, bucket, key, version_id, start, end,
                 max_attempts=10, req_kw=None):
    """Read the byte range [start, end) of bucket/key.

    A negative or empty range returns ``b""`` immediately; otherwise the
    ranged GET is retried on retryable and connection errors, an out-of-range
    request (HTTP 416 / InvalidRange) returns ``b""``, and a RuntimeError
    ("Max number of S3 retries exceeded") is raised once the maximum number of
    attempts is used up.
    """
    ...
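

# Rough sketch of the ranged download that ``_fetch_range`` performs, written
# against the public boto3 client API. It is illustrative only: the helper
# name, attempt count, and backoff constants are assumptions, not the module's
# actual values.
def _example_ranged_get(client, bucket, key, start, end, attempts=5):
    if start >= end:
        # nothing to fetch for an empty or negative range
        return b""
    for attempt in range(attempts):
        try:
            resp = client.get_object(
                Bucket=bucket,
                Key=key,
                Range="bytes=%i-%i" % (start, end - 1),
            )
            return resp["Body"].read()
        except ClientError as e:
            # requesting past the end of the object is treated as empty data
            if e.response["Error"].get("Code", "") in ("416", "InvalidRange"):
                return b""
            raise
        except S3_RETRYABLE_ERRORS:
            time.sleep(2 ** attempt * 0.1)  # simple exponential backoff
    raise RuntimeError("Max number of S3 retries exceeded")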