I timed it just out of curiosity. Below are the results for a very large file.
tl;dr:
File read then split seems to be the fastest approach on a large file.
with open(FILENAME, "r") as file:
    lines = file.read().split("\n")
However, if you need to loop through the lines anyway, then you probably want:
with open(FILENAME, "r") as file:
    for line in file:
        line = line.rstrip("\n")
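The loop version also avoids holding the whole file in memory at once, which can matter for a very large file. A minimal sketch (counting non-empty lines here is just a made-up task to show the pattern):

non_empty = 0
with open(FILENAME, "r") as file:
    for line in file:
        if line.rstrip("\n"):  # skip blank lines
            non_empty += 1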
Tested on Python 3.4.2:
import timeit

FILENAME = "mylargefile.csv"
DELIMITER = "\n"

def splitlines_read():
    """Read the file then split the lines with the splitlines builtin method.

    Returns:
        lines (list): List of file lines.
    """
    with open(FILENAME, "r") as file:
        lines = file.read().splitlines()
    return lines
# end splitlines_read

def split_read():
    """Read the file then split the lines.

    This method will return empty strings for blank lines (same as the other methods).
    This method may also have an extra trailing element that is an empty string (compared to
    splitlines_read).

    Returns:
        lines (list): List of file lines.
    """
    with open(FILENAME, "r") as file:
        lines = file.read().split(DELIMITER)
    return lines
# end split_read

def strip_read():
    """Loop through the file and create a new list of lines, removing any "\n" with rstrip.

    Returns:
        lines (list): List of file lines.
    """
    with open(FILENAME, "r") as file:
        lines = [line.rstrip(DELIMITER) for line in file]
    return lines
# end strip_read

def strip_readlines():
    """Loop through the file's readlines and create a new list of lines, removing any "\n" with
    rstrip. This will probably be slower than strip_read, but might as well test everything.

    Returns:
        lines (list): List of file lines.
    """
    with open(FILENAME, "r") as file:
        lines = [line.rstrip(DELIMITER) for line in file.readlines()]
    return lines
# end strip_readlines

def compare_times():
    run = 100
    splitlines_t = timeit.timeit(splitlines_read, number=run)
    print("Splitlines Read:", splitlines_t)
    split_t = timeit.timeit(split_read, number=run)
    print("Split Read:", split_t)
    strip_t = timeit.timeit(strip_read, number=run)
    print("Strip Read:", strip_t)
    striplines_t = timeit.timeit(strip_readlines, number=run)
    print("Strip Readlines:", striplines_t)
# end compare_times

def compare_values():
    """Compare the values returned by each method.

    Note: split_read fails, because it has an extra empty string at the end of the list of lines.
    That's the only reason why it fails.
    """
    splr = splitlines_read()
    sprl = split_read()
    strr = strip_read()
    strl = strip_readlines()

    print("splitlines_read")
    print(repr(splr[:10]))
    print("split_read", splr == sprl)
    print(repr(sprl[:10]))
    print("strip_read", splr == strr)
    print(repr(strr[:10]))
    print("strip_readlines", splr == strl)
    print(repr(strl[:10]))
# end compare_values

if __name__ == "__main__":
    compare_values()
    compare_times()
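As noted in compare_values, split_read ends up with one extra empty string when the file ends with a newline. If you want the split approach but output that matches the other methods, a small variant like this (a hypothetical helper, not included in the timings below) would drop that trailing element:

def split_read_trimmed():
    """Read then split, dropping the trailing empty string left by a final newline."""
    with open(FILENAME, "r") as file:
        lines = file.read().split(DELIMITER)
    if lines and lines[-1] == "":  # file ended with a newline
        lines.pop()
    return lines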
Results:
run = 1000
Splitlines Read: 201.02846901328783
Split Read: 137.51448011841822
Strip Read: 156.18040391519133
Strip Readlines: 172.12281272950372
run = 100
Splitlines Read: 19.956802833188124
Split Read: 13.657361738959867
Strip Read: 15.731161020969516
Strip Readlines: 17.434831199281092
run = 100
Splitlines Read: 20.01516321280158
Split Read: 13.786344555543899
Strip Read: 16.02410587620824
Strip Readlines: 17.09326775703279
File read then split seems to be the fastest approach on a large file.
Note: read then split("\n") will have an extra empty string at the end of the list if the file ends with a newline.
Note: read then splitlines() splits on more than just "\n", e.g. "\r\n" and "\r".
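A quick interactive illustration of those two notes (tiny made-up strings, not the benchmark file):

>>> "a\nb\n".split("\n")
['a', 'b', '']
>>> "a\nb\n".splitlines()
['a', 'b']
>>> "a\r\nb\r\n".splitlines()
['a', 'b']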