#!/usr/bin/python
import dircache, os.path
from sys import stderr
import readline
def set_file_completer():
    """Install filesystem-path completion on the readline library.

    Registers this module's path completer, and clears readline's
    word-delimiter set so the entire input line is handed to the
    completer as one path.
    """
    readline.set_completer(_file_completer)
    readline.set_completer_delims('')


def _file_completer(text, state):
|
|
12 |
dirname, filename = os.path.split(text)
|
|
13 |
if not os.path.isdir(dirname):
|
|
14 |
return None
|
|
15 |
paths = dircache.listdir(dirname)
|
|
16 |
tlen = len(filename)
|
|
17 |
checkpaths = []
|
|
18 |
if tlen > 0:
|
|
19 |
for fn in paths:
|
|
20 |
if fn[:len(filename)] == filename:
|
|
21 |
checkpaths.append(fn)
|
|
22 |
else:
|
|
23 |
checkpaths = paths
|
|
24 |
if len(checkpaths) > state:
|
|
25 |
return os.path.join(dirname, checkpaths[state])
|
|
26 |
return None


def copy_file(origname, copyname):
    """Copy the contents of *origname* into a new file *copyname*.

    The destination is flushed and returned still open for writing, so
    the caller may append to it.  The source handle is always closed
    before returning (the original version leaked it).

    Raises IOError/OSError if either file cannot be opened.
    """
    # open() instead of the file() builtin, which was removed in Python 3.
    with open(origname, 'r') as orig:
        new = open(copyname, 'w')
        new.write(orig.read())
    new.flush()
    return new


def unique(s):
    """Return a list of the elements in s, but without duplicates.

    For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
    unique("abcabc") some permutation of ["a", "b", "c"], and
    unique(([1, 2], [2, 3], [1, 2])) some permutation of
    [[2, 3], [1, 2]].

    For best speed, all sequence elements should be hashable.  Then
    unique() will usually work in linear time.

    If not possible, the sequence elements should enjoy a total
    ordering, and if list(s).sort() doesn't raise TypeError it's
    assumed that they do enjoy a total ordering.  Then unique() will
    usually work in O(N*log2(N)) time.

    If that's not possible either, the sequence elements must support
    equality-testing.  Then unique() will usually work in quadratic
    time.
    """

    n = len(s)
    if n == 0:
        return []

    # Try using a dict first, as that's the fastest and will usually
    # work.  If it doesn't work, it will usually fail quickly, so it
    # usually doesn't cost much to *try* it.  It requires that all the
    # sequence elements be hashable, and support equality comparison.
    u = {}
    try:
        for x in s:
            u[x] = 1
    except TypeError:
        del u  # move on to the next method
    else:
        # list() so callers get a real list on Python 3 too, where
        # dict.keys() returns a view (the docstring promises a list).
        return list(u.keys())

    # We can't hash all the elements.  Second fastest is to sort,
    # which brings the equal elements together; then duplicates are
    # easy to weed out in a single pass.
    # NOTE:  Python's list.sort() was designed to be efficient in the
    # presence of many duplicate elements.  This isn't true of all
    # sort functions in all languages or libraries, so this approach
    # is more effective in Python than it may be elsewhere.
    try:
        t = list(s)
        t.sort()
    except TypeError:
        del t  # move on to the next method
    else:
        assert n > 0
        # Compact duplicates in place: t[:lasti] holds the uniques seen.
        last = t[0]
        lasti = i = 1
        while i < n:
            if t[i] != last:
                t[lasti] = last = t[i]
                lasti += 1
            i += 1
        return t[:lasti]

    # Brute force is all that's left.
    u = []
    for x in s:
        if x not in u:
            u.append(x)
    return u