4 changes: 2 additions & 2 deletions doc/hpcs2011_tutorial/logreg_example.py
@@ -36,9 +36,9 @@
predict = theano.function(inputs=[x], outputs=prediction,
name = "predict")

- if any([x.op.__class__.__name__=='Gemv' for x in train.maker.fgraph.toposort()]):
+ if any(x.op.__class__.__name__=='Gemv' for x in train.maker.fgraph.toposort()):
print('Used the cpu')
- elif any([x.op.__class__.__name__=='GpuGemm' for x in train.maker.fgraph.toposort()]):
+ elif any(x.op.__class__.__name__=='GpuGemm' for x in train.maker.fgraph.toposort()):
print('Used the gpu')
else:
print('ERROR, not able to tell if theano used the cpu or the gpu')
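
The pattern is the same throughout this patch: any() and all() accept any iterable, so the surrounding list-comprehension brackets can be dropped in favour of a generator expression. The result is semantically identical, but it avoids building an intermediate list and lets any()/all() stop evaluating items as soon as the answer is known. A small self-contained sketch; the ops list and is_gemv helper below are illustrative only and not part of the patch:

    # Sketch only: 'ops' and 'is_gemv' are made-up names; only the any() usage
    # mirrors the patch.
    ops = ['Elemwise', 'Gemv', 'Sum'] * 1000

    def is_gemv(name):
        return name == 'Gemv'

    # Old style: builds a list of 3000 booleans before any() inspects it.
    assert any([is_gemv(name) for name in ops])

    # New style: the generator expression is consumed lazily, so any()
    # returns True after evaluating only the first two elements.
    assert any(is_gemv(name) for name in ops)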
8 changes: 4 additions & 4 deletions doc/tutorial/modes_solution_1.py
@@ -44,11 +44,11 @@
predict = theano.function(inputs=[x], outputs=prediction,
name="predict")

- if any([x.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for x in
-         train.maker.fgraph.toposort()]):
+ if any(x.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for x in
+        train.maker.fgraph.toposort()):
print('Used the cpu')
- elif any([x.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for x in
-           train.maker.fgraph.toposort()]):
+ elif any(x.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for x in
+          train.maker.fgraph.toposort()):
print('Used the gpu')
else:
print('ERROR, not able to tell if theano used the cpu or the gpu')
8 changes: 4 additions & 4 deletions doc/tutorial/using_gpu_solution_1.py
@@ -48,11 +48,11 @@
predict = theano.function(inputs=[], outputs=prediction,
name="predict")

- if any([n.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for n in
-         train.maker.fgraph.toposort()]):
+ if any(n.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for n in
+        train.maker.fgraph.toposort()):
print('Used the cpu')
- elif any([n.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for n in
-           train.maker.fgraph.toposort()]):
+ elif any(n.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for n in
+          train.maker.fgraph.toposort()):
print('Used the gpu')
else:
print('ERROR, not able to tell if theano used the cpu or the gpu')
2 changes: 1 addition & 1 deletion theano/compile/function.py
@@ -282,7 +282,7 @@ def opt_log1p(node):
"input.")

# compute some features of the arguments:
- uses_tuple = any([isinstance(i, (list, tuple)) for i in inputs])
+ uses_tuple = any(isinstance(i, (list, tuple)) for i in inputs)
uses_updates = bool(updates)
uses_givens = bool(givens)

15 changes: 8 additions & 7 deletions theano/compile/profilemode.py
@@ -641,7 +641,7 @@ def exp_float32_op(op):
return False
else:
l = list_scalar_op(op)
- return any([s_op.__class__ in [scal.Exp] for s_op in l])
+ return any(s_op.__class__ in [scal.Exp] for s_op in l)

printed_tip = False
# tip 1
@@ -650,17 +650,17 @@ def exp_float32_op(op):
printed_tip = True

# tip 2
- if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for i, a
-                                    in apply_time]):
+ if not config.lib.amdlibm and any(amdlibm_speed_up(a.op) for i, a
+                                   in apply_time):
print(" - Try installing amdlibm and set the Theano flag "
"lib.amdlibm=True. This speeds up only some Elemwise "
"operation.")
printed_tip = True

# tip 3
- if not config.lib.amdlibm and any([exp_float32_op(a.op) and
+ if not config.lib.amdlibm and any(exp_float32_op(a.op) and
a.inputs[0].dtype == 'float32'
-     for i, a in apply_time]):
+     for i, a in apply_time):
print(" - With the default gcc libm, exp in float32 is slower "
"than in float64! Try Theano flag floatX=float64, or "
"install amdlibm and set the theano flags lib.amdlibm=True")
@@ -670,8 +670,9 @@ def exp_float32_op(op):
for a, t in iteritems(apply_time):
node = a[1]
if (isinstance(node.op, T.Dot) and
-     all([len(i.type.broadcastable) == 2
-          for i in node.inputs])):
+     all(
+         len(i.type.broadcastable) == 2
+         for i in node.inputs)):
print(" - You have a dot operation that was not optimized to"
" dot22 (which is faster). Make sure the inputs are "
"float32 or float64, and are the same for both inputs. "
29 changes: 16 additions & 13 deletions theano/compile/profiling.py
@@ -838,8 +838,9 @@ def count_running_memory(order, fgraph, nodes_mem, ignore_dmap=False):
if (dependencies[ins] and
ins not in fgraph.outputs and
ins.owner and
-     all([compute_map[v][0]
-          for v in dependencies[ins]])):
+     all(
+         compute_map[v][0]
+         for v in dependencies[ins])):
if ins not in view_of and not viewed_by.get(ins, []):
running_memory_size[cg] -= var_mem[ins]
elif ins in view_of:
@@ -971,8 +972,9 @@ def min_memory_generator(executable_nodes, viewed_by, view_of):
if (dependencies[ins] and
ins not in fgraph.outputs and
ins.owner and
-     all([compute_map[v][0]
-          for v in dependencies[ins]])):
+     all(
+         compute_map[v][0]
+         for v in dependencies[ins])):
if (ins not in view_of and
not viewed_by.get(ins, [])):
mem_freed += var_mem[ins]
@@ -1192,8 +1194,8 @@ def print_stats(stats1, stats2):
code[out] = "v"
shapes = str(fct_shapes[node.fgraph][node])

- if all([hasattr(out.type, 'get_size')
-         for out in node.outputs]):
+ if all(hasattr(out.type, 'get_size')
+        for out in node.outputs):
size = "%9dB" % node_outputs_size
if node_outputs_size < config.profiling.min_memory_size:
N = idx
@@ -1311,7 +1313,7 @@ def exp_float32_op(op):
return False
else:
l = list_scalar_op(op)
- return any([s_op.__class__ in [scal.Exp] for s_op in l])
+ return any(s_op.__class__ in [scal.Exp] for s_op in l)

printed_tip = False
# tip 1
@@ -1320,17 +1322,17 @@ def exp_float32_op(op):
printed_tip = True

# tip 2
- if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for a
-                                    in self.apply_time]):
+ if not config.lib.amdlibm and any(amdlibm_speed_up(a.op) for a
+                                   in self.apply_time):
print(" - Try installing amdlibm and set the Theano flag "
"lib.amdlibm=True. This speeds up only some Elemwise "
"operation.", file = file)
printed_tip = True

# tip 3
- if not config.lib.amdlibm and any([exp_float32_op(a.op) and
+ if not config.lib.amdlibm and any(exp_float32_op(a.op) and
a.inputs[0].dtype == 'float32'
-     for a in self.apply_time]):
+     for a in self.apply_time):
print(" - With the default gcc libm, exp in float32 is slower "
"than in float64! Try Theano flag floatX=float64, or "
"install amdlibm and set the theano flags lib.amdlibm=True", file = file)
@@ -1340,8 +1342,9 @@ def exp_float32_op(op):
for a in self.apply_time:
node = a
if (isinstance(node.op, T.Dot) and
-     all([len(i.type.broadcastable) == 2
-          for i in node.inputs])):
+     all(
+         len(i.type.broadcastable) == 2
+         for i in node.inputs)):
print(" - You have a dot operation that was not optimized to"
" dot22 (which is faster). Make sure the inputs are "
"float32 or float64, and are the same for both inputs. "
8 changes: 4 additions & 4 deletions theano/compile/tests/test_function_module.py
@@ -271,7 +271,7 @@ def test_copy_share_memory(self):
l = [val for key, val in storage_map_cpy.items()
if key not in i_o_variables or isinstance(key, theano.tensor.Constant)]
for storage in l:
- self.assertTrue(any([storage is s for s in ori_storages]))
+ self.assertTrue(any(storage is s for s in ori_storages))

# Assert storages of SharedVariable without updates are shared
for (input, _1, _2), here, there in zip(ori.indices,
@@ -571,7 +571,7 @@ def test_free(self):
for key, val in iteritems(func.fn.storage_map):
if not isinstance(key, theano.gof.Constant):
check_list.append(val)
- assert any([val[0] for val in check_list])
+ assert any(val[0] for val in check_list)

func.free()

@@ -608,10 +608,10 @@ def test_deepcopy(self):
self.assertTrue(len(f.defaults) == len(g.defaults))
# print 'f.defaults = %s' % (f.defaults, )
# print 'g.defaults = %s' % (g.defaults, )
- self.assertTrue(all([f_req == g_req and f_feed == g_feed and
+ self.assertTrue(all(f_req == g_req and f_feed == g_feed and
f_val == g_val
for ((f_req, f_feed, f_val), (g_req, g_feed, g_val)) in zip(
-     f.defaults, g.defaults)]))
+     f.defaults, g.defaults)))

self.assertFalse(g.value[1] is f.value[1]) # should not have been copied
self.assertFalse(g.value[2] is f.value[2]) # should have been copied because it is mutable.
2 changes: 1 addition & 1 deletion theano/configdefaults.py
@@ -196,7 +196,7 @@ def default_cuda_root():
def filter_nvcc_flags(s):
assert isinstance(s, str)
flags = [flag for flag in s.split(' ') if flag]
- if any([f for f in flags if not f.startswith("-")]):
+ if any(f for f in flags if not f.startswith("-")):
raise ValueError(
"Theano nvcc.flags support only parameter/value pairs without"
" space between them. e.g.: '--machine 64' is not supported,"
12 changes: 6 additions & 6 deletions theano/gof/cc.py
@@ -327,9 +327,9 @@ def get_c_declare(r, name, sub):
# If some of these have `check_input=True` in their `.op`,
# it means they need `r`'s dtype to be declared, so
# we have to pass `check_input=True` to `c_declare`.
- if ((any([getattr(c.op, 'check_input', config.check_input)
+ if ((any(getattr(c.op, 'check_input', config.check_input)
for (c, _) in r.clients
-     if not isinstance(c, string_types)]) or
+     if not isinstance(c, string_types)) or
(r.owner and
getattr(r.owner.op, 'check_input', config.check_input)))):
c_declare = r.type.c_declare(name, sub, True)
@@ -364,15 +364,15 @@ def get_c_extract(r, name, sub):
# checks on the variable.
# However that code is not used by C code of the apply node creating
# this variable, so there is no need to check `r.owner.op.check_input`.
- if any([getattr(c.op, 'check_input', config.check_input)
+ if any(getattr(c.op, 'check_input', config.check_input)
for (c, _) in r.clients
-     if not isinstance(c, string_types)]):
+     if not isinstance(c, string_types)):
# check_broadcast is just an hack to easily remove just the
# broadcast check on the old GPU back-end. This check isn't
# done in the new GPU back-end or on the CPU.
- if any([getattr(c.op, 'check_broadcast', True)
+ if any(getattr(c.op, 'check_broadcast', True)
for (c, _) in r.clients
-     if not isinstance(c, string_types)]):
+     if not isinstance(c, string_types)):
c_extract = r.type.c_extract(name, sub, True)
else:
try:
6 changes: 3 additions & 3 deletions theano/gof/cmodule.py
@@ -1606,8 +1606,8 @@ def std_lib_dirs_and_libs():
r'EGG-INFO\mingw\usr\x86_64-w64-mingw32\lib')]
for f, lib in [('libmsvcr90.a',
'mingw 4.5.2 or 4.8.1-2 (newer could work)')]:
- if not any([os.path.exists(os.path.join(tmp_libdir, f))
-             for tmp_libdir in libdirs]):
+ if not any(os.path.exists(os.path.join(tmp_libdir, f))
+            for tmp_libdir in libdirs):
print(("Your Python version is from Canopy. " +
"You need to install the package '" + lib +
"' from Canopy package manager."
@@ -2118,7 +2118,7 @@ def join_options(init_part):
# ARM (32-bit and 64-bit) architectures in order to make
# Theano compatible with the Raspberry Pi, Raspberry Pi 2, or
# other systems with ARM processors.
- if (not any(['arm' in flag for flag in cxxflags]) and
+ if (not any('arm' in flag for flag in cxxflags) and
not any(arch in platform.machine() for arch in ['arm', 'aarch'])):
n_bits = local_bitwidth()
cxxflags.append('-m%d' % n_bits)
5 changes: 2 additions & 3 deletions theano/gof/fg.py
@@ -350,9 +350,8 @@ def __remove_clients__(self, r, clients_to_remove,
if variable in self.variables:
# If the owner have other outputs still used,
# then we must keep that variable in the graph.
- if not variable.owner or not any(
-     [var for var in variable.owner.outputs
-      if var.clients]):
+ if not variable.owner or not any(var for var in variable.owner.outputs
+                                  if var.clients):

self.variables.remove(variable)
# This allow to quickly know if a var is still in the fgraph
2 changes: 1 addition & 1 deletion theano/gof/tests/test_vm.py
@@ -70,7 +70,7 @@ def test_c_thunks():
f(1, [2], [3, 2])
from nose.tools import assert_raises
assert_raises(ValueError, f, 0, [2], [3, 4])
- assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks
+ assert any(hasattr(t, 'cthunk') for t in f.fn.thunks) == c_thunks


def test_speed():
9 changes: 5 additions & 4 deletions theano/gof/vm.py
@@ -67,8 +67,9 @@ def calculate_reallocate_info(order, fgraph, storage_map, compute_map_re,
assert not (ins in view_of and viewed_by[ins])
if (getattr(ins, 'ndim', None) == 0 and not storage_map[ins][0] and
ins not in fgraph.outputs and ins.owner and
-     all([compute_map_re[v][0]
-          for v in dependencies.get(ins, [])]) and
+     all(
+         compute_map_re[v][0]
+         for v in dependencies.get(ins, [])) and
ins not in allocated):
# Constant Memory cannot be changed
# Constant and shared variables' storage_map value is not empty
@@ -969,7 +970,7 @@ def make_vm(self, nodes, thunks,
if lazy is None:
lazy = config.vm.lazy
if lazy is None:
- lazy = not all([(not th.lazy) for th in thunks])
+ lazy = not all((not th.lazy) for th in thunks)
if not lazy:
# there is no conditional in the graph
if self.allow_gc:
@@ -1049,7 +1050,7 @@ def make_all(self, profiler=None, input_storage=None,
if lazy is None:
lazy = config.vm.lazy
if lazy is None:
- lazy = not all([(not th.lazy) for th in thunks])
+ lazy = not all((not th.lazy) for th in thunks)
if not (lazy or (config.profile and config.profile_memory) or
self.use_cloop or self.callback or self.callback_input):
for pair in itervalues(reallocated_info):
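
An aside for readers untangling the double negation kept in make_vm and make_all above: by De Morgan's laws, not all((not th.lazy) for th in thunks) is equivalent to any(th.lazy for th in thunks), i.e. lazy ends up True exactly when some thunk is lazy. A minimal check, using a stand-in Thunk class that is not part of Theano or of this patch:

    # Hypothetical stand-in for the real thunk objects, for illustration only.
    class Thunk(object):
        def __init__(self, lazy):
            self.lazy = lazy

    thunks = [Thunk(False), Thunk(True), Thunk(False)]
    assert (not all((not th.lazy) for th in thunks)) == any(th.lazy for th in thunks)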
15 changes: 8 additions & 7 deletions theano/gpuarray/opt.py
@@ -615,9 +615,9 @@ def local_gpua_subtensor(node, context_name):
# And it is a shared var or an input of the graph.
not gpu_x.owner.inputs[0].owner):
if len(x.clients) == 1:
- if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
-                               for v in n.inputs + n.outputs])
-         for n, _ in node.outputs[0].clients]):
+ if any(n == 'output' or any(isinstance(v.type, GpuArrayType)
+                             for v in n.inputs + n.outputs)
+        for n, _ in node.outputs[0].clients):
return
else:
return [host_from_gpu(gpu_x.owner.op(node.outputs[0]))]
@@ -988,10 +988,11 @@ def local_gpu_elemwise_careduce(node):

@local_optimizer(None)
def local_assert_no_cpu_op(node):
- if (all([var.owner and isinstance(var.owner.op, HostFromGpu)
-          for var in node.inputs]) and
-         any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)]
-              for var in node.outputs])):
+ if (all(var.owner and isinstance(var.owner.op, HostFromGpu)
+         for var in node.inputs) and
+     any(
+         [c for c in var.clients if isinstance(c[0].op, GpuFromHost)]
+         for var in node.outputs)):

if config.assert_no_cpu_op == "warn":
_logger.warning(("CPU Op %s is detected in the computation "
12 changes: 6 additions & 6 deletions theano/gpuarray/tests/test_basic_ops.py
@@ -298,8 +298,8 @@ def test_gpu_contiguous():
f = theano.function([a, i], gpu_contiguous(a.reshape((5, 4))[::i]),
mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
- assert any([isinstance(node.op, GpuSubtensor) for node in topo])
- assert any([isinstance(node.op, GpuContiguous) for node in topo])
+ assert any(isinstance(node.op, GpuSubtensor) for node in topo)
+ assert any(isinstance(node.op, GpuContiguous) for node in topo)
assert f(a_val, 1).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
@@ -349,8 +349,8 @@ def test_gpusplit_opt(self):
m = self.shared(rng.rand(4, 6).astype(self.floatX))
o = T.Split(2)(m, 0, [2, 2])
f = theano.function([], o, mode=self.mode)
- assert any([isinstance(node.op, self.split_op_class)
-             for node in f.maker.fgraph.toposort()])
+ assert any(isinstance(node.op, self.split_op_class)
+            for node in f.maker.fgraph.toposort())
o1, o2 = f()
assert numpy.allclose(o1, m.get_value(borrow=True)[:2])
assert numpy.allclose(o2, m.get_value(borrow=True)[2:])
@@ -401,8 +401,8 @@ def check(dtype, N, M_=None):
result = numpy.asarray(f(N, M))
assert numpy.allclose(result, numpy.eye(N, M_, dtype=dtype))
assert result.dtype == numpy.dtype(dtype)
- assert any([isinstance(node.op, GpuEye)
-             for node in f.maker.fgraph.toposort()])
+ assert any(isinstance(node.op, GpuEye)
+            for node in f.maker.fgraph.toposort())

for dtype in ['float32', 'int32', 'float16']:
yield check, dtype, 3