From b2a34ac1521c1d3bdcd20c2d75a93aa724a6bd7d Mon Sep 17 00:00:00 2001 From: Cody Date: Thu, 28 Jul 2016 19:14:18 +0000 Subject: [PATCH] Use generator expression with any/all --- doc/hpcs2011_tutorial/logreg_example.py | 4 +- doc/tutorial/modes_solution_1.py | 8 +- doc/tutorial/using_gpu_solution_1.py | 8 +- theano/compile/function.py | 2 +- theano/compile/profilemode.py | 15 ++-- theano/compile/profiling.py | 29 ++++--- theano/compile/tests/test_function_module.py | 8 +- theano/configdefaults.py | 2 +- theano/gof/cc.py | 12 +-- theano/gof/cmodule.py | 6 +- theano/gof/fg.py | 5 +- theano/gof/tests/test_vm.py | 2 +- theano/gof/vm.py | 9 +- theano/gpuarray/opt.py | 15 ++-- theano/gpuarray/tests/test_basic_ops.py | 12 +-- theano/gpuarray/tests/test_dnn.py | 50 +++++------ theano/gpuarray/tests/test_multinomial.py | 16 ++-- theano/gpuarray/tests/test_nnet.py | 16 ++-- theano/gpuarray/tests/test_opt.py | 22 ++--- theano/gpuarray/tests/test_scan.py | 32 +++---- theano/ifelse.py | 4 +- theano/misc/check_blas.py | 8 +- theano/misc/pycuda_example.py | 9 +- theano/misc/tests/test_pycuda_example.py | 16 ++-- theano/printing.py | 2 +- theano/sandbox/cuda/__init__.py | 2 +- theano/sandbox/cuda/basic_ops.py | 4 +- theano/sandbox/cuda/nvcc_compiler.py | 2 +- theano/sandbox/cuda/opt.py | 87 ++++++++++--------- theano/sandbox/cuda/rng_curand.py | 8 +- theano/sandbox/cuda/tests/test_basic_ops.py | 32 +++---- theano/sandbox/cuda/tests/test_blas.py | 36 ++++---- .../cuda/tests/test_conv_cuda_ndarray.py | 12 +-- theano/sandbox/cuda/tests/test_dnn.py | 62 ++++++------- theano/sandbox/cuda/tests/test_driver.py | 2 +- theano/sandbox/cuda/tests/test_nnet.py | 16 ++-- theano/sandbox/cuda/tests/test_opt.py | 42 ++++----- theano/sandbox/multinomial.py | 5 +- theano/sandbox/rng_mrg.py | 16 ++-- theano/sandbox/tests/test_multinomial.py | 16 ++-- theano/sandbox/tests/test_rng_mrg.py | 8 +- theano/scalar/basic.py | 10 +-- theano/scan_module/scan_op.py | 7 +- theano/scan_module/scan_opt.py | 10 ++- theano/scan_module/scan_utils.py | 8 +- theano/scan_module/tests/test_scan.py | 28 +++--- theano/scan_module/tests/test_scan_opt.py | 5 +- theano/sparse/tests/test_basic.py | 8 +- theano/tensor/blas.py | 7 +- theano/tensor/elemwise.py | 10 +-- .../tensor/nnet/tests/test_abstract_conv.py | 12 +-- theano/tensor/nnet/tests/test_neighbours.py | 8 +- theano/tensor/nnet/tests/test_sigm.py | 2 +- theano/tensor/opt.py | 45 +++++----- theano/tensor/signal/pool.py | 4 +- theano/tensor/tests/mlp_test.py | 4 +- theano/tensor/tests/test_basic.py | 12 +-- theano/tensor/tests/test_elemwise.py | 2 +- theano/tensor/tests/test_extra_ops.py | 2 +- theano/tensor/tests/test_opt.py | 65 +++++++------- theano/tensor/tests/test_subtensor.py | 10 +-- theano/tensor/utils.py | 2 +- theano/tensor/var.py | 3 +- theano/tests/record.py | 6 +- theano/tests/test_gradient.py | 4 +- theano/tests/test_ifelse.py | 4 +- 66 files changed, 479 insertions(+), 461 deletions(-) diff --git a/doc/hpcs2011_tutorial/logreg_example.py b/doc/hpcs2011_tutorial/logreg_example.py index 27a9e2a9f06..6c96b914524 100644 --- a/doc/hpcs2011_tutorial/logreg_example.py +++ b/doc/hpcs2011_tutorial/logreg_example.py @@ -36,9 +36,9 @@ predict = theano.function(inputs=[x], outputs=prediction, name = "predict") -if any( [x.op.__class__.__name__=='Gemv' for x in train.maker.fgraph.toposort()]): +if any( x.op.__class__.__name__=='Gemv' for x in train.maker.fgraph.toposort()): print('Used the cpu') -elif any( [x.op.__class__.__name__=='GpuGemm' for x in train.maker.fgraph.toposort()]): +elif 
any( x.op.__class__.__name__=='GpuGemm' for x in train.maker.fgraph.toposort()): print('Used the gpu') else: print('ERROR, not able to tell if theano used the cpu or the gpu') diff --git a/doc/tutorial/modes_solution_1.py b/doc/tutorial/modes_solution_1.py index ea5dccb3e17..60036ee3557 100755 --- a/doc/tutorial/modes_solution_1.py +++ b/doc/tutorial/modes_solution_1.py @@ -44,11 +44,11 @@ predict = theano.function(inputs=[x], outputs=prediction, name="predict") -if any([x.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for x in -train.maker.fgraph.toposort()]): +if any( x.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for x in +train.maker.fgraph.toposort()): print('Used the cpu') -elif any([x.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for x in -train.maker.fgraph.toposort()]): +elif any( x.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for x in +train.maker.fgraph.toposort()): print('Used the gpu') else: print('ERROR, not able to tell if theano used the cpu or the gpu') diff --git a/doc/tutorial/using_gpu_solution_1.py b/doc/tutorial/using_gpu_solution_1.py index aec61e4160f..26714cc3912 100755 --- a/doc/tutorial/using_gpu_solution_1.py +++ b/doc/tutorial/using_gpu_solution_1.py @@ -48,11 +48,11 @@ predict = theano.function(inputs=[], outputs=prediction, name="predict") -if any([n.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for n in -train.maker.fgraph.toposort()]): +if any( n.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for n in +train.maker.fgraph.toposort()): print('Used the cpu') -elif any([n.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for n in -train.maker.fgraph.toposort()]): +elif any( n.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for n in +train.maker.fgraph.toposort()): print('Used the gpu') else: print('ERROR, not able to tell if theano used the cpu or the gpu') diff --git a/theano/compile/function.py b/theano/compile/function.py index ae7a7303570..9413f9a9738 100644 --- a/theano/compile/function.py +++ b/theano/compile/function.py @@ -282,7 +282,7 @@ def opt_log1p(node): "input.") # compute some features of the arguments: - uses_tuple = any([isinstance(i, (list, tuple)) for i in inputs]) + uses_tuple = any( isinstance(i, (list, tuple)) for i in inputs) uses_updates = bool(updates) uses_givens = bool(givens) diff --git a/theano/compile/profilemode.py b/theano/compile/profilemode.py index 29bd0f9b38c..5a0622b15b1 100644 --- a/theano/compile/profilemode.py +++ b/theano/compile/profilemode.py @@ -641,7 +641,7 @@ def exp_float32_op(op): return False else: l = list_scalar_op(op) - return any([s_op.__class__ in [scal.Exp] for s_op in l]) + return any( s_op.__class__ in [scal.Exp] for s_op in l) printed_tip = False # tip 1 @@ -650,17 +650,17 @@ def exp_float32_op(op): printed_tip = True # tip 2 - if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for i, a - in apply_time]): + if not config.lib.amdlibm and any( amdlibm_speed_up(a.op) for i, a + in apply_time): print(" - Try installing amdlibm and set the Theano flag " "lib.amdlibm=True. This speeds up only some Elemwise " "operation.") printed_tip = True # tip 3 - if not config.lib.amdlibm and any([exp_float32_op(a.op) and + if not config.lib.amdlibm and any( exp_float32_op(a.op) and a.inputs[0].dtype == 'float32' - for i, a in apply_time]): + for i, a in apply_time): print(" - With the default gcc libm, exp in float32 is slower " "than in float64! 
Try Theano flag floatX=float64, or " "install amdlibm and set the theano flags lib.amdlibm=True") @@ -670,8 +670,9 @@ def exp_float32_op(op): for a, t in iteritems(apply_time): node = a[1] if (isinstance(node.op, T.Dot) and - all([len(i.type.broadcastable) == 2 - for i in node.inputs])): + all( + len(i.type.broadcastable) == 2 + for i in node.inputs)): print(" - You have a dot operation that was not optimized to" " dot22 (which is faster). Make sure the inputs are " "float32 or float64, and are the same for both inputs. " diff --git a/theano/compile/profiling.py b/theano/compile/profiling.py index 6990b41d006..170fc145be8 100644 --- a/theano/compile/profiling.py +++ b/theano/compile/profiling.py @@ -838,8 +838,9 @@ def count_running_memory(order, fgraph, nodes_mem, ignore_dmap=False): if (dependencies[ins] and ins not in fgraph.outputs and ins.owner and - all([compute_map[v][0] - for v in dependencies[ins]])): + all( + compute_map[v][0] + for v in dependencies[ins])): if ins not in view_of and not viewed_by.get(ins, []): running_memory_size[cg] -= var_mem[ins] elif ins in view_of: @@ -971,8 +972,9 @@ def min_memory_generator(executable_nodes, viewed_by, view_of): if (dependencies[ins] and ins not in fgraph.outputs and ins.owner and - all([compute_map[v][0] - for v in dependencies[ins]])): + all( + compute_map[v][0] + for v in dependencies[ins])): if (ins not in view_of and not viewed_by.get(ins, [])): mem_freed += var_mem[ins] @@ -1192,8 +1194,8 @@ def print_stats(stats1, stats2): code[out] = "v" shapes = str(fct_shapes[node.fgraph][node]) - if all([hasattr(out.type, 'get_size') - for out in node.outputs]): + if all( hasattr(out.type, 'get_size') + for out in node.outputs): size = "%9dB" % node_outputs_size if node_outputs_size < config.profiling.min_memory_size: N = idx @@ -1311,7 +1313,7 @@ def exp_float32_op(op): return False else: l = list_scalar_op(op) - return any([s_op.__class__ in [scal.Exp] for s_op in l]) + return any( s_op.__class__ in [scal.Exp] for s_op in l) printed_tip = False # tip 1 @@ -1320,17 +1322,17 @@ def exp_float32_op(op): printed_tip = True # tip 2 - if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for a - in self.apply_time]): + if not config.lib.amdlibm and any( amdlibm_speed_up(a.op) for a + in self.apply_time): print(" - Try installing amdlibm and set the Theano flag " "lib.amdlibm=True. This speeds up only some Elemwise " "operation.", file = file) printed_tip = True # tip 3 - if not config.lib.amdlibm and any([exp_float32_op(a.op) and + if not config.lib.amdlibm and any( exp_float32_op(a.op) and a.inputs[0].dtype == 'float32' - for a in self.apply_time]): + for a in self.apply_time): print(" - With the default gcc libm, exp in float32 is slower " "than in float64! Try Theano flag floatX=float64, or " "install amdlibm and set the theano flags lib.amdlibm=True", file = file) @@ -1340,8 +1342,9 @@ def exp_float32_op(op): for a in self.apply_time: node = a if (isinstance(node.op, T.Dot) and - all([len(i.type.broadcastable) == 2 - for i in node.inputs])): + all( + len(i.type.broadcastable) == 2 + for i in node.inputs)): print(" - You have a dot operation that was not optimized to" " dot22 (which is faster). Make sure the inputs are " "float32 or float64, and are the same for both inputs. 
" diff --git a/theano/compile/tests/test_function_module.py b/theano/compile/tests/test_function_module.py index 8f03abb063f..c9ebe401318 100644 --- a/theano/compile/tests/test_function_module.py +++ b/theano/compile/tests/test_function_module.py @@ -271,7 +271,7 @@ def test_copy_share_memory(self): l = [val for key, val in storage_map_cpy.items() if key not in i_o_variables or isinstance(key, theano.tensor.Constant)] for storage in l: - self.assertTrue(any([storage is s for s in ori_storages])) + self.assertTrue(any(storage is s for s in ori_storages)) # Assert storages of SharedVariable without updates are shared for (input, _1, _2), here, there in zip(ori.indices, @@ -571,7 +571,7 @@ def test_free(self): for key, val in iteritems(func.fn.storage_map): if not isinstance(key, theano.gof.Constant): check_list.append(val) - assert any([val[0] for val in check_list]) + assert any( val[0] for val in check_list) func.free() @@ -608,10 +608,10 @@ def test_deepcopy(self): self.assertTrue(len(f.defaults) == len(g.defaults)) # print 'f.defaults = %s' % (f.defaults, ) # print 'g.defaults = %s' % (g.defaults, ) - self.assertTrue(all([f_req == g_req and f_feed == g_feed and + self.assertTrue(all(f_req == g_req and f_feed == g_feed and f_val == g_val for ((f_req, f_feed, f_val), (g_req, g_feed, g_val)) in zip( - f.defaults, g.defaults)])) + f.defaults, g.defaults))) self.assertFalse(g.value[1] is f.value[1]) # should not have been copied self.assertFalse(g.value[2] is f.value[2]) # should have been copied because it is mutable. diff --git a/theano/configdefaults.py b/theano/configdefaults.py index 7f6ad42c8e6..0e1c8710279 100644 --- a/theano/configdefaults.py +++ b/theano/configdefaults.py @@ -196,7 +196,7 @@ def default_cuda_root(): def filter_nvcc_flags(s): assert isinstance(s, str) flags = [flag for flag in s.split(' ') if flag] - if any([f for f in flags if not f.startswith("-")]): + if any( f for f in flags if not f.startswith("-")): raise ValueError( "Theano nvcc.flags support only parameter/value pairs without" " space between them. e.g.: '--machine 64' is not supported," diff --git a/theano/gof/cc.py b/theano/gof/cc.py index f134f79efda..9a9c243a4f4 100644 --- a/theano/gof/cc.py +++ b/theano/gof/cc.py @@ -327,9 +327,9 @@ def get_c_declare(r, name, sub): # If some of these have `check_input=True` in their `.op`, # it means they need `r`'s dtype to be declared, so # we have to pass `check_input=True` to `c_declare`. - if ((any([getattr(c.op, 'check_input', config.check_input) + if ((any(getattr(c.op, 'check_input', config.check_input) for (c, _) in r.clients - if not isinstance(c, string_types)]) or + if not isinstance(c, string_types)) or (r.owner and getattr(r.owner.op, 'check_input', config.check_input)))): c_declare = r.type.c_declare(name, sub, True) @@ -364,15 +364,15 @@ def get_c_extract(r, name, sub): # checks on the variable. # However that code is not used by C code of the apply node creating # this variable, so there is no need to check `r.owner.op.check_input`. - if any([getattr(c.op, 'check_input', config.check_input) + if any( getattr(c.op, 'check_input', config.check_input) for (c, _) in r.clients - if not isinstance(c, string_types)]): + if not isinstance(c, string_types)): # check_broadcast is just an hack to easily remove just the # broadcast check on the old GPU back-end. This check isn't # done in the new GPU back-end or on the CPU. 
- if any([getattr(c.op, 'check_broadcast', True) + if any( getattr(c.op, 'check_broadcast', True) for (c, _) in r.clients - if not isinstance(c, string_types)]): + if not isinstance(c, string_types)): c_extract = r.type.c_extract(name, sub, True) else: try: diff --git a/theano/gof/cmodule.py b/theano/gof/cmodule.py index e0f74695629..5ed431e89a0 100644 --- a/theano/gof/cmodule.py +++ b/theano/gof/cmodule.py @@ -1606,8 +1606,8 @@ def std_lib_dirs_and_libs(): r'EGG-INFO\mingw\usr\x86_64-w64-mingw32\lib')] for f, lib in [('libmsvcr90.a', 'mingw 4.5.2 or 4.8.1-2 (newer could work)')]: - if not any([os.path.exists(os.path.join(tmp_libdir, f)) - for tmp_libdir in libdirs]): + if not any( os.path.exists(os.path.join(tmp_libdir, f)) + for tmp_libdir in libdirs): print(("Your Python version is from Canopy. " + "You need to install the package '" + lib + "' from Canopy package manager." @@ -2118,7 +2118,7 @@ def join_options(init_part): # ARM (32-bit and 64-bit) architectures in order to make # Theano compatible with the Raspberry Pi, Raspberry Pi 2, or # other systems with ARM processors. - if (not any(['arm' in flag for flag in cxxflags]) and + if (not any( 'arm' in flag for flag in cxxflags) and not any(arch in platform.machine() for arch in ['arm', 'aarch'])): n_bits = local_bitwidth() cxxflags.append('-m%d' % n_bits) diff --git a/theano/gof/fg.py b/theano/gof/fg.py index b67f1f85e92..eccd0ed8f86 100644 --- a/theano/gof/fg.py +++ b/theano/gof/fg.py @@ -350,9 +350,8 @@ def __remove_clients__(self, r, clients_to_remove, if variable in self.variables: # If the owner have other outputs still used, # then we must keep that variable in the graph. - if not variable.owner or not any( - [var for var in variable.owner.outputs - if var.clients]): + if not variable.owner or not any( var for var in variable.owner.outputs + if var.clients): self.variables.remove(variable) # This allow to quickly know if a var is still in the fgraph diff --git a/theano/gof/tests/test_vm.py b/theano/gof/tests/test_vm.py index ac78bbc4177..d44c933d2a5 100644 --- a/theano/gof/tests/test_vm.py +++ b/theano/gof/tests/test_vm.py @@ -70,7 +70,7 @@ def test_c_thunks(): f(1, [2], [3, 2]) from nose.tools import assert_raises assert_raises(ValueError, f, 0, [2], [3, 4]) - assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks + assert any( hasattr(t, 'cthunk') for t in f.fn.thunks) == c_thunks def test_speed(): diff --git a/theano/gof/vm.py b/theano/gof/vm.py index 21b556c22d1..7c69503d417 100644 --- a/theano/gof/vm.py +++ b/theano/gof/vm.py @@ -67,8 +67,9 @@ def calculate_reallocate_info(order, fgraph, storage_map, compute_map_re, assert not (ins in view_of and viewed_by[ins]) if (getattr(ins, 'ndim', None) == 0 and not storage_map[ins][0] and ins not in fgraph.outputs and ins.owner and - all([compute_map_re[v][0] - for v in dependencies.get(ins, [])]) and + all( + compute_map_re[v][0] + for v in dependencies.get(ins, [])) and ins not in allocated): # Constant Memory cannot be changed # Constant and shared variables' storage_map value is not empty @@ -969,7 +970,7 @@ def make_vm(self, nodes, thunks, if lazy is None: lazy = config.vm.lazy if lazy is None: - lazy = not all([(not th.lazy) for th in thunks]) + lazy = not all( (not th.lazy) for th in thunks) if not lazy: # there is no conditional in the graph if self.allow_gc: @@ -1049,7 +1050,7 @@ def make_all(self, profiler=None, input_storage=None, if lazy is None: lazy = config.vm.lazy if lazy is None: - lazy = not all([(not th.lazy) for th in thunks]) + lazy = not all( (not 
th.lazy) for th in thunks) if not (lazy or (config.profile and config.profile_memory) or self.use_cloop or self.callback or self.callback_input): for pair in itervalues(reallocated_info): diff --git a/theano/gpuarray/opt.py b/theano/gpuarray/opt.py index f31a7746e23..13277b54b7e 100644 --- a/theano/gpuarray/opt.py +++ b/theano/gpuarray/opt.py @@ -615,9 +615,9 @@ def local_gpua_subtensor(node, context_name): # And it is a shared var or an input of the graph. not gpu_x.owner.inputs[0].owner): if len(x.clients) == 1: - if any([n == 'output' or any([isinstance(v.type, GpuArrayType) - for v in n.inputs + n.outputs]) - for n, _ in node.outputs[0].clients]): + if any( n == 'output' or any( isinstance(v.type, GpuArrayType) + for v in n.inputs + n.outputs) + for n, _ in node.outputs[0].clients): return else: return [host_from_gpu(gpu_x.owner.op(node.outputs[0]))] @@ -988,10 +988,11 @@ def local_gpu_elemwise_careduce(node): @local_optimizer(None) def local_assert_no_cpu_op(node): - if (all([var.owner and isinstance(var.owner.op, HostFromGpu) - for var in node.inputs]) and - any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)] - for var in node.outputs])): + if (all(var.owner and isinstance(var.owner.op, HostFromGpu) + for var in node.inputs) and + any( + [c for c in var.clients if isinstance(c[0].op, GpuFromHost)] + for var in node.outputs)): if config.assert_no_cpu_op == "warn": _logger.warning(("CPU Op %s is detected in the computation " diff --git a/theano/gpuarray/tests/test_basic_ops.py b/theano/gpuarray/tests/test_basic_ops.py index 20aa2d09fbe..b2a72f1663d 100644 --- a/theano/gpuarray/tests/test_basic_ops.py +++ b/theano/gpuarray/tests/test_basic_ops.py @@ -298,8 +298,8 @@ def test_gpu_contiguous(): f = theano.function([a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu) topo = f.maker.fgraph.toposort() - assert any([isinstance(node.op, GpuSubtensor) for node in topo]) - assert any([isinstance(node.op, GpuContiguous) for node in topo]) + assert any( isinstance(node.op, GpuSubtensor) for node in topo) + assert any( isinstance(node.op, GpuContiguous) for node in topo) assert f(a_val, 1).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous @@ -349,8 +349,8 @@ def test_gpusplit_opt(self): m = self.shared(rng.rand(4, 6).astype(self.floatX)) o = T.Split(2)(m, 0, [2, 2]) f = theano.function([], o, mode=self.mode) - assert any([isinstance(node.op, self.split_op_class) - for node in f.maker.fgraph.toposort()]) + assert any( isinstance(node.op, self.split_op_class) + for node in f.maker.fgraph.toposort()) o1, o2 = f() assert numpy.allclose(o1, m.get_value(borrow=True)[:2]) assert numpy.allclose(o2, m.get_value(borrow=True)[2:]) @@ -401,8 +401,8 @@ def check(dtype, N, M_=None): result = numpy.asarray(f(N, M)) assert numpy.allclose(result, numpy.eye(N, M_, dtype=dtype)) assert result.dtype == numpy.dtype(dtype) - assert any([isinstance(node.op, GpuEye) - for node in f.maker.fgraph.toposort()]) + assert any( isinstance(node.op, GpuEye) + for node in f.maker.fgraph.toposort()) for dtype in ['float32', 'int32', 'float16']: yield check, dtype, 3 diff --git a/theano/gpuarray/tests/test_dnn.py b/theano/gpuarray/tests/test_dnn.py index 4a756e1ed00..4db8907c174 100644 --- a/theano/gpuarray/tests/test_dnn.py +++ b/theano/gpuarray/tests/test_dnn.py @@ -105,7 +105,7 @@ def test_dnn_conv_inplace(): topo = f.maker.fgraph.toposort() convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConv)] assert len(convs) == 2 - assert all([node.op.inplace for 
node in convs]) + assert all( node.op.inplace for node in convs) assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2 # Test grad w op @@ -116,7 +116,7 @@ def test_dnn_conv_inplace(): topo = f.maker.fgraph.toposort() convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)] assert len(convs) == 2 - assert all([node.op.inplace for node in convs]) + assert all( node.op.inplace for node in convs) assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2 # Test grad i op @@ -127,7 +127,7 @@ def test_dnn_conv_inplace(): topo = f.maker.fgraph.toposort() convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)] assert len(convs) == 2 - assert all([node.op.inplace for node in convs]) + assert all( node.op.inplace for node in convs) assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2 @@ -205,11 +205,11 @@ def test_pooling(): mode_without_gpu2 = mode_without_gpu.including() mode_without_gpu2.check_isfinite = False f1 = theano.function([x], out1, mode=mode_with_gpu) - assert any([isinstance(node.op, dnn.GpuDnnPool) - for node in f1.maker.fgraph.apply_nodes]) + assert any( isinstance(node.op, dnn.GpuDnnPool) + for node in f1.maker.fgraph.apply_nodes) f2 = theano.function([x], out2, mode=mode_without_gpu2) - assert not any([isinstance(node.op, dnn.GpuDnnPool) - for node in f2.maker.fgraph.apply_nodes]) + assert not any( isinstance(node.op, dnn.GpuDnnPool) + for node in f2.maker.fgraph.apply_nodes) for shp in [(1, 10, 100, 100), (1, 3, 99, 99), (32, 1, 147, 197), @@ -241,8 +241,8 @@ def fn(x): # Confirm that the opt would have inserted it. fg = theano.function([x], theano.grad(fn(x).sum(), x), mode=mode_with_gpu) - assert any([isinstance(node.op, dnn.GpuDnnPoolGrad) - for node in fg.maker.fgraph.toposort()]) + assert any( isinstance(node.op, dnn.GpuDnnPoolGrad) + for node in fg.maker.fgraph.toposort()) # Test the GPU grad + GPU implementation def fn(x): @@ -258,8 +258,8 @@ def fn(x): # Confirm that we get the good op. 
fg = theano.function([x], theano.grad(fn(x).sum(), x), mode=mode_with_gpu) - assert any([isinstance(node.op, dnn.GpuDnnPoolGrad) - for node in fg.maker.fgraph.toposort()]) + assert any( isinstance(node.op, dnn.GpuDnnPoolGrad) + for node in fg.maker.fgraph.toposort()) g_out = fg(data) # Compare against the CPU result @@ -269,11 +269,11 @@ def fn(x): fc = theano.function([x], theano.grad(out.sum(), x), mode=mode_without_gpu) if mode == 'max': - assert any([isinstance(node.op, MaxPoolGrad) - for node in fc.maker.fgraph.toposort()]) + assert any( isinstance(node.op, MaxPoolGrad) + for node in fc.maker.fgraph.toposort()) else: - assert any([isinstance(node.op, AveragePoolGrad) - for node in fc.maker.fgraph.toposort()]) + assert any( isinstance(node.op, AveragePoolGrad) + for node in fc.maker.fgraph.toposort()) c_out = fc(data) utt.assert_allclose(c_out, g_out) @@ -311,11 +311,11 @@ def fn(x): mode_without_gpu2.check_isfinite = False f1 = theano.function([x], fn(x), mode=mode_with_gpu) - assert any([isinstance(node.op, dnn.GpuDnnPool) - for node in f1.maker.fgraph.apply_nodes]) + assert any( isinstance(node.op, dnn.GpuDnnPool) + for node in f1.maker.fgraph.apply_nodes) f2 = theano.function([x], out2, mode=mode_without_gpu2) - assert not any([isinstance(node.op, dnn.GpuDnnPool) - for node in f2.maker.fgraph.apply_nodes]) + assert not any( isinstance(node.op, dnn.GpuDnnPool) + for node in f2.maker.fgraph.apply_nodes) for shp in [(1, 10, 100, 100), (1, 3, 99, 99), (32, 1, 147, 197), @@ -339,8 +339,8 @@ def test_pooling_opt(): ignore_border=True), mode=mode_with_gpu) - assert any([isinstance(n.op, dnn.GpuDnnPool) - for n in f.maker.fgraph.toposort()]) + assert any( isinstance(n.op, dnn.GpuDnnPool) + for n in f.maker.fgraph.toposort()) f(numpy.zeros((10, 10), dtype='float32')) @@ -351,8 +351,8 @@ def test_pooling_opt(): x), mode=mode_with_gpu.including("cudnn")) - assert any([isinstance(n.op, dnn.GpuDnnPoolGrad) - for n in f.maker.fgraph.toposort()]) + assert any( isinstance(n.op, dnn.GpuDnnPoolGrad) + for n in f.maker.fgraph.toposort()) f(numpy.zeros((10, 10), dtype='float32')) @@ -387,8 +387,8 @@ def test_dnn_tag(): if not raised: assert dnn.dnn_available(test_ctx_name) - assert any([isinstance(n.op, dnn.GpuDnnPool) - for n in f.maker.fgraph.toposort()]) + assert any( isinstance(n.op, dnn.GpuDnnPool) + for n in f.maker.fgraph.toposort()) class TestDnnInferShapes(utt.InferShapeTester): diff --git a/theano/gpuarray/tests/test_multinomial.py b/theano/gpuarray/tests/test_multinomial.py index 3ceecfddb6a..9c72eb0e889 100644 --- a/theano/gpuarray/tests/test_multinomial.py +++ b/theano/gpuarray/tests/test_multinomial.py @@ -35,8 +35,8 @@ def body(mode, gpu): f = function([p, u], m * 2, allow_input_downcast=True, mode=mode) if gpu: - assert any([type(node.op) is GPUAMultinomialFromUniform - for node in f.maker.fgraph.toposort()]) + assert any( type(node.op) is GPUAMultinomialFromUniform + for node in f.maker.fgraph.toposort()) # test that both first and second samples can be drawn utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]), @@ -69,8 +69,8 @@ def body(mode, gpu): m = theano.sandbox.multinomial.MultinomialFromUniform('auto')(p, u) f = function([p, u], m * 2, allow_input_downcast=True, mode=mode) if gpu: - assert any([type(node.op) is GPUAMultinomialFromUniform - for node in f.maker.fgraph.toposort()]) + assert any( type(node.op) is GPUAMultinomialFromUniform + for node in f.maker.fgraph.toposort()) pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1 @@ -105,8 +105,8 @@ def 
test_gpu_opt(): assert m.dtype == 'float32', m.dtype f = function([p, u], m, allow_input_downcast=True, mode=get_mode(True)) - assert any([type(node.op) is GPUAMultinomialFromUniform - for node in f.maker.fgraph.toposort()]) + assert any( type(node.op) is GPUAMultinomialFromUniform + for node in f.maker.fgraph.toposort()) pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1 pval = pval / pval.sum(axis=1)[:, None] uval = numpy.ones_like(pval[:, 0]) * 0.5 @@ -118,8 +118,8 @@ def test_gpu_opt(): assert m.dtype == 'float32', m.dtype f = function([r, u], m, allow_input_downcast=True, mode=get_mode(True)) - assert any([type(node.op) is GPUAMultinomialFromUniform - for node in f.maker.fgraph.toposort()]) + assert any( type(node.op) is GPUAMultinomialFromUniform + for node in f.maker.fgraph.toposort()) pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1 pval = pval / pval.sum(axis=1)[:, None] uval = numpy.ones_like(pval[:, 0]) * 0.5 diff --git a/theano/gpuarray/tests/test_nnet.py b/theano/gpuarray/tests/test_nnet.py index 69465a06598..2ec179e7308 100644 --- a/theano/gpuarray/tests/test_nnet.py +++ b/theano/gpuarray/tests/test_nnet.py @@ -65,12 +65,12 @@ def test_GpuCrossentropySoftmaxArgmax1HotWithBias(): outputs=[loss, y_pred, dW], mode=mode_with_gpu) - assert any([isinstance(node.op, + assert any( isinstance(node.op, T.nnet.CrossentropySoftmaxArgmax1HotWithBias) - for node in classify.maker.fgraph.toposort()]) - assert any([isinstance(node.op, + for node in classify.maker.fgraph.toposort()) + assert any( isinstance(node.op, GpuCrossentropySoftmaxArgmax1HotWithBias) - for node in classify_gpu.maker.fgraph.toposort()]) + for node in classify_gpu.maker.fgraph.toposort()) out = classify(yy, b_values, dot_value) gout = classify_gpu(yy, b_values, dot_value) @@ -115,11 +115,11 @@ def test_GpuCrossentropySoftmax1HotWithBiasDx(): # theano.printing.debugprint(cpu_f) # theano.printing.debugprint(gpu_f) - assert any([isinstance(node.op, T.nnet.CrossentropySoftmax1HotWithBiasDx) - for node in cpu_f.maker.fgraph.toposort()]) - assert any([isinstance(node.op, + assert any( isinstance(node.op, T.nnet.CrossentropySoftmax1HotWithBiasDx) + for node in cpu_f.maker.fgraph.toposort()) + assert any( isinstance(node.op, GpuCrossentropySoftmax1HotWithBiasDx) - for node in gpu_f.maker.fgraph.toposort()]) + for node in gpu_f.maker.fgraph.toposort()) cpu_out = cpu_f(softmax_output_value) gpu_out = gpu_f(softmax_output_value) diff --git a/theano/gpuarray/tests/test_opt.py b/theano/gpuarray/tests/test_opt.py index 1315b3bf290..d5e9ac86536 100644 --- a/theano/gpuarray/tests/test_opt.py +++ b/theano/gpuarray/tests/test_opt.py @@ -301,40 +301,40 @@ def test_local_gpu_subtensor(): t = tensor._shared(numpy.zeros(20, "float32")) f = theano.function([], t[3:4], mode=mode_with_gpu) topo = f.maker.fgraph.toposort() - assert any([type(node.op) is tensor.Subtensor for node in topo]) - assert not any([isinstance(node.op, GpuSubtensor) for node in topo]) + assert any( type(node.op) is tensor.Subtensor for node in topo) + assert not any( isinstance(node.op, GpuSubtensor) for node in topo) # Test graph input. 
t = tensor.fmatrix() f = theano.function([t], t[3:4], mode=mode_with_gpu) topo = f.maker.fgraph.toposort() - assert any([type(node.op) is tensor.Subtensor for node in topo]) - assert not any([isinstance(node.op, GpuSubtensor) for node in topo]) + assert any( type(node.op) is tensor.Subtensor for node in topo) + assert not any( isinstance(node.op, GpuSubtensor) for node in topo) # Test multiple use of the input # We want the subtensor to be on the GPU to prevent multiple transfer. t = tensor.fmatrix() f = theano.function([t], [t[3:4], t + 1], mode=mode_with_gpu) topo = f.maker.fgraph.toposort() - assert not any([type(node.op) is tensor.Subtensor for node in topo]) - assert any([isinstance(node.op, GpuSubtensor) for node in topo]) + assert not any( type(node.op) is tensor.Subtensor for node in topo) + assert any( isinstance(node.op, GpuSubtensor) for node in topo) # Test multiple use of the input + input as output # We want the subtensor to be on the GPU to prevent multiple transfer. t = tensor.fmatrix() f = theano.function([t], [t[3:4], t + 1, t], mode=mode_with_gpu) topo = f.maker.fgraph.toposort() - assert not any([type(node.op) is tensor.Subtensor for node in topo]) - assert any([isinstance(node.op, GpuSubtensor) for node in topo]) + assert not any( type(node.op) is tensor.Subtensor for node in topo) + assert any( isinstance(node.op, GpuSubtensor) for node in topo) # Test shared forced on CPU end we do computation on the output of # the subtensor. t = tensor._shared(numpy.zeros(20, "float32")) f = theano.function([], t[3:4] + 1, mode=mode_with_gpu) topo = f.maker.fgraph.toposort() - assert any([type(node.op) is tensor.Subtensor for node in topo]) - assert not any([isinstance(node.op, GpuSubtensor) for node in topo]) - assert any([isinstance(node.op, GpuElemwise) for node in topo]) + assert any( type(node.op) is tensor.Subtensor for node in topo) + assert not any( isinstance(node.op, GpuSubtensor) for node in topo) + assert any( isinstance(node.op, GpuElemwise) for node in topo) def test_local_gpu_elemwise(): diff --git a/theano/gpuarray/tests/test_scan.py b/theano/gpuarray/tests/test_scan.py index 525f8dae839..8083ea8db2a 100644 --- a/theano/gpuarray/tests/test_scan.py +++ b/theano/gpuarray/tests/test_scan.py @@ -83,12 +83,12 @@ def f_rnn(u_t, x_tm1, W_in, W): scan_node_topo = scan_node.op.fn.maker.fgraph.toposort() # check that there is no gpu transfer in the inner loop. - assert any([isinstance(node.op, GpuElemwise) - for node in scan_node_topo]) - assert not any([isinstance(node.op, HostFromGpu) - for node in scan_node_topo]) - assert not any([isinstance(node.op, GpuFromHost) - for node in scan_node_topo]) + assert any( isinstance(node.op, GpuElemwise) + for node in scan_node_topo) + assert not any( isinstance(node.op, HostFromGpu) + for node in scan_node_topo) + assert not any( isinstance(node.op, GpuFromHost) + for node in scan_node_topo) # This second version test the second case in the optimizer to the gpu. def test_one_sequence_one_output_weights_gpu2(self): @@ -143,12 +143,12 @@ def f_rnn(u_t, x_tm1, W_in, W): scan_node_topo = scan_node.op.fn.maker.fgraph.toposort() # check that there is no gpu transfer in the inner loop. 
- assert any([isinstance(node.op, GpuElemwise) - for node in scan_node_topo]) - assert not any([isinstance(node.op, HostFromGpu) - for node in scan_node_topo]) - assert not any([isinstance(node.op, GpuFromHost) - for node in scan_node_topo]) + assert any( isinstance(node.op, GpuElemwise) + for node in scan_node_topo) + assert not any( isinstance(node.op, HostFromGpu) + for node in scan_node_topo) + assert not any( isinstance(node.op, GpuFromHost) + for node in scan_node_topo) # This third test checks that scan can deal with a mixture of dtypes as # outputs when is running on GPU @@ -206,10 +206,10 @@ def f_rnn(u_t, x_tm1, W_in, W): scan_node_topo = scan_node.op.fn.maker.fgraph.toposort() # check that there is no gpu transfer in the inner loop. - assert not any([isinstance(node.op, HostFromGpu) - for node in scan_node_topo]) - assert not any([isinstance(node.op, GpuFromHost) - for node in scan_node_topo]) + assert not any( isinstance(node.op, HostFromGpu) + for node in scan_node_topo) + assert not any( isinstance(node.op, GpuFromHost) + for node in scan_node_topo) def test_gpu4_gibbs_chain(self): rng = numpy.random.RandomState(utt.fetch_seed()) diff --git a/theano/ifelse.py b/theano/ifelse.py index 2fd9b6bb001..3d59fbc68f9 100644 --- a/theano/ifelse.py +++ b/theano/ifelse.py @@ -407,8 +407,8 @@ def cond_make_inplace(node): # For big graph, do not make inplace scalar to speed up # optimization. (len(node.fgraph.apply_nodes) < 500 or - not all([getattr(o.type, 'ndim', -1) == 0 - for o in node.outputs]))): + not all( getattr(o.type, 'ndim', -1) == 0 + for o in node.outputs))): return IfElse(n_outs=op.n_outs, as_view=True, gpu=op.gpu, diff --git a/theano/misc/check_blas.py b/theano/misc/check_blas.py index 3bff9d67e39..116f2b16dec 100644 --- a/theano/misc/check_blas.py +++ b/theano/misc/check_blas.py @@ -66,8 +66,8 @@ def execute(execute=True, verbose=True, M=2000, N=2000, K=2000, order=order)) f = theano.function([], updates=[(c, 0.4 * c + .8 * T.dot(a, b))]) - if any([x.op.__class__.__name__ == 'Gemm' for x in - f.maker.fgraph.toposort()]): + if any( x.op.__class__.__name__ == 'Gemm' for x in + f.maker.fgraph.toposort()): c_impl = [hasattr(thunk, 'cthunk') for node, thunk in zip(f.fn.nodes, f.fn.thunks) if node.op.__class__.__name__ == "Gemm"] @@ -76,8 +76,8 @@ def execute(execute=True, verbose=True, M=2000, N=2000, K=2000, impl = 'CPU (with direct Theano binding to blas)' else: impl = 'CPU (without direct Theano binding to blas but with numpy/scipy binding to blas)' - elif any([x.op.__class__.__name__ == 'GpuGemm' for x in - f.maker.fgraph.toposort()]): + elif any( x.op.__class__.__name__ == 'GpuGemm' for x in + f.maker.fgraph.toposort()): impl = 'GPU' else: impl = 'ERROR, unable to tell if Theano used the cpu or the gpu:\n' diff --git a/theano/misc/pycuda_example.py b/theano/misc/pycuda_example.py index 691ca081763..bb5cef25e30 100644 --- a/theano/misc/pycuda_example.py +++ b/theano/misc/pycuda_example.py @@ -220,7 +220,7 @@ def make_node(self, *inputs): if i.type.ndim != inputs[0].type.ndim: raise TypeError('different ranks among inputs') - if any([any(i.type.broadcastable) for i in inputs]): + if any( any(i.type.broadcastable) for i in inputs): raise Exception("pycuda don't support broadcasted dimensions") assert len(inputs) == 2 # TODO remove @@ -313,7 +313,7 @@ def make_node(self, *inputs): if i.type.ndim != inputs[0].type.ndim: raise TypeError('different ranks among inputs') - if any([any(i.type.broadcastable) for i in inputs]): + if any( any(i.type.broadcastable) for i in inputs): 
raise Exception("pycuda don't support broadcasted dimensions") otype = CudaNdarrayType(broadcastable=[False] * _inputs[0].type.ndim) @@ -387,8 +387,9 @@ def local_pycuda_gpu_elemwise(node): GpuElemwise -> PycudaElemwiseSourceModuleOp """ if isinstance(node.op, GpuElemwise): - if (not any([any(i.type.broadcastable) for i in node.inputs]) and - all([i.ndim <= 2 for i in node.inputs])): + if (not any( any(i.type.broadcastable) for i in node.inputs) and + all( + i.ndim <= 2 for i in node.inputs)): new_op = PycudaElemwiseSourceModuleOp(node.op.scalar_op, node.op.inplace_pattern)( *node.inputs) diff --git a/theano/misc/tests/test_pycuda_example.py b/theano/misc/tests/test_pycuda_example.py index 471de581933..351d084deb2 100644 --- a/theano/misc/tests/test_pycuda_example.py +++ b/theano/misc/tests/test_pycuda_example.py @@ -48,15 +48,15 @@ def test_pycuda_elemwise_source_module(): pycuda_op_thunk(x, y)), mode=mode_with_gpu) - assert any([isinstance(node.op, theano.sandbox.cuda.GpuElemwise) - for node in f.maker.fgraph.toposort()]) - assert any([isinstance(node.op, PycudaElemwiseSourceModuleOp) - for node in f2.maker.fgraph.toposort()]) - assert any([isinstance(node.op, PycudaElemwiseSourceModuleOp) - for node in f3.maker.fgraph.toposort()]) - assert any([isinstance(node.op, + assert any( isinstance(node.op, theano.sandbox.cuda.GpuElemwise) + for node in f.maker.fgraph.toposort()) + assert any( isinstance(node.op, PycudaElemwiseSourceModuleOp) + for node in f2.maker.fgraph.toposort()) + assert any( isinstance(node.op, PycudaElemwiseSourceModuleOp) + for node in f3.maker.fgraph.toposort()) + assert any( isinstance(node.op, PycudaElemwiseSourceModuleMakeThunkOp) - for node in f4.maker.fgraph.toposort()]) + for node in f4.maker.fgraph.toposort()) val1 = numpy.asarray(numpy.random.rand(*shape), dtype='float32') val2 = numpy.asarray(numpy.random.rand(*shape), dtype='float32') diff --git a/theano/printing.py b/theano/printing.py index 8997963fb26..584c367a6f0 100644 --- a/theano/printing.py +++ b/theano/printing.py @@ -168,7 +168,7 @@ def debugprint(obj, depth=-1, print_type=False, obj) scan_ops = [] - if any([p for p in profile_list if p is not None and p.fct_callcount > 0]): + if any( p for p in profile_list if p is not None and p.fct_callcount > 0): print(""" Timing Info ----------- diff --git a/theano/sandbox/cuda/__init__.py b/theano/sandbox/cuda/__init__.py index 0dd2f6d082a..1f8d6e72ecd 100644 --- a/theano/sandbox/cuda/__init__.py +++ b/theano/sandbox/cuda/__init__.py @@ -27,7 +27,7 @@ def register_opt(*tags, **kwargs): - if any([not isinstance(t, str) for t in tags]): + if any( not isinstance(t, str) for t in tags): raise RuntimeError("Bad call to register_opt." 
" All tags must be strings.", tags) diff --git a/theano/sandbox/cuda/basic_ops.py b/theano/sandbox/cuda/basic_ops.py index 775958cd5a3..fef2afc349b 100644 --- a/theano/sandbox/cuda/basic_ops.py +++ b/theano/sandbox/cuda/basic_ops.py @@ -4149,8 +4149,8 @@ def tensor4(name=None, dtype=None): def profile_printer(fct_name, compile_time, fct_call_time, fct_call, apply_time, apply_cimpl, message, outputs_size, other_time): - if any([x[1].op.__class__.__name__.lower().startswith("gpu") - for x in apply_time.keys()]): + if any( x[1].op.__class__.__name__.lower().startswith("gpu") + for x in apply_time.keys()): local_time = sum(apply_time.values()) print() print('Some info useful for gpu:') diff --git a/theano/sandbox/cuda/nvcc_compiler.py b/theano/sandbox/cuda/nvcc_compiler.py index 4843711fab0..ed7d4cb0831 100644 --- a/theano/sandbox/cuda/nvcc_compiler.py +++ b/theano/sandbox/cuda/nvcc_compiler.py @@ -114,7 +114,7 @@ def compile_args(): flags.append("-DNPY_ARRAY_F_CONTIGUOUS=NPY_F_CONTIGUOUS") # If the user didn't specify architecture flags add them - if not any(['-arch=sm_' in f for f in flags]): + if not any( '-arch=sm_' in f for f in flags): # We compile cuda_ndarray.cu during import. # We should not add device properties at that time. # As the device is not selected yet! diff --git a/theano/sandbox/cuda/opt.py b/theano/sandbox/cuda/opt.py index deda7f4175f..285ebcd7b2d 100644 --- a/theano/sandbox/cuda/opt.py +++ b/theano/sandbox/cuda/opt.py @@ -259,8 +259,8 @@ def get_all_basic_scalar(composite_op): if isinstance(op.scalar_op, theano.scalar.Composite): scals = get_all_basic_scalar(op.scalar_op) for s in scals: - if any([i.type.dtype not in elemwise_cuda_dtype_supported - for i in s.inputs + s.outputs]): + if any( i.type.dtype not in elemwise_cuda_dtype_supported + for i in s.inputs + s.outputs): return False return True @@ -275,10 +275,10 @@ def local_gpu_elemwise_0(node): """ if (isinstance(node.op, tensor.Elemwise) and dtype_in_elemwise_supported(node.op)): - if any([i.owner and + if any( i.owner and isinstance(i.owner.op, HostFromGpu) - for i in node.inputs]): - if all([o.type.dtype == 'float32' for o in node.outputs]): + for i in node.inputs): + if all( o.type.dtype == 'float32' for o in node.outputs): # Don't set any inplace pattern. 
# gpu_inplace_elemwise_optimizer will do it later @@ -297,15 +297,15 @@ def local_gpu_elemwise_0(node): upcastable = set(['float32', 'int8', 'int16', 'uint8', 'uint16']) # case 1 - all inputs are already float32 - if all([i.type.dtype == 'float32' for i in node.inputs]): + if all( i.type.dtype == 'float32' for i in node.inputs): # TODO: change this when fusion makes Elemwise with # multiple outputs gpu_elemwise = new_op(*(as_cuda_ndarray_variable(i) for i in node.inputs), return_list=True) # case 2 - it is still ok if some inputs were upcast to float32 - elif all([i.type.dtype in upcastable - for i in node.inputs]): + elif all( i.type.dtype in upcastable + for i in node.inputs): # second - establish that a new node with upcasted inputs # has the same outputs types as the original node upcasted = node.op.make_node(*[tensor.cast(i, 'float32') @@ -359,7 +359,7 @@ def local_gpu_elemwise_1(node): # This happens when scalar_op requires support code return False - if all([i.dtype == 'float32' for i in elemwise_node.inputs]): + if all( i.dtype == 'float32' for i in elemwise_node.inputs): gpu_elemwise = new_op(*[as_cuda_ndarray_variable(i) for i in elemwise_node.inputs]) gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner) @@ -481,8 +481,8 @@ def local_gpu_dot_to_dot22(node): if isinstance(node.op, tensor.basic.Dot): if node.outputs[0].type.dtype != 'float32': return False - if any([i.owner and isinstance(i.owner.op, HostFromGpu) - for i in node.inputs]): + if any( i.owner and isinstance(i.owner.op, HostFromGpu) + for i in node.inputs): x, y = node.inputs if _is_real_vector(x) and _is_real_matrix(y): new_op = GpuDimShuffle((False,), ('x', 0)) @@ -506,10 +506,12 @@ def local_gpu_dot_to_dot22(node): @local_optimizer(None) def local_assert_no_cpu_op(node): if (not isinstance(node.op, GpuOp) and - all([var.owner and isinstance(var.owner.op, HostFromGpu) - for var in node.inputs]) and - any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)] - for var in node.outputs])): + all( + var.owner and isinstance(var.owner.op, HostFromGpu) + for var in node.inputs) and + any( + [c for c in var.clients if isinstance(c[0].op, GpuFromHost)] + for var in node.outputs)): if config.assert_no_cpu_op == "warn": _logger.warning(("CPU op %s is detected in the computational" @@ -542,18 +544,17 @@ def local_gpu_lazy_ifelse(node): gpu_ifelse = theano.ifelse.IfElse(node.op.n_outs, gpu=True) outs_clients = reduce(list.__add__, [out.clients for out in node.outputs]) - if any([(i.owner and isinstance(i.owner.op, HostFromGpu)) - for i in node.inputs]) or any( - [c != 'output' and c.op == gpu_from_host for c, idx - in outs_clients]): + if any( (i.owner and isinstance(i.owner.op, HostFromGpu)) + for i in node.inputs) or any( c != 'output' and c.op == gpu_from_host for c, idx + in outs_clients): c = node.inputs[0] outs = node.inputs[1:] # Should not happen, but just in case if isinstance(c.type, CudaNdarrayType): c = host_from_gpu(c) - if all([isinstance(o.type, CudaNdarrayType) or o.dtype != 'float32' - for o in outs]): + if all( isinstance(o.type, CudaNdarrayType) or o.dtype != 'float32' + for o in outs): return for i in range(len(outs)): @@ -584,8 +585,8 @@ def local_gpu_lazy_ifelse(node): # Should not happen, but just in case if isinstance(c.type, CudaNdarrayType): c = host_from_gpu(c) - if all([isinstance(o.type, CudaNdarrayType) or o.dtype != 'float32' - for o in outs]): + if all( isinstance(o.type, CudaNdarrayType) or o.dtype != 'float32' + for o in outs): return for i in range(len(outs)): @@ -615,8 +616,8 @@ 
def local_gpu_dot22(node): return [gpu_dot22(as_cuda_ndarray_variable(x), as_cuda_ndarray_variable(y))] if isinstance(node.op, tensor.blas.Dot22): - if any([(i.owner and isinstance(i.owner.op, HostFromGpu)) - for i in node.inputs]): + if any( (i.owner and isinstance(i.owner.op, HostFromGpu)) + for i in node.inputs): x, y = node.inputs return [host_from_gpu(gpu_dot22(as_cuda_ndarray_variable(x), as_cuda_ndarray_variable(y)))] @@ -655,8 +656,8 @@ def gpu_batched_dot(x, y): x, y = host_input.owner.inputs return [gpu_batched_dot(x, y)] if isinstance(node.op, tensor.blas.BatchedDot): - if any([(i.owner and isinstance(i.owner.op, HostFromGpu)) - for i in node.inputs]): + if any( (i.owner and isinstance(i.owner.op, HostFromGpu)) + for i in node.inputs): x, y = node.inputs return [host_from_gpu(gpu_batched_dot(x, y))] return False @@ -681,8 +682,8 @@ def local_gpu_dot22scalar(node): as_cuda_ndarray_variable(y), tensor.blas._as_scalar(scalar))] if isinstance(node.op, tensor.blas.Dot22Scalar): - if any([i.owner and isinstance(i.owner.op, HostFromGpu) - for i in node.inputs]): + if any( i.owner and isinstance(i.owner.op, HostFromGpu) + for i in node.inputs): x, y, scalar = node.inputs return [host_from_gpu( gpu_dot22scalar(as_cuda_ndarray_variable(x), @@ -710,8 +711,8 @@ def local_gpu_solve(node): as_cuda_ndarray_variable(y))] if isinstance(node.op, slinalg.Solve): - if any([i.owner and isinstance(i.owner.op, HostFromGpu) - for i in node.inputs]): + if any( i.owner and isinstance(i.owner.op, HostFromGpu) + for i in node.inputs): x, y = node.inputs return [host_from_gpu( gpu_solve(as_cuda_ndarray_variable(x), @@ -859,8 +860,8 @@ def local_gpu_careduce(node): # below do not support when x.ndim == 0. if x.type == node.outputs[0].type: return [x] - elif (all([c != "output" and isinstance(c.op, GpuFromHost) - for c, i in node.outputs[0].clients]) and + elif (all(c != "output" and isinstance(c.op, GpuFromHost) + for c, i in node.outputs[0].clients) and x.owner and x.owner.op.__class__ in cpu_ops_moved_to_gpu): # It is not always good to transfer the reduction to @@ -1069,8 +1070,8 @@ def local_gpu_subtensor(node): isinstance(gpu_x.owner.op, GpuFromHost)): if len(x.clients) == 1: - if any([n == 'output' or isinstance(n.op, GpuOp) - for n, _ in node.outputs[0].clients]): + if any( n == 'output' or isinstance(n.op, GpuOp) + for n, _ in node.outputs[0].clients): return else: return [host_from_gpu(as_cuda_ndarray_variable( @@ -2180,16 +2181,16 @@ def local_gpualloc(node): if node.inputs[0].owner and \ isinstance(node.inputs[0].owner.op, HostFromGpu): replace = True - elif all([c != 'output' and c.op == gpu_from_host - for c, idx in node.outputs[0].clients]): + elif all( c != 'output' and c.op == gpu_from_host + for c, idx in node.outputs[0].clients): # if all clients are on gpu replace = True - elif all([c != 'output' and + elif all( c != 'output' and c.op == tensor.join and all(i.owner and i.owner.op in [host_from_gpu, tensor.alloc] for i in c.inputs[1:]) - for c, idx in node.outputs[0].clients]): + for c, idx in node.outputs[0].clients): # if the client is on gpu or alloc replace = True if replace and node.inputs[0].dtype != 'float32': @@ -2298,8 +2299,8 @@ def local_gpu_eye(node): return return [gpu_eye(*host_input.owner.inputs)] if isinstance(node.op, tensor.Eye) and node.op.dtype == "float32": - if any([(i.owner and isinstance(i.owner.op, HostFromGpu)) - for i in node.inputs]): + if any( (i.owner and isinstance(i.owner.op, HostFromGpu)) + for i in node.inputs): if tensor.extract_constant(node.inputs[2]) != 
0: return return [host_from_gpu(gpu_eye(*node.inputs))] @@ -2479,8 +2480,8 @@ def gpuScanOptimization(node): if (type(node.op) == scan_op.Scan and not node.op.info['gpu']): - if any([(i.owner and isinstance(i.owner.op, HostFromGpu)) - for i in node.inputs]): + if any( (i.owner and isinstance(i.owner.op, HostFromGpu)) + for i in node.inputs): thescan = node.op info = copy.deepcopy(thescan.info) diff --git a/theano/sandbox/cuda/rng_curand.py b/theano/sandbox/cuda/rng_curand.py index e11637e7010..3d935e27cd6 100644 --- a/theano/sandbox/cuda/rng_curand.py +++ b/theano/sandbox/cuda/rng_curand.py @@ -323,8 +323,8 @@ def uniform(self, size, low=0.0, high=1.0, ndim=None, """ if isinstance(size, tuple): msg = "size must be a tuple of int or a Theano variable" - assert all([isinstance(i, int) or isinstance(i, Variable) - for i in size]), msg + assert all( isinstance(i, int) or isinstance(i, Variable) + for i in size), msg else: msg = "size must be a tuple of int or a Theano variable" assert isinstance(size, Variable) and size.ndim == 1, msg @@ -355,8 +355,8 @@ def normal(self, size=None, avg=0.0, std=1.0, ndim=None, """ if isinstance(size, tuple): msg = "size must be a tuple of int or a Theano variable" - assert all([isinstance(i, int) or isinstance(i, Variable) - for i in size]), msg + assert all( isinstance(i, int) or isinstance(i, Variable) + for i in size), msg else: msg = "size must be a tuple of int or a Theano variable" assert isinstance(size, Variable) and size.ndim == 1, msg diff --git a/theano/sandbox/cuda/tests/test_basic_ops.py b/theano/sandbox/cuda/tests/test_basic_ops.py index 51dcfd87137..2bd8795f0af 100644 --- a/theano/sandbox/cuda/tests/test_basic_ops.py +++ b/theano/sandbox/cuda/tests/test_basic_ops.py @@ -308,8 +308,8 @@ def test_careduce(): def test_flatten(): x = cuda.fmatrix('x') f = theano.function([x], x.flatten(), mode=mode_with_gpu) - assert any([node for node in f.maker.fgraph.toposort() - if isinstance(node.op, B.GpuReshape)]) + assert any( node for node in f.maker.fgraph.toposort() + if isinstance(node.op, B.GpuReshape)) assert theano.tensor.is_flat(x.flatten()) assert len(f([[0., 0.], [0., 0.]]).shape) == 1 @@ -325,7 +325,7 @@ def test_reshape(): fv = f(cuda_ndarray.CudaNdarray(theano._asarray([0, 1, 2, 3, 4, 5], dtype='float32'))) topo = f.maker.fgraph.toposort() - assert any([isinstance(node.op, B.GpuReshape) for node in topo]) + assert any( isinstance(node.op, B.GpuReshape) for node in topo) assert numpy.all(fv == numpy.asarray([[0, 1, 2], [3, 4, 5]])) # test that it works without inplace operations @@ -338,7 +338,7 @@ def test_reshape(): f_sub = theano.function([a, b], c - b, mode=mode_with_gpu) topo = f_sub.maker.fgraph.toposort() - assert any([isinstance(node.op, B.GpuReshape) for node in topo]) + assert any( isinstance(node.op, B.GpuReshape) for node in topo) assert numpy.all(f_sub(a_val, b_val) == 0.0) assert numpy.all(numpy.asarray(a_val) == numpy.asarray(a_val_copy)) @@ -349,7 +349,7 @@ def test_reshape(): f_sub = theano.function([a, b], c - b, mode=mode_with_gpu) topo = f_sub.maker.fgraph.toposort() - assert any([isinstance(node.op, B.GpuReshape) for node in topo]) + assert any( isinstance(node.op, B.GpuReshape) for node in topo) assert numpy.all(f_sub(a_val, b_val) == 0.0) assert numpy.all(numpy.asarray(a_val) == numpy.asarray(a_val_copy)) @@ -571,8 +571,8 @@ def test_elemwise_comparaison_cast(): out = f(av, bv) assert numpy.all(out == ans) - assert any([isinstance(node.op, cuda.GpuElemwise) - for node in f.maker.fgraph.toposort()]) + assert any( 
isinstance(node.op, cuda.GpuElemwise)
+               for node in f.maker.fgraph.toposort())
 def test_elemwise_composite_float64():
@@ -607,8 +607,8 @@ def get_all_basic_scalar(composite_op):
         if isinstance(node.op.scalar_op, theano.scalar.Composite):
             scals = get_all_basic_scalar(node.op.scalar_op)
             for s in scals:
-                assert not any([i.type.dtype == 'float64'
-                                for i in s.inputs + s.outputs])
+                assert not any( i.type.dtype == 'float64'
+                                for i in s.inputs + s.outputs)
 def test_elemwise_composite_support_code():
@@ -905,7 +905,7 @@ def test_gpujoin_no_rebroadcast():
     a = tcn.shared_constructor(_a)
     f = theano.function([], T.join(1, a))
     l = f.maker.fgraph.toposort()
-    assert not any([isinstance(x.op, T.Rebroadcast) for x in l])
+    assert not any( isinstance(x.op, T.Rebroadcast) for x in l)
 def test_gpualloc_input_on_gpu():
@@ -1216,8 +1216,8 @@ def test_many_arg_elemwise():
                 outputs.append(f(*args))
                 # assert that the test was done on the gpu.
                 if mode is mode_with_gpu:
-                    assert any([isinstance(node.op, cuda.GpuElemwise)
-                                for node in f.maker.fgraph.apply_nodes])
+                    assert any( isinstance(node.op, cuda.GpuElemwise)
+                                for node in f.maker.fgraph.apply_nodes)
                 # test the optijmization local_gpu_elemwise_1
                 f = theano.function(
@@ -1227,8 +1227,8 @@ def test_many_arg_elemwise():
                 out = f(*args)
                 # assert that the test was done on the gpu.
                 if mode is mode_with_gpu:
-                    assert any([isinstance(node.op, cuda.GpuElemwise)
-                                for node in f.maker.fgraph.apply_nodes])
+                    assert any( isinstance(node.op, cuda.GpuElemwise)
+                                for node in f.maker.fgraph.apply_nodes)
                 utt.assert_allclose(out, outputs[-1])
     results_gpu, results_cpu = outputs
@@ -1287,8 +1287,8 @@ def check(dtype, N, M_=None, K=0):
         utt.assert_allclose(result, numpy.eye(N, M_, K, dtype=dtype))
         assert result.dtype == numpy.dtype(dtype)
         if K == 0:
-            assert any([isinstance(node.op, B.GpuEye)
-                        for node in f.maker.fgraph.toposort()])
+            assert any( isinstance(node.op, B.GpuEye)
+                        for node in f.maker.fgraph.toposort())
     for dtype in ['float32']:
         yield check, dtype, 3
diff --git a/theano/sandbox/cuda/tests/test_blas.py b/theano/sandbox/cuda/tests/test_blas.py
index 9f57b169dfa..4055f8f2dfa 100644
--- a/theano/sandbox/cuda/tests/test_blas.py
+++ b/theano/sandbox/cuda/tests/test_blas.py
@@ -185,7 +185,7 @@ def cmp(a_shp, b_shp):
         [a, b], tensor.dot(a, b) * numpy.asarray(4, 'float32'))
     t = f.maker.fgraph.toposort()
-    assert any([isinstance(n.op, tcn.blas.GpuDot22Scalar) for n in t])
+    assert any( isinstance(n.op, tcn.blas.GpuDot22Scalar) for n in t)
     # assert any([isinstance(n.op, tcn.basic_ops.GpuAllocEmpty)
     #             for n in t])
     assert numpy.allclose(f(av, bv), f2(av, bv))
@@ -194,7 +194,7 @@ def cmp(a_shp, b_shp):
                         mode=mode_with_gpu)
     f2 = theano.function([a, b, scalar], tensor.dot(a, b) * scalar)
     t = f.maker.fgraph.toposort()
-    assert any([isinstance(n.op, tcn.blas.GpuDot22Scalar) for n in t])
+    assert any( isinstance(n.op, tcn.blas.GpuDot22Scalar) for n in t)
     # assert any([isinstance(n.op, tcn.basic_ops.GpuAllocEmpty)
     #             for n in t])
     assert numpy.allclose(f(av, bv, 0.5), f2(av, bv, 0.5))
@@ -228,8 +228,8 @@ def cmp(a_shp, b_shp):
         f = pfunc([b, c], [],
                   updates=[(a, tensor.dot(a, b) + tensor.exp(c))],
                   mode=mode_with_gpu)
-        assert any([node.op == tcn.blas.gpu_gemm_inplace
-                    for node in f.maker.fgraph.toposort()])
+        assert any( node.op == tcn.blas.gpu_gemm_inplace
+                    for node in f.maker.fgraph.toposort())
         bval = my_rand(* b_shp)
         cval = my_rand(a_shp[0], b_shp[1])
@@ -269,8 +269,8 @@ def cmp(a_shp, b_shp):
                   updates=[(a, tensor.dot(a, b) + c)],
                   mode=mode_with_gpu)
-        assert any([node.op == tcn.blas.gpu_gemm_no_inplace
-                    for node in f.maker.fgraph.toposort()])
+        assert any( node.op == tcn.blas.gpu_gemm_no_inplace
+                    for node in f.maker.fgraph.toposort())
         bval = my_rand(*b_shp)
         bval2 = my_rand(*b_shp)
         rval = f(bval, bval2)
@@ -376,11 +376,11 @@ def test_downsample():
                        mode=mode_with_gpu.excluding('cudnn'))
            f2 = pfunc([], ds_op(tensor.as_tensor_variable(a)),
                       mode=mode_without_gpu)
-            assert any([isinstance(node.op,
+            assert any( isinstance(node.op,
                                    tcn.blas.GpuDownsampleFactorMax)
-                        for node in f.maker.fgraph.toposort()])
+                        for node in f.maker.fgraph.toposort())
-            assert any([isinstance(node.op, Pool)
-                        for node in f2.maker.fgraph.toposort()])
+            assert any( isinstance(node.op, Pool)
+                        for node in f2.maker.fgraph.toposort())
            assert numpy.allclose(f(), f2())
            # The grad is too slow on GT220 GPU
@@ -401,11 +401,11 @@ def test_downsample():
                tensor.grad(ds_op(tensor.as_tensor_variable(a)).sum(), a),
                mode=mode_without_gpu)
-            assert any([isinstance(node.op,
+            assert any( isinstance(node.op,
                                    tcn.blas.GpuDownsampleFactorMaxGrad)
-                        for node in g.maker.fgraph.toposort()])
+                        for node in g.maker.fgraph.toposort())
-            assert any([isinstance(node.op, PoolGrad)
-                        for node in g2.maker.fgraph.toposort()])
+            assert any( isinstance(node.op, PoolGrad)
+                        for node in g2.maker.fgraph.toposort())
            assert numpy.allclose(g(), g2()), shp
            ggf = gradient.Lop(tensor.grad((ds_op(
@@ -418,12 +418,12 @@ def test_downsample():
            gg = pfunc([], ggf, mode=gpu_mode)
            gg2 = pfunc([], ggf, mode=ref_mode)
-            assert any([isinstance(
+            assert any( isinstance(
                node.op, tcn.blas.GpuDownsampleFactorMaxGradGrad)
-                        for node in gg.maker.fgraph.toposort()])
+                        for node in gg.maker.fgraph.toposort())
-            assert any([isinstance(
+            assert any( isinstance(
                node.op, DownsampleFactorMaxGradGrad)
-                        for node in gg2.maker.fgraph.toposort()])
+                        for node in gg2.maker.fgraph.toposort())
            assert numpy.allclose(gg(), gg2()), shp
            # We already check that the gpu version return
diff --git a/theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py b/theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py
index 7f310d848e4..3ee10b0445e 100644
--- a/theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py
+++ b/theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py
@@ -194,8 +194,8 @@ def _params_allgood(ishape, kshape, mode, subsample=(1, 1), img_stride=(1, 1),
                          kshp=compile_kshp)(i, k)
     f = theano.function([i, k], op, mode=theano_mode)
     if cls is not None:
-        assert any([isinstance(node.op, cls)
-                    for node in f.maker.fgraph.toposort()]), "Cannot find class %r in %r" % (cls, f.maker.fgraph.toposort())
+        assert any( isinstance(node.op, cls)
+                    for node in f.maker.fgraph.toposort()), "Cannot find class %r in %r" % (cls, f.maker.fgraph.toposort())
     t2 = time.time()
     gpuval = f(img, kern)
     t3 = time.time()
@@ -441,11 +441,11 @@ def test_default_conv():
     f = theano.function([img, fil], c, mode=theano_mode)
     if cuda.dnn.dnn_available():
-        assert any([isinstance(a.op, GpuDnnConv)
-                    for a in f.maker.fgraph.apply_nodes])
+        assert any( isinstance(a.op, GpuDnnConv)
+                    for a in f.maker.fgraph.apply_nodes)
     else:
-        assert any([isinstance(a.op, cuda.blas.GpuCorrMM)
-                    for a in f.maker.fgraph.apply_nodes])
+        assert any( isinstance(a.op, cuda.blas.GpuCorrMM)
+                    for a in f.maker.fgraph.apply_nodes)
 def _test_full(cls, mode=None, version=[-1], extra_shapes=[],
diff --git a/theano/sandbox/cuda/tests/test_dnn.py b/theano/sandbox/cuda/tests/test_dnn.py
index 7110baa164f..587089b93c1 100644
--- a/theano/sandbox/cuda/tests/test_dnn.py
+++ b/theano/sandbox/cuda/tests/test_dnn.py
@@ -143,7 +143,7 @@ def test_dnn_conv_inplace():
     topo = f.maker.fgraph.toposort()
     convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]
     assert len(convs) == 2
-    assert all([node.op.inplace for node in convs])
+    assert all( node.op.inplace for node in convs)
     assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2
     # Test grad w op
@@ -154,7 +154,7 @@ def test_dnn_conv_inplace():
     topo = f.maker.fgraph.toposort()
     convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]
     assert len(convs) == 2
-    assert all([node.op.inplace for node in convs])
+    assert all( node.op.inplace for node in convs)
     assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2
     # Test grad i op
@@ -165,7 +165,7 @@ def test_dnn_conv_inplace():
     topo = f.maker.fgraph.toposort()
     convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]
     assert len(convs) == 2
-    assert all([node.op.inplace for node in convs])
+    assert all( node.op.inplace for node in convs)
     assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2
@@ -265,11 +265,11 @@ def test_pooling():
     mode_without_gpu2 = mode_without_gpu.including()
     mode_without_gpu2.check_isfinite = False
     f1 = theano.function([x], out1, mode=mode_with_gpu)
-    assert any([isinstance(node.op, cuda.dnn.GpuDnnPool)
-                for node in f1.maker.fgraph.apply_nodes])
+    assert any( isinstance(node.op, cuda.dnn.GpuDnnPool)
+                for node in f1.maker.fgraph.apply_nodes)
     f2 = theano.function([x], out2, mode=mode_without_gpu2)
-    assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)
-                    for node in f2.maker.fgraph.apply_nodes])
+    assert not any( isinstance(node.op, cuda.dnn.GpuDnnPool)
+                    for node in f2.maker.fgraph.apply_nodes)
     for shp in [(1, 10, 100, 100),
                 (1, 3, 99, 99),
                 (32, 1, 147, 197),
@@ -301,8 +301,8 @@ def fn(x):
     # Confirm that the opt would have inserted it.
     fg = theano.function([x], theano.grad(fn(x).sum(), x),
                          mode=mode_with_gpu)
-    assert any([isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)
-                for node in fg.maker.fgraph.toposort()])
+    assert any( isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)
+                for node in fg.maker.fgraph.toposort())
     # Test the GPU grad + GPU implementation
     def fn(x):
@@ -319,8 +319,8 @@ def fn(x):
     # Confirm that we get the good op.
     fg = theano.function([x], theano.grad(fn(x).sum(), x),
                          mode=mode_with_gpu)
-    assert any([isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)
-                for node in fg.maker.fgraph.toposort()])
+    assert any( isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)
+                for node in fg.maker.fgraph.toposort())
     g_out = fg(data)
     # Compare again the CPU result
@@ -330,11 +330,11 @@ def fn(x):
     fc = theano.function([x], theano.grad(out.sum(), x),
                          mode=mode_without_gpu)
     if mode == 'max':
-        assert any([isinstance(node.op, MaxPoolGrad)
-                    for node in fc.maker.fgraph.toposort()])
+        assert any( isinstance(node.op, MaxPoolGrad)
+                    for node in fc.maker.fgraph.toposort())
     else:
-        assert any([isinstance(node.op, AveragePoolGrad)
-                    for node in fc.maker.fgraph.toposort()])
+        assert any( isinstance(node.op, AveragePoolGrad)
+                    for node in fc.maker.fgraph.toposort())
     c_out = fc(data)
     utt.assert_allclose(c_out, g_out)
@@ -368,8 +368,8 @@ def fn(x):
     mode_without_gpu2.check_isfinite = False
     f_gpu = theano.function([x], fn(x), mode=mode_with_gpu)
-    assert any([isinstance(node.op, cuda.dnn.GpuDnnPool)
-                for node in f_gpu.maker.fgraph.apply_nodes])
+    assert any( isinstance(node.op, cuda.dnn.GpuDnnPool)
+                for node in f_gpu.maker.fgraph.apply_nodes)
     i = 1
     for shp in [(1, 10, 100, 100),
@@ -381,8 +381,8 @@ def fn(x):
                       pool_function=T.max)
         f_cpu = theano.function([x], out, mode=mode_without_gpu2)
-        assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)
-                        for node in f_cpu.maker.fgraph.apply_nodes])
+        assert not any( isinstance(node.op, cuda.dnn.GpuDnnPool)
+                        for node in f_cpu.maker.fgraph.apply_nodes)
         # Change the window size dynamically for gpu op
         ws.set_value(numpy.array([i, i]).astype('int32'))
@@ -457,11 +457,11 @@ def test_pooling3d():
                     pad=pad, pool_func=func)
     f1 = theano.function([x], out1, mode=mode_with_gpu)
-    assert any([isinstance(node.op, cuda.dnn.GpuDnnPool)
-                for node in f1.maker.fgraph.apply_nodes])
+    assert any( isinstance(node.op, cuda.dnn.GpuDnnPool)
+                for node in f1.maker.fgraph.apply_nodes)
     f2 = theano.function([x], out2, mode=mode_without_gpu_ref)
-    assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)
-                    for node in f2.maker.fgraph.apply_nodes])
+    assert not any( isinstance(node.op, cuda.dnn.GpuDnnPool)
+                    for node in f2.maker.fgraph.apply_nodes)
     for shp in [(1, 10, 100, 100, 100),
                 (1, 3, 99, 99, 99),
                 (32, 1, 147, 197, 37),
@@ -505,8 +505,8 @@ def fn(x):
     # Confirm that we get the good op.
     fg = theano.function([x], theano.grad(fn(x).sum(), x),
                          mode=mode_with_gpu)
-    assert any([isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)
-                for node in fg.maker.fgraph.toposort()])
+    assert any( isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)
+                for node in fg.maker.fgraph.toposort())
     g_out = fg(data)
     # Compare again the CPU result
@@ -530,8 +530,8 @@ def test_pooling_opt():
         pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                 ignore_border=True),
         mode=mode_with_gpu)
-    assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
-                for n in f.maker.fgraph.toposort()])
+    assert any( isinstance(n.op, cuda.dnn.GpuDnnPool)
+                for n in f.maker.fgraph.toposort())
     f(numpy.zeros((10, 10), dtype='float32'))
@@ -541,8 +541,8 @@ def test_pooling_opt():
                              ignore_border=True).sum(), x),
         mode=mode_with_gpu.including("cudnn"))
-    assert any([isinstance(n.op, cuda.dnn.GpuDnnPoolGrad)
-                for n in f.maker.fgraph.toposort()])
+    assert any( isinstance(n.op, cuda.dnn.GpuDnnPoolGrad)
+                for n in f.maker.fgraph.toposort())
     f(numpy.zeros((10, 10), dtype='float32'))
@@ -745,8 +745,8 @@ def test_dnn_tag():
     if not raised:
         assert cuda.dnn.dnn_available()
-        assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
-                    for n in f.maker.fgraph.toposort()])
+        assert any( isinstance(n.op, cuda.dnn.GpuDnnPool)
+                    for n in f.maker.fgraph.toposort())
 class TestDnnInferShapes(utt.InferShapeTester):
diff --git a/theano/sandbox/cuda/tests/test_driver.py b/theano/sandbox/cuda/tests/test_driver.py
index 3e63065bd8f..37955bb6cc2 100644
--- a/theano/sandbox/cuda/tests/test_driver.py
+++ b/theano/sandbox/cuda/tests/test_driver.py
@@ -68,7 +68,7 @@ def test_nvidia_driver3():
     f = theano.function([var], var + 1, mode=mode_with_gpu,
                         profile=False)
     topo = f.maker.fgraph.toposort()
-    assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])
+    assert any( isinstance(node.op, cuda.GpuElemwise) for node in topo)
     assert theano.sandbox.cuda.use.device_number is not None
diff --git a/theano/sandbox/cuda/tests/test_nnet.py b/theano/sandbox/cuda/tests/test_nnet.py
index dad4b37cfb7..6a4d50a587e 100644
--- a/theano/sandbox/cuda/tests/test_nnet.py
+++ b/theano/sandbox/cuda/tests/test_nnet.py
@@ -74,12 +74,12 @@ def test_GpuCrossentropySoftmaxArgmax1HotWithBias():
     # theano.printing.debugprint(classify)
     # theano.printing.debugprint(classify_gpu)
-    assert any([isinstance(node.op,
+    assert any( isinstance(node.op,
                 T.nnet.CrossentropySoftmaxArgmax1HotWithBias)
-                for node in classify.maker.fgraph.toposort()])
+                for node in classify.maker.fgraph.toposort())
-    assert any([isinstance(node.op,
+    assert any( isinstance(node.op,
                 cuda.nnet.GpuCrossentropySoftmaxArgmax1HotWithBias)
-                for node in classify_gpu.maker.fgraph.toposort()])
+                for node in classify_gpu.maker.fgraph.toposort())
     out = classify(yy, b_values, dot_value)
     gout = classify_gpu(yy, b_values, dot_value)
@@ -128,11 +128,11 @@ def test_GpuCrossentropySoftmax1HotWithBiasDx():
     # theano.printing.debugprint(cpu_f)
     # theano.printing.debugprint(gpu_f)
-    assert any([isinstance(node.op, T.nnet.CrossentropySoftmax1HotWithBiasDx)
-                for node in cpu_f.maker.fgraph.toposort()])
+    assert any( isinstance(node.op, T.nnet.CrossentropySoftmax1HotWithBiasDx)
+                for node in cpu_f.maker.fgraph.toposort())
-    assert any([isinstance(node.op,
+    assert any( isinstance(node.op,
                 cuda.nnet.GpuCrossentropySoftmax1HotWithBiasDx)
-                for node in gpu_f.maker.fgraph.toposort()])
+                for node in gpu_f.maker.fgraph.toposort())
     cpu_out = cpu_f(softmax_output_value)
     gpu_out = gpu_f(softmax_output_value)
diff --git a/theano/sandbox/cuda/tests/test_opt.py b/theano/sandbox/cuda/tests/test_opt.py
index 30c3a26a843..d7df0646aa3 100644
--- a/theano/sandbox/cuda/tests/test_opt.py
+++ b/theano/sandbox/cuda/tests/test_opt.py
@@ -413,40 +413,40 @@ def test_local_gpu_subtensor():
     t = tensor._shared(numpy.zeros(20, "float32"))
     f = theano.function([], t[3:4], mode=mode_with_gpu)
     topo = f.maker.fgraph.toposort()
-    assert any([type(node.op) is tensor.Subtensor for node in topo])
-    assert not any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
+    assert any( type(node.op) is tensor.Subtensor for node in topo)
+    assert not any( isinstance(node.op, cuda.GpuSubtensor) for node in topo)
     # Test graph input.
     t = tensor.fmatrix()
     f = theano.function([t], t[3:4], mode=mode_with_gpu)
     topo = f.maker.fgraph.toposort()
-    assert any([type(node.op) is tensor.Subtensor for node in topo])
-    assert not any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
+    assert any( type(node.op) is tensor.Subtensor for node in topo)
+    assert not any( isinstance(node.op, cuda.GpuSubtensor) for node in topo)
     # Test multiple use of the input
     # We want the subtensor to be on the GPU to prevent multiple transfer.
     t = tensor.fmatrix()
     f = theano.function([t], [t[3:4], t + 1], mode=mode_with_gpu)
     topo = f.maker.fgraph.toposort()
-    assert not any([type(node.op) is tensor.Subtensor for node in topo])
-    assert any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
+    assert not any( type(node.op) is tensor.Subtensor for node in topo)
+    assert any( isinstance(node.op, cuda.GpuSubtensor) for node in topo)
     # Test multiple use of the input + input as output
     # We want the subtensor to be on the GPU to prevent multiple transfer.
     t = tensor.fmatrix()
     f = theano.function([t], [t[3:4], t + 1, t], mode=mode_with_gpu)
     topo = f.maker.fgraph.toposort()
-    assert not any([type(node.op) is tensor.Subtensor for node in topo])
-    assert any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
+    assert not any( type(node.op) is tensor.Subtensor for node in topo)
+    assert any( isinstance(node.op, cuda.GpuSubtensor) for node in topo)
     # Test shared forced on CPU end we do computation on the output of
     # the subtensor.
     t = tensor._shared(numpy.zeros(20, "float32"))
     f = theano.function([], t[3:4] + 1, mode=mode_with_gpu)
     topo = f.maker.fgraph.toposort()
-    assert any([type(node.op) is tensor.Subtensor for node in topo])
-    assert not any([isinstance(node.op, cuda.GpuSubtensor) for node in topo])
-    assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])
+    assert any( type(node.op) is tensor.Subtensor for node in topo)
+    assert not any( isinstance(node.op, cuda.GpuSubtensor) for node in topo)
+    assert any( isinstance(node.op, cuda.GpuElemwise) for node in topo)
 def test_local_gpu_split():
@@ -461,14 +461,14 @@ def test_local_gpu_split():
     cpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
     l = f.maker.fgraph.toposort()
     # Ensure that one op is theano.tensor.Split
-    assert any([isinstance(o.op, theano.tensor.Split) for o in l])
+    assert any( isinstance(o.op, theano.tensor.Split) for o in l)
     # GPU version
     f = theano.function([x, splits], [ra, rb, rc], mode=mode_with_gpu)
     gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
     l = f.maker.fgraph.toposort()
-    assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
+    assert any( isinstance(o.op, cuda.GpuSplit) for o in l)
     # Check equality
-    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
+    assert all( (cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res))
     # Test the other path of the optimizer, when it is the output that
     # is moved to the GPU.
@@ -477,9 +477,9 @@ def test_local_gpu_split():
                         mode=mode_with_gpu.excluding("InputToGpuOptimizer"))
     gpu_res = f([0, 1, 2, 3, 4, 5], [3, 2, 1])
     l = f.maker.fgraph.toposort()
-    assert any([isinstance(o.op, cuda.GpuSplit) for o in l])
+    assert any( isinstance(o.op, cuda.GpuSplit) for o in l)
     # Check equality
-    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
+    assert all( (cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res))
     # Test that split with only 1 output work
     ra = tensor.split(x, splits, n_splits=1, axis=0)
@@ -488,16 +488,16 @@ def test_local_gpu_split():
     l = f.maker.fgraph.toposort()
     # Ensure that no op is theano.tensor.Split or GpuSplit, they get
     # optimized away.
-    assert not any([isinstance(o.op, (theano.tensor.Split,
-                                      cuda.GpuSplit)) for o in l])
+    assert not any( isinstance(o.op, (theano.tensor.Split,
+                                      cuda.GpuSplit)) for o in l)
     # GPU version
     f = theano.function([x, splits], [ra], mode=mode_with_gpu)
     gpu_res = f([0, 1, 2, 3, 4, 5], [6])
     l = f.maker.fgraph.toposort()
-    assert not any([isinstance(o.op, (theano.tensor.Split,
-                                      cuda.GpuSplit)) for o in l])
+    assert not any( isinstance(o.op, (theano.tensor.Split,
+                                      cuda.GpuSplit)) for o in l)
     # Check equality
-    assert all([(cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res)])
+    assert all( (cpu == gpu).all() for cpu, gpu in zip(cpu_res, gpu_res))
 def test_print_op():
diff --git a/theano/sandbox/multinomial.py b/theano/sandbox/multinomial.py
index 5b5b922d039..c5b57e645a6 100644
--- a/theano/sandbox/multinomial.py
+++ b/theano/sandbox/multinomial.py
@@ -596,9 +596,10 @@ def local_gpu_multinomial(node):
         return None
     m, = node.outputs
     if (p.dtype == u.dtype == m.dtype == 'float32' and
-        any([i.owner and isinstance(i.owner.op,
+        any(
+            i.owner and isinstance(i.owner.op,
                          theano.sandbox.cuda.HostFromGpu)
-             for i in node.inputs])):
+            for i in node.inputs)):
         gpu_op = GpuMultinomialFromUniform(node.op.odtype)
         return [host_from_gpu(gpu_op(*[gpu_from_host(i) for i in [p, u]])).T]
diff --git a/theano/sandbox/rng_mrg.py b/theano/sandbox/rng_mrg.py
index e100e9f0b4a..7dbb7f8abf3 100644
--- a/theano/sandbox/rng_mrg.py
+++ b/theano/sandbox/rng_mrg.py
@@ -1073,7 +1073,8 @@ def guess_n_streams(size, warn=False):
     # Note that this code was moved out of `MRG_RandomStreams` so that it can
     # be easily accessed from tests, where we want to disable the warning.
     if (isinstance(size, (tuple, list)) and
-            all([isinstance(i, integer_types) for i in size])):
+            all(
+                isinstance(i, integer_types) for i in size)):
         # We can make a guess.
         r = 1
         for s in size:
@@ -1303,10 +1304,10 @@ def uniform(self, size, low=0.0, high=1.0, ndim=None, dtype=None,
         if isinstance(size, tuple):
             msg = "size must be a tuple of int or a Theano variable"
-            assert all([isinstance(i, (numpy.integer, integer_types, Variable))
-                        for i in size]), msg
-            if any([isinstance(i, (numpy.integer, integer_types)) and i <= 0
-                    for i in size]):
+            assert all( isinstance(i, (numpy.integer, integer_types, Variable))
+                        for i in size), msg
+            if any( isinstance(i, (numpy.integer, integer_types)) and i <= 0
+                    for i in size):
                 raise ValueError(
                     "The specified size contains a dimension with value <= 0",
                     size)
@@ -1391,7 +1392,7 @@ def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int64',
             raise TypeError("You have to specify pvals")
         pvals = as_tensor_variable(pvals)
         if size is not None:
-            if any([isinstance(i, integer_types) and i <= 0 for i in size]):
+            if any( isinstance(i, integer_types) and i <= 0 for i in size):
                 raise ValueError(
                     "The specified size contains a dimension with value <= 0",
                     size)
@@ -1497,7 +1498,7 @@ def normal(self, size, avg=0.0, std=1.0, ndim=None,
         evened = False
         constant = False
         if (isinstance(size, tuple) and
-                all([isinstance(i, (numpy.integer, integer_types)) for i in size])):
+                all(
+                    isinstance(i, (numpy.integer, integer_types)) for i in size)):
             constant = True
             # Force dtype because it defaults to float when size is empty
             n_samples = numpy.prod(size, dtype='int64')
diff --git a/theano/sandbox/tests/test_multinomial.py b/theano/sandbox/tests/test_multinomial.py
index 207c3ca7544..96faa75ae24 100644
--- a/theano/sandbox/tests/test_multinomial.py
+++ b/theano/sandbox/tests/test_multinomial.py
@@ -122,8 +122,8 @@ def body(mode, gpu):
     f = function([p, u], m * 2, allow_input_downcast=True, mode=mode)
     if gpu:
-        assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
-                    for node in f.maker.fgraph.toposort()])
+        assert any( type(node.op) is multinomial.GpuMultinomialFromUniform
+                    for node in f.maker.fgraph.toposort())
     # test that both first and second samples can be drawn
     utt.assert_allclose(f([[1, 0], [0, 1]], [.1, .1]),
@@ -157,8 +157,8 @@ def body(mode, gpu):
     m = multinomial.MultinomialFromUniform('auto')(p, u)
     f = function([p, u], m * 2, allow_input_downcast=True, mode=mode)
     if gpu:
-        assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
-                    for node in f.maker.fgraph.toposort()])
+        assert any( type(node.op) is multinomial.GpuMultinomialFromUniform
+                    for node in f.maker.fgraph.toposort())
     pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
     pval = pval / pval.sum(axis=1)[:, None]
@@ -214,8 +214,8 @@ def test_gpu_opt():
     m_gpu = cuda.gpu_from_host(m)
     f = function([p, u], m_gpu, allow_input_downcast=True,
                  mode=get_mode(True))
-    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
-                for node in f.maker.fgraph.toposort()])
+    assert any( type(node.op) is multinomial.GpuMultinomialFromUniform
+                for node in f.maker.fgraph.toposort())
     pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
     pval = pval / pval.sum(axis=1)[:, None]
     uval = numpy.ones_like(pval[:, 0]) * 0.5
@@ -228,8 +228,8 @@ def test_gpu_opt():
     m_gpu = cuda.gpu_from_host(m)
     f = function([r, u], m_gpu, allow_input_downcast=True,
                  mode=get_mode(True))
-    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
-                for node in f.maker.fgraph.toposort()])
+    assert any( type(node.op) is multinomial.GpuMultinomialFromUniform
+                for node in f.maker.fgraph.toposort())
     pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
     pval = pval / pval.sum(axis=1)[:, None]
     uval = numpy.ones_like(pval[:, 0]) * 0.5
diff --git a/theano/sandbox/tests/test_rng_mrg.py b/theano/sandbox/tests/test_rng_mrg.py
index 10ff4eeece4..a358bedc193 100644
--- a/theano/sandbox/tests/test_rng_mrg.py
+++ b/theano/sandbox/tests/test_rng_mrg.py
@@ -560,8 +560,8 @@ def test_uniform():
         u = R.uniform(size=size,
                       nstreams=rng_mrg.guess_n_streams(size, warn=False))
         f = theano.function(var_input, u, mode=mode)
-        assert any([isinstance(node.op, theano.sandbox.rng_mrg.mrg_uniform)
-                    for node in f.maker.fgraph.toposort()])
+        assert any( isinstance(node.op, theano.sandbox.rng_mrg.mrg_uniform)
+                    for node in f.maker.fgraph.toposort())
         # theano.printing.debugprint(f)
         cpu_out = f(*input)
@@ -587,9 +587,9 @@ def test_uniform():
             f = theano.function(var_input, theano.Out(
                 theano.sandbox.cuda.basic_ops.gpu_from_host(u),
                 borrow=True), mode=mode_with_gpu)
-            assert any([isinstance(node.op,
+            assert any( isinstance(node.op,
                         theano.sandbox.rng_mrg.GPU_mrg_uniform)
-                        for node in f.maker.fgraph.toposort()])
+                        for node in f.maker.fgraph.toposort())
             # theano.printing.debugprint(f)
             gpu_out = numpy.asarray(f(*input))
diff --git a/theano/scalar/basic.py b/theano/scalar/basic.py
index 29627c209c9..d4f39b5e90a 100644
--- a/theano/scalar/basic.py
+++ b/theano/scalar/basic.py
@@ -714,7 +714,7 @@ def __new__(self, type):
 def upcast_out_no_complex(*types):
-    if any([type in complex_types for type in types]):
+    if any( type in complex_types for type in types):
         raise TypeError('complex type are not supported')
     return get_scalar_type(dtype=Scalar.upcast(*types)),
@@ -1388,7 +1388,7 @@ def impl(self, *inputs):
     def c_code(self, node, name, inputs, outputs, sub):
         (x, y) = inputs
         (z,) = outputs
-        if any([i.type in complex_types for i in node.inputs]):
+        if any( i.type in complex_types for i in node.inputs):
             raise NotImplementedError()
         # Test for both y>x and x>=y to detect NaN
         return ('%(z)s = ((%(y)s)>(%(x)s)? (%(y)s): '
@@ -1427,7 +1427,7 @@ def impl(self, *inputs):
     def c_code(self, node, name, inputs, outputs, sub):
         (x, y) = inputs
         (z,) = outputs
-        if any([i.type in complex_types for i in node.inputs]):
+        if any( i.type in complex_types for i in node.inputs):
             raise NotImplementedError()
         return ('%(z)s = ((%(y)s)<(%(x)s)? (%(y)s): '
                 '((%(x)s)<=(%(y)s)? (%(x)s): nan("")));' % locals())
@@ -3605,8 +3605,8 @@ def __init__(self, inputs, outputs):
         # only 1 new Composite each time at the output.
         for i in inputs:
             assert i not in outputs  # This isn't supported, use identity
-        if len(outputs) > 1 or not any([isinstance(var.owner.op, Composite)
-                                        for var in outputs]):
+        if len(outputs) > 1 or not any( isinstance(var.owner.op, Composite)
+                                        for var in outputs):
             # No inner Composite
             inputs, outputs = gof.graph.clone(inputs, outputs)
         else:
diff --git a/theano/scan_module/scan_op.py b/theano/scan_module/scan_op.py
index d614bdf97b1..8380400878a 100644
--- a/theano/scan_module/scan_op.py
+++ b/theano/scan_module/scan_op.py
@@ -2010,7 +2010,8 @@ def compute_all_gradients(known_grads):
         out_indices = [get_out_idx(self_outputs.index(y)) for y in y_s]
         connected_inputs = [i for i in range(len(scan_node.inputs)) if
-                            any([connection_pattern[i][odx] for odx in out_indices])]
+                            any(
+                                connection_pattern[i][odx] for odx in out_indices)]
         wrt = [x for x in theano.gof.graph.inputs(y_s)
                if (x in diff_inputs) and
@@ -2843,8 +2844,8 @@ def profile_printer(fct_name, compile_time, fct_call_time, fct_call,
                     apply_time, apply_cimpl, message, outputs_size,
                     other_time):
     # Scan overhead profile
-    if any([isinstance(node.op, Scan) and v > 0 for (_, node), v in
-            apply_time.items()]):
+    if any( isinstance(node.op, Scan) and v > 0 for (_, node), v in
+            apply_time.items()):
         print()
         print('Scan overhead:')
         print (' = 0 for ll in l]):
+            if any( f.find(ll) >= 0 for ll in l):
                 found_dyn = True
         if not found_dyn and dirs:
             _logger.warning(
@@ -1924,8 +1924,9 @@ def local_dot22_to_dot22scalar(node):
         # We support only 1 additional level of mul.
         # The canonizer should have merged those mul together.
         i_mul = [x.owner and x.owner.op == T.mul and
-                 any([_as_scalar(x_i, dtype=d.dtype)
-                      for x_i in x.owner.inputs])
+                 any(
+                     _as_scalar(x_i, dtype=d.dtype)
+                     for x_i in x.owner.inputs)
                  for x in node.inputs]
         if not any(i_mul):
             # no scalar in input and no multiplication
diff --git a/theano/tensor/elemwise.py b/theano/tensor/elemwise.py
index b12d041be16..13fddddc62a 100755
--- a/theano/tensor/elemwise.py
+++ b/theano/tensor/elemwise.py
@@ -991,8 +991,8 @@ def _c_all(self, node, nodename, inames, onames, sub):
         # This is to protect again futur change of uniq.
         assert len(inames) == len(inputs)
         ii, iii = list(zip(*gof.utils.uniq(list(zip(_inames, node.inputs)))))
-        assert all([x == y for x, y in zip(ii, inames)])
-        assert all([x == y for x, y in zip(iii, inputs)])
+        assert all( x == y for x, y in zip(ii, inames))
+        assert all( x == y for x, y in zip(iii, inputs))
         defines = ""
         undefs = ""
@@ -1177,7 +1177,7 @@ def _c_all(self, node, nodename, inames, onames, sub):
         # If all inputs and outputs are contiguous
         # and the scalar op define optimized code for that case
         # use it! The scalar_op need to check the broadcast flag himself.
-        if (all([o.ndim >= 1 for o in node.outputs]) and
+        if (all(o.ndim >= 1 for o in node.outputs) and
                 # Don't use the contig code for broadcasted scalar.
                 not all(node.outputs[0].broadcastable)):
             contig = None
@@ -1192,9 +1192,9 @@ def _c_all(self, node, nodename, inames, onames, sub):
             # Try to make one generic version, this will help the
             # compiler to vectorize the code as their won't be as
            # many ptr and the stride will be hard coded.
-            if all([io.broadcastable == node.outputs[0].broadcastable or
+            if all( io.broadcastable == node.outputs[0].broadcastable or
                     all(io.broadcastable)
-                    for io in node.inputs + node.outputs]):
+                    for io in node.inputs + node.outputs):
                 z = onames[0]
                 contig = """
                 // All output have the same size
diff --git a/theano/tensor/nnet/tests/test_abstract_conv.py b/theano/tensor/nnet/tests/test_abstract_conv.py
index 1b68587f6ab..a91fd97a431 100644
--- a/theano/tensor/nnet/tests/test_abstract_conv.py
+++ b/theano/tensor/nnet/tests/test_abstract_conv.py
@@ -132,8 +132,8 @@ def run_fwd(self, inputs_shape, filters_shape, ref=conv_corr,
         f = theano.function([], c, mode=mode)
         if target_op is not None:
-            assert any([isinstance(n.op, target_op) for n
-                        in f.maker.fgraph.toposort()])
+            assert any( isinstance(n.op, target_op) for n
+                        in f.maker.fgraph.toposort())
             if check_trace:
                 self.assertTrue(check_stack_trace(f, ops_to_check=target_op))
@@ -182,8 +182,8 @@ def run_gradweight(self, inputs_shape, filters_shape, output_shape,
         f_ref = theano.function([], c_ref, mode='FAST_RUN')
         if target_op is not None:
-            assert any([isinstance(n.op, target_op) for n
-                        in f.maker.fgraph.toposort()])
+            assert any( isinstance(n.op, target_op) for n
+                        in f.maker.fgraph.toposort())
             if check_trace:
                 self.assertTrue(check_stack_trace(f, ops_to_check=target_op))
@@ -233,8 +233,8 @@ def run_gradinput(self, inputs_shape, filters_shape, output_shape,
         f_ref = theano.function([], c_ref, mode='FAST_RUN')
         if target_op is not None:
-            assert any([isinstance(n.op, target_op) for n
-                        in f.maker.fgraph.toposort()])
+            assert any( isinstance(n.op, target_op) for n
+                        in f.maker.fgraph.toposort())
             if check_trace:
                 self.assertTrue(check_stack_trace(f, ops_to_check=target_op))
diff --git a/theano/tensor/nnet/tests/test_neighbours.py b/theano/tensor/nnet/tests/test_neighbours.py
index 6e6124d071c..646913c6aef 100644
--- a/theano/tensor/nnet/tests/test_neighbours.py
+++ b/theano/tensor/nnet/tests/test_neighbours.py
@@ -41,8 +41,8 @@ def test_neibs(self):
                 g = function([], neibs2images(neibs, neib_shape, images.shape),
                              mode=self.mode)
-                assert any([isinstance(node.op, self.op)
-                            for node in f.maker.fgraph.toposort()])
+                assert any( isinstance(node.op, self.op)
+                            for node in f.maker.fgraph.toposort())
                 # print g()
                 assert numpy.allclose(images.get_value(borrow=True), g())
@@ -58,8 +58,8 @@ def test_neibs_manual(self):
             for border in ['valid', 'ignore_borders']:
                 f = function([], images2neibs(images, neib_shape, mode=border),
                              mode=self.mode)
-                assert any([isinstance(node.op, self.op)
-                            for node in f.maker.fgraph.toposort()])
+                assert any( isinstance(node.op, self.op)
+                            for node in f.maker.fgraph.toposort())
                 # print images.get_value(borrow=True)
                 neibs = f()
diff --git a/theano/tensor/nnet/tests/test_sigm.py b/theano/tensor/nnet/tests/test_sigm.py
index 6465ec953aa..33205465ba5 100644
--- a/theano/tensor/nnet/tests/test_sigm.py
+++ b/theano/tensor/nnet/tests/test_sigm.py
@@ -349,7 +349,7 @@ def test_local_hard_sigmoid(self):
         mode = self.get_mode().including('local_hard_sigmoid')
         f = theano.function([x], s, mode=mode)
         topo = f.maker.fgraph.toposort()
-        assert not any([n.op == sigmoid for n in topo])
+        assert not any( n.op == sigmoid for n in topo)
         ux_v = f([[-50, -10, -4, -1, 0, 1, 4, 10, 50]])
         mode2 = mode.excluding('fusion').excluding('inplace')
diff --git a/theano/tensor/opt.py b/theano/tensor/opt.py
index 86146374668..6d9ce663af4 100644
--- a/theano/tensor/opt.py
+++ b/theano/tensor/opt.py
@@ -331,8 +331,9 @@ def inplace_elemwise_optimizer(fgraph):
                         updated_vars.append(inp_idx)
                     elif (hasattr(fgraph, 'destroy_handler') and
                           inp.owner and
-                          any([fgraph.destroy_handler.root_destroyer.get(up_inp, None) is inp.owner
-                               for up_inp in updated_inputs])):
+                          any(
+                              fgraph.destroy_handler.root_destroyer.get(up_inp, None) is inp.owner
+                              for up_inp in updated_inputs)):
                         # the candidate input is a variable computed
                         # inplace on the updated input via a sequence of
@@ -1116,14 +1117,14 @@ def set_shape(self, r, s, override=False):
                     shape_vars.append(self.lscalar_one)
                 else:
                     shape_vars.append(self.unpack(s[i]))
-        assert all([not hasattr(r.type, "broadcastable") or
+        assert all( not hasattr(r.type, "broadcastable") or
                     not r.type.broadcastable[i] or
                     # The two following comparison are a speed optimization
                     # But we never timed this speed optimization!
                     self.lscalar_one.equals(shape_vars[i]) or
                     self.lscalar_one.equals(
                         T.extract_constant(shape_vars[i]))
-                    for i in xrange(r.ndim)])
+                    for i in xrange(r.ndim))
         self.shape_of[r] = tuple(shape_vars)
         for sv in shape_vars:
             self.shape_of_reverse_index.setdefault(sv, set()).add(r)
@@ -1194,7 +1195,7 @@ def update_shape(self, r, other_r):
                 merged_shape.append(r_shape[i])
             else:
                 merged_shape.append(other_shape[i])
-        assert all([(not hasattr(r.type, "broadcastable") or
+        assert all( (not hasattr(r.type, "broadcastable") or
                      not r.type.broadcastable[i] and
                      not other_r.type.broadcastable[i]) or
                     # The two following comparison are a speed optimization
                    # But we never timed this speed optimization!
                    self.lscalar_one.equals(merged_shape[i]) or
                    self.lscalar_one.equals(
                        T.extract_constant(merged_shape[i], only_process_constants=True))
-                    for i in xrange(r.ndim)])
+                    for i in xrange(r.ndim))
         self.shape_of[r] = tuple(merged_shape)
         for sv in self.shape_of[r]:
             self.shape_of_reverse_index.setdefault(sv, set()).add(r)
@@ -1219,13 +1220,13 @@ def set_shape_i(self, r, i, s_i):
                 new_shape.append(self.unpack(s_i))
             else:
                 new_shape.append(s_j)
-        assert all([not hasattr(r.type, "broadcastable") or
+        assert all( not hasattr(r.type, "broadcastable") or
                     not r.type.broadcastable[idx] or
                     # The two following comparison are a speed optimization
                     # But we never timed this speed optimization!
                     self.lscalar_one.equals(new_shape[idx]) or
                     self.lscalar_one.equals(T.extract_constant(new_shape[idx]))
-                    for idx in xrange(r.ndim)])
+                    for idx in xrange(r.ndim))
         self.shape_of[r] = tuple(new_shape)
         for sv in self.shape_of[r]:
             self.shape_of_reverse_index.setdefault(sv, set()).add(r)
@@ -1490,14 +1491,14 @@ def local_elemwise_alloc(node):
     if len(node.outputs) > 1:
         # Ensure all outputs have the same broadcast pattern
         # This is a supposition that I'm not sure is always true.
-        assert all([o.type.broadcastable ==
+        assert all( o.type.broadcastable ==
                     node.outputs[0].type.broadcastable for o in
-                    node.outputs[1:]])
+                    node.outputs[1:])
     # The broadcast pattern of the ouptut must match the broadcast
     # pattern of at least one of the inputs.
-    if not any([i.type.broadcastable ==
-                node.outputs[0].type.broadcastable for i in node.inputs]):
+    if not any( i.type.broadcastable ==
+                node.outputs[0].type.broadcastable for i in node.inputs):
         return False
     def dimshuffled_alloc(i):
@@ -1508,8 +1509,8 @@ def dimshuffled_alloc(i):
     # At least one input must have an owner that is either a AllocOP or a
    # DimShuffleOP with an owner that is a AllocOP -- otherwise there is
    # nothing to optimize.
-    if not any([i.owner and (isinstance(i.owner.op, AllocOP) or
-                             dimshuffled_alloc(i)) for i in node.inputs]):
+    if not any( i.owner and (isinstance(i.owner.op, AllocOP) or
+                             dimshuffled_alloc(i)) for i in node.inputs):
         return False
     # Search for input that we can use as a baseline for the dimensions.
@@ -2636,7 +2637,7 @@ def local_subtensor_lift(node):
         if isinstance(u.owner.op, T.Elemwise):
             new_inputs = []
-            if all([sum(i.type.broadcastable) == 0 for i in u.owner.inputs]):
+            if all( sum(i.type.broadcastable) == 0 for i in u.owner.inputs):
                 # There is no broadcastable in the inputs
                 idx = node.inputs[1:]
                 new_inputs = [node.op(i, *idx) for i in u.owner.inputs]
@@ -2648,8 +2649,8 @@ def local_subtensor_lift(node):
                 # and stacktrace from previous unary operation
                 copy_stack_trace([node.outputs[0], node.inputs[0]], ret)
                 return [ret]
-            elif all([sum(i.type.broadcastable) in [i.ndim, 0]
-                      for i in u.owner.inputs]):
+            elif all( sum(i.type.broadcastable) in [i.ndim, 0]
+                      for i in u.owner.inputs):
                 # There is no broadcastable in the inputs or it is scalar
                 idx = node.inputs[1:]
                 new_inputs = []
@@ -4893,8 +4894,9 @@ def local_useless_elemwise_comparison(node):
        node.inputs[0].owner and \
        isinstance(node.inputs[0].owner.op, Elemwise) and \
        isinstance(node.inputs[0].owner.op.scalar_op, scalar.Add) and \
-       all([isinstance(var.owner and var.owner.op, Shape_i)
-            for var in node.inputs[0].owner.inputs]) and \
+       all( \
+           isinstance(var.owner and var.owner.op, Shape_i)
+           for var in node.inputs[0].owner.inputs) and \
        T.extract_constant(node.inputs[1], only_process_constants=True) == 0:
         return [T.zeros_like(node.inputs[0], dtype=node.outputs[0].dtype)]
@@ -4903,8 +4905,9 @@ def local_useless_elemwise_comparison(node):
        node.inputs[0].owner and \
        isinstance(node.inputs[0].owner.op, Elemwise) and \
        isinstance(node.inputs[0].owner.op.scalar_op, scalar.Add) and \
-       all([isinstance(var.owner and var.owner.op, Shape_i)
-            for var in node.inputs[0].owner.inputs]) and \
+       all( \
+           isinstance(var.owner and var.owner.op, Shape_i)
+           for var in node.inputs[0].owner.inputs) and \
        T.extract_constant(node.inputs[1], only_process_constants=True) == 0:
         return [T.ones_like(node.inputs[0], dtype=node.outputs[0].dtype)]
diff --git a/theano/tensor/signal/pool.py b/theano/tensor/signal/pool.py
index ca4cd18a0e8..6f078544f70 100755
--- a/theano/tensor/signal/pool.py
+++ b/theano/tensor/signal/pool.py
@@ -239,7 +239,7 @@ def __init__(self, ds, ignore_border=False, st=None, padding=(0, 0),
                  mode='max', openmp=None):
         super(Pool, self).__init__(openmp=openmp)
         self.ds = tuple(ds)
-        if not all([isinstance(d, integer_types) for d in ds]):
+        if not all( isinstance(d, integer_types) for d in ds):
             raise ValueError(
                 "Pool downsample parameters must be ints."
@@ -914,7 +914,7 @@ class DownsampleFactorMaxGradGrad(OpenMPOp):
     def __init__(self, ds, ignore_border, st=None, padding=(0, 0),
                  mode='max', openmp=None):
         self.ds = tuple(ds)
-        if not all([isinstance(d, integer_types) for d in ds]):
+        if not all( isinstance(d, integer_types) for d in ds):
             raise ValueError(
                 "Pool downsample parameters must be ints."
" Got %s" % str(ds)) diff --git a/theano/tensor/tests/mlp_test.py b/theano/tensor/tests/mlp_test.py index 63f08fd3a12..eef90ebf6cf 100644 --- a/theano/tensor/tests/mlp_test.py +++ b/theano/tensor/tests/mlp_test.py @@ -297,7 +297,7 @@ def test_mlp(): mode=mode) # print 'MODEL 1' #theano.printing.debugprint(train_model, print_type=True) - assert any([isinstance(i.op, T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()]) + assert any( isinstance(i.op, T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()) # Even without FeatureShape train_model = theano.function( inputs=[index], @@ -309,7 +309,7 @@ def test_mlp(): # print # print 'MODEL 2' #theano.printing.debugprint(train_model, print_type=True) - assert any([isinstance(i.op, T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()]) + assert any( isinstance(i.op, T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()) if __name__ == '__main__': test_mlp() diff --git a/theano/tensor/tests/test_basic.py b/theano/tensor/tests/test_basic.py index 86a22677c75..1335d100699 100644 --- a/theano/tensor/tests/test_basic.py +++ b/theano/tensor/tests/test_basic.py @@ -380,8 +380,8 @@ def test_good(self): expecteds = self.expected(*inputs) eps = 1e-10 - if any([i.dtype in ('float32', 'int8', 'uint8') - for i in inputs]): + if any( i.dtype in ('float32', 'int8', 'uint8') + for i in inputs): eps = 1e-6 eps = numpy.max([eps, _eps]) @@ -4189,8 +4189,8 @@ def test_split_0elem(self): m = self.shared(rng.rand(4, 6).astype(self.floatX)) o = self.split_op_class(2)(m, 0, [4, 0]) f = function([], o, mode=self.mode) - assert any([isinstance(node.op, self.split_op_class) - for node in f.maker.fgraph.toposort()]) + assert any( isinstance(node.op, self.split_op_class) + for node in f.maker.fgraph.toposort()) o1, o2 = f() assert numpy.allclose(o1, m.get_value(borrow=True)) assert numpy.allclose(o2, m.get_value(borrow=True)[4:]) @@ -4200,8 +4200,8 @@ def test_split_neg(self): m = self.shared(rng.rand(4, 6).astype(self.floatX)) o = self.split_op_class(2)(m, 0, [5, -1]) f = function([], o, mode=self.mode) - assert any([isinstance(node.op, self.split_op_class) - for node in f.maker.fgraph.toposort()]) + assert any( isinstance(node.op, self.split_op_class) + for node in f.maker.fgraph.toposort()) self.assertRaises(ValueError, f) diff --git a/theano/tensor/tests/test_elemwise.py b/theano/tensor/tests/test_elemwise.py index b4579937c11..85430f35a6c 100644 --- a/theano/tensor/tests/test_elemwise.py +++ b/theano/tensor/tests/test_elemwise.py @@ -387,7 +387,7 @@ def with_linker(self, linker, scalar_op=scalar.add, dtype="floatX", if pre_scalar_op is not None: zv = Elemwise(scalar_op=pre_scalar_op)(x).eval({x: xv}) numpy_raised = False - if len(tosum) > 1 and any([a < 0 for a in tosum]): + if len(tosum) > 1 and any( a < 0 for a in tosum): # In that case, we need to use the good order of axis # in the reduction. 
                     axis2 = []
diff --git a/theano/tensor/tests/test_extra_ops.py b/theano/tensor/tests/test_extra_ops.py
index 3937fe15a5c..a517adc0ae9 100644
--- a/theano/tensor/tests/test_extra_ops.py
+++ b/theano/tensor/tests/test_extra_ops.py
@@ -28,7 +28,7 @@ def test_cpu_contiguous():
     a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
     f = theano.function([a, i], cpu_contiguous(a.reshape((5, 4))[::i]))
     topo = f.maker.fgraph.toposort()
-    assert any([isinstance(node.op, CpuContiguous) for node in topo])
+    assert any( isinstance(node.op, CpuContiguous) for node in topo)
     assert f(a_val, 1).flags['C_CONTIGUOUS']
     assert f(a_val, 2).flags['C_CONTIGUOUS']
     assert f(a_val, 3).flags['C_CONTIGUOUS']
diff --git a/theano/tensor/tests/test_opt.py b/theano/tensor/tests/test_opt.py
index a10b549e97c..7961861d5c6 100644
--- a/theano/tensor/tests/test_opt.py
+++ b/theano/tensor/tests/test_opt.py
@@ -1757,7 +1757,7 @@ def test_local_useless_subtensor():
             assert prog[1].op == tensor.exp, dims
             assert len(prog) == 2, dims
         else:
-            assert any([isinstance(node.op, Subtensor) for node in prog])
+            assert any( isinstance(node.op, Subtensor) for node in prog)
         f([[0, 1, 2], [3, 4, 5]])  # let debugmode test something
     # Test Variable
@@ -1780,7 +1780,7 @@ def test_local_useless_subtensor():
             assert prog[0].op == tensor.exp, dims
             assert len(prog) == 1, dims
         else:
-            assert any([isinstance(node.op, Subtensor) for node in prog])
+            assert any( isinstance(node.op, Subtensor) for node in prog)
         f([[0, 1, 2], [3, 4, 5]])  # let debugmode test something
     # Test mix Variable and Constant
     # Currently not supported
@@ -1795,7 +1795,7 @@ def test_local_useless_subtensor():
            assert prog[0].op == tensor.exp, dims
            assert len(prog) == 1, dims
        else:
-            assert any([isinstance(node.op, Subtensor) for node in prog])
+            assert any( isinstance(node.op, Subtensor) for node in prog)
        f([[0, 1, 2], [3, 4, 5]])  # let debugmode test something
    # Test scalar variable
@@ -1810,7 +1810,7 @@ def test_local_useless_subtensor():
            assert prog[0].op == tensor.exp, dims
            assert len(prog) == 1, dims
        else:
-            assert any([isinstance(node.op, Subtensor) for node in prog])
+            assert any( isinstance(node.op, Subtensor) for node in prog)
        f([[1, 2, 3], [4, 5, 6]], 1)
        f([[1, 2, 3], [4, 5, 6]], 3)
@@ -1833,8 +1833,8 @@ def test_local_useless_subtensor():
            assert prog[1].op == tensor.exp, dims
            assert len(prog) == 2, dims
        else:
-            assert any([isinstance(node.op, AdvancedSubtensor1)
-                        for node in prog])
+            assert any( isinstance(node.op, AdvancedSubtensor1)
+                        for node in prog)
        f([[0, 1, 2], [3, 4, 5]])  # let debugmode test something
@@ -2995,12 +2995,12 @@ def test_local_IncSubtensor_serialize():
     adds = [n for n in topo if isinstance(n.op, T.Elemwise) and
             isinstance(n.op.scalar_op, theano.scalar.Add)]
     for a in adds:
-        assert not any([inp.owner and
+        assert not any( inp.owner and
                         isinstance(inp.owner.op,
                                    (tensor.IncSubtensor,
                                     tensor.AdvancedIncSubtensor,
                                     tensor.AdvancedIncSubtensor1))
-                        for inp in a.inputs])
+                        for inp in a.inputs)
     # Now test that the stack trace is copied over properly,
     # if we return the gradients. We need to use same mode as before.
@@ -4499,7 +4499,7 @@ def test_constant_folding():
     f = theano.function([], [x * 2, x + x], mode=mode)
     topo = f.maker.fgraph.toposort()
     assert len(topo) == 2
-    assert all([isinstance(n.op, DeepCopyOp) for n in topo])
+    assert all( isinstance(n.op, DeepCopyOp) for n in topo)
 def test_constant_get_stabilized():
@@ -5315,14 +5315,14 @@ def test_local_sum_prod_alloc(self):
            assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1]
            topo = f.maker.fgraph.toposort()
            assert topo[-1].op == T.alloc
-            assert not any([isinstance(node.op, T.Sum) for node in topo])
+            assert not any( isinstance(node.op, T.Sum) for node in topo)
            for i in xrange(3):
                f = theano.function([a], t_like(a).sum(i), mode=mode)
                utt.assert_allclose(f(input), n_like(input).sum(i))
                assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2]
                topo = f.maker.fgraph.toposort()
                assert topo[-1].op == T.alloc
-                assert not any([isinstance(node.op, T.Sum) for node in topo])
+                assert not any( isinstance(node.op, T.Sum) for node in topo)
            # test prod
            f = theano.function([a], t_like(a).prod(None), mode=mode)
@@ -5339,14 +5339,14 @@ def test_local_sum_prod_alloc(self):
            #assert len(f.maker.fgraph.apply_nodes) == nb_nodes[1]
            topo = f.maker.fgraph.toposort()
            assert topo[-1].op == T.alloc
-            assert not any([isinstance(node.op, T.elemwise.Prod) for node in topo])
+            assert not any( isinstance(node.op, T.elemwise.Prod) for node in topo)
            for i in range(3):
                f = theano.function([a], t_like(a).prod(i), mode=mode)
                utt.assert_allclose(f(input), n_like(input).prod(i))
                #assert len(f.maker.fgraph.apply_nodes) == nb_nodes[2]
                topo = f.maker.fgraph.toposort()
                assert topo[-1].op == T.alloc
-                assert not any([isinstance(node.op, T.elemwise.Prod) for node in topo])
+                assert not any( isinstance(node.op, T.elemwise.Prod) for node in topo)
            backup = config.warn.sum_sum_bug
            config.warn.sum_sum_bug = False
@@ -5359,8 +5359,8 @@ def test_local_sum_prod_alloc(self):
                assert len(f.maker.fgraph.apply_nodes) == nb_nodes[3]
                topo = f.maker.fgraph.toposort()
                assert topo[-1].op == T.alloc
-                assert not any([isinstance(node.op,
-                                           T.Sum) for node in topo])
+                assert not any( isinstance(node.op,
+                                           T.Sum) for node in topo)
            finally:
                config.warn.sum_sum_bug = backup
@@ -5431,18 +5431,16 @@ def test_local_reduce_broadcast_all_0(self):
                     tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True, True))()
            f = theano.function([x], [fct(x)], mode=self.mode)
-            assert not any([
-                isinstance(node.op, T.CAReduce)
-                for node in f.maker.fgraph.toposort()])
+            assert not any( isinstance(node.op, T.CAReduce)
+                            for node in f.maker.fgraph.toposort())
    def test_local_reduce_broadcast_all_1(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True))()
            f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)
-            assert not any([
-                isinstance(node.op, T.CAReduce)
-                for node in f.maker.fgraph.toposort()])
+            assert not any( isinstance(node.op, T.CAReduce)
+                            for node in f.maker.fgraph.toposort())
    def test_local_reduce_broadcast_some_0(self):
        for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
@@ -5470,9 +5468,8 @@ def test_local_reduce_broadcast_some_1(self):
                    tensor.max, tensor.min]:
            x = T.TensorType('int64', (True, True, True))()
            f = theano.function([x], [fct(x, axis=[0, 2])], mode=self.mode)
-            assert not any([
-                isinstance(node.op, T.CAReduce)
-                for node in f.maker.fgraph.toposort()])
+            assert not any( isinstance(node.op, T.CAReduce)
+                            for node in f.maker.fgraph.toposort())
    def test_local_reduce_join(self):
        vx = matrix()
@@ -5884,8 +5881,8 @@ def test_local_join_empty():
     assert numpy.all(val == [1])
     e = f.maker.fgraph.toposort()
     assert len([n for n in e if isinstance(n.op, Join)]) == 1
-    assert all([not isinstance(n.op, Join) or len(n.inputs) == 3
-                for n in e if isinstance(n.op, Join)])
+    assert all( not isinstance(n.op, Join) or len(n.inputs) == 3
+                for n in e if isinstance(n.op, Join))
     assert f.maker.fgraph.outputs[0].dtype == config.floatX
@@ -5898,8 +5895,8 @@ def test_local_join_empty():
     assert numpy.all(val == [[1]])
     e = f.maker.fgraph.toposort()
     assert len([n for n in e if isinstance(n.op, Join)]) == 1
-    assert all([not isinstance(n.op, Join) or len(n.inputs) == 4
-                for n in e if isinstance(n.op, Join)])
+    assert all( not isinstance(n.op, Join) or len(n.inputs) == 4
+                for n in e if isinstance(n.op, Join))
     assert f.maker.fgraph.outputs[0].dtype == config.floatX
     # test for vector, vector, empty to matrix
     # We can't optimize this case.
@@ -5909,8 +5906,8 @@ def test_local_join_empty():
     assert numpy.all(val == [1])
     e = f.maker.fgraph.toposort()
     assert len([n for n in e if isinstance(n.op, Join)]) == 1
-    assert all([not isinstance(n.op, Join) or len(n.inputs) == 4
-                for n in e if isinstance(n.op, Join)])
+    assert all( not isinstance(n.op, Join) or len(n.inputs) == 4
+                for n in e if isinstance(n.op, Join))
     assert f.maker.fgraph.outputs[0].dtype == config.floatX
     # test for matrix join(0,a)
     # We can't optimize this case.
@@ -5920,8 +5917,8 @@ def test_local_join_empty():
     assert numpy.all(val == [[1], [2], [1]])
     e = f.maker.fgraph.toposort()
     assert len([n for n in e if isinstance(n.op, Join)]) == 1
-    assert all([not isinstance(n.op, Join) or len(n.inputs) == 4
-                for n in e if isinstance(n.op, Join)])
+    assert all( not isinstance(n.op, Join) or len(n.inputs) == 4
+                for n in e if isinstance(n.op, Join))
     assert f.maker.fgraph.outputs[0].dtype == config.floatX
@@ -5936,8 +5933,8 @@ def test_local_join_make_vector():
     assert numpy.all(val == [1, 7, 8, 2, 3, 4, 6])
     e = f.maker.fgraph.toposort()
     assert len([n for n in e if isinstance(n.op, Join)]) == 1
-    assert all([not isinstance(n.op, Join) or len(n.inputs) == 4
-                for n in e if isinstance(n.op, Join)])
+    assert all( not isinstance(n.op, Join) or len(n.inputs) == 4
+                for n in e if isinstance(n.op, Join))
     assert f.maker.fgraph.outputs[0].dtype == config.floatX
     assert check_stack_trace(f, ops_to_check='all')
diff --git a/theano/tensor/tests/test_subtensor.py b/theano/tensor/tests/test_subtensor.py
index 21531811fca..1b5544efc07 100644
--- a/theano/tensor/tests/test_subtensor.py
+++ b/theano/tensor/tests/test_subtensor.py
@@ -814,12 +814,12 @@ def grad_list_(self, idxs, data):
             f = self.function([], [gn, gn.shape], op=self.adv_incsub1)
             topo = f.maker.fgraph.toposort()
             if not self.fast_compile:
-                assert any([isinstance(node.op, self.
-                                       adv_incsub1) and node.op.inplace for node in topo])
+                assert any( isinstance(node.op, self.
+                                       adv_incsub1) and node.op.inplace for node in topo)
             else:
-                assert any([isinstance(node.op, self.
-                                       adv_incsub1) for node in topo])
+                assert any( isinstance(node.op, self.
+                                       adv_incsub1) for node in topo)
-            assert any([isinstance(node.op, self.adv_sub1) for node in topo])
+            assert any( isinstance(node.op, self.adv_sub1) for node in topo)
             gval, gshape = f()
             good = numpy.zeros_like(data)
             # don't work when the same index is used many time
diff --git a/theano/tensor/utils.py b/theano/tensor/utils.py
index cdd177a2576..7cfb7e117c4 100644
--- a/theano/tensor/utils.py
+++ b/theano/tensor/utils.py
@@ -74,7 +74,7 @@ def shape_of_variables(fgraph, input_shapes):
     compute_shapes = theano.function(input_dims, output_dims)
-    if any([i not in fgraph.inputs for i in input_shapes.keys()]):
+    if any( i not in fgraph.inputs for i in input_shapes.keys()):
         raise ValueError(
             "input_shapes keys aren't in the fgraph.inputs. FunctionGraph()"
             " interface changed. Now by default, it clones the graph it receives."
diff --git a/theano/tensor/var.py b/theano/tensor/var.py
index b8592b47bde..0e8f18dde3d 100644
--- a/theano/tensor/var.py
+++ b/theano/tensor/var.py
@@ -467,7 +467,8 @@ def astype(self, dtype):
     # SLICING/INDEXING
     def __getitem__(self, args):
         if (isinstance(args, list) and
-                any([isinstance(a, slice) for a in args])):
+                any(
+                    isinstance(a, slice) for a in args)):
             pass
         elif not isinstance(args, tuple):
             args = args,
diff --git a/theano/tests/record.py b/theano/tests/record.py
index dcf4678833c..6ee53ba4489 100644
--- a/theano/tests/record.py
+++ b/theano/tests/record.py
@@ -224,8 +224,8 @@ def callback(i, node, fn):
                                    "running during the playback.")
             if fgraph not in self.known_fgraphs:
-                assert not any([elem.name == fgraph.name
-                                for elem in self.known_fgraphs])
+                assert not any( elem.name == fgraph.name
+                                for elem in self.known_fgraphs)
                 self.known_fgraphs.add(fgraph)
                 num_app = len(fgraph.apply_nodes)
                 line = 'Function ' + fgraph.name + ' has ' + str(num_app) \
@@ -236,7 +236,7 @@ def callback(i, node, fn):
             handle_line(line, i, node, fn)
             line = 'Node ' + str(i) + ':' + str(node) + '\n'
             handle_line(line, i, node, fn)
-            assert all([isinstance(x, list) and len(x) == 1 for x in fn.inputs])
+            assert all( isinstance(x, list) and len(x) == 1 for x in fn.inputs)
             def digest(x):
                 x = x[0]
diff --git a/theano/tests/test_gradient.py b/theano/tests/test_gradient.py
index 6b6cef6b5e4..9506144b6ed 100644
--- a/theano/tests/test_gradient.py
+++ b/theano/tests/test_gradient.py
@@ -774,8 +774,8 @@ def test_grad_clip():
     if theano.config.mode != "FAST_COMPILE":
         topo = f.maker.fgraph.toposort()
-        assert not any([isinstance(node.op, gradient.GradClip)
-                        for node in topo])
+        assert not any( isinstance(node.op, gradient.GradClip)
+                        for node in topo)
     out = f(2.)
     assert np.allclose(out, (1, 4))
     assert not np.allclose(out[0], out[1])
diff --git a/theano/tests/test_ifelse.py b/theano/tests/test_ifelse.py
index e5e9f07ca3b..0a270826378 100644
--- a/theano/tests/test_ifelse.py
+++ b/theano/tests/test_ifelse.py
@@ -204,14 +204,14 @@ def test_multiple_out_grad(self):
         values = [numpy.asarray(rng.uniform(size=(l,)), theano.config.floatX)
                   for l in lens]
         outs_1 = f(1, *values)
-        assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
+        assert all( x.shape[0] == y for x, y in zip(outs_1, lens))
         assert numpy.all(outs_1[0] == 1.)
         assert numpy.all(outs_1[1] == 1.)
         assert numpy.all(outs_1[2] == 0.)
         assert numpy.all(outs_1[3] == 0.)
         outs_0 = f(0, *values)
-        assert all([x.shape[0] == y for x, y in zip(outs_1, lens)])
+        assert all( x.shape[0] == y for x, y in zip(outs_1, lens))
         assert numpy.all(outs_0[0] == 0.)
         assert numpy.all(outs_0[1] == 0.)
         assert numpy.all(outs_0[2] == 1.)