commit/StationPlaylist: 2 new changesets

  • From: commits-noreply@xxxxxxxxxxxxx
  • To: nvda-addons-commits@xxxxxxxxxxxxx
  • Date: Thu, 16 Jul 2015 19:31:57 -0000

2 new commits in StationPlaylist:

https://bitbucket.org/nvdaaddonteam/stationplaylist/commits/6c874c4ecb59/
Changeset: 6c874c4ecb59
Branch: None
User: josephsl
Date: 2015-07-16 17:55:48+00:00
Summary: Cart Explorer R2 (6.0-dev): Early prototype of redesigned Cart Explorer initialization routine.

Now that the cart format is better known (it is CSV), it makes more sense to use
another module from Python's standard library, one that does not ship with NVDA
Core and is therefore bundled with the add-on. The csv (comma-separated values)
module reads files with tabular data row by row, and Cart Explorer will now use
csv routines to parse cart files.

Other changes:
* The CSV-based implementation lives in the SPL Misc module.
* With the introduction of the csv module, the manual parser has been removed.
* Cart-to-string conversion is now part of a preprocessor function, and
cartsReader has been renamed to cartExplorerInit for clarity.
* The app module will try the splmisc version. The existing Cart Explorer
routines in the app module are kept for testing purposes, and once the splmisc
version matures, they will be removed.
More to come later.
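
For illustration, a minimal sketch of the parsing approach described above,
assuming a hypothetical cart bank path and the layout used by the new routine
(a header row followed by a row of cart entries); the add-on's actual
implementation is cartExplorerInit in splmisc.py, shown in the diff below:

import csv
import os

# Hypothetical cart bank location; Studio keeps these files in its Data folder.
cartFile = os.path.join(os.environ["PROGRAMFILES"], "StationPlaylist", "Data", "main carts.cart")

with open(cartFile) as cartInfo:
	# csv.reader yields each line of the cart bank as a list of fields.
	rows = [row for row in csv.reader(cartInfo)]

# Per the routine below, the second row holds the cart entries themselves.
for entry in rows[1]:
	print entry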

Affected #: 3 files

diff --git a/addon/appModules/splstudio/__init__.py b/addon/appModules/splstudio/__init__.py
index 1dec9c4..0907d90 100755
--- a/addon/appModules/splstudio/__init__.py
+++ b/addon/appModules/splstudio/__init__.py
@@ -925,15 +925,16 @@ class AppModule(appModuleHandler.AppModule):
 				# Translators: Presented when cart explorer cannot be entered.
 				ui.message(_("You are not in playlist viewer, cannot enter cart explorer"))
 				return
-			cartsRead, cartCount = self.cartsReader(standardEdition=fg.name.startswith("StationPlaylist Studio Standard"))
-			if not cartsRead:
+			#cartsRead, cartCount = self.cartsReader(standardEdition=fg.name.startswith("StationPlaylist Studio Standard"))
+			self.carts = splmisc.cartExplorerInit(fg.name)
+			if self.carts["faultyCarts"]: #not cartsRead:
 				# Translators: presented when cart explorer could not be switched on.
 				ui.message(_("Some or all carts could not be assigned, cannot enter cart explorer"))
 				return
 			else:
 				self.cartExplorer = True
 				self.cartsBuilder()
-				self.carts["standardLicense"] = True if cartCount < 96 else False
+				#self.carts["standardLicense"] = True if cartCount < 96 else False
 				# Translators: Presented when cart explorer is on.
 				ui.message(_("Entering cart explorer"))
 		else:

diff --git a/addon/appModules/splstudio/csv.py b/addon/appModules/splstudio/csv.py
new file mode 100755
index 0000000..c155ada
--- /dev/null
+++ b/addon/appModules/splstudio/csv.py
@@ -0,0 +1,456 @@
+
+"""
+csv.py - read/write/investigate CSV files
+"""
+
+import re
+from functools import reduce
+from _csv import Error, __version__, writer, reader, register_dialect, \
+ unregister_dialect, get_dialect, list_dialects, \
+ field_size_limit, \
+ QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
+ __doc__
+from _csv import Dialect as _Dialect
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
+ "Error", "Dialect", "__doc__", "excel", "excel_tab",
+ "field_size_limit", "reader", "writer",
+ "register_dialect", "get_dialect", "list_dialects", "Sniffer",
+ "unregister_dialect", "__version__", "DictReader", "DictWriter" ]
+
+class Dialect:
+ """Describe an Excel dialect.
+
+ This must be subclassed (see csv.excel). Valid attributes are:
+ delimiter, quotechar, escapechar, doublequote, skipinitialspace,
+ lineterminator, quoting.
+
+ """
+ _name = ""
+ _valid = False
+ # placeholders
+ delimiter = None
+ quotechar = None
+ escapechar = None
+ doublequote = None
+ skipinitialspace = None
+ lineterminator = None
+ quoting = None
+
+ def __init__(self):
+ if self.__class__ != Dialect:
+ self._valid = True
+ self._validate()
+
+ def _validate(self):
+ try:
+ _Dialect(self)
+ except TypeError, e:
+ # We do this for compatibility with py2.3
+ raise Error(str(e))
+
+class excel(Dialect):
+ """Describe the usual properties of Excel-generated CSV files."""
+ delimiter = ','
+ quotechar = '"'
+ doublequote = True
+ skipinitialspace = False
+ lineterminator = '\r\n'
+ quoting = QUOTE_MINIMAL
+register_dialect("excel", excel)
+
+class excel_tab(excel):
+ """Describe the usual properties of Excel-generated TAB-delimited files."""
+ delimiter = '\t'
+register_dialect("excel-tab", excel_tab)
+
+
+class DictReader:
+ def __init__(self, f, fieldnames=None, restkey=None, restval=None,
+ dialect="excel", *args, **kwds):
+ self._fieldnames = fieldnames # list of keys for the dict
+ self.restkey = restkey # key to catch long rows
+ self.restval = restval # default value for short rows
+ self.reader = reader(f, dialect, *args, **kwds)
+ self.dialect = dialect
+ self.line_num = 0
+
+ def __iter__(self):
+ return self
+
+ @property
+ def fieldnames(self):
+ if self._fieldnames is None:
+ try:
+ self._fieldnames = self.reader.next()
+ except StopIteration:
+ pass
+ self.line_num = self.reader.line_num
+ return self._fieldnames
+
+ # Issue 20004: Because DictReader is a classic class, this setter is
+ # ignored. At this point in 2.7's lifecycle, it is too late to change the
+ # base class for fear of breaking working code. If you want to change
+ # fieldnames without overwriting the getter, set _fieldnames directly.
+ @fieldnames.setter
+ def fieldnames(self, value):
+ self._fieldnames = value
+
+ def next(self):
+ if self.line_num == 0:
+ # Used only for its side effect.
+ self.fieldnames
+ row = self.reader.next()
+ self.line_num = self.reader.line_num
+
+ # unlike the basic reader, we prefer not to return blanks,
+ # because we will typically wind up with a dict full of None
+ # values
+ while row == []:
+ row = self.reader.next()
+ d = dict(zip(self.fieldnames, row))
+ lf = len(self.fieldnames)
+ lr = len(row)
+ if lf < lr:
+ d[self.restkey] = row[lf:]
+ elif lf > lr:
+ for key in self.fieldnames[lr:]:
+ d[key] = self.restval
+ return d
+
+
+class DictWriter:
+ def __init__(self, f, fieldnames, restval="", extrasaction="raise",
+ dialect="excel", *args, **kwds):
+ self.fieldnames = fieldnames # list of keys for the dict
+ self.restval = restval # for writing short dicts
+ if extrasaction.lower() not in ("raise", "ignore"):
+ raise ValueError, \
+ ("extrasaction (%s) must be 'raise' or 'ignore'" %
+ extrasaction)
+ self.extrasaction = extrasaction
+ self.writer = writer(f, dialect, *args, **kwds)
+
+ def writeheader(self):
+ header = dict(zip(self.fieldnames, self.fieldnames))
+ self.writerow(header)
+
+ def _dict_to_list(self, rowdict):
+ if self.extrasaction == "raise":
+ wrong_fields = [k for k in rowdict if k not in self.fieldnames]
+ if wrong_fields:
+ raise ValueError("dict contains fields not in fieldnames: "
+ + ", ".join([repr(x) for x in wrong_fields]))
+ return [rowdict.get(key, self.restval) for key in self.fieldnames]
+
+ def writerow(self, rowdict):
+ return self.writer.writerow(self._dict_to_list(rowdict))
+
+ def writerows(self, rowdicts):
+ rows = []
+ for rowdict in rowdicts:
+ rows.append(self._dict_to_list(rowdict))
+ return self.writer.writerows(rows)
+
+# Guard Sniffer's type checking against builds that exclude complex()
+try:
+ complex
+except NameError:
+ complex = float
+
+class Sniffer:
+ '''
+ "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
+ Returns a Dialect object.
+ '''
+ def __init__(self):
+ # in case there is more than one possible delimiter
+ self.preferred = [',', '\t', ';', ' ', ':']
+
+
+ def sniff(self, sample, delimiters=None):
+ """
+ Returns a dialect (or None) corresponding to the sample
+ """
+
+ quotechar, doublequote, delimiter, skipinitialspace = \
+ self._guess_quote_and_delimiter(sample, delimiters)
+ if not delimiter:
+ delimiter, skipinitialspace = self._guess_delimiter(sample,
+ delimiters)
+
+ if not delimiter:
+ raise Error, "Could not determine delimiter"
+
+ class dialect(Dialect):
+ _name = "sniffed"
+ lineterminator = '\r\n'
+ quoting = QUOTE_MINIMAL
+ # escapechar = ''
+
+ dialect.doublequote = doublequote
+ dialect.delimiter = delimiter
+ # _csv.reader won't accept a quotechar of ''
+ dialect.quotechar = quotechar or '"'
+ dialect.skipinitialspace = skipinitialspace
+
+ return dialect
+
+
+ def _guess_quote_and_delimiter(self, data, delimiters):
+ """
+ Looks for text enclosed between two identical quotes
+ (the probable quotechar) which are preceded and followed
+ by the same character (the probable delimiter).
+ For example:
+ ,'some text',
+ The quote with the most wins, same with the delimiter.
+ If there is no quotechar the delimiter can't be determined
+ this way.
+ """
+
+ matches = []
+ for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
+ '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
+ '(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
+ '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
+ regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
+ matches = regexp.findall(data)
+ if matches:
+ break
+
+ if not matches:
+ # (quotechar, doublequote, delimiter, skipinitialspace)
+ return ('', False, None, 0)
+ quotes = {}
+ delims = {}
+ spaces = 0
+ for m in matches:
+ n = regexp.groupindex['quote'] - 1
+ key = m[n]
+ if key:
+ quotes[key] = quotes.get(key, 0) + 1
+ try:
+ n = regexp.groupindex['delim'] - 1
+ key = m[n]
+ except KeyError:
+ continue
+ if key and (delimiters is None or key in delimiters):
+ delims[key] = delims.get(key, 0) + 1
+ try:
+ n = regexp.groupindex['space'] - 1
+ except KeyError:
+ continue
+ if m[n]:
+ spaces += 1
+
+ quotechar = reduce(lambda a, b, quotes = quotes:
+ (quotes[a] > quotes[b]) and a or b, quotes.keys())
+
+ if delims:
+ delim = reduce(lambda a, b, delims = delims:
+ (delims[a] > delims[b]) and a or b, delims.keys())
+ skipinitialspace = delims[delim] == spaces
+ if delim == '\n': # most likely a file with a single column
+ delim = ''
+ else:
+ # there is *no* delimiter, it's a single column of quoted data
+ delim = ''
+ skipinitialspace = 0
+
+ # if we see an extra quote between delimiters, we've got a
+ # double quoted format
+ dq_regexp = re.compile(
+ r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
+ {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
+
+
+
+ if dq_regexp.search(data):
+ doublequote = True
+ else:
+ doublequote = False
+
+ return (quotechar, doublequote, delim, skipinitialspace)
+
+
+ def _guess_delimiter(self, data, delimiters):
+ """
+ The delimiter /should/ occur the same number of times on
+ each row. However, due to malformed data, it may not. We don't want
+ an all or nothing approach, so we allow for small variations in this
+ number.
+ 1) build a table of the frequency of each character on every line.
+ 2) build a table of frequencies of this frequency (meta-frequency?),
+ e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
+ 7 times in 2 rows'
+ 3) use the mode of the meta-frequency to determine the /expected/
+ frequency for that character
+ 4) find out how often the character actually meets that goal
+ 5) the character that best meets its goal is the delimiter
+ For performance reasons, the data is evaluated in chunks, so it can
+ try and evaluate the smallest portion of the data possible, evaluating
+ additional chunks as necessary.
+ """
+
+ data = filter(None, data.split('\n'))
+
+ ascii = [chr(c) for c in range(127)] # 7-bit ASCII
+
+ # build frequency tables
+ chunkLength = min(10, len(data))
+ iteration = 0
+ charFrequency = {}
+ modes = {}
+ delims = {}
+ start, end = 0, min(chunkLength, len(data))
+ while start < len(data):
+ iteration += 1
+ for line in data[start:end]:
+ for char in ascii:
+ metaFrequency = charFrequency.get(char, {})
+ # must count even if frequency is 0
+ freq = line.count(char)
+ # value is the mode
+ metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
+ charFrequency[char] = metaFrequency
+
+ for char in charFrequency.keys():
+ items = charFrequency[char].items()
+ if len(items) == 1 and items[0][0] == 0:
+ continue
+ # get the mode of the frequencies
+ if len(items) > 1:
+ modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
+ items)
+ # adjust the mode - subtract the sum of all
+ # other frequencies
+ items.remove(modes[char])
+ modes[char] = (modes[char][0], modes[char][1]
+ - reduce(lambda a, b: (0, a[1] + b[1]),
+ items)[1])
+ else:
+ modes[char] = items[0]
+
+ # build a list of possible delimiters
+ modeList = modes.items()
+ total = float(chunkLength * iteration)
+ # (rows of consistent data) / (number of rows) = 100%
+ consistency = 1.0
+ # minimum consistency threshold
+ threshold = 0.9
+ while len(delims) == 0 and consistency >= threshold:
+ for k, v in modeList:
+ if v[0] > 0 and v[1] > 0:
+ if ((v[1]/total) >= consistency and
+ (delimiters is None or k in delimiters)):
+ delims[k] = v
+ consistency -= 0.01
+
+ if len(delims) == 1:
+ delim = delims.keys()[0]
+ skipinitialspace = (data[0].count(delim) ==
+ data[0].count("%c " % delim))
+ return (delim, skipinitialspace)
+
+ # analyze another chunkLength lines
+ start = end
+ end += chunkLength
+
+ if not delims:
+ return ('', 0)
+
+ # if there's more than one, fall back to a 'preferred' list
+ if len(delims) > 1:
+ for d in self.preferred:
+ if d in delims.keys():
+ skipinitialspace = (data[0].count(d) ==
+ data[0].count("%c " % d))
+ return (d, skipinitialspace)
+
+ # nothing else indicates a preference, pick the character that
+ # dominates(?)
+ items = [(v,k) for (k,v) in delims.items()]
+ items.sort()
+ delim = items[-1][1]
+
+ skipinitialspace = (data[0].count(delim) ==
+ data[0].count("%c " % delim))
+ return (delim, skipinitialspace)
+
+
+ def has_header(self, sample):
+ # Creates a dictionary of types of data in each column. If any
+ # column is of a single type (say, integers), *except* for the first
+ # row, then the first row is presumed to be labels. If the type
+ # can't be determined, it is assumed to be a string in which case
+ # the length of the string is the determining factor: if all of the
+ # rows except for the first are the same length, it's a header.
+ # Finally, a 'vote' is taken at the end for each column, adding or
+ # subtracting from the likelihood of the first row being a header.
+
+ rdr = reader(StringIO(sample), self.sniff(sample))
+
+ header = rdr.next() # assume first row is header
+
+ columns = len(header)
+ columnTypes = {}
+ for i in range(columns): columnTypes[i] = None
+
+ checked = 0
+ for row in rdr:
+ # arbitrary number of rows to check, to keep it sane
+ if checked > 20:
+ break
+ checked += 1
+
+ if len(row) != columns:
+ continue # skip rows that have irregular number of columns
+
+ for col in columnTypes.keys():
+
+ for thisType in [int, long, float, complex]:
+ try:
+ thisType(row[col])
+ break
+ except (ValueError, OverflowError):
+ pass
+ else:
+ # fallback to length of string
+ thisType = len(row[col])
+
+ # treat longs as ints
+ if thisType == long:
+ thisType = int
+
+ if thisType != columnTypes[col]:
+ if columnTypes[col] is None: # add new column type
+ columnTypes[col] = thisType
+ else:
+ # type is inconsistent, remove column from
+ # consideration
+ del columnTypes[col]
+
+ # finally, compare results against first row and "vote"
+ # on whether it's a header
+ hasHeader = 0
+ for col, colType in columnTypes.items():
+ if type(colType) == type(0): # it's a length
+ if len(header[col]) != colType:
+ hasHeader += 1
+ else:
+ hasHeader -= 1
+ else: # attempt typecast
+ try:
+ colType(header[col])
+ except (ValueError, TypeError):
+ hasHeader += 1
+ else:
+ hasHeader -= 1
+
+ return hasHeader > 0

diff --git a/addon/appModules/splstudio/splmisc.py b/addon/appModules/splstudio/splmisc.py
index 100d7f4..6a21289 100755
--- a/addon/appModules/splstudio/splmisc.py
+++ b/addon/appModules/splstudio/splmisc.py
@@ -4,8 +4,12 @@
# Miscellaneous functions and user interfaces
# Split from config module in 2015.

+# JL's disclaimer: Apart from CSV module, others in this folder are my modules. CSV is part of Python distribution (Copyright Python Software Foundation).
+
import ctypes
import weakref
+import os
+import csv # For cart explorer.
import gui
import wx
from NVDAObjects.IAccessible import sysListView32
@@ -129,3 +133,64 @@ class SPLFindDialog(wx.Dialog):
global _findDialogOpened
_findDialogOpened = False

+
+# Cart Explorer helper.
+
+def _populateCarts(carts, cartlst, modifier, standardEdition=False):
+	# The real cart string parser, a helper for cart explorer for building cart entries.
+	# 5.2: Discard number row if SPL Standard is in use.
+	if standardEdition: cartlst = cartlst[:12]
+	for entry in cartlst:
+		# An unassigned cart is stored with three consecutive commas, so skip it.
+		if ",,," in entry: continue
+		# Pos between 1 and 12 = function carts, 13 through 24 = number row carts, modifiers are checked.
+		pos = cartlst.index(entry)+1
+		# If a cart name has commas or other characters, SPL surrounds the cart name with quotes (""), so parse it as well.
+		if not entry.startswith('""'): cartName = entry.split(",")[0]
+		else: cartName = entry.split('""')[1]
+		if pos <= 12: identifier = "f%s"%(pos)
+		elif 12 < pos < 22: identifier = str(pos-12)
+		elif pos == 22: identifier = "0"
+		elif pos == 23: identifier = "-"
+		else: identifier = "="
+		if modifier == "main": cart = identifier
+		else: cart = "%s+%s"%(modifier, identifier)
+		carts[cart] = cartName
+
+# Initialize Cart Explorer i.e. fetch carts.
+# Cart files list is for future use when custom cart names are used.
+def cartExplorerInit(StudioTitle, cartFiles=None):
+	# Use cart files in SPL's data folder to build carts dictionary.
+	# use a combination of SPL user name and static cart location to locate cart bank files.
+	# Once the cart banks are located, use the routines in the populate method below to assign carts.
+	# Since sstandard edition does not support number row carts, skip them if told to do so.
+	carts = {"standardLicense":StudioTitle.startswith("StationPlaylist Studio Standard")}
+	# Obtain the "real" path for SPL via environment variables and open the cart data folder.
+	cartsDataPath = os.path.join(os.environ["PROGRAMFILES"],"StationPlaylist","Data") # Provided that Studio was installed using default path.
+	if cartFiles is None:
+		# See if multiple users are using SPl Studio.
+		userNameIndex = StudioTitle.find("-")
+		# Read *.cart files and process the cart entries within (be careful when these cart file names change between SPL releases).
+		# Until NVDA core moves to Python 3, assume that file names aren't unicode.
+		cartFiles = [u"main carts.cart", u"shift carts.cart", u"ctrl carts.cart", u"alt carts.cart"]
+		if userNameIndex >= 0:
+			cartFiles = [userName[userNameIndex+2:]+" "+cartFile for cartFile in cartFiles]
+	faultyCarts = 0
+	for f in cartFiles:
+		try:
+			mod = f.split()[-2] # Checking for modifier string such as ctrl.
+			# Todo: Check just in case some SPL flavors doesn't ship with a particular cart file.
+		except IndexError:
+			faultyCarts+=1 # In a rare event that the broadcaster has saved the cart bank with the name like "carts.cart".
+			continue
+		cartFile = os.path.join(cartsDataPath,f)
+		if not os.path.isfile(cartFile): # Cart explorer will fail if whitespaces are in the beginning or at the end of a user name.
+			faultyCarts+=1
+			continue
+		with open(cartFile) as cartInfo:
+			cartsCSV = csv.reader(cartInfo)
+			cl = [row for row in cartsCSV]
+			_populateCarts(carts, cl[1], mod, standardEdition=carts["standardLicense"]) # See the comment for _populate method above.
+	carts["faultyCarts"] = faultyCarts
+	return carts
+
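
For reference, a rough sketch of the dictionary cartExplorerInit is expected to
return, based on the routine above; the Studio title, cart names and counts
shown here are invented for illustration:

carts = splmisc.cartExplorerInit("StationPlaylist Studio Standard 5.10 - DemoUser")
# A hypothetical result might look like this:
# {
#	"standardLicense": True,   # Studio Standard was detected from the window title.
#	"faultyCarts": 0,          # Count of cart banks that could not be read.
#	"f1": "Station ID",        # Function-row carts are keyed "f1" through "f12".
#	"shift+2": "Weather bed",  # Other banks are keyed modifier+identifier.
# }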


https://bitbucket.org/nvdaaddonteam/stationplaylist/commits/4cc486ab0a6b/
Changeset: 4cc486ab0a6b
Branch: 6.0/cartExplorerR2
User: josephsl
Date: 2015-07-16 19:31:16+00:00
Summary: Cart Explorer R2 (6.0-dev): Added a timing procedure and reduced the
instruction count a bit in the splmisc version of the carts reader, along with
some corrections.
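
The timing procedure referred to above is a simple wall-clock measurement
around the script body (see the diff below). A minimal sketch of the same
pattern, with a hypothetical helper name:

import time

def timeCall(func, *args, **kwargs):
	# Report how long a single call takes, mirroring the t = time.time() ... print time.time()-t pattern below.
	t = time.time()
	result = func(*args, **kwargs)
	print "%s took %.3f seconds" % (func.__name__, time.time() - t)
	return result

# Example: time a throwaway call.
timeCall(sorted, range(100000))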

Affected #: 2 files

diff --git a/addon/appModules/splstudio/__init__.py b/addon/appModules/splstudio/__init__.py
index 0907d90..edfbc94 100755
--- a/addon/appModules/splstudio/__init__.py
+++ b/addon/appModules/splstudio/__init__.py
@@ -865,7 +865,7 @@ class AppModule(appModuleHandler.AppModule):
 			else: cart = "%s+%s"%(modifier, identifier)
 			self.carts[cart] = cartName
 
-	def cartsReader(self, standardEdition=False):
+	def cartsReader(self, userName, standardEdition=False):
 		# Use cart files in SPL's data folder to build carts dictionary.
 		# use a combination of SPL user name and static cart location to locate cart bank files.
 		# Once the cart banks are located, use the routines in the populate method below to assign carts.
@@ -890,7 +890,6 @@ class AppModule(appModuleHandler.AppModule):
 		# Obtain the "real" path for SPL via environment variables and open the cart data folder.
 		cartsDataPath = os.path.join(os.environ["PROGRAMFILES"],"StationPlaylist","Data") # Provided that Studio was installed using default path.
 		# See if multiple users are using SPl Studio.
-		userName = api.getForegroundObject().name
 		userNameIndex = userName.find("-")
 		# Read *.cart files and process the cart entries within (be careful when these cart file names change between SPL releases).
 		# Until NVDA core moves to Python 3, assume that file names aren't unicode.
@@ -917,6 +916,7 @@ class AppModule(appModuleHandler.AppModule):
 		return cartReadSuccess, cartCount
 
 	def script_toggleCartExplorer(self, gesture):
+		t = time.time()
 		if not self.cartExplorer:
 			# Prevent cart explorer from being engaged outside of playlist viewer.
 			# Todo for 6.0: Let users set cart banks.
@@ -925,9 +925,10 @@ class AppModule(appModuleHandler.AppModule):
 				# Translators: Presented when cart explorer cannot be entered.
 				ui.message(_("You are not in playlist viewer, cannot enter cart explorer"))
 				return
-			#cartsRead, cartCount = self.cartsReader(standardEdition=fg.name.startswith("StationPlaylist Studio Standard"))
+			#cartsRead, cartCount = self.cartsReader(fg.name, standardEdition=fg.name.startswith("StationPlaylist Studio Standard"))
 			self.carts = splmisc.cartExplorerInit(fg.name)
-			if self.carts["faultyCarts"]: #not cartsRead:
+			#if not cartsRead:
+			if self.carts["faultyCarts"]:
 				# Translators: presented when cart explorer could not be switched on.
 				ui.message(_("Some or all carts could not be assigned, cannot enter cart explorer"))
 				return
@@ -943,6 +944,7 @@ class AppModule(appModuleHandler.AppModule):
 			self.carts.clear()
 			# Translators: Presented when cart explorer is off.
 			ui.message(_("Exiting cart explorer"))
+		print time.time()-t
 	# Translators: Input help mode message for a command in Station Playlist Studio.
 	script_toggleCartExplorer.__doc__=_("Toggles cart explorer to learn cart assignments.")


diff --git a/addon/appModules/splstudio/splmisc.py b/addon/appModules/splstudio/splmisc.py
index 6a21289..5984c2a 100755
--- a/addon/appModules/splstudio/splmisc.py
+++ b/addon/appModules/splstudio/splmisc.py
@@ -174,22 +174,21 @@ def cartExplorerInit(StudioTitle, cartFiles=None):
 		# Until NVDA core moves to Python 3, assume that file names aren't unicode.
 		cartFiles = [u"main carts.cart", u"shift carts.cart", u"ctrl carts.cart", u"alt carts.cart"]
 		if userNameIndex >= 0:
-			cartFiles = [userName[userNameIndex+2:]+" "+cartFile for cartFile in cartFiles]
-	faultyCarts = 0
+			cartFiles = [StudioTitle[userNameIndex+2:]+" "+cartFile for cartFile in cartFiles]
+	faultyCarts = False
 	for f in cartFiles:
 		try:
 			mod = f.split()[-2] # Checking for modifier string such as ctrl.
 			# Todo: Check just in case some SPL flavors doesn't ship with a particular cart file.
 		except IndexError:
-			faultyCarts+=1 # In a rare event that the broadcaster has saved the cart bank with the name like "carts.cart".
+			faultyCarts = True # In a rare event that the broadcaster has saved the cart bank with the name like "carts.cart".
 			continue
 		cartFile = os.path.join(cartsDataPath,f)
 		if not os.path.isfile(cartFile): # Cart explorer will fail if whitespaces are in the beginning or at the end of a user name.
-			faultyCarts+=1
+			faultyCarts = True
 			continue
 		with open(cartFile) as cartInfo:
-			cartsCSV = csv.reader(cartInfo)
-			cl = [row for row in cartsCSV]
+			cl = [row for row in csv.reader(cartInfo)]
 			_populateCarts(carts, cl[1], mod, standardEdition=carts["standardLicense"]) # See the comment for _populate method above.
 	carts["faultyCarts"] = faultyCarts
 	return carts

Repository URL: https://bitbucket.org/nvdaaddonteam/stationplaylist/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
