Python source code examples: easydict.EasyDict()
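The snippets below are collected from open-source projects and show typical uses of easydict.EasyDict (frequently imported as edict). As background, here is a minimal sketch of the behaviour they all rely on; the keys are made up for illustration:

from easydict import EasyDict as edict

# A nested dict is converted recursively, so values can be read as attributes.
cfg = edict({'model': {'name': 'resnet', 'layers': 50}})
print(cfg.model.name)          # resnet

# Assigning a dict through an attribute converts it as well.
cfg.dataset = {'name': 'imagenet'}
print(cfg.dataset.name)        # imagenet

# EasyDict subclasses dict, so the usual dict interface still works.
print(cfg['model']['layers'])  # 50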

Example 1
def get_config_from_json(json_file):
    """
    Get the config from a json file
    :param json_file: the path of the config file
    :return: config(namespace), config(dictionary)
    """

    # parse the configurations from the config json file provided
    with open(json_file, 'r') as config_file:
        try:
            config_dict = json.load(config_file)
            # EasyDict allows to access dict values as attributes (works recursively).
            config = EasyDict(config_dict)
            return config, config_dict
        except ValueError:
            print("INVALID JSON file format.. Please provide a good json file")
            exit(-1) 
Example 2
def get_config(config_file, exp_dir=None):
  """ Construct and snapshot hyper parameters """
  with open(config_file, 'r') as f:
    config = edict(yaml.load(f, Loader=yaml.FullLoader))

  # create hyper parameters
  config.run_id = str(os.getpid())
  config.exp_name = '_'.join([
      config.model.name, config.dataset.name,
      time.strftime('%Y-%b-%d-%H-%M-%S'), config.run_id
  ])

  if exp_dir is not None:
    config.exp_dir = exp_dir

  config.save_dir = os.path.join(config.exp_dir, config.exp_name)

  # snapshot hyperparameters
  mkdir(config.exp_dir)
  mkdir(config.save_dir)

  save_name = os.path.join(config.save_dir, 'config.yaml')
  with open(save_name, 'w') as f:
    yaml.dump(edict2dict(config), f, default_flow_style=False)

  return config 
Example 3
def get_dataset_celeb(input_dir):
  clean_list_file = input_dir+"_clean_list.txt"
  ret = []
  dir2label = {}
  for line in open(clean_list_file, 'r'):
    line = line.strip()
    if not line.startswith('./m.'):
      continue
    line = line[2:]
    vec = line.split('/')
    assert len(vec)==2
    if vec[0] in dir2label:
      label = dir2label[vec[0]]
    else:
      label = len(dir2label)
      dir2label[vec[0]] = label

    fimage = edict()
    fimage.id = line
    fimage.classname = str(label)
    fimage.image_path = os.path.join(input_dir, fimage.id)
    ret.append(fimage)
  return ret 
Example 4
def get_dataset_facescrub(input_dir):
  ret = []
  label = 0
  person_names = []
  for person_name in os.listdir(input_dir):
    person_names.append(person_name)
  person_names = sorted(person_names)
  for person_name in person_names:
    subdir = os.path.join(input_dir, person_name)
    if not os.path.isdir(subdir):
      continue
    for _img in os.listdir(subdir):
      fimage = edict()
      fimage.id = os.path.join(person_name, _img)
      fimage.classname = str(label)
      fimage.image_path = os.path.join(subdir, _img)
      fimage.landmark = None
      fimage.bbox = None
      ret.append(fimage)
    label += 1
  return ret 
Example 5
def update_config(config_file):
    exp_config = None
    with open(config_file) as f:
        exp_config = edict(yaml.load(f, Loader=yaml.FullLoader))
        for k, v in exp_config.items():
            if k in config:
                if isinstance(v, dict):
                    if k == 'TRAIN':
                        if 'BBOX_WEIGHTS' in v:
                            v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])
                    elif k == 'network':
                        if 'PIXEL_MEANS' in v:
                            v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])
                    for vk, vv in v.items():
                        config[k][vk] = vv
                else:
                    if k == 'SCALES':
                        config[k][0] = (tuple(v))
                    else:
                        config[k] = v
            else:
                raise ValueError("key must exist in config.py") 
Example 6
def get_config_from_json(json_file):
    """
    Get the config from a json file
    Input:
        - json_file: json configuration file
    Return:
        - config: namespace
        - config_dict: dictionary
    """
    # parse the configurations from the config json file provided
    with open(json_file, 'r') as config_file:
        config_dict = json.load(config_file)

    # convert the dictionary to a namespace using bunch lib
    config = EasyDict(config_dict)

    return config, config_dict 
Example 7
def get_config_from_yaml(yaml_file):
    """
    Get the config from yaml file
    Input:
        - yaml_file: yaml configuration file
    Return:
        - config: namespace
        - config_dict: dictionary
    """

    with open(yaml_file) as fp:
        config_dict = yaml.load(fp, Loader=yaml.FullLoader)

    # convert the dictionary to a namespace using bunch lib
    config = EasyDict(config_dict)
    return config, config_dict 
Example 8
def _merge_a_into_b(a, b):
  """Merge config dictionary a into config dictionary b, clobbering the
  options in b whenever they are also specified in a.
  """
  if type(a) is not edict:
    return

  for k, v in a.items():
    # a must specify keys that are in b
    if k not in b:
      raise KeyError('{} is not a valid config key'.format(k))

    # the types must match, too
    old_type = type(b[k])
    if old_type is not type(v):
      if isinstance(b[k], np.ndarray):
        v = np.array(v, dtype=b[k].dtype)
      else:
        raise ValueError(('Type mismatch ({} vs. {}) '
                          'for config key: {}').format(type(b[k]),
                                                       type(v), k))

    # recursively merge dicts
    if type(v) is edict:
      try:
        _merge_a_into_b(a[k], b[k])
      except:
        print(('Error under config key: {}'.format(k)))
        raise
    else:
      b[k] = v 
Example 9
def cfg_from_file(filename):
  """Load a config file and merge it into the default options."""
  import yaml
  with open(filename, 'r') as f:
    yaml_cfg = edict(yaml.load(f, Loader=yaml.FullLoader))

  _merge_a_into_b(yaml_cfg, __C) 
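`__C` above is the module-level default configuration (itself an edict) that these projects define in their config.py; it is not shown in the snippet. A minimal sketch, with hypothetical default keys, of how cfg_from_file and _merge_a_into_b fit together:

import numpy as np
from easydict import EasyDict as edict

# Hypothetical defaults; the real config.py files define many more keys.
__C = edict()
__C.TRAIN = edict()
__C.TRAIN.LEARNING_RATE = 0.001
__C.TRAIN.BBOX_WEIGHTS = np.array([1.0, 1.0, 1.0, 1.0])

# cfg_from_file('experiment.yaml') would then overwrite the matching keys
# in __C: unknown keys raise KeyError, a value whose type differs from the
# default raises ValueError, and lists are converted to numpy arrays when
# the default is an ndarray.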
Example 10
def process_config(json_file):
    """
    Get the json file
    Processing it with EasyDict to be accessible as attributes
    then editing the path of the experiments folder
    creating some important directories in the experiment folder
    Then setup the logging in the whole program
    Then return the config
    :param json_file: the path of the config file
    :return: config object(namespace)
    """
    config, _ = get_config_from_json(json_file)
    print(" THE Configuration of your experiment ..")
    pprint(config)

    # making sure that you have provided the exp_name.
    try:
        print(" *************************************** ")
        print("The experiment name is {}".format(config.exp_name))
        print(" *************************************** ")
    except AttributeError:
        print("ERROR!!..Please provide the exp_name in json file..")
        exit(-1)

    # create some important directories to be used for that experiment.
    config.summary_dir = os.path.join("experiments", config.exp_name, "summaries/")
    config.checkpoint_dir = os.path.join("experiments", config.exp_name, "checkpoints/")
    config.out_dir = os.path.join("experiments", config.exp_name, "out/")
    config.log_dir = os.path.join("experiments", config.exp_name, "logs/")
    create_dirs([config.summary_dir, config.checkpoint_dir, config.out_dir, config.log_dir])

    # setup logging in the project
    setup_logging(config.log_dir)

    logging.getLogger().info("Hi, This is root.")
    logging.getLogger().info("After the configurations are successfully processed and dirs are created.")
    logging.getLogger().info("The pipeline of the project will begin now.")

    return config 
Example 11
def main():
    config = json.load(open('../../configs/dcgan_exp_0.json'))
    config = edict(config)
    inp  = torch.autograd.Variable(torch.randn(config.batch_size, config.g_input_size, 1, 1))
    print (inp.shape)
    netD = Generator(config)
    out = netD(inp)
    print (out.shape) 
Example 12
def main():
    config = json.load(open('../../configs/dcgan_exp_0.json'))
    config = edict(config)
    inp  = torch.autograd.Variable(torch.randn(config.batch_size, config.input_channels, config.image_size, config.image_size))
    print (inp.shape)
    netD = Discriminator(config)
    out = netD(inp)
    print (out) 
Example 13
def pad_collate(data):
    """Creates mini-batch tensors from the list of tuples (src_seq, trg_seq).
    """
    # separate source and target sequences
    batch = edict()
    batch["qas"], batch["qas_mask"] = pad_sequences_2d([d["qas"] for d in data], dtype=torch.long)
    batch["qas_bert"], _ = pad_sequences_2d([d["qas_bert"] for d in data], dtype=torch.float)
    batch["sub"], batch["sub_mask"] = pad_sequences_2d([d["sub"] for d in data], dtype=torch.long)
    batch["sub_bert"], _ = pad_sequences_2d([d["sub_bert"] for d in data], dtype=torch.float)
    batch["vid_name"] = [d["vid_name"] for d in data]
    batch["qid"] = [d["qid"] for d in data]
    batch["target"] = torch.tensor([d["target"] for d in data], dtype=torch.long)
    batch["vcpt"], batch["vcpt_mask"] = pad_sequences_2d([d["vcpt"] for d in data], dtype=torch.long)
    batch["vid"], batch["vid_mask"] = pad_sequences_2d([d["vfeat"] for d in data], dtype=torch.float)
    # no need to pad these two, since we will break down to instances anyway
    batch["att_labels"] = [d["att_labels"] for d in data]  # a list, each will be (num_img, num_words)
    batch["anno_st_idx"] = [d["anno_st_idx"] for d in data]  # list(int)
    if data[0]["ts_label"] is None:
        batch["ts_label"] = None
    elif isinstance(data[0]["ts_label"], list):  # (st_ed, ce)
        batch["ts_label"] = dict(
            st=torch.LongTensor([d["ts_label"][0] for d in data]),
            ed=torch.LongTensor([d["ts_label"][1] for d in data]),
        )
        batch["ts_label_mask"] = make_mask_from_length([len(d["image_indices"]) for d in data])
    elif isinstance(data[0]["ts_label"], torch.Tensor):  # (st_ed, bce) or frm
        batch["ts_label"], batch["ts_label_mask"] = pad_sequences_1d([d["ts_label"] for d in data], dtype=torch.float)
    else:
        raise NotImplementedError

    batch["ts"] = [d["ts"] for d in data]
    batch["image_indices"] = [d["image_indices"] for d in data]
    batch["q_l"] = [d["q_l"] for d in data]

    batch["boxes"] = [d["boxes"] for d in data]
    batch["object_labels"] = [d["object_labels"] for d in data]
    return batch 
Example 14
def edict2dict(edict_obj):
  dict_obj = {}

  for key, vals in edict_obj.items():
    if isinstance(vals, edict):
      dict_obj[key] = edict2dict(vals)
    else:
      dict_obj[key] = vals

  return dict_obj 
Example 15
def __init__(self, args):
    self.args = args
    model = edict()

    self.threshold = args.threshold
    self.det_minsize = 50
    self.det_threshold = [0.4,0.6,0.6]
    self.det_factor = 0.9
    _vec = args.image_size.split(',')
    assert len(_vec)==2
    image_size = (int(_vec[0]), int(_vec[1]))
    self.image_size = image_size
    _vec = args.model.split(',')
    assert len(_vec)==2
    prefix = _vec[0]
    epoch = int(_vec[1])
    print('loading',prefix, epoch)
    ctx = mx.gpu(args.gpu)
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    all_layers = sym.get_internals()
    sym = all_layers['fc1_output']
    model = mx.mod.Module(symbol=sym, context=ctx, label_names = None)
    #model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])
    model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
    model.set_params(arg_params, aux_params)
    self.model = model
    mtcnn_path = os.path.join(os.path.dirname(__file__), 'mtcnn-model')
    detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark = True, threshold=[0.0,0.0,0.2])
    self.detector = detector 
Example 16
def __init__(self, args):
    model = edict()
    with tf.Graph().as_default():
      config = tf.ConfigProto()
      config.gpu_options.per_process_gpu_memory_fraction = 0.2
      sess = tf.Session(config=config)
      #sess = tf.Session()
      with sess.as_default():
        self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(sess, None)

    self.threshold = args.threshold
    self.det_minsize = 50
    self.det_threshold = [0.4,0.6,0.6]
    self.det_factor = 0.9
    _vec = args.image_size.split(',')
    assert len(_vec)==2
    self.image_size = (int(_vec[0]), int(_vec[1]))
    _vec = args.model.split(',')
    assert len(_vec)==2
    prefix = _vec[0]
    epoch = int(_vec[1])
    print('loading',prefix, epoch)
    self.model = edict()
    self.model.ctx = mx.gpu(args.gpu)
    self.model.sym, self.model.arg_params, self.model.aux_params = mx.model.load_checkpoint(prefix, epoch)
    self.model.arg_params, self.model.aux_params = ch_dev(self.model.arg_params, self.model.aux_params, self.model.ctx)
    all_layers = self.model.sym.get_internals()
    self.model.sym = all_layers['fc1_output'] 
Example 17
def read_list(path_in):
    with open(path_in) as fin:
        identities = []
        last = [-1, -1]
        _id = 1
        while True:
            line = fin.readline()
            if not line:
                break
            item = edict()
            item.flag = 0
            item.image_path, label, item.bbox, item.landmark, item.aligned = face_preprocess.parse_lst_line(line)
            if not item.aligned and item.landmark is None:
              #print('ignore line', line)
              continue
            item.id = _id
            item.label = [label, item.aligned]
            yield item
            if label!=last[0]:
              if last[1]>=0:
                identities.append( (last[1], _id) )
              last[0] = label
              last[1] = _id
            _id+=1
        identities.append( (last[1], _id) )
        item = edict()
        item.flag = 2
        item.id = 0
        item.label = [float(_id), float(_id+len(identities))]
        yield item
        for identity in identities:
          item = edict()
          item.flag = 2
          item.id = _id
          _id+=1
          item.label = [float(identity[0]), float(identity[1])]
          yield item 
Example 18
def read_label(path_in):
  identities = []
  last = [-1, -1]
  _id = 1
  dir2label = {}
  for line in open(path_in, 'r'):
    line = line.strip().split()
    item = edict()
    item.flag = 0
    item.image_path = os.path.join(args.input, 'images', line[0])
    image_dir = line[0].split('/')[0]
    if image_dir in dir2label:
      label = dir2label[image_dir]
    else:
      label = len(dir2label)
      dir2label[image_dir] = label
    item.bbox = np.array( [float(x) for x in line[1:5]], dtype=np.float32 )
    item.landmark = np.array( [float(x) for x in line[5:15]], dtype=np.float32 ).reshape( (5,2) )
    item.aligned = False
    item.id = _id
    item.label = label
    yield item
    if label!=last[0]:
      if last[1]>=0:
        identities.append( (last[1], _id) )
      last[0] = label
      last[1] = _id
    _id+=1
  identities.append( (last[1], _id) )
  item = edict()
  item.flag = 2
  item.id = 0
  item.label = [float(_id), float(_id+len(identities))]
  yield item
  for identity in identities:
    item = edict()
    item.flag = 2
    item.id = _id
    _id+=1
    item.label = [float(identity[0]), float(identity[1])]
    yield item 
Example 19
def get_dataset_webface(input_dir):
  clean_list_file = input_dir+"_clean_list.txt"
  ret = []
  for line in open(clean_list_file, 'r'):
    vec = line.strip().split()
    assert len(vec)==2
    fimage = edict()
    fimage.id = vec[0].replace("\\", '/')
    fimage.classname = vec[1]
    fimage.image_path = os.path.join(input_dir, fimage.id)
    ret.append(fimage)
  return ret 
Example 20
def _get_dataset_celeb(input_dir):
  list_file = input_dir+"_original_list.txt"
  ret = []
  for line in open(list_file, 'r'):
    vec = line.strip().split()
    assert len(vec)==2
    fimage = edict()
    fimage.id = vec[0]
    fimage.classname = vec[1]
    fimage.image_path = os.path.join(input_dir, fimage.id)
    ret.append(fimage)
  return ret 
Example 21
def get_dataset_ytf(input_dir):
  ret = []
  label = 0
  person_names = []
  for person_name in os.listdir(input_dir):
    person_names.append(person_name)
  person_names = sorted(person_names)
  for person_name in person_names:
    _subdir = os.path.join(input_dir, person_name)
    if not os.path.isdir(_subdir):
      continue
    for _subdir2 in os.listdir(_subdir):
      _subdir2 = os.path.join(_subdir, _subdir2)
      if not os.path.isdir(_subdir2):
        continue
      _ret = []
      for img in os.listdir(_subdir2):
        fimage = edict()
        fimage.id = os.path.join(_subdir2, img)
        fimage.classname = str(label)
        fimage.image_path = os.path.join(_subdir2, img)
        fimage.bbox = None
        fimage.landmark = None
        _ret.append(fimage)
      ret += _ret
    label+=1
  return ret 
Example 22
def get_dataset_clfw(input_dir):
  ret = []
  label = 0
  for img in os.listdir(input_dir):
    fimage = edict()
    fimage.id = img
    fimage.classname = str(0)
    fimage.image_path = os.path.join(input_dir, img)
    fimage.bbox = None
    fimage.landmark = None
    ret.append(fimage)
  return ret 
Example 23
def get_dataset_common(input_dir, min_images = 1):
  ret = []
  label = 0
  person_names = []
  for person_name in os.listdir(input_dir):
    person_names.append(person_name)
  person_names = sorted(person_names)
  for person_name in person_names:
    _subdir = os.path.join(input_dir, person_name)
    if not os.path.isdir(_subdir):
      continue
    _ret = []
    for img in os.listdir(_subdir):
      if not img.endswith('.jpg') and not img.endswith('.png'):
          continue
      fimage = edict()
      fimage.id = os.path.join(person_name, img)
      fimage.classname = str(label)
      fimage.image_path = os.path.join(_subdir, img)
      fimage.bbox = None
      fimage.landmark = None
      _ret.append(fimage)
    if len(_ret)>=min_images:
      ret += _ret
      label+=1
  return ret 
Example 24
def __getitem__(self, key):
    """ Get sequences and annotations pairs."""
    if isinstance(key,str):
      sid = self._keys[key]
    elif isinstance(key,int):
      sid = key
    else:
      raise InputError()

    return edict({
      'images'  : self.sequences[sid],
      'annotations': self.annotations[sid]
      }) 
Example 25
def db_read_info():
    """ Read dataset properties from file."""
    with open(cfg.FILES.DB_INFO,'r') as f:
        return edict(yaml.load(f, Loader=yaml.FullLoader))
Example 26
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.
    """
    if type(a) is not edict:
        return

    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b.keys():
            raise KeyError('{} is not a valid config key'.format(k))

        # the types must match, too
        if type(b[k]) is not type(v):
            raise ValueError(('Type mismatch ({} vs. {}) '
                              'for config key: {}').format(type(b[k]), type(v), k))

        # recursively merge dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except:
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v 
Example 27
def cfg_from_file(filename):
    """Load a config file and merge it into the default options."""
    import yaml
    with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.load(f, Loader=yaml.FullLoader))

    _merge_a_into_b(yaml_cfg, __C)